gcc/cfgexpand.c
1 /* A pass for lowering trees to RTL.
2 Copyright (C) 2004-2019 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
9 any later version.
10
11 GCC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "target.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "gimple.h"
28 #include "cfghooks.h"
29 #include "tree-pass.h"
30 #include "memmodel.h"
31 #include "tm_p.h"
32 #include "ssa.h"
33 #include "optabs.h"
34 #include "regs.h" /* For reg_renumber. */
35 #include "emit-rtl.h"
36 #include "recog.h"
37 #include "cgraph.h"
38 #include "diagnostic.h"
39 #include "fold-const.h"
40 #include "varasm.h"
41 #include "stor-layout.h"
42 #include "stmt.h"
43 #include "print-tree.h"
44 #include "cfgrtl.h"
45 #include "cfganal.h"
46 #include "cfgbuild.h"
47 #include "cfgcleanup.h"
48 #include "dojump.h"
49 #include "explow.h"
50 #include "calls.h"
51 #include "expr.h"
52 #include "internal-fn.h"
53 #include "tree-eh.h"
54 #include "gimple-iterator.h"
55 #include "gimple-expr.h"
56 #include "gimple-walk.h"
57 #include "tree-cfg.h"
58 #include "tree-dfa.h"
59 #include "tree-ssa.h"
60 #include "except.h"
61 #include "gimple-pretty-print.h"
62 #include "toplev.h"
63 #include "debug.h"
64 #include "params.h"
65 #include "tree-inline.h"
66 #include "value-prof.h"
67 #include "tree-ssa-live.h"
68 #include "tree-outof-ssa.h"
69 #include "cfgloop.h"
70 #include "insn-attr.h" /* For INSN_SCHEDULING. */
71 #include "stringpool.h"
72 #include "attribs.h"
73 #include "asan.h"
74 #include "tree-ssa-address.h"
75 #include "output.h"
76 #include "builtins.h"
77
78 /* Some systems use __main in a way incompatible with its use in gcc; in these
79 cases use the macros NAME__MAIN to give a quoted symbol and SYMBOL__MAIN to
80 give the same symbol without quotes for an alternative entry point. You
81 must define both, or neither. */
82 #ifndef NAME__MAIN
83 #define NAME__MAIN "__main"
84 #endif
85
86 /* This variable holds information helping the rewriting of SSA trees
87 into RTL. */
88 struct ssaexpand SA;
89
90 /* This variable holds the currently expanded gimple statement for purposes
91 of communicating the profile info to the builtin expanders. */
92 gimple *currently_expanding_gimple_stmt;
93
94 static rtx expand_debug_expr (tree);
95
96 static bool defer_stack_allocation (tree, bool);
97
98 static void record_alignment_for_reg_var (unsigned int);
99
100 /* Return an expression tree corresponding to the RHS of GIMPLE
101 statement STMT. */
102
103 tree
104 gimple_assign_rhs_to_tree (gimple *stmt)
105 {
106 tree t;
107 switch (get_gimple_rhs_class (gimple_expr_code (stmt)))
108 {
109 case GIMPLE_TERNARY_RHS:
110 t = build3 (gimple_assign_rhs_code (stmt),
111 TREE_TYPE (gimple_assign_lhs (stmt)),
112 gimple_assign_rhs1 (stmt), gimple_assign_rhs2 (stmt),
113 gimple_assign_rhs3 (stmt));
114 break;
115 case GIMPLE_BINARY_RHS:
116 t = build2 (gimple_assign_rhs_code (stmt),
117 TREE_TYPE (gimple_assign_lhs (stmt)),
118 gimple_assign_rhs1 (stmt), gimple_assign_rhs2 (stmt));
119 break;
120 case GIMPLE_UNARY_RHS:
121 t = build1 (gimple_assign_rhs_code (stmt),
122 TREE_TYPE (gimple_assign_lhs (stmt)),
123 gimple_assign_rhs1 (stmt));
124 break;
125 case GIMPLE_SINGLE_RHS:
126 {
127 t = gimple_assign_rhs1 (stmt);
128 /* Avoid modifying this tree in place below. */
129 if ((gimple_has_location (stmt) && CAN_HAVE_LOCATION_P (t)
130 && gimple_location (stmt) != EXPR_LOCATION (t))
131 || (gimple_block (stmt) && currently_expanding_to_rtl
132 && EXPR_P (t)))
133 t = copy_node (t);
134 break;
135 }
136 default:
137 gcc_unreachable ();
138 }
139
140 if (gimple_has_location (stmt) && CAN_HAVE_LOCATION_P (t))
141 SET_EXPR_LOCATION (t, gimple_location (stmt));
142
143 return t;
144 }
145
146
147 #ifndef STACK_ALIGNMENT_NEEDED
148 #define STACK_ALIGNMENT_NEEDED 1
149 #endif
150
151 #define SSAVAR(x) (TREE_CODE (x) == SSA_NAME ? SSA_NAME_VAR (x) : x)
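
/* For illustration only (not part of the pass): for the default-def SSA_NAME
   of a PARM_DECL P, SSAVAR yields the underlying decl, so
   SSAVAR (ssa_default_def (cfun, P)) == P, while for a plain VAR_DECL V,
   SSAVAR (V) is just V itself.  */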
152
153 /* Choose either CUR or NEXT as the leader DECL for a partition.
154 Prefer ignored decls, to simplify debug dumps and reduce the ambiguity
155 that comes from the same user variable being in multiple partitions
156 (this is less likely for compiler-introduced temps). */
157
158 static tree
159 leader_merge (tree cur, tree next)
160 {
161 if (cur == NULL || cur == next)
162 return next;
163
164 if (DECL_P (cur) && DECL_IGNORED_P (cur))
165 return cur;
166
167 if (DECL_P (next) && DECL_IGNORED_P (next))
168 return next;
169
170 return cur;
171 }
172
173 /* Associate declaration T with storage space X. If T is not an
174 SSA name, this is exactly SET_DECL_RTL; otherwise make the
175 partition of T associated with X. */
176 static inline void
177 set_rtl (tree t, rtx x)
178 {
179 gcc_checking_assert (!x
180 || !(TREE_CODE (t) == SSA_NAME || is_gimple_reg (t))
181 || (use_register_for_decl (t)
182 ? (REG_P (x)
183 || (GET_CODE (x) == CONCAT
184 && (REG_P (XEXP (x, 0))
185 || SUBREG_P (XEXP (x, 0)))
186 && (REG_P (XEXP (x, 1))
187 || SUBREG_P (XEXP (x, 1))))
188 /* We need to accept PARALLELs for RESULT_DECLs
189 because of vector types with BLKmode returned
190 in multiple registers, but they are supposed
191 to be uncoalesced. */
192 || (GET_CODE (x) == PARALLEL
193 && SSAVAR (t)
194 && TREE_CODE (SSAVAR (t)) == RESULT_DECL
195 && (GET_MODE (x) == BLKmode
196 || !flag_tree_coalesce_vars)))
197 : (MEM_P (x) || x == pc_rtx
198 || (GET_CODE (x) == CONCAT
199 && MEM_P (XEXP (x, 0))
200 && MEM_P (XEXP (x, 1))))));
201 /* Check that the RTL for SSA_NAMEs and gimple-reg PARM_DECLs and
202 RESULT_DECLs has the expected mode. For memory, we accept
203 unpromoted modes, since that's what we're likely to get. For
204 PARM_DECLs and RESULT_DECLs, we'll have been called by
205 set_parm_rtl, which will give us the default def, so we don't
206 have to compute it ourselves. For RESULT_DECLs, we accept mode
207 mismatches too, as long as we have BLKmode or are not coalescing
208 across variables, so that we don't reject BLKmode PARALLELs or
209 unpromoted REGs. */
210 gcc_checking_assert (!x || x == pc_rtx || TREE_CODE (t) != SSA_NAME
211 || (SSAVAR (t)
212 && TREE_CODE (SSAVAR (t)) == RESULT_DECL
213 && (promote_ssa_mode (t, NULL) == BLKmode
214 || !flag_tree_coalesce_vars))
215 || !use_register_for_decl (t)
216 || GET_MODE (x) == promote_ssa_mode (t, NULL));
217
218 if (x)
219 {
220 bool skip = false;
221 tree cur = NULL_TREE;
222 rtx xm = x;
223
224 retry:
225 if (MEM_P (xm))
226 cur = MEM_EXPR (xm);
227 else if (REG_P (xm))
228 cur = REG_EXPR (xm);
229 else if (SUBREG_P (xm))
230 {
231 gcc_assert (subreg_lowpart_p (xm));
232 xm = SUBREG_REG (xm);
233 goto retry;
234 }
235 else if (GET_CODE (xm) == CONCAT)
236 {
237 xm = XEXP (xm, 0);
238 goto retry;
239 }
240 else if (GET_CODE (xm) == PARALLEL)
241 {
242 xm = XVECEXP (xm, 0, 0);
243 gcc_assert (GET_CODE (xm) == EXPR_LIST);
244 xm = XEXP (xm, 0);
245 goto retry;
246 }
247 else if (xm == pc_rtx)
248 skip = true;
249 else
250 gcc_unreachable ();
251
252 tree next = skip ? cur : leader_merge (cur, SSAVAR (t) ? SSAVAR (t) : t);
253
254 if (cur != next)
255 {
256 if (MEM_P (x))
257 set_mem_attributes (x,
258 next && TREE_CODE (next) == SSA_NAME
259 ? TREE_TYPE (next)
260 : next, true);
261 else
262 set_reg_attrs_for_decl_rtl (next, x);
263 }
264 }
265
266 if (TREE_CODE (t) == SSA_NAME)
267 {
268 int part = var_to_partition (SA.map, t);
269 if (part != NO_PARTITION)
270 {
271 if (SA.partition_to_pseudo[part])
272 gcc_assert (SA.partition_to_pseudo[part] == x);
273 else if (x != pc_rtx)
274 SA.partition_to_pseudo[part] = x;
275 }
276 /* For the benefit of debug information at -O0 (where
277 vartracking doesn't run) record the place also in the base
278 DECL. For PARMs and RESULTs, do so only when setting the
279 default def. */
280 if (x && x != pc_rtx && SSA_NAME_VAR (t)
281 && (VAR_P (SSA_NAME_VAR (t))
282 || SSA_NAME_IS_DEFAULT_DEF (t)))
283 {
284 tree var = SSA_NAME_VAR (t);
285 /* If we don't yet have something recorded, just record it now. */
286 if (!DECL_RTL_SET_P (var))
287 SET_DECL_RTL (var, x);
288 /* If we have it set already to "multiple places" don't
289 change this. */
290 else if (DECL_RTL (var) == pc_rtx)
291 ;
292 /* If we have something recorded and it's not the same place
293 as we want to record now, we have multiple partitions for the
294 same base variable, with different places. We can't just
295 randomly choose one, hence we have to say that we don't know.
296 This only happens with optimization, and there var-tracking
297 will figure out the right thing. */
298 else if (DECL_RTL (var) != x)
299 SET_DECL_RTL (var, pc_rtx);
300 }
301 }
302 else
303 SET_DECL_RTL (t, x);
304 }
305
306 /* This structure holds data relevant to one variable that will be
307 placed in a stack slot. */
308 class stack_var
309 {
310 public:
311 /* The variable. */
312 tree decl;
313
314 /* Initially, the size of the variable. Later, the size of the partition,
315 if this variable becomes its partition's representative. */
316 poly_uint64 size;
317
318 /* The *byte* alignment required for this variable. Or, as with the
319 size, the alignment for this partition. */
320 unsigned int alignb;
321
322 /* The partition representative. */
323 size_t representative;
324
325 /* The next stack variable in the partition, or EOC. */
326 size_t next;
327
328 /* The numbers of conflicting stack variables. */
329 bitmap conflicts;
330 };
331
332 #define EOC ((size_t)-1)
333
334 /* We have an array of such objects while deciding allocation. */
335 static class stack_var *stack_vars;
336 static size_t stack_vars_alloc;
337 static size_t stack_vars_num;
338 static hash_map<tree, size_t> *decl_to_stack_part;
339
340 /* Conflict bitmaps go on this obstack. This allows us to destroy
341 all of them in one big sweep. */
342 static bitmap_obstack stack_var_bitmap_obstack;
343
344 /* An array of indices into stack_vars, sorted by stack_var_cmp: objects
345 with "large" alignment first, then by decreasing size. */
346 static size_t *stack_vars_sorted;
347
348 /* The phase of the stack frame. This is the known misalignment of
349 virtual_stack_vars_rtx from PREFERRED_STACK_BOUNDARY. That is,
350 (frame_offset+frame_phase) % PREFERRED_STACK_BOUNDARY == 0. */
351 static int frame_phase;
352
353 /* Used during expand_used_vars to remember if we saw any decls for
354 which we'd like to enable stack smashing protection. */
355 static bool has_protected_decls;
356
357 /* Used during expand_used_vars. Remember if we saw a character buffer
358 smaller than our cutoff threshold. Used for -Wstack-protector. */
359 static bool has_short_buffer;
360
361 /* Compute the byte alignment to use for DECL. Ignore the alignment
362 we can't honor given the expected alignment of the stack boundary. */
363
364 static unsigned int
365 align_local_variable (tree decl, bool really_expand)
366 {
367 unsigned int align;
368
369 if (TREE_CODE (decl) == SSA_NAME)
370 align = TYPE_ALIGN (TREE_TYPE (decl));
371 else
372 {
373 align = LOCAL_DECL_ALIGNMENT (decl);
374 /* Don't change DECL_ALIGN when called from estimated_stack_frame_size.
375 That is done before IPA and could bump alignment based on host
376 backend even for offloaded code which wants different
377 LOCAL_DECL_ALIGNMENT. */
378 if (really_expand)
379 SET_DECL_ALIGN (decl, align);
380 }
381 return align / BITS_PER_UNIT;
382 }
383
384 /* Align the given offset BASE to ALIGN. Round up if ALIGN_UP is true,
385 round down otherwise. Return the aligned BASE value. */
386
387 static inline unsigned HOST_WIDE_INT
388 align_base (HOST_WIDE_INT base, unsigned HOST_WIDE_INT align, bool align_up)
389 {
390 return align_up ? (base + align - 1) & -align : base & -align;
391 }
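
/* A worked example (illustrative only, assuming ALIGN is a power of two):
   -align is the two's-complement mask ~(align - 1), so
     align_base (53, 16, true)  == (53 + 15) & -16 == 64, and
     align_base (53, 16, false) ==  53       & -16 == 48.  */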
392
393 /* Allocate SIZE bytes at byte alignment ALIGN from the stack frame.
394 Return the frame offset. */
395
396 static poly_int64
397 alloc_stack_frame_space (poly_int64 size, unsigned HOST_WIDE_INT align)
398 {
399 poly_int64 offset, new_frame_offset;
400
401 if (FRAME_GROWS_DOWNWARD)
402 {
403 new_frame_offset
404 = aligned_lower_bound (frame_offset - frame_phase - size,
405 align) + frame_phase;
406 offset = new_frame_offset;
407 }
408 else
409 {
410 new_frame_offset
411 = aligned_upper_bound (frame_offset - frame_phase,
412 align) + frame_phase;
413 offset = new_frame_offset;
414 new_frame_offset += size;
415 }
416 frame_offset = new_frame_offset;
417
418 if (frame_offset_overflow (frame_offset, cfun->decl))
419 frame_offset = offset = 0;
420
421 return offset;
422 }
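
/* Illustrative only: on a FRAME_GROWS_DOWNWARD target with frame_offset == 0
   and frame_phase == 0, alloc_stack_frame_space (12, 8) sets frame_offset to
   -16 and returns -16, i.e. the new slot lives at virtual_stack_vars_rtx - 16.
   On an upward-growing frame the same call returns 0 and leaves frame_offset
   at 12.  */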
423
424 /* Accumulate DECL into STACK_VARS. */
425
426 static void
427 add_stack_var (tree decl, bool really_expand)
428 {
429 class stack_var *v;
430
431 if (stack_vars_num >= stack_vars_alloc)
432 {
433 if (stack_vars_alloc)
434 stack_vars_alloc = stack_vars_alloc * 3 / 2;
435 else
436 stack_vars_alloc = 32;
437 stack_vars
438 = XRESIZEVEC (class stack_var, stack_vars, stack_vars_alloc);
439 }
440 if (!decl_to_stack_part)
441 decl_to_stack_part = new hash_map<tree, size_t>;
442
443 v = &stack_vars[stack_vars_num];
444 decl_to_stack_part->put (decl, stack_vars_num);
445
446 v->decl = decl;
447 tree size = TREE_CODE (decl) == SSA_NAME
448 ? TYPE_SIZE_UNIT (TREE_TYPE (decl))
449 : DECL_SIZE_UNIT (decl);
450 v->size = tree_to_poly_uint64 (size);
451 /* Ensure that all variables have size, so that &a != &b for any two
452 variables that are simultaneously live. */
453 if (known_eq (v->size, 0U))
454 v->size = 1;
455 v->alignb = align_local_variable (decl, really_expand);
456 /* An alignment of zero can mightily confuse us later. */
457 gcc_assert (v->alignb != 0);
458
459 /* All variables are initially in their own partition. */
460 v->representative = stack_vars_num;
461 v->next = EOC;
462
463 /* All variables initially conflict with no other. */
464 v->conflicts = NULL;
465
466 /* Ensure that this decl doesn't get put onto the list twice. */
467 set_rtl (decl, pc_rtx);
468
469 stack_vars_num++;
470 }
471
472 /* Make the decls associated with stack-var indices X and Y conflict. */
473
474 static void
475 add_stack_var_conflict (size_t x, size_t y)
476 {
477 class stack_var *a = &stack_vars[x];
478 class stack_var *b = &stack_vars[y];
479 if (x == y)
480 return;
481 if (!a->conflicts)
482 a->conflicts = BITMAP_ALLOC (&stack_var_bitmap_obstack);
483 if (!b->conflicts)
484 b->conflicts = BITMAP_ALLOC (&stack_var_bitmap_obstack);
485 bitmap_set_bit (a->conflicts, y);
486 bitmap_set_bit (b->conflicts, x);
487 }
488
489 /* Check whether the decls associated with stack-var indices X and Y conflict. */
490
491 static bool
492 stack_var_conflict_p (size_t x, size_t y)
493 {
494 class stack_var *a = &stack_vars[x];
495 class stack_var *b = &stack_vars[y];
496 if (x == y)
497 return false;
498 /* Partitions containing an SSA name result from gimple registers
499 with things like unsupported modes. They are top-level and
500 hence conflict with everything else. */
501 if (TREE_CODE (a->decl) == SSA_NAME || TREE_CODE (b->decl) == SSA_NAME)
502 return true;
503
504 if (!a->conflicts || !b->conflicts)
505 return false;
506 return bitmap_bit_p (a->conflicts, y);
507 }
508
509 /* Callback for walk_stmt_ops. If OP is a decl touched by add_stack_var
510 enter its partition number into bitmap DATA. */
511
512 static bool
513 visit_op (gimple *, tree op, tree, void *data)
514 {
515 bitmap active = (bitmap)data;
516 op = get_base_address (op);
517 if (op
518 && DECL_P (op)
519 && DECL_RTL_IF_SET (op) == pc_rtx)
520 {
521 size_t *v = decl_to_stack_part->get (op);
522 if (v)
523 bitmap_set_bit (active, *v);
524 }
525 return false;
526 }
527
528 /* Callback for walk_stmt_ops. If OP is a decl touched by add_stack_var
529 record conflicts between it and all currently active other partitions
530 from bitmap DATA. */
531
532 static bool
533 visit_conflict (gimple *, tree op, tree, void *data)
534 {
535 bitmap active = (bitmap)data;
536 op = get_base_address (op);
537 if (op
538 && DECL_P (op)
539 && DECL_RTL_IF_SET (op) == pc_rtx)
540 {
541 size_t *v = decl_to_stack_part->get (op);
542 if (v && bitmap_set_bit (active, *v))
543 {
544 size_t num = *v;
545 bitmap_iterator bi;
546 unsigned i;
547 gcc_assert (num < stack_vars_num);
548 EXECUTE_IF_SET_IN_BITMAP (active, 0, i, bi)
549 add_stack_var_conflict (num, i);
550 }
551 }
552 return false;
553 }
554
555 /* Helper routine for add_scope_conflicts, calculating the active partitions
556 at the end of BB, leaving the result in WORK. We're called to generate
557 conflicts when FOR_CONFLICT is true, otherwise we're just tracking
558 liveness. */
559
560 static void
561 add_scope_conflicts_1 (basic_block bb, bitmap work, bool for_conflict)
562 {
563 edge e;
564 edge_iterator ei;
565 gimple_stmt_iterator gsi;
566 walk_stmt_load_store_addr_fn visit;
567
568 bitmap_clear (work);
569 FOR_EACH_EDGE (e, ei, bb->preds)
570 bitmap_ior_into (work, (bitmap)e->src->aux);
571
572 visit = visit_op;
573
574 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
575 {
576 gimple *stmt = gsi_stmt (gsi);
577 walk_stmt_load_store_addr_ops (stmt, work, NULL, NULL, visit);
578 }
579 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
580 {
581 gimple *stmt = gsi_stmt (gsi);
582
583 if (gimple_clobber_p (stmt))
584 {
585 tree lhs = gimple_assign_lhs (stmt);
586 size_t *v;
587 /* Nested function lowering might introduce LHSs
588 that are COMPONENT_REFs. */
589 if (!VAR_P (lhs))
590 continue;
591 if (DECL_RTL_IF_SET (lhs) == pc_rtx
592 && (v = decl_to_stack_part->get (lhs)))
593 bitmap_clear_bit (work, *v);
594 }
595 else if (!is_gimple_debug (stmt))
596 {
597 if (for_conflict
598 && visit == visit_op)
599 {
600 /* If this is the first real instruction in this BB we need
601 to add conflicts for everything live at this point now.
602 Unlike classical liveness for named objects we can't
603 rely on seeing a def/use of the names we're interested in.
604 There might merely be indirect loads/stores. We'd not add any
605 conflicts for such partitions. */
606 bitmap_iterator bi;
607 unsigned i;
608 EXECUTE_IF_SET_IN_BITMAP (work, 0, i, bi)
609 {
610 class stack_var *a = &stack_vars[i];
611 if (!a->conflicts)
612 a->conflicts = BITMAP_ALLOC (&stack_var_bitmap_obstack);
613 bitmap_ior_into (a->conflicts, work);
614 }
615 visit = visit_conflict;
616 }
617 walk_stmt_load_store_addr_ops (stmt, work, visit, visit, visit);
618 }
619 }
620 }
621
622 /* Generate stack partition conflicts between all partitions that are
623 simultaneously live. */
624
625 static void
626 add_scope_conflicts (void)
627 {
628 basic_block bb;
629 bool changed;
630 bitmap work = BITMAP_ALLOC (NULL);
631 int *rpo;
632 int n_bbs;
633
634 /* We approximate the live range of a stack variable by taking the first
635 mention of its name as starting point(s), and by the end-of-scope
636 death clobber added by gimplify as ending point(s) of the range.
637 This overapproximates in the case where we, for instance, moved an
638 address-taken operation upward without also moving a dereference of it.
639 But it is conservatively correct, as a variable can never hold values
640 before its name is mentioned at least once.
641
642 We then do a mostly classical bitmap liveness algorithm. */
643
644 FOR_ALL_BB_FN (bb, cfun)
645 bb->aux = BITMAP_ALLOC (&stack_var_bitmap_obstack);
646
647 rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
648 n_bbs = pre_and_rev_post_order_compute (NULL, rpo, false);
649
650 changed = true;
651 while (changed)
652 {
653 int i;
654 changed = false;
655 for (i = 0; i < n_bbs; i++)
656 {
657 bitmap active;
658 bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]);
659 active = (bitmap)bb->aux;
660 add_scope_conflicts_1 (bb, work, false);
661 if (bitmap_ior_into (active, work))
662 changed = true;
663 }
664 }
665
666 FOR_EACH_BB_FN (bb, cfun)
667 add_scope_conflicts_1 (bb, work, true);
668
669 free (rpo);
670 BITMAP_FREE (work);
671 FOR_ALL_BB_FN (bb, cfun)
672 BITMAP_FREE (bb->aux);
673 }
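
/* A rough sketch (for exposition only) of the dataflow solved above: with
   ACTIVE (bb) being the bitmap stored in bb->aux, each iteration computes
     T_bb (union over predecessors p of ACTIVE (p))
   and ORs it into ACTIVE (bb), where the transfer function T_bb sets a
   partition's bit at the first mention of its decl and clears it at the
   gimple clobber that ends the decl's scope.  The RPO walk repeats until no
   ACTIVE (bb) changes, and the final FOR_EACH_BB_FN pass replays each block
   once more to record conflicts between simultaneously live partitions.  */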
674
675 /* A subroutine of partition_stack_vars. A comparison function for qsort,
676 sorting an array of indices by the properties of the object. */
677
678 static int
679 stack_var_cmp (const void *a, const void *b)
680 {
681 size_t ia = *(const size_t *)a;
682 size_t ib = *(const size_t *)b;
683 unsigned int aligna = stack_vars[ia].alignb;
684 unsigned int alignb = stack_vars[ib].alignb;
685 poly_int64 sizea = stack_vars[ia].size;
686 poly_int64 sizeb = stack_vars[ib].size;
687 tree decla = stack_vars[ia].decl;
688 tree declb = stack_vars[ib].decl;
689 bool largea, largeb;
690 unsigned int uida, uidb;
691
692 /* Primary compare on "large" alignment. Large comes first. */
693 largea = (aligna * BITS_PER_UNIT > MAX_SUPPORTED_STACK_ALIGNMENT);
694 largeb = (alignb * BITS_PER_UNIT > MAX_SUPPORTED_STACK_ALIGNMENT);
695 if (largea != largeb)
696 return (int)largeb - (int)largea;
697
698 /* Secondary compare on size, decreasing. */
699 int diff = compare_sizes_for_sort (sizeb, sizea);
700 if (diff != 0)
701 return diff;
702
703 /* Tertiary compare on true alignment, decreasing. */
704 if (aligna < alignb)
705 return -1;
706 if (aligna > alignb)
707 return 1;
708
709 /* Final compare on ID for sort stability, decreasing.
710 Two SSA names are compared by their version, SSA names come before
711 non-SSA names, and two normal decls are compared by their DECL_UID. */
712 if (TREE_CODE (decla) == SSA_NAME)
713 {
714 if (TREE_CODE (declb) == SSA_NAME)
715 uida = SSA_NAME_VERSION (decla), uidb = SSA_NAME_VERSION (declb);
716 else
717 return -1;
718 }
719 else if (TREE_CODE (declb) == SSA_NAME)
720 return 1;
721 else
722 uida = DECL_UID (decla), uidb = DECL_UID (declb);
723 if (uida < uidb)
724 return 1;
725 if (uida > uidb)
726 return -1;
727 return 0;
728 }
729
730 struct part_traits : unbounded_int_hashmap_traits <size_t, bitmap> {};
731 typedef hash_map<size_t, bitmap, part_traits> part_hashmap;
732
733 /* If the points-to solution *PT points to variables that are in a partition
734 together with other variables, add all partition members to the pointed-to
735 variables bitmap. */
736
737 static void
738 add_partitioned_vars_to_ptset (struct pt_solution *pt,
739 part_hashmap *decls_to_partitions,
740 hash_set<bitmap> *visited, bitmap temp)
741 {
742 bitmap_iterator bi;
743 unsigned i;
744 bitmap *part;
745
746 if (pt->anything
747 || pt->vars == NULL
748 /* The pointed-to vars bitmap is shared, it is enough to
749 visit it once. */
750 || visited->add (pt->vars))
751 return;
752
753 bitmap_clear (temp);
754
755 /* By using a temporary bitmap to store all members of the partitions
756 we have to add, we make sure to visit each of the partitions only
757 once. */
758 EXECUTE_IF_SET_IN_BITMAP (pt->vars, 0, i, bi)
759 if ((!temp
760 || !bitmap_bit_p (temp, i))
761 && (part = decls_to_partitions->get (i)))
762 bitmap_ior_into (temp, *part);
763 if (!bitmap_empty_p (temp))
764 bitmap_ior_into (pt->vars, temp);
765 }
766
767 /* Update points-to sets based on partition info, so we can use them on RTL.
768 The bitmaps representing stack partitions will be saved until expand,
769 where partitioned decls used as bases in memory expressions will be
770 rewritten. */
771
772 static void
773 update_alias_info_with_stack_vars (void)
774 {
775 part_hashmap *decls_to_partitions = NULL;
776 size_t i, j;
777 tree var = NULL_TREE;
778
779 for (i = 0; i < stack_vars_num; i++)
780 {
781 bitmap part = NULL;
782 tree name;
783 struct ptr_info_def *pi;
784
785 /* Not interested in partitions with a single variable. */
786 if (stack_vars[i].representative != i
787 || stack_vars[i].next == EOC)
788 continue;
789
790 if (!decls_to_partitions)
791 {
792 decls_to_partitions = new part_hashmap;
793 cfun->gimple_df->decls_to_pointers = new hash_map<tree, tree>;
794 }
795
796 /* Create an SSA_NAME that points to the partition for use
797 as base during alias-oracle queries on RTL for bases that
798 have been partitioned. */
799 if (var == NULL_TREE)
800 var = create_tmp_var (ptr_type_node);
801 name = make_ssa_name (var);
802
803 /* Create bitmaps representing partitions. They will be used for
804 points-to sets later, so use GGC alloc. */
805 part = BITMAP_GGC_ALLOC ();
806 for (j = i; j != EOC; j = stack_vars[j].next)
807 {
808 tree decl = stack_vars[j].decl;
809 unsigned int uid = DECL_PT_UID (decl);
810 bitmap_set_bit (part, uid);
811 decls_to_partitions->put (uid, part);
812 cfun->gimple_df->decls_to_pointers->put (decl, name);
813 if (TREE_ADDRESSABLE (decl))
814 TREE_ADDRESSABLE (name) = 1;
815 }
816
817 /* Make the SSA name point to all partition members. */
818 pi = get_ptr_info (name);
819 pt_solution_set (&pi->pt, part, false);
820 }
821
822 /* Make all points-to sets that contain one member of a partition
823 contain all members of the partition. */
824 if (decls_to_partitions)
825 {
826 unsigned i;
827 tree name;
828 hash_set<bitmap> visited;
829 bitmap temp = BITMAP_ALLOC (&stack_var_bitmap_obstack);
830
831 FOR_EACH_SSA_NAME (i, name, cfun)
832 {
833 struct ptr_info_def *pi;
834
835 if (POINTER_TYPE_P (TREE_TYPE (name))
836 && ((pi = SSA_NAME_PTR_INFO (name)) != NULL))
837 add_partitioned_vars_to_ptset (&pi->pt, decls_to_partitions,
838 &visited, temp);
839 }
840
841 add_partitioned_vars_to_ptset (&cfun->gimple_df->escaped,
842 decls_to_partitions, &visited, temp);
843
844 delete decls_to_partitions;
845 BITMAP_FREE (temp);
846 }
847 }
848
849 /* A subroutine of partition_stack_vars. The UNION portion of a UNION/FIND
850 partitioning algorithm. Partitions A and B are known to be non-conflicting.
851 Merge them into a single partition A. */
852
853 static void
854 union_stack_vars (size_t a, size_t b)
855 {
856 class stack_var *vb = &stack_vars[b];
857 bitmap_iterator bi;
858 unsigned u;
859
860 gcc_assert (stack_vars[b].next == EOC);
861 /* Add B to A's partition. */
862 stack_vars[b].next = stack_vars[a].next;
863 stack_vars[b].representative = a;
864 stack_vars[a].next = b;
865
866 /* Update the required alignment of partition A to account for B. */
867 if (stack_vars[a].alignb < stack_vars[b].alignb)
868 stack_vars[a].alignb = stack_vars[b].alignb;
869
870 /* Update the interference graph and merge the conflicts. */
871 if (vb->conflicts)
872 {
873 EXECUTE_IF_SET_IN_BITMAP (vb->conflicts, 0, u, bi)
874 add_stack_var_conflict (a, stack_vars[u].representative);
875 BITMAP_FREE (vb->conflicts);
876 }
877 }
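
/* Illustrative only: a partition is a singly linked list threaded through the
   "next" fields, starting at the representative.  From three singleton
   partitions A, B and C, union_stack_vars (A, B) followed by
   union_stack_vars (A, C) leaves
     stack_vars[A].next == C, stack_vars[C].next == B, stack_vars[B].next == EOC,
   with A the representative of all three and B's and C's recorded conflicts
   redirected onto A.  */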
878
879 /* A subroutine of expand_used_vars. Binpack the variables into
880 partitions constrained by the interference graph. The overall
881 algorithm used is as follows:
882
883 Sort the objects by size in descending order.
884 For each object A {
885 S = size(A)
886 O = 0
887 loop {
888 Look for the largest non-conflicting object B with size <= S.
889 UNION (A, B)
890 }
891 }
892 */
893
894 static void
895 partition_stack_vars (void)
896 {
897 size_t si, sj, n = stack_vars_num;
898
899 stack_vars_sorted = XNEWVEC (size_t, stack_vars_num);
900 for (si = 0; si < n; ++si)
901 stack_vars_sorted[si] = si;
902
903 if (n == 1)
904 return;
905
906 qsort (stack_vars_sorted, n, sizeof (size_t), stack_var_cmp);
907
908 for (si = 0; si < n; ++si)
909 {
910 size_t i = stack_vars_sorted[si];
911 unsigned int ialign = stack_vars[i].alignb;
912 poly_int64 isize = stack_vars[i].size;
913
914 /* Ignore objects that aren't partition representatives. If we
915 see a var that is not a partition representative, it must
916 have been merged earlier. */
917 if (stack_vars[i].representative != i)
918 continue;
919
920 for (sj = si + 1; sj < n; ++sj)
921 {
922 size_t j = stack_vars_sorted[sj];
923 unsigned int jalign = stack_vars[j].alignb;
924 poly_int64 jsize = stack_vars[j].size;
925
926 /* Ignore objects that aren't partition representatives. */
927 if (stack_vars[j].representative != j)
928 continue;
929
930 /* Do not mix objects of "small" (supported) alignment
931 and "large" (unsupported) alignment. */
932 if ((ialign * BITS_PER_UNIT <= MAX_SUPPORTED_STACK_ALIGNMENT)
933 != (jalign * BITS_PER_UNIT <= MAX_SUPPORTED_STACK_ALIGNMENT))
934 break;
935
936 /* For Address Sanitizer do not mix objects with different
937 sizes, as the shorter vars wouldn't be adequately protected.
938 Don't do that for "large" (unsupported) alignment objects,
939 those aren't protected anyway. */
940 if (asan_sanitize_stack_p ()
941 && maybe_ne (isize, jsize)
942 && ialign * BITS_PER_UNIT <= MAX_SUPPORTED_STACK_ALIGNMENT)
943 break;
944
945 /* Ignore conflicting objects. */
946 if (stack_var_conflict_p (i, j))
947 continue;
948
949 /* UNION the objects, placing J at OFFSET. */
950 union_stack_vars (i, j);
951 }
952 }
953
954 update_alias_info_with_stack_vars ();
955 }
956
957 /* A debugging aid for expand_used_vars. Dump the generated partitions. */
958
959 static void
960 dump_stack_var_partition (void)
961 {
962 size_t si, i, j, n = stack_vars_num;
963
964 for (si = 0; si < n; ++si)
965 {
966 i = stack_vars_sorted[si];
967
968 /* Skip variables that aren't partition representatives, for now. */
969 if (stack_vars[i].representative != i)
970 continue;
971
972 fprintf (dump_file, "Partition %lu: size ", (unsigned long) i);
973 print_dec (stack_vars[i].size, dump_file);
974 fprintf (dump_file, " align %u\n", stack_vars[i].alignb);
975
976 for (j = i; j != EOC; j = stack_vars[j].next)
977 {
978 fputc ('\t', dump_file);
979 print_generic_expr (dump_file, stack_vars[j].decl, dump_flags);
980 }
981 fputc ('\n', dump_file);
982 }
983 }
984
985 /* Assign rtl to DECL at BASE + OFFSET. */
986
987 static void
988 expand_one_stack_var_at (tree decl, rtx base, unsigned base_align,
989 poly_int64 offset)
990 {
991 unsigned align;
992 rtx x;
993
994 /* If this fails, we've overflowed the stack frame. Error nicely? */
995 gcc_assert (known_eq (offset, trunc_int_for_mode (offset, Pmode)));
996
997 x = plus_constant (Pmode, base, offset);
998 x = gen_rtx_MEM (TREE_CODE (decl) == SSA_NAME
999 ? TYPE_MODE (TREE_TYPE (decl))
1000 : DECL_MODE (SSAVAR (decl)), x);
1001
1002 if (TREE_CODE (decl) != SSA_NAME)
1003 {
1004 /* Set the alignment we actually gave this decl if it isn't an SSA name.
1005 If it is, we generate stack slots only accidentally, so it isn't as
1006 important; we'll simply use the alignment that is already set. */
1007 if (base == virtual_stack_vars_rtx)
1008 offset -= frame_phase;
1009 align = known_alignment (offset);
1010 align *= BITS_PER_UNIT;
1011 if (align == 0 || align > base_align)
1012 align = base_align;
1013
1014 /* One would think that we could assert that we're not decreasing
1015 alignment here, but (at least) the i386 port does exactly this
1016 via the MINIMUM_ALIGNMENT hook. */
1017
1018 SET_DECL_ALIGN (decl, align);
1019 DECL_USER_ALIGN (decl) = 0;
1020 }
1021
1022 set_rtl (decl, x);
1023 }
1024
1025 class stack_vars_data
1026 {
1027 public:
1028 /* Vector of offset pairs, always end of some padding followed
1029 by start of the padding that needs Address Sanitizer protection.
1030 The vector is in reverse order; highest offset pairs come first. */
1031 auto_vec<HOST_WIDE_INT> asan_vec;
1032
1033 /* Vector of partition representative decls in between the paddings. */
1034 auto_vec<tree> asan_decl_vec;
1035
1036 /* Base pseudo register for Address Sanitizer protected automatic vars. */
1037 rtx asan_base;
1038
1039 /* Alignment needed for the Address Sanitizer protected automatic vars. */
1040 unsigned int asan_alignb;
1041 };
1042
1043 /* A subroutine of expand_used_vars. Give each partition representative
1044 a unique location within the stack frame. Update each partition member
1045 with that location. */
1046
1047 static void
1048 expand_stack_vars (bool (*pred) (size_t), class stack_vars_data *data)
1049 {
1050 size_t si, i, j, n = stack_vars_num;
1051 poly_uint64 large_size = 0, large_alloc = 0;
1052 rtx large_base = NULL;
1053 unsigned large_align = 0;
1054 bool large_allocation_done = false;
1055 tree decl;
1056
1057 /* Determine if there are any variables requiring "large" alignment.
1058 Since these are dynamically allocated, we only process these if
1059 no predicate is involved. */
1060 large_align = stack_vars[stack_vars_sorted[0]].alignb * BITS_PER_UNIT;
1061 if (pred == NULL && large_align > MAX_SUPPORTED_STACK_ALIGNMENT)
1062 {
1063 /* Find the total size of these variables. */
1064 for (si = 0; si < n; ++si)
1065 {
1066 unsigned alignb;
1067
1068 i = stack_vars_sorted[si];
1069 alignb = stack_vars[i].alignb;
1070
1071 /* All "large" alignment decls come before all "small" alignment
1072 decls, but "large" alignment decls are not sorted based on
1073 their alignment. Increase large_align to track the largest
1074 required alignment. */
1075 if ((alignb * BITS_PER_UNIT) > large_align)
1076 large_align = alignb * BITS_PER_UNIT;
1077
1078 /* Stop when we get to the first decl with "small" alignment. */
1079 if (alignb * BITS_PER_UNIT <= MAX_SUPPORTED_STACK_ALIGNMENT)
1080 break;
1081
1082 /* Skip variables that aren't partition representatives. */
1083 if (stack_vars[i].representative != i)
1084 continue;
1085
1086 /* Skip variables that have already had rtl assigned. See also
1087 add_stack_var where we perpetrate this pc_rtx hack. */
1088 decl = stack_vars[i].decl;
1089 if (TREE_CODE (decl) == SSA_NAME
1090 ? SA.partition_to_pseudo[var_to_partition (SA.map, decl)] != NULL_RTX
1091 : DECL_RTL (decl) != pc_rtx)
1092 continue;
1093
1094 large_size = aligned_upper_bound (large_size, alignb);
1095 large_size += stack_vars[i].size;
1096 }
1097 }
1098
1099 for (si = 0; si < n; ++si)
1100 {
1101 rtx base;
1102 unsigned base_align, alignb;
1103 poly_int64 offset;
1104
1105 i = stack_vars_sorted[si];
1106
1107 /* Skip variables that aren't partition representatives, for now. */
1108 if (stack_vars[i].representative != i)
1109 continue;
1110
1111 /* Skip variables that have already had rtl assigned. See also
1112 add_stack_var where we perpetrate this pc_rtx hack. */
1113 decl = stack_vars[i].decl;
1114 if (TREE_CODE (decl) == SSA_NAME
1115 ? SA.partition_to_pseudo[var_to_partition (SA.map, decl)] != NULL_RTX
1116 : DECL_RTL (decl) != pc_rtx)
1117 continue;
1118
1119 /* Check the predicate to see whether this variable should be
1120 allocated in this pass. */
1121 if (pred && !pred (i))
1122 continue;
1123
1124 alignb = stack_vars[i].alignb;
1125 if (alignb * BITS_PER_UNIT <= MAX_SUPPORTED_STACK_ALIGNMENT)
1126 {
1127 base = virtual_stack_vars_rtx;
1128 /* ASAN description strings don't yet have a syntax for expressing
1129 polynomial offsets. */
1130 HOST_WIDE_INT prev_offset;
1131 if (asan_sanitize_stack_p ()
1132 && pred
1133 && frame_offset.is_constant (&prev_offset)
1134 && stack_vars[i].size.is_constant ())
1135 {
1136 if (data->asan_vec.is_empty ())
1137 {
1138 alloc_stack_frame_space (0, ASAN_RED_ZONE_SIZE);
1139 prev_offset = frame_offset.to_constant ();
1140 }
1141 prev_offset = align_base (prev_offset,
1142 ASAN_MIN_RED_ZONE_SIZE,
1143 !FRAME_GROWS_DOWNWARD);
1144 tree repr_decl = NULL_TREE;
1145 unsigned HOST_WIDE_INT size
1146 = asan_var_and_redzone_size (stack_vars[i].size.to_constant ());
1147 if (data->asan_vec.is_empty ())
1148 size = MAX (size, ASAN_RED_ZONE_SIZE);
1149
1150 unsigned HOST_WIDE_INT alignment = MAX (alignb,
1151 ASAN_MIN_RED_ZONE_SIZE);
1152 offset = alloc_stack_frame_space (size, alignment);
1153
1154 data->asan_vec.safe_push (prev_offset);
1155 /* Allocating a constant amount of space from a constant
1156 starting offset must give a constant result. */
1157 data->asan_vec.safe_push ((offset + stack_vars[i].size)
1158 .to_constant ());
1159 /* Find the best representative of the partition.
1160 Prefer those with DECL_NAME, and even better those
1161 satisfying the asan_protect_stack_decl predicate. */
1162 for (j = i; j != EOC; j = stack_vars[j].next)
1163 if (asan_protect_stack_decl (stack_vars[j].decl)
1164 && DECL_NAME (stack_vars[j].decl))
1165 {
1166 repr_decl = stack_vars[j].decl;
1167 break;
1168 }
1169 else if (repr_decl == NULL_TREE
1170 && DECL_P (stack_vars[j].decl)
1171 && DECL_NAME (stack_vars[j].decl))
1172 repr_decl = stack_vars[j].decl;
1173 if (repr_decl == NULL_TREE)
1174 repr_decl = stack_vars[i].decl;
1175 data->asan_decl_vec.safe_push (repr_decl);
1176
1177 /* Make sure a representative is unpoisoned if another
1178 variable in the partition is handled by
1179 use-after-scope sanitization. */
1180 if (asan_handled_variables != NULL
1181 && !asan_handled_variables->contains (repr_decl))
1182 {
1183 for (j = i; j != EOC; j = stack_vars[j].next)
1184 if (asan_handled_variables->contains (stack_vars[j].decl))
1185 break;
1186 if (j != EOC)
1187 asan_handled_variables->add (repr_decl);
1188 }
1189
1190 data->asan_alignb = MAX (data->asan_alignb, alignb);
1191 if (data->asan_base == NULL)
1192 data->asan_base = gen_reg_rtx (Pmode);
1193 base = data->asan_base;
1194
1195 if (!STRICT_ALIGNMENT)
1196 base_align = crtl->max_used_stack_slot_alignment;
1197 else
1198 base_align = MAX (crtl->max_used_stack_slot_alignment,
1199 GET_MODE_ALIGNMENT (SImode)
1200 << ASAN_SHADOW_SHIFT);
1201 }
1202 else
1203 {
1204 offset = alloc_stack_frame_space (stack_vars[i].size, alignb);
1205 base_align = crtl->max_used_stack_slot_alignment;
1206 }
1207 }
1208 else
1209 {
1210 /* Large alignment is only processed in the last pass. */
1211 if (pred)
1212 continue;
1213
1214 /* If there were any variables requiring "large" alignment, allocate
1215 space. */
1216 if (maybe_ne (large_size, 0U) && ! large_allocation_done)
1217 {
1218 poly_int64 loffset;
1219 rtx large_allocsize;
1220
1221 large_allocsize = gen_int_mode (large_size, Pmode);
1222 get_dynamic_stack_size (&large_allocsize, 0, large_align, NULL);
1223 loffset = alloc_stack_frame_space
1224 (rtx_to_poly_int64 (large_allocsize),
1225 PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT);
1226 large_base = get_dynamic_stack_base (loffset, large_align);
1227 large_allocation_done = true;
1228 }
1229 gcc_assert (large_base != NULL);
1230
1231 large_alloc = aligned_upper_bound (large_alloc, alignb);
1232 offset = large_alloc;
1233 large_alloc += stack_vars[i].size;
1234
1235 base = large_base;
1236 base_align = large_align;
1237 }
1238
1239 /* Create rtl for each variable based on their location within the
1240 partition. */
1241 for (j = i; j != EOC; j = stack_vars[j].next)
1242 {
1243 expand_one_stack_var_at (stack_vars[j].decl,
1244 base, base_align,
1245 offset);
1246 }
1247 }
1248
1249 gcc_assert (known_eq (large_alloc, large_size));
1250 }
1251
1252 /* Take into account all sizes of partitions and reset DECL_RTLs. */
1253 static poly_uint64
1254 account_stack_vars (void)
1255 {
1256 size_t si, j, i, n = stack_vars_num;
1257 poly_uint64 size = 0;
1258
1259 for (si = 0; si < n; ++si)
1260 {
1261 i = stack_vars_sorted[si];
1262
1263 /* Skip variables that aren't partition representatives, for now. */
1264 if (stack_vars[i].representative != i)
1265 continue;
1266
1267 size += stack_vars[i].size;
1268 for (j = i; j != EOC; j = stack_vars[j].next)
1269 set_rtl (stack_vars[j].decl, NULL);
1270 }
1271 return size;
1272 }
1273
1274 /* Record the RTL assignment X for the default def of PARM. */
1275
1276 extern void
1277 set_parm_rtl (tree parm, rtx x)
1278 {
1279 gcc_assert (TREE_CODE (parm) == PARM_DECL
1280 || TREE_CODE (parm) == RESULT_DECL);
1281
1282 if (x && !MEM_P (x))
1283 {
1284 unsigned int align = MINIMUM_ALIGNMENT (TREE_TYPE (parm),
1285 TYPE_MODE (TREE_TYPE (parm)),
1286 TYPE_ALIGN (TREE_TYPE (parm)));
1287
1288 /* If the variable alignment is very large we'll dynamically
1289 allocate it, which means that in-frame portion is just a
1290 pointer. ??? We've got a pseudo for sure here, do we
1291 actually dynamically allocate its spilling area if needed?
1292 ??? Isn't it a problem when Pmode alignment also exceeds
1293 MAX_SUPPORTED_STACK_ALIGNMENT, as can happen on cris and lm32? */
1294 if (align > MAX_SUPPORTED_STACK_ALIGNMENT)
1295 align = GET_MODE_ALIGNMENT (Pmode);
1296
1297 record_alignment_for_reg_var (align);
1298 }
1299
1300 tree ssa = ssa_default_def (cfun, parm);
1301 if (!ssa)
1302 return set_rtl (parm, x);
1303
1304 int part = var_to_partition (SA.map, ssa);
1305 gcc_assert (part != NO_PARTITION);
1306
1307 bool changed = bitmap_bit_p (SA.partitions_for_parm_default_defs, part);
1308 gcc_assert (changed);
1309
1310 set_rtl (ssa, x);
1311 gcc_assert (DECL_RTL (parm) == x);
1312 }
1313
1314 /* A subroutine of expand_one_var. Called to immediately assign rtl
1315 to a variable to be allocated in the stack frame. */
1316
1317 static void
1318 expand_one_stack_var_1 (tree var)
1319 {
1320 poly_uint64 size;
1321 poly_int64 offset;
1322 unsigned byte_align;
1323
1324 if (TREE_CODE (var) == SSA_NAME)
1325 {
1326 tree type = TREE_TYPE (var);
1327 size = tree_to_poly_uint64 (TYPE_SIZE_UNIT (type));
1328 byte_align = TYPE_ALIGN_UNIT (type);
1329 }
1330 else
1331 {
1332 size = tree_to_poly_uint64 (DECL_SIZE_UNIT (var));
1333 byte_align = align_local_variable (var, true);
1334 }
1335
1336 /* We handle highly aligned variables in expand_stack_vars. */
1337 gcc_assert (byte_align * BITS_PER_UNIT <= MAX_SUPPORTED_STACK_ALIGNMENT);
1338
1339 offset = alloc_stack_frame_space (size, byte_align);
1340
1341 expand_one_stack_var_at (var, virtual_stack_vars_rtx,
1342 crtl->max_used_stack_slot_alignment, offset);
1343 }
1344
1345 /* Wrapper for expand_one_stack_var_1 that checks SSA_NAMEs are
1346 already assigned some MEM. */
1347
1348 static void
1349 expand_one_stack_var (tree var)
1350 {
1351 if (TREE_CODE (var) == SSA_NAME)
1352 {
1353 int part = var_to_partition (SA.map, var);
1354 if (part != NO_PARTITION)
1355 {
1356 rtx x = SA.partition_to_pseudo[part];
1357 gcc_assert (x);
1358 gcc_assert (MEM_P (x));
1359 return;
1360 }
1361 }
1362
1363 return expand_one_stack_var_1 (var);
1364 }
1365
1366 /* A subroutine of expand_one_var. Called to assign rtl to a VAR_DECL
1367 that will reside in a hard register. */
1368
1369 static void
1370 expand_one_hard_reg_var (tree var)
1371 {
1372 rest_of_decl_compilation (var, 0, 0);
1373 }
1374
1375 /* Record the alignment requirements of some variable assigned to a
1376 pseudo. */
1377
1378 static void
1379 record_alignment_for_reg_var (unsigned int align)
1380 {
1381 if (SUPPORTS_STACK_ALIGNMENT
1382 && crtl->stack_alignment_estimated < align)
1383 {
1384 /* stack_alignment_estimated shouldn't change after the stack
1385 realign decision has been made. */
1386 gcc_assert (!crtl->stack_realign_processed);
1387 crtl->stack_alignment_estimated = align;
1388 }
1389
1390 /* stack_alignment_needed > PREFERRED_STACK_BOUNDARY is permitted.
1391 So here we only make sure stack_alignment_needed >= align. */
1392 if (crtl->stack_alignment_needed < align)
1393 crtl->stack_alignment_needed = align;
1394 if (crtl->max_used_stack_slot_alignment < align)
1395 crtl->max_used_stack_slot_alignment = align;
1396 }
1397
1398 /* Create RTL for an SSA partition. */
1399
1400 static void
1401 expand_one_ssa_partition (tree var)
1402 {
1403 int part = var_to_partition (SA.map, var);
1404 gcc_assert (part != NO_PARTITION);
1405
1406 if (SA.partition_to_pseudo[part])
1407 return;
1408
1409 unsigned int align = MINIMUM_ALIGNMENT (TREE_TYPE (var),
1410 TYPE_MODE (TREE_TYPE (var)),
1411 TYPE_ALIGN (TREE_TYPE (var)));
1412
1413 /* If the variable alignment is very large we'll dynamically allocate
1414 it, which means that in-frame portion is just a pointer. */
1415 if (align > MAX_SUPPORTED_STACK_ALIGNMENT)
1416 align = GET_MODE_ALIGNMENT (Pmode);
1417
1418 record_alignment_for_reg_var (align);
1419
1420 if (!use_register_for_decl (var))
1421 {
1422 if (defer_stack_allocation (var, true))
1423 add_stack_var (var, true);
1424 else
1425 expand_one_stack_var_1 (var);
1426 return;
1427 }
1428
1429 machine_mode reg_mode = promote_ssa_mode (var, NULL);
1430 rtx x = gen_reg_rtx (reg_mode);
1431
1432 set_rtl (var, x);
1433
1434 /* For a promoted variable, X will not be used directly but wrapped in a
1435 SUBREG with SUBREG_PROMOTED_VAR_P set, which means that the RTL land
1436 will assume that its upper bits can be inferred from its lower bits.
1437 Therefore, if X isn't initialized on every path from the entry, then
1438 we must do it manually in order to fulfill the above assumption. */
1439 if (reg_mode != TYPE_MODE (TREE_TYPE (var))
1440 && bitmap_bit_p (SA.partitions_for_undefined_values, part))
1441 emit_move_insn (x, CONST0_RTX (reg_mode));
1442 }
1443
1444 /* Record the association between the RTL generated for partition PART
1445 and the underlying variable of the SSA_NAME VAR. */
1446
1447 static void
1448 adjust_one_expanded_partition_var (tree var)
1449 {
1450 if (!var)
1451 return;
1452
1453 tree decl = SSA_NAME_VAR (var);
1454
1455 int part = var_to_partition (SA.map, var);
1456 if (part == NO_PARTITION)
1457 return;
1458
1459 rtx x = SA.partition_to_pseudo[part];
1460
1461 gcc_assert (x);
1462
1463 set_rtl (var, x);
1464
1465 if (!REG_P (x))
1466 return;
1467
1468 /* Note if the object is a user variable. */
1469 if (decl && !DECL_ARTIFICIAL (decl))
1470 mark_user_reg (x);
1471
1472 if (POINTER_TYPE_P (decl ? TREE_TYPE (decl) : TREE_TYPE (var)))
1473 mark_reg_pointer (x, get_pointer_alignment (var));
1474 }
1475
1476 /* A subroutine of expand_one_var. Called to assign rtl to a VAR_DECL
1477 that will reside in a pseudo register. */
1478
1479 static void
1480 expand_one_register_var (tree var)
1481 {
1482 if (TREE_CODE (var) == SSA_NAME)
1483 {
1484 int part = var_to_partition (SA.map, var);
1485 if (part != NO_PARTITION)
1486 {
1487 rtx x = SA.partition_to_pseudo[part];
1488 gcc_assert (x);
1489 gcc_assert (REG_P (x));
1490 return;
1491 }
1492 gcc_unreachable ();
1493 }
1494
1495 tree decl = var;
1496 tree type = TREE_TYPE (decl);
1497 machine_mode reg_mode = promote_decl_mode (decl, NULL);
1498 rtx x = gen_reg_rtx (reg_mode);
1499
1500 set_rtl (var, x);
1501
1502 /* Note if the object is a user variable. */
1503 if (!DECL_ARTIFICIAL (decl))
1504 mark_user_reg (x);
1505
1506 if (POINTER_TYPE_P (type))
1507 mark_reg_pointer (x, get_pointer_alignment (var));
1508 }
1509
1510 /* A subroutine of expand_one_var. Called to assign rtl to a VAR_DECL that
1511 has some associated error, e.g. its type is error-mark. We just need
1512 to pick something that won't crash the rest of the compiler. */
1513
1514 static void
1515 expand_one_error_var (tree var)
1516 {
1517 machine_mode mode = DECL_MODE (var);
1518 rtx x;
1519
1520 if (mode == BLKmode)
1521 x = gen_rtx_MEM (BLKmode, const0_rtx);
1522 else if (mode == VOIDmode)
1523 x = const0_rtx;
1524 else
1525 x = gen_reg_rtx (mode);
1526
1527 SET_DECL_RTL (var, x);
1528 }
1529
1530 /* A subroutine of expand_one_var. VAR is a variable that will be
1531 allocated to the local stack frame. Return true if we wish to
1532 add VAR to STACK_VARS so that it will be coalesced with other
1533 variables. Return false to allocate VAR immediately.
1534
1535 This function is used to reduce the number of variables considered
1536 for coalescing, which reduces the size of the quadratic problem. */
1537
1538 static bool
1539 defer_stack_allocation (tree var, bool toplevel)
1540 {
1541 tree size_unit = TREE_CODE (var) == SSA_NAME
1542 ? TYPE_SIZE_UNIT (TREE_TYPE (var))
1543 : DECL_SIZE_UNIT (var);
1544 poly_uint64 size;
1545
1546 /* Whether the variable is small enough for immediate allocation not to be
1547 a problem with regard to the frame size. */
1548 bool smallish
1549 = (poly_int_tree_p (size_unit, &size)
1550 && (estimated_poly_value (size)
1551 < PARAM_VALUE (PARAM_MIN_SIZE_FOR_STACK_SHARING)));
1552
1553 /* If stack protection is enabled, *all* stack variables must be deferred,
1554 so that we can re-order the strings to the top of the frame.
1555 Similarly for Address Sanitizer. */
1556 if (flag_stack_protect || asan_sanitize_stack_p ())
1557 return true;
1558
1559 unsigned int align = TREE_CODE (var) == SSA_NAME
1560 ? TYPE_ALIGN (TREE_TYPE (var))
1561 : DECL_ALIGN (var);
1562
1563 /* We handle "large" alignment via dynamic allocation. We want to handle
1564 this extra complication in only one place, so defer them. */
1565 if (align > MAX_SUPPORTED_STACK_ALIGNMENT)
1566 return true;
1567
1568 bool ignored = TREE_CODE (var) == SSA_NAME
1569 ? !SSAVAR (var) || DECL_IGNORED_P (SSA_NAME_VAR (var))
1570 : DECL_IGNORED_P (var);
1571
1572 /* When optimization is enabled, DECL_IGNORED_P variables originally scoped
1573 might be detached from their block and appear at toplevel when we reach
1574 here. We want to coalesce them with variables from other blocks when
1575 the immediate contribution to the frame size would be noticeable. */
1576 if (toplevel && optimize > 0 && ignored && !smallish)
1577 return true;
1578
1579 /* Variables declared in the outermost scope automatically conflict
1580 with every other variable. The only reason to want to defer them
1581 at all is that, after sorting, we can more efficiently pack
1582 small variables in the stack frame. Continue to defer at -O2. */
1583 if (toplevel && optimize < 2)
1584 return false;
1585
1586 /* Without optimization, *most* variables are allocated from the
1587 stack, which makes the quadratic problem large exactly when we
1588 want compilation to proceed as quickly as possible. On the
1589 other hand, we don't want the function's stack frame size to
1590 get completely out of hand. So we avoid adding scalars and
1591 "small" aggregates to the list at all. */
1592 if (optimize == 0 && smallish)
1593 return false;
1594
1595 return true;
1596 }
1597
1598 /* A subroutine of expand_used_vars. Expand one variable according to
1599 its flavor. Variables to be placed on the stack are not actually
1600 expanded yet, merely recorded.
1601 When REALLY_EXPAND is false, only add stack variables to be allocated.
1602 Return the stack usage this variable is supposed to take.
1603 */
1604
1605 static poly_uint64
1606 expand_one_var (tree var, bool toplevel, bool really_expand)
1607 {
1608 unsigned int align = BITS_PER_UNIT;
1609 tree origvar = var;
1610
1611 var = SSAVAR (var);
1612
1613 if (TREE_TYPE (var) != error_mark_node && VAR_P (var))
1614 {
1615 if (is_global_var (var))
1616 return 0;
1617
1618 /* Because we don't know if VAR will be in register or on stack,
1619 we conservatively assume it will be on stack even if VAR is
1620 eventually put into register after RA pass. For non-automatic
1621 variables, which won't be on stack, we collect alignment of
1622 type and ignore user specified alignment. Similarly for
1623 SSA_NAMEs for which use_register_for_decl returns true. */
1624 if (TREE_STATIC (var)
1625 || DECL_EXTERNAL (var)
1626 || (TREE_CODE (origvar) == SSA_NAME && use_register_for_decl (var)))
1627 align = MINIMUM_ALIGNMENT (TREE_TYPE (var),
1628 TYPE_MODE (TREE_TYPE (var)),
1629 TYPE_ALIGN (TREE_TYPE (var)));
1630 else if (DECL_HAS_VALUE_EXPR_P (var)
1631 || (DECL_RTL_SET_P (var) && MEM_P (DECL_RTL (var))))
1632 /* Don't consider debug only variables with DECL_HAS_VALUE_EXPR_P set
1633 or variables which were assigned a stack slot already by
1634 expand_one_stack_var_at - in the latter case DECL_ALIGN has been
1635 changed from the offset chosen to it. */
1636 align = crtl->stack_alignment_estimated;
1637 else
1638 align = MINIMUM_ALIGNMENT (var, DECL_MODE (var), DECL_ALIGN (var));
1639
1640 /* If the variable alignment is very large we'll dynamically allocate
1641 it, which means that in-frame portion is just a pointer. */
1642 if (align > MAX_SUPPORTED_STACK_ALIGNMENT)
1643 align = GET_MODE_ALIGNMENT (Pmode);
1644 }
1645
1646 record_alignment_for_reg_var (align);
1647
1648 poly_uint64 size;
1649 if (TREE_CODE (origvar) == SSA_NAME)
1650 {
1651 gcc_assert (!VAR_P (var)
1652 || (!DECL_EXTERNAL (var)
1653 && !DECL_HAS_VALUE_EXPR_P (var)
1654 && !TREE_STATIC (var)
1655 && TREE_TYPE (var) != error_mark_node
1656 && !DECL_HARD_REGISTER (var)
1657 && really_expand));
1658 }
1659 if (!VAR_P (var) && TREE_CODE (origvar) != SSA_NAME)
1660 ;
1661 else if (DECL_EXTERNAL (var))
1662 ;
1663 else if (DECL_HAS_VALUE_EXPR_P (var))
1664 ;
1665 else if (TREE_STATIC (var))
1666 ;
1667 else if (TREE_CODE (origvar) != SSA_NAME && DECL_RTL_SET_P (var))
1668 ;
1669 else if (TREE_TYPE (var) == error_mark_node)
1670 {
1671 if (really_expand)
1672 expand_one_error_var (var);
1673 }
1674 else if (VAR_P (var) && DECL_HARD_REGISTER (var))
1675 {
1676 if (really_expand)
1677 {
1678 expand_one_hard_reg_var (var);
1679 if (!DECL_HARD_REGISTER (var))
1680 /* Invalid register specification. */
1681 expand_one_error_var (var);
1682 }
1683 }
1684 else if (use_register_for_decl (var))
1685 {
1686 if (really_expand)
1687 expand_one_register_var (origvar);
1688 }
1689 else if (!poly_int_tree_p (DECL_SIZE_UNIT (var), &size)
1690 || !valid_constant_size_p (DECL_SIZE_UNIT (var)))
1691 {
1692 /* Reject variables which cover more than half of the address-space. */
1693 if (really_expand)
1694 {
1695 if (DECL_NONLOCAL_FRAME (var))
1696 error_at (DECL_SOURCE_LOCATION (current_function_decl),
1697 "total size of local objects is too large");
1698 else
1699 error_at (DECL_SOURCE_LOCATION (var),
1700 "size of variable %q+D is too large", var);
1701 expand_one_error_var (var);
1702 }
1703 }
1704 else if (defer_stack_allocation (var, toplevel))
1705 add_stack_var (origvar, really_expand);
1706 else
1707 {
1708 if (really_expand)
1709 {
1710 if (lookup_attribute ("naked",
1711 DECL_ATTRIBUTES (current_function_decl)))
1712 error ("cannot allocate stack for variable %q+D, naked function",
1713 var);
1714
1715 expand_one_stack_var (origvar);
1716 }
1717 return size;
1718 }
1719 return 0;
1720 }
1721
1722 /* A subroutine of expand_used_vars. Walk down through the BLOCK tree
1723 expanding variables. Those variables that can be put into registers
1724 are allocated pseudos; those that can't are put on the stack.
1725
1726 TOPLEVEL is true if this is the outermost BLOCK. */
1727
1728 static void
1729 expand_used_vars_for_block (tree block, bool toplevel)
1730 {
1731 tree t;
1732
1733 /* Expand all variables at this level. */
1734 for (t = BLOCK_VARS (block); t ; t = DECL_CHAIN (t))
1735 if (TREE_USED (t)
1736 && ((!VAR_P (t) && TREE_CODE (t) != RESULT_DECL)
1737 || !DECL_NONSHAREABLE (t)))
1738 expand_one_var (t, toplevel, true);
1739
1740 /* Expand all variables at contained levels. */
1741 for (t = BLOCK_SUBBLOCKS (block); t ; t = BLOCK_CHAIN (t))
1742 expand_used_vars_for_block (t, false);
1743 }
1744
1745 /* A subroutine of expand_used_vars. Walk down through the BLOCK tree
1746 and clear TREE_USED on all local variables. */
1747
1748 static void
1749 clear_tree_used (tree block)
1750 {
1751 tree t;
1752
1753 for (t = BLOCK_VARS (block); t ; t = DECL_CHAIN (t))
1754 /* if (!TREE_STATIC (t) && !DECL_EXTERNAL (t)) */
1755 if ((!VAR_P (t) && TREE_CODE (t) != RESULT_DECL)
1756 || !DECL_NONSHAREABLE (t))
1757 TREE_USED (t) = 0;
1758
1759 for (t = BLOCK_SUBBLOCKS (block); t ; t = BLOCK_CHAIN (t))
1760 clear_tree_used (t);
1761 }
1762
1763 enum {
1764 SPCT_FLAG_DEFAULT = 1,
1765 SPCT_FLAG_ALL = 2,
1766 SPCT_FLAG_STRONG = 3,
1767 SPCT_FLAG_EXPLICIT = 4
1768 };
1769
1770 /* Examine TYPE and determine a bit mask of the following features. */
1771
1772 #define SPCT_HAS_LARGE_CHAR_ARRAY 1
1773 #define SPCT_HAS_SMALL_CHAR_ARRAY 2
1774 #define SPCT_HAS_ARRAY 4
1775 #define SPCT_HAS_AGGREGATE 8
1776
1777 static unsigned int
1778 stack_protect_classify_type (tree type)
1779 {
1780 unsigned int ret = 0;
1781 tree t;
1782
1783 switch (TREE_CODE (type))
1784 {
1785 case ARRAY_TYPE:
1786 t = TYPE_MAIN_VARIANT (TREE_TYPE (type));
1787 if (t == char_type_node
1788 || t == signed_char_type_node
1789 || t == unsigned_char_type_node)
1790 {
1791 unsigned HOST_WIDE_INT max = PARAM_VALUE (PARAM_SSP_BUFFER_SIZE);
1792 unsigned HOST_WIDE_INT len;
1793
1794 if (!TYPE_SIZE_UNIT (type)
1795 || !tree_fits_uhwi_p (TYPE_SIZE_UNIT (type)))
1796 len = max;
1797 else
1798 len = tree_to_uhwi (TYPE_SIZE_UNIT (type));
1799
1800 if (len < max)
1801 ret = SPCT_HAS_SMALL_CHAR_ARRAY | SPCT_HAS_ARRAY;
1802 else
1803 ret = SPCT_HAS_LARGE_CHAR_ARRAY | SPCT_HAS_ARRAY;
1804 }
1805 else
1806 ret = SPCT_HAS_ARRAY;
1807 break;
1808
1809 case UNION_TYPE:
1810 case QUAL_UNION_TYPE:
1811 case RECORD_TYPE:
1812 ret = SPCT_HAS_AGGREGATE;
1813 for (t = TYPE_FIELDS (type); t ; t = TREE_CHAIN (t))
1814 if (TREE_CODE (t) == FIELD_DECL)
1815 ret |= stack_protect_classify_type (TREE_TYPE (t));
1816 break;
1817
1818 default:
1819 break;
1820 }
1821
1822 return ret;
1823 }
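
/* Some illustrative classifications, assuming the default
   --param ssp-buffer-size=8 (hypothetical examples, not from the sources):
     char buf[4];                -> SPCT_HAS_SMALL_CHAR_ARRAY | SPCT_HAS_ARRAY
     char buf[64];               -> SPCT_HAS_LARGE_CHAR_ARRAY | SPCT_HAS_ARRAY
     int  v[4];                  -> SPCT_HAS_ARRAY
     struct { char buf[64]; } s; -> SPCT_HAS_AGGREGATE | SPCT_HAS_LARGE_CHAR_ARRAY
                                    | SPCT_HAS_ARRAY
   Under plain -fstack-protector (SPCT_FLAG_DEFAULT), only decls whose type
   classifies as SPCT_HAS_LARGE_CHAR_ARRAY make stack_protect_decl_phase
   return nonzero.  */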
1824
1825 /* Return nonzero if DECL should be segregated into the "vulnerable" upper
1826 part of the local stack frame. Remember if we ever return nonzero for
1827 any variable in this function. The return value is the phase number in
1828 which the variable should be allocated. */
1829
1830 static int
1831 stack_protect_decl_phase (tree decl)
1832 {
1833 unsigned int bits = stack_protect_classify_type (TREE_TYPE (decl));
1834 int ret = 0;
1835
1836 if (bits & SPCT_HAS_SMALL_CHAR_ARRAY)
1837 has_short_buffer = true;
1838
1839 if (flag_stack_protect == SPCT_FLAG_ALL
1840 || flag_stack_protect == SPCT_FLAG_STRONG
1841 || (flag_stack_protect == SPCT_FLAG_EXPLICIT
1842 && lookup_attribute ("stack_protect",
1843 DECL_ATTRIBUTES (current_function_decl))))
1844 {
1845 if ((bits & (SPCT_HAS_SMALL_CHAR_ARRAY | SPCT_HAS_LARGE_CHAR_ARRAY))
1846 && !(bits & SPCT_HAS_AGGREGATE))
1847 ret = 1;
1848 else if (bits & SPCT_HAS_ARRAY)
1849 ret = 2;
1850 }
1851 else
1852 ret = (bits & SPCT_HAS_LARGE_CHAR_ARRAY) != 0;
1853
1854 if (ret)
1855 has_protected_decls = true;
1856
1857 return ret;
1858 }
1859
1860 /* Two helper routines that check for phase 1 and phase 2. These are used
1861 as callbacks for expand_stack_vars. */
1862
1863 static bool
1864 stack_protect_decl_phase_1 (size_t i)
1865 {
1866 return stack_protect_decl_phase (stack_vars[i].decl) == 1;
1867 }
1868
1869 static bool
1870 stack_protect_decl_phase_2 (size_t i)
1871 {
1872 return stack_protect_decl_phase (stack_vars[i].decl) == 2;
1873 }
1874
1875 /* A helper function that checks for the asan phase (with stack protector
1876 it is phase 3). This is used as a callback for expand_stack_vars.
1877 Returns true if any of the vars in the partition need to be protected. */
1878
1879 static bool
1880 asan_decl_phase_3 (size_t i)
1881 {
1882 while (i != EOC)
1883 {
1884 if (asan_protect_stack_decl (stack_vars[i].decl))
1885 return true;
1886 i = stack_vars[i].next;
1887 }
1888 return false;
1889 }
1890
1891 /* Ensure that variables in different stack protection phases conflict
1892 so that they are not merged and share the same stack slot. */
1893
1894 static void
1895 add_stack_protection_conflicts (void)
1896 {
1897 size_t i, j, n = stack_vars_num;
1898 unsigned char *phase;
1899
1900 phase = XNEWVEC (unsigned char, n);
1901 for (i = 0; i < n; ++i)
1902 phase[i] = stack_protect_decl_phase (stack_vars[i].decl);
1903
1904 for (i = 0; i < n; ++i)
1905 {
1906 unsigned char ph_i = phase[i];
1907 for (j = i + 1; j < n; ++j)
1908 if (ph_i != phase[j])
1909 add_stack_var_conflict (i, j);
1910 }
1911
1912 XDELETEVEC (phase);
1913 }
1914
1915 /* Create a decl for the guard at the top of the stack frame. */
1916
1917 static void
1918 create_stack_guard (void)
1919 {
1920 tree guard = build_decl (DECL_SOURCE_LOCATION (current_function_decl),
1921 VAR_DECL, NULL, ptr_type_node);
1922 TREE_THIS_VOLATILE (guard) = 1;
1923 TREE_USED (guard) = 1;
1924 expand_one_stack_var (guard);
1925 crtl->stack_protect_guard = guard;
1926 }
1927
1928 /* Prepare for expanding variables. */
1929 static void
1930 init_vars_expansion (void)
1931 {
1932 /* Conflict bitmaps, and a few related temporary bitmaps, go here. */
1933 bitmap_obstack_initialize (&stack_var_bitmap_obstack);
1934
1935 /* A map from decl to stack partition. */
1936 decl_to_stack_part = new hash_map<tree, size_t>;
1937
1938 /* Initialize local stack smashing state. */
1939 has_protected_decls = false;
1940 has_short_buffer = false;
1941 }
1942
1943 /* Free up stack variable graph data. */
1944 static void
1945 fini_vars_expansion (void)
1946 {
1947 bitmap_obstack_release (&stack_var_bitmap_obstack);
1948 if (stack_vars)
1949 XDELETEVEC (stack_vars);
1950 if (stack_vars_sorted)
1951 XDELETEVEC (stack_vars_sorted);
1952 stack_vars = NULL;
1953 stack_vars_sorted = NULL;
1954 stack_vars_alloc = stack_vars_num = 0;
1955 delete decl_to_stack_part;
1956 decl_to_stack_part = NULL;
1957 }
1958
1959 /* Make a fair guess for the size of the stack frame of the function
1960 in NODE. This doesn't have to be exact; the result is only used in
1961 the inline heuristics. So we don't want to run the full stack var
1962 packing algorithm (which is quadratic in the number of stack vars).
1963 Instead, we calculate the total size of all stack vars. This turns
1964 out to be a pretty fair estimate -- packing of stack vars doesn't
1965 happen very often. */
1966
1967 HOST_WIDE_INT
1968 estimated_stack_frame_size (struct cgraph_node *node)
1969 {
1970 poly_int64 size = 0;
1971 size_t i;
1972 tree var;
1973 struct function *fn = DECL_STRUCT_FUNCTION (node->decl);
1974
1975 push_cfun (fn);
1976
1977 init_vars_expansion ();
1978
1979 FOR_EACH_LOCAL_DECL (fn, i, var)
1980 if (auto_var_in_fn_p (var, fn->decl))
1981 size += expand_one_var (var, true, false);
1982
1983 if (stack_vars_num > 0)
1984 {
1985 /* Fake sorting the stack vars for account_stack_vars (). */
1986 stack_vars_sorted = XNEWVEC (size_t, stack_vars_num);
1987 for (i = 0; i < stack_vars_num; ++i)
1988 stack_vars_sorted[i] = i;
1989 size += account_stack_vars ();
1990 }
1991
1992 fini_vars_expansion ();
1993 pop_cfun ();
1994 return estimated_poly_value (size);
1995 }
1996
1997 /* Helper routine to check if a record or union contains an array field. */
1998
1999 static int
2000 record_or_union_type_has_array_p (const_tree tree_type)
2001 {
2002 tree fields = TYPE_FIELDS (tree_type);
2003 tree f;
2004
2005 for (f = fields; f; f = DECL_CHAIN (f))
2006 if (TREE_CODE (f) == FIELD_DECL)
2007 {
2008 tree field_type = TREE_TYPE (f);
2009 if (RECORD_OR_UNION_TYPE_P (field_type)
2010 && record_or_union_type_has_array_p (field_type))
2011 return 1;
2012 if (TREE_CODE (field_type) == ARRAY_TYPE)
2013 return 1;
2014 }
2015 return 0;
2016 }
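/* For example, both "struct S { int n; char buf[8]; }" and
   "struct T { struct S s; }" are reported as containing an array,
   the latter through the recursive walk over its fields.  */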
2017
2018 /* Check if the current function has local referenced variables that
2019 have their addresses taken, contain an array, or are arrays. */
2020
2021 static bool
2022 stack_protect_decl_p ()
2023 {
2024 unsigned i;
2025 tree var;
2026
2027 FOR_EACH_LOCAL_DECL (cfun, i, var)
2028 if (!is_global_var (var))
2029 {
2030 tree var_type = TREE_TYPE (var);
2031 if (VAR_P (var)
2032 && (TREE_CODE (var_type) == ARRAY_TYPE
2033 || TREE_ADDRESSABLE (var)
2034 || (RECORD_OR_UNION_TYPE_P (var_type)
2035 && record_or_union_type_has_array_p (var_type))))
2036 return true;
2037 }
2038 return false;
2039 }
2040
2041 /* Check if the current function has calls that use a return slot. */
2042
2043 static bool
2044 stack_protect_return_slot_p ()
2045 {
2046 basic_block bb;
2047
2048 FOR_ALL_BB_FN (bb, cfun)
2049 for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
2050 !gsi_end_p (gsi); gsi_next (&gsi))
2051 {
2052 gimple *stmt = gsi_stmt (gsi);
2053 /* This assumes that calls to internal-only functions never
2054 use a return slot. */
2055 if (is_gimple_call (stmt)
2056 && !gimple_call_internal_p (stmt)
2057 && aggregate_value_p (TREE_TYPE (gimple_call_fntype (stmt)),
2058 gimple_call_fndecl (stmt)))
2059 return true;
2060 }
2061 return false;
2062 }
2063
2064 /* Expand all variables used in the function. */
2065
2066 static rtx_insn *
2067 expand_used_vars (void)
2068 {
2069 tree var, outer_block = DECL_INITIAL (current_function_decl);
2070 auto_vec<tree> maybe_local_decls;
2071 rtx_insn *var_end_seq = NULL;
2072 unsigned i;
2073 unsigned len;
2074 bool gen_stack_protect_signal = false;
2075
2076 /* Compute the phase of the stack frame for this function. */
2077 {
2078 int align = PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT;
2079 int off = targetm.starting_frame_offset () % align;
2080 frame_phase = off ? align - off : 0;
2081 }
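/* As a hypothetical example, with a PREFERRED_STACK_BOUNDARY of 128 bits
   (align == 16) and targetm.starting_frame_offset () returning 8, off is 8
   and frame_phase becomes 8; the phase compensates for a starting frame
   offset that is not itself a multiple of the preferred alignment.  */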
2082
2083 /* Set TREE_USED on all variables in the local_decls. */
2084 FOR_EACH_LOCAL_DECL (cfun, i, var)
2085 TREE_USED (var) = 1;
2086 /* Clear TREE_USED on all variables associated with a block scope. */
2087 clear_tree_used (DECL_INITIAL (current_function_decl));
2088
2089 init_vars_expansion ();
2090
2091 if (targetm.use_pseudo_pic_reg ())
2092 pic_offset_table_rtx = gen_reg_rtx (Pmode);
2093
2094 for (i = 0; i < SA.map->num_partitions; i++)
2095 {
2096 if (bitmap_bit_p (SA.partitions_for_parm_default_defs, i))
2097 continue;
2098
2099 tree var = partition_to_var (SA.map, i);
2100
2101 gcc_assert (!virtual_operand_p (var));
2102
2103 expand_one_ssa_partition (var);
2104 }
2105
2106 if (flag_stack_protect == SPCT_FLAG_STRONG)
2107 gen_stack_protect_signal
2108 = stack_protect_decl_p () || stack_protect_return_slot_p ();
2109
2110 /* At this point all variables on the local_decls with TREE_USED
2111 set are not associated with any block scope. Lay them out. */
2112
2113 len = vec_safe_length (cfun->local_decls);
2114 FOR_EACH_LOCAL_DECL (cfun, i, var)
2115 {
2116 bool expand_now = false;
2117
2118 /* Expanded above already. */
2119 if (is_gimple_reg (var))
2120 {
2121 TREE_USED (var) = 0;
2122 goto next;
2123 }
2124 /* We didn't set a block for static or extern because it's hard
2125 to tell the difference between a global variable (re)declared
2126 in a local scope, and one that's really declared there to
2127 begin with. And it doesn't really matter much, since we're
2128 not giving them stack space. Expand them now. */
2129 else if (TREE_STATIC (var) || DECL_EXTERNAL (var))
2130 expand_now = true;
2131
2132 /* Expand variables not associated with any block now. Those created by
2133 the optimizers could be live anywhere in the function. Those that
2134 could possibly have been scoped originally and detached from their
2135 block will have their allocation deferred so we coalesce them with
2136 others when optimization is enabled. */
2137 else if (TREE_USED (var))
2138 expand_now = true;
2139
2140 /* Finally, mark all variables on the list as used. We'll use
2141 this in a moment when we expand those associated with scopes. */
2142 TREE_USED (var) = 1;
2143
2144 if (expand_now)
2145 expand_one_var (var, true, true);
2146
2147 next:
2148 if (DECL_ARTIFICIAL (var) && !DECL_IGNORED_P (var))
2149 {
2150 rtx rtl = DECL_RTL_IF_SET (var);
2151
2152 /* Keep artificial non-ignored vars in cfun->local_decls
2153 chain until instantiate_decls. */
2154 if (rtl && (MEM_P (rtl) || GET_CODE (rtl) == CONCAT))
2155 add_local_decl (cfun, var);
2156 else if (rtl == NULL_RTX)
2157 /* If rtl isn't set yet, which can happen e.g. with
2158 -fstack-protector, retry before returning from this
2159 function. */
2160 maybe_local_decls.safe_push (var);
2161 }
2162 }
2163
2164 /* We duplicated some of the decls in CFUN->LOCAL_DECLS.
2165
2166 +-----------------+-----------------+
2167 | ...processed... | ...duplicates...|
2168 +-----------------+-----------------+
2169 ^
2170 +-- LEN points here.
2171
2172 We just want the duplicates, as those are the artificial
2173 non-ignored vars that we want to keep until instantiate_decls.
2174 Move them down and truncate the array. */
2175 if (!vec_safe_is_empty (cfun->local_decls))
2176 cfun->local_decls->block_remove (0, len);
2177
2178 /* At this point, all variables within the block tree with TREE_USED
2179 set are actually used by the optimized function. Lay them out. */
2180 expand_used_vars_for_block (outer_block, true);
2181
2182 if (stack_vars_num > 0)
2183 {
2184 add_scope_conflicts ();
2185
2186 /* If stack protection is enabled, we don't share space between
2187 vulnerable data and non-vulnerable data. */
2188 if (flag_stack_protect != 0
2189 && (flag_stack_protect != SPCT_FLAG_EXPLICIT
2190 || (flag_stack_protect == SPCT_FLAG_EXPLICIT
2191 && lookup_attribute ("stack_protect",
2192 DECL_ATTRIBUTES (current_function_decl)))))
2193 add_stack_protection_conflicts ();
2194
2195 /* Now that we have collected all stack variables, and have computed a
2196 minimal interference graph, attempt to save some stack space. */
2197 partition_stack_vars ();
2198 if (dump_file)
2199 dump_stack_var_partition ();
2200 }
2201
2202 switch (flag_stack_protect)
2203 {
2204 case SPCT_FLAG_ALL:
2205 create_stack_guard ();
2206 break;
2207
2208 case SPCT_FLAG_STRONG:
2209 if (gen_stack_protect_signal
2210 || cfun->calls_alloca || has_protected_decls
2211 || lookup_attribute ("stack_protect",
2212 DECL_ATTRIBUTES (current_function_decl)))
2213 create_stack_guard ();
2214 break;
2215
2216 case SPCT_FLAG_DEFAULT:
2217 if (cfun->calls_alloca || has_protected_decls
2218 || lookup_attribute ("stack_protect",
2219 DECL_ATTRIBUTES (current_function_decl)))
2220 create_stack_guard ();
2221 break;
2222
2223 case SPCT_FLAG_EXPLICIT:
2224 if (lookup_attribute ("stack_protect",
2225 DECL_ATTRIBUTES (current_function_decl)))
2226 create_stack_guard ();
2227 break;
2228 default:
2229 ;
2230 }
2231
2232 /* Assign rtl to each variable based on these partitions. */
2233 if (stack_vars_num > 0)
2234 {
2235 class stack_vars_data data;
2236
2237 data.asan_base = NULL_RTX;
2238 data.asan_alignb = 0;
2239
2240 /* Reorder decls to be protected by iterating over the variables
2241 array multiple times, and allocating out of each phase in turn. */
2242 /* ??? We could probably integrate this into the qsort we did
2243 earlier, such that we naturally see these variables first,
2244 and thus naturally allocate things in the right order. */
2245 if (has_protected_decls)
2246 {
2247 /* Phase 1 contains only character arrays. */
2248 expand_stack_vars (stack_protect_decl_phase_1, &data);
2249
2250 /* Phase 2 contains other kinds of arrays. */
2251 if (flag_stack_protect == SPCT_FLAG_ALL
2252 || flag_stack_protect == SPCT_FLAG_STRONG
2253 || (flag_stack_protect == SPCT_FLAG_EXPLICIT
2254 && lookup_attribute ("stack_protect",
2255 DECL_ATTRIBUTES (current_function_decl))))
2256 expand_stack_vars (stack_protect_decl_phase_2, &data);
2257 }
2258
2259 if (asan_sanitize_stack_p ())
2260 /* Phase 3, any partitions that need asan protection
2261 in addition to phase 1 and 2. */
2262 expand_stack_vars (asan_decl_phase_3, &data);
2263
2264 /* ASAN description strings don't yet have a syntax for expressing
2265 polynomial offsets. */
2266 HOST_WIDE_INT prev_offset;
2267 if (!data.asan_vec.is_empty ()
2268 && frame_offset.is_constant (&prev_offset))
2269 {
2270 HOST_WIDE_INT offset, sz, redzonesz;
2271 redzonesz = ASAN_RED_ZONE_SIZE;
2272 sz = data.asan_vec[0] - prev_offset;
2273 if (data.asan_alignb > ASAN_RED_ZONE_SIZE
2274 && data.asan_alignb <= 4096
2275 && sz + ASAN_RED_ZONE_SIZE >= (int) data.asan_alignb)
2276 redzonesz = ((sz + ASAN_RED_ZONE_SIZE + data.asan_alignb - 1)
2277 & ~(data.asan_alignb - HOST_WIDE_INT_1)) - sz;
2278 /* Allocating a constant amount of space from a constant
2279 starting offset must give a constant result. */
2280 offset = (alloc_stack_frame_space (redzonesz, ASAN_RED_ZONE_SIZE)
2281 .to_constant ());
2282 data.asan_vec.safe_push (prev_offset);
2283 data.asan_vec.safe_push (offset);
2284 /* Leave space for alignment if STRICT_ALIGNMENT. */
2285 if (STRICT_ALIGNMENT)
2286 alloc_stack_frame_space ((GET_MODE_ALIGNMENT (SImode)
2287 << ASAN_SHADOW_SHIFT)
2288 / BITS_PER_UNIT, 1);
2289
2290 var_end_seq
2291 = asan_emit_stack_protection (virtual_stack_vars_rtx,
2292 data.asan_base,
2293 data.asan_alignb,
2294 data.asan_vec.address (),
2295 data.asan_decl_vec.address (),
2296 data.asan_vec.length ());
2297 }
2298
2299 expand_stack_vars (NULL, &data);
2300 }
2301
2302 if (asan_sanitize_allocas_p () && cfun->calls_alloca)
2303 var_end_seq = asan_emit_allocas_unpoison (virtual_stack_dynamic_rtx,
2304 virtual_stack_vars_rtx,
2305 var_end_seq);
2306
2307 fini_vars_expansion ();
2308
2309 /* If there were any artificial non-ignored vars without rtl
2310 found earlier, see if deferred stack allocation hasn't assigned
2311 rtl to them. */
2312 FOR_EACH_VEC_ELT_REVERSE (maybe_local_decls, i, var)
2313 {
2314 rtx rtl = DECL_RTL_IF_SET (var);
2315
2316 /* Keep artificial non-ignored vars in cfun->local_decls
2317 chain until instantiate_decls. */
2318 if (rtl && (MEM_P (rtl) || GET_CODE (rtl) == CONCAT))
2319 add_local_decl (cfun, var);
2320 }
2321
2322 /* If the target requires that FRAME_OFFSET be aligned, do it. */
2323 if (STACK_ALIGNMENT_NEEDED)
2324 {
2325 HOST_WIDE_INT align = PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT;
2326 if (FRAME_GROWS_DOWNWARD)
2327 frame_offset = aligned_lower_bound (frame_offset, align);
2328 else
2329 frame_offset = aligned_upper_bound (frame_offset, align);
2330 }
2331
2332 return var_end_seq;
2333 }
2334
2335
2336 /* If we need to produce a detailed dump, print the tree representation
2337 for STMT to the dump file. SINCE is the last RTX after which the RTL
2338 generated for STMT should have been appended. */
2339
2340 static void
2341 maybe_dump_rtl_for_gimple_stmt (gimple *stmt, rtx_insn *since)
2342 {
2343 if (dump_file && (dump_flags & TDF_DETAILS))
2344 {
2345 fprintf (dump_file, "\n;; ");
2346 print_gimple_stmt (dump_file, stmt, 0,
2347 TDF_SLIM | (dump_flags & TDF_LINENO));
2348 fprintf (dump_file, "\n");
2349
2350 print_rtl (dump_file, since ? NEXT_INSN (since) : since);
2351 }
2352 }
2353
2354 /* Maps the blocks that do not contain tree labels to rtx labels. */
2355
2356 static hash_map<basic_block, rtx_code_label *> *lab_rtx_for_bb;
2357
2358 /* Returns the label_rtx expression for a label starting basic block BB. */
2359
2360 static rtx_code_label *
2361 label_rtx_for_bb (basic_block bb ATTRIBUTE_UNUSED)
2362 {
2363 gimple_stmt_iterator gsi;
2364 tree lab;
2365
2366 if (bb->flags & BB_RTL)
2367 return block_label (bb);
2368
2369 rtx_code_label **elt = lab_rtx_for_bb->get (bb);
2370 if (elt)
2371 return *elt;
2372
2373 /* Find the tree label if it is present. */
2374
2375 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2376 {
2377 glabel *lab_stmt;
2378
2379 lab_stmt = dyn_cast <glabel *> (gsi_stmt (gsi));
2380 if (!lab_stmt)
2381 break;
2382
2383 lab = gimple_label_label (lab_stmt);
2384 if (DECL_NONLOCAL (lab))
2385 break;
2386
2387 return jump_target_rtx (lab);
2388 }
2389
2390 rtx_code_label *l = gen_label_rtx ();
2391 lab_rtx_for_bb->put (bb, l);
2392 return l;
2393 }
2394
2395
2396 /* A subroutine of expand_gimple_cond. Given E, a fallthrough edge
2397 of a basic block where we just expanded the conditional at the end,
2398 possibly clean up the CFG and instruction sequence. LAST is the
2399 last instruction before the just emitted jump sequence. */
2400
2401 static void
2402 maybe_cleanup_end_of_block (edge e, rtx_insn *last)
2403 {
2404 /* Special case: when jumpif decides that the condition is
2405 trivial it emits an unconditional jump (and the necessary
2406 barrier). But we still have two edges, the fallthru one is
2407 wrong. purge_dead_edges would clean this up later. Unfortunately
2408 we have to insert insns (and split edges) before
2409 find_many_sub_basic_blocks and hence before purge_dead_edges.
2410 But splitting edges might create new blocks which depend on the
2411 fact that if there are two edges there's no barrier. So the
2412 barrier would get lost and verify_flow_info would ICE. Instead
2413 of auditing all edge splitters to care for the barrier (which
2414 normally isn't there in a cleaned CFG), fix it here. */
2415 if (BARRIER_P (get_last_insn ()))
2416 {
2417 rtx_insn *insn;
2418 remove_edge (e);
2419 /* Now we have a single successor block; if we have insns to
2420 insert on the remaining edge, we will potentially insert
2421 them at the end of this block (if the dest block isn't feasible)
2422 in order to avoid splitting the edge. This insertion will take
2423 place in front of the last jump. But we might have emitted
2424 multiple jumps (conditional and one unconditional) to the
2425 same destination. Inserting in front of the last one then
2426 is a problem. See PR 40021. We fix this by deleting all
2427 jumps except the last unconditional one. */
2428 insn = PREV_INSN (get_last_insn ());
2429 /* Make sure we have an unconditional jump. Otherwise we're
2430 confused. */
2431 gcc_assert (JUMP_P (insn) && !any_condjump_p (insn));
2432 for (insn = PREV_INSN (insn); insn != last;)
2433 {
2434 insn = PREV_INSN (insn);
2435 if (JUMP_P (NEXT_INSN (insn)))
2436 {
2437 if (!any_condjump_p (NEXT_INSN (insn)))
2438 {
2439 gcc_assert (BARRIER_P (NEXT_INSN (NEXT_INSN (insn))));
2440 delete_insn (NEXT_INSN (NEXT_INSN (insn)));
2441 }
2442 delete_insn (NEXT_INSN (insn));
2443 }
2444 }
2445 }
2446 }
2447
2448 /* A subroutine of expand_gimple_basic_block. Expand one GIMPLE_COND.
2449 Returns a new basic block if we've terminated the current basic
2450 block and created a new one. */
2451
2452 static basic_block
2453 expand_gimple_cond (basic_block bb, gcond *stmt)
2454 {
2455 basic_block new_bb, dest;
2456 edge true_edge;
2457 edge false_edge;
2458 rtx_insn *last2, *last;
2459 enum tree_code code;
2460 tree op0, op1;
2461
2462 code = gimple_cond_code (stmt);
2463 op0 = gimple_cond_lhs (stmt);
2464 op1 = gimple_cond_rhs (stmt);
2465 /* We're sometimes presented with such code:
2466 D.123_1 = x < y;
2467 if (D.123_1 != 0)
2468 ...
2469 This would expand to two comparisons which then later might
2470 be cleaned up by combine. But some pattern matchers like if-conversion
2471 work better when there's only one compare, so make up for this
2472 here as a special exception if TER would have made the same change. */
2473 if (SA.values
2474 && TREE_CODE (op0) == SSA_NAME
2475 && TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE
2476 && TREE_CODE (op1) == INTEGER_CST
2477 && ((gimple_cond_code (stmt) == NE_EXPR
2478 && integer_zerop (op1))
2479 || (gimple_cond_code (stmt) == EQ_EXPR
2480 && integer_onep (op1)))
2481 && bitmap_bit_p (SA.values, SSA_NAME_VERSION (op0)))
2482 {
2483 gimple *second = SSA_NAME_DEF_STMT (op0);
2484 if (gimple_code (second) == GIMPLE_ASSIGN)
2485 {
2486 enum tree_code code2 = gimple_assign_rhs_code (second);
2487 if (TREE_CODE_CLASS (code2) == tcc_comparison)
2488 {
2489 code = code2;
2490 op0 = gimple_assign_rhs1 (second);
2491 op1 = gimple_assign_rhs2 (second);
2492 }
2493 /* If jumps are cheap and the target does not support conditional
2494 compare, turn some more codes into jumpy sequences. */
2495 else if (BRANCH_COST (optimize_insn_for_speed_p (), false) < 4
2496 && targetm.gen_ccmp_first == NULL)
2497 {
2498 if ((code2 == BIT_AND_EXPR
2499 && TYPE_PRECISION (TREE_TYPE (op0)) == 1
2500 && TREE_CODE (gimple_assign_rhs2 (second)) != INTEGER_CST)
2501 || code2 == TRUTH_AND_EXPR)
2502 {
2503 code = TRUTH_ANDIF_EXPR;
2504 op0 = gimple_assign_rhs1 (second);
2505 op1 = gimple_assign_rhs2 (second);
2506 }
2507 else if (code2 == BIT_IOR_EXPR || code2 == TRUTH_OR_EXPR)
2508 {
2509 code = TRUTH_ORIF_EXPR;
2510 op0 = gimple_assign_rhs1 (second);
2511 op1 = gimple_assign_rhs2 (second);
2512 }
2513 }
2514 }
2515 }
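/* For illustration, given SSA code such as
     a_1 = x_2 > 0;
     b_3 = y_4 > 0;
     c_5 = a_1 & b_3;
     if (c_5 != 0) ...
   with c_5 replaceable by TER, jumps cheap, and no conditional-compare
   support, the single-bit BIT_AND_EXPR may be turned into a
   TRUTH_ANDIF_EXPR here, i.e. expanded as two conditional jumps instead
   of a materialized bitwise AND.  */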
2516
2517 /* Optimize (x % C1) == C2 or (x % C1) != C2 if it is beneficial
2518 into (x - C2) * C3 < C4. */
2519 if ((code == EQ_EXPR || code == NE_EXPR)
2520 && TREE_CODE (op0) == SSA_NAME
2521 && TREE_CODE (op1) == INTEGER_CST)
2522 code = maybe_optimize_mod_cmp (code, &op0, &op1);
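/* As an illustrative outcome (the actual constants are chosen by
   maybe_optimize_mod_cmp), a test like "x % 3 == 0" on a 32-bit unsigned
   x can become "x * 0xaaaaaaab <= 0x55555555", using the modular
   multiplicative inverse of 3 and an unsigned comparison instead of a
   division.  */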
2523
2524 last2 = last = get_last_insn ();
2525
2526 extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
2527 set_curr_insn_location (gimple_location (stmt));
2528
2529 /* These flags have no purpose in RTL land. */
2530 true_edge->flags &= ~EDGE_TRUE_VALUE;
2531 false_edge->flags &= ~EDGE_FALSE_VALUE;
2532
2533 /* We can either have a pure conditional jump with one fallthru edge or
2534 a two-way jump that needs to be decomposed into two basic blocks. */
2535 if (false_edge->dest == bb->next_bb)
2536 {
2537 jumpif_1 (code, op0, op1, label_rtx_for_bb (true_edge->dest),
2538 true_edge->probability);
2539 maybe_dump_rtl_for_gimple_stmt (stmt, last);
2540 if (true_edge->goto_locus != UNKNOWN_LOCATION)
2541 set_curr_insn_location (true_edge->goto_locus);
2542 false_edge->flags |= EDGE_FALLTHRU;
2543 maybe_cleanup_end_of_block (false_edge, last);
2544 return NULL;
2545 }
2546 if (true_edge->dest == bb->next_bb)
2547 {
2548 jumpifnot_1 (code, op0, op1, label_rtx_for_bb (false_edge->dest),
2549 false_edge->probability);
2550 maybe_dump_rtl_for_gimple_stmt (stmt, last);
2551 if (false_edge->goto_locus != UNKNOWN_LOCATION)
2552 set_curr_insn_location (false_edge->goto_locus);
2553 true_edge->flags |= EDGE_FALLTHRU;
2554 maybe_cleanup_end_of_block (true_edge, last);
2555 return NULL;
2556 }
2557
2558 jumpif_1 (code, op0, op1, label_rtx_for_bb (true_edge->dest),
2559 true_edge->probability);
2560 last = get_last_insn ();
2561 if (false_edge->goto_locus != UNKNOWN_LOCATION)
2562 set_curr_insn_location (false_edge->goto_locus);
2563 emit_jump (label_rtx_for_bb (false_edge->dest));
2564
2565 BB_END (bb) = last;
2566 if (BARRIER_P (BB_END (bb)))
2567 BB_END (bb) = PREV_INSN (BB_END (bb));
2568 update_bb_for_insn (bb);
2569
2570 new_bb = create_basic_block (NEXT_INSN (last), get_last_insn (), bb);
2571 dest = false_edge->dest;
2572 redirect_edge_succ (false_edge, new_bb);
2573 false_edge->flags |= EDGE_FALLTHRU;
2574 new_bb->count = false_edge->count ();
2575 loop_p loop = find_common_loop (bb->loop_father, dest->loop_father);
2576 add_bb_to_loop (new_bb, loop);
2577 if (loop->latch == bb
2578 && loop->header == dest)
2579 loop->latch = new_bb;
2580 make_single_succ_edge (new_bb, dest, 0);
2581 if (BARRIER_P (BB_END (new_bb)))
2582 BB_END (new_bb) = PREV_INSN (BB_END (new_bb));
2583 update_bb_for_insn (new_bb);
2584
2585 maybe_dump_rtl_for_gimple_stmt (stmt, last2);
2586
2587 if (true_edge->goto_locus != UNKNOWN_LOCATION)
2588 {
2589 set_curr_insn_location (true_edge->goto_locus);
2590 true_edge->goto_locus = curr_insn_location ();
2591 }
2592
2593 return new_bb;
2594 }
2595
2596 /* Mark all calls that can have a transaction restart. */
2597
2598 static void
2599 mark_transaction_restart_calls (gimple *stmt)
2600 {
2601 struct tm_restart_node dummy;
2602 tm_restart_node **slot;
2603
2604 if (!cfun->gimple_df->tm_restart)
2605 return;
2606
2607 dummy.stmt = stmt;
2608 slot = cfun->gimple_df->tm_restart->find_slot (&dummy, NO_INSERT);
2609 if (slot)
2610 {
2611 struct tm_restart_node *n = *slot;
2612 tree list = n->label_or_list;
2613 rtx_insn *insn;
2614
2615 for (insn = next_real_insn (get_last_insn ());
2616 !CALL_P (insn);
2617 insn = next_real_insn (insn))
2618 continue;
2619
2620 if (TREE_CODE (list) == LABEL_DECL)
2621 add_reg_note (insn, REG_TM, label_rtx (list));
2622 else
2623 for (; list ; list = TREE_CHAIN (list))
2624 add_reg_note (insn, REG_TM, label_rtx (TREE_VALUE (list)));
2625 }
2626 }
2627
2628 /* A subroutine of expand_gimple_stmt_1, expanding one GIMPLE_CALL
2629 statement STMT. */
2630
2631 static void
2632 expand_call_stmt (gcall *stmt)
2633 {
2634 tree exp, decl, lhs;
2635 bool builtin_p;
2636 size_t i;
2637
2638 if (gimple_call_internal_p (stmt))
2639 {
2640 expand_internal_call (stmt);
2641 return;
2642 }
2643
2644 /* If this is a call to a built-in function and it has no effect other
2645 than setting the lhs, try to implement it using an internal function
2646 instead. */
2647 decl = gimple_call_fndecl (stmt);
2648 if (gimple_call_lhs (stmt)
2649 && !gimple_has_side_effects (stmt)
2650 && (optimize || (decl && called_as_built_in (decl))))
2651 {
2652 internal_fn ifn = replacement_internal_fn (stmt);
2653 if (ifn != IFN_LAST)
2654 {
2655 expand_internal_call (ifn, stmt);
2656 return;
2657 }
2658 }
2659
2660 exp = build_vl_exp (CALL_EXPR, gimple_call_num_args (stmt) + 3);
2661
2662 CALL_EXPR_FN (exp) = gimple_call_fn (stmt);
2663 builtin_p = decl && fndecl_built_in_p (decl);
2664
2665 /* If this is not a builtin function, the function type through which the
2666 call is made may be different from the type of the function. */
2667 if (!builtin_p)
2668 CALL_EXPR_FN (exp)
2669 = fold_convert (build_pointer_type (gimple_call_fntype (stmt)),
2670 CALL_EXPR_FN (exp));
2671
2672 TREE_TYPE (exp) = gimple_call_return_type (stmt);
2673 CALL_EXPR_STATIC_CHAIN (exp) = gimple_call_chain (stmt);
2674
2675 for (i = 0; i < gimple_call_num_args (stmt); i++)
2676 {
2677 tree arg = gimple_call_arg (stmt, i);
2678 gimple *def;
2679 /* TER forwards addresses into arguments of builtin functions so we
2680 have a chance to infer more correct alignment information. See PR39954. */
2681 if (builtin_p
2682 && TREE_CODE (arg) == SSA_NAME
2683 && (def = get_gimple_for_ssa_name (arg))
2684 && gimple_assign_rhs_code (def) == ADDR_EXPR)
2685 arg = gimple_assign_rhs1 (def);
2686 CALL_EXPR_ARG (exp, i) = arg;
2687 }
2688
2689 if (gimple_has_side_effects (stmt))
2690 TREE_SIDE_EFFECTS (exp) = 1;
2691
2692 if (gimple_call_nothrow_p (stmt))
2693 TREE_NOTHROW (exp) = 1;
2694
2695 if (gimple_no_warning_p (stmt))
2696 TREE_NO_WARNING (exp) = 1;
2697
2698 CALL_EXPR_TAILCALL (exp) = gimple_call_tail_p (stmt);
2699 CALL_EXPR_MUST_TAIL_CALL (exp) = gimple_call_must_tail_p (stmt);
2700 CALL_EXPR_RETURN_SLOT_OPT (exp) = gimple_call_return_slot_opt_p (stmt);
2701 if (decl
2702 && fndecl_built_in_p (decl, BUILT_IN_NORMAL)
2703 && ALLOCA_FUNCTION_CODE_P (DECL_FUNCTION_CODE (decl)))
2704 CALL_ALLOCA_FOR_VAR_P (exp) = gimple_call_alloca_for_var_p (stmt);
2705 else
2706 CALL_FROM_THUNK_P (exp) = gimple_call_from_thunk_p (stmt);
2707 CALL_EXPR_VA_ARG_PACK (exp) = gimple_call_va_arg_pack_p (stmt);
2708 CALL_EXPR_BY_DESCRIPTOR (exp) = gimple_call_by_descriptor_p (stmt);
2709 SET_EXPR_LOCATION (exp, gimple_location (stmt));
2710
2711 /* Ensure RTL is created for debug args. */
2712 if (decl && DECL_HAS_DEBUG_ARGS_P (decl))
2713 {
2714 vec<tree, va_gc> **debug_args = decl_debug_args_lookup (decl);
2715 unsigned int ix;
2716 tree dtemp;
2717
2718 if (debug_args)
2719 for (ix = 1; (*debug_args)->iterate (ix, &dtemp); ix += 2)
2720 {
2721 gcc_assert (TREE_CODE (dtemp) == DEBUG_EXPR_DECL);
2722 expand_debug_expr (dtemp);
2723 }
2724 }
2725
2726 rtx_insn *before_call = get_last_insn ();
2727 lhs = gimple_call_lhs (stmt);
2728 if (lhs)
2729 expand_assignment (lhs, exp, false);
2730 else
2731 expand_expr (exp, const0_rtx, VOIDmode, EXPAND_NORMAL);
2732
2733 /* If the gimple call is an indirect call and has the 'nocf_check'
2734 attribute, find the generated CALL insn and mark it to indicate that
2735 no control-flow verification is needed for it. */
2736 if (gimple_call_nocf_check_p (stmt)
2737 && !gimple_call_fndecl (stmt))
2738 {
2739 rtx_insn *last = get_last_insn ();
2740 while (!CALL_P (last)
2741 && last != before_call)
2742 last = PREV_INSN (last);
2743
2744 if (last != before_call)
2745 add_reg_note (last, REG_CALL_NOCF_CHECK, const0_rtx);
2746 }
2747
2748 mark_transaction_restart_calls (stmt);
2749 }
2750
2751
2752 /* Generate RTL for an asm statement (explicit assembler code).
2753 STRING is a STRING_CST node containing the assembler code text,
2754 or an ADDR_EXPR containing a STRING_CST. VOL nonzero means the
2755 insn is volatile; don't optimize it. */
2756
2757 static void
2758 expand_asm_loc (tree string, int vol, location_t locus)
2759 {
2760 rtx body;
2761
2762 body = gen_rtx_ASM_INPUT_loc (VOIDmode,
2763 ggc_strdup (TREE_STRING_POINTER (string)),
2764 locus);
2765
2766 MEM_VOLATILE_P (body) = vol;
2767
2768 /* Non-empty basic ASM implicitly clobbers memory. */
2769 if (TREE_STRING_LENGTH (string) != 0)
2770 {
2771 rtx asm_op, clob;
2772 unsigned i, nclobbers;
2773 auto_vec<rtx> input_rvec, output_rvec;
2774 auto_vec<const char *> constraints;
2775 auto_vec<rtx> clobber_rvec;
2776 HARD_REG_SET clobbered_regs;
2777 CLEAR_HARD_REG_SET (clobbered_regs);
2778
2779 clob = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode));
2780 clobber_rvec.safe_push (clob);
2781
2782 if (targetm.md_asm_adjust)
2783 targetm.md_asm_adjust (output_rvec, input_rvec,
2784 constraints, clobber_rvec,
2785 clobbered_regs);
2786
2787 asm_op = body;
2788 nclobbers = clobber_rvec.length ();
2789 body = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (1 + nclobbers));
2790
2791 XVECEXP (body, 0, 0) = asm_op;
2792 for (i = 0; i < nclobbers; i++)
2793 XVECEXP (body, 0, i + 1) = gen_rtx_CLOBBER (VOIDmode, clobber_rvec[i]);
2794 }
2795
2796 emit_insn (body);
2797 }
2798
2799 /* Return the number of times character C occurs in string S. */
2800 static int
2801 n_occurrences (int c, const char *s)
2802 {
2803 int n = 0;
2804 while (*s)
2805 n += (*s++ == c);
2806 return n;
2807 }
2808
2809 /* A subroutine of expand_asm_operands. Check that all operands have
2810 the same number of alternatives. Return true if so. */
2811
2812 static bool
2813 check_operand_nalternatives (const vec<const char *> &constraints)
2814 {
2815 unsigned len = constraints.length();
2816 if (len > 0)
2817 {
2818 int nalternatives = n_occurrences (',', constraints[0]);
2819
2820 if (nalternatives + 1 > MAX_RECOG_ALTERNATIVES)
2821 {
2822 error ("too many alternatives in %<asm%>");
2823 return false;
2824 }
2825
2826 for (unsigned i = 1; i < len; ++i)
2827 if (n_occurrences (',', constraints[i]) != nalternatives)
2828 {
2829 error ("operand constraints for %<asm%> differ "
2830 "in number of alternatives");
2831 return false;
2832 }
2833 }
2834 return true;
2835 }
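/* For example, the constraint strings "=r,m" and "ri,m" each contain one
   comma and thus describe two alternatives, so they are consistent with
   each other; mixing either of them with a single-alternative constraint
   such as "=r" would be diagnosed here.  */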
2836
2837 /* Check for overlap between registers marked in CLOBBERED_REGS and
2838 anything inappropriate in T. Emit an error and return true if an
2839 overlap is found; return false if T is OK. */
2840
2841 static bool
2842 tree_conflicts_with_clobbers_p (tree t, HARD_REG_SET *clobbered_regs)
2843 {
2844 /* Conflicts between asm-declared register variables and the clobber
2845 list are not allowed. */
2846 tree overlap = tree_overlaps_hard_reg_set (t, clobbered_regs);
2847
2848 if (overlap)
2849 {
2850 error ("%<asm%> specifier for variable %qE conflicts with "
2851 "%<asm%> clobber list",
2852 DECL_NAME (overlap));
2853
2854 /* Reset registerness to stop multiple errors emitted for a single
2855 variable. */
2856 DECL_REGISTER (overlap) = 0;
2857 return true;
2858 }
2859
2860 return false;
2861 }
2862
2863 /* Check that the given REGNO spanning NREGS is a valid
2864 asm clobber operand. Some HW registers cannot be
2865 saved/restored, hence they should not be clobbered by
2866 asm statements. */
2867 static bool
2868 asm_clobber_reg_is_valid (int regno, int nregs, const char *regname)
2869 {
2870 bool is_valid = true;
2871 HARD_REG_SET regset;
2872
2873 CLEAR_HARD_REG_SET (regset);
2874
2875 add_range_to_hard_reg_set (&regset, regno, nregs);
2876
2877 /* Clobbering the PIC register is an error. */
2878 if (PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
2879 && overlaps_hard_reg_set_p (regset, Pmode, PIC_OFFSET_TABLE_REGNUM))
2880 {
2881 /* ??? Diagnose during gimplification? */
2882 error ("PIC register clobbered by %qs in %<asm%>", regname);
2883 is_valid = false;
2884 }
2885 else if (!in_hard_reg_set_p
2886 (accessible_reg_set, reg_raw_mode[regno], regno))
2887 {
2888 /* ??? Diagnose during gimplification? */
2889 error ("the register %qs cannot be clobbered in %<asm%>"
2890 " for the current target", regname);
2891 is_valid = false;
2892 }
2893
2894 /* Clobbering the stack pointer register is deprecated. GCC expects
2895 the value of the stack pointer after an asm statement to be the same
2896 as it was before, so no asm can validly clobber the stack pointer in
2897 the usual sense. Adding the stack pointer to the clobber list has
2898 traditionally had some undocumented and somewhat obscure side-effects. */
2899 if (overlaps_hard_reg_set_p (regset, Pmode, STACK_POINTER_REGNUM)
2900 && warning (OPT_Wdeprecated, "listing the stack pointer register"
2901 " %qs in a clobber list is deprecated", regname))
2902 inform (input_location, "the value of the stack pointer after an %<asm%>"
2903 " statement must be the same as it was before the statement");
2904
2905 return is_valid;
2906 }
2907
2908 /* Generate RTL for an asm statement with arguments.
2909 STRING is the instruction template.
2910 OUTPUTS is a list of output arguments (lvalues); INPUTS a list of inputs.
2911 Each output or input has an expression in the TREE_VALUE and
2912 a tree list in TREE_PURPOSE which in turn contains a constraint
2913 name in TREE_VALUE (or NULL_TREE) and a constraint string
2914 in TREE_PURPOSE.
2915 CLOBBERS is a list of STRING_CST nodes each naming a hard register
2916 that is clobbered by this insn.
2917
2918 LABELS is a list of labels, and if LABELS is non-NULL, FALLTHRU_BB
2919 should be the fallthru basic block of the asm goto.
2920
2921 Not all kinds of lvalue that may appear in OUTPUTS can be stored directly.
2922 Some elements of OUTPUTS may be replaced with trees representing temporary
2923 values. The caller should copy those temporary values to the originally
2924 specified lvalues.
2925
2926 VOL nonzero means the insn is volatile; don't optimize it. */
2927
2928 static void
2929 expand_asm_stmt (gasm *stmt)
2930 {
2931 class save_input_location
2932 {
2933 location_t old;
2934
2935 public:
2936 explicit save_input_location(location_t where)
2937 {
2938 old = input_location;
2939 input_location = where;
2940 }
2941
2942 ~save_input_location()
2943 {
2944 input_location = old;
2945 }
2946 };
2947
2948 location_t locus = gimple_location (stmt);
2949
2950 if (gimple_asm_input_p (stmt))
2951 {
2952 const char *s = gimple_asm_string (stmt);
2953 tree string = build_string (strlen (s), s);
2954 expand_asm_loc (string, gimple_asm_volatile_p (stmt), locus);
2955 return;
2956 }
2957
2958 /* There are some legacy diagnostics in here, and this also avoids a
2959 sixth parameter to targetm.md_asm_adjust. */
2960 save_input_location s_i_l(locus);
2961
2962 unsigned noutputs = gimple_asm_noutputs (stmt);
2963 unsigned ninputs = gimple_asm_ninputs (stmt);
2964 unsigned nlabels = gimple_asm_nlabels (stmt);
2965 unsigned i;
2966
2967 /* ??? Diagnose during gimplification? */
2968 if (ninputs + noutputs + nlabels > MAX_RECOG_OPERANDS)
2969 {
2970 error ("more than %d operands in %<asm%>", MAX_RECOG_OPERANDS);
2971 return;
2972 }
2973
2974 auto_vec<tree, MAX_RECOG_OPERANDS> output_tvec;
2975 auto_vec<tree, MAX_RECOG_OPERANDS> input_tvec;
2976 auto_vec<const char *, MAX_RECOG_OPERANDS> constraints;
2977
2978 /* Copy the gimple vectors into new vectors that we can manipulate. */
2979
2980 output_tvec.safe_grow (noutputs);
2981 input_tvec.safe_grow (ninputs);
2982 constraints.safe_grow (noutputs + ninputs);
2983
2984 for (i = 0; i < noutputs; ++i)
2985 {
2986 tree t = gimple_asm_output_op (stmt, i);
2987 output_tvec[i] = TREE_VALUE (t);
2988 constraints[i] = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (t)));
2989 }
2990 for (i = 0; i < ninputs; i++)
2991 {
2992 tree t = gimple_asm_input_op (stmt, i);
2993 input_tvec[i] = TREE_VALUE (t);
2994 constraints[i + noutputs]
2995 = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (t)));
2996 }
2997
2998 /* ??? Diagnose during gimplification? */
2999 if (! check_operand_nalternatives (constraints))
3000 return;
3001
3002 /* Count the number of meaningful clobbered registers, ignoring what
3003 we would ignore later. */
3004 auto_vec<rtx> clobber_rvec;
3005 HARD_REG_SET clobbered_regs;
3006 CLEAR_HARD_REG_SET (clobbered_regs);
3007
3008 if (unsigned n = gimple_asm_nclobbers (stmt))
3009 {
3010 clobber_rvec.reserve (n);
3011 for (i = 0; i < n; i++)
3012 {
3013 tree t = gimple_asm_clobber_op (stmt, i);
3014 const char *regname = TREE_STRING_POINTER (TREE_VALUE (t));
3015 int nregs, j;
3016
3017 j = decode_reg_name_and_count (regname, &nregs);
3018 if (j < 0)
3019 {
3020 if (j == -2)
3021 {
3022 /* ??? Diagnose during gimplification? */
3023 error ("unknown register name %qs in %<asm%>", regname);
3024 }
3025 else if (j == -4)
3026 {
3027 rtx x = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode));
3028 clobber_rvec.safe_push (x);
3029 }
3030 else
3031 {
3032 /* Otherwise we should have -1 == empty string
3033 or -3 == cc, which is not a register. */
3034 gcc_assert (j == -1 || j == -3);
3035 }
3036 }
3037 else
3038 for (int reg = j; reg < j + nregs; reg++)
3039 {
3040 if (!asm_clobber_reg_is_valid (reg, nregs, regname))
3041 return;
3042
3043 SET_HARD_REG_BIT (clobbered_regs, reg);
3044 rtx x = gen_rtx_REG (reg_raw_mode[reg], reg);
3045 clobber_rvec.safe_push (x);
3046 }
3047 }
3048 }
3049
3050 /* The first pass over inputs and outputs checks validity and calls
3051 mark_addressable where needed. */
3052 /* ??? Diagnose during gimplification? */
3053
3054 for (i = 0; i < noutputs; ++i)
3055 {
3056 tree val = output_tvec[i];
3057 tree type = TREE_TYPE (val);
3058 const char *constraint;
3059 bool is_inout;
3060 bool allows_reg;
3061 bool allows_mem;
3062
3063 /* Try to parse the output constraint. If that fails, there's
3064 no point in going further. */
3065 constraint = constraints[i];
3066 if (!parse_output_constraint (&constraint, i, ninputs, noutputs,
3067 &allows_mem, &allows_reg, &is_inout))
3068 return;
3069
3070 /* If the output is a hard register, verify it doesn't conflict with
3071 any other operand's possible hard register use. */
3072 if (DECL_P (val)
3073 && REG_P (DECL_RTL (val))
3074 && HARD_REGISTER_P (DECL_RTL (val)))
3075 {
3076 unsigned j, output_hregno = REGNO (DECL_RTL (val));
3077 bool early_clobber_p = strchr (constraints[i], '&') != NULL;
3078 unsigned long match;
3079
3080 /* Verify the other outputs do not use the same hard register. */
3081 for (j = i + 1; j < noutputs; ++j)
3082 if (DECL_P (output_tvec[j])
3083 && REG_P (DECL_RTL (output_tvec[j]))
3084 && HARD_REGISTER_P (DECL_RTL (output_tvec[j]))
3085 && output_hregno == REGNO (DECL_RTL (output_tvec[j])))
3086 error ("invalid hard register usage between output operands");
3087
3088 /* Verify matching constraint operands use the same hard register
3089 and that the non-matching constraint operands do not use the same
3090 hard register if the output is an early clobber operand. */
3091 for (j = 0; j < ninputs; ++j)
3092 if (DECL_P (input_tvec[j])
3093 && REG_P (DECL_RTL (input_tvec[j]))
3094 && HARD_REGISTER_P (DECL_RTL (input_tvec[j])))
3095 {
3096 unsigned input_hregno = REGNO (DECL_RTL (input_tvec[j]));
3097 switch (*constraints[j + noutputs])
3098 {
3099 case '0': case '1': case '2': case '3': case '4':
3100 case '5': case '6': case '7': case '8': case '9':
3101 match = strtoul (constraints[j + noutputs], NULL, 10);
3102 break;
3103 default:
3104 match = ULONG_MAX;
3105 break;
3106 }
3107 if (i == match
3108 && output_hregno != input_hregno)
3109 error ("invalid hard register usage between output operand "
3110 "and matching constraint operand");
3111 else if (early_clobber_p
3112 && i != match
3113 && output_hregno == input_hregno)
3114 error ("invalid hard register usage between earlyclobber "
3115 "operand and input operand");
3116 }
3117 }
3118
3119 if (! allows_reg
3120 && (allows_mem
3121 || is_inout
3122 || (DECL_P (val)
3123 && REG_P (DECL_RTL (val))
3124 && GET_MODE (DECL_RTL (val)) != TYPE_MODE (type))))
3125 mark_addressable (val);
3126 }
3127
3128 for (i = 0; i < ninputs; ++i)
3129 {
3130 bool allows_reg, allows_mem;
3131 const char *constraint;
3132
3133 constraint = constraints[i + noutputs];
3134 if (! parse_input_constraint (&constraint, i, ninputs, noutputs, 0,
3135 constraints.address (),
3136 &allows_mem, &allows_reg))
3137 return;
3138
3139 if (! allows_reg && allows_mem)
3140 mark_addressable (input_tvec[i]);
3141 }
3142
3143 /* Second pass evaluates arguments. */
3144
3145 /* Make sure stack is consistent for asm goto. */
3146 if (nlabels > 0)
3147 do_pending_stack_adjust ();
3148 int old_generating_concat_p = generating_concat_p;
3149
3150 /* Vector of RTX's of evaluated output operands. */
3151 auto_vec<rtx, MAX_RECOG_OPERANDS> output_rvec;
3152 auto_vec<int, MAX_RECOG_OPERANDS> inout_opnum;
3153 rtx_insn *after_rtl_seq = NULL, *after_rtl_end = NULL;
3154
3155 output_rvec.safe_grow (noutputs);
3156
3157 for (i = 0; i < noutputs; ++i)
3158 {
3159 tree val = output_tvec[i];
3160 tree type = TREE_TYPE (val);
3161 bool is_inout, allows_reg, allows_mem, ok;
3162 rtx op;
3163
3164 ok = parse_output_constraint (&constraints[i], i, ninputs,
3165 noutputs, &allows_mem, &allows_reg,
3166 &is_inout);
3167 gcc_assert (ok);
3168
3169 /* If an output operand is not a decl or indirect ref and our constraint
3170 allows a register, make a temporary to act as an intermediate.
3171 Make the asm insn write into that, then we will copy it to
3172 the real output operand. Likewise for promoted variables. */
3173
3174 generating_concat_p = 0;
3175
3176 if ((TREE_CODE (val) == INDIRECT_REF && allows_mem)
3177 || (DECL_P (val)
3178 && (allows_mem || REG_P (DECL_RTL (val)))
3179 && ! (REG_P (DECL_RTL (val))
3180 && GET_MODE (DECL_RTL (val)) != TYPE_MODE (type)))
3181 || ! allows_reg
3182 || is_inout
3183 || TREE_ADDRESSABLE (type))
3184 {
3185 op = expand_expr (val, NULL_RTX, VOIDmode,
3186 !allows_reg ? EXPAND_MEMORY : EXPAND_WRITE);
3187 if (MEM_P (op))
3188 op = validize_mem (op);
3189
3190 if (! allows_reg && !MEM_P (op))
3191 error ("output number %d not directly addressable", i);
3192 if ((! allows_mem && MEM_P (op) && GET_MODE (op) != BLKmode)
3193 || GET_CODE (op) == CONCAT)
3194 {
3195 rtx old_op = op;
3196 op = gen_reg_rtx (GET_MODE (op));
3197
3198 generating_concat_p = old_generating_concat_p;
3199
3200 if (is_inout)
3201 emit_move_insn (op, old_op);
3202
3203 push_to_sequence2 (after_rtl_seq, after_rtl_end);
3204 emit_move_insn (old_op, op);
3205 after_rtl_seq = get_insns ();
3206 after_rtl_end = get_last_insn ();
3207 end_sequence ();
3208 }
3209 }
3210 else
3211 {
3212 op = assign_temp (type, 0, 1);
3213 op = validize_mem (op);
3214 if (!MEM_P (op) && TREE_CODE (val) == SSA_NAME)
3215 set_reg_attrs_for_decl_rtl (SSA_NAME_VAR (val), op);
3216
3217 generating_concat_p = old_generating_concat_p;
3218
3219 push_to_sequence2 (after_rtl_seq, after_rtl_end);
3220 expand_assignment (val, make_tree (type, op), false);
3221 after_rtl_seq = get_insns ();
3222 after_rtl_end = get_last_insn ();
3223 end_sequence ();
3224 }
3225 output_rvec[i] = op;
3226
3227 if (is_inout)
3228 inout_opnum.safe_push (i);
3229 }
3230
3231 auto_vec<rtx, MAX_RECOG_OPERANDS> input_rvec;
3232 auto_vec<machine_mode, MAX_RECOG_OPERANDS> input_mode;
3233
3234 input_rvec.safe_grow (ninputs);
3235 input_mode.safe_grow (ninputs);
3236
3237 generating_concat_p = 0;
3238
3239 for (i = 0; i < ninputs; ++i)
3240 {
3241 tree val = input_tvec[i];
3242 tree type = TREE_TYPE (val);
3243 bool allows_reg, allows_mem, ok;
3244 const char *constraint;
3245 rtx op;
3246
3247 constraint = constraints[i + noutputs];
3248 ok = parse_input_constraint (&constraint, i, ninputs, noutputs, 0,
3249 constraints.address (),
3250 &allows_mem, &allows_reg);
3251 gcc_assert (ok);
3252
3253 /* EXPAND_INITIALIZER will not generate code for valid initializer
3254 constants, but will still generate code for other types of operand.
3255 This is the behavior we want for constant constraints. */
3256 op = expand_expr (val, NULL_RTX, VOIDmode,
3257 allows_reg ? EXPAND_NORMAL
3258 : allows_mem ? EXPAND_MEMORY
3259 : EXPAND_INITIALIZER);
3260
3261 /* Never pass a CONCAT to an ASM. */
3262 if (GET_CODE (op) == CONCAT)
3263 op = force_reg (GET_MODE (op), op);
3264 else if (MEM_P (op))
3265 op = validize_mem (op);
3266
3267 if (asm_operand_ok (op, constraint, NULL) <= 0)
3268 {
3269 if (allows_reg && TYPE_MODE (type) != BLKmode)
3270 op = force_reg (TYPE_MODE (type), op);
3271 else if (!allows_mem)
3272 warning (0, "%<asm%> operand %d probably does not match "
3273 "constraints",
3274 i + noutputs);
3275 else if (MEM_P (op))
3276 {
3277 /* We won't recognize either volatile memory or memory
3278 with a queued address as a valid memory_operand
3279 at this point. Ignore it: clearly this *is* a memory operand. */
3280 }
3281 else
3282 gcc_unreachable ();
3283 }
3284 input_rvec[i] = op;
3285 input_mode[i] = TYPE_MODE (type);
3286 }
3287
3288 /* For in-out operands, copy output rtx to input rtx. */
3289 unsigned ninout = inout_opnum.length();
3290 for (i = 0; i < ninout; i++)
3291 {
3292 int j = inout_opnum[i];
3293 rtx o = output_rvec[j];
3294
3295 input_rvec.safe_push (o);
3296 input_mode.safe_push (GET_MODE (o));
3297
3298 char buffer[16];
3299 sprintf (buffer, "%d", j);
3300 constraints.safe_push (ggc_strdup (buffer));
3301 }
3302 ninputs += ninout;
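/* For example, an output declared with a "+r" constraint shows up here a
   second time as an extra input whose constraint is the output's index
   (e.g. "0"), which is how matching operands are expressed from this
   point on.  */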
3303
3304 /* Sometimes we wish to automatically clobber registers across an asm.
3305 Case in point is when the i386 backend moved from cc0 to a hard reg --
3306 maintaining source-level compatibility means automatically clobbering
3307 the flags register. */
3308 rtx_insn *after_md_seq = NULL;
3309 if (targetm.md_asm_adjust)
3310 after_md_seq = targetm.md_asm_adjust (output_rvec, input_rvec,
3311 constraints, clobber_rvec,
3312 clobbered_regs);
3313
3314 /* Do not allow the hook to change the output and input count,
3315 lest it mess up the operand numbering. */
3316 gcc_assert (output_rvec.length() == noutputs);
3317 gcc_assert (input_rvec.length() == ninputs);
3318 gcc_assert (constraints.length() == noutputs + ninputs);
3319
3320 /* But it certainly can adjust the clobbers. */
3321 unsigned nclobbers = clobber_rvec.length ();
3322
3323 /* Third pass checks for easy conflicts. */
3324 /* ??? Why are we doing this on trees instead of rtx? */
3325
3326 bool clobber_conflict_found = 0;
3327 for (i = 0; i < noutputs; ++i)
3328 if (tree_conflicts_with_clobbers_p (output_tvec[i], &clobbered_regs))
3329 clobber_conflict_found = 1;
3330 for (i = 0; i < ninputs - ninout; ++i)
3331 if (tree_conflicts_with_clobbers_p (input_tvec[i], &clobbered_regs))
3332 clobber_conflict_found = 1;
3333
3334 /* Make vectors for the expression-rtx, constraint strings,
3335 and named operands. */
3336
3337 rtvec argvec = rtvec_alloc (ninputs);
3338 rtvec constraintvec = rtvec_alloc (ninputs);
3339 rtvec labelvec = rtvec_alloc (nlabels);
3340
3341 rtx body = gen_rtx_ASM_OPERANDS ((noutputs == 0 ? VOIDmode
3342 : GET_MODE (output_rvec[0])),
3343 ggc_strdup (gimple_asm_string (stmt)),
3344 "", 0, argvec, constraintvec,
3345 labelvec, locus);
3346 MEM_VOLATILE_P (body) = gimple_asm_volatile_p (stmt);
3347
3348 for (i = 0; i < ninputs; ++i)
3349 {
3350 ASM_OPERANDS_INPUT (body, i) = input_rvec[i];
3351 ASM_OPERANDS_INPUT_CONSTRAINT_EXP (body, i)
3352 = gen_rtx_ASM_INPUT_loc (input_mode[i],
3353 constraints[i + noutputs],
3354 locus);
3355 }
3356
3357 /* Copy labels to the vector. */
3358 rtx_code_label *fallthru_label = NULL;
3359 if (nlabels > 0)
3360 {
3361 basic_block fallthru_bb = NULL;
3362 edge fallthru = find_fallthru_edge (gimple_bb (stmt)->succs);
3363 if (fallthru)
3364 fallthru_bb = fallthru->dest;
3365
3366 for (i = 0; i < nlabels; ++i)
3367 {
3368 tree label = TREE_VALUE (gimple_asm_label_op (stmt, i));
3369 rtx_insn *r;
3370 /* If asm goto has any labels in the fallthru basic block, use
3371 a label that we emit immediately after the asm goto. Expansion
3372 may insert further instructions into the same basic block after
3373 asm goto and if we don't do this, insertion of instructions on
3374 the fallthru edge might misbehave. See PR58670. */
3375 if (fallthru_bb && label_to_block (cfun, label) == fallthru_bb)
3376 {
3377 if (fallthru_label == NULL_RTX)
3378 fallthru_label = gen_label_rtx ();
3379 r = fallthru_label;
3380 }
3381 else
3382 r = label_rtx (label);
3383 ASM_OPERANDS_LABEL (body, i) = gen_rtx_LABEL_REF (Pmode, r);
3384 }
3385 }
3386
3387 /* Now, for each output, construct an rtx
3388 (set OUTPUT (asm_operands INSN OUTPUTCONSTRAINT OUTPUTNUMBER
3389 ARGVEC CONSTRAINTS OPNAMES))
3390 If there is more than one, put them inside a PARALLEL. */
3391
3392 if (nlabels > 0 && nclobbers == 0)
3393 {
3394 gcc_assert (noutputs == 0);
3395 emit_jump_insn (body);
3396 }
3397 else if (noutputs == 0 && nclobbers == 0)
3398 {
3399 /* No output operands: put in a raw ASM_OPERANDS rtx. */
3400 emit_insn (body);
3401 }
3402 else if (noutputs == 1 && nclobbers == 0)
3403 {
3404 ASM_OPERANDS_OUTPUT_CONSTRAINT (body) = constraints[0];
3405 emit_insn (gen_rtx_SET (output_rvec[0], body));
3406 }
3407 else
3408 {
3409 rtx obody = body;
3410 int num = noutputs;
3411
3412 if (num == 0)
3413 num = 1;
3414
3415 body = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num + nclobbers));
3416
3417 /* For each output operand, store a SET. */
3418 for (i = 0; i < noutputs; ++i)
3419 {
3420 rtx src, o = output_rvec[i];
3421 if (i == 0)
3422 {
3423 ASM_OPERANDS_OUTPUT_CONSTRAINT (obody) = constraints[0];
3424 src = obody;
3425 }
3426 else
3427 {
3428 src = gen_rtx_ASM_OPERANDS (GET_MODE (o),
3429 ASM_OPERANDS_TEMPLATE (obody),
3430 constraints[i], i, argvec,
3431 constraintvec, labelvec, locus);
3432 MEM_VOLATILE_P (src) = gimple_asm_volatile_p (stmt);
3433 }
3434 XVECEXP (body, 0, i) = gen_rtx_SET (o, src);
3435 }
3436
3437 /* If there are no outputs (but there are some clobbers)
3438 store the bare ASM_OPERANDS into the PARALLEL. */
3439 if (i == 0)
3440 XVECEXP (body, 0, i++) = obody;
3441
3442 /* Store (clobber REG) for each clobbered register specified. */
3443 for (unsigned j = 0; j < nclobbers; ++j)
3444 {
3445 rtx clobbered_reg = clobber_rvec[j];
3446
3447 /* Sanity-check for overlap between clobbers and, respectively,
3448 inputs and outputs that hasn't been handled. Such overlap
3449 should have been detected and reported above. */
3450 if (!clobber_conflict_found && REG_P (clobbered_reg))
3451 {
3452 /* We test the old body (obody) contents to avoid
3453 tripping over the under-construction body. */
3454 for (unsigned k = 0; k < noutputs; ++k)
3455 if (reg_overlap_mentioned_p (clobbered_reg, output_rvec[k]))
3456 internal_error ("%<asm%> clobber conflict with "
3457 "output operand");
3458
3459 for (unsigned k = 0; k < ninputs - ninout; ++k)
3460 if (reg_overlap_mentioned_p (clobbered_reg, input_rvec[k]))
3461 internal_error ("%<asm%> clobber conflict with "
3462 "input operand");
3463 }
3464
3465 XVECEXP (body, 0, i++) = gen_rtx_CLOBBER (VOIDmode, clobbered_reg);
3466 }
3467
3468 if (nlabels > 0)
3469 emit_jump_insn (body);
3470 else
3471 emit_insn (body);
3472 }
3473
3474 generating_concat_p = old_generating_concat_p;
3475
3476 if (fallthru_label)
3477 emit_label (fallthru_label);
3478
3479 if (after_md_seq)
3480 emit_insn (after_md_seq);
3481 if (after_rtl_seq)
3482 emit_insn (after_rtl_seq);
3483
3484 free_temp_slots ();
3485 crtl->has_asm_statement = 1;
3486 }
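/* As a rough illustration, a statement like
     asm volatile ("mov %1, %0" : "=r" (dst) : "r" (src));
   ends up as a single (set (reg dst) (asm_operands ...)) via the
   single-output case above when no clobbers remain, whereas targets whose
   md_asm_adjust hook adds implicit clobbers (e.g. a flags register) get
   the PARALLEL form instead.  */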
3487
3488 /* Emit code to jump to the address
3489 specified by the pointer expression EXP. */
3490
3491 static void
3492 expand_computed_goto (tree exp)
3493 {
3494 rtx x = expand_normal (exp);
3495
3496 do_pending_stack_adjust ();
3497 emit_indirect_jump (x);
3498 }
3499
3500 /* Generate RTL code for a `goto' statement with target label LABEL.
3501 LABEL should be a LABEL_DECL tree node that was or will later be
3502 defined with `expand_label'. */
3503
3504 static void
3505 expand_goto (tree label)
3506 {
3507 if (flag_checking)
3508 {
3509 /* Check for a nonlocal goto to a containing function. Should have
3510 gotten translated to __builtin_nonlocal_goto. */
3511 tree context = decl_function_context (label);
3512 gcc_assert (!context || context == current_function_decl);
3513 }
3514
3515 emit_jump (jump_target_rtx (label));
3516 }
3517
3518 /* Output a return with no value. */
3519
3520 static void
3521 expand_null_return_1 (void)
3522 {
3523 clear_pending_stack_adjust ();
3524 do_pending_stack_adjust ();
3525 emit_jump (return_label);
3526 }
3527
3528 /* Generate RTL to return from the current function, with no value.
3529 (That is, we do not do anything about returning any value.) */
3530
3531 void
3532 expand_null_return (void)
3533 {
3534 /* If this function was declared to return a value, but we
3535 didn't, clobber the return registers so that they are not
3536 propagated live to the rest of the function. */
3537 clobber_return_register ();
3538
3539 expand_null_return_1 ();
3540 }
3541
3542 /* Generate RTL to return from the current function, with value VAL. */
3543
3544 static void
3545 expand_value_return (rtx val)
3546 {
3547 /* Copy the value to the return location unless it's already there. */
3548
3549 tree decl = DECL_RESULT (current_function_decl);
3550 rtx return_reg = DECL_RTL (decl);
3551 if (return_reg != val)
3552 {
3553 tree funtype = TREE_TYPE (current_function_decl);
3554 tree type = TREE_TYPE (decl);
3555 int unsignedp = TYPE_UNSIGNED (type);
3556 machine_mode old_mode = DECL_MODE (decl);
3557 machine_mode mode;
3558 if (DECL_BY_REFERENCE (decl))
3559 mode = promote_function_mode (type, old_mode, &unsignedp, funtype, 2);
3560 else
3561 mode = promote_function_mode (type, old_mode, &unsignedp, funtype, 1);
3562
3563 if (mode != old_mode)
3564 val = convert_modes (mode, old_mode, val, unsignedp);
3565
3566 if (GET_CODE (return_reg) == PARALLEL)
3567 emit_group_load (return_reg, val, type, int_size_in_bytes (type));
3568 else
3569 emit_move_insn (return_reg, val);
3570 }
3571
3572 expand_null_return_1 ();
3573 }
3574
3575 /* Generate RTL to evaluate the expression RETVAL and return it
3576 from the current function. */
3577
3578 static void
3579 expand_return (tree retval)
3580 {
3581 rtx result_rtl;
3582 rtx val = 0;
3583 tree retval_rhs;
3584
3585 /* If function wants no value, give it none. */
3586 if (TREE_CODE (TREE_TYPE (TREE_TYPE (current_function_decl))) == VOID_TYPE)
3587 {
3588 expand_normal (retval);
3589 expand_null_return ();
3590 return;
3591 }
3592
3593 if (retval == error_mark_node)
3594 {
3595 /* Treat this like a return of no value from a function that
3596 returns a value. */
3597 expand_null_return ();
3598 return;
3599 }
3600 else if ((TREE_CODE (retval) == MODIFY_EXPR
3601 || TREE_CODE (retval) == INIT_EXPR)
3602 && TREE_CODE (TREE_OPERAND (retval, 0)) == RESULT_DECL)
3603 retval_rhs = TREE_OPERAND (retval, 1);
3604 else
3605 retval_rhs = retval;
3606
3607 result_rtl = DECL_RTL (DECL_RESULT (current_function_decl));
3608
3609 /* If we are returning the RESULT_DECL, then the value has already
3610 been stored into it, so we don't have to do anything special. */
3611 if (TREE_CODE (retval_rhs) == RESULT_DECL)
3612 expand_value_return (result_rtl);
3613
3614 /* If the result is an aggregate that is being returned in one (or more)
3615 registers, load the registers here. */
3616
3617 else if (retval_rhs != 0
3618 && TYPE_MODE (TREE_TYPE (retval_rhs)) == BLKmode
3619 && REG_P (result_rtl))
3620 {
3621 val = copy_blkmode_to_reg (GET_MODE (result_rtl), retval_rhs);
3622 if (val)
3623 {
3624 /* Use the mode of the result value on the return register. */
3625 PUT_MODE (result_rtl, GET_MODE (val));
3626 expand_value_return (val);
3627 }
3628 else
3629 expand_null_return ();
3630 }
3631 else if (retval_rhs != 0
3632 && !VOID_TYPE_P (TREE_TYPE (retval_rhs))
3633 && (REG_P (result_rtl)
3634 || (GET_CODE (result_rtl) == PARALLEL)))
3635 {
3636 /* Compute the return value into a temporary (usually a pseudo reg). */
3637 val
3638 = assign_temp (TREE_TYPE (DECL_RESULT (current_function_decl)), 0, 1);
3639 val = expand_expr (retval_rhs, val, GET_MODE (val), EXPAND_NORMAL);
3640 val = force_not_mem (val);
3641 expand_value_return (val);
3642 }
3643 else
3644 {
3645 /* No hard reg used; calculate value into hard return reg. */
3646 expand_expr (retval, const0_rtx, VOIDmode, EXPAND_NORMAL);
3647 expand_value_return (result_rtl);
3648 }
3649 }
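
/* An illustrative case for the BLKmode-in-register path above (assumed
   ABI behaviour, not taken from this file): on a 64-bit target

     struct pair { int a, b; };
     struct pair f (void) { struct pair p = { 1, 2 }; return p; }

   can leave the RESULT_DECL in BLKmode while the ABI returns the whole
   struct in a single DImode register; copy_blkmode_to_reg assembles that
   register value and expand_value_return then emits the final move.  */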
3650
3651 /* Expand a clobber of LHS. If LHS is stored in a multi-part
3652 register, tell the rtl optimizers that its value is no longer
3653 needed. */
3654
3655 static void
3656 expand_clobber (tree lhs)
3657 {
3658 if (DECL_P (lhs))
3659 {
3660 rtx decl_rtl = DECL_RTL_IF_SET (lhs);
3661 if (decl_rtl && REG_P (decl_rtl))
3662 {
3663 machine_mode decl_mode = GET_MODE (decl_rtl);
3664 if (maybe_gt (GET_MODE_SIZE (decl_mode),
3665 REGMODE_NATURAL_SIZE (decl_mode)))
3666 emit_clobber (decl_rtl);
3667 }
3668 }
3669 }
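
/* A sketch of when the clobber above matters (illustrative target
   assumption): if REGMODE_NATURAL_SIZE is 8 bytes, a TImode variable
   living in a register spans two natural-size pieces, so a gimple
   clobber marking the end of its scope is expanded to an explicit

     (clobber (reg:TI ...))

   which lets the rtl optimizers treat both halves as dead from this
   point on.  */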
3670
3671 /* A subroutine of expand_gimple_stmt, expanding one gimple statement
3672 STMT that doesn't require special handling for outgoing edges. That
3673 is, no tailcalls and no GIMPLE_COND. */
3674
3675 static void
3676 expand_gimple_stmt_1 (gimple *stmt)
3677 {
3678 tree op0;
3679
3680 set_curr_insn_location (gimple_location (stmt));
3681
3682 switch (gimple_code (stmt))
3683 {
3684 case GIMPLE_GOTO:
3685 op0 = gimple_goto_dest (stmt);
3686 if (TREE_CODE (op0) == LABEL_DECL)
3687 expand_goto (op0);
3688 else
3689 expand_computed_goto (op0);
3690 break;
3691 case GIMPLE_LABEL:
3692 expand_label (gimple_label_label (as_a <glabel *> (stmt)));
3693 break;
3694 case GIMPLE_NOP:
3695 case GIMPLE_PREDICT:
3696 break;
3697 case GIMPLE_SWITCH:
3698 {
3699 gswitch *swtch = as_a <gswitch *> (stmt);
3700 if (gimple_switch_num_labels (swtch) == 1)
3701 expand_goto (CASE_LABEL (gimple_switch_default_label (swtch)));
3702 else
3703 expand_case (swtch);
3704 }
3705 break;
3706 case GIMPLE_ASM:
3707 expand_asm_stmt (as_a <gasm *> (stmt));
3708 break;
3709 case GIMPLE_CALL:
3710 expand_call_stmt (as_a <gcall *> (stmt));
3711 break;
3712
3713 case GIMPLE_RETURN:
3714 {
3715 op0 = gimple_return_retval (as_a <greturn *> (stmt));
3716
3717 /* If a return doesn't have a location, it very likely represents
3718 multiple user returns, so we cannot let it inherit the location
3719 of the last statement of the previous basic block in RTL. */
3720 if (!gimple_has_location (stmt))
3721 set_curr_insn_location (cfun->function_end_locus);
3722
3723 if (op0 && op0 != error_mark_node)
3724 {
3725 tree result = DECL_RESULT (current_function_decl);
3726
3727 /* If we are not returning the current function's RESULT_DECL,
3728 build an assignment to it. */
3729 if (op0 != result)
3730 {
3731 /* I believe that a function's RESULT_DECL is unique. */
3732 gcc_assert (TREE_CODE (op0) != RESULT_DECL);
3733
3734 /* ??? We'd like to use simply expand_assignment here,
3735 but this fails if the value is of BLKmode but the return
3736 decl is a register. expand_return has special handling
3737 for this combination, which eventually should move
3738 to common code. See comments there. Until then, let's
3739 build a modify expression :-/ */
3740 op0 = build2 (MODIFY_EXPR, TREE_TYPE (result),
3741 result, op0);
3742 }
3743 }
3744
3745 if (!op0)
3746 expand_null_return ();
3747 else
3748 expand_return (op0);
3749 }
3750 break;
3751
3752 case GIMPLE_ASSIGN:
3753 {
3754 gassign *assign_stmt = as_a <gassign *> (stmt);
3755 tree lhs = gimple_assign_lhs (assign_stmt);
3756
3757 /* Tree expand used to fiddle with |= and &= of two bitfield
3758 COMPONENT_REFs here. This can't happen with gimple; the LHS
3759 of binary assigns must be a gimple reg. */
3760
3761 if (TREE_CODE (lhs) != SSA_NAME
3762 || get_gimple_rhs_class (gimple_expr_code (stmt))
3763 == GIMPLE_SINGLE_RHS)
3764 {
3765 tree rhs = gimple_assign_rhs1 (assign_stmt);
3766 gcc_assert (get_gimple_rhs_class (gimple_expr_code (stmt))
3767 == GIMPLE_SINGLE_RHS);
3768 if (gimple_has_location (stmt) && CAN_HAVE_LOCATION_P (rhs)
3769 /* Do not put locations on possibly shared trees. */
3770 && !is_gimple_min_invariant (rhs))
3771 SET_EXPR_LOCATION (rhs, gimple_location (stmt));
3772 if (TREE_CLOBBER_P (rhs))
3773 /* This is a clobber to mark the going out of scope for
3774 this LHS. */
3775 expand_clobber (lhs);
3776 else
3777 expand_assignment (lhs, rhs,
3778 gimple_assign_nontemporal_move_p (
3779 assign_stmt));
3780 }
3781 else
3782 {
3783 rtx target, temp;
3784 bool nontemporal = gimple_assign_nontemporal_move_p (assign_stmt);
3785 struct separate_ops ops;
3786 bool promoted = false;
3787
3788 target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
3789 if (GET_CODE (target) == SUBREG && SUBREG_PROMOTED_VAR_P (target))
3790 promoted = true;
3791
3792 ops.code = gimple_assign_rhs_code (assign_stmt);
3793 ops.type = TREE_TYPE (lhs);
3794 switch (get_gimple_rhs_class (ops.code))
3795 {
3796 case GIMPLE_TERNARY_RHS:
3797 ops.op2 = gimple_assign_rhs3 (assign_stmt);
3798 /* Fallthru */
3799 case GIMPLE_BINARY_RHS:
3800 ops.op1 = gimple_assign_rhs2 (assign_stmt);
3801 /* Fallthru */
3802 case GIMPLE_UNARY_RHS:
3803 ops.op0 = gimple_assign_rhs1 (assign_stmt);
3804 break;
3805 default:
3806 gcc_unreachable ();
3807 }
3808 ops.location = gimple_location (stmt);
3809
3810 /* If we want to use a nontemporal store, force the value into a
3811 register first. If we store into a promoted register,
3812 don't directly expand to target. */
3813 temp = nontemporal || promoted ? NULL_RTX : target;
3814 temp = expand_expr_real_2 (&ops, temp, GET_MODE (target),
3815 EXPAND_NORMAL);
3816
3817 if (temp == target)
3818 ;
3819 else if (promoted)
3820 {
3821 int unsignedp = SUBREG_PROMOTED_SIGN (target);
3822 /* If TEMP is a VOIDmode constant, use convert_modes to make
3823 sure that we properly convert it. */
3824 if (CONSTANT_P (temp) && GET_MODE (temp) == VOIDmode)
3825 {
3826 temp = convert_modes (GET_MODE (target),
3827 TYPE_MODE (ops.type),
3828 temp, unsignedp);
3829 temp = convert_modes (GET_MODE (SUBREG_REG (target)),
3830 GET_MODE (target), temp, unsignedp);
3831 }
3832
3833 convert_move (SUBREG_REG (target), temp, unsignedp);
3834 }
3835 else if (nontemporal && emit_storent_insn (target, temp))
3836 ;
3837 else
3838 {
3839 temp = force_operand (temp, target);
3840 if (temp != target)
3841 emit_move_insn (target, temp);
3842 }
3843 }
3844 }
3845 break;
3846
3847 default:
3848 gcc_unreachable ();
3849 }
3850 }
3851
3852 /* Expand one gimple statement STMT and return the last RTL instruction
3853 before any of the newly generated ones.
3854
3855 In addition to generating the necessary RTL instructions this also
3856 sets REG_EH_REGION notes if necessary and sets the current source
3857 location for diagnostics. */
3858
3859 static rtx_insn *
3860 expand_gimple_stmt (gimple *stmt)
3861 {
3862 location_t saved_location = input_location;
3863 rtx_insn *last = get_last_insn ();
3864 int lp_nr;
3865
3866 gcc_assert (cfun);
3867
3868 /* We need to save and restore the current source location so that errors
3869 discovered during expansion are emitted with the right location. But
3870 it would be better if the diagnostic routines used the source location
3871 embedded in the tree nodes rather than globals. */
3872 if (gimple_has_location (stmt))
3873 input_location = gimple_location (stmt);
3874
3875 expand_gimple_stmt_1 (stmt);
3876
3877 /* Free any temporaries used to evaluate this statement. */
3878 free_temp_slots ();
3879
3880 input_location = saved_location;
3881
3882 /* Mark all insns that may trap. */
3883 lp_nr = lookup_stmt_eh_lp (stmt);
3884 if (lp_nr)
3885 {
3886 rtx_insn *insn;
3887 for (insn = next_real_insn (last); insn;
3888 insn = next_real_insn (insn))
3889 {
3890 if (! find_reg_note (insn, REG_EH_REGION, NULL_RTX)
3891 /* If we want exceptions for non-call insns, any
3892 may_trap_p instruction may throw. */
3893 && GET_CODE (PATTERN (insn)) != CLOBBER
3894 && GET_CODE (PATTERN (insn)) != USE
3895 && insn_could_throw_p (insn))
3896 make_reg_eh_region_note (insn, 0, lp_nr);
3897 }
3898 }
3899
3900 return last;
3901 }
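
/* For instance (illustrative): if STMT is a call inside a C++ try
   block, lookup_stmt_eh_lp returns its landing-pad number, and every
   newly emitted insn that could throw (and has no REG_EH_REGION note
   already) gets a REG_EH_REGION note carrying that number, so later RTL
   passes keep those insns associated with the right EH region.  */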
3902
3903 /* A subroutine of expand_gimple_basic_block. Expand one GIMPLE_CALL
3904 that has CALL_EXPR_TAILCALL set. Returns non-null if we actually
3905 generated a tail call (something that might be denied by the ABI
3906 rules governing the call; see calls.c).
3907
3908 Sets CAN_FALLTHRU if we generated a *conditional* tail call, and
3909 control can still reach the rest of BB. The case here is __builtin_sqrt,
3910 where the NaN result goes through the external function (with a
3911 tailcall) and the normal result happens via a sqrt instruction. */
3912
3913 static basic_block
3914 expand_gimple_tailcall (basic_block bb, gcall *stmt, bool *can_fallthru)
3915 {
3916 rtx_insn *last2, *last;
3917 edge e;
3918 edge_iterator ei;
3919 profile_probability probability;
3920
3921 last2 = last = expand_gimple_stmt (stmt);
3922
3923 for (last = NEXT_INSN (last); last; last = NEXT_INSN (last))
3924 if (CALL_P (last) && SIBLING_CALL_P (last))
3925 goto found;
3926
3927 maybe_dump_rtl_for_gimple_stmt (stmt, last2);
3928
3929 *can_fallthru = true;
3930 return NULL;
3931
3932 found:
3933 /* ??? Wouldn't it be better to just reset any pending stack adjust?
3934 Any instructions emitted here are about to be deleted. */
3935 do_pending_stack_adjust ();
3936
3937 /* Remove any non-eh, non-abnormal edges that don't go to exit. */
3938 /* ??? I.e. the fallthrough edge. HOWEVER! If there were to be
3939 EH or abnormal edges, we shouldn't have created a tail call in
3940 the first place. So it seems to me we should just be removing
3941 all edges here, or redirecting the existing fallthru edge to
3942 the exit block. */
3943
3944 probability = profile_probability::never ();
3945
3946 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
3947 {
3948 if (!(e->flags & (EDGE_ABNORMAL | EDGE_EH)))
3949 {
3950 if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
3951 e->dest->count -= e->count ();
3952 probability += e->probability;
3953 remove_edge (e);
3954 }
3955 else
3956 ei_next (&ei);
3957 }
3958
3959 /* This is somewhat ugly: the call_expr expander often emits instructions
3960 after the sibcall (to perform the function return). These confuse the
3961 find_many_sub_basic_blocks code, so we need to get rid of them. */
3962 last = NEXT_INSN (last);
3963 gcc_assert (BARRIER_P (last));
3964
3965 *can_fallthru = false;
3966 while (NEXT_INSN (last))
3967 {
3968 /* For instance, an sqrt builtin expander expands an if with a
3969 sibcall in the then-arm and a label for the else-arm. */
3970 if (LABEL_P (NEXT_INSN (last)))
3971 {
3972 *can_fallthru = true;
3973 break;
3974 }
3975 delete_insn (NEXT_INSN (last));
3976 }
3977
3978 e = make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), EDGE_ABNORMAL
3979 | EDGE_SIBCALL);
3980 e->probability = probability;
3981 BB_END (bb) = last;
3982 update_bb_for_insn (bb);
3983
3984 if (NEXT_INSN (last))
3985 {
3986 bb = create_basic_block (NEXT_INSN (last), get_last_insn (), bb);
3987
3988 last = BB_END (bb);
3989 if (BARRIER_P (last))
3990 BB_END (bb) = PREV_INSN (last);
3991 }
3992
3993 maybe_dump_rtl_for_gimple_stmt (stmt, last2);
3994
3995 return bb;
3996 }
3997
3998 /* Return the difference between the floor and the truncated result of
3999 a signed division by OP1 with remainder MOD. */
4000 static rtx
4001 floor_sdiv_adjust (machine_mode mode, rtx mod, rtx op1)
4002 {
4003 /* (mod != 0 ? (op1 / mod < 0 ? -1 : 0) : 0) */
4004 return gen_rtx_IF_THEN_ELSE
4005 (mode, gen_rtx_NE (BImode, mod, const0_rtx),
4006 gen_rtx_IF_THEN_ELSE
4007 (mode, gen_rtx_LT (BImode,
4008 gen_rtx_DIV (mode, op1, mod),
4009 const0_rtx),
4010 constm1_rtx, const0_rtx),
4011 const0_rtx);
4012 }
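
/* A worked example of the adjustment above (illustrative only): for
   OP0 = -7, OP1 = 2 the truncating division gives -3 with MOD = -1;
   since MOD != 0 and OP1 / MOD == -2 < 0, the adjustment is -1 and the
   floor result is -4.  In plain C the same computation would be

     int floor_div (int op0, int op1)
     {
       int q = op0 / op1, r = op0 % op1;
       return q + ((r != 0 && op1 / r < 0) ? -1 : 0);
     }

   but here only the RTL for the adjustment term is built.  */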
4013
4014 /* Return the difference between the ceil and the truncated result of
4015 a signed division by OP1 with remainder MOD. */
4016 static rtx
4017 ceil_sdiv_adjust (machine_mode mode, rtx mod, rtx op1)
4018 {
4019 /* (mod != 0 ? (op1 / mod > 0 ? 1 : 0) : 0) */
4020 return gen_rtx_IF_THEN_ELSE
4021 (mode, gen_rtx_NE (BImode, mod, const0_rtx),
4022 gen_rtx_IF_THEN_ELSE
4023 (mode, gen_rtx_GT (BImode,
4024 gen_rtx_DIV (mode, op1, mod),
4025 const0_rtx),
4026 const1_rtx, const0_rtx),
4027 const0_rtx);
4028 }
4029
4030 /* Return the difference between the ceil and the truncated result of
4031 an unsigned division by OP1 with remainder MOD. */
4032 static rtx
4033 ceil_udiv_adjust (machine_mode mode, rtx mod, rtx op1 ATTRIBUTE_UNUSED)
4034 {
4035 /* (mod != 0 ? 1 : 0) */
4036 return gen_rtx_IF_THEN_ELSE
4037 (mode, gen_rtx_NE (BImode, mod, const0_rtx),
4038 const1_rtx, const0_rtx);
4039 }
4040
4041 /* Return the difference between the rounded and the truncated result
4042 of a signed division by OP1 with remainder MOD. Halfway cases are
4043 rounded away from zero, rather than to the nearest even number. */
4044 static rtx
4045 round_sdiv_adjust (machine_mode mode, rtx mod, rtx op1)
4046 {
4047 /* (abs (mod) >= abs (op1) - abs (mod)
4048 ? (op1 / mod > 0 ? 1 : -1)
4049 : 0) */
4050 return gen_rtx_IF_THEN_ELSE
4051 (mode, gen_rtx_GE (BImode, gen_rtx_ABS (mode, mod),
4052 gen_rtx_MINUS (mode,
4053 gen_rtx_ABS (mode, op1),
4054 gen_rtx_ABS (mode, mod))),
4055 gen_rtx_IF_THEN_ELSE
4056 (mode, gen_rtx_GT (BImode,
4057 gen_rtx_DIV (mode, op1, mod),
4058 const0_rtx),
4059 const1_rtx, constm1_rtx),
4060 const0_rtx);
4061 }
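
/* A worked example of the rounding adjustment (illustrative only):
   OP0 = 7, OP1 = 4 gives a truncated quotient of 1 with MOD = 3; since
   abs (3) >= abs (4) - abs (3) and OP1 / MOD > 0, the adjustment is +1
   and the rounded result is 2.  For the halfway case OP0 = 10, OP1 = 4
   (quotient 2, MOD = 2) the test also holds, so 2.5 is rounded away
   from zero to 3 rather than to the even 2.  */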
4062
4063 /* Return the difference between the rounded and the truncated result
4064 of an unsigned division by OP1 with remainder MOD. Halfway cases
4065 are rounded away from zero, rather than to the nearest even
4066 number. */
4067 static rtx
4068 round_udiv_adjust (machine_mode mode, rtx mod, rtx op1)
4069 {
4070 /* (mod >= op1 - mod ? 1 : 0) */
4071 return gen_rtx_IF_THEN_ELSE
4072 (mode, gen_rtx_GE (BImode, mod,
4073 gen_rtx_MINUS (mode, op1, mod)),
4074 const1_rtx, const0_rtx);
4075 }
4076
4077 /* Convert X to MODE, which must be Pmode or ptr_mode, without emitting
4078 any rtl. */
4079
4080 static rtx
4081 convert_debug_memory_address (scalar_int_mode mode, rtx x,
4082 addr_space_t as)
4083 {
4084 #ifndef POINTERS_EXTEND_UNSIGNED
4085 gcc_assert (mode == Pmode
4086 || mode == targetm.addr_space.address_mode (as));
4087 gcc_assert (GET_MODE (x) == mode || GET_MODE (x) == VOIDmode);
4088 #else
4089 rtx temp;
4090
4091 gcc_assert (targetm.addr_space.valid_pointer_mode (mode, as));
4092
4093 if (GET_MODE (x) == mode || GET_MODE (x) == VOIDmode)
4094 return x;
4095
4096 /* X must have some form of address mode already. */
4097 scalar_int_mode xmode = as_a <scalar_int_mode> (GET_MODE (x));
4098 if (GET_MODE_PRECISION (mode) < GET_MODE_PRECISION (xmode))
4099 x = lowpart_subreg (mode, x, xmode);
4100 else if (POINTERS_EXTEND_UNSIGNED > 0)
4101 x = gen_rtx_ZERO_EXTEND (mode, x);
4102 else if (!POINTERS_EXTEND_UNSIGNED)
4103 x = gen_rtx_SIGN_EXTEND (mode, x);
4104 else
4105 {
4106 switch (GET_CODE (x))
4107 {
4108 case SUBREG:
4109 if ((SUBREG_PROMOTED_VAR_P (x)
4110 || (REG_P (SUBREG_REG (x)) && REG_POINTER (SUBREG_REG (x)))
4111 || (GET_CODE (SUBREG_REG (x)) == PLUS
4112 && REG_P (XEXP (SUBREG_REG (x), 0))
4113 && REG_POINTER (XEXP (SUBREG_REG (x), 0))
4114 && CONST_INT_P (XEXP (SUBREG_REG (x), 1))))
4115 && GET_MODE (SUBREG_REG (x)) == mode)
4116 return SUBREG_REG (x);
4117 break;
4118 case LABEL_REF:
4119 temp = gen_rtx_LABEL_REF (mode, label_ref_label (x));
4120 LABEL_REF_NONLOCAL_P (temp) = LABEL_REF_NONLOCAL_P (x);
4121 return temp;
4122 case SYMBOL_REF:
4123 temp = shallow_copy_rtx (x);
4124 PUT_MODE (temp, mode);
4125 return temp;
4126 case CONST:
4127 temp = convert_debug_memory_address (mode, XEXP (x, 0), as);
4128 if (temp)
4129 temp = gen_rtx_CONST (mode, temp);
4130 return temp;
4131 case PLUS:
4132 case MINUS:
4133 if (CONST_INT_P (XEXP (x, 1)))
4134 {
4135 temp = convert_debug_memory_address (mode, XEXP (x, 0), as);
4136 if (temp)
4137 return gen_rtx_fmt_ee (GET_CODE (x), mode, temp, XEXP (x, 1));
4138 }
4139 break;
4140 default:
4141 break;
4142 }
4143 /* Don't know how to express ptr_extend as operation in debug info. */
4144 return NULL;
4145 }
4146 #endif /* POINTERS_EXTEND_UNSIGNED */
4147
4148 return x;
4149 }
4150
4151 /* Map from SSA_NAMEs to corresponding DEBUG_EXPR_DECLs created
4152 by avoid_deep_ter_for_debug. */
4153
4154 static hash_map<tree, tree> *deep_ter_debug_map;
4155
4156 /* Split too deep TER chains for debug stmts using debug temporaries. */
4157
4158 static void
4159 avoid_deep_ter_for_debug (gimple *stmt, int depth)
4160 {
4161 use_operand_p use_p;
4162 ssa_op_iter iter;
4163 FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE)
4164 {
4165 tree use = USE_FROM_PTR (use_p);
4166 if (TREE_CODE (use) != SSA_NAME || SSA_NAME_IS_DEFAULT_DEF (use))
4167 continue;
4168 gimple *g = get_gimple_for_ssa_name (use);
4169 if (g == NULL)
4170 continue;
4171 if (depth > 6 && !stmt_ends_bb_p (g))
4172 {
4173 if (deep_ter_debug_map == NULL)
4174 deep_ter_debug_map = new hash_map<tree, tree>;
4175
4176 tree &vexpr = deep_ter_debug_map->get_or_insert (use);
4177 if (vexpr != NULL)
4178 continue;
4179 vexpr = make_node (DEBUG_EXPR_DECL);
4180 gimple *def_temp = gimple_build_debug_bind (vexpr, use, g);
4181 DECL_ARTIFICIAL (vexpr) = 1;
4182 TREE_TYPE (vexpr) = TREE_TYPE (use);
4183 SET_DECL_MODE (vexpr, TYPE_MODE (TREE_TYPE (use)));
4184 gimple_stmt_iterator gsi = gsi_for_stmt (g);
4185 gsi_insert_after (&gsi, def_temp, GSI_NEW_STMT);
4186 avoid_deep_ter_for_debug (def_temp, 0);
4187 }
4188 else
4189 avoid_deep_ter_for_debug (g, depth + 1);
4190 }
4191 }
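
/* An illustrative sketch of the effect (hypothetical gimple, not taken
   from this file): with a chain of single-use definitions such as

     t1 = a + b;  t2 = t1 * c;  t3 = t2 - d;  ...  t9 = t8 + e;

   TER would substitute the whole chain into any debug stmt that uses
   t9.  Once the recursion above exceeds depth 6, the intermediate SSA
   name is instead bound to a fresh DEBUG_EXPR_DECL right after its
   definition, and the debug expression refers to that decl, keeping the
   nesting bounded.  */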
4192
4193 /* Return an RTX equivalent to the value of the parameter DECL. */
4194
4195 static rtx
4196 expand_debug_parm_decl (tree decl)
4197 {
4198 rtx incoming = DECL_INCOMING_RTL (decl);
4199
4200 if (incoming
4201 && GET_MODE (incoming) != BLKmode
4202 && ((REG_P (incoming) && HARD_REGISTER_P (incoming))
4203 || (MEM_P (incoming)
4204 && REG_P (XEXP (incoming, 0))
4205 && HARD_REGISTER_P (XEXP (incoming, 0)))))
4206 {
4207 rtx rtl = gen_rtx_ENTRY_VALUE (GET_MODE (incoming));
4208
4209 #ifdef HAVE_window_save
4210 /* DECL_INCOMING_RTL uses the INCOMING_REGNO of parameter registers.
4211 If the target machine has an explicit window save instruction, the
4212 actual entry value is the corresponding OUTGOING_REGNO instead. */
4213 if (REG_P (incoming)
4214 && OUTGOING_REGNO (REGNO (incoming)) != REGNO (incoming))
4215 incoming
4216 = gen_rtx_REG_offset (incoming, GET_MODE (incoming),
4217 OUTGOING_REGNO (REGNO (incoming)), 0);
4218 else if (MEM_P (incoming))
4219 {
4220 rtx reg = XEXP (incoming, 0);
4221 if (OUTGOING_REGNO (REGNO (reg)) != REGNO (reg))
4222 {
4223 reg = gen_raw_REG (GET_MODE (reg), OUTGOING_REGNO (REGNO (reg)));
4224 incoming = replace_equiv_address_nv (incoming, reg);
4225 }
4226 else
4227 incoming = copy_rtx (incoming);
4228 }
4229 #endif
4230
4231 ENTRY_VALUE_EXP (rtl) = incoming;
4232 return rtl;
4233 }
4234
4235 if (incoming
4236 && GET_MODE (incoming) != BLKmode
4237 && !TREE_ADDRESSABLE (decl)
4238 && MEM_P (incoming)
4239 && (XEXP (incoming, 0) == virtual_incoming_args_rtx
4240 || (GET_CODE (XEXP (incoming, 0)) == PLUS
4241 && XEXP (XEXP (incoming, 0), 0) == virtual_incoming_args_rtx
4242 && CONST_INT_P (XEXP (XEXP (incoming, 0), 1)))))
4243 return copy_rtx (incoming);
4244
4245 return NULL_RTX;
4246 }
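
/* As an example of the first case above (illustrative ABI assumption):
   an incoming argument passed in a hard register, say (reg:SI 5),
   yields (entry_value:SI (reg:SI 5)); the var-tracking pass can later
   turn that into a DW_OP_entry_value location expression even after the
   register itself has been reused.  */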
4247
4248 /* Return an RTX equivalent to the value of the tree expression EXP. */
4249
4250 static rtx
4251 expand_debug_expr (tree exp)
4252 {
4253 rtx op0 = NULL_RTX, op1 = NULL_RTX, op2 = NULL_RTX;
4254 machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
4255 machine_mode inner_mode = VOIDmode;
4256 int unsignedp = TYPE_UNSIGNED (TREE_TYPE (exp));
4257 addr_space_t as;
4258 scalar_int_mode op0_mode, op1_mode, addr_mode;
4259
4260 switch (TREE_CODE_CLASS (TREE_CODE (exp)))
4261 {
4262 case tcc_expression:
4263 switch (TREE_CODE (exp))
4264 {
4265 case COND_EXPR:
4266 case DOT_PROD_EXPR:
4267 case SAD_EXPR:
4268 case WIDEN_MULT_PLUS_EXPR:
4269 case WIDEN_MULT_MINUS_EXPR:
4270 goto ternary;
4271
4272 case TRUTH_ANDIF_EXPR:
4273 case TRUTH_ORIF_EXPR:
4274 case TRUTH_AND_EXPR:
4275 case TRUTH_OR_EXPR:
4276 case TRUTH_XOR_EXPR:
4277 goto binary;
4278
4279 case TRUTH_NOT_EXPR:
4280 goto unary;
4281
4282 default:
4283 break;
4284 }
4285 break;
4286
4287 ternary:
4288 op2 = expand_debug_expr (TREE_OPERAND (exp, 2));
4289 if (!op2)
4290 return NULL_RTX;
4291 /* Fall through. */
4292
4293 binary:
4294 case tcc_binary:
4295 if (mode == BLKmode)
4296 return NULL_RTX;
4297 op1 = expand_debug_expr (TREE_OPERAND (exp, 1));
4298 if (!op1)
4299 return NULL_RTX;
4300 switch (TREE_CODE (exp))
4301 {
4302 case LSHIFT_EXPR:
4303 case RSHIFT_EXPR:
4304 case LROTATE_EXPR:
4305 case RROTATE_EXPR:
4306 case WIDEN_LSHIFT_EXPR:
4307 /* Ensure second operand isn't wider than the first one. */
4308 inner_mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 1)));
4309 if (is_a <scalar_int_mode> (inner_mode, &op1_mode)
4310 && (GET_MODE_UNIT_PRECISION (mode)
4311 < GET_MODE_PRECISION (op1_mode)))
4312 op1 = lowpart_subreg (GET_MODE_INNER (mode), op1, op1_mode);
4313 break;
4314 default:
4315 break;
4316 }
4317 /* Fall through. */
4318
4319 unary:
4320 case tcc_unary:
4321 if (mode == BLKmode)
4322 return NULL_RTX;
4323 inner_mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)));
4324 op0 = expand_debug_expr (TREE_OPERAND (exp, 0));
4325 if (!op0)
4326 return NULL_RTX;
4327 break;
4328
4329 case tcc_comparison:
4330 unsignedp = TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp, 0)));
4331 goto binary;
4332
4333 case tcc_type:
4334 case tcc_statement:
4335 gcc_unreachable ();
4336
4337 case tcc_constant:
4338 case tcc_exceptional:
4339 case tcc_declaration:
4340 case tcc_reference:
4341 case tcc_vl_exp:
4342 break;
4343 }
4344
4345 switch (TREE_CODE (exp))
4346 {
4347 case STRING_CST:
4348 if (!lookup_constant_def (exp))
4349 {
4350 if (strlen (TREE_STRING_POINTER (exp)) + 1
4351 != (size_t) TREE_STRING_LENGTH (exp))
4352 return NULL_RTX;
4353 op0 = gen_rtx_CONST_STRING (Pmode, TREE_STRING_POINTER (exp));
4354 op0 = gen_rtx_MEM (BLKmode, op0);
4355 set_mem_attributes (op0, exp, 0);
4356 return op0;
4357 }
4358 /* Fall through. */
4359
4360 case INTEGER_CST:
4361 case REAL_CST:
4362 case FIXED_CST:
4363 op0 = expand_expr (exp, NULL_RTX, mode, EXPAND_INITIALIZER);
4364 return op0;
4365
4366 case POLY_INT_CST:
4367 return immed_wide_int_const (poly_int_cst_value (exp), mode);
4368
4369 case COMPLEX_CST:
4370 gcc_assert (COMPLEX_MODE_P (mode));
4371 op0 = expand_debug_expr (TREE_REALPART (exp));
4372 op1 = expand_debug_expr (TREE_IMAGPART (exp));
4373 return gen_rtx_CONCAT (mode, op0, op1);
4374
4375 case DEBUG_EXPR_DECL:
4376 op0 = DECL_RTL_IF_SET (exp);
4377
4378 if (op0)
4379 return op0;
4380
4381 op0 = gen_rtx_DEBUG_EXPR (mode);
4382 DEBUG_EXPR_TREE_DECL (op0) = exp;
4383 SET_DECL_RTL (exp, op0);
4384
4385 return op0;
4386
4387 case VAR_DECL:
4388 case PARM_DECL:
4389 case FUNCTION_DECL:
4390 case LABEL_DECL:
4391 case CONST_DECL:
4392 case RESULT_DECL:
4393 op0 = DECL_RTL_IF_SET (exp);
4394
4395 /* This decl was probably optimized away. */
4396 if (!op0
4397 /* At least label RTXen are sometimes replaced by
4398 NOTE_INSN_DELETED_LABEL. Any notes here are not
4399 handled by copy_rtx. */
4400 || NOTE_P (op0))
4401 {
4402 if (!VAR_P (exp)
4403 || DECL_EXTERNAL (exp)
4404 || !TREE_STATIC (exp)
4405 || !DECL_NAME (exp)
4406 || DECL_HARD_REGISTER (exp)
4407 || DECL_IN_CONSTANT_POOL (exp)
4408 || mode == VOIDmode)
4409 return NULL;
4410
4411 op0 = make_decl_rtl_for_debug (exp);
4412 if (!MEM_P (op0)
4413 || GET_CODE (XEXP (op0, 0)) != SYMBOL_REF
4414 || SYMBOL_REF_DECL (XEXP (op0, 0)) != exp)
4415 return NULL;
4416 }
4417 else
4418 op0 = copy_rtx (op0);
4419
4420 if (GET_MODE (op0) == BLKmode
4421 /* If op0 is not BLKmode, but mode is, adjust_mode
4422 below would ICE. While it is likely a FE bug,
4423 try to be robust here. See PR43166. */
4424 || mode == BLKmode
4425 || (mode == VOIDmode && GET_MODE (op0) != VOIDmode))
4426 {
4427 gcc_assert (MEM_P (op0));
4428 op0 = adjust_address_nv (op0, mode, 0);
4429 return op0;
4430 }
4431
4432 /* Fall through. */
4433
4434 adjust_mode:
4435 case PAREN_EXPR:
4436 CASE_CONVERT:
4437 {
4438 inner_mode = GET_MODE (op0);
4439
4440 if (mode == inner_mode)
4441 return op0;
4442
4443 if (inner_mode == VOIDmode)
4444 {
4445 if (TREE_CODE (exp) == SSA_NAME)
4446 inner_mode = TYPE_MODE (TREE_TYPE (exp));
4447 else
4448 inner_mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)));
4449 if (mode == inner_mode)
4450 return op0;
4451 }
4452
4453 if (FLOAT_MODE_P (mode) && FLOAT_MODE_P (inner_mode))
4454 {
4455 if (GET_MODE_UNIT_BITSIZE (mode)
4456 == GET_MODE_UNIT_BITSIZE (inner_mode))
4457 op0 = simplify_gen_subreg (mode, op0, inner_mode, 0);
4458 else if (GET_MODE_UNIT_BITSIZE (mode)
4459 < GET_MODE_UNIT_BITSIZE (inner_mode))
4460 op0 = simplify_gen_unary (FLOAT_TRUNCATE, mode, op0, inner_mode);
4461 else
4462 op0 = simplify_gen_unary (FLOAT_EXTEND, mode, op0, inner_mode);
4463 }
4464 else if (FLOAT_MODE_P (mode))
4465 {
4466 gcc_assert (TREE_CODE (exp) != SSA_NAME);
4467 if (TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp, 0))))
4468 op0 = simplify_gen_unary (UNSIGNED_FLOAT, mode, op0, inner_mode);
4469 else
4470 op0 = simplify_gen_unary (FLOAT, mode, op0, inner_mode);
4471 }
4472 else if (FLOAT_MODE_P (inner_mode))
4473 {
4474 if (unsignedp)
4475 op0 = simplify_gen_unary (UNSIGNED_FIX, mode, op0, inner_mode);
4476 else
4477 op0 = simplify_gen_unary (FIX, mode, op0, inner_mode);
4478 }
4479 else if (GET_MODE_UNIT_PRECISION (mode)
4480 == GET_MODE_UNIT_PRECISION (inner_mode))
4481 op0 = lowpart_subreg (mode, op0, inner_mode);
4482 else if (GET_MODE_UNIT_PRECISION (mode)
4483 < GET_MODE_UNIT_PRECISION (inner_mode))
4484 op0 = simplify_gen_unary (TRUNCATE, mode, op0, inner_mode);
4485 else if (UNARY_CLASS_P (exp)
4486 ? TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp, 0)))
4487 : unsignedp)
4488 op0 = simplify_gen_unary (ZERO_EXTEND, mode, op0, inner_mode);
4489 else
4490 op0 = simplify_gen_unary (SIGN_EXTEND, mode, op0, inner_mode);
4491
4492 return op0;
4493 }
4494
4495 case MEM_REF:
4496 if (!is_gimple_mem_ref_addr (TREE_OPERAND (exp, 0)))
4497 {
4498 tree newexp = fold_binary (MEM_REF, TREE_TYPE (exp),
4499 TREE_OPERAND (exp, 0),
4500 TREE_OPERAND (exp, 1));
4501 if (newexp)
4502 return expand_debug_expr (newexp);
4503 }
4504 /* FALLTHROUGH */
4505 case INDIRECT_REF:
4506 inner_mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)));
4507 op0 = expand_debug_expr (TREE_OPERAND (exp, 0));
4508 if (!op0)
4509 return NULL;
4510
4511 if (TREE_CODE (exp) == MEM_REF)
4512 {
4513 if (GET_CODE (op0) == DEBUG_IMPLICIT_PTR
4514 || (GET_CODE (op0) == PLUS
4515 && GET_CODE (XEXP (op0, 0)) == DEBUG_IMPLICIT_PTR))
4516 /* (mem (debug_implicit_ptr)) might confuse aliasing.
4517 Instead just use get_inner_reference. */
4518 goto component_ref;
4519
4520 op1 = expand_debug_expr (TREE_OPERAND (exp, 1));
4521 poly_int64 offset;
4522 if (!op1 || !poly_int_rtx_p (op1, &offset))
4523 return NULL;
4524
4525 op0 = plus_constant (inner_mode, op0, offset);
4526 }
4527
4528 as = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (TREE_OPERAND (exp, 0))));
4529
4530 op0 = convert_debug_memory_address (targetm.addr_space.address_mode (as),
4531 op0, as);
4532 if (op0 == NULL_RTX)
4533 return NULL;
4534
4535 op0 = gen_rtx_MEM (mode, op0);
4536 set_mem_attributes (op0, exp, 0);
4537 if (TREE_CODE (exp) == MEM_REF
4538 && !is_gimple_mem_ref_addr (TREE_OPERAND (exp, 0)))
4539 set_mem_expr (op0, NULL_TREE);
4540 set_mem_addr_space (op0, as);
4541
4542 return op0;
4543
4544 case TARGET_MEM_REF:
4545 if (TREE_CODE (TMR_BASE (exp)) == ADDR_EXPR
4546 && !DECL_RTL_SET_P (TREE_OPERAND (TMR_BASE (exp), 0)))
4547 return NULL;
4548
4549 op0 = expand_debug_expr
4550 (tree_mem_ref_addr (build_pointer_type (TREE_TYPE (exp)), exp));
4551 if (!op0)
4552 return NULL;
4553
4554 as = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (TREE_OPERAND (exp, 0))));
4555 op0 = convert_debug_memory_address (targetm.addr_space.address_mode (as),
4556 op0, as);
4557 if (op0 == NULL_RTX)
4558 return NULL;
4559
4560 op0 = gen_rtx_MEM (mode, op0);
4561
4562 set_mem_attributes (op0, exp, 0);
4563 set_mem_addr_space (op0, as);
4564
4565 return op0;
4566
4567 component_ref:
4568 case ARRAY_REF:
4569 case ARRAY_RANGE_REF:
4570 case COMPONENT_REF:
4571 case BIT_FIELD_REF:
4572 case REALPART_EXPR:
4573 case IMAGPART_EXPR:
4574 case VIEW_CONVERT_EXPR:
4575 {
4576 machine_mode mode1;
4577 poly_int64 bitsize, bitpos;
4578 tree offset;
4579 int reversep, volatilep = 0;
4580 tree tem
4581 = get_inner_reference (exp, &bitsize, &bitpos, &offset, &mode1,
4582 &unsignedp, &reversep, &volatilep);
4583 rtx orig_op0;
4584
4585 if (known_eq (bitsize, 0))
4586 return NULL;
4587
4588 orig_op0 = op0 = expand_debug_expr (tem);
4589
4590 if (!op0)
4591 return NULL;
4592
4593 if (offset)
4594 {
4595 machine_mode addrmode, offmode;
4596
4597 if (!MEM_P (op0))
4598 return NULL;
4599
4600 op0 = XEXP (op0, 0);
4601 addrmode = GET_MODE (op0);
4602 if (addrmode == VOIDmode)
4603 addrmode = Pmode;
4604
4605 op1 = expand_debug_expr (offset);
4606 if (!op1)
4607 return NULL;
4608
4609 offmode = GET_MODE (op1);
4610 if (offmode == VOIDmode)
4611 offmode = TYPE_MODE (TREE_TYPE (offset));
4612
4613 if (addrmode != offmode)
4614 op1 = lowpart_subreg (addrmode, op1, offmode);
4615
4616 /* Don't use offset_address here; we don't need a
4617 recognizable address, and we don't want to generate
4618 code. */
4619 op0 = gen_rtx_MEM (mode, simplify_gen_binary (PLUS, addrmode,
4620 op0, op1));
4621 }
4622
4623 if (MEM_P (op0))
4624 {
4625 if (mode1 == VOIDmode)
4626 {
4627 if (maybe_gt (bitsize, MAX_BITSIZE_MODE_ANY_INT))
4628 return NULL;
4629 /* Bitfield. */
4630 mode1 = smallest_int_mode_for_size (bitsize);
4631 }
4632 poly_int64 bytepos = bits_to_bytes_round_down (bitpos);
4633 if (maybe_ne (bytepos, 0))
4634 {
4635 op0 = adjust_address_nv (op0, mode1, bytepos);
4636 bitpos = num_trailing_bits (bitpos);
4637 }
4638 else if (known_eq (bitpos, 0)
4639 && known_eq (bitsize, GET_MODE_BITSIZE (mode)))
4640 op0 = adjust_address_nv (op0, mode, 0);
4641 else if (GET_MODE (op0) != mode1)
4642 op0 = adjust_address_nv (op0, mode1, 0);
4643 else
4644 op0 = copy_rtx (op0);
4645 if (op0 == orig_op0)
4646 op0 = shallow_copy_rtx (op0);
4647 set_mem_attributes (op0, exp, 0);
4648 }
4649
4650 if (known_eq (bitpos, 0) && mode == GET_MODE (op0))
4651 return op0;
4652
4653 if (maybe_lt (bitpos, 0))
4654 return NULL;
4655
4656 if (GET_MODE (op0) == BLKmode || mode == BLKmode)
4657 return NULL;
4658
4659 poly_int64 bytepos;
4660 if (multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
4661 && known_eq (bitsize, GET_MODE_BITSIZE (mode1)))
4662 {
4663 machine_mode opmode = GET_MODE (op0);
4664
4665 if (opmode == VOIDmode)
4666 opmode = TYPE_MODE (TREE_TYPE (tem));
4667
4668 /* This condition may hold if we're expanding the address
4669 right past the end of an array that turned out not to
4670 be addressable (i.e., the address was only computed in
4671 debug stmts). The gen_subreg below would rightfully
4672 crash, and the address doesn't really exist, so just
4673 drop it. */
4674 if (known_ge (bitpos, GET_MODE_BITSIZE (opmode)))
4675 return NULL;
4676
4677 if (multiple_p (bitpos, GET_MODE_BITSIZE (mode)))
4678 return simplify_gen_subreg (mode, op0, opmode, bytepos);
4679 }
4680
4681 return simplify_gen_ternary (SCALAR_INT_MODE_P (GET_MODE (op0))
4682 && TYPE_UNSIGNED (TREE_TYPE (exp))
4683 ? SIGN_EXTRACT
4684 : ZERO_EXTRACT, mode,
4685 GET_MODE (op0) != VOIDmode
4686 ? GET_MODE (op0)
4687 : TYPE_MODE (TREE_TYPE (tem)),
4688 op0, gen_int_mode (bitsize, word_mode),
4689 gen_int_mode (bitpos, word_mode));
4690 }
4691
4692 case ABS_EXPR:
4693 case ABSU_EXPR:
4694 return simplify_gen_unary (ABS, mode, op0, mode);
4695
4696 case NEGATE_EXPR:
4697 return simplify_gen_unary (NEG, mode, op0, mode);
4698
4699 case BIT_NOT_EXPR:
4700 return simplify_gen_unary (NOT, mode, op0, mode);
4701
4702 case FLOAT_EXPR:
4703 return simplify_gen_unary (TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp,
4704 0)))
4705 ? UNSIGNED_FLOAT : FLOAT, mode, op0,
4706 inner_mode);
4707
4708 case FIX_TRUNC_EXPR:
4709 return simplify_gen_unary (unsignedp ? UNSIGNED_FIX : FIX, mode, op0,
4710 inner_mode);
4711
4712 case POINTER_PLUS_EXPR:
4713 /* For the rare target where pointers are not the same size as
4714 size_t, we need to check for mis-matched modes and correct
4715 the addend. */
4716 if (op0 && op1
4717 && is_a <scalar_int_mode> (GET_MODE (op0), &op0_mode)
4718 && is_a <scalar_int_mode> (GET_MODE (op1), &op1_mode)
4719 && op0_mode != op1_mode)
4720 {
4721 if (GET_MODE_BITSIZE (op0_mode) < GET_MODE_BITSIZE (op1_mode)
4722 /* If OP0 is a partial mode, then we must truncate, even
4723 if it has the same bitsize as OP1, since GCC's
4724 representation of partial modes is opaque. */
4725 || (GET_MODE_CLASS (op0_mode) == MODE_PARTIAL_INT
4726 && (GET_MODE_BITSIZE (op0_mode)
4727 == GET_MODE_BITSIZE (op1_mode))))
4728 op1 = simplify_gen_unary (TRUNCATE, op0_mode, op1, op1_mode);
4729 else
4730 /* We always sign-extend, regardless of the signedness of
4731 the operand, because the operand is always unsigned
4732 here even if the original C expression is signed. */
4733 op1 = simplify_gen_unary (SIGN_EXTEND, op0_mode, op1, op1_mode);
4734 }
4735 /* Fall through. */
4736 case PLUS_EXPR:
4737 return simplify_gen_binary (PLUS, mode, op0, op1);
4738
4739 case MINUS_EXPR:
4740 case POINTER_DIFF_EXPR:
4741 return simplify_gen_binary (MINUS, mode, op0, op1);
4742
4743 case MULT_EXPR:
4744 return simplify_gen_binary (MULT, mode, op0, op1);
4745
4746 case RDIV_EXPR:
4747 case TRUNC_DIV_EXPR:
4748 case EXACT_DIV_EXPR:
4749 if (unsignedp)
4750 return simplify_gen_binary (UDIV, mode, op0, op1);
4751 else
4752 return simplify_gen_binary (DIV, mode, op0, op1);
4753
4754 case TRUNC_MOD_EXPR:
4755 return simplify_gen_binary (unsignedp ? UMOD : MOD, mode, op0, op1);
4756
4757 case FLOOR_DIV_EXPR:
4758 if (unsignedp)
4759 return simplify_gen_binary (UDIV, mode, op0, op1);
4760 else
4761 {
4762 rtx div = simplify_gen_binary (DIV, mode, op0, op1);
4763 rtx mod = simplify_gen_binary (MOD, mode, op0, op1);
4764 rtx adj = floor_sdiv_adjust (mode, mod, op1);
4765 return simplify_gen_binary (PLUS, mode, div, adj);
4766 }
4767
4768 case FLOOR_MOD_EXPR:
4769 if (unsignedp)
4770 return simplify_gen_binary (UMOD, mode, op0, op1);
4771 else
4772 {
4773 rtx mod = simplify_gen_binary (MOD, mode, op0, op1);
4774 rtx adj = floor_sdiv_adjust (mode, mod, op1);
4775 adj = simplify_gen_unary (NEG, mode,
4776 simplify_gen_binary (MULT, mode, adj, op1),
4777 mode);
4778 return simplify_gen_binary (PLUS, mode, mod, adj);
4779 }
4780
4781 case CEIL_DIV_EXPR:
4782 if (unsignedp)
4783 {
4784 rtx div = simplify_gen_binary (UDIV, mode, op0, op1);
4785 rtx mod = simplify_gen_binary (UMOD, mode, op0, op1);
4786 rtx adj = ceil_udiv_adjust (mode, mod, op1);
4787 return simplify_gen_binary (PLUS, mode, div, adj);
4788 }
4789 else
4790 {
4791 rtx div = simplify_gen_binary (DIV, mode, op0, op1);
4792 rtx mod = simplify_gen_binary (MOD, mode, op0, op1);
4793 rtx adj = ceil_sdiv_adjust (mode, mod, op1);
4794 return simplify_gen_binary (PLUS, mode, div, adj);
4795 }
4796
4797 case CEIL_MOD_EXPR:
4798 if (unsignedp)
4799 {
4800 rtx mod = simplify_gen_binary (UMOD, mode, op0, op1);
4801 rtx adj = ceil_udiv_adjust (mode, mod, op1);
4802 adj = simplify_gen_unary (NEG, mode,
4803 simplify_gen_binary (MULT, mode, adj, op1),
4804 mode);
4805 return simplify_gen_binary (PLUS, mode, mod, adj);
4806 }
4807 else
4808 {
4809 rtx mod = simplify_gen_binary (MOD, mode, op0, op1);
4810 rtx adj = ceil_sdiv_adjust (mode, mod, op1);
4811 adj = simplify_gen_unary (NEG, mode,
4812 simplify_gen_binary (MULT, mode, adj, op1),
4813 mode);
4814 return simplify_gen_binary (PLUS, mode, mod, adj);
4815 }
4816
4817 case ROUND_DIV_EXPR:
4818 if (unsignedp)
4819 {
4820 rtx div = simplify_gen_binary (UDIV, mode, op0, op1);
4821 rtx mod = simplify_gen_binary (UMOD, mode, op0, op1);
4822 rtx adj = round_udiv_adjust (mode, mod, op1);
4823 return simplify_gen_binary (PLUS, mode, div, adj);
4824 }
4825 else
4826 {
4827 rtx div = simplify_gen_binary (DIV, mode, op0, op1);
4828 rtx mod = simplify_gen_binary (MOD, mode, op0, op1);
4829 rtx adj = round_sdiv_adjust (mode, mod, op1);
4830 return simplify_gen_binary (PLUS, mode, div, adj);
4831 }
4832
4833 case ROUND_MOD_EXPR:
4834 if (unsignedp)
4835 {
4836 rtx mod = simplify_gen_binary (UMOD, mode, op0, op1);
4837 rtx adj = round_udiv_adjust (mode, mod, op1);
4838 adj = simplify_gen_unary (NEG, mode,
4839 simplify_gen_binary (MULT, mode, adj, op1),
4840 mode);
4841 return simplify_gen_binary (PLUS, mode, mod, adj);
4842 }
4843 else
4844 {
4845 rtx mod = simplify_gen_binary (MOD, mode, op0, op1);
4846 rtx adj = round_sdiv_adjust (mode, mod, op1);
4847 adj = simplify_gen_unary (NEG, mode,
4848 simplify_gen_binary (MULT, mode, adj, op1),
4849 mode);
4850 return simplify_gen_binary (PLUS, mode, mod, adj);
4851 }
4852
4853 case LSHIFT_EXPR:
4854 return simplify_gen_binary (ASHIFT, mode, op0, op1);
4855
4856 case RSHIFT_EXPR:
4857 if (unsignedp)
4858 return simplify_gen_binary (LSHIFTRT, mode, op0, op1);
4859 else
4860 return simplify_gen_binary (ASHIFTRT, mode, op0, op1);
4861
4862 case LROTATE_EXPR:
4863 return simplify_gen_binary (ROTATE, mode, op0, op1);
4864
4865 case RROTATE_EXPR:
4866 return simplify_gen_binary (ROTATERT, mode, op0, op1);
4867
4868 case MIN_EXPR:
4869 return simplify_gen_binary (unsignedp ? UMIN : SMIN, mode, op0, op1);
4870
4871 case MAX_EXPR:
4872 return simplify_gen_binary (unsignedp ? UMAX : SMAX, mode, op0, op1);
4873
4874 case BIT_AND_EXPR:
4875 case TRUTH_AND_EXPR:
4876 return simplify_gen_binary (AND, mode, op0, op1);
4877
4878 case BIT_IOR_EXPR:
4879 case TRUTH_OR_EXPR:
4880 return simplify_gen_binary (IOR, mode, op0, op1);
4881
4882 case BIT_XOR_EXPR:
4883 case TRUTH_XOR_EXPR:
4884 return simplify_gen_binary (XOR, mode, op0, op1);
4885
4886 case TRUTH_ANDIF_EXPR:
4887 return gen_rtx_IF_THEN_ELSE (mode, op0, op1, const0_rtx);
4888
4889 case TRUTH_ORIF_EXPR:
4890 return gen_rtx_IF_THEN_ELSE (mode, op0, const_true_rtx, op1);
4891
4892 case TRUTH_NOT_EXPR:
4893 return simplify_gen_relational (EQ, mode, inner_mode, op0, const0_rtx);
4894
4895 case LT_EXPR:
4896 return simplify_gen_relational (unsignedp ? LTU : LT, mode, inner_mode,
4897 op0, op1);
4898
4899 case LE_EXPR:
4900 return simplify_gen_relational (unsignedp ? LEU : LE, mode, inner_mode,
4901 op0, op1);
4902
4903 case GT_EXPR:
4904 return simplify_gen_relational (unsignedp ? GTU : GT, mode, inner_mode,
4905 op0, op1);
4906
4907 case GE_EXPR:
4908 return simplify_gen_relational (unsignedp ? GEU : GE, mode, inner_mode,
4909 op0, op1);
4910
4911 case EQ_EXPR:
4912 return simplify_gen_relational (EQ, mode, inner_mode, op0, op1);
4913
4914 case NE_EXPR:
4915 return simplify_gen_relational (NE, mode, inner_mode, op0, op1);
4916
4917 case UNORDERED_EXPR:
4918 return simplify_gen_relational (UNORDERED, mode, inner_mode, op0, op1);
4919
4920 case ORDERED_EXPR:
4921 return simplify_gen_relational (ORDERED, mode, inner_mode, op0, op1);
4922
4923 case UNLT_EXPR:
4924 return simplify_gen_relational (UNLT, mode, inner_mode, op0, op1);
4925
4926 case UNLE_EXPR:
4927 return simplify_gen_relational (UNLE, mode, inner_mode, op0, op1);
4928
4929 case UNGT_EXPR:
4930 return simplify_gen_relational (UNGT, mode, inner_mode, op0, op1);
4931
4932 case UNGE_EXPR:
4933 return simplify_gen_relational (UNGE, mode, inner_mode, op0, op1);
4934
4935 case UNEQ_EXPR:
4936 return simplify_gen_relational (UNEQ, mode, inner_mode, op0, op1);
4937
4938 case LTGT_EXPR:
4939 return simplify_gen_relational (LTGT, mode, inner_mode, op0, op1);
4940
4941 case COND_EXPR:
4942 return gen_rtx_IF_THEN_ELSE (mode, op0, op1, op2);
4943
4944 case COMPLEX_EXPR:
4945 gcc_assert (COMPLEX_MODE_P (mode));
4946 if (GET_MODE (op0) == VOIDmode)
4947 op0 = gen_rtx_CONST (GET_MODE_INNER (mode), op0);
4948 if (GET_MODE (op1) == VOIDmode)
4949 op1 = gen_rtx_CONST (GET_MODE_INNER (mode), op1);
4950 return gen_rtx_CONCAT (mode, op0, op1);
4951
4952 case CONJ_EXPR:
4953 if (GET_CODE (op0) == CONCAT)
4954 return gen_rtx_CONCAT (mode, XEXP (op0, 0),
4955 simplify_gen_unary (NEG, GET_MODE_INNER (mode),
4956 XEXP (op0, 1),
4957 GET_MODE_INNER (mode)));
4958 else
4959 {
4960 scalar_mode imode = GET_MODE_INNER (mode);
4961 rtx re, im;
4962
4963 if (MEM_P (op0))
4964 {
4965 re = adjust_address_nv (op0, imode, 0);
4966 im = adjust_address_nv (op0, imode, GET_MODE_SIZE (imode));
4967 }
4968 else
4969 {
4970 scalar_int_mode ifmode;
4971 scalar_int_mode ihmode;
4972 rtx halfsize;
4973 if (!int_mode_for_mode (mode).exists (&ifmode)
4974 || !int_mode_for_mode (imode).exists (&ihmode))
4975 return NULL;
4976 halfsize = GEN_INT (GET_MODE_BITSIZE (ihmode));
4977 re = op0;
4978 if (mode != ifmode)
4979 re = gen_rtx_SUBREG (ifmode, re, 0);
4980 re = gen_rtx_ZERO_EXTRACT (ihmode, re, halfsize, const0_rtx);
4981 if (imode != ihmode)
4982 re = gen_rtx_SUBREG (imode, re, 0);
4983 im = copy_rtx (op0);
4984 if (mode != ifmode)
4985 im = gen_rtx_SUBREG (ifmode, im, 0);
4986 im = gen_rtx_ZERO_EXTRACT (ihmode, im, halfsize, halfsize);
4987 if (imode != ihmode)
4988 im = gen_rtx_SUBREG (imode, im, 0);
4989 }
4990 im = gen_rtx_NEG (imode, im);
4991 return gen_rtx_CONCAT (mode, re, im);
4992 }
4993
4994 case ADDR_EXPR:
4995 op0 = expand_debug_expr (TREE_OPERAND (exp, 0));
4996 if (!op0 || !MEM_P (op0))
4997 {
4998 if ((TREE_CODE (TREE_OPERAND (exp, 0)) == VAR_DECL
4999 || TREE_CODE (TREE_OPERAND (exp, 0)) == PARM_DECL
5000 || TREE_CODE (TREE_OPERAND (exp, 0)) == RESULT_DECL)
5001 && (!TREE_ADDRESSABLE (TREE_OPERAND (exp, 0))
5002 || target_for_debug_bind (TREE_OPERAND (exp, 0))))
5003 return gen_rtx_DEBUG_IMPLICIT_PTR (mode, TREE_OPERAND (exp, 0));
5004
5005 if (handled_component_p (TREE_OPERAND (exp, 0)))
5006 {
5007 poly_int64 bitoffset, bitsize, maxsize, byteoffset;
5008 bool reverse;
5009 tree decl
5010 = get_ref_base_and_extent (TREE_OPERAND (exp, 0), &bitoffset,
5011 &bitsize, &maxsize, &reverse);
5012 if ((VAR_P (decl)
5013 || TREE_CODE (decl) == PARM_DECL
5014 || TREE_CODE (decl) == RESULT_DECL)
5015 && (!TREE_ADDRESSABLE (decl)
5016 || target_for_debug_bind (decl))
5017 && multiple_p (bitoffset, BITS_PER_UNIT, &byteoffset)
5018 && known_gt (bitsize, 0)
5019 && known_eq (bitsize, maxsize))
5020 {
5021 rtx base = gen_rtx_DEBUG_IMPLICIT_PTR (mode, decl);
5022 return plus_constant (mode, base, byteoffset);
5023 }
5024 }
5025
5026 if (TREE_CODE (TREE_OPERAND (exp, 0)) == MEM_REF
5027 && TREE_CODE (TREE_OPERAND (TREE_OPERAND (exp, 0), 0))
5028 == ADDR_EXPR)
5029 {
5030 op0 = expand_debug_expr (TREE_OPERAND (TREE_OPERAND (exp, 0),
5031 0));
5032 if (op0 != NULL
5033 && (GET_CODE (op0) == DEBUG_IMPLICIT_PTR
5034 || (GET_CODE (op0) == PLUS
5035 && GET_CODE (XEXP (op0, 0)) == DEBUG_IMPLICIT_PTR
5036 && CONST_INT_P (XEXP (op0, 1)))))
5037 {
5038 op1 = expand_debug_expr (TREE_OPERAND (TREE_OPERAND (exp, 0),
5039 1));
5040 poly_int64 offset;
5041 if (!op1 || !poly_int_rtx_p (op1, &offset))
5042 return NULL;
5043
5044 return plus_constant (mode, op0, offset);
5045 }
5046 }
5047
5048 return NULL;
5049 }
5050
5051 as = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (exp)));
5052 addr_mode = SCALAR_INT_TYPE_MODE (TREE_TYPE (exp));
5053 op0 = convert_debug_memory_address (addr_mode, XEXP (op0, 0), as);
5054
5055 return op0;
5056
5057 case VECTOR_CST:
5058 {
5059 unsigned HOST_WIDE_INT i, nelts;
5060
5061 if (!VECTOR_CST_NELTS (exp).is_constant (&nelts))
5062 return NULL;
5063
5064 op0 = gen_rtx_CONCATN (mode, rtvec_alloc (nelts));
5065
5066 for (i = 0; i < nelts; ++i)
5067 {
5068 op1 = expand_debug_expr (VECTOR_CST_ELT (exp, i));
5069 if (!op1)
5070 return NULL;
5071 XVECEXP (op0, 0, i) = op1;
5072 }
5073
5074 return op0;
5075 }
5076
5077 case CONSTRUCTOR:
5078 if (TREE_CLOBBER_P (exp))
5079 return NULL;
5080 else if (TREE_CODE (TREE_TYPE (exp)) == VECTOR_TYPE)
5081 {
5082 unsigned i;
5083 unsigned HOST_WIDE_INT nelts;
5084 tree val;
5085
5086 if (!TYPE_VECTOR_SUBPARTS (TREE_TYPE (exp)).is_constant (&nelts))
5087 goto flag_unsupported;
5088
5089 op0 = gen_rtx_CONCATN (mode, rtvec_alloc (nelts));
5090
5091 FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), i, val)
5092 {
5093 op1 = expand_debug_expr (val);
5094 if (!op1)
5095 return NULL;
5096 XVECEXP (op0, 0, i) = op1;
5097 }
5098
5099 if (i < nelts)
5100 {
5101 op1 = expand_debug_expr
5102 (build_zero_cst (TREE_TYPE (TREE_TYPE (exp))));
5103
5104 if (!op1)
5105 return NULL;
5106
5107 for (; i < nelts; i++)
5108 XVECEXP (op0, 0, i) = op1;
5109 }
5110
5111 return op0;
5112 }
5113 else
5114 goto flag_unsupported;
5115
5116 case CALL_EXPR:
5117 /* ??? Maybe handle some builtins? */
5118 return NULL;
5119
5120 case SSA_NAME:
5121 {
5122 gimple *g = get_gimple_for_ssa_name (exp);
5123 if (g)
5124 {
5125 tree t = NULL_TREE;
5126 if (deep_ter_debug_map)
5127 {
5128 tree *slot = deep_ter_debug_map->get (exp);
5129 if (slot)
5130 t = *slot;
5131 }
5132 if (t == NULL_TREE)
5133 t = gimple_assign_rhs_to_tree (g);
5134 op0 = expand_debug_expr (t);
5135 if (!op0)
5136 return NULL;
5137 }
5138 else
5139 {
5140 /* If this is a reference to an incoming value of
5141 a parameter that is never used in the code, or where the
5142 incoming value itself is never used, use the
5143 PARM_DECL's DECL_RTL if set. */
5144 if (SSA_NAME_IS_DEFAULT_DEF (exp)
5145 && SSA_NAME_VAR (exp)
5146 && TREE_CODE (SSA_NAME_VAR (exp)) == PARM_DECL
5147 && has_zero_uses (exp))
5148 {
5149 op0 = expand_debug_parm_decl (SSA_NAME_VAR (exp));
5150 if (op0)
5151 goto adjust_mode;
5152 op0 = expand_debug_expr (SSA_NAME_VAR (exp));
5153 if (op0)
5154 goto adjust_mode;
5155 }
5156
5157 int part = var_to_partition (SA.map, exp);
5158
5159 if (part == NO_PARTITION)
5160 return NULL;
5161
5162 gcc_assert (part >= 0 && (unsigned)part < SA.map->num_partitions);
5163
5164 op0 = copy_rtx (SA.partition_to_pseudo[part]);
5165 }
5166 goto adjust_mode;
5167 }
5168
5169 case ERROR_MARK:
5170 return NULL;
5171
5172 /* Vector stuff. For most of the codes we don't have rtl codes. */
5173 case REALIGN_LOAD_EXPR:
5174 case VEC_COND_EXPR:
5175 case VEC_PACK_FIX_TRUNC_EXPR:
5176 case VEC_PACK_FLOAT_EXPR:
5177 case VEC_PACK_SAT_EXPR:
5178 case VEC_PACK_TRUNC_EXPR:
5179 case VEC_UNPACK_FIX_TRUNC_HI_EXPR:
5180 case VEC_UNPACK_FIX_TRUNC_LO_EXPR:
5181 case VEC_UNPACK_FLOAT_HI_EXPR:
5182 case VEC_UNPACK_FLOAT_LO_EXPR:
5183 case VEC_UNPACK_HI_EXPR:
5184 case VEC_UNPACK_LO_EXPR:
5185 case VEC_WIDEN_MULT_HI_EXPR:
5186 case VEC_WIDEN_MULT_LO_EXPR:
5187 case VEC_WIDEN_MULT_EVEN_EXPR:
5188 case VEC_WIDEN_MULT_ODD_EXPR:
5189 case VEC_WIDEN_LSHIFT_HI_EXPR:
5190 case VEC_WIDEN_LSHIFT_LO_EXPR:
5191 case VEC_PERM_EXPR:
5192 case VEC_DUPLICATE_EXPR:
5193 case VEC_SERIES_EXPR:
5194 return NULL;
5195
5196 /* Misc codes. */
5197 case ADDR_SPACE_CONVERT_EXPR:
5198 case FIXED_CONVERT_EXPR:
5199 case OBJ_TYPE_REF:
5200 case WITH_SIZE_EXPR:
5201 case BIT_INSERT_EXPR:
5202 return NULL;
5203
5204 case DOT_PROD_EXPR:
5205 if (SCALAR_INT_MODE_P (GET_MODE (op0))
5206 && SCALAR_INT_MODE_P (mode))
5207 {
5208 op0
5209 = simplify_gen_unary (TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp,
5210 0)))
5211 ? ZERO_EXTEND : SIGN_EXTEND, mode, op0,
5212 inner_mode);
5213 op1
5214 = simplify_gen_unary (TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp,
5215 1)))
5216 ? ZERO_EXTEND : SIGN_EXTEND, mode, op1,
5217 inner_mode);
5218 op0 = simplify_gen_binary (MULT, mode, op0, op1);
5219 return simplify_gen_binary (PLUS, mode, op0, op2);
5220 }
5221 return NULL;
5222
5223 case WIDEN_MULT_EXPR:
5224 case WIDEN_MULT_PLUS_EXPR:
5225 case WIDEN_MULT_MINUS_EXPR:
5226 if (SCALAR_INT_MODE_P (GET_MODE (op0))
5227 && SCALAR_INT_MODE_P (mode))
5228 {
5229 inner_mode = GET_MODE (op0);
5230 if (TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp, 0))))
5231 op0 = simplify_gen_unary (ZERO_EXTEND, mode, op0, inner_mode);
5232 else
5233 op0 = simplify_gen_unary (SIGN_EXTEND, mode, op0, inner_mode);
5234 if (TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp, 1))))
5235 op1 = simplify_gen_unary (ZERO_EXTEND, mode, op1, inner_mode);
5236 else
5237 op1 = simplify_gen_unary (SIGN_EXTEND, mode, op1, inner_mode);
5238 op0 = simplify_gen_binary (MULT, mode, op0, op1);
5239 if (TREE_CODE (exp) == WIDEN_MULT_EXPR)
5240 return op0;
5241 else if (TREE_CODE (exp) == WIDEN_MULT_PLUS_EXPR)
5242 return simplify_gen_binary (PLUS, mode, op0, op2);
5243 else
5244 return simplify_gen_binary (MINUS, mode, op2, op0);
5245 }
5246 return NULL;
5247
5248 case MULT_HIGHPART_EXPR:
5249 /* ??? Similar to the above. */
5250 return NULL;
5251
5252 case WIDEN_SUM_EXPR:
5253 case WIDEN_LSHIFT_EXPR:
5254 if (SCALAR_INT_MODE_P (GET_MODE (op0))
5255 && SCALAR_INT_MODE_P (mode))
5256 {
5257 op0
5258 = simplify_gen_unary (TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp,
5259 0)))
5260 ? ZERO_EXTEND : SIGN_EXTEND, mode, op0,
5261 inner_mode);
5262 return simplify_gen_binary (TREE_CODE (exp) == WIDEN_LSHIFT_EXPR
5263 ? ASHIFT : PLUS, mode, op0, op1);
5264 }
5265 return NULL;
5266
5267 default:
5268 flag_unsupported:
5269 if (flag_checking)
5270 {
5271 debug_tree (exp);
5272 gcc_unreachable ();
5273 }
5274 return NULL;
5275 }
5276 }
5277
5278 /* Return an RTX equivalent to the source bind value of the tree expression
5279 EXP. */
5280
5281 static rtx
5282 expand_debug_source_expr (tree exp)
5283 {
5284 rtx op0 = NULL_RTX;
5285 machine_mode mode = VOIDmode, inner_mode;
5286
5287 switch (TREE_CODE (exp))
5288 {
5289 case VAR_DECL:
5290 if (DECL_ABSTRACT_ORIGIN (exp))
5291 return expand_debug_source_expr (DECL_ABSTRACT_ORIGIN (exp));
5292 break;
5293 case PARM_DECL:
5294 {
5295 mode = DECL_MODE (exp);
5296 op0 = expand_debug_parm_decl (exp);
5297 if (op0)
5298 break;
5299 /* See if this isn't an argument that has been completely
5300 optimized out. */
5301 if (!DECL_RTL_SET_P (exp)
5302 && !DECL_INCOMING_RTL (exp)
5303 && DECL_ABSTRACT_ORIGIN (current_function_decl))
5304 {
5305 tree aexp = DECL_ORIGIN (exp);
5306 if (DECL_CONTEXT (aexp)
5307 == DECL_ABSTRACT_ORIGIN (current_function_decl))
5308 {
5309 vec<tree, va_gc> **debug_args;
5310 unsigned int ix;
5311 tree ddecl;
5312 debug_args = decl_debug_args_lookup (current_function_decl);
5313 if (debug_args != NULL)
5314 {
5315 for (ix = 0; vec_safe_iterate (*debug_args, ix, &ddecl);
5316 ix += 2)
5317 if (ddecl == aexp)
5318 return gen_rtx_DEBUG_PARAMETER_REF (mode, aexp);
5319 }
5320 }
5321 }
5322 break;
5323 }
5324 default:
5325 break;
5326 }
5327
5328 if (op0 == NULL_RTX)
5329 return NULL_RTX;
5330
5331 inner_mode = GET_MODE (op0);
5332 if (mode == inner_mode)
5333 return op0;
5334
5335 if (FLOAT_MODE_P (mode) && FLOAT_MODE_P (inner_mode))
5336 {
5337 if (GET_MODE_UNIT_BITSIZE (mode)
5338 == GET_MODE_UNIT_BITSIZE (inner_mode))
5339 op0 = simplify_gen_subreg (mode, op0, inner_mode, 0);
5340 else if (GET_MODE_UNIT_BITSIZE (mode)
5341 < GET_MODE_UNIT_BITSIZE (inner_mode))
5342 op0 = simplify_gen_unary (FLOAT_TRUNCATE, mode, op0, inner_mode);
5343 else
5344 op0 = simplify_gen_unary (FLOAT_EXTEND, mode, op0, inner_mode);
5345 }
5346 else if (FLOAT_MODE_P (mode))
5347 gcc_unreachable ();
5348 else if (FLOAT_MODE_P (inner_mode))
5349 {
5350 if (TYPE_UNSIGNED (TREE_TYPE (exp)))
5351 op0 = simplify_gen_unary (UNSIGNED_FIX, mode, op0, inner_mode);
5352 else
5353 op0 = simplify_gen_unary (FIX, mode, op0, inner_mode);
5354 }
5355 else if (GET_MODE_UNIT_PRECISION (mode)
5356 == GET_MODE_UNIT_PRECISION (inner_mode))
5357 op0 = lowpart_subreg (mode, op0, inner_mode);
5358 else if (GET_MODE_UNIT_PRECISION (mode)
5359 < GET_MODE_UNIT_PRECISION (inner_mode))
5360 op0 = simplify_gen_unary (TRUNCATE, mode, op0, inner_mode);
5361 else if (TYPE_UNSIGNED (TREE_TYPE (exp)))
5362 op0 = simplify_gen_unary (ZERO_EXTEND, mode, op0, inner_mode);
5363 else
5364 op0 = simplify_gen_unary (SIGN_EXTEND, mode, op0, inner_mode);
5365
5366 return op0;
5367 }
5368
5369 /* Ensure INSN_VAR_LOCATION_LOC (insn) doesn't have unbounded complexity.
5370 Allow 4 levels of rtl nesting for most rtl codes, and if we see anything
5371 deeper than that, create DEBUG_EXPRs and emit DEBUG_INSNs before INSN. */
5372
5373 static void
5374 avoid_complex_debug_insns (rtx_insn *insn, rtx *exp_p, int depth)
5375 {
5376 rtx exp = *exp_p;
5377
5378 if (exp == NULL_RTX)
5379 return;
5380
5381 if ((OBJECT_P (exp) && !MEM_P (exp)) || GET_CODE (exp) == CLOBBER)
5382 return;
5383
5384 if (depth == 4)
5385 {
5386 /* Create DEBUG_EXPR (and DEBUG_EXPR_DECL). */
5387 rtx dval = make_debug_expr_from_rtl (exp);
5388
5389 /* Emit a debug bind insn before INSN. */
5390 rtx bind = gen_rtx_VAR_LOCATION (GET_MODE (exp),
5391 DEBUG_EXPR_TREE_DECL (dval), exp,
5392 VAR_INIT_STATUS_INITIALIZED);
5393
5394 emit_debug_insn_before (bind, insn);
5395 *exp_p = dval;
5396 return;
5397 }
5398
5399 const char *format_ptr = GET_RTX_FORMAT (GET_CODE (exp));
5400 int i, j;
5401 for (i = 0; i < GET_RTX_LENGTH (GET_CODE (exp)); i++)
5402 switch (*format_ptr++)
5403 {
5404 case 'e':
5405 avoid_complex_debug_insns (insn, &XEXP (exp, i), depth + 1);
5406 break;
5407
5408 case 'E':
5409 case 'V':
5410 for (j = 0; j < XVECLEN (exp, i); j++)
5411 avoid_complex_debug_insns (insn, &XVECEXP (exp, i, j), depth + 1);
5412 break;
5413
5414 default:
5415 break;
5416 }
5417 }
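
/* Illustrative effect of the depth limit above (hypothetical location
   rtl, not from this file): in a bind whose location nests more than
   four operations deep, e.g.

     (plus (mult (minus (ashift (mult ...) ...) ...) ...) ...)

   the subexpression reached at depth 4 is replaced by a fresh
   (debug_expr D#n), and a VAR_LOCATION binding D#n to that
   subexpression is emitted as a debug insn just before INSN.  */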
5418
5419 /* Expand the _LOCs in debug insns. We run this after expanding all
5420 regular insns, so that any variables referenced in the function
5421 will have their DECL_RTLs set. */
5422
5423 static void
5424 expand_debug_locations (void)
5425 {
5426 rtx_insn *insn;
5427 rtx_insn *last = get_last_insn ();
5428 int save_strict_alias = flag_strict_aliasing;
5429
5430 /* New alias sets while setting up memory attributes cause
5431 -fcompare-debug failures, even though they don't bring about any
5432 codegen changes. */
5433 flag_strict_aliasing = 0;
5434
5435 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5436 if (DEBUG_BIND_INSN_P (insn))
5437 {
5438 tree value = (tree)INSN_VAR_LOCATION_LOC (insn);
5439 rtx val;
5440 rtx_insn *prev_insn, *insn2;
5441 machine_mode mode;
5442
5443 if (value == NULL_TREE)
5444 val = NULL_RTX;
5445 else
5446 {
5447 if (INSN_VAR_LOCATION_STATUS (insn)
5448 == VAR_INIT_STATUS_UNINITIALIZED)
5449 val = expand_debug_source_expr (value);
5450 /* The avoid_deep_ter_for_debug function inserts
5451 debug bind stmts after an SSA_NAME definition, with the
5452 SSA_NAME as the whole bind location. Temporarily disable
5453 expansion of that SSA_NAME into the DEBUG_EXPR_DECL
5454 being defined in this DEBUG_INSN. */
5455 else if (deep_ter_debug_map && TREE_CODE (value) == SSA_NAME)
5456 {
5457 tree *slot = deep_ter_debug_map->get (value);
5458 if (slot)
5459 {
5460 if (*slot == INSN_VAR_LOCATION_DECL (insn))
5461 *slot = NULL_TREE;
5462 else
5463 slot = NULL;
5464 }
5465 val = expand_debug_expr (value);
5466 if (slot)
5467 *slot = INSN_VAR_LOCATION_DECL (insn);
5468 }
5469 else
5470 val = expand_debug_expr (value);
5471 gcc_assert (last == get_last_insn ());
5472 }
5473
5474 if (!val)
5475 val = gen_rtx_UNKNOWN_VAR_LOC ();
5476 else
5477 {
5478 mode = GET_MODE (INSN_VAR_LOCATION (insn));
5479
5480 gcc_assert (mode == GET_MODE (val)
5481 || (GET_MODE (val) == VOIDmode
5482 && (CONST_SCALAR_INT_P (val)
5483 || GET_CODE (val) == CONST_FIXED
5484 || GET_CODE (val) == LABEL_REF)));
5485 }
5486
5487 INSN_VAR_LOCATION_LOC (insn) = val;
5488 prev_insn = PREV_INSN (insn);
5489 for (insn2 = insn; insn2 != prev_insn; insn2 = PREV_INSN (insn2))
5490 avoid_complex_debug_insns (insn2, &INSN_VAR_LOCATION_LOC (insn2), 0);
5491 }
5492
5493 flag_strict_aliasing = save_strict_alias;
5494 }
5495
5496 /* Swap the operands of commutative operations so that the more
5497 expensive operand is expanded first. */
5498
5499 static void
5500 reorder_operands (basic_block bb)
5501 {
5502 unsigned int *lattice; /* Hold cost of each statement. */
5503 unsigned int i = 0, n = 0;
5504 gimple_stmt_iterator gsi;
5505 gimple_seq stmts;
5506 gimple *stmt;
5507 bool swap;
5508 tree op0, op1;
5509 ssa_op_iter iter;
5510 use_operand_p use_p;
5511 gimple *def0, *def1;
5512
5513 /* Compute cost of each statement using estimate_num_insns. */
5514 stmts = bb_seq (bb);
5515 for (gsi = gsi_start (stmts); !gsi_end_p (gsi); gsi_next (&gsi))
5516 {
5517 stmt = gsi_stmt (gsi);
5518 if (!is_gimple_debug (stmt))
5519 gimple_set_uid (stmt, n++);
5520 }
5521 lattice = XNEWVEC (unsigned int, n);
5522 for (gsi = gsi_start (stmts); !gsi_end_p (gsi); gsi_next (&gsi))
5523 {
5524 unsigned cost;
5525 stmt = gsi_stmt (gsi);
5526 if (is_gimple_debug (stmt))
5527 continue;
5528 cost = estimate_num_insns (stmt, &eni_size_weights);
5529 lattice[i] = cost;
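/* Add the cost of the TERed defining statements of the operands, so the
   lattice value approximates the cost of the whole expression tree that
   would be expanded at this statement.  */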
5530 FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE)
5531 {
5532 tree use = USE_FROM_PTR (use_p);
5533 gimple *def_stmt;
5534 if (TREE_CODE (use) != SSA_NAME)
5535 continue;
5536 def_stmt = get_gimple_for_ssa_name (use);
5537 if (!def_stmt)
5538 continue;
5539 lattice[i] += lattice[gimple_uid (def_stmt)];
5540 }
5541 i++;
5542 if (!is_gimple_assign (stmt)
5543 || !commutative_tree_code (gimple_assign_rhs_code (stmt)))
5544 continue;
5545 op0 = gimple_op (stmt, 1);
5546 op1 = gimple_op (stmt, 2);
5547 if (TREE_CODE (op0) != SSA_NAME
5548 || TREE_CODE (op1) != SSA_NAME)
5549 continue;
5550 /* Swap operands if the second one is more expensive. */
5551 def0 = get_gimple_for_ssa_name (op0);
5552 def1 = get_gimple_for_ssa_name (op1);
5553 if (!def1)
5554 continue;
5555 swap = false;
5556 if (!def0 || lattice[gimple_uid (def1)] > lattice[gimple_uid (def0)])
5557 swap = true;
5558 if (swap)
5559 {
5560 if (dump_file && (dump_flags & TDF_DETAILS))
5561 {
5562 fprintf (dump_file, "Swap operands in stmt:\n");
5563 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
5564 fprintf (dump_file, "Cost left opnd=%d, right opnd=%d\n",
5565 def0 ? lattice[gimple_uid (def0)] : 0,
5566 lattice[gimple_uid (def1)]);
5567 }
5568 swap_ssa_operands (stmt, gimple_assign_rhs1_ptr (stmt),
5569 gimple_assign_rhs2_ptr (stmt));
5570 }
5571 }
5572 XDELETE (lattice);
5573 }
5574
5575 /* Expand basic block BB from GIMPLE trees to RTL. */
5576
5577 static basic_block
5578 expand_gimple_basic_block (basic_block bb, bool disable_tail_calls)
5579 {
5580 gimple_stmt_iterator gsi;
5581 gimple_seq stmts;
5582 gimple *stmt = NULL;
5583 rtx_note *note = NULL;
5584 rtx_insn *last;
5585 edge e;
5586 edge_iterator ei;
5587
5588 if (dump_file)
5589 fprintf (dump_file, "\n;; Generating RTL for gimple basic block %d\n",
5590 bb->index);
5591
5592 /* Note that since we are now transitioning from GIMPLE to RTL, we
5593 cannot use the gsi_*_bb() routines because they expect the basic
5594 block to be in GIMPLE, instead of RTL. Therefore, we need to
5595 access the BB sequence directly. */
5596 if (optimize)
5597 reorder_operands (bb);
5598 stmts = bb_seq (bb);
5599 bb->il.gimple.seq = NULL;
5600 bb->il.gimple.phi_nodes = NULL;
5601 rtl_profile_for_bb (bb);
5602 init_rtl_bb_info (bb);
5603 bb->flags |= BB_RTL;
5604
5605 /* Remove the RETURN_EXPR if we may fall through to the exit
5606 instead. */
5607 gsi = gsi_last (stmts);
5608 if (!gsi_end_p (gsi)
5609 && gimple_code (gsi_stmt (gsi)) == GIMPLE_RETURN)
5610 {
5611 greturn *ret_stmt = as_a <greturn *> (gsi_stmt (gsi));
5612
5613 gcc_assert (single_succ_p (bb));
5614 gcc_assert (single_succ (bb) == EXIT_BLOCK_PTR_FOR_FN (cfun));
5615
5616 if (bb->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
5617 && !gimple_return_retval (ret_stmt))
5618 {
5619 gsi_remove (&gsi, false);
5620 single_succ_edge (bb)->flags |= EDGE_FALLTHRU;
5621 }
5622 }
5623
5624 gsi = gsi_start (stmts);
5625 if (!gsi_end_p (gsi))
5626 {
5627 stmt = gsi_stmt (gsi);
5628 if (gimple_code (stmt) != GIMPLE_LABEL)
5629 stmt = NULL;
5630 }
5631
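/* A label for this block may already have been created while expanding a
   jump to it from an earlier block; if so, emit that label here instead
   of creating a new one.  */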
5632 rtx_code_label **elt = lab_rtx_for_bb->get (bb);
5633
5634 if (stmt || elt)
5635 {
5636 gcc_checking_assert (!note);
5637 last = get_last_insn ();
5638
5639 if (stmt)
5640 {
5641 expand_gimple_stmt (stmt);
5642 gsi_next (&gsi);
5643 }
5644
5645 if (elt)
5646 emit_label (*elt);
5647
5648 BB_HEAD (bb) = NEXT_INSN (last);
5649 if (NOTE_P (BB_HEAD (bb)))
5650 BB_HEAD (bb) = NEXT_INSN (BB_HEAD (bb));
5651 gcc_assert (LABEL_P (BB_HEAD (bb)));
5652 note = emit_note_after (NOTE_INSN_BASIC_BLOCK, BB_HEAD (bb));
5653
5654 maybe_dump_rtl_for_gimple_stmt (stmt, last);
5655 }
5656 else
5657 BB_HEAD (bb) = note = emit_note (NOTE_INSN_BASIC_BLOCK);
5658
5659 if (note)
5660 NOTE_BASIC_BLOCK (note) = bb;
5661
5662 for (; !gsi_end_p (gsi); gsi_next (&gsi))
5663 {
5664 basic_block new_bb;
5665
5666 stmt = gsi_stmt (gsi);
5667
5668 /* If this statement is a non-debug one, and we generate debug
5669 insns, then this one might be the last real use of a TERed
5670 SSA_NAME, but where there are still some debug uses further
5671 SSA_NAME while there are still some debug uses further
5672 uses by their RHS might lead to wrong debug info, as coalescing
5673 might make the operands of such RHS be placed into the same
5674 pseudo as something else. Like so:
5675 a_1 = a_0 + 1; // Assume a_1 is TERed and a_0 is dead
5676 use(a_1);
5677 a_2 = ...
5678 #DEBUG ... => a_1
5679 As a_0 and a_2 don't overlap in lifetime, assume they are coalesced.
5680 If we would now expand a_1 by its RHS (a_0 + 1) in the debug use,
5681 the write to a_2 would actually have clobbered the place which
5682 formerly held a_0.
5683
5684 So, instead of that, we recognize the situation, and generate
5685 debug temporaries at the last real use of TERed SSA names:
5686 a_1 = a_0 + 1;
5687 #DEBUG #D1 => a_1
5688 use(a_1);
5689 a_2 = ...
5690 #DEBUG ... => #D1
5691 */
5692 if (MAY_HAVE_DEBUG_BIND_INSNS
5693 && SA.values
5694 && !is_gimple_debug (stmt))
5695 {
5696 ssa_op_iter iter;
5697 tree op;
5698 gimple *def;
5699
5700 location_t sloc = curr_insn_location ();
5701
5702 /* Look for SSA names that have their last use here (TERed
5703 names always have only one real use). */
5704 FOR_EACH_SSA_TREE_OPERAND (op, stmt, iter, SSA_OP_USE)
5705 if ((def = get_gimple_for_ssa_name (op)))
5706 {
5707 imm_use_iterator imm_iter;
5708 use_operand_p use_p;
5709 bool have_debug_uses = false;
5710
5711 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
5712 {
5713 if (gimple_debug_bind_p (USE_STMT (use_p)))
5714 {
5715 have_debug_uses = true;
5716 break;
5717 }
5718 }
5719
5720 if (have_debug_uses)
5721 {
5722 /* OP is a TERed SSA name, with DEF its defining
5723 statement, and where OP is used in further debug
5724 instructions. Generate a debug temporary, and
5725 replace all uses of OP in debug insns with that
5726 temporary. */
5727 gimple *debugstmt;
5728 tree value = gimple_assign_rhs_to_tree (def);
5729 tree vexpr = make_node (DEBUG_EXPR_DECL);
5730 rtx val;
5731 machine_mode mode;
5732
5733 set_curr_insn_location (gimple_location (def));
5734
5735 DECL_ARTIFICIAL (vexpr) = 1;
5736 TREE_TYPE (vexpr) = TREE_TYPE (value);
5737 if (DECL_P (value))
5738 mode = DECL_MODE (value);
5739 else
5740 mode = TYPE_MODE (TREE_TYPE (value));
5741 SET_DECL_MODE (vexpr, mode);
5742
5743 val = gen_rtx_VAR_LOCATION
5744 (mode, vexpr, (rtx)value, VAR_INIT_STATUS_INITIALIZED);
5745
5746 emit_debug_insn (val);
5747
5748 FOR_EACH_IMM_USE_STMT (debugstmt, imm_iter, op)
5749 {
5750 if (!gimple_debug_bind_p (debugstmt))
5751 continue;
5752
5753 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
5754 SET_USE (use_p, vexpr);
5755
5756 update_stmt (debugstmt);
5757 }
5758 }
5759 }
5760 set_curr_insn_location (sloc);
5761 }
5762
5763 currently_expanding_gimple_stmt = stmt;
5764
5765 /* Expand this statement, then evaluate the resulting RTL and
5766 fixup the CFG accordingly. */
5767 if (gimple_code (stmt) == GIMPLE_COND)
5768 {
5769 new_bb = expand_gimple_cond (bb, as_a <gcond *> (stmt));
5770 if (new_bb)
5771 return new_bb;
5772 }
5773 else if (is_gimple_debug (stmt))
5774 {
5775 location_t sloc = curr_insn_location ();
5776 gimple_stmt_iterator nsi = gsi;
5777
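/* Expand this debug stmt together with any debug stmts immediately
   following it.  */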
5778 for (;;)
5779 {
5780 tree var;
5781 tree value = NULL_TREE;
5782 rtx val = NULL_RTX;
5783 machine_mode mode;
5784
5785 if (!gimple_debug_nonbind_marker_p (stmt))
5786 {
5787 if (gimple_debug_bind_p (stmt))
5788 {
5789 var = gimple_debug_bind_get_var (stmt);
5790
5791 if (TREE_CODE (var) != DEBUG_EXPR_DECL
5792 && TREE_CODE (var) != LABEL_DECL
5793 && !target_for_debug_bind (var))
5794 goto delink_debug_stmt;
5795
5796 if (DECL_P (var))
5797 mode = DECL_MODE (var);
5798 else
5799 mode = TYPE_MODE (TREE_TYPE (var));
5800
5801 if (gimple_debug_bind_has_value_p (stmt))
5802 value = gimple_debug_bind_get_value (stmt);
5803
5804 val = gen_rtx_VAR_LOCATION
5805 (mode, var, (rtx)value, VAR_INIT_STATUS_INITIALIZED);
5806 }
5807 else if (gimple_debug_source_bind_p (stmt))
5808 {
5809 var = gimple_debug_source_bind_get_var (stmt);
5810
5811 value = gimple_debug_source_bind_get_value (stmt);
5812
5813 mode = DECL_MODE (var);
5814
5815 val = gen_rtx_VAR_LOCATION (mode, var, (rtx)value,
5816 VAR_INIT_STATUS_UNINITIALIZED);
5817 }
5818 else
5819 gcc_unreachable ();
5820 }
5821 /* If this function was first compiled with markers
5822 enabled, but they're now disabled (e.g. LTO), drop
5823 them on the floor. */
5824 else if (gimple_debug_nonbind_marker_p (stmt)
5825 && !MAY_HAVE_DEBUG_MARKER_INSNS)
5826 goto delink_debug_stmt;
5827 else if (gimple_debug_begin_stmt_p (stmt))
5828 val = GEN_RTX_DEBUG_MARKER_BEGIN_STMT_PAT ();
5829 else if (gimple_debug_inline_entry_p (stmt))
5830 {
5831 tree block = gimple_block (stmt);
5832
5833 if (block)
5834 val = GEN_RTX_DEBUG_MARKER_INLINE_ENTRY_PAT ();
5835 else
5836 goto delink_debug_stmt;
5837 }
5838 else
5839 gcc_unreachable ();
5840
5841 last = get_last_insn ();
5842
5843 set_curr_insn_location (gimple_location (stmt));
5844
5845 emit_debug_insn (val);
5846
5847 if (dump_file && (dump_flags & TDF_DETAILS))
5848 {
5849 /* We can't dump the insn with a TREE where an RTX
5850 is expected. */
5851 if (GET_CODE (val) == VAR_LOCATION)
5852 {
5853 gcc_checking_assert (PAT_VAR_LOCATION_LOC (val) == (rtx)value);
5854 PAT_VAR_LOCATION_LOC (val) = const0_rtx;
5855 }
5856 maybe_dump_rtl_for_gimple_stmt (stmt, last);
5857 if (GET_CODE (val) == VAR_LOCATION)
5858 PAT_VAR_LOCATION_LOC (val) = (rtx)value;
5859 }
5860
5861 delink_debug_stmt:
5862 /* In order not to generate too many debug temporaries,
5863 we delink all uses of debug statements we already expanded.
5864 Therefore debug statements between definition and real
5865 use of TERed SSA names will continue to use the SSA name,
5866 and not be replaced with debug temps. */
5867 delink_stmt_imm_use (stmt);
5868
5869 gsi = nsi;
5870 gsi_next (&nsi);
5871 if (gsi_end_p (nsi))
5872 break;
5873 stmt = gsi_stmt (nsi);
5874 if (!is_gimple_debug (stmt))
5875 break;
5876 }
5877
5878 set_curr_insn_location (sloc);
5879 }
5880 else
5881 {
5882 gcall *call_stmt = dyn_cast <gcall *> (stmt);
5883 if (call_stmt
5884 && gimple_call_tail_p (call_stmt)
5885 && disable_tail_calls)
5886 gimple_call_set_tail (call_stmt, false);
5887
5888 if (call_stmt && gimple_call_tail_p (call_stmt))
5889 {
5890 bool can_fallthru;
5891 new_bb = expand_gimple_tailcall (bb, call_stmt, &can_fallthru);
5892 if (new_bb)
5893 {
5894 if (can_fallthru)
5895 bb = new_bb;
5896 else
5897 return new_bb;
5898 }
5899 }
5900 else
5901 {
5902 def_operand_p def_p;
5903 def_p = SINGLE_SSA_DEF_OPERAND (stmt, SSA_OP_DEF);
5904
5905 if (def_p != NULL)
5906 {
5907 /* Ignore this stmt if it is in the list of
5908 replaceable expressions. */
5909 if (SA.values
5910 && bitmap_bit_p (SA.values,
5911 SSA_NAME_VERSION (DEF_FROM_PTR (def_p))))
5912 continue;
5913 }
5914 last = expand_gimple_stmt (stmt);
5915 maybe_dump_rtl_for_gimple_stmt (stmt, last);
5916 }
5917 }
5918 }
5919
5920 currently_expanding_gimple_stmt = NULL;
5921
5922 /* Expand implicit goto and convert goto_locus. */
5923 FOR_EACH_EDGE (e, ei, bb->succs)
5924 {
5925 if (e->goto_locus != UNKNOWN_LOCATION)
5926 set_curr_insn_location (e->goto_locus);
5927 if ((e->flags & EDGE_FALLTHRU) && e->dest != bb->next_bb)
5928 {
5929 emit_jump (label_rtx_for_bb (e->dest));
5930 e->flags &= ~EDGE_FALLTHRU;
5931 }
5932 }
5933
5934 /* Expanded RTL can create a jump in the last instruction of a block.
5935 Later this might be assumed to be a jump to the successor and break
5936 edge insertion.  We need to insert a dummy move to prevent this.  PR41440. */
5937 if (single_succ_p (bb)
5938 && (single_succ_edge (bb)->flags & EDGE_FALLTHRU)
5939 && (last = get_last_insn ())
5940 && (JUMP_P (last)
5941 || (DEBUG_INSN_P (last)
5942 && JUMP_P (prev_nondebug_insn (last)))))
5943 {
5944 rtx dummy = gen_reg_rtx (SImode);
5945 emit_insn_after_noloc (gen_move_insn (dummy, dummy), last, NULL);
5946 }
5947
5948 do_pending_stack_adjust ();
5949
5950 /* Find the block tail. The last insn in the block is the insn
5951 before a barrier and/or table jump insn. */
5952 last = get_last_insn ();
5953 if (BARRIER_P (last))
5954 last = PREV_INSN (last);
5955 if (JUMP_TABLE_DATA_P (last))
5956 last = PREV_INSN (PREV_INSN (last));
5957 if (BARRIER_P (last))
5958 last = PREV_INSN (last);
5959 BB_END (bb) = last;
5960
5961 update_bb_for_insn (bb);
5962
5963 return bb;
5964 }
5965
5966
5967 /* Create a basic block for initialization code. */
5968
5969 static basic_block
5970 construct_init_block (void)
5971 {
5972 basic_block init_block, first_block;
5973 edge e = NULL;
5974 int flags;
5975
5976 /* Multiple entry points not supported yet. */
5977 gcc_assert (EDGE_COUNT (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs) == 1);
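/* The entry and exit blocks never contain GIMPLE statements; mark them
   as RTL blocks right away.  */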
5978 init_rtl_bb_info (ENTRY_BLOCK_PTR_FOR_FN (cfun));
5979 init_rtl_bb_info (EXIT_BLOCK_PTR_FOR_FN (cfun));
5980 ENTRY_BLOCK_PTR_FOR_FN (cfun)->flags |= BB_RTL;
5981 EXIT_BLOCK_PTR_FOR_FN (cfun)->flags |= BB_RTL;
5982
5983 e = EDGE_SUCC (ENTRY_BLOCK_PTR_FOR_FN (cfun), 0);
5984
5985 /* When the entry edge points to the first basic block, we don't need a
5986 jump; otherwise we have to jump to the proper target.  */
5987 if (e && e->dest != ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb)
5988 {
5989 tree label = gimple_block_label (e->dest);
5990
5991 emit_jump (jump_target_rtx (label));
5992 flags = 0;
5993 }
5994 else
5995 flags = EDGE_FALLTHRU;
5996
5997 init_block = create_basic_block (NEXT_INSN (get_insns ()),
5998 get_last_insn (),
5999 ENTRY_BLOCK_PTR_FOR_FN (cfun));
6000 init_block->count = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count;
6001 add_bb_to_loop (init_block, ENTRY_BLOCK_PTR_FOR_FN (cfun)->loop_father);
6002 if (e)
6003 {
6004 first_block = e->dest;
6005 redirect_edge_succ (e, init_block);
6006 make_single_succ_edge (init_block, first_block, flags);
6007 }
6008 else
6009 make_single_succ_edge (init_block, EXIT_BLOCK_PTR_FOR_FN (cfun),
6010 EDGE_FALLTHRU);
6011
6012 update_bb_for_insn (init_block);
6013 return init_block;
6014 }
6015
6016 /* For each lexical block, set BLOCK_NUMBER to the depth at which it is
6017 found in the block tree. */
6018
6019 static void
6020 set_block_levels (tree block, int level)
6021 {
6022 while (block)
6023 {
6024 BLOCK_NUMBER (block) = level;
6025 set_block_levels (BLOCK_SUBBLOCKS (block), level + 1);
6026 block = BLOCK_CHAIN (block);
6027 }
6028 }
6029
6030 /* Create a block containing landing pads and similar stuff. */
6031
6032 static void
6033 construct_exit_block (void)
6034 {
6035 rtx_insn *head = get_last_insn ();
6036 rtx_insn *end;
6037 basic_block exit_block;
6038 edge e, e2;
6039 unsigned ix;
6040 edge_iterator ei;
6041 basic_block prev_bb = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
6042 rtx_insn *orig_end = BB_END (prev_bb);
6043
6044 rtl_profile_for_bb (EXIT_BLOCK_PTR_FOR_FN (cfun));
6045
6046 /* Make sure the locus is set to the end of the function, so that
6047 epilogue line numbers and warnings are set properly. */
6048 if (LOCATION_LOCUS (cfun->function_end_locus) != UNKNOWN_LOCATION)
6049 input_location = cfun->function_end_locus;
6050
6051 /* Generate rtl for function exit. */
6052 expand_function_end ();
6053
6054 end = get_last_insn ();
6055 if (head == end)
6056 return;
6057 /* While emitting the function end we may have moved the end of the
6058 last basic block.  */
6059 BB_END (prev_bb) = orig_end;
6060 while (NEXT_INSN (head) && NOTE_P (NEXT_INSN (head)))
6061 head = NEXT_INSN (head);
6062 /* But make sure exit_block starts with RETURN_LABEL, otherwise the
6063 basic block counting will be confused.  Any instructions before that
6064 label are emitted for the case where PREV_BB falls through into the
6065 exit block, so append those instructions to prev_bb in that case. */
6066 if (NEXT_INSN (head) != return_label)
6067 {
6068 while (NEXT_INSN (head) != return_label)
6069 {
6070 if (!NOTE_P (NEXT_INSN (head)))
6071 BB_END (prev_bb) = NEXT_INSN (head);
6072 head = NEXT_INSN (head);
6073 }
6074 }
6075 exit_block = create_basic_block (NEXT_INSN (head), end, prev_bb);
6076 exit_block->count = EXIT_BLOCK_PTR_FOR_FN (cfun)->count;
6077 add_bb_to_loop (exit_block, EXIT_BLOCK_PTR_FOR_FN (cfun)->loop_father);
6078
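/* Redirect every non-abnormal predecessor edge of the exit block to the
   newly created exit_block.  */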
6079 ix = 0;
6080 while (ix < EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds))
6081 {
6082 e = EDGE_PRED (EXIT_BLOCK_PTR_FOR_FN (cfun), ix);
6083 if (!(e->flags & EDGE_ABNORMAL))
6084 redirect_edge_succ (e, exit_block);
6085 else
6086 ix++;
6087 }
6088
6089 e = make_single_succ_edge (exit_block, EXIT_BLOCK_PTR_FOR_FN (cfun),
6090 EDGE_FALLTHRU);
6091 FOR_EACH_EDGE (e2, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
6092 if (e2 != e)
6093 {
6094 exit_block->count -= e2->count ();
6095 }
6096 update_bb_for_insn (exit_block);
6097 }
6098
6099 /* Helper function for discover_nonconstant_array_refs.
6100 Look for ARRAY_REF nodes with non-constant indexes and mark the
6101 base of the array addressable.  */
6102
6103 static tree
6104 discover_nonconstant_array_refs_r (tree * tp, int *walk_subtrees,
6105 void *data ATTRIBUTE_UNUSED)
6106 {
6107 tree t = *tp;
6108
6109 if (IS_TYPE_OR_DECL_P (t))
6110 *walk_subtrees = 0;
6111 else if (TREE_CODE (t) == ARRAY_REF || TREE_CODE (t) == ARRAY_RANGE_REF)
6112 {
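/* Strip handled components whose offsets are invariant, as well as
   conversions, looking for an inner ARRAY_REF with a variable index.  */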
6113 while (((TREE_CODE (t) == ARRAY_REF || TREE_CODE (t) == ARRAY_RANGE_REF)
6114 && is_gimple_min_invariant (TREE_OPERAND (t, 1))
6115 && (!TREE_OPERAND (t, 2)
6116 || is_gimple_min_invariant (TREE_OPERAND (t, 2))))
6117 || (TREE_CODE (t) == COMPONENT_REF
6118 && (!TREE_OPERAND (t,2)
6119 || is_gimple_min_invariant (TREE_OPERAND (t, 2))))
6120 || TREE_CODE (t) == BIT_FIELD_REF
6121 || TREE_CODE (t) == REALPART_EXPR
6122 || TREE_CODE (t) == IMAGPART_EXPR
6123 || TREE_CODE (t) == VIEW_CONVERT_EXPR
6124 || CONVERT_EXPR_P (t))
6125 t = TREE_OPERAND (t, 0);
6126
6127 if (TREE_CODE (t) == ARRAY_REF || TREE_CODE (t) == ARRAY_RANGE_REF)
6128 {
6129 t = get_base_address (t);
6130 if (t && DECL_P (t)
6131 && DECL_MODE (t) != BLKmode)
6132 TREE_ADDRESSABLE (t) = 1;
6133 }
6134
6135 *walk_subtrees = 0;
6136 }
6137
6138 return NULL_TREE;
6139 }
6140
6141 /* RTL expansion is not able to compile array references with variable
6142 offsets for arrays stored in a single register.  Discover such
6143 expressions and mark variables as addressable to avoid this
6144 scenario. */
6145
6146 static void
6147 discover_nonconstant_array_refs (void)
6148 {
6149 basic_block bb;
6150 gimple_stmt_iterator gsi;
6151
6152 FOR_EACH_BB_FN (bb, cfun)
6153 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
6154 {
6155 gimple *stmt = gsi_stmt (gsi);
6156 if (!is_gimple_debug (stmt))
6157 {
6158 walk_gimple_op (stmt, discover_nonconstant_array_refs_r, NULL);
6159 gcall *call = dyn_cast <gcall *> (stmt);
6160 if (call && gimple_call_internal_p (call))
6161 switch (gimple_call_internal_fn (call))
6162 {
6163 case IFN_LOAD_LANES:
6164 /* The source must be a MEM. */
6165 mark_addressable (gimple_call_arg (call, 0));
6166 break;
6167 case IFN_STORE_LANES:
6168 /* The destination must be a MEM. */
6169 mark_addressable (gimple_call_lhs (call));
6170 break;
6171 default:
6172 break;
6173 }
6174 }
6175 }
6176 }
6177
6178 /* This function sets crtl->args.internal_arg_pointer to a virtual
6179 register if DRAP is needed.  The local register allocator will replace
6180 virtual_incoming_args_rtx with the virtual register. */
6181
6182 static void
6183 expand_stack_alignment (void)
6184 {
6185 rtx drap_rtx;
6186 unsigned int preferred_stack_boundary;
6187
6188 if (! SUPPORTS_STACK_ALIGNMENT)
6189 return;
6190
6191 if (cfun->calls_alloca
6192 || cfun->has_nonlocal_label
6193 || crtl->has_nonlocal_goto)
6194 crtl->need_drap = true;
6195
6196 /* Call update_stack_boundary here again to update incoming stack
6197 boundary. It may set incoming stack alignment to a different
6198 value after RTL expansion. TARGET_FUNCTION_OK_FOR_SIBCALL may
6199 use the minimum incoming stack alignment to check if it is OK
6200 to perform sibcall optimization since sibcall optimization will
6201 only align the outgoing stack to incoming stack boundary. */
6202 if (targetm.calls.update_stack_boundary)
6203 targetm.calls.update_stack_boundary ();
6204
6205 /* The incoming stack frame has to be aligned at least at
6206 parm_stack_boundary. */
6207 gcc_assert (crtl->parm_stack_boundary <= INCOMING_STACK_BOUNDARY);
6208
6209 /* Update crtl->stack_alignment_estimated and use it later to align
6210 stack. We check PREFERRED_STACK_BOUNDARY if there may be non-call
6211 exceptions since callgraph doesn't collect incoming stack alignment
6212 in this case. */
6213 if (cfun->can_throw_non_call_exceptions
6214 && PREFERRED_STACK_BOUNDARY > crtl->preferred_stack_boundary)
6215 preferred_stack_boundary = PREFERRED_STACK_BOUNDARY;
6216 else
6217 preferred_stack_boundary = crtl->preferred_stack_boundary;
6218 if (preferred_stack_boundary > crtl->stack_alignment_estimated)
6219 crtl->stack_alignment_estimated = preferred_stack_boundary;
6220 if (preferred_stack_boundary > crtl->stack_alignment_needed)
6221 crtl->stack_alignment_needed = preferred_stack_boundary;
6222
6223 gcc_assert (crtl->stack_alignment_needed
6224 <= crtl->stack_alignment_estimated);
6225
6226 crtl->stack_realign_needed
6227 = INCOMING_STACK_BOUNDARY < crtl->stack_alignment_estimated;
6228 crtl->stack_realign_tried = crtl->stack_realign_needed;
6229
6230 crtl->stack_realign_processed = true;
6231
6232 /* Target has to redefine TARGET_GET_DRAP_RTX to support stack
6233 alignment. */
6234 gcc_assert (targetm.calls.get_drap_rtx != NULL);
6235 drap_rtx = targetm.calls.get_drap_rtx ();
6236
6237 /* stack_realign_drap and drap_rtx must match. */
6238 gcc_assert ((stack_realign_drap != 0) == (drap_rtx != NULL));
6239
6240 /* Do nothing if NULL is returned, which means DRAP is not needed. */
6241 if (drap_rtx != NULL)
6242 {
6243 crtl->args.internal_arg_pointer = drap_rtx;
6244
6245 /* Call fixup_tail_calls to clean up REG_EQUIV note if DRAP is
6246 needed. */
6247 fixup_tail_calls ();
6248 }
6249 }
6250 \f
6251
6252 static void
6253 expand_main_function (void)
6254 {
6255 #if (defined(INVOKE__main) \
6256 || (!defined(HAS_INIT_SECTION) \
6257 && !defined(INIT_SECTION_ASM_OP) \
6258 && !defined(INIT_ARRAY_SECTION_ASM_OP)))
6259 emit_library_call (init_one_libfunc (NAME__MAIN), LCT_NORMAL, VOIDmode);
6260 #endif
6261 }
6262 \f
6263
6264 /* Expand code to initialize the stack_protect_guard. This is invoked at
6265 the beginning of a function to be protected. */
6266
6267 static void
6268 stack_protect_prologue (void)
6269 {
6270 tree guard_decl = targetm.stack_protect_guard ();
6271 rtx x, y;
6272
6273 crtl->stack_protect_guard_decl = guard_decl;
6274 x = expand_normal (crtl->stack_protect_guard);
6275
6276 if (targetm.have_stack_protect_combined_set () && guard_decl)
6277 {
6278 gcc_assert (DECL_P (guard_decl));
6279 y = DECL_RTL (guard_decl);
6280
6281 /* Allow the target to compute the address of Y and copy it to X without
6282 leaking Y into a register.  This combined address + copy pattern allows
6283 the target to prevent spilling of any intermediate results by splitting
6284 it after register allocation.  */
6285 if (rtx_insn *insn = targetm.gen_stack_protect_combined_set (x, y))
6286 {
6287 emit_insn (insn);
6288 return;
6289 }
6290 }
6291
6292 if (guard_decl)
6293 y = expand_normal (guard_decl);
6294 else
6295 y = const0_rtx;
6296
6297 /* Allow the target to copy from Y to X without leaking Y into a
6298 register. */
6299 if (targetm.have_stack_protect_set ())
6300 if (rtx_insn *insn = targetm.gen_stack_protect_set (x, y))
6301 {
6302 emit_insn (insn);
6303 return;
6304 }
6305
6306 /* Otherwise do a straight move. */
6307 emit_move_insn (x, y);
6308 }
6309
6310 /* Translate the intermediate representation contained in the CFG
6311 from GIMPLE trees to RTL.
6312
6313 We do conversion per basic block and preserve/update the tree CFG.
6314 This implies we have to do some magic as the CFG can simultaneously
6315 consist of basic blocks containing RTL and GIMPLE trees. This can
6316 confuse the CFG hooks, so be careful to not manipulate CFG during
6317 the expansion. */
6318
6319 namespace {
6320
6321 const pass_data pass_data_expand =
6322 {
6323 RTL_PASS, /* type */
6324 "expand", /* name */
6325 OPTGROUP_NONE, /* optinfo_flags */
6326 TV_EXPAND, /* tv_id */
6327 ( PROP_ssa | PROP_gimple_leh | PROP_cfg
6328 | PROP_gimple_lcx
6329 | PROP_gimple_lvec
6330 | PROP_gimple_lva), /* properties_required */
6331 PROP_rtl, /* properties_provided */
6332 ( PROP_ssa | PROP_trees ), /* properties_destroyed */
6333 0, /* todo_flags_start */
6334 0, /* todo_flags_finish */
6335 };
6336
6337 class pass_expand : public rtl_opt_pass
6338 {
6339 public:
6340 pass_expand (gcc::context *ctxt)
6341 : rtl_opt_pass (pass_data_expand, ctxt)
6342 {}
6343
6344 /* opt_pass methods: */
6345 virtual unsigned int execute (function *);
6346
6347 }; // class pass_expand
6348
6349 unsigned int
6350 pass_expand::execute (function *fun)
6351 {
6352 basic_block bb, init_block;
6353 edge_iterator ei;
6354 edge e;
6355 rtx_insn *var_seq, *var_ret_seq;
6356 unsigned i;
6357
6358 timevar_push (TV_OUT_OF_SSA);
6359 rewrite_out_of_ssa (&SA);
6360 timevar_pop (TV_OUT_OF_SSA);
6361 SA.partition_to_pseudo = XCNEWVEC (rtx, SA.map->num_partitions);
6362
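/* With TER enabled, debug bind stmts may refer to deeply nested TERed
   expressions; pre-process them so such expressions are not expanded
   wholesale into debug locations (see avoid_deep_ter_for_debug).  */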
6363 if (MAY_HAVE_DEBUG_BIND_STMTS && flag_tree_ter)
6364 {
6365 gimple_stmt_iterator gsi;
6366 FOR_EACH_BB_FN (bb, cfun)
6367 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
6368 if (gimple_debug_bind_p (gsi_stmt (gsi)))
6369 avoid_deep_ter_for_debug (gsi_stmt (gsi), 0);
6370 }
6371
6372 /* Mark arrays indexed with non-constant indices with TREE_ADDRESSABLE. */
6373 discover_nonconstant_array_refs ();
6374
6375 /* Make sure all values used by the optimization passes have sane
6376 defaults. */
6377 reg_renumber = 0;
6378
6379 /* Some backends want to know that we are expanding to RTL. */
6380 currently_expanding_to_rtl = 1;
6381 /* Dominators are not kept up-to-date as we may create new basic-blocks. */
6382 free_dominance_info (CDI_DOMINATORS);
6383
6384 rtl_profile_for_bb (ENTRY_BLOCK_PTR_FOR_FN (fun));
6385
6386 insn_locations_init ();
6387 if (!DECL_IS_BUILTIN (current_function_decl))
6388 {
6389 /* Eventually, all FEs should explicitly set function_start_locus. */
6390 if (LOCATION_LOCUS (fun->function_start_locus) == UNKNOWN_LOCATION)
6391 set_curr_insn_location
6392 (DECL_SOURCE_LOCATION (current_function_decl));
6393 else
6394 set_curr_insn_location (fun->function_start_locus);
6395 }
6396 else
6397 set_curr_insn_location (UNKNOWN_LOCATION);
6398 prologue_location = curr_insn_location ();
6399
6400 #ifdef INSN_SCHEDULING
6401 init_sched_attrs ();
6402 #endif
6403
6404 /* Make sure the first insn is a note even if we don't want linenums.
6405 This makes sure the first insn will never be deleted.
6406 Also, final expects a note to appear there. */
6407 emit_note (NOTE_INSN_DELETED);
6408
6409 targetm.expand_to_rtl_hook ();
6410 crtl->init_stack_alignment ();
6411 fun->cfg->max_jumptable_ents = 0;
6412
6413 /* Resolve the function section.  Some targets, like ARM EABI, rely on knowledge
6414 of the function section at expansion time to predict the distance of calls.  */
6415 resolve_unique_section (current_function_decl, 0, flag_function_sections);
6416
6417 /* Expand the variables recorded during gimple lowering. */
6418 timevar_push (TV_VAR_EXPAND);
6419 start_sequence ();
6420
6421 var_ret_seq = expand_used_vars ();
6422
6423 var_seq = get_insns ();
6424 end_sequence ();
6425 timevar_pop (TV_VAR_EXPAND);
6426
6427 /* Honor stack protection warnings. */
6428 if (warn_stack_protect)
6429 {
6430 if (fun->calls_alloca)
6431 warning (OPT_Wstack_protector,
6432 "stack protector not protecting local variables: "
6433 "variable length buffer");
6434 if (has_short_buffer && !crtl->stack_protect_guard)
6435 warning (OPT_Wstack_protector,
6436 "stack protector not protecting function: "
6437 "all local arrays are less than %d bytes long",
6438 (int) PARAM_VALUE (PARAM_SSP_BUFFER_SIZE));
6439 }
6440
6441 /* Set up parameters and prepare for return, for the function. */
6442 expand_function_start (current_function_decl);
6443
6444 /* If we emitted any instructions for setting up the variables,
6445 emit them before the FUNCTION_START note. */
6446 if (var_seq)
6447 {
6448 emit_insn_before (var_seq, parm_birth_insn);
6449
6450 /* In expand_function_end we'll insert the alloca save/restore
6451 before parm_birth_insn.  We've just inserted an alloca call.
6452 Adjust the pointer to match. */
6453 parm_birth_insn = var_seq;
6454 }
6455
6456 /* Now propagate the RTL assignment of each partition to the
6457 underlying var of each SSA_NAME. */
6458 tree name;
6459
6460 FOR_EACH_SSA_NAME (i, name, cfun)
6461 {
6462 /* We might have generated new SSA names in
6463 update_alias_info_with_stack_vars. They will have a NULL
6464 defining statement, and won't be part of the partitioning,
6465 so ignore those. */
6466 if (!SSA_NAME_DEF_STMT (name))
6467 continue;
6468
6469 adjust_one_expanded_partition_var (name);
6470 }
6471
6472 /* Clean up RTL of variables that straddle multiple
6473 partitions, and check that the rtl of any PARM_DECLs that are not
6474 cleaned up is that of their default defs. */
6475 FOR_EACH_SSA_NAME (i, name, cfun)
6476 {
6477 int part;
6478
6479 /* We might have generated new SSA names in
6480 update_alias_info_with_stack_vars. They will have a NULL
6481 defining statement, and won't be part of the partitioning,
6482 so ignore those. */
6483 if (!SSA_NAME_DEF_STMT (name))
6484 continue;
6485 part = var_to_partition (SA.map, name);
6486 if (part == NO_PARTITION)
6487 continue;
6488
6489 /* If this decl was marked as living in multiple places, reset
6490 this now to NULL. */
6491 tree var = SSA_NAME_VAR (name);
6492 if (var && DECL_RTL_IF_SET (var) == pc_rtx)
6493 SET_DECL_RTL (var, NULL);
6494 /* Check that the pseudos chosen by assign_parms are those of
6495 the corresponding default defs. */
6496 else if (SSA_NAME_IS_DEFAULT_DEF (name)
6497 && (TREE_CODE (var) == PARM_DECL
6498 || TREE_CODE (var) == RESULT_DECL))
6499 {
6500 rtx in = DECL_RTL_IF_SET (var);
6501 gcc_assert (in);
6502 rtx out = SA.partition_to_pseudo[part];
6503 gcc_assert (in == out);
6504
6505 /* Now reset VAR's RTL to IN, so that the _EXPR attrs match
6506 those expected by debug backends for each parm and for
6507 the result. This is particularly important for stabs,
6508 whose register elimination from parm's DECL_RTL may cause
6509 -fcompare-debug differences as SET_DECL_RTL changes reg's
6510 attrs. So, make sure the RTL already has the parm as the
6511 EXPR, so that it won't change. */
6512 SET_DECL_RTL (var, NULL_RTX);
6513 if (MEM_P (in))
6514 set_mem_attributes (in, var, true);
6515 SET_DECL_RTL (var, in);
6516 }
6517 }
6518
6519 /* If this function is `main', emit a call to `__main'
6520 to run global initializers, etc. */
6521 if (DECL_NAME (current_function_decl)
6522 && MAIN_NAME_P (DECL_NAME (current_function_decl))
6523 && DECL_FILE_SCOPE_P (current_function_decl))
6524 expand_main_function ();
6525
6526 /* Initialize the stack_protect_guard field. This must happen after the
6527 call to __main (if any) so that the external decl is initialized. */
6528 if (crtl->stack_protect_guard && targetm.stack_protect_runtime_enabled_p ())
6529 stack_protect_prologue ();
6530
6531 expand_phi_nodes (&SA);
6532
6533 /* Release any stale SSA redirection data. */
6534 redirect_edge_var_map_empty ();
6535
6536 /* Register rtl specific functions for cfg. */
6537 rtl_register_cfg_hooks ();
6538
6539 init_block = construct_init_block ();
6540
6541 /* Clear EDGE_EXECUTABLE on the entry edge(s). It is cleaned from the
6542 remaining edges later. */
6543 FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR_FOR_FN (fun)->succs)
6544 e->flags &= ~EDGE_EXECUTABLE;
6545
6546 /* If the function has too many markers, drop them while expanding. */
6547 if (cfun->debug_marker_count
6548 >= PARAM_VALUE (PARAM_MAX_DEBUG_MARKER_COUNT))
6549 cfun->debug_nonbind_markers = false;
6550
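/* Labels for blocks that have not been expanded yet are created lazily
   when jumps to them are expanded; this map remembers them so that
   expand_gimple_basic_block can emit them at the start of the block.  */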
6551 lab_rtx_for_bb = new hash_map<basic_block, rtx_code_label *>;
6552 FOR_BB_BETWEEN (bb, init_block->next_bb, EXIT_BLOCK_PTR_FOR_FN (fun),
6553 next_bb)
6554 bb = expand_gimple_basic_block (bb, var_ret_seq != NULL_RTX);
6555
6556 if (MAY_HAVE_DEBUG_BIND_INSNS)
6557 expand_debug_locations ();
6558
6559 if (deep_ter_debug_map)
6560 {
6561 delete deep_ter_debug_map;
6562 deep_ter_debug_map = NULL;
6563 }
6564
6565 /* Free stuff we no longer need after GIMPLE optimizations. */
6566 free_dominance_info (CDI_DOMINATORS);
6567 free_dominance_info (CDI_POST_DOMINATORS);
6568 delete_tree_cfg_annotations (fun);
6569
6570 timevar_push (TV_OUT_OF_SSA);
6571 finish_out_of_ssa (&SA);
6572 timevar_pop (TV_OUT_OF_SSA);
6573
6574 timevar_push (TV_POST_EXPAND);
6575 /* We are no longer in SSA form. */
6576 fun->gimple_df->in_ssa_p = false;
6577 loops_state_clear (LOOP_CLOSED_SSA);
6578
6579 /* Expansion is used by optimization passes too; set maybe_hot_insn_p
6580 conservatively to true until they are all profile aware.  */
6581 delete lab_rtx_for_bb;
6582 free_histograms (fun);
6583
6584 construct_exit_block ();
6585 insn_locations_finalize ();
6586
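/* If expand_used_vars produced a sequence to be run at function return,
   emit it right after the return label (past the basic block note, if
   any).  */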
6587 if (var_ret_seq)
6588 {
6589 rtx_insn *after = return_label;
6590 rtx_insn *next = NEXT_INSN (after);
6591 if (next && NOTE_INSN_BASIC_BLOCK_P (next))
6592 after = next;
6593 emit_insn_after (var_ret_seq, after);
6594 }
6595
6596 /* Zap the tree EH table. */
6597 set_eh_throw_stmt_table (fun, NULL);
6598
6599 /* We need JUMP_LABEL to be set in order to redirect jumps, and hence
6600 to split edges, which edge insertion might do.  */
6601 rebuild_jump_labels (get_insns ());
6602
6603 /* If we have a single successor to the entry block, put the pending insns
6604 after parm birth, but before NOTE_INSN_FUNCTION_BEG.  */
6605 if (single_succ_p (ENTRY_BLOCK_PTR_FOR_FN (fun)))
6606 {
6607 edge e = single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (fun));
6608 if (e->insns.r)
6609 {
6610 rtx_insn *insns = e->insns.r;
6611 e->insns.r = NULL;
6612 rebuild_jump_labels_chain (insns);
6613 if (NOTE_P (parm_birth_insn)
6614 && NOTE_KIND (parm_birth_insn) == NOTE_INSN_FUNCTION_BEG)
6615 emit_insn_before_noloc (insns, parm_birth_insn, e->dest);
6616 else
6617 emit_insn_after_noloc (insns, parm_birth_insn, e->dest);
6618 }
6619 }
6620
6621 /* Otherwise, as well as for other edges, take the usual way. */
6622 commit_edge_insertions ();
6623
6624 /* We're done expanding trees to RTL. */
6625 currently_expanding_to_rtl = 0;
6626
6627 flush_mark_addressable_queue ();
6628
6629 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (fun)->next_bb,
6630 EXIT_BLOCK_PTR_FOR_FN (fun), next_bb)
6631 {
6632 edge e;
6633 edge_iterator ei;
6634 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
6635 {
6636 /* Clear EDGE_EXECUTABLE. This flag is never used in the backend. */
6637 e->flags &= ~EDGE_EXECUTABLE;
6638
6639 /* At the moment not all abnormal edges match the RTL
6640 representation. It is safe to remove them here as
6641 find_many_sub_basic_blocks will rediscover them.
6642 In the future we should get this fixed properly. */
6643 if ((e->flags & EDGE_ABNORMAL)
6644 && !(e->flags & EDGE_SIBCALL))
6645 remove_edge (e);
6646 else
6647 ei_next (&ei);
6648 }
6649 }
6650
6651 auto_sbitmap blocks (last_basic_block_for_fn (fun));
6652 bitmap_ones (blocks);
6653 find_many_sub_basic_blocks (blocks);
6654 purge_all_dead_edges ();
6655
6656 /* After initial rtl generation, call back to finish generating
6657 exception support code. We need to do this before cleaning up
6658 the CFG as the code does not expect dead landing pads. */
6659 if (fun->eh->region_tree != NULL)
6660 finish_eh_generation ();
6661
6662 /* Call expand_stack_alignment after finishing all
6663 updates to crtl->preferred_stack_boundary. */
6664 expand_stack_alignment ();
6665
6666 /* Fixup REG_EQUIV notes in the prologue if there are tailcalls in this
6667 function. */
6668 if (crtl->tail_call_emit)
6669 fixup_tail_calls ();
6670
6671 /* BB subdivision may have created basic blocks that are only reachable
6672 from unlikely bbs but not marked as such in the profile. */
6673 if (optimize)
6674 propagate_unlikely_bbs_forward ();
6675
6676 /* Remove unreachable blocks, otherwise we cannot compute dominators
6677 which are needed for loop state verification. As a side-effect
6678 this also compacts blocks.
6679 ??? We cannot remove trivially dead insns here as for example
6680 the DRAP reg on i?86 is not magically live at this point.
6681 gcc.c-torture/execute/ipa-sra-2.c execution, -Os -m32 fails otherwise. */
6682 cleanup_cfg (CLEANUP_NO_INSN_DEL);
6683
6684 checking_verify_flow_info ();
6685
6686 /* Initialize pseudos allocated for hard registers. */
6687 emit_initial_value_sets ();
6688
6689 /* And finally unshare all RTL. */
6690 unshare_all_rtl ();
6691
6692 /* There's no need to defer outputting this function any more; we
6693 know we want to output it. */
6694 DECL_DEFER_OUTPUT (current_function_decl) = 0;
6695
6696 /* Now that we're done expanding trees to RTL, we shouldn't have any
6697 more CONCATs anywhere. */
6698 generating_concat_p = 0;
6699
6700 if (dump_file)
6701 {
6702 fprintf (dump_file,
6703 "\n\n;;\n;; Full RTL generated for this function:\n;;\n");
6704 /* And the pass manager will dump RTL for us. */
6705 }
6706
6707 /* If we're emitting a nested function, make sure its parent gets
6708 emitted as well. Doing otherwise confuses debug info. */
6709 {
6710 tree parent;
6711 for (parent = DECL_CONTEXT (current_function_decl);
6712 parent != NULL_TREE;
6713 parent = get_containing_scope (parent))
6714 if (TREE_CODE (parent) == FUNCTION_DECL)
6715 TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (parent)) = 1;
6716 }
6717
6718 TREE_ASM_WRITTEN (current_function_decl) = 1;
6719
6720 /* After expanding, the return labels are no longer needed. */
6721 return_label = NULL;
6722 naked_return_label = NULL;
6723
6724 /* After expanding, the tm_restart map is no longer needed. */
6725 if (fun->gimple_df->tm_restart)
6726 fun->gimple_df->tm_restart = NULL;
6727
6728 /* Tag the blocks with a depth number so that change_scope can find
6729 the common parent easily. */
6730 set_block_levels (DECL_INITIAL (fun->decl), 0);
6731 default_rtl_profile ();
6732
6733 /* For -dx discard loops now, otherwise IL verify in clean_state will
6734 ICE. */
6735 if (rtl_dump_and_exit)
6736 {
6737 cfun->curr_properties &= ~PROP_loops;
6738 loop_optimizer_finalize ();
6739 }
6740
6741 timevar_pop (TV_POST_EXPAND);
6742
6743 return 0;
6744 }
6745
6746 } // anon namespace
6747
6748 rtl_opt_pass *
6749 make_pass_expand (gcc::context *ctxt)
6750 {
6751 return new pass_expand (ctxt);
6752 }