dwarf2cfi.c (DW_STACK_POINTER_REGNUM): New.
[gcc.git] / gcc / dwarf2cfi.c
1 /* Dwarf2 Call Frame Information helper routines.
2 Copyright (C) 1992, 1993, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
3 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
4 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "version.h"
27 #include "flags.h"
28 #include "rtl.h"
29 #include "function.h"
30 #include "dwarf2.h"
31 #include "dwarf2out.h"
32 #include "dwarf2asm.h"
33 #include "ggc.h"
34 #include "tm_p.h"
35 #include "target.h"
36 #include "common/common-target.h"
37 #include "tree-pass.h"
38
39 #include "except.h" /* expand_builtin_dwarf_sp_column */
40 #include "expr.h" /* init_return_column_size */
41 #include "regs.h" /* expand_builtin_init_dwarf_reg_sizes */
42 #include "output.h" /* asm_out_file */
43 #include "debug.h" /* dwarf2out_do_frame, dwarf2out_do_cfi_asm */
44
45
46 /* ??? Poison these here until it can be done generically. They've been
47 totally replaced in this file; make sure it stays that way. */
48 #undef DWARF2_UNWIND_INFO
49 #undef DWARF2_FRAME_INFO
50 #if (GCC_VERSION >= 3000)
51 #pragma GCC poison DWARF2_UNWIND_INFO DWARF2_FRAME_INFO
52 #endif
53
54 #ifndef INCOMING_RETURN_ADDR_RTX
55 #define INCOMING_RETURN_ADDR_RTX (gcc_unreachable (), NULL_RTX)
56 #endif
57
58 /* Maximum size (in bytes) of an artificially generated label. */
59 #define MAX_ARTIFICIAL_LABEL_BYTES 30
60
61 /* Short-hand for commonly used register numbers. */
62 #define DW_STACK_POINTER_REGNUM dwarf_frame_regnum (STACK_POINTER_REGNUM)
63 #define DW_FRAME_POINTER_REGNUM dwarf_frame_regnum (HARD_FRAME_POINTER_REGNUM)
64 \f
/* A vector of call frame insns for the CIE.  */
cfi_vec cie_cfi_vec;

/* Source of numbers for the artificial "LCFI" labels generated by
   dwarf2out_cfi_label.  */
static GTY(()) unsigned long dwarf2out_cfi_label_num;

/* The insn after which a new CFI note should be emitted.  */
static rtx cfi_insn;

/* When non-null, add_cfi will add the CFI to this vector.  */
static cfi_vec *add_cfi_vec;

/* True if remember_state should be emitted before following CFI directive.  */
static bool emit_cfa_remember;

/* True if any CFI directives were emitted at the current insn.  */
static bool any_cfis_emitted;
81 \f
82
83 static void dwarf2out_cfi_begin_epilogue (rtx insn);
84 static void dwarf2out_frame_debug_restore_state (void);
85
86 \f
87 /* Hook used by __throw. */
88
89 rtx
90 expand_builtin_dwarf_sp_column (void)
91 {
92 unsigned int dwarf_regnum = DW_STACK_POINTER_REGNUM;
93 return GEN_INT (DWARF2_FRAME_REG_OUT (dwarf_regnum, 1));
94 }
95
96 /* MEM is a memory reference for the register size table, each element of
97 which has mode MODE. Initialize column C as a return address column. */
98
99 static void
100 init_return_column_size (enum machine_mode mode, rtx mem, unsigned int c)
101 {
102 HOST_WIDE_INT offset = c * GET_MODE_SIZE (mode);
103 HOST_WIDE_INT size = GET_MODE_SIZE (Pmode);
104 emit_move_insn (adjust_address (mem, mode, offset), GEN_INT (size));
105 }
106
107 /* Generate code to initialize the register size table. */
108
void
expand_builtin_init_dwarf_reg_sizes (tree address)
{
  unsigned int i;
  enum machine_mode mode = TYPE_MODE (char_type_node);
  rtx addr = expand_normal (address);
  rtx mem = gen_rtx_MEM (BLKmode, addr);
  bool wrote_return_column = false;

  /* For each hard register, store the size of its save slot into the
     table element indexed by its output-mapped DWARF column.  */
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
      unsigned int rnum = DWARF2_FRAME_REG_OUT (dwarf_frame_regnum (i), 1);

      if (rnum < DWARF_FRAME_REGISTERS)
	{
	  HOST_WIDE_INT offset = rnum * GET_MODE_SIZE (mode);
	  enum machine_mode save_mode = reg_raw_mode[i];
	  HOST_WIDE_INT size;

	  /* If calls clobber only part of the register, size the slot
	     for just the part that needs saving.  */
	  if (HARD_REGNO_CALL_PART_CLOBBERED (i, save_mode))
	    save_mode = choose_hard_reg_mode (i, 1, true);
	  if (dwarf_frame_regnum (i) == DWARF_FRAME_RETURN_COLUMN)
	    {
	      /* VOIDmode means no register maps here; leave the return
		 column for the fixup after the loop.  */
	      if (save_mode == VOIDmode)
		continue;
	      wrote_return_column = true;
	    }
	  size = GET_MODE_SIZE (save_mode);
	  if (offset < 0)
	    continue;

	  emit_move_insn (adjust_address (mem, mode, offset),
			  gen_int_mode (size, mode));
	}
    }

  /* Guarantee the return-address column always has a size, even if no
     hard register was mapped onto it above.  */
  if (!wrote_return_column)
    init_return_column_size (mode, mem, DWARF_FRAME_RETURN_COLUMN);

#ifdef DWARF_ALT_FRAME_RETURN_COLUMN
  init_return_column_size (mode, mem, DWARF_ALT_FRAME_RETURN_COLUMN);
#endif

  /* Give the target a chance to record additional columns.  */
  targetm.init_dwarf_reg_sizes_extra (address);
}
154
155 /* Divide OFF by DWARF_CIE_DATA_ALIGNMENT, asserting no remainder. */
156
157 static inline HOST_WIDE_INT
158 div_data_align (HOST_WIDE_INT off)
159 {
160 HOST_WIDE_INT r = off / DWARF_CIE_DATA_ALIGNMENT;
161 gcc_assert (r * DWARF_CIE_DATA_ALIGNMENT == off);
162 return r;
163 }
164
165 /* Return true if we need a signed version of a given opcode
166 (e.g. DW_CFA_offset_extended_sf vs DW_CFA_offset_extended). */
167
168 static inline bool
169 need_data_align_sf_opcode (HOST_WIDE_INT off)
170 {
171 return DWARF_CIE_DATA_ALIGNMENT < 0 ? off > 0 : off < 0;
172 }
173
174 /* Return a pointer to a newly allocated Call Frame Instruction. */
175
176 static inline dw_cfi_ref
177 new_cfi (void)
178 {
179 dw_cfi_ref cfi = ggc_alloc_dw_cfi_node ();
180
181 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = 0;
182 cfi->dw_cfi_oprnd2.dw_cfi_reg_num = 0;
183
184 return cfi;
185 }
186
187 /* Generate a new label for the CFI info to refer to. */
188
189 static char *
190 dwarf2out_cfi_label (void)
191 {
192 int num = dwarf2out_cfi_label_num++;
193 char label[20];
194
195 ASM_GENERATE_INTERNAL_LABEL (label, "LCFI", num);
196
197 return xstrdup (label);
198 }
199
200 /* Add CFI either to the current insn stream or to a vector, or both. */
201
static void
add_cfi (dw_cfi_ref cfi)
{
  /* A pending remember request must be flushed before CFI so that the
     remembered state is the one in effect at the request point.  */
  if (emit_cfa_remember)
    {
      dw_cfi_ref cfi_remember;

      /* Emit the state save.  Clear the flag before recursing so the
	 nested add_cfi call does not trigger another remember.  */
      emit_cfa_remember = false;
      cfi_remember = new_cfi ();
      cfi_remember->dw_cfi_opc = DW_CFA_remember_state;
      add_cfi (cfi_remember);
    }

  any_cfis_emitted = true;
  /* Attach the CFI as a note after the current insn...  */
  if (cfi_insn != NULL)
    {
      cfi_insn = emit_note_after (NOTE_INSN_CFI, cfi_insn);
      NOTE_CFI (cfi_insn) = cfi;
    }
  /* ... and/or collect it into a vector (e.g. while building the CIE).  */
  if (add_cfi_vec != NULL)
    VEC_safe_push (dw_cfi_ref, gc, *add_cfi_vec, cfi);
}
225
/* This function fills in a dw_cfa_location structure from a dwarf location
   descriptor sequence.  */
228
static void
get_cfa_from_loc_descr (dw_cfa_location *cfa, struct dw_loc_descr_struct *loc)
{
  struct dw_loc_descr_struct *ptr;
  cfa->offset = 0;
  cfa->base_offset = 0;
  cfa->indirect = 0;
  cfa->reg = -1;

  /* Walk the expression; only the simple register/offset/deref shapes
     produced by build_cfa_loc are accepted (anything else aborts).  */
  for (ptr = loc; ptr != NULL; ptr = ptr->dw_loc_next)
    {
      enum dwarf_location_atom op = ptr->dw_loc_opc;

      switch (op)
	{
	/* DW_OP_reg<n>: the CFA is register N itself.  */
	case DW_OP_reg0:
	case DW_OP_reg1:
	case DW_OP_reg2:
	case DW_OP_reg3:
	case DW_OP_reg4:
	case DW_OP_reg5:
	case DW_OP_reg6:
	case DW_OP_reg7:
	case DW_OP_reg8:
	case DW_OP_reg9:
	case DW_OP_reg10:
	case DW_OP_reg11:
	case DW_OP_reg12:
	case DW_OP_reg13:
	case DW_OP_reg14:
	case DW_OP_reg15:
	case DW_OP_reg16:
	case DW_OP_reg17:
	case DW_OP_reg18:
	case DW_OP_reg19:
	case DW_OP_reg20:
	case DW_OP_reg21:
	case DW_OP_reg22:
	case DW_OP_reg23:
	case DW_OP_reg24:
	case DW_OP_reg25:
	case DW_OP_reg26:
	case DW_OP_reg27:
	case DW_OP_reg28:
	case DW_OP_reg29:
	case DW_OP_reg30:
	case DW_OP_reg31:
	  cfa->reg = op - DW_OP_reg0;
	  break;
	case DW_OP_regx:
	  /* Register number too large for the compact opcodes.  */
	  cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
	  break;
	/* DW_OP_breg<n>: base register N plus a signed offset.  */
	case DW_OP_breg0:
	case DW_OP_breg1:
	case DW_OP_breg2:
	case DW_OP_breg3:
	case DW_OP_breg4:
	case DW_OP_breg5:
	case DW_OP_breg6:
	case DW_OP_breg7:
	case DW_OP_breg8:
	case DW_OP_breg9:
	case DW_OP_breg10:
	case DW_OP_breg11:
	case DW_OP_breg12:
	case DW_OP_breg13:
	case DW_OP_breg14:
	case DW_OP_breg15:
	case DW_OP_breg16:
	case DW_OP_breg17:
	case DW_OP_breg18:
	case DW_OP_breg19:
	case DW_OP_breg20:
	case DW_OP_breg21:
	case DW_OP_breg22:
	case DW_OP_breg23:
	case DW_OP_breg24:
	case DW_OP_breg25:
	case DW_OP_breg26:
	case DW_OP_breg27:
	case DW_OP_breg28:
	case DW_OP_breg29:
	case DW_OP_breg30:
	case DW_OP_breg31:
	  cfa->reg = op - DW_OP_breg0;
	  cfa->base_offset = ptr->dw_loc_oprnd1.v.val_int;
	  break;
	case DW_OP_bregx:
	  cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
	  cfa->base_offset = ptr->dw_loc_oprnd2.v.val_int;
	  break;
	case DW_OP_deref:
	  /* The CFA is loaded from memory at the address so far.  */
	  cfa->indirect = 1;
	  break;
	case DW_OP_plus_uconst:
	  cfa->offset = ptr->dw_loc_oprnd1.v.val_unsigned;
	  break;
	default:
	  gcc_unreachable ();
	}
    }
}
331
332 /* Find the previous value for the CFA, iteratively. CFI is the opcode
333 to interpret, *LOC will be updated as necessary, *REMEMBER is used for
334 one level of remember/restore state processing. */
335
void
lookup_cfa_1 (dw_cfi_ref cfi, dw_cfa_location *loc, dw_cfa_location *remember)
{
  switch (cfi->dw_cfi_opc)
    {
    case DW_CFA_def_cfa_offset:
    case DW_CFA_def_cfa_offset_sf:
      /* Only the offset changes; the CFA register stays the same.  */
      loc->offset = cfi->dw_cfi_oprnd1.dw_cfi_offset;
      break;
    case DW_CFA_def_cfa_register:
      /* Only the base register changes; the offset stays the same.  */
      loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
      break;
    case DW_CFA_def_cfa:
    case DW_CFA_def_cfa_sf:
      /* Both register and offset are redefined.  */
      loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
      loc->offset = cfi->dw_cfi_oprnd2.dw_cfi_offset;
      break;
    case DW_CFA_def_cfa_expression:
      /* Decode the location expression back into register/offset form.  */
      get_cfa_from_loc_descr (loc, cfi->dw_cfi_oprnd1.dw_cfi_loc);
      break;

    case DW_CFA_remember_state:
      /* Only one level of remember/restore nesting is supported.  */
      gcc_assert (!remember->in_use);
      *remember = *loc;
      remember->in_use = 1;
      break;
    case DW_CFA_restore_state:
      gcc_assert (remember->in_use);
      *loc = *remember;
      remember->in_use = 0;
      break;

    default:
      /* Other opcodes do not affect the CFA.  */
      break;
    }
}
372
/* The current rule for calculating the DWARF2 canonical frame address.  */
static dw_cfa_location cfa;

/* A copy of the CFA, for comparison purposes; def_cfa_1 uses it to
   suppress redundant call frame instructions.  */
static dw_cfa_location old_cfa;

/* The register used for saving registers to the stack, and its offset
   from the CFA.  */
static dw_cfa_location cfa_store;

/* The current save location around an epilogue.  */
static dw_cfa_location cfa_remember;

/* Like cfa_remember, but a copy of old_cfa.  */
static dw_cfa_location old_cfa_remember;

/* The running total of the size of arguments pushed onto the stack.  */
static HOST_WIDE_INT args_size;

/* The last args_size we actually output.  */
static HOST_WIDE_INT old_args_size;
394
395 /* Determine if two dw_cfa_location structures define the same data. */
396
397 bool
398 cfa_equal_p (const dw_cfa_location *loc1, const dw_cfa_location *loc2)
399 {
400 return (loc1->reg == loc2->reg
401 && loc1->offset == loc2->offset
402 && loc1->indirect == loc2->indirect
403 && (loc1->indirect == 0
404 || loc1->base_offset == loc2->base_offset));
405 }
406
407 /* This routine does the actual work. The CFA is now calculated from
408 the dw_cfa_location structure. */
409
static void
def_cfa_1 (dw_cfa_location *loc_p)
{
  dw_cfi_ref cfi;
  dw_cfa_location loc;

  cfa = *loc_p;
  loc = *loc_p;

  /* Keep cfa_store's offset in sync while it tracks the same base
     register as the CFA.  */
  if (cfa_store.reg == loc.reg && loc.indirect == 0)
    cfa_store.offset = loc.offset;

  /* If nothing changed, no need to issue any call frame instructions.  */
  if (cfa_equal_p (&loc, &old_cfa))
    return;

  cfi = new_cfi ();

  if (loc.reg == old_cfa.reg && !loc.indirect && !old_cfa.indirect)
    {
      /* Construct a "DW_CFA_def_cfa_offset <offset>" instruction, indicating
	 the CFA register did not change but the offset did.  The data
	 factoring for DW_CFA_def_cfa_offset_sf happens in output_cfi, or
	 in the assembler via the .cfi_def_cfa_offset directive.  */
      if (loc.offset < 0)
	cfi->dw_cfi_opc = DW_CFA_def_cfa_offset_sf;
      else
	cfi->dw_cfi_opc = DW_CFA_def_cfa_offset;
      cfi->dw_cfi_oprnd1.dw_cfi_offset = loc.offset;
    }

#ifndef MIPS_DEBUGGING_INFO  /* SGI dbx thinks this means no offset.  */
  else if (loc.offset == old_cfa.offset
	   && old_cfa.reg != INVALID_REGNUM
	   && !loc.indirect
	   && !old_cfa.indirect)
    {
      /* Construct a "DW_CFA_def_cfa_register <register>" instruction,
	 indicating the CFA register has changed to <register> but the
	 offset has not changed.  */
      cfi->dw_cfi_opc = DW_CFA_def_cfa_register;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = loc.reg;
    }
#endif

  else if (loc.indirect == 0)
    {
      /* Construct a "DW_CFA_def_cfa <register> <offset>" instruction,
	 indicating the CFA register has changed to <register> with
	 the specified offset.  The data factoring for DW_CFA_def_cfa_sf
	 happens in output_cfi, or in the assembler via the .cfi_def_cfa
	 directive.  */
      if (loc.offset < 0)
	cfi->dw_cfi_opc = DW_CFA_def_cfa_sf;
      else
	cfi->dw_cfi_opc = DW_CFA_def_cfa;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = loc.reg;
      cfi->dw_cfi_oprnd2.dw_cfi_offset = loc.offset;
    }
  else
    {
      /* Construct a DW_CFA_def_cfa_expression instruction to
	 calculate the CFA using a full location expression since no
	 register-offset pair is available.  */
      struct dw_loc_descr_struct *loc_list;

      cfi->dw_cfi_opc = DW_CFA_def_cfa_expression;
      loc_list = build_cfa_loc (&loc, 0);
      cfi->dw_cfi_oprnd1.dw_cfi_loc = loc_list;
    }

  add_cfi (cfi);
  /* Remember what we emitted so the next call can detect no-ops.  */
  old_cfa = loc;
}
484
485 /* Add the CFI for saving a register. REG is the CFA column number.
486 If SREG is -1, the register is saved at OFFSET from the CFA;
487 otherwise it is saved in SREG. */
488
static void
reg_save (unsigned int reg, unsigned int sreg, HOST_WIDE_INT offset)
{
  dw_fde_ref fde = cfun ? cfun->fde : NULL;
  dw_cfi_ref cfi = new_cfi ();

  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;

  /* When stack is aligned, store REG using DW_CFA_expression with FP.  */
  if (fde
      && fde->stack_realign
      && sreg == INVALID_REGNUM)
    {
      /* A realigned stack cannot express the slot as CFA + offset, so
	 emit a full location expression instead.  */
      cfi->dw_cfi_opc = DW_CFA_expression;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
      cfi->dw_cfi_oprnd2.dw_cfi_loc
	= build_cfa_aligned_loc (&cfa, offset, fde->stack_realignment);
    }
  else if (sreg == INVALID_REGNUM)
    {
      /* Saved in memory at CFA + OFFSET.  Use the compact DW_CFA_offset
	 form when the register number fits in the opcode's 6 bits and
	 the factored offset does not need the signed encoding.  */
      if (need_data_align_sf_opcode (offset))
	cfi->dw_cfi_opc = DW_CFA_offset_extended_sf;
      else if (reg & ~0x3f)
	cfi->dw_cfi_opc = DW_CFA_offset_extended;
      else
	cfi->dw_cfi_opc = DW_CFA_offset;
      cfi->dw_cfi_oprnd2.dw_cfi_offset = offset;
    }
  else if (sreg == reg)
    /* The register still holds its entry value.  */
    cfi->dw_cfi_opc = DW_CFA_same_value;
  else
    {
      /* Saved in another register.  */
      cfi->dw_cfi_opc = DW_CFA_register;
      cfi->dw_cfi_oprnd2.dw_cfi_reg_num = sreg;
    }

  add_cfi (cfi);
}
527
528 /* Given a SET, calculate the amount of stack adjustment it
529 contains. */
530
static HOST_WIDE_INT
stack_adjust_offset (const_rtx pattern, HOST_WIDE_INT cur_args_size,
		     HOST_WIDE_INT cur_offset)
{
  const_rtx src = SET_SRC (pattern);
  const_rtx dest = SET_DEST (pattern);
  HOST_WIDE_INT offset = 0;
  enum rtx_code code;

  if (dest == stack_pointer_rtx)
    {
      code = GET_CODE (src);

      /* Assume (set (reg sp) (reg whatever)) sets args_size
	 level to 0.  */
      if (code == REG && src != stack_pointer_rtx)
	{
	  offset = -cur_args_size;
#ifndef STACK_GROWS_DOWNWARD
	  offset = -offset;
#endif
	  /* CUR_OFFSET is the adjustment already accumulated from
	     earlier SETs in the same compound insn; report only the
	     remainder.  */
	  return offset - cur_offset;
	}

      if (! (code == PLUS || code == MINUS)
	  || XEXP (src, 0) != stack_pointer_rtx
	  || !CONST_INT_P (XEXP (src, 1)))
	return 0;

      /* (set (reg sp) (plus (reg sp) (const_int))) */
      offset = INTVAL (XEXP (src, 1));
      if (code == PLUS)
	offset = -offset;
      return offset;
    }

  /* A pop reads through an auto-modified sp address on the source
     side; examine whichever operand is the MEM.  */
  if (MEM_P (src) && !MEM_P (dest))
    dest = src;
  if (MEM_P (dest))
    {
      /* (set (mem (pre_dec (reg sp))) (foo)) */
      src = XEXP (dest, 0);
      code = GET_CODE (src);

      switch (code)
	{
	case PRE_MODIFY:
	case POST_MODIFY:
	  if (XEXP (src, 0) == stack_pointer_rtx)
	    {
	      rtx val = XEXP (XEXP (src, 1), 1);
	      /* We handle only adjustments by constant amount.  */
	      gcc_assert (GET_CODE (XEXP (src, 1)) == PLUS
			  && CONST_INT_P (val));
	      offset = -INTVAL (val);
	      break;
	    }
	  return 0;

	case PRE_DEC:
	case POST_DEC:
	  if (XEXP (src, 0) == stack_pointer_rtx)
	    {
	      offset = GET_MODE_SIZE (GET_MODE (dest));
	      break;
	    }
	  return 0;

	case PRE_INC:
	case POST_INC:
	  if (XEXP (src, 0) == stack_pointer_rtx)
	    {
	      offset = -GET_MODE_SIZE (GET_MODE (dest));
	      break;
	    }
	  return 0;

	default:
	  return 0;
	}
    }
  else
    return 0;

  return offset;
}
617
/* Precomputed args_size for CODE_LABELs and BARRIERs preceding them,
   indexed by INSN_UID.  */
620
621 static HOST_WIDE_INT *barrier_args_size;
622
623 /* Helper function for compute_barrier_args_size. Handle one insn. */
624
static HOST_WIDE_INT
compute_barrier_args_size_1 (rtx insn, HOST_WIDE_INT cur_args_size,
			     VEC (rtx, heap) **next)
{
  HOST_WIDE_INT offset = 0;
  int i;

  if (! RTX_FRAME_RELATED_P (insn))
    {
      /* Stack motion inside the prologue/epilogue is accounted for
	 separately; skip it here.  */
      if (prologue_epilogue_contains (insn))
	/* Nothing */;
      else if (GET_CODE (PATTERN (insn)) == SET)
	offset = stack_adjust_offset (PATTERN (insn), cur_args_size, 0);
      else if (GET_CODE (PATTERN (insn)) == PARALLEL
	       || GET_CODE (PATTERN (insn)) == SEQUENCE)
	{
	  /* There may be stack adjustments inside compound insns.  Search
	     for them.  */
	  for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
	    if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
	      offset += stack_adjust_offset (XVECEXP (PATTERN (insn), 0, i),
					     cur_args_size, offset);
	}
    }
  else
    {
      /* Frame-related insns describe their stack effect through the
	 REG_FRAME_RELATED_EXPR note when one is attached.  */
      rtx expr = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);

      if (expr)
	{
	  expr = XEXP (expr, 0);
	  if (GET_CODE (expr) == PARALLEL
	      || GET_CODE (expr) == SEQUENCE)
	    for (i = 1; i < XVECLEN (expr, 0); i++)
	      {
		rtx elem = XVECEXP (expr, 0, i);

		if (GET_CODE (elem) == SET && !RTX_FRAME_RELATED_P (elem))
		  offset += stack_adjust_offset (elem, cur_args_size, offset);
	      }
	}
    }

#ifndef STACK_GROWS_DOWNWARD
  offset = -offset;
#endif

  /* The running argument size never goes negative.  */
  cur_args_size += offset;
  if (cur_args_size < 0)
    cur_args_size = 0;

  if (JUMP_P (insn))
    {
      rtx dest = JUMP_LABEL (insn);

      if (dest)
	{
	  /* Propagate the current value to an unvisited jump target
	     (marked by -1) and queue the target for scanning.  */
	  if (barrier_args_size [INSN_UID (dest)] < 0)
	    {
	      barrier_args_size [INSN_UID (dest)] = cur_args_size;
	      VEC_safe_push (rtx, heap, *next, dest);
	    }
	}
    }

  return cur_args_size;
}
692
693 /* Walk the whole function and compute args_size on BARRIERs. */
694
static void
compute_barrier_args_size (void)
{
  int max_uid = get_max_uid (), i;
  rtx insn;
  VEC (rtx, heap) *worklist, *next, *tmp;

  /* -1 marks "not yet computed" for every insn UID.  */
  barrier_args_size = XNEWVEC (HOST_WIDE_INT, max_uid);
  for (i = 0; i < max_uid; i++)
    barrier_args_size[i] = -1;

  worklist = VEC_alloc (rtx, heap, 20);
  next = VEC_alloc (rtx, heap, 20);
  insn = get_insns ();
  /* Seed the propagation: args_size is 0 at the function start.  */
  barrier_args_size[INSN_UID (insn)] = 0;
  VEC_quick_push (rtx, worklist, insn);
  for (;;)
    {
      while (!VEC_empty (rtx, worklist))
	{
	  rtx prev, body, first_insn;
	  HOST_WIDE_INT cur_args_size;

	  first_insn = insn = VEC_pop (rtx, worklist);
	  cur_args_size = barrier_args_size[INSN_UID (insn)];
	  /* A BARRIER just before this label gets the same value.  */
	  prev = prev_nonnote_insn (insn);
	  if (prev && BARRIER_P (prev))
	    barrier_args_size[INSN_UID (prev)] = cur_args_size;

	  /* Scan forward, accumulating args_size, until a BARRIER or
	     an already-visited label ends this straight-line region.  */
	  for (; insn; insn = NEXT_INSN (insn))
	    {
	      if (INSN_DELETED_P (insn) || NOTE_P (insn))
		continue;
	      if (BARRIER_P (insn))
		break;

	      if (LABEL_P (insn))
		{
		  if (insn == first_insn)
		    continue;
		  else if (barrier_args_size[INSN_UID (insn)] < 0)
		    {
		      barrier_args_size[INSN_UID (insn)] = cur_args_size;
		      continue;
		    }
		  else
		    {
		      /* The insns starting with this label have been
			 already scanned or are in the worklist.  */
		      break;
		    }
		}

	      body = PATTERN (insn);
	      if (GET_CODE (body) == SEQUENCE)
		{
		  /* Delay slots: insns from the target of an annulled
		     branch affect only the branch-target path, tracked
		     separately in DEST_ARGS_SIZE.  */
		  HOST_WIDE_INT dest_args_size = cur_args_size;
		  for (i = 1; i < XVECLEN (body, 0); i++)
		    if (INSN_ANNULLED_BRANCH_P (XVECEXP (body, 0, 0))
			&& INSN_FROM_TARGET_P (XVECEXP (body, 0, i)))
		      dest_args_size
			= compute_barrier_args_size_1 (XVECEXP (body, 0, i),
						       dest_args_size, &next);
		    else
		      cur_args_size
			= compute_barrier_args_size_1 (XVECEXP (body, 0, i),
						       cur_args_size, &next);

		  if (INSN_ANNULLED_BRANCH_P (XVECEXP (body, 0, 0)))
		    compute_barrier_args_size_1 (XVECEXP (body, 0, 0),
						 dest_args_size, &next);
		  else
		    cur_args_size
		      = compute_barrier_args_size_1 (XVECEXP (body, 0, 0),
						     cur_args_size, &next);
		}
	      else
		cur_args_size
		  = compute_barrier_args_size_1 (insn, cur_args_size, &next);
	    }
	}

      if (VEC_empty (rtx, next))
	break;

      /* Swap WORKLIST with NEXT and truncate NEXT for next iteration.  */
      tmp = next;
      next = worklist;
      worklist = tmp;
      VEC_truncate (rtx, next, 0);
    }

  VEC_free (rtx, heap, worklist);
  VEC_free (rtx, heap, next);
}
790
791 /* Add a CFI to update the running total of the size of arguments
792 pushed onto the stack. */
793
794 static void
795 dwarf2out_args_size (HOST_WIDE_INT size)
796 {
797 dw_cfi_ref cfi;
798
799 if (size == old_args_size)
800 return;
801
802 old_args_size = size;
803
804 cfi = new_cfi ();
805 cfi->dw_cfi_opc = DW_CFA_GNU_args_size;
806 cfi->dw_cfi_oprnd1.dw_cfi_offset = size;
807 add_cfi (cfi);
808 }
809
810 /* Record a stack adjustment of OFFSET bytes. */
811
static void
dwarf2out_stack_adjust (HOST_WIDE_INT offset)
{
  /* Keep the CFA and register-save tracking in sync whenever they are
     based on the stack pointer.  */
  if (cfa.reg == DW_STACK_POINTER_REGNUM)
    cfa.offset += offset;

  if (cfa_store.reg == DW_STACK_POINTER_REGNUM)
    cfa_store.offset += offset;

  /* NOTE(review): with accumulated outgoing args there is apparently
     no args_size to track -- confirm against the callers.  */
  if (ACCUMULATE_OUTGOING_ARGS)
    return;

#ifndef STACK_GROWS_DOWNWARD
  offset = -offset;
#endif

  /* The running argument total can never be negative.  */
  args_size += offset;
  if (args_size < 0)
    args_size = 0;

  def_cfa_1 (&cfa);
  if (flag_asynchronous_unwind_tables)
    dwarf2out_args_size (args_size);
}
836
837 /* Check INSN to see if it looks like a push or a stack adjustment, and
838 make a note of it if it does. EH uses this information to find out
839 how much extra space it needs to pop off the stack. */
840
static void
dwarf2out_notice_stack_adjust (rtx insn, bool after_p)
{
  HOST_WIDE_INT offset;
  int i;

  /* Don't handle epilogues at all.  Certainly it would be wrong to do so
     with this function.  Proper support would require all frame-related
     insns to be marked, and to be able to handle saving state around
     epilogues textually in the middle of the function.  */
  if (prologue_epilogue_contains (insn))
    return;

  /* If INSN is an instruction from target of an annulled branch, the
     effects are for the target only and so current argument size
     shouldn't change at all.  */
  if (final_sequence
      && INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))
      && INSN_FROM_TARGET_P (insn))
    return;

  /* If only calls can throw, and we have a frame pointer,
     save up adjustments until we see the CALL_INSN.  */
  if (!flag_asynchronous_unwind_tables && cfa.reg != DW_STACK_POINTER_REGNUM)
    {
      if (CALL_P (insn) && !after_p)
	{
	  /* Extract the size of the args from the CALL rtx itself.  */
	  insn = PATTERN (insn);
	  if (GET_CODE (insn) == PARALLEL)
	    insn = XVECEXP (insn, 0, 0);
	  if (GET_CODE (insn) == SET)
	    insn = SET_SRC (insn);
	  gcc_assert (GET_CODE (insn) == CALL);
	  /* Operand 1 of a CALL rtx is the args_size.  */
	  dwarf2out_args_size (INTVAL (XEXP (insn, 1)));
	}
      return;
    }

  if (CALL_P (insn) && !after_p)
    {
      /* Flush the accumulated args_size before the call.  */
      if (!flag_asynchronous_unwind_tables)
	dwarf2out_args_size (args_size);
      return;
    }
  else if (BARRIER_P (insn))
    {
      /* Don't call compute_barrier_args_size () if the only
	 BARRIER is at the end of function.  */
      if (barrier_args_size == NULL && next_nonnote_insn (insn))
	compute_barrier_args_size ();
      if (barrier_args_size == NULL)
	offset = 0;
      else
	{
	  /* -1 in the table means the value was never computed.  */
	  offset = barrier_args_size[INSN_UID (insn)];
	  if (offset < 0)
	    offset = 0;
	}

      /* Convert the absolute value back to a delta from the current
	 running total, in stack-growth direction.  */
      offset -= args_size;
#ifndef STACK_GROWS_DOWNWARD
      offset = -offset;
#endif
    }
  else if (GET_CODE (PATTERN (insn)) == SET)
    offset = stack_adjust_offset (PATTERN (insn), args_size, 0);
  else if (GET_CODE (PATTERN (insn)) == PARALLEL
	   || GET_CODE (PATTERN (insn)) == SEQUENCE)
    {
      /* There may be stack adjustments inside compound insns.  Search
	 for them.  */
      for (offset = 0, i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
	if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
	  offset += stack_adjust_offset (XVECEXP (PATTERN (insn), 0, i),
					 args_size, offset);
    }
  else
    return;

  if (offset == 0)
    return;

  dwarf2out_stack_adjust (offset);
}
926
927 /* We delay emitting a register save until either (a) we reach the end
928 of the prologue or (b) the register is clobbered. This clusters
929 register saves so that there are fewer pc advances. */
930
struct GTY(()) queued_reg_save {
  struct queued_reg_save *next;
  /* The register being saved; may also be PC_RTX for the return
     address column (see dwarf2out_flush_queued_reg_saves).  */
  rtx reg;
  /* Offset from the CFA of the memory save slot; used when
     SAVED_REG is null.  */
  HOST_WIDE_INT cfa_offset;
  /* When non-null, REG is saved in this register instead of memory.  */
  rtx saved_reg;
};

static GTY(()) struct queued_reg_save *queued_reg_saves;

/* The caller's ORIG_REG is saved in SAVED_IN_REG.  */
typedef struct GTY(()) reg_saved_in_data {
  rtx orig_reg;
  rtx saved_in_reg;
} reg_saved_in_data;

DEF_VEC_O (reg_saved_in_data);
DEF_VEC_ALLOC_O (reg_saved_in_data, gc);

/* A set of registers saved in other registers.  This is implemented as
   a flat array because it normally contains zero or 1 entry, depending
   on the target.  IA-64 is the big spender here, using a maximum of
   5 entries.  */
static GTY(()) VEC(reg_saved_in_data, gc) *regs_saved_in_regs;

/* NOTE(review): presumably the register-save entry recorded for the
   CIE's return column; it is set outside this chunk -- confirm.  */
static GTY(()) reg_saved_in_data *cie_return_save;
956
957 /* Short-hand inline for the very common D_F_R (REGNO (x)) operation. */
958 /* ??? This ought to go into dwarf2out.h alongside dwarf_frame_regnum,
959 except that dwarf2out.h is used in places where rtl is prohibited. */
960
961 static inline unsigned
962 dwf_regno (const_rtx reg)
963 {
964 return dwarf_frame_regnum (REGNO (reg));
965 }
966
967 /* Compare X and Y for equivalence. The inputs may be REGs or PC_RTX. */
968
969 static bool
970 compare_reg_or_pc (rtx x, rtx y)
971 {
972 if (REG_P (x) && REG_P (y))
973 return REGNO (x) == REGNO (y);
974 return x == y;
975 }
976
977 /* Record SRC as being saved in DEST. DEST may be null to delete an
978 existing entry. SRC may be a register or PC_RTX. */
979
static void
record_reg_saved_in_reg (rtx dest, rtx src)
{
  reg_saved_in_data *elt;
  size_t i;

  /* If SRC already has an entry, update or delete it in place.  */
  FOR_EACH_VEC_ELT (reg_saved_in_data, regs_saved_in_regs, i, elt)
    if (compare_reg_or_pc (elt->orig_reg, src))
      {
	if (dest == NULL)
	  /* Order is irrelevant, so the cheap unordered removal is fine.  */
	  VEC_unordered_remove(reg_saved_in_data, regs_saved_in_regs, i);
	else
	  elt->saved_in_reg = dest;
	return;
      }

  /* Deleting an entry that does not exist is a no-op.  */
  if (dest == NULL)
    return;

  elt = VEC_safe_push(reg_saved_in_data, gc, regs_saved_in_regs, NULL);
  elt->orig_reg = src;
  elt->saved_in_reg = dest;
}
1003
1004 /* Add an entry to QUEUED_REG_SAVES saying that REG is now saved at
1005 SREG, or if SREG is NULL then it is saved at OFFSET to the CFA. */
1006
1007 static void
1008 queue_reg_save (rtx reg, rtx sreg, HOST_WIDE_INT offset)
1009 {
1010 struct queued_reg_save *q;
1011
1012 /* Duplicates waste space, but it's also necessary to remove them
1013 for correctness, since the queue gets output in reverse order. */
1014 for (q = queued_reg_saves; q != NULL; q = q->next)
1015 if (compare_reg_or_pc (q->reg, reg))
1016 break;
1017
1018 if (q == NULL)
1019 {
1020 q = ggc_alloc_queued_reg_save ();
1021 q->next = queued_reg_saves;
1022 queued_reg_saves = q;
1023 }
1024
1025 q->reg = reg;
1026 q->cfa_offset = offset;
1027 q->saved_reg = sreg;
1028 }
1029
1030 /* Output all the entries in QUEUED_REG_SAVES. */
1031
static void
dwarf2out_flush_queued_reg_saves (void)
{
  struct queued_reg_save *q;

  /* The queue is a LIFO list, so saves come out in reverse order of
     queueing; queue_reg_save guarantees there are no duplicates.  */
  for (q = queued_reg_saves; q; q = q->next)
    {
      unsigned int reg, sreg;

      /* Track register-to-register saves for reg_saved_in.  */
      record_reg_saved_in_reg (q->saved_reg, q->reg);

      /* PC_RTX stands in for the return address column.  */
      if (q->reg == pc_rtx)
	reg = DWARF_FRAME_RETURN_COLUMN;
      else
	reg = dwf_regno (q->reg);
      if (q->saved_reg)
	sreg = dwf_regno (q->saved_reg);
      else
	sreg = INVALID_REGNUM;
      reg_save (reg, sreg, q->cfa_offset);
    }

  queued_reg_saves = NULL;
}
1056
1057 /* Does INSN clobber any register which QUEUED_REG_SAVES lists a saved
1058 location for? Or, does it clobber a register which we've previously
1059 said that some other register is saved in, and for which we now
1060 have a new location for? */
1061
static bool
clobbers_queued_reg_save (const_rtx insn)
{
  struct queued_reg_save *q;

  for (q = queued_reg_saves; q; q = q->next)
    {
      size_t i;
      reg_saved_in_data *rir;

      /* A direct modification of a queued register forces a flush.  */
      if (modified_in_p (q->reg, insn))
	return true;

      /* So does modifying a register that currently holds the saved
	 value of a queued register.  */
      FOR_EACH_VEC_ELT (reg_saved_in_data, regs_saved_in_regs, i, rir)
	if (compare_reg_or_pc (q->reg, rir->orig_reg)
	    && modified_in_p (rir->saved_in_reg, insn))
	  return true;
    }

  return false;
}
1083
1084 /* What register, if any, is currently saved in REG? */
1085
static rtx
reg_saved_in (rtx reg)
{
  unsigned int regn = REGNO (reg);
  struct queued_reg_save *q;
  reg_saved_in_data *rir;
  size_t i;

  /* Check pending (not yet emitted) saves first...  */
  for (q = queued_reg_saves; q; q = q->next)
    if (q->saved_reg && regn == REGNO (q->saved_reg))
      return q->reg;

  /* ... then saves that have already been recorded.  */
  FOR_EACH_VEC_ELT (reg_saved_in_data, regs_saved_in_regs, i, rir)
    if (regn == REGNO (rir->saved_in_reg))
      return rir->orig_reg;

  /* REG holds no other register's saved value.  */
  return NULL_RTX;
}
1104
1105
1106 /* A temporary register holding an integral value used in adjusting SP
1107 or setting up the store_reg. The "offset" field holds the integer
1108 value, not an offset. */
1109 static dw_cfa_location cfa_temp;
1110
1111 /* A subroutine of dwarf2out_frame_debug, process a REG_DEF_CFA note. */
1112
static void
dwarf2out_frame_debug_def_cfa (rtx pat)
{
  memset (&cfa, 0, sizeof (cfa));

  switch (GET_CODE (pat))
    {
    case PLUS:
      /* (plus (reg) (const_int)): CFA = register + offset.  */
      cfa.reg = dwf_regno (XEXP (pat, 0));
      cfa.offset = INTVAL (XEXP (pat, 1));
      break;

    case REG:
      /* Bare register: CFA = register, offset 0.  */
      cfa.reg = dwf_regno (pat);
      break;

    case MEM:
      /* (mem (reg)) or (mem (plus (reg) (const_int))): the CFA is
	 loaded from memory at register + base_offset.  */
      cfa.indirect = 1;
      pat = XEXP (pat, 0);
      if (GET_CODE (pat) == PLUS)
	{
	  cfa.base_offset = INTVAL (XEXP (pat, 1));
	  pat = XEXP (pat, 0);
	}
      cfa.reg = dwf_regno (pat);
      break;

    default:
      /* Recurse and define an expression.  */
      gcc_unreachable ();
    }

  def_cfa_1 (&cfa);
}
1147
1148 /* A subroutine of dwarf2out_frame_debug, process a REG_ADJUST_CFA note. */
1149
static void
dwarf2out_frame_debug_adjust_cfa (rtx pat)
{
  rtx src, dest;

  gcc_assert (GET_CODE (pat) == SET);
  dest = XEXP (pat, 0);
  src = XEXP (pat, 1);

  switch (GET_CODE (src))
    {
    case PLUS:
      /* DEST = CFA-reg + N.  The CFA address itself is unchanged, so
	 its offset from the new base register shrinks by N.  */
      gcc_assert (dwf_regno (XEXP (src, 0)) == cfa.reg);
      cfa.offset -= INTVAL (XEXP (src, 1));
      break;

    case REG:
      /* Plain register copy; the offset is unchanged.  */
      break;

    default:
      gcc_unreachable ();
    }

  /* Rebase the CFA on the destination register.  */
  cfa.reg = dwf_regno (dest);
  gcc_assert (cfa.indirect == 0);

  def_cfa_1 (&cfa);
}
1178
1179 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_OFFSET note. */
1180
1181 static void
1182 dwarf2out_frame_debug_cfa_offset (rtx set)
1183 {
1184 HOST_WIDE_INT offset;
1185 rtx src, addr, span;
1186 unsigned int sregno;
1187
1188 src = XEXP (set, 1);
1189 addr = XEXP (set, 0);
1190 gcc_assert (MEM_P (addr));
1191 addr = XEXP (addr, 0);
1192
1193 /* As documented, only consider extremely simple addresses. */
1194 switch (GET_CODE (addr))
1195 {
1196 case REG:
1197 gcc_assert (dwf_regno (addr) == cfa.reg);
1198 offset = -cfa.offset;
1199 break;
1200 case PLUS:
1201 gcc_assert (dwf_regno (XEXP (addr, 0)) == cfa.reg);
1202 offset = INTVAL (XEXP (addr, 1)) - cfa.offset;
1203 break;
1204 default:
1205 gcc_unreachable ();
1206 }
1207
1208 if (src == pc_rtx)
1209 {
1210 span = NULL;
1211 sregno = DWARF_FRAME_RETURN_COLUMN;
1212 }
1213 else
1214 {
1215 span = targetm.dwarf_register_span (src);
1216 sregno = dwf_regno (src);
1217 }
1218
1219 /* ??? We'd like to use queue_reg_save, but we need to come up with
1220 a different flushing heuristic for epilogues. */
1221 if (!span)
1222 reg_save (sregno, INVALID_REGNUM, offset);
1223 else
1224 {
1225 /* We have a PARALLEL describing where the contents of SRC live.
1226 Queue register saves for each piece of the PARALLEL. */
1227 int par_index;
1228 int limit;
1229 HOST_WIDE_INT span_offset = offset;
1230
1231 gcc_assert (GET_CODE (span) == PARALLEL);
1232
1233 limit = XVECLEN (span, 0);
1234 for (par_index = 0; par_index < limit; par_index++)
1235 {
1236 rtx elem = XVECEXP (span, 0, par_index);
1237
1238 sregno = dwf_regno (src);
1239 reg_save (sregno, INVALID_REGNUM, span_offset);
1240 span_offset += GET_MODE_SIZE (GET_MODE (elem));
1241 }
1242 }
1243 }
1244
/* A subroutine of dwarf2out_frame_debug, process a REG_CFA_REGISTER note.
   SET copies the value of one register (or the return address, as pc_rtx)
   into another register; emit a register-in-register save CFI.  */

static void
dwarf2out_frame_debug_cfa_register (rtx set)
{
  rtx src, dest;
  unsigned sregno, dregno;

  src = XEXP (set, 1);
  dest = XEXP (set, 0);

  /* Remember the copy so later uses of DEST can be mapped back to SRC
     (see reg_saved_in).  */
  record_reg_saved_in_reg (dest, src);
  if (src == pc_rtx)
    sregno = DWARF_FRAME_RETURN_COLUMN;
  else
    sregno = dwf_regno (src);

  dregno = dwf_regno (dest);

  /* ??? We'd like to use queue_reg_save, but we need to come up with
     a different flushing heuristic for epilogues.  */
  reg_save (sregno, dregno, 0);
}
1268
/* A subroutine of dwarf2out_frame_debug, process a REG_CFA_EXPRESSION note.
   SET stores a register into a location too complex for reg + offset;
   emit a DW_CFA_expression whose location expression computes the
   address from the MEM destination.  */

static void
dwarf2out_frame_debug_cfa_expression (rtx set)
{
  rtx src, dest, span;
  dw_cfi_ref cfi = new_cfi ();

  dest = SET_DEST (set);
  src = SET_SRC (set);

  gcc_assert (REG_P (src));
  gcc_assert (MEM_P (dest));

  /* Multi-piece register spans are not supported here.  */
  span = targetm.dwarf_register_span (src);
  gcc_assert (!span);

  cfi->dw_cfi_opc = DW_CFA_expression;
  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = dwf_regno (src);
  /* Translate the MEM address into a DWARF location expression.  */
  cfi->dw_cfi_oprnd2.dw_cfi_loc
    = mem_loc_descriptor (XEXP (dest, 0), get_address_mode (dest),
			  GET_MODE (dest), VAR_INIT_STATUS_INITIALIZED);

  /* ??? We'd like to use queue_reg_save, were the interface different,
     and, as above, we could manage flushing for epilogues.  */
  add_cfi (cfi);
}
1296
1297 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_RESTORE note. */
1298
1299 static void
1300 dwarf2out_frame_debug_cfa_restore (rtx reg)
1301 {
1302 dw_cfi_ref cfi = new_cfi ();
1303 unsigned int regno = dwf_regno (reg);
1304
1305 cfi->dw_cfi_opc = (regno & ~0x3f ? DW_CFA_restore_extended : DW_CFA_restore);
1306 cfi->dw_cfi_oprnd1.dw_cfi_reg_num = regno;
1307
1308 add_cfi (cfi);
1309 }
1310
/* A subroutine of dwarf2out_frame_debug, process a REG_CFA_WINDOW_SAVE.
   Emits a bare DW_CFA_GNU_window_save opcode.
   ??? Perhaps we should note in the CIE where windows are saved (instead of
   assuming 0(cfa)) and what registers are in the window.  */

static void
dwarf2out_frame_debug_cfa_window_save (void)
{
  dw_cfi_ref cfi = new_cfi ();

  cfi->dw_cfi_opc = DW_CFA_GNU_window_save;
  add_cfi (cfi);
}
1323
1324 /* Record call frame debugging information for an expression EXPR,
1325 which either sets SP or FP (adjusting how we calculate the frame
1326 address) or saves a register to the stack or another register.
1327 LABEL indicates the address of EXPR.
1328
1329 This function encodes a state machine mapping rtxes to actions on
1330 cfa, cfa_store, and cfa_temp.reg. We describe these rules so
1331 users need not read the source code.
1332
1333 The High-Level Picture
1334
1335 Changes in the register we use to calculate the CFA: Currently we
1336 assume that if you copy the CFA register into another register, we
1337 should take the other one as the new CFA register; this seems to
1338 work pretty well. If it's wrong for some target, it's simple
1339 enough not to set RTX_FRAME_RELATED_P on the insn in question.
1340
1341 Changes in the register we use for saving registers to the stack:
1342 This is usually SP, but not always. Again, we deduce that if you
1343 copy SP into another register (and SP is not the CFA register),
1344 then the new register is the one we will be using for register
1345 saves. This also seems to work.
1346
1347 Register saves: There's not much guesswork about this one; if
1348 RTX_FRAME_RELATED_P is set on an insn which modifies memory, it's a
1349 register save, and the register used to calculate the destination
1350 had better be the one we think we're using for this purpose.
1351 It's also assumed that a copy from a call-saved register to another
1352 register is saving that register if RTX_FRAME_RELATED_P is set on
1353 that instruction. If the copy is from a call-saved register to
1354 the *same* register, that means that the register is now the same
1355 value as in the caller.
1356
1357 Except: If the register being saved is the CFA register, and the
1358 offset is nonzero, we are saving the CFA, so we assume we have to
1359 use DW_CFA_def_cfa_expression. If the offset is 0, we assume that
1360 the intent is to save the value of SP from the previous frame.
1361
   In addition, if a register has previously been saved to a different
   register, a later copy from that saving register is treated as a
   reference to the original register (see reg_saved_in).
1364
1365 Invariants / Summaries of Rules
1366
1367 cfa current rule for calculating the CFA. It usually
1368 consists of a register and an offset.
1369 cfa_store register used by prologue code to save things to the stack
1370 cfa_store.offset is the offset from the value of
1371 cfa_store.reg to the actual CFA
1372 cfa_temp register holding an integral value. cfa_temp.offset
1373 stores the value, which will be used to adjust the
1374 stack pointer. cfa_temp is also used like cfa_store,
1375 to track stores to the stack via fp or a temp reg.
1376
1377 Rules 1- 4: Setting a register's value to cfa.reg or an expression
1378 with cfa.reg as the first operand changes the cfa.reg and its
1379 cfa.offset. Rule 1 and 4 also set cfa_temp.reg and
1380 cfa_temp.offset.
1381
1382 Rules 6- 9: Set a non-cfa.reg register value to a constant or an
1383 expression yielding a constant. This sets cfa_temp.reg
1384 and cfa_temp.offset.
1385
1386 Rule 5: Create a new register cfa_store used to save items to the
1387 stack.
1388
1389 Rules 10-14: Save a register to the stack. Define offset as the
1390 difference of the original location and cfa_store's
1391 location (or cfa_temp's location if cfa_temp is used).
1392
1393 Rules 16-20: If AND operation happens on sp in prologue, we assume
1394 stack is realigned. We will use a group of DW_OP_XXX
1395 expressions to represent the location of the stored
1396 register instead of CFA+offset.
1397
1398 The Rules
1399
1400 "{a,b}" indicates a choice of a xor b.
1401 "<reg>:cfa.reg" indicates that <reg> must equal cfa.reg.
1402
1403 Rule 1:
1404 (set <reg1> <reg2>:cfa.reg)
1405 effects: cfa.reg = <reg1>
1406 cfa.offset unchanged
1407 cfa_temp.reg = <reg1>
1408 cfa_temp.offset = cfa.offset
1409
1410 Rule 2:
1411 (set sp ({minus,plus,losum} {sp,fp}:cfa.reg
1412 {<const_int>,<reg>:cfa_temp.reg}))
1413 effects: cfa.reg = sp if fp used
1414 cfa.offset += {+/- <const_int>, cfa_temp.offset} if cfa.reg==sp
1415 cfa_store.offset += {+/- <const_int>, cfa_temp.offset}
1416 if cfa_store.reg==sp
1417
1418 Rule 3:
1419 (set fp ({minus,plus,losum} <reg>:cfa.reg <const_int>))
1420 effects: cfa.reg = fp
1421 cfa_offset += +/- <const_int>
1422
1423 Rule 4:
1424 (set <reg1> ({plus,losum} <reg2>:cfa.reg <const_int>))
1425 constraints: <reg1> != fp
1426 <reg1> != sp
1427 effects: cfa.reg = <reg1>
1428 cfa_temp.reg = <reg1>
1429 cfa_temp.offset = cfa.offset
1430
1431 Rule 5:
1432 (set <reg1> (plus <reg2>:cfa_temp.reg sp:cfa.reg))
1433 constraints: <reg1> != fp
1434 <reg1> != sp
1435 effects: cfa_store.reg = <reg1>
1436 cfa_store.offset = cfa.offset - cfa_temp.offset
1437
1438 Rule 6:
1439 (set <reg> <const_int>)
1440 effects: cfa_temp.reg = <reg>
1441 cfa_temp.offset = <const_int>
1442
1443 Rule 7:
1444 (set <reg1>:cfa_temp.reg (ior <reg2>:cfa_temp.reg <const_int>))
1445 effects: cfa_temp.reg = <reg1>
1446 cfa_temp.offset |= <const_int>
1447
1448 Rule 8:
1449 (set <reg> (high <exp>))
1450 effects: none
1451
1452 Rule 9:
1453 (set <reg> (lo_sum <exp> <const_int>))
1454 effects: cfa_temp.reg = <reg>
1455 cfa_temp.offset = <const_int>
1456
1457 Rule 10:
1458 (set (mem ({pre,post}_modify sp:cfa_store (???? <reg1> <const_int>))) <reg2>)
1459 effects: cfa_store.offset -= <const_int>
1460 cfa.offset = cfa_store.offset if cfa.reg == sp
1461 cfa.reg = sp
1462 cfa.base_offset = -cfa_store.offset
1463
1464 Rule 11:
1465 (set (mem ({pre_inc,pre_dec,post_dec} sp:cfa_store.reg)) <reg>)
1466 effects: cfa_store.offset += -/+ mode_size(mem)
1467 cfa.offset = cfa_store.offset if cfa.reg == sp
1468 cfa.reg = sp
1469 cfa.base_offset = -cfa_store.offset
1470
1471 Rule 12:
1472 (set (mem ({minus,plus,losum} <reg1>:{cfa_store,cfa_temp} <const_int>))
1473
1474 <reg2>)
1475 effects: cfa.reg = <reg1>
1476 cfa.base_offset = -/+ <const_int> - {cfa_store,cfa_temp}.offset
1477
1478 Rule 13:
1479 (set (mem <reg1>:{cfa_store,cfa_temp}) <reg2>)
1480 effects: cfa.reg = <reg1>
1481 cfa.base_offset = -{cfa_store,cfa_temp}.offset
1482
1483 Rule 14:
1484 (set (mem (post_inc <reg1>:cfa_temp <const_int>)) <reg2>)
1485 effects: cfa.reg = <reg1>
1486 cfa.base_offset = -cfa_temp.offset
1487 cfa_temp.offset -= mode_size(mem)
1488
1489 Rule 15:
1490 (set <reg> {unspec, unspec_volatile})
1491 effects: target-dependent
1492
1493 Rule 16:
1494 (set sp (and: sp <const_int>))
1495 constraints: cfa_store.reg == sp
1496 effects: cfun->fde.stack_realign = 1
1497 cfa_store.offset = 0
1498 fde->drap_reg = cfa.reg if cfa.reg != sp and cfa.reg != fp
1499
1500 Rule 17:
1501 (set (mem ({pre_inc, pre_dec} sp)) (mem (plus (cfa.reg) (const_int))))
1502 effects: cfa_store.offset += -/+ mode_size(mem)
1503
1504 Rule 18:
1505 (set (mem ({pre_inc, pre_dec} sp)) fp)
1506 constraints: fde->stack_realign == 1
1507 effects: cfa_store.offset = 0
1508 cfa.reg != HARD_FRAME_POINTER_REGNUM
1509
1510 Rule 19:
1511 (set (mem ({pre_inc, pre_dec} sp)) cfa.reg)
1512 constraints: fde->stack_realign == 1
1513 && cfa.offset == 0
1514 && cfa.indirect == 0
1515 && cfa.reg != HARD_FRAME_POINTER_REGNUM
1516 effects: Use DW_CFA_def_cfa_expression to define cfa
1517 cfa.reg == fde->drap_reg */
1518
static void
dwarf2out_frame_debug_expr (rtx expr)
{
  rtx src, dest, span;
  HOST_WIDE_INT offset;
  dw_fde_ref fde;

  /* If RTX_FRAME_RELATED_P is set on a PARALLEL, process each member of
     the PARALLEL independently.  The first element is always processed if
     it is a SET.  This is for backward compatibility.  Other elements
     are processed only if they are SETs and the RTX_FRAME_RELATED_P
     flag is set in them.  */
  if (GET_CODE (expr) == PARALLEL || GET_CODE (expr) == SEQUENCE)
    {
      int par_index;
      int limit = XVECLEN (expr, 0);
      rtx elem;

      /* PARALLELs have strict read-modify-write semantics, so we
	 ought to evaluate every rvalue before changing any lvalue.
	 It's cumbersome to do that in general, but there's an
	 easy approximation that is enough for all current users:
	 handle register saves before register assignments.  */
      if (GET_CODE (expr) == PARALLEL)
	for (par_index = 0; par_index < limit; par_index++)
	  {
	    elem = XVECEXP (expr, 0, par_index);
	    if (GET_CODE (elem) == SET
		&& MEM_P (SET_DEST (elem))
		&& (RTX_FRAME_RELATED_P (elem) || par_index == 0))
	      dwarf2out_frame_debug_expr (elem);
	  }

      /* Second pass: everything that is not a store to memory.  */
      for (par_index = 0; par_index < limit; par_index++)
	{
	  elem = XVECEXP (expr, 0, par_index);
	  if (GET_CODE (elem) == SET
	      && (!MEM_P (SET_DEST (elem)) || GET_CODE (expr) == SEQUENCE)
	      && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
	    dwarf2out_frame_debug_expr (elem);
	  else if (GET_CODE (elem) == SET
		   && par_index != 0
		   && !RTX_FRAME_RELATED_P (elem))
	    {
	      /* Stack adjustment combining might combine some post-prologue
		 stack adjustment into a prologue stack adjustment.  */
	      HOST_WIDE_INT offset = stack_adjust_offset (elem, args_size, 0);

	      if (offset != 0)
		dwarf2out_stack_adjust (offset);
	    }
	}
      return;
    }

  gcc_assert (GET_CODE (expr) == SET);

  src = SET_SRC (expr);
  dest = SET_DEST (expr);

  /* If SRC's value was previously saved in another register, treat the
     copy as a reference to the original register.  */
  if (REG_P (src))
    {
      rtx rsi = reg_saved_in (src);
      if (rsi)
	src = rsi;
    }

  fde = cfun->fde;

  switch (GET_CODE (dest))
    {
    case REG:
      switch (GET_CODE (src))
	{
	  /* Setting FP from SP.  */
	case REG:
	  if (cfa.reg == dwf_regno (src))
	    {
	      /* Rule 1 */
	      /* Update the CFA rule wrt SP or FP.  Make sure src is
		 relative to the current CFA register.

		 We used to require that dest be either SP or FP, but the
		 ARM copies SP to a temporary register, and from there to
		 FP.  So we just rely on the backends to only set
		 RTX_FRAME_RELATED_P on appropriate insns.  */
	      cfa.reg = dwf_regno (dest);
	      cfa_temp.reg = cfa.reg;
	      cfa_temp.offset = cfa.offset;
	    }
	  else
	    {
	      /* Saving a register in a register.  */
	      gcc_assert (!fixed_regs [REGNO (dest)]
			  /* For the SPARC and its register window.  */
			  || (dwf_regno (src) == DWARF_FRAME_RETURN_COLUMN));

	      /* After stack is aligned, we can only save SP in FP
		 if drap register is used.  In this case, we have
		 to restore stack pointer with the CFA value and we
		 don't generate this DWARF information.  */
	      if (fde
		  && fde->stack_realign
		  && REGNO (src) == STACK_POINTER_REGNUM)
		gcc_assert (REGNO (dest) == HARD_FRAME_POINTER_REGNUM
			    && fde->drap_reg != INVALID_REGNUM
			    && cfa.reg != dwf_regno (src));
	      else
		queue_reg_save (src, dest, 0);
	    }
	  break;

	case PLUS:
	case MINUS:
	case LO_SUM:
	  if (dest == stack_pointer_rtx)
	    {
	      /* Rule 2 */
	      /* Adjusting SP.  */
	      switch (GET_CODE (XEXP (src, 1)))
		{
		case CONST_INT:
		  offset = INTVAL (XEXP (src, 1));
		  break;
		case REG:
		  /* The adjustment amount was previously loaded into
		     cfa_temp by Rule 6/9.  */
		  gcc_assert (dwf_regno (XEXP (src, 1)) == cfa_temp.reg);
		  offset = cfa_temp.offset;
		  break;
		default:
		  gcc_unreachable ();
		}

	      if (XEXP (src, 0) == hard_frame_pointer_rtx)
		{
		  /* Restoring SP from FP in the epilogue.  */
		  gcc_assert (cfa.reg == DW_FRAME_POINTER_REGNUM);
		  cfa.reg = DW_STACK_POINTER_REGNUM;
		}
	      else if (GET_CODE (src) == LO_SUM)
		/* Assume we've set the source reg of the LO_SUM from sp.  */
		;
	      else
		gcc_assert (XEXP (src, 0) == stack_pointer_rtx);

	      /* A PLUS/LO_SUM with positive offset shrinks the frame, so
		 negate; MINUS already encodes the direction.  */
	      if (GET_CODE (src) != MINUS)
		offset = -offset;
	      if (cfa.reg == DW_STACK_POINTER_REGNUM)
		cfa.offset += offset;
	      if (cfa_store.reg == DW_STACK_POINTER_REGNUM)
		cfa_store.offset += offset;
	    }
	  else if (dest == hard_frame_pointer_rtx)
	    {
	      /* Rule 3 */
	      /* Either setting the FP from an offset of the SP,
		 or adjusting the FP */
	      gcc_assert (frame_pointer_needed);

	      gcc_assert (REG_P (XEXP (src, 0))
			  && dwf_regno (XEXP (src, 0)) == cfa.reg
			  && CONST_INT_P (XEXP (src, 1)));
	      offset = INTVAL (XEXP (src, 1));
	      if (GET_CODE (src) != MINUS)
		offset = -offset;
	      cfa.offset += offset;
	      cfa.reg = DW_FRAME_POINTER_REGNUM;
	    }
	  else
	    {
	      gcc_assert (GET_CODE (src) != MINUS);

	      /* Rule 4 */
	      if (REG_P (XEXP (src, 0))
		  && dwf_regno (XEXP (src, 0)) == cfa.reg
		  && CONST_INT_P (XEXP (src, 1)))
		{
		  /* Setting a temporary CFA register that will be copied
		     into the FP later on.  */
		  offset = - INTVAL (XEXP (src, 1));
		  cfa.offset += offset;
		  cfa.reg = dwf_regno (dest);
		  /* Or used to save regs to the stack.  */
		  cfa_temp.reg = cfa.reg;
		  cfa_temp.offset = cfa.offset;
		}

	      /* Rule 5 */
	      else if (REG_P (XEXP (src, 0))
		       && dwf_regno (XEXP (src, 0)) == cfa_temp.reg
		       && XEXP (src, 1) == stack_pointer_rtx)
		{
		  /* Setting a scratch register that we will use instead
		     of SP for saving registers to the stack.  */
		  gcc_assert (cfa.reg == DW_STACK_POINTER_REGNUM);
		  cfa_store.reg = dwf_regno (dest);
		  cfa_store.offset = cfa.offset - cfa_temp.offset;
		}

	      /* Rule 9 */
	      else if (GET_CODE (src) == LO_SUM
		       && CONST_INT_P (XEXP (src, 1)))
		{
		  cfa_temp.reg = dwf_regno (dest);
		  cfa_temp.offset = INTVAL (XEXP (src, 1));
		}
	      else
		gcc_unreachable ();
	    }
	  break;

	  /* Rule 6 */
	case CONST_INT:
	  cfa_temp.reg = dwf_regno (dest);
	  cfa_temp.offset = INTVAL (src);
	  break;

	  /* Rule 7 */
	case IOR:
	  gcc_assert (REG_P (XEXP (src, 0))
		      && dwf_regno (XEXP (src, 0)) == cfa_temp.reg
		      && CONST_INT_P (XEXP (src, 1)));

	  cfa_temp.reg = dwf_regno (dest);
	  cfa_temp.offset |= INTVAL (XEXP (src, 1));
	  break;

	  /* Skip over HIGH, assuming it will be followed by a LO_SUM,
	     which will fill in all of the bits.  */
	  /* Rule 8 */
	case HIGH:
	  break;

	  /* Rule 15 */
	case UNSPEC:
	case UNSPEC_VOLATILE:
	  /* All unspecs should be represented by REG_CFA_* notes.  */
	  gcc_unreachable ();
	  return;

	  /* Rule 16 */
	case AND:
	  /* If this AND operation happens on stack pointer in prologue,
	     we assume the stack is realigned and we extract the
	     alignment.  */
	  if (fde && XEXP (src, 0) == stack_pointer_rtx)
	    {
	      /* We interpret reg_save differently with stack_realign set.
		 Thus we must flush whatever we have queued first.  */
	      dwarf2out_flush_queued_reg_saves ();

	      gcc_assert (cfa_store.reg == dwf_regno (XEXP (src, 0)));
	      fde->stack_realign = 1;
	      fde->stack_realignment = INTVAL (XEXP (src, 1));
	      cfa_store.offset = 0;

	      /* Remember the DRAP register if the CFA is computed from
		 some other register than SP or FP.  */
	      if (cfa.reg != DW_STACK_POINTER_REGNUM
		  && cfa.reg != DW_FRAME_POINTER_REGNUM)
		fde->drap_reg = cfa.reg;
	    }
	  return;

	default:
	  gcc_unreachable ();
	}

      def_cfa_1 (&cfa);
      break;

    case MEM:

      /* Saving a register to the stack.  Make sure dest is relative to the
	 CFA register.  */
      switch (GET_CODE (XEXP (dest, 0)))
	{
	  /* Rule 10 */
	  /* With a push.  */
	case PRE_MODIFY:
	case POST_MODIFY:
	  /* We can't handle variable size modifications.  */
	  gcc_assert (GET_CODE (XEXP (XEXP (XEXP (dest, 0), 1), 1))
		      == CONST_INT);
	  offset = -INTVAL (XEXP (XEXP (XEXP (dest, 0), 1), 1));

	  gcc_assert (REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
		      && cfa_store.reg == DW_STACK_POINTER_REGNUM);

	  cfa_store.offset += offset;
	  if (cfa.reg == DW_STACK_POINTER_REGNUM)
	    cfa.offset = cfa_store.offset;

	  /* POST_MODIFY stores before the adjustment takes effect.  */
	  if (GET_CODE (XEXP (dest, 0)) == POST_MODIFY)
	    offset -= cfa_store.offset;
	  else
	    offset = -cfa_store.offset;
	  break;

	  /* Rule 11 */
	case PRE_INC:
	case PRE_DEC:
	case POST_DEC:
	  offset = GET_MODE_SIZE (GET_MODE (dest));
	  if (GET_CODE (XEXP (dest, 0)) == PRE_INC)
	    offset = -offset;

	  gcc_assert ((REGNO (XEXP (XEXP (dest, 0), 0))
		       == STACK_POINTER_REGNUM)
		      && cfa_store.reg == DW_STACK_POINTER_REGNUM);

	  cfa_store.offset += offset;

	  /* Rule 18: If stack is aligned, we will use FP as a
	     reference to represent the address of the stored
	     register.  */
	  if (fde
	      && fde->stack_realign
	      && src == hard_frame_pointer_rtx)
	    {
	      gcc_assert (cfa.reg != DW_FRAME_POINTER_REGNUM);
	      cfa_store.offset = 0;
	    }

	  if (cfa.reg == DW_STACK_POINTER_REGNUM)
	    cfa.offset = cfa_store.offset;

	  if (GET_CODE (XEXP (dest, 0)) == POST_DEC)
	    offset += -cfa_store.offset;
	  else
	    offset = -cfa_store.offset;
	  break;

	  /* Rule 12 */
	  /* With an offset.  */
	case PLUS:
	case MINUS:
	case LO_SUM:
	  {
	    unsigned int regno;

	    gcc_assert (CONST_INT_P (XEXP (XEXP (dest, 0), 1))
			&& REG_P (XEXP (XEXP (dest, 0), 0)));
	    offset = INTVAL (XEXP (XEXP (dest, 0), 1));
	    if (GET_CODE (XEXP (dest, 0)) == MINUS)
	      offset = -offset;

	    regno = dwf_regno (XEXP (XEXP (dest, 0), 0));

	    /* Rebase the slot address onto the CFA.  */
	    if (cfa.reg == regno)
	      offset -= cfa.offset;
	    else if (cfa_store.reg == regno)
	      offset -= cfa_store.offset;
	    else
	      {
		gcc_assert (cfa_temp.reg == regno);
		offset -= cfa_temp.offset;
	      }
	  }
	  break;

	  /* Rule 13 */
	  /* Without an offset.  */
	case REG:
	  {
	    unsigned int regno = dwf_regno (XEXP (dest, 0));

	    if (cfa.reg == regno)
	      offset = -cfa.offset;
	    else if (cfa_store.reg == regno)
	      offset = -cfa_store.offset;
	    else
	      {
		gcc_assert (cfa_temp.reg == regno);
		offset = -cfa_temp.offset;
	      }
	  }
	  break;

	  /* Rule 14 */
	case POST_INC:
	  gcc_assert (cfa_temp.reg == dwf_regno (XEXP (XEXP (dest, 0), 0)));
	  offset = -cfa_temp.offset;
	  cfa_temp.offset -= GET_MODE_SIZE (GET_MODE (dest));
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Rule 17 */
      /* If the source operand of this MEM operation is a memory,
	 we only care how much stack grew.  */
      if (MEM_P (src))
	break;

      if (REG_P (src)
	  && REGNO (src) != STACK_POINTER_REGNUM
	  && REGNO (src) != HARD_FRAME_POINTER_REGNUM
	  && dwf_regno (src) == cfa.reg)
	{
	  /* We're storing the current CFA reg into the stack.  */

	  if (cfa.offset == 0)
	    {
	      /* Rule 19 */
	      /* If stack is aligned, putting CFA reg into stack means
		 we can no longer use reg + offset to represent CFA.
		 Here we use DW_CFA_def_cfa_expression instead.  The
		 result of this expression equals to the original CFA
		 value.  */
	      if (fde
		  && fde->stack_realign
		  && cfa.indirect == 0
		  && cfa.reg != DW_FRAME_POINTER_REGNUM)
		{
		  dw_cfa_location cfa_exp;

		  gcc_assert (fde->drap_reg == cfa.reg);

		  cfa_exp.indirect = 1;
		  cfa_exp.reg = DW_FRAME_POINTER_REGNUM;
		  cfa_exp.base_offset = offset;
		  cfa_exp.offset = 0;

		  fde->drap_reg_saved = 1;

		  def_cfa_1 (&cfa_exp);
		  break;
		}

	      /* If the source register is exactly the CFA, assume
		 we're saving SP like any other register; this happens
		 on the ARM.  */
	      def_cfa_1 (&cfa);
	      queue_reg_save (stack_pointer_rtx, NULL_RTX, offset);
	      break;
	    }
	  else
	    {
	      /* Otherwise, we'll need to look in the stack to
		 calculate the CFA.  */
	      rtx x = XEXP (dest, 0);

	      if (!REG_P (x))
		x = XEXP (x, 0);
	      gcc_assert (REG_P (x));

	      cfa.reg = dwf_regno (x);
	      cfa.base_offset = offset;
	      cfa.indirect = 1;
	      def_cfa_1 (&cfa);
	      break;
	    }
	}

      def_cfa_1 (&cfa);

      span = NULL;
      if (REG_P (src))
	span = targetm.dwarf_register_span (src);
      if (!span)
	queue_reg_save (src, NULL_RTX, offset);
      else
	{
	  /* We have a PARALLEL describing where the contents of SRC live.
	     Queue register saves for each piece of the PARALLEL.  */
	  int par_index;
	  int limit;
	  HOST_WIDE_INT span_offset = offset;

	  gcc_assert (GET_CODE (span) == PARALLEL);

	  limit = XVECLEN (span, 0);
	  for (par_index = 0; par_index < limit; par_index++)
	    {
	      rtx elem = XVECEXP (span, 0, par_index);
	      queue_reg_save (elem, NULL_RTX, span_offset);
	      span_offset += GET_MODE_SIZE (GET_MODE (elem));
	    }
	}
      break;

    default:
      gcc_unreachable ();
    }
}
2003
2004 /* Record call frame debugging information for INSN, which either
2005 sets SP or FP (adjusting how we calculate the frame address) or saves a
2006 register to the stack. If INSN is NULL_RTX, initialize our state.
2007
2008 If AFTER_P is false, we're being called before the insn is emitted,
2009 otherwise after. Call instructions get invoked twice. */
2010
2011 static void
2012 dwarf2out_frame_debug (rtx insn, bool after_p)
2013 {
2014 rtx note, n;
2015 bool handled_one = false;
2016 bool need_flush = false;
2017
2018 if (!NONJUMP_INSN_P (insn) || clobbers_queued_reg_save (insn))
2019 dwarf2out_flush_queued_reg_saves ();
2020
2021 if (!RTX_FRAME_RELATED_P (insn))
2022 {
2023 /* ??? This should be done unconditionally since stack adjustments
2024 matter if the stack pointer is not the CFA register anymore but
2025 is still used to save registers. */
2026 if (!ACCUMULATE_OUTGOING_ARGS)
2027 dwarf2out_notice_stack_adjust (insn, after_p);
2028 return;
2029 }
2030
2031 any_cfis_emitted = false;
2032
2033 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2034 switch (REG_NOTE_KIND (note))
2035 {
2036 case REG_FRAME_RELATED_EXPR:
2037 insn = XEXP (note, 0);
2038 goto do_frame_expr;
2039
2040 case REG_CFA_DEF_CFA:
2041 dwarf2out_frame_debug_def_cfa (XEXP (note, 0));
2042 handled_one = true;
2043 break;
2044
2045 case REG_CFA_ADJUST_CFA:
2046 n = XEXP (note, 0);
2047 if (n == NULL)
2048 {
2049 n = PATTERN (insn);
2050 if (GET_CODE (n) == PARALLEL)
2051 n = XVECEXP (n, 0, 0);
2052 }
2053 dwarf2out_frame_debug_adjust_cfa (n);
2054 handled_one = true;
2055 break;
2056
2057 case REG_CFA_OFFSET:
2058 n = XEXP (note, 0);
2059 if (n == NULL)
2060 n = single_set (insn);
2061 dwarf2out_frame_debug_cfa_offset (n);
2062 handled_one = true;
2063 break;
2064
2065 case REG_CFA_REGISTER:
2066 n = XEXP (note, 0);
2067 if (n == NULL)
2068 {
2069 n = PATTERN (insn);
2070 if (GET_CODE (n) == PARALLEL)
2071 n = XVECEXP (n, 0, 0);
2072 }
2073 dwarf2out_frame_debug_cfa_register (n);
2074 handled_one = true;
2075 break;
2076
2077 case REG_CFA_EXPRESSION:
2078 n = XEXP (note, 0);
2079 if (n == NULL)
2080 n = single_set (insn);
2081 dwarf2out_frame_debug_cfa_expression (n);
2082 handled_one = true;
2083 break;
2084
2085 case REG_CFA_RESTORE:
2086 n = XEXP (note, 0);
2087 if (n == NULL)
2088 {
2089 n = PATTERN (insn);
2090 if (GET_CODE (n) == PARALLEL)
2091 n = XVECEXP (n, 0, 0);
2092 n = XEXP (n, 0);
2093 }
2094 dwarf2out_frame_debug_cfa_restore (n);
2095 handled_one = true;
2096 break;
2097
2098 case REG_CFA_SET_VDRAP:
2099 n = XEXP (note, 0);
2100 if (REG_P (n))
2101 {
2102 dw_fde_ref fde = cfun->fde;
2103 if (fde)
2104 {
2105 gcc_assert (fde->vdrap_reg == INVALID_REGNUM);
2106 if (REG_P (n))
2107 fde->vdrap_reg = dwf_regno (n);
2108 }
2109 }
2110 handled_one = true;
2111 break;
2112
2113 case REG_CFA_WINDOW_SAVE:
2114 dwarf2out_frame_debug_cfa_window_save ();
2115 handled_one = true;
2116 break;
2117
2118 case REG_CFA_FLUSH_QUEUE:
2119 /* The actual flush happens below. */
2120 need_flush = true;
2121 handled_one = true;
2122 break;
2123
2124 default:
2125 break;
2126 }
2127
2128 if (handled_one)
2129 {
2130 /* Minimize the number of advances by emitting the entire queue
2131 once anything is emitted. */
2132 need_flush |= any_cfis_emitted;
2133 }
2134 else
2135 {
2136 insn = PATTERN (insn);
2137 do_frame_expr:
2138 dwarf2out_frame_debug_expr (insn);
2139
2140 /* Check again. A parallel can save and update the same register.
2141 We could probably check just once, here, but this is safer than
2142 removing the check at the start of the function. */
2143 if (any_cfis_emitted || clobbers_queued_reg_save (insn))
2144 need_flush = true;
2145 }
2146
2147 if (need_flush)
2148 dwarf2out_flush_queued_reg_saves ();
2149 }
2150
2151 /* Examine CFI and return true if a cfi label and set_loc is needed
2152 beforehand. Even when generating CFI assembler instructions, we
2153 still have to add the cfi to the list so that lookup_cfa_1 works
2154 later on. When -g2 and above we even need to force emitting of
2155 CFI labels and add to list a DW_CFA_set_loc for convert_cfa_to_fb_loc_list
2156 purposes. If we're generating DWARF3 output we use DW_OP_call_frame_cfa
2157 and so don't use convert_cfa_to_fb_loc_list. */
2158
2159 static bool
2160 cfi_label_required_p (dw_cfi_ref cfi)
2161 {
2162 if (!dwarf2out_do_cfi_asm ())
2163 return true;
2164
2165 if (dwarf_version == 2
2166 && debug_info_level > DINFO_LEVEL_TERSE
2167 && (write_symbols == DWARF2_DEBUG
2168 || write_symbols == VMS_AND_DWARF2_DEBUG))
2169 {
2170 switch (cfi->dw_cfi_opc)
2171 {
2172 case DW_CFA_def_cfa_offset:
2173 case DW_CFA_def_cfa_offset_sf:
2174 case DW_CFA_def_cfa_register:
2175 case DW_CFA_def_cfa:
2176 case DW_CFA_def_cfa_sf:
2177 case DW_CFA_def_cfa_expression:
2178 case DW_CFA_restore_state:
2179 return true;
2180 default:
2181 return false;
2182 }
2183 }
2184 return false;
2185 }
2186
/* Walk the function, looking for NOTE_INSN_CFI notes.  Add the CFIs to the
   function's FDE, adding CFI labels and set_loc/advance_loc opcodes as
   necessary.  */
static void
add_cfis_to_fde (void)
{
  dw_fde_ref fde = cfun->fde;
  rtx insn, next;
  /* We always start with a function_begin label, so an advance_loc from
     it is valid; FIRST is only set when a set_loc is required instead.  */
  bool first = false;

  for (insn = get_insns (); insn; insn = next)
    {
      next = NEXT_INSN (insn);

      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
	{
	  /* Don't attempt to advance_loc4 between labels
	     in different sections.  */
	  first = true;
	}

      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
	{
	  /* Coalesce a run of consecutive CFI notes under one label;
	     a label is needed if any of them requires one.  */
	  bool required = cfi_label_required_p (NOTE_CFI (insn));
	  while (next && NOTE_P (next) && NOTE_KIND (next) == NOTE_INSN_CFI)
	    {
	      required |= cfi_label_required_p (NOTE_CFI (next));
	      next = NEXT_INSN (next);
	    }
	  if (required)
	    {
	      int num = dwarf2out_cfi_label_num;
	      const char *label = dwarf2out_cfi_label ();
	      dw_cfi_ref xcfi;
	      rtx tmp;

	      /* Set the location counter to the new label.  */
	      xcfi = new_cfi ();
	      xcfi->dw_cfi_opc = (first ? DW_CFA_set_loc
				  : DW_CFA_advance_loc4);
	      xcfi->dw_cfi_oprnd1.dw_cfi_addr = label;
	      VEC_safe_push (dw_cfi_ref, gc, fde->dw_fde_cfi, xcfi);

	      tmp = emit_note_before (NOTE_INSN_CFI_LABEL, insn);
	      NOTE_LABEL_NUMBER (tmp) = num;
	    }

	  /* Append the whole run of CFIs to the FDE.  */
	  do
	    {
	      VEC_safe_push (dw_cfi_ref, gc, fde->dw_fde_cfi, NOTE_CFI (insn));
	      insn = NEXT_INSN (insn);
	    }
	  while (insn != next);
	  first = false;
	}
    }
}
2245
/* Scan the function and create the initial set of CFI notes.  Each CFI
   note is emitted before CFI_INSN, which is positioned per insn below.  */

static void
create_cfi_notes (void)
{
  rtx insn;

  for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
    {
      rtx pat;

      /* By default, emit CFI notes just before INSN.  */
      cfi_insn = PREV_INSN (insn);

      if (BARRIER_P (insn))
	{
	  /* A barrier ends a block; let frame-debug flush state.  */
	  dwarf2out_frame_debug (insn, false);
	  continue;
	}

      if (NOTE_P (insn))
	{
	  switch (NOTE_KIND (insn))
	    {
	    case NOTE_INSN_PROLOGUE_END:
	      dwarf2out_flush_queued_reg_saves ();
	      break;

	    case NOTE_INSN_EPILOGUE_BEG:
#if defined(HAVE_epilogue)
	      dwarf2out_cfi_begin_epilogue (insn);
#endif
	      break;

	    case NOTE_INSN_CFA_RESTORE_STATE:
	      cfi_insn = insn;
	      dwarf2out_frame_debug_restore_state ();
	      break;
	    }
	  continue;
	}

      if (!NONDEBUG_INSN_P (insn))
	continue;

      pat = PATTERN (insn);
      if (asm_noperands (pat) >= 0)
	{
	  dwarf2out_frame_debug (insn, false);
	  continue;
	}

      /* Process the members of a delay-slot SEQUENCE individually.  */
      if (GET_CODE (pat) == SEQUENCE)
	{
	  int i, n = XVECLEN (pat, 0);
	  for (i = 1; i < n; ++i)
	    dwarf2out_frame_debug (XVECEXP (pat, 0, i), false);
	}

      /* Calls (and explicit flush requests) are also processed before
	 the insn; other insns only after (below).  */
      if (CALL_P (insn)
	  || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
	dwarf2out_frame_debug (insn, false);

      /* Do not separate tablejump insns from their ADDR_DIFF_VEC.
	 Putting the note after the VEC should be ok.  */
      if (!tablejump_p (insn, NULL, &cfi_insn))
	cfi_insn = insn;

      dwarf2out_frame_debug (insn, true);
    }

  cfi_insn = NULL;
}
2318
/* Determine if we need to save and restore CFI information around the
   epilogue started by INSN (a NOTE_INSN_EPILOGUE_BEG).  If we do need
   to save/restore, then emulate the state save now, and insert a
   NOTE_INSN_CFA_RESTORE_STATE at the appropriate place in the stream.  */

static void
dwarf2out_cfi_begin_epilogue (rtx insn)
{
  bool saw_frp = false;
  rtx i;

  /* Scan forward to the return insn, noticing if there are possible
     frame related insns.  */
  for (i = NEXT_INSN (insn); i ; i = NEXT_INSN (i))
    {
      if (!INSN_P (i))
	continue;

      /* Look for both regular and sibcalls to end the block.  */
      if (returnjump_p (i))
	break;
      if (CALL_P (i) && SIBLING_CALL_P (i))
	break;

      if (GET_CODE (PATTERN (i)) == SEQUENCE)
	{
	  int idx;
	  rtx seq = PATTERN (i);

	  /* A return or sibcall at the head of a delay-slot SEQUENCE
	     ends the block as well.  */
	  if (returnjump_p (XVECEXP (seq, 0, 0)))
	    break;
	  if (CALL_P (XVECEXP (seq, 0, 0))
	      && SIBLING_CALL_P (XVECEXP (seq, 0, 0)))
	    break;

	  for (idx = 0; idx < XVECLEN (seq, 0); idx++)
	    if (RTX_FRAME_RELATED_P (XVECEXP (seq, 0, idx)))
	      saw_frp = true;
	}

      if (RTX_FRAME_RELATED_P (i))
	saw_frp = true;
    }

  /* If no frame-related insns were seen (i.e. the port doesn't emit
     epilogue unwind info), we don't need a save/restore pair.  */
  if (!saw_frp)
    return;

  /* Otherwise, search forward to see if the return insn was the last
     basic block of the function.  If so, we don't need save/restore.  */
  gcc_assert (i != NULL);
  i = next_real_insn (i);
  if (i == NULL)
    return;

  /* Insert the restore before that next real insn in the stream, and before
     a potential NOTE_INSN_EPILOGUE_BEG -- we do need these notes to be
     properly nested.  This should be after any label or alignment.  This
     will be pushed into the CFI stream by the function below.  */
  while (1)
    {
      rtx p = PREV_INSN (i);
      if (!NOTE_P (p))
	break;
      if (NOTE_KIND (p) == NOTE_INSN_BASIC_BLOCK)
	break;
      i = p;
    }
  emit_note_before (NOTE_INSN_CFA_RESTORE_STATE, i);

  /* Flag that a DW_CFA_remember_state should be emitted; the flag is
     consumed elsewhere, before the next CFI goes out.  */
  emit_cfa_remember = true;

  /* And emulate the state save.  */
  gcc_assert (!cfa_remember.in_use);
  cfa_remember = cfa;
  old_cfa_remember = old_cfa;
  cfa_remember.in_use = 1;
}
2398
2399 /* A "subroutine" of dwarf2out_cfi_begin_epilogue. Emit the restore
2400 required. */
2401
2402 static void
2403 dwarf2out_frame_debug_restore_state (void)
2404 {
2405 dw_cfi_ref cfi = new_cfi ();
2406
2407 cfi->dw_cfi_opc = DW_CFA_restore_state;
2408 add_cfi (cfi);
2409
2410 gcc_assert (cfa_remember.in_use);
2411 cfa = cfa_remember;
2412 old_cfa = old_cfa_remember;
2413 cfa_remember.in_use = 0;
2414 }
2415 \f
/* Record the initial position of the return address.  RTL is
   INCOMING_RETURN_ADDR_RTX.  */

static void
initial_return_save (rtx rtl)
{
  /* INVALID_REGNUM means "saved in memory" for the reg_save call.  */
  unsigned int reg = INVALID_REGNUM;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (rtl))
    {
    case REG:
      /* RA is in a register.  */
      reg = dwf_regno (rtl);
      break;

    case MEM:
      /* RA is on the stack.  Decompose the address, which must be
	 stack-pointer relative.  */
      rtl = XEXP (rtl, 0);
      switch (GET_CODE (rtl))
	{
	case REG:
	  gcc_assert (REGNO (rtl) == STACK_POINTER_REGNUM);
	  offset = 0;
	  break;

	case PLUS:
	  gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
	  offset = INTVAL (XEXP (rtl, 1));
	  break;

	case MINUS:
	  gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
	  offset = -INTVAL (XEXP (rtl, 1));
	  break;

	default:
	  gcc_unreachable ();
	}

      break;

    case PLUS:
      /* The return address is at some offset from any value we can
	 actually load.  For instance, on the SPARC it is in %i7+8.  Just
	 ignore the offset for now; it doesn't matter for unwinding frames.  */
      gcc_assert (CONST_INT_P (XEXP (rtl, 1)));
      initial_return_save (XEXP (rtl, 0));
      return;

    default:
      gcc_unreachable ();
    }

  if (reg != DWARF_FRAME_RETURN_COLUMN)
    {
      /* The RA lives somewhere other than its own return column: note
	 the register copy if it is in a register (in the memory case
	 REG is still INVALID_REGNUM and only OFFSET matters), then
	 record the save relative to the CFA.  */
      if (reg != INVALID_REGNUM)
        record_reg_saved_in_reg (rtl, pc_rtx);
      reg_save (DWARF_FRAME_RETURN_COLUMN, reg, offset - cfa.offset);
    }
}
2477
/* Annotate the function with NOTE_INSN_CFI notes to record the CFI
   state at each location within the function.  These notes will be
   emitted during pass_final.  */

static unsigned int
execute_dwarf2_frame (void)
{
  /* The first time we're called, compute the incoming frame state
     common to all functions -- i.e. the contents of the CIE.  */
  if (cie_cfi_vec == NULL)
    {
      dw_cfa_location loc;

      /* Direct newly created CFIs into the CIE vector.  */
      add_cfi_vec = &cie_cfi_vec;

      memset(&old_cfa, 0, sizeof (old_cfa));
      old_cfa.reg = INVALID_REGNUM;

      /* On entry, the Canonical Frame Address is at SP.  */
      memset(&loc, 0, sizeof (loc));
      loc.reg = DW_STACK_POINTER_REGNUM;
      loc.offset = INCOMING_FRAME_SP_OFFSET;
      def_cfa_1 (&loc);

      if (targetm.debug_unwind_info () == UI_DWARF2
          || targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
	{
	  initial_return_save (INCOMING_RETURN_ADDR_RTX);

	  /* For a few targets, we have the return address incoming into a
	     register, but choose a different return column.  This will result
	     in a DW_CFA_register for the return, and an entry in
	     regs_saved_in_regs to match.  If the target later stores that
	     return address register to the stack, we want to be able to emit
	     the DW_CFA_offset against the return column, not the intermediate
	     save register.  Save the contents of regs_saved_in_regs so that
	     we can re-initialize it at the start of each function.  */
	  switch (VEC_length (reg_saved_in_data, regs_saved_in_regs))
	    {
	    case 0:
	      break;
	    case 1:
	      cie_return_save = ggc_alloc_reg_saved_in_data ();
	      *cie_return_save = *VEC_index (reg_saved_in_data,
					     regs_saved_in_regs, 0);
	      regs_saved_in_regs = NULL;
	      break;
	    default:
	      gcc_unreachable ();
	    }
	}

      add_cfi_vec = NULL;
    }

  /* Set up state for generating call frame debug info.  */
  gcc_checking_assert (queued_reg_saves == NULL);
  gcc_checking_assert (regs_saved_in_regs == NULL);

  /* Every function starts from the CIE state: CFA at SP plus the
     target's incoming offset.  */
  memset (&cfa, 0, sizeof(cfa));
  cfa.reg = DW_STACK_POINTER_REGNUM;
  cfa.offset = INCOMING_FRAME_SP_OFFSET;

  old_cfa = cfa;
  cfa_store = cfa;

  memset (&cfa_temp, 0, sizeof(cfa_temp));
  cfa_temp.reg = INVALID_REGNUM;

  /* Re-seed the return-address-in-register entry captured for the
     CIE above, if any.  */
  if (cie_return_save)
    VEC_safe_push (reg_saved_in_data, gc, regs_saved_in_regs, cie_return_save);

  dwarf2out_alloc_current_fde ();

  /* Do the work.  */
  create_cfi_notes ();
  add_cfis_to_fde ();

  /* Reset all function-specific information, particularly for GC.  */
  XDELETEVEC (barrier_args_size);
  barrier_args_size = NULL;
  regs_saved_in_regs = NULL;
  queued_reg_saves = NULL;

  return 0;
}
2563 \f
2564 /* Convert a DWARF call frame info. operation to its string name */
2565
2566 static const char *
2567 dwarf_cfi_name (unsigned int cfi_opc)
2568 {
2569 switch (cfi_opc)
2570 {
2571 case DW_CFA_advance_loc:
2572 return "DW_CFA_advance_loc";
2573 case DW_CFA_offset:
2574 return "DW_CFA_offset";
2575 case DW_CFA_restore:
2576 return "DW_CFA_restore";
2577 case DW_CFA_nop:
2578 return "DW_CFA_nop";
2579 case DW_CFA_set_loc:
2580 return "DW_CFA_set_loc";
2581 case DW_CFA_advance_loc1:
2582 return "DW_CFA_advance_loc1";
2583 case DW_CFA_advance_loc2:
2584 return "DW_CFA_advance_loc2";
2585 case DW_CFA_advance_loc4:
2586 return "DW_CFA_advance_loc4";
2587 case DW_CFA_offset_extended:
2588 return "DW_CFA_offset_extended";
2589 case DW_CFA_restore_extended:
2590 return "DW_CFA_restore_extended";
2591 case DW_CFA_undefined:
2592 return "DW_CFA_undefined";
2593 case DW_CFA_same_value:
2594 return "DW_CFA_same_value";
2595 case DW_CFA_register:
2596 return "DW_CFA_register";
2597 case DW_CFA_remember_state:
2598 return "DW_CFA_remember_state";
2599 case DW_CFA_restore_state:
2600 return "DW_CFA_restore_state";
2601 case DW_CFA_def_cfa:
2602 return "DW_CFA_def_cfa";
2603 case DW_CFA_def_cfa_register:
2604 return "DW_CFA_def_cfa_register";
2605 case DW_CFA_def_cfa_offset:
2606 return "DW_CFA_def_cfa_offset";
2607
2608 /* DWARF 3 */
2609 case DW_CFA_def_cfa_expression:
2610 return "DW_CFA_def_cfa_expression";
2611 case DW_CFA_expression:
2612 return "DW_CFA_expression";
2613 case DW_CFA_offset_extended_sf:
2614 return "DW_CFA_offset_extended_sf";
2615 case DW_CFA_def_cfa_sf:
2616 return "DW_CFA_def_cfa_sf";
2617 case DW_CFA_def_cfa_offset_sf:
2618 return "DW_CFA_def_cfa_offset_sf";
2619
2620 /* SGI/MIPS specific */
2621 case DW_CFA_MIPS_advance_loc8:
2622 return "DW_CFA_MIPS_advance_loc8";
2623
2624 /* GNU extensions */
2625 case DW_CFA_GNU_window_save:
2626 return "DW_CFA_GNU_window_save";
2627 case DW_CFA_GNU_args_size:
2628 return "DW_CFA_GNU_args_size";
2629 case DW_CFA_GNU_negative_offset_extended:
2630 return "DW_CFA_GNU_negative_offset_extended";
2631
2632 default:
2633 return "DW_CFA_<unknown>";
2634 }
2635 }
2636
2637 /* This routine will generate the correct assembly data for a location
2638 description based on a cfi entry with a complex address. */
2639
2640 static void
2641 output_cfa_loc (dw_cfi_ref cfi, int for_eh)
2642 {
2643 dw_loc_descr_ref loc;
2644 unsigned long size;
2645
2646 if (cfi->dw_cfi_opc == DW_CFA_expression)
2647 {
2648 unsigned r =
2649 DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
2650 dw2_asm_output_data (1, r, NULL);
2651 loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
2652 }
2653 else
2654 loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;
2655
2656 /* Output the size of the block. */
2657 size = size_of_locs (loc);
2658 dw2_asm_output_data_uleb128 (size, NULL);
2659
2660 /* Now output the operations themselves. */
2661 output_loc_sequence (loc, for_eh);
2662 }
2663
2664 /* Similar, but used for .cfi_escape. */
2665
2666 static void
2667 output_cfa_loc_raw (dw_cfi_ref cfi)
2668 {
2669 dw_loc_descr_ref loc;
2670 unsigned long size;
2671
2672 if (cfi->dw_cfi_opc == DW_CFA_expression)
2673 {
2674 unsigned r =
2675 DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
2676 fprintf (asm_out_file, "%#x,", r);
2677 loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
2678 }
2679 else
2680 loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;
2681
2682 /* Output the size of the block. */
2683 size = size_of_locs (loc);
2684 dw2_asm_output_data_uleb128_raw (size);
2685 fputc (',', asm_out_file);
2686
2687 /* Now output the operations themselves. */
2688 output_loc_sequence_raw (loc);
2689 }
2690
/* Output a Call Frame Information opcode and its operand(s) as binary
   data into the current frame section.  FDE supplies (and receives)
   the current location label used to compute advance deltas.  */

void
output_cfi (dw_cfi_ref cfi, dw_fde_ref fde, int for_eh)
{
  unsigned long r;
  HOST_WIDE_INT off;

  /* DW_CFA_advance_loc, DW_CFA_offset and DW_CFA_restore pack their
     first operand into the low six bits of the opcode byte.  */
  if (cfi->dw_cfi_opc == DW_CFA_advance_loc)
    dw2_asm_output_data (1, (cfi->dw_cfi_opc
			     | (cfi->dw_cfi_oprnd1.dw_cfi_offset & 0x3f)),
			 "DW_CFA_advance_loc " HOST_WIDE_INT_PRINT_HEX,
			 ((unsigned HOST_WIDE_INT)
			  cfi->dw_cfi_oprnd1.dw_cfi_offset));
  else if (cfi->dw_cfi_opc == DW_CFA_offset)
    {
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
      dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
			   "DW_CFA_offset, column %#lx", r);
      /* The offset operand is factored by the data alignment.  */
      off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
      dw2_asm_output_data_uleb128 (off, NULL);
    }
  else if (cfi->dw_cfi_opc == DW_CFA_restore)
    {
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
      dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
			   "DW_CFA_restore, column %#lx", r);
    }
  else
    {
      /* All other opcodes occupy a full byte, followed by operands.  */
      dw2_asm_output_data (1, cfi->dw_cfi_opc,
			   "%s", dwarf_cfi_name (cfi->dw_cfi_opc));

      switch (cfi->dw_cfi_opc)
	{
	case DW_CFA_set_loc:
	  if (for_eh)
	    dw2_asm_output_encoded_addr_rtx (
		ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/1, /*global=*/0),
		gen_rtx_SYMBOL_REF (Pmode, cfi->dw_cfi_oprnd1.dw_cfi_addr),
		false, NULL);
	  else
	    dw2_asm_output_addr (DWARF2_ADDR_SIZE,
				 cfi->dw_cfi_oprnd1.dw_cfi_addr, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	/* The advance opcodes emit a delta from the previously recorded
	   label, then record the new label as current.  */
	case DW_CFA_advance_loc1:
	  dw2_asm_output_delta (1, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_advance_loc2:
	  dw2_asm_output_delta (2, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_advance_loc4:
	  dw2_asm_output_delta (4, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_MIPS_advance_loc8:
	  dw2_asm_output_delta (8, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_offset_extended:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_uleb128 (off, NULL);
	  break;

	case DW_CFA_def_cfa:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  /* Note: the def_cfa offset is NOT data-alignment factored.  */
	  dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd2.dw_cfi_offset, NULL);
	  break;

	case DW_CFA_offset_extended_sf:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);
	  break;

	case DW_CFA_def_cfa_sf:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);
	  break;

	/* Opcodes whose only operand is a register number.  */
	case DW_CFA_restore_extended:
	case DW_CFA_undefined:
	case DW_CFA_same_value:
	case DW_CFA_def_cfa_register:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  break;

	case DW_CFA_register:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  break;

	case DW_CFA_def_cfa_offset:
	case DW_CFA_GNU_args_size:
	  dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd1.dw_cfi_offset, NULL);
	  break;

	case DW_CFA_def_cfa_offset_sf:
	  off = div_data_align (cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);
	  break;

	case DW_CFA_GNU_window_save:
	  /* No operands.  */
	  break;

	case DW_CFA_def_cfa_expression:
	case DW_CFA_expression:
	  output_cfa_loc (cfi, for_eh);
	  break;

	case DW_CFA_GNU_negative_offset_extended:
	  /* Obsoleted by DW_CFA_offset_extended_sf.  */
	  gcc_unreachable ();

	default:
	  break;
	}
    }
}
2831
/* Similar, but do it via assembler directives instead.  F is either
   asm_out_file (real output) or a dump file (debugging output); a few
   cases print a pseudo-directive when F is a dump file.  */

void
output_cfi_directive (FILE *f, dw_cfi_ref cfi)
{
  unsigned long r, r2;

  switch (cfi->dw_cfi_opc)
    {
    case DW_CFA_advance_loc:
    case DW_CFA_advance_loc1:
    case DW_CFA_advance_loc2:
    case DW_CFA_advance_loc4:
    case DW_CFA_MIPS_advance_loc8:
    case DW_CFA_set_loc:
      /* Should only be created in a code path not followed when emitting
	 via directives.  The assembler is going to take care of this for
	 us.  But this routines is also used for debugging dumps, so
	 print something.  */
      gcc_assert (f != asm_out_file);
      fprintf (f, "\t.cfi_advance_loc\n");
      break;

    case DW_CFA_offset:
    case DW_CFA_offset_extended:
    case DW_CFA_offset_extended_sf:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_offset %lu, "HOST_WIDE_INT_PRINT_DEC"\n",
	       r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
      break;

    case DW_CFA_restore:
    case DW_CFA_restore_extended:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_restore %lu\n", r);
      break;

    case DW_CFA_undefined:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_undefined %lu\n", r);
      break;

    case DW_CFA_same_value:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_same_value %lu\n", r);
      break;

    case DW_CFA_def_cfa:
    case DW_CFA_def_cfa_sf:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_def_cfa %lu, "HOST_WIDE_INT_PRINT_DEC"\n",
	       r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
      break;

    case DW_CFA_def_cfa_register:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_def_cfa_register %lu\n", r);
      break;

    case DW_CFA_register:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      r2 = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_register %lu, %lu\n", r, r2);
      break;

    case DW_CFA_def_cfa_offset:
    case DW_CFA_def_cfa_offset_sf:
      fprintf (f, "\t.cfi_def_cfa_offset "
	       HOST_WIDE_INT_PRINT_DEC"\n",
	       cfi->dw_cfi_oprnd1.dw_cfi_offset);
      break;

    case DW_CFA_remember_state:
      fprintf (f, "\t.cfi_remember_state\n");
      break;
    case DW_CFA_restore_state:
      fprintf (f, "\t.cfi_restore_state\n");
      break;

    case DW_CFA_GNU_args_size:
      /* There is no directive for this opcode; when writing real
	 assembly it is emitted as a raw .cfi_escape byte sequence.  */
      if (f == asm_out_file)
	{
	  fprintf (f, "\t.cfi_escape %#x,", DW_CFA_GNU_args_size);
	  dw2_asm_output_data_uleb128_raw (cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  if (flag_debug_asm)
	    fprintf (f, "\t%s args_size "HOST_WIDE_INT_PRINT_DEC,
		     ASM_COMMENT_START, cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  fputc ('\n', f);
	}
      else
	{
	  fprintf (f, "\t.cfi_GNU_args_size "HOST_WIDE_INT_PRINT_DEC "\n",
		   cfi->dw_cfi_oprnd1.dw_cfi_offset);
	}
      break;

    case DW_CFA_GNU_window_save:
      fprintf (f, "\t.cfi_window_save\n");
      break;

    case DW_CFA_def_cfa_expression:
      if (f != asm_out_file)
	{
	  fprintf (f, "\t.cfi_def_cfa_expression ...\n");
	  break;
	}
      /* FALLTHRU */
    case DW_CFA_expression:
      if (f != asm_out_file)
	{
	  fprintf (f, "\t.cfi_cfa_expression ...\n");
	  break;
	}
      /* Expressions also go out as a raw .cfi_escape sequence.  */
      fprintf (f, "\t.cfi_escape %#x,", cfi->dw_cfi_opc);
      output_cfa_loc_raw (cfi);
      fputc ('\n', f);
      break;

    default:
      gcc_unreachable ();
    }
}
2954
2955 void
2956 dwarf2out_emit_cfi (dw_cfi_ref cfi)
2957 {
2958 if (dwarf2out_do_cfi_asm ())
2959 output_cfi_directive (asm_out_file, cfi);
2960 }
2961
/* Output CFIs from VEC, up to index UPTO, to bring current FDE to the
   same state as after executing CFIs in CFI chain.  DO_CFI_ASM is
   true if .cfi_* directives shall be emitted, false otherwise.  If it
   is false, FDE and FOR_EH are the other arguments to pass to
   output_cfi.

   The loop below condenses the CFI stream: per-register rows keep only
   their last setting, CFA register/offset changes are merged, and
   location advances are dropped entirely.  */

void
output_cfis (cfi_vec vec, int upto, bool do_cfi_asm,
	     dw_fde_ref fde, bool for_eh)
{
  int ix;
  struct dw_cfi_struct cfi_buf;
  dw_cfi_ref cfi2;
  dw_cfi_ref cfi_args_size = NULL, cfi_cfa = NULL, cfi_cfa_offset = NULL;
  /* REGS[N] holds the latest CFI seen for register column N.  */
  VEC(dw_cfi_ref, heap) *regs = VEC_alloc (dw_cfi_ref, heap, 32);
  unsigned int len, idx;

  /* Iterate one slot past UPTO; the extra iteration (CFI == NULL)
     drives the final flush in the DW_CFA_nop case below.  */
  for (ix = 0; ix < upto + 1; ix++)
    {
      dw_cfi_ref cfi = ix < upto ? VEC_index (dw_cfi_ref, vec, ix) : NULL;
      switch (cfi ? cfi->dw_cfi_opc : DW_CFA_nop)
	{
	case DW_CFA_advance_loc:
	case DW_CFA_advance_loc1:
	case DW_CFA_advance_loc2:
	case DW_CFA_advance_loc4:
	case DW_CFA_MIPS_advance_loc8:
	case DW_CFA_set_loc:
	  /* All advances should be ignored.  */
	  break;
	case DW_CFA_remember_state:
	  {
	    dw_cfi_ref args_size = cfi_args_size;

	    /* Skip everything between .cfi_remember_state and
	       .cfi_restore_state.  */
	    ix++;
	    if (ix == upto)
	      goto flush_all;

	    for (; ix < upto; ix++)
	      {
		cfi2 = VEC_index (dw_cfi_ref, vec, ix);
		if (cfi2->dw_cfi_opc == DW_CFA_restore_state)
		  break;
		else if (cfi2->dw_cfi_opc == DW_CFA_GNU_args_size)
		  args_size = cfi2;
		else
		  /* Nested remember_state is not expected here.  */
		  gcc_assert (cfi2->dw_cfi_opc != DW_CFA_remember_state);
	      }

	    cfi_args_size = args_size;
	    break;
	  }
	case DW_CFA_GNU_args_size:
	  cfi_args_size = cfi;
	  break;
	case DW_CFA_GNU_window_save:
	  goto flush_all;
	case DW_CFA_offset:
	case DW_CFA_offset_extended:
	case DW_CFA_offset_extended_sf:
	case DW_CFA_restore:
	case DW_CFA_restore_extended:
	case DW_CFA_undefined:
	case DW_CFA_same_value:
	case DW_CFA_register:
	case DW_CFA_val_offset:
	case DW_CFA_val_offset_sf:
	case DW_CFA_expression:
	case DW_CFA_val_expression:
	case DW_CFA_GNU_negative_offset_extended:
	  /* Record the latest state for this register column, growing
	     the REGS vector on demand.  */
	  if (VEC_length (dw_cfi_ref, regs)
	      <= cfi->dw_cfi_oprnd1.dw_cfi_reg_num)
	    VEC_safe_grow_cleared (dw_cfi_ref, heap, regs,
				   cfi->dw_cfi_oprnd1.dw_cfi_reg_num + 1);
	  VEC_replace (dw_cfi_ref, regs, cfi->dw_cfi_oprnd1.dw_cfi_reg_num,
		       cfi);
	  break;
	case DW_CFA_def_cfa:
	case DW_CFA_def_cfa_sf:
	case DW_CFA_def_cfa_expression:
	  /* These set both the CFA register and its offset.  */
	  cfi_cfa = cfi;
	  cfi_cfa_offset = cfi;
	  break;
	case DW_CFA_def_cfa_register:
	  cfi_cfa = cfi;
	  break;
	case DW_CFA_def_cfa_offset:
	case DW_CFA_def_cfa_offset_sf:
	  cfi_cfa_offset = cfi;
	  break;
	case DW_CFA_nop:
	  gcc_assert (cfi == NULL);
	flush_all:
	  /* Replay the recorded per-register rows, clearing each slot
	     as it is consumed; plain restores need no replay.  */
	  len = VEC_length (dw_cfi_ref, regs);
	  for (idx = 0; idx < len; idx++)
	    {
	      cfi2 = VEC_replace (dw_cfi_ref, regs, idx, NULL);
	      if (cfi2 != NULL
		  && cfi2->dw_cfi_opc != DW_CFA_restore
		  && cfi2->dw_cfi_opc != DW_CFA_restore_extended)
		{
		  if (do_cfi_asm)
		    output_cfi_directive (asm_out_file, cfi2);
		  else
		    output_cfi (cfi2, fde, for_eh);
		}
	    }
	  /* Merge a pending CFA-register change with a pending
	     CFA-offset change into a single def_cfa, built in
	     CFI_BUF.  */
	  if (cfi_cfa && cfi_cfa_offset && cfi_cfa_offset != cfi_cfa)
	    {
	      gcc_assert (cfi_cfa->dw_cfi_opc != DW_CFA_def_cfa_expression);
	      cfi_buf = *cfi_cfa;
	      switch (cfi_cfa_offset->dw_cfi_opc)
		{
		case DW_CFA_def_cfa_offset:
		  cfi_buf.dw_cfi_opc = DW_CFA_def_cfa;
		  cfi_buf.dw_cfi_oprnd2 = cfi_cfa_offset->dw_cfi_oprnd1;
		  break;
		case DW_CFA_def_cfa_offset_sf:
		  cfi_buf.dw_cfi_opc = DW_CFA_def_cfa_sf;
		  cfi_buf.dw_cfi_oprnd2 = cfi_cfa_offset->dw_cfi_oprnd1;
		  break;
		case DW_CFA_def_cfa:
		case DW_CFA_def_cfa_sf:
		  cfi_buf.dw_cfi_opc = cfi_cfa_offset->dw_cfi_opc;
		  cfi_buf.dw_cfi_oprnd2 = cfi_cfa_offset->dw_cfi_oprnd2;
		  break;
		default:
		  gcc_unreachable ();
		}
	      cfi_cfa = &cfi_buf;
	    }
	  else if (cfi_cfa_offset)
	    cfi_cfa = cfi_cfa_offset;
	  if (cfi_cfa)
	    {
	      if (do_cfi_asm)
		output_cfi_directive (asm_out_file, cfi_cfa);
	      else
		output_cfi (cfi_cfa, fde, for_eh);
	    }
	  cfi_cfa = NULL;
	  cfi_cfa_offset = NULL;
	  /* Emit a pending args_size only when it is non-zero.  */
	  if (cfi_args_size
	      && cfi_args_size->dw_cfi_oprnd1.dw_cfi_offset)
	    {
	      if (do_cfi_asm)
		output_cfi_directive (asm_out_file, cfi_args_size);
	      else
		output_cfi (cfi_args_size, fde, for_eh);
	    }
	  cfi_args_size = NULL;
	  if (cfi == NULL)
	    {
	      /* Final flush -- all done.  */
	      VEC_free (dw_cfi_ref, heap, regs);
	      return;
	    }
	  else if (do_cfi_asm)
	    output_cfi_directive (asm_out_file, cfi);
	  else
	    output_cfi (cfi, fde, for_eh);
	  break;
	default:
	  gcc_unreachable ();
	}
    }
}
3130 \f
3131
/* Cached result of dwarf2out_do_cfi_asm, preserved across PCH (it is
   set and read in that function, and also consulted as a shortcut by
   dwarf2out_do_frame below).
   This variable is tri-state, with 0 unset, >0 true, <0 false.  */
static GTY(()) signed char saved_do_cfi_asm = 0;
3135
3136 /* Decide whether we want to emit frame unwind information for the current
3137 translation unit. */
3138
3139 bool
3140 dwarf2out_do_frame (void)
3141 {
3142 /* We want to emit correct CFA location expressions or lists, so we
3143 have to return true if we're going to output debug info, even if
3144 we're not going to output frame or unwind info. */
3145 if (write_symbols == DWARF2_DEBUG || write_symbols == VMS_AND_DWARF2_DEBUG)
3146 return true;
3147
3148 if (saved_do_cfi_asm > 0)
3149 return true;
3150
3151 if (targetm.debug_unwind_info () == UI_DWARF2)
3152 return true;
3153
3154 if ((flag_unwind_tables || flag_exceptions)
3155 && targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
3156 return true;
3157
3158 return false;
3159 }
3160
/* Decide whether to emit frame unwind via assembler directives.
   The answer is computed once and cached in saved_do_cfi_asm.  */

bool
dwarf2out_do_cfi_asm (void)
{
  int enc;

#ifdef MIPS_DEBUGGING_INFO
  /* NOTE(review): CFI directives are unconditionally disabled for
     MIPS debugging info; presumably they conflict with that debug
     format -- confirm before changing.  */
  return false;
#endif

  /* Return the cached answer if we have been here before.  */
  if (saved_do_cfi_asm != 0)
    return saved_do_cfi_asm > 0;

  /* Assume failure for a moment.  */
  saved_do_cfi_asm = -1;

  if (!flag_dwarf2_cfi_asm || !dwarf2out_do_frame ())
    return false;
  if (!HAVE_GAS_CFI_PERSONALITY_DIRECTIVE)
    return false;

  /* Make sure the personality encoding is one the assembler can support.
     In particular, aligned addresses can't be handled.  */
  enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/2,/*global=*/1);
  if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
    return false;
  enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0,/*global=*/0);
  if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
    return false;

  /* If we can't get the assembler to emit only .debug_frame, and we don't need
     dwarf2 unwind info for exceptions, then emit .debug_frame by hand.  */
  if (!HAVE_GAS_CFI_SECTIONS_DIRECTIVE
      && !flag_unwind_tables && !flag_exceptions
      && targetm_common.except_unwind_info (&global_options) != UI_DWARF2)
    return false;

  /* Success!  */
  saved_do_cfi_asm = 1;
  return true;
}
3203
3204 static bool
3205 gate_dwarf2_frame (void)
3206 {
3207 #ifndef HAVE_prologue
3208 /* Targets which still implement the prologue in assembler text
3209 cannot use the generic dwarf2 unwinding. */
3210 return false;
3211 #endif
3212
3213 /* ??? What to do for UI_TARGET unwinding? They might be able to benefit
3214 from the optimized shrink-wrapping annotations that we will compute.
3215 For now, only produce the CFI notes for dwarf2. */
3216 return dwarf2out_do_frame ();
3217 }
3218
/* Pass descriptor for the CFI-note generation pass (timed under
   TV_FINAL; gated and executed by the functions above).  */
struct rtl_opt_pass pass_dwarf2_frame =
{
 {
  RTL_PASS,
  "dwarf2",			/* name */
  gate_dwarf2_frame,		/* gate */
  execute_dwarf2_frame,		/* execute */
  NULL,				/* sub */
  NULL,				/* next */
  0,				/* static_pass_number */
  TV_FINAL,			/* tv_id */
  0,				/* properties_required */
  0,				/* properties_provided */
  0,				/* properties_destroyed */
  0,				/* todo_flags_start */
  0				/* todo_flags_finish */
 }
};
3237
3238 #include "gt-dwarf2cfi.h"