1 /* Dwarf2 Call Frame Information helper routines.
2 Copyright (C) 1992, 1993, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
3 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
24 #include "coretypes.h"
31 #include "dwarf2out.h"
32 #include "dwarf2asm.h"
36 #include "common/common-target.h"
37 #include "tree-pass.h"
39 #include "except.h" /* expand_builtin_dwarf_sp_column */
40 #include "expr.h" /* init_return_column_size */
41 #include "regs.h" /* expand_builtin_init_dwarf_reg_sizes */
42 #include "output.h" /* asm_out_file */
43 #include "debug.h" /* dwarf2out_do_frame, dwarf2out_do_cfi_asm */
46 /* ??? Poison these here until it can be done generically. They've been
47 totally replaced in this file; make sure it stays that way. */
48 #undef DWARF2_UNWIND_INFO
49 #undef DWARF2_FRAME_INFO
50 #if (GCC_VERSION >= 3000)
51 #pragma GCC poison DWARF2_UNWIND_INFO DWARF2_FRAME_INFO
54 #ifndef INCOMING_RETURN_ADDR_RTX
55 #define INCOMING_RETURN_ADDR_RTX (gcc_unreachable (), NULL_RTX)
58 /* Maximum size (in bytes) of an artificially generated label. */
59 #define MAX_ARTIFICIAL_LABEL_BYTES 30
61 /* Short-hand for commonly used register numbers. */
62 #define DW_STACK_POINTER_REGNUM dwarf_frame_regnum (STACK_POINTER_REGNUM)
63 #define DW_FRAME_POINTER_REGNUM dwarf_frame_regnum (HARD_FRAME_POINTER_REGNUM)
65 /* A vector of call frame insns for the CIE. */
68 static GTY(()) unsigned long dwarf2out_cfi_label_num
;
70 /* The insn after which a new CFI note should be emitted. */
73 /* When non-null, add_cfi will add the CFI to this vector. */
74 static cfi_vec
*add_cfi_vec
;
76 /* True if remember_state should be emitted before following CFI directive. */
77 static bool emit_cfa_remember
;
79 /* True if any CFI directives were emitted at the current insn. */
80 static bool any_cfis_emitted
;
83 static void dwarf2out_cfi_begin_epilogue (rtx insn
);
84 static void dwarf2out_frame_debug_restore_state (void);
87 /* Hook used by __throw. */
90 expand_builtin_dwarf_sp_column (void)
92 unsigned int dwarf_regnum
= DW_STACK_POINTER_REGNUM
;
93 return GEN_INT (DWARF2_FRAME_REG_OUT (dwarf_regnum
, 1));
96 /* MEM is a memory reference for the register size table, each element of
97 which has mode MODE. Initialize column C as a return address column. */
100 init_return_column_size (enum machine_mode mode
, rtx mem
, unsigned int c
)
102 HOST_WIDE_INT offset
= c
* GET_MODE_SIZE (mode
);
103 HOST_WIDE_INT size
= GET_MODE_SIZE (Pmode
);
104 emit_move_insn (adjust_address (mem
, mode
, offset
), GEN_INT (size
));
107 /* Generate code to initialize the register size table. */
110 expand_builtin_init_dwarf_reg_sizes (tree address
)
113 enum machine_mode mode
= TYPE_MODE (char_type_node
);
114 rtx addr
= expand_normal (address
);
115 rtx mem
= gen_rtx_MEM (BLKmode
, addr
);
116 bool wrote_return_column
= false;
118 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
120 unsigned int rnum
= DWARF2_FRAME_REG_OUT (dwarf_frame_regnum (i
), 1);
122 if (rnum
< DWARF_FRAME_REGISTERS
)
124 HOST_WIDE_INT offset
= rnum
* GET_MODE_SIZE (mode
);
125 enum machine_mode save_mode
= reg_raw_mode
[i
];
128 if (HARD_REGNO_CALL_PART_CLOBBERED (i
, save_mode
))
129 save_mode
= choose_hard_reg_mode (i
, 1, true);
130 if (dwarf_frame_regnum (i
) == DWARF_FRAME_RETURN_COLUMN
)
132 if (save_mode
== VOIDmode
)
134 wrote_return_column
= true;
136 size
= GET_MODE_SIZE (save_mode
);
140 emit_move_insn (adjust_address (mem
, mode
, offset
),
141 gen_int_mode (size
, mode
));
145 if (!wrote_return_column
)
146 init_return_column_size (mode
, mem
, DWARF_FRAME_RETURN_COLUMN
);
148 #ifdef DWARF_ALT_FRAME_RETURN_COLUMN
149 init_return_column_size (mode
, mem
, DWARF_ALT_FRAME_RETURN_COLUMN
);
152 targetm
.init_dwarf_reg_sizes_extra (address
);
155 /* Divide OFF by DWARF_CIE_DATA_ALIGNMENT, asserting no remainder. */
157 static inline HOST_WIDE_INT
158 div_data_align (HOST_WIDE_INT off
)
160 HOST_WIDE_INT r
= off
/ DWARF_CIE_DATA_ALIGNMENT
;
161 gcc_assert (r
* DWARF_CIE_DATA_ALIGNMENT
== off
);
165 /* Return true if we need a signed version of a given opcode
166 (e.g. DW_CFA_offset_extended_sf vs DW_CFA_offset_extended). */
169 need_data_align_sf_opcode (HOST_WIDE_INT off
)
171 return DWARF_CIE_DATA_ALIGNMENT
< 0 ? off
> 0 : off
< 0;
174 /* Return a pointer to a newly allocated Call Frame Instruction. */
176 static inline dw_cfi_ref
179 dw_cfi_ref cfi
= ggc_alloc_dw_cfi_node ();
181 cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
= 0;
182 cfi
->dw_cfi_oprnd2
.dw_cfi_reg_num
= 0;
187 /* Generate a new label for the CFI info to refer to. */
190 dwarf2out_cfi_label (void)
192 int num
= dwarf2out_cfi_label_num
++;
195 ASM_GENERATE_INTERNAL_LABEL (label
, "LCFI", num
);
197 return xstrdup (label
);
200 /* Add CFI either to the current insn stream or to a vector, or both. */
203 add_cfi (dw_cfi_ref cfi
)
205 if (emit_cfa_remember
)
207 dw_cfi_ref cfi_remember
;
209 /* Emit the state save. */
210 emit_cfa_remember
= false;
211 cfi_remember
= new_cfi ();
212 cfi_remember
->dw_cfi_opc
= DW_CFA_remember_state
;
213 add_cfi (cfi_remember
);
216 any_cfis_emitted
= true;
217 if (cfi_insn
!= NULL
)
219 cfi_insn
= emit_note_after (NOTE_INSN_CFI
, cfi_insn
);
220 NOTE_CFI (cfi_insn
) = cfi
;
222 if (add_cfi_vec
!= NULL
)
223 VEC_safe_push (dw_cfi_ref
, gc
, *add_cfi_vec
, cfi
);
226 /* This function fills in aa dw_cfa_location structure from a dwarf location
227 descriptor sequence. */
230 get_cfa_from_loc_descr (dw_cfa_location
*cfa
, struct dw_loc_descr_struct
*loc
)
232 struct dw_loc_descr_struct
*ptr
;
234 cfa
->base_offset
= 0;
238 for (ptr
= loc
; ptr
!= NULL
; ptr
= ptr
->dw_loc_next
)
240 enum dwarf_location_atom op
= ptr
->dw_loc_opc
;
276 cfa
->reg
= op
- DW_OP_reg0
;
279 cfa
->reg
= ptr
->dw_loc_oprnd1
.v
.val_int
;
313 cfa
->reg
= op
- DW_OP_breg0
;
314 cfa
->base_offset
= ptr
->dw_loc_oprnd1
.v
.val_int
;
317 cfa
->reg
= ptr
->dw_loc_oprnd1
.v
.val_int
;
318 cfa
->base_offset
= ptr
->dw_loc_oprnd2
.v
.val_int
;
323 case DW_OP_plus_uconst
:
324 cfa
->offset
= ptr
->dw_loc_oprnd1
.v
.val_unsigned
;
332 /* Find the previous value for the CFA, iteratively. CFI is the opcode
333 to interpret, *LOC will be updated as necessary, *REMEMBER is used for
334 one level of remember/restore state processing. */
337 lookup_cfa_1 (dw_cfi_ref cfi
, dw_cfa_location
*loc
, dw_cfa_location
*remember
)
339 switch (cfi
->dw_cfi_opc
)
341 case DW_CFA_def_cfa_offset
:
342 case DW_CFA_def_cfa_offset_sf
:
343 loc
->offset
= cfi
->dw_cfi_oprnd1
.dw_cfi_offset
;
345 case DW_CFA_def_cfa_register
:
346 loc
->reg
= cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
;
349 case DW_CFA_def_cfa_sf
:
350 loc
->reg
= cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
;
351 loc
->offset
= cfi
->dw_cfi_oprnd2
.dw_cfi_offset
;
353 case DW_CFA_def_cfa_expression
:
354 get_cfa_from_loc_descr (loc
, cfi
->dw_cfi_oprnd1
.dw_cfi_loc
);
357 case DW_CFA_remember_state
:
358 gcc_assert (!remember
->in_use
);
360 remember
->in_use
= 1;
362 case DW_CFA_restore_state
:
363 gcc_assert (remember
->in_use
);
365 remember
->in_use
= 0;
373 /* The current rule for calculating the DWARF2 canonical frame address. */
374 static dw_cfa_location cfa
;
376 /* A copy of the CFA, for comparison purposes. */
377 static dw_cfa_location old_cfa
;
379 /* The register used for saving registers to the stack, and its offset
381 static dw_cfa_location cfa_store
;
383 /* The current save location around an epilogue. */
384 static dw_cfa_location cfa_remember
;
386 /* Like cfa_remember, but a copy of old_cfa. */
387 static dw_cfa_location old_cfa_remember
;
389 /* The running total of the size of arguments pushed onto the stack. */
390 static HOST_WIDE_INT args_size
;
392 /* The last args_size we actually output. */
393 static HOST_WIDE_INT old_args_size
;
395 /* Determine if two dw_cfa_location structures define the same data. */
398 cfa_equal_p (const dw_cfa_location
*loc1
, const dw_cfa_location
*loc2
)
400 return (loc1
->reg
== loc2
->reg
401 && loc1
->offset
== loc2
->offset
402 && loc1
->indirect
== loc2
->indirect
403 && (loc1
->indirect
== 0
404 || loc1
->base_offset
== loc2
->base_offset
));
407 /* This routine does the actual work. The CFA is now calculated from
408 the dw_cfa_location structure. */
411 def_cfa_1 (dw_cfa_location
*loc_p
)
419 if (cfa_store
.reg
== loc
.reg
&& loc
.indirect
== 0)
420 cfa_store
.offset
= loc
.offset
;
422 /* If nothing changed, no need to issue any call frame instructions. */
423 if (cfa_equal_p (&loc
, &old_cfa
))
428 if (loc
.reg
== old_cfa
.reg
&& !loc
.indirect
&& !old_cfa
.indirect
)
430 /* Construct a "DW_CFA_def_cfa_offset <offset>" instruction, indicating
431 the CFA register did not change but the offset did. The data
432 factoring for DW_CFA_def_cfa_offset_sf happens in output_cfi, or
433 in the assembler via the .cfi_def_cfa_offset directive. */
435 cfi
->dw_cfi_opc
= DW_CFA_def_cfa_offset_sf
;
437 cfi
->dw_cfi_opc
= DW_CFA_def_cfa_offset
;
438 cfi
->dw_cfi_oprnd1
.dw_cfi_offset
= loc
.offset
;
441 #ifndef MIPS_DEBUGGING_INFO /* SGI dbx thinks this means no offset. */
442 else if (loc
.offset
== old_cfa
.offset
443 && old_cfa
.reg
!= INVALID_REGNUM
445 && !old_cfa
.indirect
)
447 /* Construct a "DW_CFA_def_cfa_register <register>" instruction,
448 indicating the CFA register has changed to <register> but the
449 offset has not changed. */
450 cfi
->dw_cfi_opc
= DW_CFA_def_cfa_register
;
451 cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
= loc
.reg
;
455 else if (loc
.indirect
== 0)
457 /* Construct a "DW_CFA_def_cfa <register> <offset>" instruction,
458 indicating the CFA register has changed to <register> with
459 the specified offset. The data factoring for DW_CFA_def_cfa_sf
460 happens in output_cfi, or in the assembler via the .cfi_def_cfa
463 cfi
->dw_cfi_opc
= DW_CFA_def_cfa_sf
;
465 cfi
->dw_cfi_opc
= DW_CFA_def_cfa
;
466 cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
= loc
.reg
;
467 cfi
->dw_cfi_oprnd2
.dw_cfi_offset
= loc
.offset
;
471 /* Construct a DW_CFA_def_cfa_expression instruction to
472 calculate the CFA using a full location expression since no
473 register-offset pair is available. */
474 struct dw_loc_descr_struct
*loc_list
;
476 cfi
->dw_cfi_opc
= DW_CFA_def_cfa_expression
;
477 loc_list
= build_cfa_loc (&loc
, 0);
478 cfi
->dw_cfi_oprnd1
.dw_cfi_loc
= loc_list
;
485 /* Add the CFI for saving a register. REG is the CFA column number.
486 If SREG is -1, the register is saved at OFFSET from the CFA;
487 otherwise it is saved in SREG. */
490 reg_save (unsigned int reg
, unsigned int sreg
, HOST_WIDE_INT offset
)
492 dw_fde_ref fde
= cfun
? cfun
->fde
: NULL
;
493 dw_cfi_ref cfi
= new_cfi ();
495 cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
= reg
;
497 /* When stack is aligned, store REG using DW_CFA_expression with FP. */
499 && fde
->stack_realign
500 && sreg
== INVALID_REGNUM
)
502 cfi
->dw_cfi_opc
= DW_CFA_expression
;
503 cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
= reg
;
504 cfi
->dw_cfi_oprnd2
.dw_cfi_loc
505 = build_cfa_aligned_loc (&cfa
, offset
, fde
->stack_realignment
);
507 else if (sreg
== INVALID_REGNUM
)
509 if (need_data_align_sf_opcode (offset
))
510 cfi
->dw_cfi_opc
= DW_CFA_offset_extended_sf
;
511 else if (reg
& ~0x3f)
512 cfi
->dw_cfi_opc
= DW_CFA_offset_extended
;
514 cfi
->dw_cfi_opc
= DW_CFA_offset
;
515 cfi
->dw_cfi_oprnd2
.dw_cfi_offset
= offset
;
517 else if (sreg
== reg
)
518 cfi
->dw_cfi_opc
= DW_CFA_same_value
;
521 cfi
->dw_cfi_opc
= DW_CFA_register
;
522 cfi
->dw_cfi_oprnd2
.dw_cfi_reg_num
= sreg
;
528 /* Given a SET, calculate the amount of stack adjustment it
532 stack_adjust_offset (const_rtx pattern
, HOST_WIDE_INT cur_args_size
,
533 HOST_WIDE_INT cur_offset
)
535 const_rtx src
= SET_SRC (pattern
);
536 const_rtx dest
= SET_DEST (pattern
);
537 HOST_WIDE_INT offset
= 0;
540 if (dest
== stack_pointer_rtx
)
542 code
= GET_CODE (src
);
544 /* Assume (set (reg sp) (reg whatever)) sets args_size
546 if (code
== REG
&& src
!= stack_pointer_rtx
)
548 offset
= -cur_args_size
;
549 #ifndef STACK_GROWS_DOWNWARD
552 return offset
- cur_offset
;
555 if (! (code
== PLUS
|| code
== MINUS
)
556 || XEXP (src
, 0) != stack_pointer_rtx
557 || !CONST_INT_P (XEXP (src
, 1)))
560 /* (set (reg sp) (plus (reg sp) (const_int))) */
561 offset
= INTVAL (XEXP (src
, 1));
567 if (MEM_P (src
) && !MEM_P (dest
))
571 /* (set (mem (pre_dec (reg sp))) (foo)) */
572 src
= XEXP (dest
, 0);
573 code
= GET_CODE (src
);
579 if (XEXP (src
, 0) == stack_pointer_rtx
)
581 rtx val
= XEXP (XEXP (src
, 1), 1);
582 /* We handle only adjustments by constant amount. */
583 gcc_assert (GET_CODE (XEXP (src
, 1)) == PLUS
584 && CONST_INT_P (val
));
585 offset
= -INTVAL (val
);
592 if (XEXP (src
, 0) == stack_pointer_rtx
)
594 offset
= GET_MODE_SIZE (GET_MODE (dest
));
601 if (XEXP (src
, 0) == stack_pointer_rtx
)
603 offset
= -GET_MODE_SIZE (GET_MODE (dest
));
618 /* Precomputed args_size for CODE_LABELs and BARRIERs preceeding them,
619 indexed by INSN_UID. */
621 static HOST_WIDE_INT
*barrier_args_size
;
623 /* Helper function for compute_barrier_args_size. Handle one insn. */
626 compute_barrier_args_size_1 (rtx insn
, HOST_WIDE_INT cur_args_size
,
627 VEC (rtx
, heap
) **next
)
629 HOST_WIDE_INT offset
= 0;
632 if (! RTX_FRAME_RELATED_P (insn
))
634 if (prologue_epilogue_contains (insn
))
636 else if (GET_CODE (PATTERN (insn
)) == SET
)
637 offset
= stack_adjust_offset (PATTERN (insn
), cur_args_size
, 0);
638 else if (GET_CODE (PATTERN (insn
)) == PARALLEL
639 || GET_CODE (PATTERN (insn
)) == SEQUENCE
)
641 /* There may be stack adjustments inside compound insns. Search
643 for (i
= XVECLEN (PATTERN (insn
), 0) - 1; i
>= 0; i
--)
644 if (GET_CODE (XVECEXP (PATTERN (insn
), 0, i
)) == SET
)
645 offset
+= stack_adjust_offset (XVECEXP (PATTERN (insn
), 0, i
),
646 cur_args_size
, offset
);
651 rtx expr
= find_reg_note (insn
, REG_FRAME_RELATED_EXPR
, NULL_RTX
);
655 expr
= XEXP (expr
, 0);
656 if (GET_CODE (expr
) == PARALLEL
657 || GET_CODE (expr
) == SEQUENCE
)
658 for (i
= 1; i
< XVECLEN (expr
, 0); i
++)
660 rtx elem
= XVECEXP (expr
, 0, i
);
662 if (GET_CODE (elem
) == SET
&& !RTX_FRAME_RELATED_P (elem
))
663 offset
+= stack_adjust_offset (elem
, cur_args_size
, offset
);
668 #ifndef STACK_GROWS_DOWNWARD
672 cur_args_size
+= offset
;
673 if (cur_args_size
< 0)
678 rtx dest
= JUMP_LABEL (insn
);
682 if (barrier_args_size
[INSN_UID (dest
)] < 0)
684 barrier_args_size
[INSN_UID (dest
)] = cur_args_size
;
685 VEC_safe_push (rtx
, heap
, *next
, dest
);
690 return cur_args_size
;
693 /* Walk the whole function and compute args_size on BARRIERs. */
696 compute_barrier_args_size (void)
698 int max_uid
= get_max_uid (), i
;
700 VEC (rtx
, heap
) *worklist
, *next
, *tmp
;
702 barrier_args_size
= XNEWVEC (HOST_WIDE_INT
, max_uid
);
703 for (i
= 0; i
< max_uid
; i
++)
704 barrier_args_size
[i
] = -1;
706 worklist
= VEC_alloc (rtx
, heap
, 20);
707 next
= VEC_alloc (rtx
, heap
, 20);
709 barrier_args_size
[INSN_UID (insn
)] = 0;
710 VEC_quick_push (rtx
, worklist
, insn
);
713 while (!VEC_empty (rtx
, worklist
))
715 rtx prev
, body
, first_insn
;
716 HOST_WIDE_INT cur_args_size
;
718 first_insn
= insn
= VEC_pop (rtx
, worklist
);
719 cur_args_size
= barrier_args_size
[INSN_UID (insn
)];
720 prev
= prev_nonnote_insn (insn
);
721 if (prev
&& BARRIER_P (prev
))
722 barrier_args_size
[INSN_UID (prev
)] = cur_args_size
;
724 for (; insn
; insn
= NEXT_INSN (insn
))
726 if (INSN_DELETED_P (insn
) || NOTE_P (insn
))
728 if (BARRIER_P (insn
))
733 if (insn
== first_insn
)
735 else if (barrier_args_size
[INSN_UID (insn
)] < 0)
737 barrier_args_size
[INSN_UID (insn
)] = cur_args_size
;
742 /* The insns starting with this label have been
743 already scanned or are in the worklist. */
748 body
= PATTERN (insn
);
749 if (GET_CODE (body
) == SEQUENCE
)
751 HOST_WIDE_INT dest_args_size
= cur_args_size
;
752 for (i
= 1; i
< XVECLEN (body
, 0); i
++)
753 if (INSN_ANNULLED_BRANCH_P (XVECEXP (body
, 0, 0))
754 && INSN_FROM_TARGET_P (XVECEXP (body
, 0, i
)))
756 = compute_barrier_args_size_1 (XVECEXP (body
, 0, i
),
757 dest_args_size
, &next
);
760 = compute_barrier_args_size_1 (XVECEXP (body
, 0, i
),
761 cur_args_size
, &next
);
763 if (INSN_ANNULLED_BRANCH_P (XVECEXP (body
, 0, 0)))
764 compute_barrier_args_size_1 (XVECEXP (body
, 0, 0),
765 dest_args_size
, &next
);
768 = compute_barrier_args_size_1 (XVECEXP (body
, 0, 0),
769 cur_args_size
, &next
);
773 = compute_barrier_args_size_1 (insn
, cur_args_size
, &next
);
777 if (VEC_empty (rtx
, next
))
780 /* Swap WORKLIST with NEXT and truncate NEXT for next iteration. */
784 VEC_truncate (rtx
, next
, 0);
787 VEC_free (rtx
, heap
, worklist
);
788 VEC_free (rtx
, heap
, next
);
791 /* Add a CFI to update the running total of the size of arguments
792 pushed onto the stack. */
795 dwarf2out_args_size (HOST_WIDE_INT size
)
799 if (size
== old_args_size
)
802 old_args_size
= size
;
805 cfi
->dw_cfi_opc
= DW_CFA_GNU_args_size
;
806 cfi
->dw_cfi_oprnd1
.dw_cfi_offset
= size
;
810 /* Record a stack adjustment of OFFSET bytes. */
813 dwarf2out_stack_adjust (HOST_WIDE_INT offset
)
815 if (cfa
.reg
== DW_STACK_POINTER_REGNUM
)
816 cfa
.offset
+= offset
;
818 if (cfa_store
.reg
== DW_STACK_POINTER_REGNUM
)
819 cfa_store
.offset
+= offset
;
821 if (ACCUMULATE_OUTGOING_ARGS
)
824 #ifndef STACK_GROWS_DOWNWARD
833 if (flag_asynchronous_unwind_tables
)
834 dwarf2out_args_size (args_size
);
837 /* Check INSN to see if it looks like a push or a stack adjustment, and
838 make a note of it if it does. EH uses this information to find out
839 how much extra space it needs to pop off the stack. */
842 dwarf2out_notice_stack_adjust (rtx insn
, bool after_p
)
844 HOST_WIDE_INT offset
;
847 /* Don't handle epilogues at all. Certainly it would be wrong to do so
848 with this function. Proper support would require all frame-related
849 insns to be marked, and to be able to handle saving state around
850 epilogues textually in the middle of the function. */
851 if (prologue_epilogue_contains (insn
))
854 /* If INSN is an instruction from target of an annulled branch, the
855 effects are for the target only and so current argument size
856 shouldn't change at all. */
858 && INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence
, 0, 0))
859 && INSN_FROM_TARGET_P (insn
))
862 /* If only calls can throw, and we have a frame pointer,
863 save up adjustments until we see the CALL_INSN. */
864 if (!flag_asynchronous_unwind_tables
&& cfa
.reg
!= DW_STACK_POINTER_REGNUM
)
866 if (CALL_P (insn
) && !after_p
)
868 /* Extract the size of the args from the CALL rtx itself. */
869 insn
= PATTERN (insn
);
870 if (GET_CODE (insn
) == PARALLEL
)
871 insn
= XVECEXP (insn
, 0, 0);
872 if (GET_CODE (insn
) == SET
)
873 insn
= SET_SRC (insn
);
874 gcc_assert (GET_CODE (insn
) == CALL
);
875 dwarf2out_args_size (INTVAL (XEXP (insn
, 1)));
880 if (CALL_P (insn
) && !after_p
)
882 if (!flag_asynchronous_unwind_tables
)
883 dwarf2out_args_size (args_size
);
886 else if (BARRIER_P (insn
))
888 /* Don't call compute_barrier_args_size () if the only
889 BARRIER is at the end of function. */
890 if (barrier_args_size
== NULL
&& next_nonnote_insn (insn
))
891 compute_barrier_args_size ();
892 if (barrier_args_size
== NULL
)
896 offset
= barrier_args_size
[INSN_UID (insn
)];
902 #ifndef STACK_GROWS_DOWNWARD
906 else if (GET_CODE (PATTERN (insn
)) == SET
)
907 offset
= stack_adjust_offset (PATTERN (insn
), args_size
, 0);
908 else if (GET_CODE (PATTERN (insn
)) == PARALLEL
909 || GET_CODE (PATTERN (insn
)) == SEQUENCE
)
911 /* There may be stack adjustments inside compound insns. Search
913 for (offset
= 0, i
= XVECLEN (PATTERN (insn
), 0) - 1; i
>= 0; i
--)
914 if (GET_CODE (XVECEXP (PATTERN (insn
), 0, i
)) == SET
)
915 offset
+= stack_adjust_offset (XVECEXP (PATTERN (insn
), 0, i
),
924 dwarf2out_stack_adjust (offset
);
927 /* We delay emitting a register save until either (a) we reach the end
928 of the prologue or (b) the register is clobbered. This clusters
929 register saves so that there are fewer pc advances. */
931 struct GTY(()) queued_reg_save
{
932 struct queued_reg_save
*next
;
934 HOST_WIDE_INT cfa_offset
;
938 static GTY(()) struct queued_reg_save
*queued_reg_saves
;
940 /* The caller's ORIG_REG is saved in SAVED_IN_REG. */
941 typedef struct GTY(()) reg_saved_in_data
{
946 DEF_VEC_O (reg_saved_in_data
);
947 DEF_VEC_ALLOC_O (reg_saved_in_data
, gc
);
949 /* A set of registers saved in other registers. This is implemented as
950 a flat array because it normally contains zero or 1 entry, depending
951 on the target. IA-64 is the big spender here, using a maximum of
953 static GTY(()) VEC(reg_saved_in_data
, gc
) *regs_saved_in_regs
;
955 static GTY(()) reg_saved_in_data
*cie_return_save
;
957 /* Short-hand inline for the very common D_F_R (REGNO (x)) operation. */
958 /* ??? This ought to go into dwarf2out.h alongside dwarf_frame_regnum,
959 except that dwarf2out.h is used in places where rtl is prohibited. */
961 static inline unsigned
962 dwf_regno (const_rtx reg
)
964 return dwarf_frame_regnum (REGNO (reg
));
967 /* Compare X and Y for equivalence. The inputs may be REGs or PC_RTX. */
970 compare_reg_or_pc (rtx x
, rtx y
)
972 if (REG_P (x
) && REG_P (y
))
973 return REGNO (x
) == REGNO (y
);
977 /* Record SRC as being saved in DEST. DEST may be null to delete an
978 existing entry. SRC may be a register or PC_RTX. */
981 record_reg_saved_in_reg (rtx dest
, rtx src
)
983 reg_saved_in_data
*elt
;
986 FOR_EACH_VEC_ELT (reg_saved_in_data
, regs_saved_in_regs
, i
, elt
)
987 if (compare_reg_or_pc (elt
->orig_reg
, src
))
990 VEC_unordered_remove(reg_saved_in_data
, regs_saved_in_regs
, i
);
992 elt
->saved_in_reg
= dest
;
999 elt
= VEC_safe_push(reg_saved_in_data
, gc
, regs_saved_in_regs
, NULL
);
1000 elt
->orig_reg
= src
;
1001 elt
->saved_in_reg
= dest
;
1004 /* Add an entry to QUEUED_REG_SAVES saying that REG is now saved at
1005 SREG, or if SREG is NULL then it is saved at OFFSET to the CFA. */
1008 queue_reg_save (rtx reg
, rtx sreg
, HOST_WIDE_INT offset
)
1010 struct queued_reg_save
*q
;
1012 /* Duplicates waste space, but it's also necessary to remove them
1013 for correctness, since the queue gets output in reverse order. */
1014 for (q
= queued_reg_saves
; q
!= NULL
; q
= q
->next
)
1015 if (compare_reg_or_pc (q
->reg
, reg
))
1020 q
= ggc_alloc_queued_reg_save ();
1021 q
->next
= queued_reg_saves
;
1022 queued_reg_saves
= q
;
1026 q
->cfa_offset
= offset
;
1027 q
->saved_reg
= sreg
;
1030 /* Output all the entries in QUEUED_REG_SAVES. */
1033 dwarf2out_flush_queued_reg_saves (void)
1035 struct queued_reg_save
*q
;
1037 for (q
= queued_reg_saves
; q
; q
= q
->next
)
1039 unsigned int reg
, sreg
;
1041 record_reg_saved_in_reg (q
->saved_reg
, q
->reg
);
1043 if (q
->reg
== pc_rtx
)
1044 reg
= DWARF_FRAME_RETURN_COLUMN
;
1046 reg
= dwf_regno (q
->reg
);
1048 sreg
= dwf_regno (q
->saved_reg
);
1050 sreg
= INVALID_REGNUM
;
1051 reg_save (reg
, sreg
, q
->cfa_offset
);
1054 queued_reg_saves
= NULL
;
1057 /* Does INSN clobber any register which QUEUED_REG_SAVES lists a saved
1058 location for? Or, does it clobber a register which we've previously
1059 said that some other register is saved in, and for which we now
1060 have a new location for? */
1063 clobbers_queued_reg_save (const_rtx insn
)
1065 struct queued_reg_save
*q
;
1067 for (q
= queued_reg_saves
; q
; q
= q
->next
)
1070 reg_saved_in_data
*rir
;
1072 if (modified_in_p (q
->reg
, insn
))
1075 FOR_EACH_VEC_ELT (reg_saved_in_data
, regs_saved_in_regs
, i
, rir
)
1076 if (compare_reg_or_pc (q
->reg
, rir
->orig_reg
)
1077 && modified_in_p (rir
->saved_in_reg
, insn
))
1084 /* What register, if any, is currently saved in REG? */
1087 reg_saved_in (rtx reg
)
1089 unsigned int regn
= REGNO (reg
);
1090 struct queued_reg_save
*q
;
1091 reg_saved_in_data
*rir
;
1094 for (q
= queued_reg_saves
; q
; q
= q
->next
)
1095 if (q
->saved_reg
&& regn
== REGNO (q
->saved_reg
))
1098 FOR_EACH_VEC_ELT (reg_saved_in_data
, regs_saved_in_regs
, i
, rir
)
1099 if (regn
== REGNO (rir
->saved_in_reg
))
1100 return rir
->orig_reg
;
1106 /* A temporary register holding an integral value used in adjusting SP
1107 or setting up the store_reg. The "offset" field holds the integer
1108 value, not an offset. */
1109 static dw_cfa_location cfa_temp
;
1111 /* A subroutine of dwarf2out_frame_debug, process a REG_DEF_CFA note. */
1114 dwarf2out_frame_debug_def_cfa (rtx pat
)
1116 memset (&cfa
, 0, sizeof (cfa
));
1118 switch (GET_CODE (pat
))
1121 cfa
.reg
= dwf_regno (XEXP (pat
, 0));
1122 cfa
.offset
= INTVAL (XEXP (pat
, 1));
1126 cfa
.reg
= dwf_regno (pat
);
1131 pat
= XEXP (pat
, 0);
1132 if (GET_CODE (pat
) == PLUS
)
1134 cfa
.base_offset
= INTVAL (XEXP (pat
, 1));
1135 pat
= XEXP (pat
, 0);
1137 cfa
.reg
= dwf_regno (pat
);
1141 /* Recurse and define an expression. */
1148 /* A subroutine of dwarf2out_frame_debug, process a REG_ADJUST_CFA note. */
1151 dwarf2out_frame_debug_adjust_cfa (rtx pat
)
1155 gcc_assert (GET_CODE (pat
) == SET
);
1156 dest
= XEXP (pat
, 0);
1157 src
= XEXP (pat
, 1);
1159 switch (GET_CODE (src
))
1162 gcc_assert (dwf_regno (XEXP (src
, 0)) == cfa
.reg
);
1163 cfa
.offset
-= INTVAL (XEXP (src
, 1));
1173 cfa
.reg
= dwf_regno (dest
);
1174 gcc_assert (cfa
.indirect
== 0);
1179 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_OFFSET note. */
1182 dwarf2out_frame_debug_cfa_offset (rtx set
)
1184 HOST_WIDE_INT offset
;
1185 rtx src
, addr
, span
;
1186 unsigned int sregno
;
1188 src
= XEXP (set
, 1);
1189 addr
= XEXP (set
, 0);
1190 gcc_assert (MEM_P (addr
));
1191 addr
= XEXP (addr
, 0);
1193 /* As documented, only consider extremely simple addresses. */
1194 switch (GET_CODE (addr
))
1197 gcc_assert (dwf_regno (addr
) == cfa
.reg
);
1198 offset
= -cfa
.offset
;
1201 gcc_assert (dwf_regno (XEXP (addr
, 0)) == cfa
.reg
);
1202 offset
= INTVAL (XEXP (addr
, 1)) - cfa
.offset
;
1211 sregno
= DWARF_FRAME_RETURN_COLUMN
;
1215 span
= targetm
.dwarf_register_span (src
);
1216 sregno
= dwf_regno (src
);
1219 /* ??? We'd like to use queue_reg_save, but we need to come up with
1220 a different flushing heuristic for epilogues. */
1222 reg_save (sregno
, INVALID_REGNUM
, offset
);
1225 /* We have a PARALLEL describing where the contents of SRC live.
1226 Queue register saves for each piece of the PARALLEL. */
1229 HOST_WIDE_INT span_offset
= offset
;
1231 gcc_assert (GET_CODE (span
) == PARALLEL
);
1233 limit
= XVECLEN (span
, 0);
1234 for (par_index
= 0; par_index
< limit
; par_index
++)
1236 rtx elem
= XVECEXP (span
, 0, par_index
);
1238 sregno
= dwf_regno (src
);
1239 reg_save (sregno
, INVALID_REGNUM
, span_offset
);
1240 span_offset
+= GET_MODE_SIZE (GET_MODE (elem
));
1245 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_REGISTER note. */
1248 dwarf2out_frame_debug_cfa_register (rtx set
)
1251 unsigned sregno
, dregno
;
1253 src
= XEXP (set
, 1);
1254 dest
= XEXP (set
, 0);
1256 record_reg_saved_in_reg (dest
, src
);
1258 sregno
= DWARF_FRAME_RETURN_COLUMN
;
1260 sregno
= dwf_regno (src
);
1262 dregno
= dwf_regno (dest
);
1264 /* ??? We'd like to use queue_reg_save, but we need to come up with
1265 a different flushing heuristic for epilogues. */
1266 reg_save (sregno
, dregno
, 0);
1269 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_EXPRESSION note. */
1272 dwarf2out_frame_debug_cfa_expression (rtx set
)
1274 rtx src
, dest
, span
;
1275 dw_cfi_ref cfi
= new_cfi ();
1277 dest
= SET_DEST (set
);
1278 src
= SET_SRC (set
);
1280 gcc_assert (REG_P (src
));
1281 gcc_assert (MEM_P (dest
));
1283 span
= targetm
.dwarf_register_span (src
);
1286 cfi
->dw_cfi_opc
= DW_CFA_expression
;
1287 cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
= dwf_regno (src
);
1288 cfi
->dw_cfi_oprnd2
.dw_cfi_loc
1289 = mem_loc_descriptor (XEXP (dest
, 0), get_address_mode (dest
),
1290 GET_MODE (dest
), VAR_INIT_STATUS_INITIALIZED
);
1292 /* ??? We'd like to use queue_reg_save, were the interface different,
1293 and, as above, we could manage flushing for epilogues. */
1297 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_RESTORE note. */
1300 dwarf2out_frame_debug_cfa_restore (rtx reg
)
1302 dw_cfi_ref cfi
= new_cfi ();
1303 unsigned int regno
= dwf_regno (reg
);
1305 cfi
->dw_cfi_opc
= (regno
& ~0x3f ? DW_CFA_restore_extended
: DW_CFA_restore
);
1306 cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
= regno
;
1311 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_WINDOW_SAVE.
1312 ??? Perhaps we should note in the CIE where windows are saved (instead of
1313 assuming 0(cfa)) and what registers are in the window. */
1316 dwarf2out_frame_debug_cfa_window_save (void)
1318 dw_cfi_ref cfi
= new_cfi ();
1320 cfi
->dw_cfi_opc
= DW_CFA_GNU_window_save
;
1324 /* Record call frame debugging information for an expression EXPR,
1325 which either sets SP or FP (adjusting how we calculate the frame
1326 address) or saves a register to the stack or another register.
1327 LABEL indicates the address of EXPR.
1329 This function encodes a state machine mapping rtxes to actions on
1330 cfa, cfa_store, and cfa_temp.reg. We describe these rules so
1331 users need not read the source code.
1333 The High-Level Picture
1335 Changes in the register we use to calculate the CFA: Currently we
1336 assume that if you copy the CFA register into another register, we
1337 should take the other one as the new CFA register; this seems to
1338 work pretty well. If it's wrong for some target, it's simple
1339 enough not to set RTX_FRAME_RELATED_P on the insn in question.
1341 Changes in the register we use for saving registers to the stack:
1342 This is usually SP, but not always. Again, we deduce that if you
1343 copy SP into another register (and SP is not the CFA register),
1344 then the new register is the one we will be using for register
1345 saves. This also seems to work.
1347 Register saves: There's not much guesswork about this one; if
1348 RTX_FRAME_RELATED_P is set on an insn which modifies memory, it's a
1349 register save, and the register used to calculate the destination
1350 had better be the one we think we're using for this purpose.
1351 It's also assumed that a copy from a call-saved register to another
1352 register is saving that register if RTX_FRAME_RELATED_P is set on
1353 that instruction. If the copy is from a call-saved register to
1354 the *same* register, that means that the register is now the same
1355 value as in the caller.
1357 Except: If the register being saved is the CFA register, and the
1358 offset is nonzero, we are saving the CFA, so we assume we have to
1359 use DW_CFA_def_cfa_expression. If the offset is 0, we assume that
1360 the intent is to save the value of SP from the previous frame.
1362 In addition, if a register has previously been saved to a different
1365 Invariants / Summaries of Rules
1367 cfa current rule for calculating the CFA. It usually
1368 consists of a register and an offset.
1369 cfa_store register used by prologue code to save things to the stack
1370 cfa_store.offset is the offset from the value of
1371 cfa_store.reg to the actual CFA
1372 cfa_temp register holding an integral value. cfa_temp.offset
1373 stores the value, which will be used to adjust the
1374 stack pointer. cfa_temp is also used like cfa_store,
1375 to track stores to the stack via fp or a temp reg.
1377 Rules 1- 4: Setting a register's value to cfa.reg or an expression
1378 with cfa.reg as the first operand changes the cfa.reg and its
1379 cfa.offset. Rule 1 and 4 also set cfa_temp.reg and
1382 Rules 6- 9: Set a non-cfa.reg register value to a constant or an
1383 expression yielding a constant. This sets cfa_temp.reg
1384 and cfa_temp.offset.
1386 Rule 5: Create a new register cfa_store used to save items to the
1389 Rules 10-14: Save a register to the stack. Define offset as the
1390 difference of the original location and cfa_store's
1391 location (or cfa_temp's location if cfa_temp is used).
1393 Rules 16-20: If AND operation happens on sp in prologue, we assume
1394 stack is realigned. We will use a group of DW_OP_XXX
1395 expressions to represent the location of the stored
1396 register instead of CFA+offset.
1400 "{a,b}" indicates a choice of a xor b.
1401 "<reg>:cfa.reg" indicates that <reg> must equal cfa.reg.
1404 (set <reg1> <reg2>:cfa.reg)
1405 effects: cfa.reg = <reg1>
1406 cfa.offset unchanged
1407 cfa_temp.reg = <reg1>
1408 cfa_temp.offset = cfa.offset
1411 (set sp ({minus,plus,losum} {sp,fp}:cfa.reg
1412 {<const_int>,<reg>:cfa_temp.reg}))
1413 effects: cfa.reg = sp if fp used
1414 cfa.offset += {+/- <const_int>, cfa_temp.offset} if cfa.reg==sp
1415 cfa_store.offset += {+/- <const_int>, cfa_temp.offset}
1416 if cfa_store.reg==sp
1419 (set fp ({minus,plus,losum} <reg>:cfa.reg <const_int>))
1420 effects: cfa.reg = fp
1421 cfa_offset += +/- <const_int>
1424 (set <reg1> ({plus,losum} <reg2>:cfa.reg <const_int>))
1425 constraints: <reg1> != fp
1427 effects: cfa.reg = <reg1>
1428 cfa_temp.reg = <reg1>
1429 cfa_temp.offset = cfa.offset
1432 (set <reg1> (plus <reg2>:cfa_temp.reg sp:cfa.reg))
1433 constraints: <reg1> != fp
1435 effects: cfa_store.reg = <reg1>
1436 cfa_store.offset = cfa.offset - cfa_temp.offset
1439 (set <reg> <const_int>)
1440 effects: cfa_temp.reg = <reg>
1441 cfa_temp.offset = <const_int>
1444 (set <reg1>:cfa_temp.reg (ior <reg2>:cfa_temp.reg <const_int>))
1445 effects: cfa_temp.reg = <reg1>
1446 cfa_temp.offset |= <const_int>
1449 (set <reg> (high <exp>))
1453 (set <reg> (lo_sum <exp> <const_int>))
1454 effects: cfa_temp.reg = <reg>
1455 cfa_temp.offset = <const_int>
1458 (set (mem ({pre,post}_modify sp:cfa_store (???? <reg1> <const_int>))) <reg2>)
1459 effects: cfa_store.offset -= <const_int>
1460 cfa.offset = cfa_store.offset if cfa.reg == sp
1462 cfa.base_offset = -cfa_store.offset
1465 (set (mem ({pre_inc,pre_dec,post_dec} sp:cfa_store.reg)) <reg>)
1466 effects: cfa_store.offset += -/+ mode_size(mem)
1467 cfa.offset = cfa_store.offset if cfa.reg == sp
1469 cfa.base_offset = -cfa_store.offset
1472 (set (mem ({minus,plus,losum} <reg1>:{cfa_store,cfa_temp} <const_int>))
1475 effects: cfa.reg = <reg1>
1476 cfa.base_offset = -/+ <const_int> - {cfa_store,cfa_temp}.offset
1479 (set (mem <reg1>:{cfa_store,cfa_temp}) <reg2>)
1480 effects: cfa.reg = <reg1>
1481 cfa.base_offset = -{cfa_store,cfa_temp}.offset
1484 (set (mem (post_inc <reg1>:cfa_temp <const_int>)) <reg2>)
1485 effects: cfa.reg = <reg1>
1486 cfa.base_offset = -cfa_temp.offset
1487 cfa_temp.offset -= mode_size(mem)
1490 (set <reg> {unspec, unspec_volatile})
1491 effects: target-dependent
1494 (set sp (and: sp <const_int>))
1495 constraints: cfa_store.reg == sp
1496 effects: cfun->fde.stack_realign = 1
1497 cfa_store.offset = 0
1498 fde->drap_reg = cfa.reg if cfa.reg != sp and cfa.reg != fp
1501 (set (mem ({pre_inc, pre_dec} sp)) (mem (plus (cfa.reg) (const_int))))
1502 effects: cfa_store.offset += -/+ mode_size(mem)
1505 (set (mem ({pre_inc, pre_dec} sp)) fp)
1506 constraints: fde->stack_realign == 1
1507 effects: cfa_store.offset = 0
1508 cfa.reg != HARD_FRAME_POINTER_REGNUM
1511 (set (mem ({pre_inc, pre_dec} sp)) cfa.reg)
1512 constraints: fde->stack_realign == 1
1514 && cfa.indirect == 0
1515 && cfa.reg != HARD_FRAME_POINTER_REGNUM
1516 effects: Use DW_CFA_def_cfa_expression to define cfa
1517 cfa.reg == fde->drap_reg */
1520 dwarf2out_frame_debug_expr (rtx expr
)
1522 rtx src
, dest
, span
;
1523 HOST_WIDE_INT offset
;
1526 /* If RTX_FRAME_RELATED_P is set on a PARALLEL, process each member of
1527 the PARALLEL independently. The first element is always processed if
1528 it is a SET. This is for backward compatibility. Other elements
1529 are processed only if they are SETs and the RTX_FRAME_RELATED_P
1530 flag is set in them. */
1531 if (GET_CODE (expr
) == PARALLEL
|| GET_CODE (expr
) == SEQUENCE
)
1534 int limit
= XVECLEN (expr
, 0);
1537 /* PARALLELs have strict read-modify-write semantics, so we
1538 ought to evaluate every rvalue before changing any lvalue.
1539 It's cumbersome to do that in general, but there's an
1540 easy approximation that is enough for all current users:
1541 handle register saves before register assignments. */
1542 if (GET_CODE (expr
) == PARALLEL
)
1543 for (par_index
= 0; par_index
< limit
; par_index
++)
1545 elem
= XVECEXP (expr
, 0, par_index
);
1546 if (GET_CODE (elem
) == SET
1547 && MEM_P (SET_DEST (elem
))
1548 && (RTX_FRAME_RELATED_P (elem
) || par_index
== 0))
1549 dwarf2out_frame_debug_expr (elem
);
1552 for (par_index
= 0; par_index
< limit
; par_index
++)
1554 elem
= XVECEXP (expr
, 0, par_index
);
1555 if (GET_CODE (elem
) == SET
1556 && (!MEM_P (SET_DEST (elem
)) || GET_CODE (expr
) == SEQUENCE
)
1557 && (RTX_FRAME_RELATED_P (elem
) || par_index
== 0))
1558 dwarf2out_frame_debug_expr (elem
);
1559 else if (GET_CODE (elem
) == SET
1561 && !RTX_FRAME_RELATED_P (elem
))
1563 /* Stack adjustment combining might combine some post-prologue
1564 stack adjustment into a prologue stack adjustment. */
1565 HOST_WIDE_INT offset
= stack_adjust_offset (elem
, args_size
, 0);
1568 dwarf2out_stack_adjust (offset
);
1574 gcc_assert (GET_CODE (expr
) == SET
);
1576 src
= SET_SRC (expr
);
1577 dest
= SET_DEST (expr
);
1581 rtx rsi
= reg_saved_in (src
);
1588 switch (GET_CODE (dest
))
1591 switch (GET_CODE (src
))
1593 /* Setting FP from SP. */
1595 if (cfa
.reg
== dwf_regno (src
))
1598 /* Update the CFA rule wrt SP or FP. Make sure src is
1599 relative to the current CFA register.
1601 We used to require that dest be either SP or FP, but the
1602 ARM copies SP to a temporary register, and from there to
1603 FP. So we just rely on the backends to only set
1604 RTX_FRAME_RELATED_P on appropriate insns. */
1605 cfa
.reg
= dwf_regno (dest
);
1606 cfa_temp
.reg
= cfa
.reg
;
1607 cfa_temp
.offset
= cfa
.offset
;
1611 /* Saving a register in a register. */
1612 gcc_assert (!fixed_regs
[REGNO (dest
)]
1613 /* For the SPARC and its register window. */
1614 || (dwf_regno (src
) == DWARF_FRAME_RETURN_COLUMN
));
1616 /* After stack is aligned, we can only save SP in FP
1617 if drap register is used. In this case, we have
1618 to restore stack pointer with the CFA value and we
1619 don't generate this DWARF information. */
1621 && fde
->stack_realign
1622 && REGNO (src
) == STACK_POINTER_REGNUM
)
1623 gcc_assert (REGNO (dest
) == HARD_FRAME_POINTER_REGNUM
1624 && fde
->drap_reg
!= INVALID_REGNUM
1625 && cfa
.reg
!= dwf_regno (src
));
1627 queue_reg_save (src
, dest
, 0);
1634 if (dest
== stack_pointer_rtx
)
1638 switch (GET_CODE (XEXP (src
, 1)))
1641 offset
= INTVAL (XEXP (src
, 1));
1644 gcc_assert (dwf_regno (XEXP (src
, 1)) == cfa_temp
.reg
);
1645 offset
= cfa_temp
.offset
;
1651 if (XEXP (src
, 0) == hard_frame_pointer_rtx
)
1653 /* Restoring SP from FP in the epilogue. */
1654 gcc_assert (cfa
.reg
== DW_FRAME_POINTER_REGNUM
);
1655 cfa
.reg
= DW_STACK_POINTER_REGNUM
;
1657 else if (GET_CODE (src
) == LO_SUM
)
1658 /* Assume we've set the source reg of the LO_SUM from sp. */
1661 gcc_assert (XEXP (src
, 0) == stack_pointer_rtx
);
1663 if (GET_CODE (src
) != MINUS
)
1665 if (cfa
.reg
== DW_STACK_POINTER_REGNUM
)
1666 cfa
.offset
+= offset
;
1667 if (cfa_store
.reg
== DW_STACK_POINTER_REGNUM
)
1668 cfa_store
.offset
+= offset
;
1670 else if (dest
== hard_frame_pointer_rtx
)
1673 /* Either setting the FP from an offset of the SP,
1674 or adjusting the FP */
1675 gcc_assert (frame_pointer_needed
);
1677 gcc_assert (REG_P (XEXP (src
, 0))
1678 && dwf_regno (XEXP (src
, 0)) == cfa
.reg
1679 && CONST_INT_P (XEXP (src
, 1)));
1680 offset
= INTVAL (XEXP (src
, 1));
1681 if (GET_CODE (src
) != MINUS
)
1683 cfa
.offset
+= offset
;
1684 cfa
.reg
= DW_FRAME_POINTER_REGNUM
;
1688 gcc_assert (GET_CODE (src
) != MINUS
);
1691 if (REG_P (XEXP (src
, 0))
1692 && dwf_regno (XEXP (src
, 0)) == cfa
.reg
1693 && CONST_INT_P (XEXP (src
, 1)))
1695 /* Setting a temporary CFA register that will be copied
1696 into the FP later on. */
1697 offset
= - INTVAL (XEXP (src
, 1));
1698 cfa
.offset
+= offset
;
1699 cfa
.reg
= dwf_regno (dest
);
1700 /* Or used to save regs to the stack. */
1701 cfa_temp
.reg
= cfa
.reg
;
1702 cfa_temp
.offset
= cfa
.offset
;
1706 else if (REG_P (XEXP (src
, 0))
1707 && dwf_regno (XEXP (src
, 0)) == cfa_temp
.reg
1708 && XEXP (src
, 1) == stack_pointer_rtx
)
1710 /* Setting a scratch register that we will use instead
1711 of SP for saving registers to the stack. */
1712 gcc_assert (cfa
.reg
== DW_STACK_POINTER_REGNUM
);
1713 cfa_store
.reg
= dwf_regno (dest
);
1714 cfa_store
.offset
= cfa
.offset
- cfa_temp
.offset
;
1718 else if (GET_CODE (src
) == LO_SUM
1719 && CONST_INT_P (XEXP (src
, 1)))
1721 cfa_temp
.reg
= dwf_regno (dest
);
1722 cfa_temp
.offset
= INTVAL (XEXP (src
, 1));
1731 cfa_temp
.reg
= dwf_regno (dest
);
1732 cfa_temp
.offset
= INTVAL (src
);
1737 gcc_assert (REG_P (XEXP (src
, 0))
1738 && dwf_regno (XEXP (src
, 0)) == cfa_temp
.reg
1739 && CONST_INT_P (XEXP (src
, 1)));
1741 cfa_temp
.reg
= dwf_regno (dest
);
1742 cfa_temp
.offset
|= INTVAL (XEXP (src
, 1));
1745 /* Skip over HIGH, assuming it will be followed by a LO_SUM,
1746 which will fill in all of the bits. */
1753 case UNSPEC_VOLATILE
:
1754 /* All unspecs should be represented by REG_CFA_* notes. */
1760 /* If this AND operation happens on stack pointer in prologue,
1761 we assume the stack is realigned and we extract the
1763 if (fde
&& XEXP (src
, 0) == stack_pointer_rtx
)
1765 /* We interpret reg_save differently with stack_realign set.
1766 Thus we must flush whatever we have queued first. */
1767 dwarf2out_flush_queued_reg_saves ();
1769 gcc_assert (cfa_store
.reg
== dwf_regno (XEXP (src
, 0)));
1770 fde
->stack_realign
= 1;
1771 fde
->stack_realignment
= INTVAL (XEXP (src
, 1));
1772 cfa_store
.offset
= 0;
1774 if (cfa
.reg
!= DW_STACK_POINTER_REGNUM
1775 && cfa
.reg
!= DW_FRAME_POINTER_REGNUM
)
1776 fde
->drap_reg
= cfa
.reg
;
1789 /* Saving a register to the stack. Make sure dest is relative to the
1791 switch (GET_CODE (XEXP (dest
, 0)))
1797 /* We can't handle variable size modifications. */
1798 gcc_assert (GET_CODE (XEXP (XEXP (XEXP (dest
, 0), 1), 1))
1800 offset
= -INTVAL (XEXP (XEXP (XEXP (dest
, 0), 1), 1));
1802 gcc_assert (REGNO (XEXP (XEXP (dest
, 0), 0)) == STACK_POINTER_REGNUM
1803 && cfa_store
.reg
== DW_STACK_POINTER_REGNUM
);
1805 cfa_store
.offset
+= offset
;
1806 if (cfa
.reg
== DW_STACK_POINTER_REGNUM
)
1807 cfa
.offset
= cfa_store
.offset
;
1809 if (GET_CODE (XEXP (dest
, 0)) == POST_MODIFY
)
1810 offset
-= cfa_store
.offset
;
1812 offset
= -cfa_store
.offset
;
1819 offset
= GET_MODE_SIZE (GET_MODE (dest
));
1820 if (GET_CODE (XEXP (dest
, 0)) == PRE_INC
)
1823 gcc_assert ((REGNO (XEXP (XEXP (dest
, 0), 0))
1824 == STACK_POINTER_REGNUM
)
1825 && cfa_store
.reg
== DW_STACK_POINTER_REGNUM
);
1827 cfa_store
.offset
+= offset
;
1829 /* Rule 18: If stack is aligned, we will use FP as a
1830 reference to represent the address of the stored
1833 && fde
->stack_realign
1834 && src
== hard_frame_pointer_rtx
)
1836 gcc_assert (cfa
.reg
!= DW_FRAME_POINTER_REGNUM
);
1837 cfa_store
.offset
= 0;
1840 if (cfa
.reg
== DW_STACK_POINTER_REGNUM
)
1841 cfa
.offset
= cfa_store
.offset
;
1843 if (GET_CODE (XEXP (dest
, 0)) == POST_DEC
)
1844 offset
+= -cfa_store
.offset
;
1846 offset
= -cfa_store
.offset
;
1850 /* With an offset. */
1857 gcc_assert (CONST_INT_P (XEXP (XEXP (dest
, 0), 1))
1858 && REG_P (XEXP (XEXP (dest
, 0), 0)));
1859 offset
= INTVAL (XEXP (XEXP (dest
, 0), 1));
1860 if (GET_CODE (XEXP (dest
, 0)) == MINUS
)
1863 regno
= dwf_regno (XEXP (XEXP (dest
, 0), 0));
1865 if (cfa
.reg
== regno
)
1866 offset
-= cfa
.offset
;
1867 else if (cfa_store
.reg
== regno
)
1868 offset
-= cfa_store
.offset
;
1871 gcc_assert (cfa_temp
.reg
== regno
);
1872 offset
-= cfa_temp
.offset
;
1878 /* Without an offset. */
1881 unsigned int regno
= dwf_regno (XEXP (dest
, 0));
1883 if (cfa
.reg
== regno
)
1884 offset
= -cfa
.offset
;
1885 else if (cfa_store
.reg
== regno
)
1886 offset
= -cfa_store
.offset
;
1889 gcc_assert (cfa_temp
.reg
== regno
);
1890 offset
= -cfa_temp
.offset
;
1897 gcc_assert (cfa_temp
.reg
== dwf_regno (XEXP (XEXP (dest
, 0), 0)));
1898 offset
= -cfa_temp
.offset
;
1899 cfa_temp
.offset
-= GET_MODE_SIZE (GET_MODE (dest
));
1907 /* If the source operand of this MEM operation is a memory,
1908 we only care how much stack grew. */
1913 && REGNO (src
) != STACK_POINTER_REGNUM
1914 && REGNO (src
) != HARD_FRAME_POINTER_REGNUM
1915 && dwf_regno (src
) == cfa
.reg
)
1917 /* We're storing the current CFA reg into the stack. */
1919 if (cfa
.offset
== 0)
1922 /* If stack is aligned, putting CFA reg into stack means
1923 we can no longer use reg + offset to represent CFA.
1924 Here we use DW_CFA_def_cfa_expression instead. The
1925 result of this expression equals to the original CFA
1928 && fde
->stack_realign
1929 && cfa
.indirect
== 0
1930 && cfa
.reg
!= DW_FRAME_POINTER_REGNUM
)
1932 dw_cfa_location cfa_exp
;
1934 gcc_assert (fde
->drap_reg
== cfa
.reg
);
1936 cfa_exp
.indirect
= 1;
1937 cfa_exp
.reg
= DW_FRAME_POINTER_REGNUM
;
1938 cfa_exp
.base_offset
= offset
;
1941 fde
->drap_reg_saved
= 1;
1943 def_cfa_1 (&cfa_exp
);
1947 /* If the source register is exactly the CFA, assume
1948 we're saving SP like any other register; this happens
1951 queue_reg_save (stack_pointer_rtx
, NULL_RTX
, offset
);
1956 /* Otherwise, we'll need to look in the stack to
1957 calculate the CFA. */
1958 rtx x
= XEXP (dest
, 0);
1962 gcc_assert (REG_P (x
));
1964 cfa
.reg
= dwf_regno (x
);
1965 cfa
.base_offset
= offset
;
1976 span
= targetm
.dwarf_register_span (src
);
1978 queue_reg_save (src
, NULL_RTX
, offset
);
1981 /* We have a PARALLEL describing where the contents of SRC live.
1982 Queue register saves for each piece of the PARALLEL. */
1985 HOST_WIDE_INT span_offset
= offset
;
1987 gcc_assert (GET_CODE (span
) == PARALLEL
);
1989 limit
= XVECLEN (span
, 0);
1990 for (par_index
= 0; par_index
< limit
; par_index
++)
1992 rtx elem
= XVECEXP (span
, 0, par_index
);
1993 queue_reg_save (elem
, NULL_RTX
, span_offset
);
1994 span_offset
+= GET_MODE_SIZE (GET_MODE (elem
));
2004 /* Record call frame debugging information for INSN, which either
2005 sets SP or FP (adjusting how we calculate the frame address) or saves a
2006 register to the stack. If INSN is NULL_RTX, initialize our state.
2008 If AFTER_P is false, we're being called before the insn is emitted,
2009 otherwise after. Call instructions get invoked twice. */
2012 dwarf2out_frame_debug (rtx insn
, bool after_p
)
2015 bool handled_one
= false;
2016 bool need_flush
= false;
2018 if (!NONJUMP_INSN_P (insn
) || clobbers_queued_reg_save (insn
))
2019 dwarf2out_flush_queued_reg_saves ();
2021 if (!RTX_FRAME_RELATED_P (insn
))
2023 /* ??? This should be done unconditionally since stack adjustments
2024 matter if the stack pointer is not the CFA register anymore but
2025 is still used to save registers. */
2026 if (!ACCUMULATE_OUTGOING_ARGS
)
2027 dwarf2out_notice_stack_adjust (insn
, after_p
);
2031 any_cfis_emitted
= false;
2033 for (note
= REG_NOTES (insn
); note
; note
= XEXP (note
, 1))
2034 switch (REG_NOTE_KIND (note
))
2036 case REG_FRAME_RELATED_EXPR
:
2037 insn
= XEXP (note
, 0);
2040 case REG_CFA_DEF_CFA
:
2041 dwarf2out_frame_debug_def_cfa (XEXP (note
, 0));
2045 case REG_CFA_ADJUST_CFA
:
2050 if (GET_CODE (n
) == PARALLEL
)
2051 n
= XVECEXP (n
, 0, 0);
2053 dwarf2out_frame_debug_adjust_cfa (n
);
2057 case REG_CFA_OFFSET
:
2060 n
= single_set (insn
);
2061 dwarf2out_frame_debug_cfa_offset (n
);
2065 case REG_CFA_REGISTER
:
2070 if (GET_CODE (n
) == PARALLEL
)
2071 n
= XVECEXP (n
, 0, 0);
2073 dwarf2out_frame_debug_cfa_register (n
);
2077 case REG_CFA_EXPRESSION
:
2080 n
= single_set (insn
);
2081 dwarf2out_frame_debug_cfa_expression (n
);
2085 case REG_CFA_RESTORE
:
2090 if (GET_CODE (n
) == PARALLEL
)
2091 n
= XVECEXP (n
, 0, 0);
2094 dwarf2out_frame_debug_cfa_restore (n
);
2098 case REG_CFA_SET_VDRAP
:
2102 dw_fde_ref fde
= cfun
->fde
;
2105 gcc_assert (fde
->vdrap_reg
== INVALID_REGNUM
);
2107 fde
->vdrap_reg
= dwf_regno (n
);
2113 case REG_CFA_WINDOW_SAVE
:
2114 dwarf2out_frame_debug_cfa_window_save ();
2118 case REG_CFA_FLUSH_QUEUE
:
2119 /* The actual flush happens below. */
2130 /* Minimize the number of advances by emitting the entire queue
2131 once anything is emitted. */
2132 need_flush
|= any_cfis_emitted
;
2136 insn
= PATTERN (insn
);
2138 dwarf2out_frame_debug_expr (insn
);
2140 /* Check again. A parallel can save and update the same register.
2141 We could probably check just once, here, but this is safer than
2142 removing the check at the start of the function. */
2143 if (any_cfis_emitted
|| clobbers_queued_reg_save (insn
))
2148 dwarf2out_flush_queued_reg_saves ();
2151 /* Examine CFI and return true if a cfi label and set_loc is needed
2152 beforehand. Even when generating CFI assembler instructions, we
2153 still have to add the cfi to the list so that lookup_cfa_1 works
2154 later on. When -g2 and above we even need to force emitting of
2155 CFI labels and add to list a DW_CFA_set_loc for convert_cfa_to_fb_loc_list
2156 purposes. If we're generating DWARF3 output we use DW_OP_call_frame_cfa
2157 and so don't use convert_cfa_to_fb_loc_list. */
2160 cfi_label_required_p (dw_cfi_ref cfi
)
2162 if (!dwarf2out_do_cfi_asm ())
2165 if (dwarf_version
== 2
2166 && debug_info_level
> DINFO_LEVEL_TERSE
2167 && (write_symbols
== DWARF2_DEBUG
2168 || write_symbols
== VMS_AND_DWARF2_DEBUG
))
2170 switch (cfi
->dw_cfi_opc
)
2172 case DW_CFA_def_cfa_offset
:
2173 case DW_CFA_def_cfa_offset_sf
:
2174 case DW_CFA_def_cfa_register
:
2175 case DW_CFA_def_cfa
:
2176 case DW_CFA_def_cfa_sf
:
2177 case DW_CFA_def_cfa_expression
:
2178 case DW_CFA_restore_state
:
2187 /* Walk the function, looking for NOTE_INSN_CFI notes. Add the CFIs to the
2188 function's FDE, adding CFI labels and set_loc/advance_loc opcodes as
2191 add_cfis_to_fde (void)
2193 dw_fde_ref fde
= cfun
->fde
;
2195 /* We always start with a function_begin label. */
2198 for (insn
= get_insns (); insn
; insn
= next
)
2200 next
= NEXT_INSN (insn
);
2202 if (NOTE_P (insn
) && NOTE_KIND (insn
) == NOTE_INSN_SWITCH_TEXT_SECTIONS
)
2204 /* Don't attempt to advance_loc4 between labels
2205 in different sections. */
2209 if (NOTE_P (insn
) && NOTE_KIND (insn
) == NOTE_INSN_CFI
)
2211 bool required
= cfi_label_required_p (NOTE_CFI (insn
));
2212 while (next
&& NOTE_P (next
) && NOTE_KIND (next
) == NOTE_INSN_CFI
)
2214 required
|= cfi_label_required_p (NOTE_CFI (next
));
2215 next
= NEXT_INSN (next
);
2219 int num
= dwarf2out_cfi_label_num
;
2220 const char *label
= dwarf2out_cfi_label ();
2224 /* Set the location counter to the new label. */
2226 xcfi
->dw_cfi_opc
= (first
? DW_CFA_set_loc
2227 : DW_CFA_advance_loc4
);
2228 xcfi
->dw_cfi_oprnd1
.dw_cfi_addr
= label
;
2229 VEC_safe_push (dw_cfi_ref
, gc
, fde
->dw_fde_cfi
, xcfi
);
2231 tmp
= emit_note_before (NOTE_INSN_CFI_LABEL
, insn
);
2232 NOTE_LABEL_NUMBER (tmp
) = num
;
2237 VEC_safe_push (dw_cfi_ref
, gc
, fde
->dw_fde_cfi
, NOTE_CFI (insn
));
2238 insn
= NEXT_INSN (insn
);
2240 while (insn
!= next
);
2246 /* Scan the function and create the initial set of CFI notes. */
2249 create_cfi_notes (void)
2253 for (insn
= get_insns (); insn
; insn
= NEXT_INSN (insn
))
2257 cfi_insn
= PREV_INSN (insn
);
2259 if (BARRIER_P (insn
))
2261 dwarf2out_frame_debug (insn
, false);
2267 switch (NOTE_KIND (insn
))
2269 case NOTE_INSN_PROLOGUE_END
:
2270 dwarf2out_flush_queued_reg_saves ();
2273 case NOTE_INSN_EPILOGUE_BEG
:
2274 #if defined(HAVE_epilogue)
2275 dwarf2out_cfi_begin_epilogue (insn
);
2279 case NOTE_INSN_CFA_RESTORE_STATE
:
2281 dwarf2out_frame_debug_restore_state ();
2287 if (!NONDEBUG_INSN_P (insn
))
2290 pat
= PATTERN (insn
);
2291 if (asm_noperands (pat
) >= 0)
2293 dwarf2out_frame_debug (insn
, false);
2297 if (GET_CODE (pat
) == SEQUENCE
)
2299 int i
, n
= XVECLEN (pat
, 0);
2300 for (i
= 1; i
< n
; ++i
)
2301 dwarf2out_frame_debug (XVECEXP (pat
, 0, i
), false);
2305 || find_reg_note (insn
, REG_CFA_FLUSH_QUEUE
, NULL
))
2306 dwarf2out_frame_debug (insn
, false);
2308 /* Do not separate tablejump insns from their ADDR_DIFF_VEC.
2309 Putting the note after the VEC should be ok. */
2310 if (!tablejump_p (insn
, NULL
, &cfi_insn
))
2313 dwarf2out_frame_debug (insn
, true);
2319 /* Determine if we need to save and restore CFI information around this
2320 epilogue. If SIBCALL is true, then this is a sibcall epilogue. If
2321 we do need to save/restore, then emit the save now, and insert a
2322 NOTE_INSN_CFA_RESTORE_STATE at the appropriate place in the stream. */
2325 dwarf2out_cfi_begin_epilogue (rtx insn
)
2327 bool saw_frp
= false;
2330 /* Scan forward to the return insn, noticing if there are possible
2331 frame related insns. */
2332 for (i
= NEXT_INSN (insn
); i
; i
= NEXT_INSN (i
))
2337 /* Look for both regular and sibcalls to end the block. */
2338 if (returnjump_p (i
))
2340 if (CALL_P (i
) && SIBLING_CALL_P (i
))
2343 if (GET_CODE (PATTERN (i
)) == SEQUENCE
)
2346 rtx seq
= PATTERN (i
);
2348 if (returnjump_p (XVECEXP (seq
, 0, 0)))
2350 if (CALL_P (XVECEXP (seq
, 0, 0))
2351 && SIBLING_CALL_P (XVECEXP (seq
, 0, 0)))
2354 for (idx
= 0; idx
< XVECLEN (seq
, 0); idx
++)
2355 if (RTX_FRAME_RELATED_P (XVECEXP (seq
, 0, idx
)))
2359 if (RTX_FRAME_RELATED_P (i
))
2363 /* If the port doesn't emit epilogue unwind info, we don't need a
2364 save/restore pair. */
2368 /* Otherwise, search forward to see if the return insn was the last
2369 basic block of the function. If so, we don't need save/restore. */
2370 gcc_assert (i
!= NULL
);
2371 i
= next_real_insn (i
);
2375 /* Insert the restore before that next real insn in the stream, and before
2376 a potential NOTE_INSN_EPILOGUE_BEG -- we do need these notes to be
2377 properly nested. This should be after any label or alignment. This
2378 will be pushed into the CFI stream by the function below. */
2381 rtx p
= PREV_INSN (i
);
2384 if (NOTE_KIND (p
) == NOTE_INSN_BASIC_BLOCK
)
2388 emit_note_before (NOTE_INSN_CFA_RESTORE_STATE
, i
);
2390 emit_cfa_remember
= true;
2392 /* And emulate the state save. */
2393 gcc_assert (!cfa_remember
.in_use
);
2395 old_cfa_remember
= old_cfa
;
2396 cfa_remember
.in_use
= 1;
2399 /* A "subroutine" of dwarf2out_cfi_begin_epilogue. Emit the restore
2403 dwarf2out_frame_debug_restore_state (void)
2405 dw_cfi_ref cfi
= new_cfi ();
2407 cfi
->dw_cfi_opc
= DW_CFA_restore_state
;
2410 gcc_assert (cfa_remember
.in_use
);
2412 old_cfa
= old_cfa_remember
;
2413 cfa_remember
.in_use
= 0;
2416 /* Record the initial position of the return address. RTL is
2417 INCOMING_RETURN_ADDR_RTX. */
2420 initial_return_save (rtx rtl
)
2422 unsigned int reg
= INVALID_REGNUM
;
2423 HOST_WIDE_INT offset
= 0;
2425 switch (GET_CODE (rtl
))
2428 /* RA is in a register. */
2429 reg
= dwf_regno (rtl
);
2433 /* RA is on the stack. */
2434 rtl
= XEXP (rtl
, 0);
2435 switch (GET_CODE (rtl
))
2438 gcc_assert (REGNO (rtl
) == STACK_POINTER_REGNUM
);
2443 gcc_assert (REGNO (XEXP (rtl
, 0)) == STACK_POINTER_REGNUM
);
2444 offset
= INTVAL (XEXP (rtl
, 1));
2448 gcc_assert (REGNO (XEXP (rtl
, 0)) == STACK_POINTER_REGNUM
);
2449 offset
= -INTVAL (XEXP (rtl
, 1));
2459 /* The return address is at some offset from any value we can
2460 actually load. For instance, on the SPARC it is in %i7+8. Just
2461 ignore the offset for now; it doesn't matter for unwinding frames. */
2462 gcc_assert (CONST_INT_P (XEXP (rtl
, 1)));
2463 initial_return_save (XEXP (rtl
, 0));
2470 if (reg
!= DWARF_FRAME_RETURN_COLUMN
)
2472 if (reg
!= INVALID_REGNUM
)
2473 record_reg_saved_in_reg (rtl
, pc_rtx
);
2474 reg_save (DWARF_FRAME_RETURN_COLUMN
, reg
, offset
- cfa
.offset
);
2478 /* Annotate the function with NOTE_INSN_CFI notes to record the CFI
2479 state at each location within the function. These notes will be
2480 emitted during pass_final. */
2483 execute_dwarf2_frame (void)
2485 /* The first time we're called, compute the incoming frame state. */
2486 if (cie_cfi_vec
== NULL
)
2488 dw_cfa_location loc
;
2490 add_cfi_vec
= &cie_cfi_vec
;
2492 memset(&old_cfa
, 0, sizeof (old_cfa
));
2493 old_cfa
.reg
= INVALID_REGNUM
;
2495 /* On entry, the Canonical Frame Address is at SP. */
2496 memset(&loc
, 0, sizeof (loc
));
2497 loc
.reg
= DW_STACK_POINTER_REGNUM
;
2498 loc
.offset
= INCOMING_FRAME_SP_OFFSET
;
2501 if (targetm
.debug_unwind_info () == UI_DWARF2
2502 || targetm_common
.except_unwind_info (&global_options
) == UI_DWARF2
)
2504 initial_return_save (INCOMING_RETURN_ADDR_RTX
);
2506 /* For a few targets, we have the return address incoming into a
2507 register, but choose a different return column. This will result
2508 in a DW_CFA_register for the return, and an entry in
2509 regs_saved_in_regs to match. If the target later stores that
2510 return address register to the stack, we want to be able to emit
2511 the DW_CFA_offset against the return column, not the intermediate
2512 save register. Save the contents of regs_saved_in_regs so that
2513 we can re-initialize it at the start of each function. */
2514 switch (VEC_length (reg_saved_in_data
, regs_saved_in_regs
))
2519 cie_return_save
= ggc_alloc_reg_saved_in_data ();
2520 *cie_return_save
= *VEC_index (reg_saved_in_data
,
2521 regs_saved_in_regs
, 0);
2522 regs_saved_in_regs
= NULL
;
2532 /* Set up state for generating call frame debug info. */
2533 gcc_checking_assert (queued_reg_saves
== NULL
);
2534 gcc_checking_assert (regs_saved_in_regs
== NULL
);
2536 memset (&cfa
, 0, sizeof(cfa
));
2537 cfa
.reg
= DW_STACK_POINTER_REGNUM
;
2538 cfa
.offset
= INCOMING_FRAME_SP_OFFSET
;
2543 memset (&cfa_temp
, 0, sizeof(cfa_temp
));
2544 cfa_temp
.reg
= INVALID_REGNUM
;
2546 if (cie_return_save
)
2547 VEC_safe_push (reg_saved_in_data
, gc
, regs_saved_in_regs
, cie_return_save
);
2549 dwarf2out_alloc_current_fde ();
2552 create_cfi_notes ();
2555 /* Reset all function-specific information, particularly for GC. */
2556 XDELETEVEC (barrier_args_size
);
2557 barrier_args_size
= NULL
;
2558 regs_saved_in_regs
= NULL
;
2559 queued_reg_saves
= NULL
;
2564 /* Convert a DWARF call frame info. operation to its string name */
2567 dwarf_cfi_name (unsigned int cfi_opc
)
2571 case DW_CFA_advance_loc
:
2572 return "DW_CFA_advance_loc";
2574 return "DW_CFA_offset";
2575 case DW_CFA_restore
:
2576 return "DW_CFA_restore";
2578 return "DW_CFA_nop";
2579 case DW_CFA_set_loc
:
2580 return "DW_CFA_set_loc";
2581 case DW_CFA_advance_loc1
:
2582 return "DW_CFA_advance_loc1";
2583 case DW_CFA_advance_loc2
:
2584 return "DW_CFA_advance_loc2";
2585 case DW_CFA_advance_loc4
:
2586 return "DW_CFA_advance_loc4";
2587 case DW_CFA_offset_extended
:
2588 return "DW_CFA_offset_extended";
2589 case DW_CFA_restore_extended
:
2590 return "DW_CFA_restore_extended";
2591 case DW_CFA_undefined
:
2592 return "DW_CFA_undefined";
2593 case DW_CFA_same_value
:
2594 return "DW_CFA_same_value";
2595 case DW_CFA_register
:
2596 return "DW_CFA_register";
2597 case DW_CFA_remember_state
:
2598 return "DW_CFA_remember_state";
2599 case DW_CFA_restore_state
:
2600 return "DW_CFA_restore_state";
2601 case DW_CFA_def_cfa
:
2602 return "DW_CFA_def_cfa";
2603 case DW_CFA_def_cfa_register
:
2604 return "DW_CFA_def_cfa_register";
2605 case DW_CFA_def_cfa_offset
:
2606 return "DW_CFA_def_cfa_offset";
2609 case DW_CFA_def_cfa_expression
:
2610 return "DW_CFA_def_cfa_expression";
2611 case DW_CFA_expression
:
2612 return "DW_CFA_expression";
2613 case DW_CFA_offset_extended_sf
:
2614 return "DW_CFA_offset_extended_sf";
2615 case DW_CFA_def_cfa_sf
:
2616 return "DW_CFA_def_cfa_sf";
2617 case DW_CFA_def_cfa_offset_sf
:
2618 return "DW_CFA_def_cfa_offset_sf";
2620 /* SGI/MIPS specific */
2621 case DW_CFA_MIPS_advance_loc8
:
2622 return "DW_CFA_MIPS_advance_loc8";
2624 /* GNU extensions */
2625 case DW_CFA_GNU_window_save
:
2626 return "DW_CFA_GNU_window_save";
2627 case DW_CFA_GNU_args_size
:
2628 return "DW_CFA_GNU_args_size";
2629 case DW_CFA_GNU_negative_offset_extended
:
2630 return "DW_CFA_GNU_negative_offset_extended";
2633 return "DW_CFA_<unknown>";
2637 /* This routine will generate the correct assembly data for a location
2638 description based on a cfi entry with a complex address. */
2641 output_cfa_loc (dw_cfi_ref cfi
, int for_eh
)
2643 dw_loc_descr_ref loc
;
2646 if (cfi
->dw_cfi_opc
== DW_CFA_expression
)
2649 DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, for_eh
);
2650 dw2_asm_output_data (1, r
, NULL
);
2651 loc
= cfi
->dw_cfi_oprnd2
.dw_cfi_loc
;
2654 loc
= cfi
->dw_cfi_oprnd1
.dw_cfi_loc
;
2656 /* Output the size of the block. */
2657 size
= size_of_locs (loc
);
2658 dw2_asm_output_data_uleb128 (size
, NULL
);
2660 /* Now output the operations themselves. */
2661 output_loc_sequence (loc
, for_eh
);
2664 /* Similar, but used for .cfi_escape. */
2667 output_cfa_loc_raw (dw_cfi_ref cfi
)
2669 dw_loc_descr_ref loc
;
2672 if (cfi
->dw_cfi_opc
== DW_CFA_expression
)
2675 DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, 1);
2676 fprintf (asm_out_file
, "%#x,", r
);
2677 loc
= cfi
->dw_cfi_oprnd2
.dw_cfi_loc
;
2680 loc
= cfi
->dw_cfi_oprnd1
.dw_cfi_loc
;
2682 /* Output the size of the block. */
2683 size
= size_of_locs (loc
);
2684 dw2_asm_output_data_uleb128_raw (size
);
2685 fputc (',', asm_out_file
);
2687 /* Now output the operations themselves. */
2688 output_loc_sequence_raw (loc
);
2691 /* Output a Call Frame Information opcode and its operand(s). */
2694 output_cfi (dw_cfi_ref cfi
, dw_fde_ref fde
, int for_eh
)
2699 if (cfi
->dw_cfi_opc
== DW_CFA_advance_loc
)
2700 dw2_asm_output_data (1, (cfi
->dw_cfi_opc
2701 | (cfi
->dw_cfi_oprnd1
.dw_cfi_offset
& 0x3f)),
2702 "DW_CFA_advance_loc " HOST_WIDE_INT_PRINT_HEX
,
2703 ((unsigned HOST_WIDE_INT
)
2704 cfi
->dw_cfi_oprnd1
.dw_cfi_offset
));
2705 else if (cfi
->dw_cfi_opc
== DW_CFA_offset
)
2707 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, for_eh
);
2708 dw2_asm_output_data (1, (cfi
->dw_cfi_opc
| (r
& 0x3f)),
2709 "DW_CFA_offset, column %#lx", r
);
2710 off
= div_data_align (cfi
->dw_cfi_oprnd2
.dw_cfi_offset
);
2711 dw2_asm_output_data_uleb128 (off
, NULL
);
2713 else if (cfi
->dw_cfi_opc
== DW_CFA_restore
)
2715 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, for_eh
);
2716 dw2_asm_output_data (1, (cfi
->dw_cfi_opc
| (r
& 0x3f)),
2717 "DW_CFA_restore, column %#lx", r
);
2721 dw2_asm_output_data (1, cfi
->dw_cfi_opc
,
2722 "%s", dwarf_cfi_name (cfi
->dw_cfi_opc
));
2724 switch (cfi
->dw_cfi_opc
)
2726 case DW_CFA_set_loc
:
2728 dw2_asm_output_encoded_addr_rtx (
2729 ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/1, /*global=*/0),
2730 gen_rtx_SYMBOL_REF (Pmode
, cfi
->dw_cfi_oprnd1
.dw_cfi_addr
),
2733 dw2_asm_output_addr (DWARF2_ADDR_SIZE
,
2734 cfi
->dw_cfi_oprnd1
.dw_cfi_addr
, NULL
);
2735 fde
->dw_fde_current_label
= cfi
->dw_cfi_oprnd1
.dw_cfi_addr
;
2738 case DW_CFA_advance_loc1
:
2739 dw2_asm_output_delta (1, cfi
->dw_cfi_oprnd1
.dw_cfi_addr
,
2740 fde
->dw_fde_current_label
, NULL
);
2741 fde
->dw_fde_current_label
= cfi
->dw_cfi_oprnd1
.dw_cfi_addr
;
2744 case DW_CFA_advance_loc2
:
2745 dw2_asm_output_delta (2, cfi
->dw_cfi_oprnd1
.dw_cfi_addr
,
2746 fde
->dw_fde_current_label
, NULL
);
2747 fde
->dw_fde_current_label
= cfi
->dw_cfi_oprnd1
.dw_cfi_addr
;
2750 case DW_CFA_advance_loc4
:
2751 dw2_asm_output_delta (4, cfi
->dw_cfi_oprnd1
.dw_cfi_addr
,
2752 fde
->dw_fde_current_label
, NULL
);
2753 fde
->dw_fde_current_label
= cfi
->dw_cfi_oprnd1
.dw_cfi_addr
;
2756 case DW_CFA_MIPS_advance_loc8
:
2757 dw2_asm_output_delta (8, cfi
->dw_cfi_oprnd1
.dw_cfi_addr
,
2758 fde
->dw_fde_current_label
, NULL
);
2759 fde
->dw_fde_current_label
= cfi
->dw_cfi_oprnd1
.dw_cfi_addr
;
2762 case DW_CFA_offset_extended
:
2763 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, for_eh
);
2764 dw2_asm_output_data_uleb128 (r
, NULL
);
2765 off
= div_data_align (cfi
->dw_cfi_oprnd2
.dw_cfi_offset
);
2766 dw2_asm_output_data_uleb128 (off
, NULL
);
2769 case DW_CFA_def_cfa
:
2770 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, for_eh
);
2771 dw2_asm_output_data_uleb128 (r
, NULL
);
2772 dw2_asm_output_data_uleb128 (cfi
->dw_cfi_oprnd2
.dw_cfi_offset
, NULL
);
2775 case DW_CFA_offset_extended_sf
:
2776 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, for_eh
);
2777 dw2_asm_output_data_uleb128 (r
, NULL
);
2778 off
= div_data_align (cfi
->dw_cfi_oprnd2
.dw_cfi_offset
);
2779 dw2_asm_output_data_sleb128 (off
, NULL
);
2782 case DW_CFA_def_cfa_sf
:
2783 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, for_eh
);
2784 dw2_asm_output_data_uleb128 (r
, NULL
);
2785 off
= div_data_align (cfi
->dw_cfi_oprnd2
.dw_cfi_offset
);
2786 dw2_asm_output_data_sleb128 (off
, NULL
);
2789 case DW_CFA_restore_extended
:
2790 case DW_CFA_undefined
:
2791 case DW_CFA_same_value
:
2792 case DW_CFA_def_cfa_register
:
2793 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, for_eh
);
2794 dw2_asm_output_data_uleb128 (r
, NULL
);
2797 case DW_CFA_register
:
2798 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, for_eh
);
2799 dw2_asm_output_data_uleb128 (r
, NULL
);
2800 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd2
.dw_cfi_reg_num
, for_eh
);
2801 dw2_asm_output_data_uleb128 (r
, NULL
);
2804 case DW_CFA_def_cfa_offset
:
2805 case DW_CFA_GNU_args_size
:
2806 dw2_asm_output_data_uleb128 (cfi
->dw_cfi_oprnd1
.dw_cfi_offset
, NULL
);
2809 case DW_CFA_def_cfa_offset_sf
:
2810 off
= div_data_align (cfi
->dw_cfi_oprnd1
.dw_cfi_offset
);
2811 dw2_asm_output_data_sleb128 (off
, NULL
);
2814 case DW_CFA_GNU_window_save
:
2817 case DW_CFA_def_cfa_expression
:
2818 case DW_CFA_expression
:
2819 output_cfa_loc (cfi
, for_eh
);
2822 case DW_CFA_GNU_negative_offset_extended
:
2823 /* Obsoleted by DW_CFA_offset_extended_sf. */
2832 /* Similar, but do it via assembler directives instead. */
2835 output_cfi_directive (FILE *f
, dw_cfi_ref cfi
)
2837 unsigned long r
, r2
;
2839 switch (cfi
->dw_cfi_opc
)
2841 case DW_CFA_advance_loc
:
2842 case DW_CFA_advance_loc1
:
2843 case DW_CFA_advance_loc2
:
2844 case DW_CFA_advance_loc4
:
2845 case DW_CFA_MIPS_advance_loc8
:
2846 case DW_CFA_set_loc
:
2847 /* Should only be created in a code path not followed when emitting
2848 via directives. The assembler is going to take care of this for
2849 us. But this routines is also used for debugging dumps, so
2851 gcc_assert (f
!= asm_out_file
);
2852 fprintf (f
, "\t.cfi_advance_loc\n");
2856 case DW_CFA_offset_extended
:
2857 case DW_CFA_offset_extended_sf
:
2858 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, 1);
2859 fprintf (f
, "\t.cfi_offset %lu, "HOST_WIDE_INT_PRINT_DEC
"\n",
2860 r
, cfi
->dw_cfi_oprnd2
.dw_cfi_offset
);
2863 case DW_CFA_restore
:
2864 case DW_CFA_restore_extended
:
2865 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, 1);
2866 fprintf (f
, "\t.cfi_restore %lu\n", r
);
2869 case DW_CFA_undefined
:
2870 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, 1);
2871 fprintf (f
, "\t.cfi_undefined %lu\n", r
);
2874 case DW_CFA_same_value
:
2875 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, 1);
2876 fprintf (f
, "\t.cfi_same_value %lu\n", r
);
2879 case DW_CFA_def_cfa
:
2880 case DW_CFA_def_cfa_sf
:
2881 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, 1);
2882 fprintf (f
, "\t.cfi_def_cfa %lu, "HOST_WIDE_INT_PRINT_DEC
"\n",
2883 r
, cfi
->dw_cfi_oprnd2
.dw_cfi_offset
);
2886 case DW_CFA_def_cfa_register
:
2887 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, 1);
2888 fprintf (f
, "\t.cfi_def_cfa_register %lu\n", r
);
2891 case DW_CFA_register
:
2892 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, 1);
2893 r2
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd2
.dw_cfi_reg_num
, 1);
2894 fprintf (f
, "\t.cfi_register %lu, %lu\n", r
, r2
);
2897 case DW_CFA_def_cfa_offset
:
2898 case DW_CFA_def_cfa_offset_sf
:
2899 fprintf (f
, "\t.cfi_def_cfa_offset "
2900 HOST_WIDE_INT_PRINT_DEC
"\n",
2901 cfi
->dw_cfi_oprnd1
.dw_cfi_offset
);
2904 case DW_CFA_remember_state
:
2905 fprintf (f
, "\t.cfi_remember_state\n");
2907 case DW_CFA_restore_state
:
2908 fprintf (f
, "\t.cfi_restore_state\n");
2911 case DW_CFA_GNU_args_size
:
2912 if (f
== asm_out_file
)
2914 fprintf (f
, "\t.cfi_escape %#x,", DW_CFA_GNU_args_size
);
2915 dw2_asm_output_data_uleb128_raw (cfi
->dw_cfi_oprnd1
.dw_cfi_offset
);
2917 fprintf (f
, "\t%s args_size "HOST_WIDE_INT_PRINT_DEC
,
2918 ASM_COMMENT_START
, cfi
->dw_cfi_oprnd1
.dw_cfi_offset
);
2923 fprintf (f
, "\t.cfi_GNU_args_size "HOST_WIDE_INT_PRINT_DEC
"\n",
2924 cfi
->dw_cfi_oprnd1
.dw_cfi_offset
);
2928 case DW_CFA_GNU_window_save
:
2929 fprintf (f
, "\t.cfi_window_save\n");
2932 case DW_CFA_def_cfa_expression
:
2933 if (f
!= asm_out_file
)
2935 fprintf (f
, "\t.cfi_def_cfa_expression ...\n");
2939 case DW_CFA_expression
:
2940 if (f
!= asm_out_file
)
2942 fprintf (f
, "\t.cfi_cfa_expression ...\n");
2945 fprintf (f
, "\t.cfi_escape %#x,", cfi
->dw_cfi_opc
);
2946 output_cfa_loc_raw (cfi
);
2956 dwarf2out_emit_cfi (dw_cfi_ref cfi
)
2958 if (dwarf2out_do_cfi_asm ())
2959 output_cfi_directive (asm_out_file
, cfi
);
2962 /* Output CFIs from VEC, up to index UPTO, to bring current FDE to the
2963 same state as after executing CFIs in CFI chain. DO_CFI_ASM is
2964 true if .cfi_* directives shall be emitted, false otherwise. If it
2965 is false, FDE and FOR_EH are the other arguments to pass to
2969 output_cfis (cfi_vec vec
, int upto
, bool do_cfi_asm
,
2970 dw_fde_ref fde
, bool for_eh
)
2973 struct dw_cfi_struct cfi_buf
;
2975 dw_cfi_ref cfi_args_size
= NULL
, cfi_cfa
= NULL
, cfi_cfa_offset
= NULL
;
2976 VEC(dw_cfi_ref
, heap
) *regs
= VEC_alloc (dw_cfi_ref
, heap
, 32);
2977 unsigned int len
, idx
;
2979 for (ix
= 0; ix
< upto
+ 1; ix
++)
2981 dw_cfi_ref cfi
= ix
< upto
? VEC_index (dw_cfi_ref
, vec
, ix
) : NULL
;
2982 switch (cfi
? cfi
->dw_cfi_opc
: DW_CFA_nop
)
2984 case DW_CFA_advance_loc
:
2985 case DW_CFA_advance_loc1
:
2986 case DW_CFA_advance_loc2
:
2987 case DW_CFA_advance_loc4
:
2988 case DW_CFA_MIPS_advance_loc8
:
2989 case DW_CFA_set_loc
:
2990 /* All advances should be ignored. */
2992 case DW_CFA_remember_state
:
2994 dw_cfi_ref args_size
= cfi_args_size
;
2996 /* Skip everything between .cfi_remember_state and
2997 .cfi_restore_state. */
3002 for (; ix
< upto
; ix
++)
3004 cfi2
= VEC_index (dw_cfi_ref
, vec
, ix
);
3005 if (cfi2
->dw_cfi_opc
== DW_CFA_restore_state
)
3007 else if (cfi2
->dw_cfi_opc
== DW_CFA_GNU_args_size
)
3010 gcc_assert (cfi2
->dw_cfi_opc
!= DW_CFA_remember_state
);
3013 cfi_args_size
= args_size
;
3016 case DW_CFA_GNU_args_size
:
3017 cfi_args_size
= cfi
;
3019 case DW_CFA_GNU_window_save
:
3022 case DW_CFA_offset_extended
:
3023 case DW_CFA_offset_extended_sf
:
3024 case DW_CFA_restore
:
3025 case DW_CFA_restore_extended
:
3026 case DW_CFA_undefined
:
3027 case DW_CFA_same_value
:
3028 case DW_CFA_register
:
3029 case DW_CFA_val_offset
:
3030 case DW_CFA_val_offset_sf
:
3031 case DW_CFA_expression
:
3032 case DW_CFA_val_expression
:
3033 case DW_CFA_GNU_negative_offset_extended
:
3034 if (VEC_length (dw_cfi_ref
, regs
)
3035 <= cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
)
3036 VEC_safe_grow_cleared (dw_cfi_ref
, heap
, regs
,
3037 cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
+ 1);
3038 VEC_replace (dw_cfi_ref
, regs
, cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
,
3041 case DW_CFA_def_cfa
:
3042 case DW_CFA_def_cfa_sf
:
3043 case DW_CFA_def_cfa_expression
:
3045 cfi_cfa_offset
= cfi
;
3047 case DW_CFA_def_cfa_register
:
3050 case DW_CFA_def_cfa_offset
:
3051 case DW_CFA_def_cfa_offset_sf
:
3052 cfi_cfa_offset
= cfi
;
3055 gcc_assert (cfi
== NULL
);
3057 len
= VEC_length (dw_cfi_ref
, regs
);
3058 for (idx
= 0; idx
< len
; idx
++)
3060 cfi2
= VEC_replace (dw_cfi_ref
, regs
, idx
, NULL
);
3062 && cfi2
->dw_cfi_opc
!= DW_CFA_restore
3063 && cfi2
->dw_cfi_opc
!= DW_CFA_restore_extended
)
3066 output_cfi_directive (asm_out_file
, cfi2
);
3068 output_cfi (cfi2
, fde
, for_eh
);
3071 if (cfi_cfa
&& cfi_cfa_offset
&& cfi_cfa_offset
!= cfi_cfa
)
3073 gcc_assert (cfi_cfa
->dw_cfi_opc
!= DW_CFA_def_cfa_expression
);
3075 switch (cfi_cfa_offset
->dw_cfi_opc
)
3077 case DW_CFA_def_cfa_offset
:
3078 cfi_buf
.dw_cfi_opc
= DW_CFA_def_cfa
;
3079 cfi_buf
.dw_cfi_oprnd2
= cfi_cfa_offset
->dw_cfi_oprnd1
;
3081 case DW_CFA_def_cfa_offset_sf
:
3082 cfi_buf
.dw_cfi_opc
= DW_CFA_def_cfa_sf
;
3083 cfi_buf
.dw_cfi_oprnd2
= cfi_cfa_offset
->dw_cfi_oprnd1
;
3085 case DW_CFA_def_cfa
:
3086 case DW_CFA_def_cfa_sf
:
3087 cfi_buf
.dw_cfi_opc
= cfi_cfa_offset
->dw_cfi_opc
;
3088 cfi_buf
.dw_cfi_oprnd2
= cfi_cfa_offset
->dw_cfi_oprnd2
;
3095 else if (cfi_cfa_offset
)
3096 cfi_cfa
= cfi_cfa_offset
;
3100 output_cfi_directive (asm_out_file
, cfi_cfa
);
3102 output_cfi (cfi_cfa
, fde
, for_eh
);
3105 cfi_cfa_offset
= NULL
;
3107 && cfi_args_size
->dw_cfi_oprnd1
.dw_cfi_offset
)
3110 output_cfi_directive (asm_out_file
, cfi_args_size
);
3112 output_cfi (cfi_args_size
, fde
, for_eh
);
3114 cfi_args_size
= NULL
;
3117 VEC_free (dw_cfi_ref
, heap
, regs
);
3120 else if (do_cfi_asm
)
3121 output_cfi_directive (asm_out_file
, cfi
);
3123 output_cfi (cfi
, fde
, for_eh
);
/* Cache of the dwarf2out_do_cfi_asm decision, saved across PCH.
   (It also short-circuits dwarf2out_do_frame, which returns true
   whenever this is positive.)
   This variable is tri-state, with 0 unset, >0 true, <0 false.  */
static GTY(()) signed char saved_do_cfi_asm = 0;
3136 /* Decide whether we want to emit frame unwind information for the current
3137 translation unit. */
3140 dwarf2out_do_frame (void)
3142 /* We want to emit correct CFA location expressions or lists, so we
3143 have to return true if we're going to output debug info, even if
3144 we're not going to output frame or unwind info. */
3145 if (write_symbols
== DWARF2_DEBUG
|| write_symbols
== VMS_AND_DWARF2_DEBUG
)
3148 if (saved_do_cfi_asm
> 0)
3151 if (targetm
.debug_unwind_info () == UI_DWARF2
)
3154 if ((flag_unwind_tables
|| flag_exceptions
)
3155 && targetm_common
.except_unwind_info (&global_options
) == UI_DWARF2
)
3161 /* Decide whether to emit frame unwind via assembler directives. */
3164 dwarf2out_do_cfi_asm (void)
3168 #ifdef MIPS_DEBUGGING_INFO
3172 if (saved_do_cfi_asm
!= 0)
3173 return saved_do_cfi_asm
> 0;
3175 /* Assume failure for a moment. */
3176 saved_do_cfi_asm
= -1;
3178 if (!flag_dwarf2_cfi_asm
|| !dwarf2out_do_frame ())
3180 if (!HAVE_GAS_CFI_PERSONALITY_DIRECTIVE
)
3183 /* Make sure the personality encoding is one the assembler can support.
3184 In particular, aligned addresses can't be handled. */
3185 enc
= ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/2,/*global=*/1);
3186 if ((enc
& 0x70) != 0 && (enc
& 0x70) != DW_EH_PE_pcrel
)
3188 enc
= ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0,/*global=*/0);
3189 if ((enc
& 0x70) != 0 && (enc
& 0x70) != DW_EH_PE_pcrel
)
3192 /* If we can't get the assembler to emit only .debug_frame, and we don't need
3193 dwarf2 unwind info for exceptions, then emit .debug_frame by hand. */
3194 if (!HAVE_GAS_CFI_SECTIONS_DIRECTIVE
3195 && !flag_unwind_tables
&& !flag_exceptions
3196 && targetm_common
.except_unwind_info (&global_options
) != UI_DWARF2
)
3200 saved_do_cfi_asm
= 1;
3205 gate_dwarf2_frame (void)
3207 #ifndef HAVE_prologue
3208 /* Targets which still implement the prologue in assembler text
3209 cannot use the generic dwarf2 unwinding. */
3213 /* ??? What to do for UI_TARGET unwinding? They might be able to benefit
3214 from the optimized shrink-wrapping annotations that we will compute.
3215 For now, only produce the CFI notes for dwarf2. */
3216 return dwarf2out_do_frame ();
3219 struct rtl_opt_pass pass_dwarf2_frame
=
3223 "dwarf2", /* name */
3224 gate_dwarf2_frame
, /* gate */
3225 execute_dwarf2_frame
, /* execute */
3228 0, /* static_pass_number */
3229 TV_FINAL
, /* tv_id */
3230 0, /* properties_required */
3231 0, /* properties_provided */
3232 0, /* properties_destroyed */
3233 0, /* todo_flags_start */
3234 0 /* todo_flags_finish */
3238 #include "gt-dwarf2cfi.h"