1 /* Dwarf2 Call Frame Information helper routines.
2 Copyright (C) 1992, 1993, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
3 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
24 #include "coretypes.h"
31 #include "dwarf2out.h"
32 #include "dwarf2asm.h"
36 #include "common/common-target.h"
37 #include "tree-pass.h"
39 #include "except.h" /* expand_builtin_dwarf_sp_column */
40 #include "expr.h" /* init_return_column_size */
41 #include "regs.h" /* expand_builtin_init_dwarf_reg_sizes */
42 #include "output.h" /* asm_out_file */
43 #include "debug.h" /* dwarf2out_do_frame, dwarf2out_do_cfi_asm */
46 /* ??? Poison these here until it can be done generically. They've been
47 totally replaced in this file; make sure it stays that way. */
48 #undef DWARF2_UNWIND_INFO
49 #undef DWARF2_FRAME_INFO
50 #if (GCC_VERSION >= 3000)
51 #pragma GCC poison DWARF2_UNWIND_INFO DWARF2_FRAME_INFO
54 #ifndef INCOMING_RETURN_ADDR_RTX
55 #define INCOMING_RETURN_ADDR_RTX (gcc_unreachable (), NULL_RTX)
58 /* Maximum size (in bytes) of an artificially generated label. */
59 #define MAX_ARTIFICIAL_LABEL_BYTES 30
61 /* A vector of call frame insns for the CIE. */
64 static GTY(()) unsigned long dwarf2out_cfi_label_num
;
66 /* The insn after which a new CFI note should be emitted. */
69 /* When non-null, add_cfi will add the CFI to this vector. */
70 static cfi_vec
*add_cfi_vec
;
72 /* True if remember_state should be emitted before following CFI directive. */
73 static bool emit_cfa_remember
;
75 /* True if any CFI directives were emitted at the current insn. */
76 static bool any_cfis_emitted
;
78 /* Short-hand for commonly used register numbers. */
79 static unsigned dw_stack_pointer_regnum
;
80 static unsigned dw_frame_pointer_regnum
;
83 static void dwarf2out_cfi_begin_epilogue (rtx insn
);
84 static void dwarf2out_frame_debug_restore_state (void);
87 /* Hook used by __throw. */
90 expand_builtin_dwarf_sp_column (void)
92 unsigned int dwarf_regnum
= DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM
);
93 return GEN_INT (DWARF2_FRAME_REG_OUT (dwarf_regnum
, 1));
96 /* MEM is a memory reference for the register size table, each element of
97 which has mode MODE. Initialize column C as a return address column. */
100 init_return_column_size (enum machine_mode mode
, rtx mem
, unsigned int c
)
102 HOST_WIDE_INT offset
= c
* GET_MODE_SIZE (mode
);
103 HOST_WIDE_INT size
= GET_MODE_SIZE (Pmode
);
104 emit_move_insn (adjust_address (mem
, mode
, offset
), GEN_INT (size
));
107 /* Generate code to initialize the register size table. */
110 expand_builtin_init_dwarf_reg_sizes (tree address
)
113 enum machine_mode mode
= TYPE_MODE (char_type_node
);
114 rtx addr
= expand_normal (address
);
115 rtx mem
= gen_rtx_MEM (BLKmode
, addr
);
116 bool wrote_return_column
= false;
118 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
120 unsigned int dnum
= DWARF_FRAME_REGNUM (i
);
121 unsigned int rnum
= DWARF2_FRAME_REG_OUT (dnum
, 1);
123 if (rnum
< DWARF_FRAME_REGISTERS
)
125 HOST_WIDE_INT offset
= rnum
* GET_MODE_SIZE (mode
);
126 enum machine_mode save_mode
= reg_raw_mode
[i
];
129 if (HARD_REGNO_CALL_PART_CLOBBERED (i
, save_mode
))
130 save_mode
= choose_hard_reg_mode (i
, 1, true);
131 if (dnum
== DWARF_FRAME_RETURN_COLUMN
)
133 if (save_mode
== VOIDmode
)
135 wrote_return_column
= true;
137 size
= GET_MODE_SIZE (save_mode
);
141 emit_move_insn (adjust_address (mem
, mode
, offset
),
142 gen_int_mode (size
, mode
));
146 if (!wrote_return_column
)
147 init_return_column_size (mode
, mem
, DWARF_FRAME_RETURN_COLUMN
);
149 #ifdef DWARF_ALT_FRAME_RETURN_COLUMN
150 init_return_column_size (mode
, mem
, DWARF_ALT_FRAME_RETURN_COLUMN
);
153 targetm
.init_dwarf_reg_sizes_extra (address
);
156 /* Divide OFF by DWARF_CIE_DATA_ALIGNMENT, asserting no remainder. */
158 static inline HOST_WIDE_INT
159 div_data_align (HOST_WIDE_INT off
)
161 HOST_WIDE_INT r
= off
/ DWARF_CIE_DATA_ALIGNMENT
;
162 gcc_assert (r
* DWARF_CIE_DATA_ALIGNMENT
== off
);
166 /* Return true if we need a signed version of a given opcode
167 (e.g. DW_CFA_offset_extended_sf vs DW_CFA_offset_extended). */
170 need_data_align_sf_opcode (HOST_WIDE_INT off
)
172 return DWARF_CIE_DATA_ALIGNMENT
< 0 ? off
> 0 : off
< 0;
175 /* Return a pointer to a newly allocated Call Frame Instruction. */
177 static inline dw_cfi_ref
180 dw_cfi_ref cfi
= ggc_alloc_dw_cfi_node ();
182 cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
= 0;
183 cfi
->dw_cfi_oprnd2
.dw_cfi_reg_num
= 0;
188 /* Generate a new label for the CFI info to refer to. */
191 dwarf2out_cfi_label (void)
193 int num
= dwarf2out_cfi_label_num
++;
196 ASM_GENERATE_INTERNAL_LABEL (label
, "LCFI", num
);
198 return xstrdup (label
);
201 /* Add CFI either to the current insn stream or to a vector, or both. */
204 add_cfi (dw_cfi_ref cfi
)
206 if (emit_cfa_remember
)
208 dw_cfi_ref cfi_remember
;
210 /* Emit the state save. */
211 emit_cfa_remember
= false;
212 cfi_remember
= new_cfi ();
213 cfi_remember
->dw_cfi_opc
= DW_CFA_remember_state
;
214 add_cfi (cfi_remember
);
217 any_cfis_emitted
= true;
218 if (cfi_insn
!= NULL
)
220 cfi_insn
= emit_note_after (NOTE_INSN_CFI
, cfi_insn
);
221 NOTE_CFI (cfi_insn
) = cfi
;
223 if (add_cfi_vec
!= NULL
)
224 VEC_safe_push (dw_cfi_ref
, gc
, *add_cfi_vec
, cfi
);
227 /* This function fills in a dw_cfa_location structure from a dwarf location
228 descriptor sequence. */
231 get_cfa_from_loc_descr (dw_cfa_location
*cfa
, struct dw_loc_descr_struct
*loc
)
233 struct dw_loc_descr_struct
*ptr
;
235 cfa
->base_offset
= 0;
239 for (ptr
= loc
; ptr
!= NULL
; ptr
= ptr
->dw_loc_next
)
241 enum dwarf_location_atom op
= ptr
->dw_loc_opc
;
277 cfa
->reg
= op
- DW_OP_reg0
;
280 cfa
->reg
= ptr
->dw_loc_oprnd1
.v
.val_int
;
314 cfa
->reg
= op
- DW_OP_breg0
;
315 cfa
->base_offset
= ptr
->dw_loc_oprnd1
.v
.val_int
;
318 cfa
->reg
= ptr
->dw_loc_oprnd1
.v
.val_int
;
319 cfa
->base_offset
= ptr
->dw_loc_oprnd2
.v
.val_int
;
324 case DW_OP_plus_uconst
:
325 cfa
->offset
= ptr
->dw_loc_oprnd1
.v
.val_unsigned
;
333 /* Find the previous value for the CFA, iteratively. CFI is the opcode
334 to interpret, *LOC will be updated as necessary, *REMEMBER is used for
335 one level of remember/restore state processing. */
338 lookup_cfa_1 (dw_cfi_ref cfi
, dw_cfa_location
*loc
, dw_cfa_location
*remember
)
340 switch (cfi
->dw_cfi_opc
)
342 case DW_CFA_def_cfa_offset
:
343 case DW_CFA_def_cfa_offset_sf
:
344 loc
->offset
= cfi
->dw_cfi_oprnd1
.dw_cfi_offset
;
346 case DW_CFA_def_cfa_register
:
347 loc
->reg
= cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
;
350 case DW_CFA_def_cfa_sf
:
351 loc
->reg
= cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
;
352 loc
->offset
= cfi
->dw_cfi_oprnd2
.dw_cfi_offset
;
354 case DW_CFA_def_cfa_expression
:
355 get_cfa_from_loc_descr (loc
, cfi
->dw_cfi_oprnd1
.dw_cfi_loc
);
358 case DW_CFA_remember_state
:
359 gcc_assert (!remember
->in_use
);
361 remember
->in_use
= 1;
363 case DW_CFA_restore_state
:
364 gcc_assert (remember
->in_use
);
366 remember
->in_use
= 0;
374 /* The current rule for calculating the DWARF2 canonical frame address. */
375 static dw_cfa_location cfa
;
377 /* A copy of the CFA, for comparison purposes. */
378 static dw_cfa_location old_cfa
;
380 /* The register used for saving registers to the stack, and its offset
382 static dw_cfa_location cfa_store
;
384 /* The current save location around an epilogue. */
385 static dw_cfa_location cfa_remember
;
387 /* Like cfa_remember, but a copy of old_cfa. */
388 static dw_cfa_location old_cfa_remember
;
390 /* The running total of the size of arguments pushed onto the stack. */
391 static HOST_WIDE_INT args_size
;
393 /* The last args_size we actually output. */
394 static HOST_WIDE_INT old_args_size
;
396 /* Determine if two dw_cfa_location structures define the same data. */
399 cfa_equal_p (const dw_cfa_location
*loc1
, const dw_cfa_location
*loc2
)
401 return (loc1
->reg
== loc2
->reg
402 && loc1
->offset
== loc2
->offset
403 && loc1
->indirect
== loc2
->indirect
404 && (loc1
->indirect
== 0
405 || loc1
->base_offset
== loc2
->base_offset
));
408 /* This routine does the actual work. The CFA is now calculated from
409 the dw_cfa_location structure. */
412 def_cfa_1 (dw_cfa_location
*loc_p
)
420 if (cfa_store
.reg
== loc
.reg
&& loc
.indirect
== 0)
421 cfa_store
.offset
= loc
.offset
;
423 /* If nothing changed, no need to issue any call frame instructions. */
424 if (cfa_equal_p (&loc
, &old_cfa
))
429 if (loc
.reg
== old_cfa
.reg
&& !loc
.indirect
&& !old_cfa
.indirect
)
431 /* Construct a "DW_CFA_def_cfa_offset <offset>" instruction, indicating
432 the CFA register did not change but the offset did. The data
433 factoring for DW_CFA_def_cfa_offset_sf happens in output_cfi, or
434 in the assembler via the .cfi_def_cfa_offset directive. */
436 cfi
->dw_cfi_opc
= DW_CFA_def_cfa_offset_sf
;
438 cfi
->dw_cfi_opc
= DW_CFA_def_cfa_offset
;
439 cfi
->dw_cfi_oprnd1
.dw_cfi_offset
= loc
.offset
;
442 #ifndef MIPS_DEBUGGING_INFO /* SGI dbx thinks this means no offset. */
443 else if (loc
.offset
== old_cfa
.offset
444 && old_cfa
.reg
!= INVALID_REGNUM
446 && !old_cfa
.indirect
)
448 /* Construct a "DW_CFA_def_cfa_register <register>" instruction,
449 indicating the CFA register has changed to <register> but the
450 offset has not changed. */
451 cfi
->dw_cfi_opc
= DW_CFA_def_cfa_register
;
452 cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
= loc
.reg
;
456 else if (loc
.indirect
== 0)
458 /* Construct a "DW_CFA_def_cfa <register> <offset>" instruction,
459 indicating the CFA register has changed to <register> with
460 the specified offset. The data factoring for DW_CFA_def_cfa_sf
461 happens in output_cfi, or in the assembler via the .cfi_def_cfa
464 cfi
->dw_cfi_opc
= DW_CFA_def_cfa_sf
;
466 cfi
->dw_cfi_opc
= DW_CFA_def_cfa
;
467 cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
= loc
.reg
;
468 cfi
->dw_cfi_oprnd2
.dw_cfi_offset
= loc
.offset
;
472 /* Construct a DW_CFA_def_cfa_expression instruction to
473 calculate the CFA using a full location expression since no
474 register-offset pair is available. */
475 struct dw_loc_descr_struct
*loc_list
;
477 cfi
->dw_cfi_opc
= DW_CFA_def_cfa_expression
;
478 loc_list
= build_cfa_loc (&loc
, 0);
479 cfi
->dw_cfi_oprnd1
.dw_cfi_loc
= loc_list
;
486 /* Add the CFI for saving a register. REG is the CFA column number.
487 If SREG is -1, the register is saved at OFFSET from the CFA;
488 otherwise it is saved in SREG. */
491 reg_save (unsigned int reg
, unsigned int sreg
, HOST_WIDE_INT offset
)
493 dw_fde_ref fde
= cfun
? cfun
->fde
: NULL
;
494 dw_cfi_ref cfi
= new_cfi ();
496 cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
= reg
;
498 /* When stack is aligned, store REG using DW_CFA_expression with FP. */
500 && fde
->stack_realign
501 && sreg
== INVALID_REGNUM
)
503 cfi
->dw_cfi_opc
= DW_CFA_expression
;
504 cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
= reg
;
505 cfi
->dw_cfi_oprnd2
.dw_cfi_loc
506 = build_cfa_aligned_loc (&cfa
, offset
, fde
->stack_realignment
);
508 else if (sreg
== INVALID_REGNUM
)
510 if (need_data_align_sf_opcode (offset
))
511 cfi
->dw_cfi_opc
= DW_CFA_offset_extended_sf
;
512 else if (reg
& ~0x3f)
513 cfi
->dw_cfi_opc
= DW_CFA_offset_extended
;
515 cfi
->dw_cfi_opc
= DW_CFA_offset
;
516 cfi
->dw_cfi_oprnd2
.dw_cfi_offset
= offset
;
518 else if (sreg
== reg
)
519 cfi
->dw_cfi_opc
= DW_CFA_same_value
;
522 cfi
->dw_cfi_opc
= DW_CFA_register
;
523 cfi
->dw_cfi_oprnd2
.dw_cfi_reg_num
= sreg
;
529 /* Given a SET, calculate the amount of stack adjustment it
533 stack_adjust_offset (const_rtx pattern
, HOST_WIDE_INT cur_args_size
,
534 HOST_WIDE_INT cur_offset
)
536 const_rtx src
= SET_SRC (pattern
);
537 const_rtx dest
= SET_DEST (pattern
);
538 HOST_WIDE_INT offset
= 0;
541 if (dest
== stack_pointer_rtx
)
543 code
= GET_CODE (src
);
545 /* Assume (set (reg sp) (reg whatever)) sets args_size
547 if (code
== REG
&& src
!= stack_pointer_rtx
)
549 offset
= -cur_args_size
;
550 #ifndef STACK_GROWS_DOWNWARD
553 return offset
- cur_offset
;
556 if (! (code
== PLUS
|| code
== MINUS
)
557 || XEXP (src
, 0) != stack_pointer_rtx
558 || !CONST_INT_P (XEXP (src
, 1)))
561 /* (set (reg sp) (plus (reg sp) (const_int))) */
562 offset
= INTVAL (XEXP (src
, 1));
568 if (MEM_P (src
) && !MEM_P (dest
))
572 /* (set (mem (pre_dec (reg sp))) (foo)) */
573 src
= XEXP (dest
, 0);
574 code
= GET_CODE (src
);
580 if (XEXP (src
, 0) == stack_pointer_rtx
)
582 rtx val
= XEXP (XEXP (src
, 1), 1);
583 /* We handle only adjustments by constant amount. */
584 gcc_assert (GET_CODE (XEXP (src
, 1)) == PLUS
585 && CONST_INT_P (val
));
586 offset
= -INTVAL (val
);
593 if (XEXP (src
, 0) == stack_pointer_rtx
)
595 offset
= GET_MODE_SIZE (GET_MODE (dest
));
602 if (XEXP (src
, 0) == stack_pointer_rtx
)
604 offset
= -GET_MODE_SIZE (GET_MODE (dest
));
619 /* Precomputed args_size for CODE_LABELs and BARRIERs preceding them,
620 indexed by INSN_UID. */
622 static HOST_WIDE_INT
*barrier_args_size
;
624 /* Helper function for compute_barrier_args_size. Handle one insn. */
627 compute_barrier_args_size_1 (rtx insn
, HOST_WIDE_INT cur_args_size
,
628 VEC (rtx
, heap
) **next
)
630 HOST_WIDE_INT offset
= 0;
633 if (! RTX_FRAME_RELATED_P (insn
))
635 if (prologue_epilogue_contains (insn
))
637 else if (GET_CODE (PATTERN (insn
)) == SET
)
638 offset
= stack_adjust_offset (PATTERN (insn
), cur_args_size
, 0);
639 else if (GET_CODE (PATTERN (insn
)) == PARALLEL
640 || GET_CODE (PATTERN (insn
)) == SEQUENCE
)
642 /* There may be stack adjustments inside compound insns. Search
644 for (i
= XVECLEN (PATTERN (insn
), 0) - 1; i
>= 0; i
--)
645 if (GET_CODE (XVECEXP (PATTERN (insn
), 0, i
)) == SET
)
646 offset
+= stack_adjust_offset (XVECEXP (PATTERN (insn
), 0, i
),
647 cur_args_size
, offset
);
652 rtx expr
= find_reg_note (insn
, REG_FRAME_RELATED_EXPR
, NULL_RTX
);
656 expr
= XEXP (expr
, 0);
657 if (GET_CODE (expr
) == PARALLEL
658 || GET_CODE (expr
) == SEQUENCE
)
659 for (i
= 1; i
< XVECLEN (expr
, 0); i
++)
661 rtx elem
= XVECEXP (expr
, 0, i
);
663 if (GET_CODE (elem
) == SET
&& !RTX_FRAME_RELATED_P (elem
))
664 offset
+= stack_adjust_offset (elem
, cur_args_size
, offset
);
669 #ifndef STACK_GROWS_DOWNWARD
673 cur_args_size
+= offset
;
674 if (cur_args_size
< 0)
679 rtx dest
= JUMP_LABEL (insn
);
683 if (barrier_args_size
[INSN_UID (dest
)] < 0)
685 barrier_args_size
[INSN_UID (dest
)] = cur_args_size
;
686 VEC_safe_push (rtx
, heap
, *next
, dest
);
691 return cur_args_size
;
694 /* Walk the whole function and compute args_size on BARRIERs. */
697 compute_barrier_args_size (void)
699 int max_uid
= get_max_uid (), i
;
701 VEC (rtx
, heap
) *worklist
, *next
, *tmp
;
703 barrier_args_size
= XNEWVEC (HOST_WIDE_INT
, max_uid
);
704 for (i
= 0; i
< max_uid
; i
++)
705 barrier_args_size
[i
] = -1;
707 worklist
= VEC_alloc (rtx
, heap
, 20);
708 next
= VEC_alloc (rtx
, heap
, 20);
710 barrier_args_size
[INSN_UID (insn
)] = 0;
711 VEC_quick_push (rtx
, worklist
, insn
);
714 while (!VEC_empty (rtx
, worklist
))
716 rtx prev
, body
, first_insn
;
717 HOST_WIDE_INT cur_args_size
;
719 first_insn
= insn
= VEC_pop (rtx
, worklist
);
720 cur_args_size
= barrier_args_size
[INSN_UID (insn
)];
721 prev
= prev_nonnote_insn (insn
);
722 if (prev
&& BARRIER_P (prev
))
723 barrier_args_size
[INSN_UID (prev
)] = cur_args_size
;
725 for (; insn
; insn
= NEXT_INSN (insn
))
727 if (INSN_DELETED_P (insn
) || NOTE_P (insn
))
729 if (BARRIER_P (insn
))
734 if (insn
== first_insn
)
736 else if (barrier_args_size
[INSN_UID (insn
)] < 0)
738 barrier_args_size
[INSN_UID (insn
)] = cur_args_size
;
743 /* The insns starting with this label have been
744 already scanned or are in the worklist. */
749 body
= PATTERN (insn
);
750 if (GET_CODE (body
) == SEQUENCE
)
752 HOST_WIDE_INT dest_args_size
= cur_args_size
;
753 for (i
= 1; i
< XVECLEN (body
, 0); i
++)
754 if (INSN_ANNULLED_BRANCH_P (XVECEXP (body
, 0, 0))
755 && INSN_FROM_TARGET_P (XVECEXP (body
, 0, i
)))
757 = compute_barrier_args_size_1 (XVECEXP (body
, 0, i
),
758 dest_args_size
, &next
);
761 = compute_barrier_args_size_1 (XVECEXP (body
, 0, i
),
762 cur_args_size
, &next
);
764 if (INSN_ANNULLED_BRANCH_P (XVECEXP (body
, 0, 0)))
765 compute_barrier_args_size_1 (XVECEXP (body
, 0, 0),
766 dest_args_size
, &next
);
769 = compute_barrier_args_size_1 (XVECEXP (body
, 0, 0),
770 cur_args_size
, &next
);
774 = compute_barrier_args_size_1 (insn
, cur_args_size
, &next
);
778 if (VEC_empty (rtx
, next
))
781 /* Swap WORKLIST with NEXT and truncate NEXT for next iteration. */
785 VEC_truncate (rtx
, next
, 0);
788 VEC_free (rtx
, heap
, worklist
);
789 VEC_free (rtx
, heap
, next
);
792 /* Add a CFI to update the running total of the size of arguments
793 pushed onto the stack. */
796 dwarf2out_args_size (HOST_WIDE_INT size
)
800 if (size
== old_args_size
)
803 old_args_size
= size
;
806 cfi
->dw_cfi_opc
= DW_CFA_GNU_args_size
;
807 cfi
->dw_cfi_oprnd1
.dw_cfi_offset
= size
;
811 /* Record a stack adjustment of OFFSET bytes. */
814 dwarf2out_stack_adjust (HOST_WIDE_INT offset
)
816 if (cfa
.reg
== dw_stack_pointer_regnum
)
817 cfa
.offset
+= offset
;
819 if (cfa_store
.reg
== dw_stack_pointer_regnum
)
820 cfa_store
.offset
+= offset
;
822 if (ACCUMULATE_OUTGOING_ARGS
)
825 #ifndef STACK_GROWS_DOWNWARD
834 if (flag_asynchronous_unwind_tables
)
835 dwarf2out_args_size (args_size
);
838 /* Check INSN to see if it looks like a push or a stack adjustment, and
839 make a note of it if it does. EH uses this information to find out
840 how much extra space it needs to pop off the stack. */
843 dwarf2out_notice_stack_adjust (rtx insn
, bool after_p
)
845 HOST_WIDE_INT offset
;
848 /* Don't handle epilogues at all. Certainly it would be wrong to do so
849 with this function. Proper support would require all frame-related
850 insns to be marked, and to be able to handle saving state around
851 epilogues textually in the middle of the function. */
852 if (prologue_epilogue_contains (insn
))
855 /* If INSN is an instruction from target of an annulled branch, the
856 effects are for the target only and so current argument size
857 shouldn't change at all. */
859 && INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence
, 0, 0))
860 && INSN_FROM_TARGET_P (insn
))
863 /* If only calls can throw, and we have a frame pointer,
864 save up adjustments until we see the CALL_INSN. */
865 if (!flag_asynchronous_unwind_tables
&& cfa
.reg
!= dw_stack_pointer_regnum
)
867 if (CALL_P (insn
) && !after_p
)
869 /* Extract the size of the args from the CALL rtx itself. */
870 insn
= PATTERN (insn
);
871 if (GET_CODE (insn
) == PARALLEL
)
872 insn
= XVECEXP (insn
, 0, 0);
873 if (GET_CODE (insn
) == SET
)
874 insn
= SET_SRC (insn
);
875 gcc_assert (GET_CODE (insn
) == CALL
);
876 dwarf2out_args_size (INTVAL (XEXP (insn
, 1)));
881 if (CALL_P (insn
) && !after_p
)
883 if (!flag_asynchronous_unwind_tables
)
884 dwarf2out_args_size (args_size
);
887 else if (BARRIER_P (insn
))
889 /* Don't call compute_barrier_args_size () if the only
890 BARRIER is at the end of function. */
891 if (barrier_args_size
== NULL
&& next_nonnote_insn (insn
))
892 compute_barrier_args_size ();
893 if (barrier_args_size
== NULL
)
897 offset
= barrier_args_size
[INSN_UID (insn
)];
903 #ifndef STACK_GROWS_DOWNWARD
907 else if (GET_CODE (PATTERN (insn
)) == SET
)
908 offset
= stack_adjust_offset (PATTERN (insn
), args_size
, 0);
909 else if (GET_CODE (PATTERN (insn
)) == PARALLEL
910 || GET_CODE (PATTERN (insn
)) == SEQUENCE
)
912 /* There may be stack adjustments inside compound insns. Search
914 for (offset
= 0, i
= XVECLEN (PATTERN (insn
), 0) - 1; i
>= 0; i
--)
915 if (GET_CODE (XVECEXP (PATTERN (insn
), 0, i
)) == SET
)
916 offset
+= stack_adjust_offset (XVECEXP (PATTERN (insn
), 0, i
),
925 dwarf2out_stack_adjust (offset
);
928 /* We delay emitting a register save until either (a) we reach the end
929 of the prologue or (b) the register is clobbered. This clusters
930 register saves so that there are fewer pc advances. */
932 struct GTY(()) queued_reg_save
{
933 struct queued_reg_save
*next
;
935 HOST_WIDE_INT cfa_offset
;
939 static GTY(()) struct queued_reg_save
*queued_reg_saves
;
941 /* The caller's ORIG_REG is saved in SAVED_IN_REG. */
942 typedef struct GTY(()) reg_saved_in_data
{
947 DEF_VEC_O (reg_saved_in_data
);
948 DEF_VEC_ALLOC_O (reg_saved_in_data
, gc
);
950 /* A set of registers saved in other registers. This is implemented as
951 a flat array because it normally contains zero or 1 entry, depending
952 on the target. IA-64 is the big spender here, using a maximum of
954 static GTY(()) VEC(reg_saved_in_data
, gc
) *regs_saved_in_regs
;
956 static GTY(()) reg_saved_in_data
*cie_return_save
;
958 /* Short-hand inline for the very common D_F_R (REGNO (x)) operation. */
959 /* ??? This ought to go into dwarf2out.h, except that dwarf2out.h is
960 used in places where rtl is prohibited. */
962 static inline unsigned
963 dwf_regno (const_rtx reg
)
965 return DWARF_FRAME_REGNUM (REGNO (reg
));
968 /* Compare X and Y for equivalence. The inputs may be REGs or PC_RTX. */
971 compare_reg_or_pc (rtx x
, rtx y
)
973 if (REG_P (x
) && REG_P (y
))
974 return REGNO (x
) == REGNO (y
);
978 /* Record SRC as being saved in DEST. DEST may be null to delete an
979 existing entry. SRC may be a register or PC_RTX. */
982 record_reg_saved_in_reg (rtx dest
, rtx src
)
984 reg_saved_in_data
*elt
;
987 FOR_EACH_VEC_ELT (reg_saved_in_data
, regs_saved_in_regs
, i
, elt
)
988 if (compare_reg_or_pc (elt
->orig_reg
, src
))
991 VEC_unordered_remove(reg_saved_in_data
, regs_saved_in_regs
, i
);
993 elt
->saved_in_reg
= dest
;
1000 elt
= VEC_safe_push(reg_saved_in_data
, gc
, regs_saved_in_regs
, NULL
);
1001 elt
->orig_reg
= src
;
1002 elt
->saved_in_reg
= dest
;
1005 /* Add an entry to QUEUED_REG_SAVES saying that REG is now saved at
1006 SREG, or if SREG is NULL then it is saved at OFFSET to the CFA. */
1009 queue_reg_save (rtx reg
, rtx sreg
, HOST_WIDE_INT offset
)
1011 struct queued_reg_save
*q
;
1013 /* Duplicates waste space, but it's also necessary to remove them
1014 for correctness, since the queue gets output in reverse order. */
1015 for (q
= queued_reg_saves
; q
!= NULL
; q
= q
->next
)
1016 if (compare_reg_or_pc (q
->reg
, reg
))
1021 q
= ggc_alloc_queued_reg_save ();
1022 q
->next
= queued_reg_saves
;
1023 queued_reg_saves
= q
;
1027 q
->cfa_offset
= offset
;
1028 q
->saved_reg
= sreg
;
1031 /* Output all the entries in QUEUED_REG_SAVES. */
1034 dwarf2out_flush_queued_reg_saves (void)
1036 struct queued_reg_save
*q
;
1038 for (q
= queued_reg_saves
; q
; q
= q
->next
)
1040 unsigned int reg
, sreg
;
1042 record_reg_saved_in_reg (q
->saved_reg
, q
->reg
);
1044 if (q
->reg
== pc_rtx
)
1045 reg
= DWARF_FRAME_RETURN_COLUMN
;
1047 reg
= dwf_regno (q
->reg
);
1049 sreg
= dwf_regno (q
->saved_reg
);
1051 sreg
= INVALID_REGNUM
;
1052 reg_save (reg
, sreg
, q
->cfa_offset
);
1055 queued_reg_saves
= NULL
;
1058 /* Does INSN clobber any register which QUEUED_REG_SAVES lists a saved
1059 location for? Or, does it clobber a register which we've previously
1060 said that some other register is saved in, and for which we now
1061 have a new location for? */
1064 clobbers_queued_reg_save (const_rtx insn
)
1066 struct queued_reg_save
*q
;
1068 for (q
= queued_reg_saves
; q
; q
= q
->next
)
1071 reg_saved_in_data
*rir
;
1073 if (modified_in_p (q
->reg
, insn
))
1076 FOR_EACH_VEC_ELT (reg_saved_in_data
, regs_saved_in_regs
, i
, rir
)
1077 if (compare_reg_or_pc (q
->reg
, rir
->orig_reg
)
1078 && modified_in_p (rir
->saved_in_reg
, insn
))
1085 /* What register, if any, is currently saved in REG? */
1088 reg_saved_in (rtx reg
)
1090 unsigned int regn
= REGNO (reg
);
1091 struct queued_reg_save
*q
;
1092 reg_saved_in_data
*rir
;
1095 for (q
= queued_reg_saves
; q
; q
= q
->next
)
1096 if (q
->saved_reg
&& regn
== REGNO (q
->saved_reg
))
1099 FOR_EACH_VEC_ELT (reg_saved_in_data
, regs_saved_in_regs
, i
, rir
)
1100 if (regn
== REGNO (rir
->saved_in_reg
))
1101 return rir
->orig_reg
;
1107 /* A temporary register holding an integral value used in adjusting SP
1108 or setting up the store_reg. The "offset" field holds the integer
1109 value, not an offset. */
1110 static dw_cfa_location cfa_temp
;
1112 /* A subroutine of dwarf2out_frame_debug, process a REG_DEF_CFA note. */
1115 dwarf2out_frame_debug_def_cfa (rtx pat
)
1117 memset (&cfa
, 0, sizeof (cfa
));
1119 switch (GET_CODE (pat
))
1122 cfa
.reg
= dwf_regno (XEXP (pat
, 0));
1123 cfa
.offset
= INTVAL (XEXP (pat
, 1));
1127 cfa
.reg
= dwf_regno (pat
);
1132 pat
= XEXP (pat
, 0);
1133 if (GET_CODE (pat
) == PLUS
)
1135 cfa
.base_offset
= INTVAL (XEXP (pat
, 1));
1136 pat
= XEXP (pat
, 0);
1138 cfa
.reg
= dwf_regno (pat
);
1142 /* Recurse and define an expression. */
1149 /* A subroutine of dwarf2out_frame_debug, process a REG_ADJUST_CFA note. */
1152 dwarf2out_frame_debug_adjust_cfa (rtx pat
)
1156 gcc_assert (GET_CODE (pat
) == SET
);
1157 dest
= XEXP (pat
, 0);
1158 src
= XEXP (pat
, 1);
1160 switch (GET_CODE (src
))
1163 gcc_assert (dwf_regno (XEXP (src
, 0)) == cfa
.reg
);
1164 cfa
.offset
-= INTVAL (XEXP (src
, 1));
1174 cfa
.reg
= dwf_regno (dest
);
1175 gcc_assert (cfa
.indirect
== 0);
1180 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_OFFSET note. */
1183 dwarf2out_frame_debug_cfa_offset (rtx set
)
1185 HOST_WIDE_INT offset
;
1186 rtx src
, addr
, span
;
1187 unsigned int sregno
;
1189 src
= XEXP (set
, 1);
1190 addr
= XEXP (set
, 0);
1191 gcc_assert (MEM_P (addr
));
1192 addr
= XEXP (addr
, 0);
1194 /* As documented, only consider extremely simple addresses. */
1195 switch (GET_CODE (addr
))
1198 gcc_assert (dwf_regno (addr
) == cfa
.reg
);
1199 offset
= -cfa
.offset
;
1202 gcc_assert (dwf_regno (XEXP (addr
, 0)) == cfa
.reg
);
1203 offset
= INTVAL (XEXP (addr
, 1)) - cfa
.offset
;
1212 sregno
= DWARF_FRAME_RETURN_COLUMN
;
1216 span
= targetm
.dwarf_register_span (src
);
1217 sregno
= dwf_regno (src
);
1220 /* ??? We'd like to use queue_reg_save, but we need to come up with
1221 a different flushing heuristic for epilogues. */
1223 reg_save (sregno
, INVALID_REGNUM
, offset
);
1226 /* We have a PARALLEL describing where the contents of SRC live.
1227 Queue register saves for each piece of the PARALLEL. */
1230 HOST_WIDE_INT span_offset
= offset
;
1232 gcc_assert (GET_CODE (span
) == PARALLEL
);
1234 limit
= XVECLEN (span
, 0);
1235 for (par_index
= 0; par_index
< limit
; par_index
++)
1237 rtx elem
= XVECEXP (span
, 0, par_index
);
1239 sregno
= dwf_regno (src
);
1240 reg_save (sregno
, INVALID_REGNUM
, span_offset
);
1241 span_offset
+= GET_MODE_SIZE (GET_MODE (elem
));
1246 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_REGISTER note. */
1249 dwarf2out_frame_debug_cfa_register (rtx set
)
1252 unsigned sregno
, dregno
;
1254 src
= XEXP (set
, 1);
1255 dest
= XEXP (set
, 0);
1257 record_reg_saved_in_reg (dest
, src
);
1259 sregno
= DWARF_FRAME_RETURN_COLUMN
;
1261 sregno
= dwf_regno (src
);
1263 dregno
= dwf_regno (dest
);
1265 /* ??? We'd like to use queue_reg_save, but we need to come up with
1266 a different flushing heuristic for epilogues. */
1267 reg_save (sregno
, dregno
, 0);
1270 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_EXPRESSION note. */
1273 dwarf2out_frame_debug_cfa_expression (rtx set
)
1275 rtx src
, dest
, span
;
1276 dw_cfi_ref cfi
= new_cfi ();
1278 dest
= SET_DEST (set
);
1279 src
= SET_SRC (set
);
1281 gcc_assert (REG_P (src
));
1282 gcc_assert (MEM_P (dest
));
1284 span
= targetm
.dwarf_register_span (src
);
1287 cfi
->dw_cfi_opc
= DW_CFA_expression
;
1288 cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
= dwf_regno (src
);
1289 cfi
->dw_cfi_oprnd2
.dw_cfi_loc
1290 = mem_loc_descriptor (XEXP (dest
, 0), get_address_mode (dest
),
1291 GET_MODE (dest
), VAR_INIT_STATUS_INITIALIZED
);
1293 /* ??? We'd like to use queue_reg_save, were the interface different,
1294 and, as above, we could manage flushing for epilogues. */
1298 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_RESTORE note. */
1301 dwarf2out_frame_debug_cfa_restore (rtx reg
)
1303 dw_cfi_ref cfi
= new_cfi ();
1304 unsigned int regno
= dwf_regno (reg
);
1306 cfi
->dw_cfi_opc
= (regno
& ~0x3f ? DW_CFA_restore_extended
: DW_CFA_restore
);
1307 cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
= regno
;
1312 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_WINDOW_SAVE.
1313 ??? Perhaps we should note in the CIE where windows are saved (instead of
1314 assuming 0(cfa)) and what registers are in the window. */
1317 dwarf2out_frame_debug_cfa_window_save (void)
1319 dw_cfi_ref cfi
= new_cfi ();
1321 cfi
->dw_cfi_opc
= DW_CFA_GNU_window_save
;
1325 /* Record call frame debugging information for an expression EXPR,
1326 which either sets SP or FP (adjusting how we calculate the frame
1327 address) or saves a register to the stack or another register.
1328 LABEL indicates the address of EXPR.
1330 This function encodes a state machine mapping rtxes to actions on
1331 cfa, cfa_store, and cfa_temp.reg. We describe these rules so
1332 users need not read the source code.
1334 The High-Level Picture
1336 Changes in the register we use to calculate the CFA: Currently we
1337 assume that if you copy the CFA register into another register, we
1338 should take the other one as the new CFA register; this seems to
1339 work pretty well. If it's wrong for some target, it's simple
1340 enough not to set RTX_FRAME_RELATED_P on the insn in question.
1342 Changes in the register we use for saving registers to the stack:
1343 This is usually SP, but not always. Again, we deduce that if you
1344 copy SP into another register (and SP is not the CFA register),
1345 then the new register is the one we will be using for register
1346 saves. This also seems to work.
1348 Register saves: There's not much guesswork about this one; if
1349 RTX_FRAME_RELATED_P is set on an insn which modifies memory, it's a
1350 register save, and the register used to calculate the destination
1351 had better be the one we think we're using for this purpose.
1352 It's also assumed that a copy from a call-saved register to another
1353 register is saving that register if RTX_FRAME_RELATED_P is set on
1354 that instruction. If the copy is from a call-saved register to
1355 the *same* register, that means that the register is now the same
1356 value as in the caller.
1358 Except: If the register being saved is the CFA register, and the
1359 offset is nonzero, we are saving the CFA, so we assume we have to
1360 use DW_CFA_def_cfa_expression. If the offset is 0, we assume that
1361 the intent is to save the value of SP from the previous frame.
1363 In addition, if a register has previously been saved to a different
1366 Invariants / Summaries of Rules
1368 cfa current rule for calculating the CFA. It usually
1369 consists of a register and an offset.
1370 cfa_store register used by prologue code to save things to the stack
1371 cfa_store.offset is the offset from the value of
1372 cfa_store.reg to the actual CFA
1373 cfa_temp register holding an integral value. cfa_temp.offset
1374 stores the value, which will be used to adjust the
1375 stack pointer. cfa_temp is also used like cfa_store,
1376 to track stores to the stack via fp or a temp reg.
1378 Rules 1- 4: Setting a register's value to cfa.reg or an expression
1379 with cfa.reg as the first operand changes the cfa.reg and its
1380 cfa.offset. Rule 1 and 4 also set cfa_temp.reg and
1383 Rules 6- 9: Set a non-cfa.reg register value to a constant or an
1384 expression yielding a constant. This sets cfa_temp.reg
1385 and cfa_temp.offset.
1387 Rule 5: Create a new register cfa_store used to save items to the
1390 Rules 10-14: Save a register to the stack. Define offset as the
1391 difference of the original location and cfa_store's
1392 location (or cfa_temp's location if cfa_temp is used).
1394 Rules 16-20: If AND operation happens on sp in prologue, we assume
1395 stack is realigned. We will use a group of DW_OP_XXX
1396 expressions to represent the location of the stored
1397 register instead of CFA+offset.
1401 "{a,b}" indicates a choice of a xor b.
1402 "<reg>:cfa.reg" indicates that <reg> must equal cfa.reg.
1405 (set <reg1> <reg2>:cfa.reg)
1406 effects: cfa.reg = <reg1>
1407 cfa.offset unchanged
1408 cfa_temp.reg = <reg1>
1409 cfa_temp.offset = cfa.offset
1412 (set sp ({minus,plus,losum} {sp,fp}:cfa.reg
1413 {<const_int>,<reg>:cfa_temp.reg}))
1414 effects: cfa.reg = sp if fp used
1415 cfa.offset += {+/- <const_int>, cfa_temp.offset} if cfa.reg==sp
1416 cfa_store.offset += {+/- <const_int>, cfa_temp.offset}
1417 if cfa_store.reg==sp
1420 (set fp ({minus,plus,losum} <reg>:cfa.reg <const_int>))
1421 effects: cfa.reg = fp
1422 cfa_offset += +/- <const_int>
1425 (set <reg1> ({plus,losum} <reg2>:cfa.reg <const_int>))
1426 constraints: <reg1> != fp
1428 effects: cfa.reg = <reg1>
1429 cfa_temp.reg = <reg1>
1430 cfa_temp.offset = cfa.offset
1433 (set <reg1> (plus <reg2>:cfa_temp.reg sp:cfa.reg))
1434 constraints: <reg1> != fp
1436 effects: cfa_store.reg = <reg1>
1437 cfa_store.offset = cfa.offset - cfa_temp.offset
1440 (set <reg> <const_int>)
1441 effects: cfa_temp.reg = <reg>
1442 cfa_temp.offset = <const_int>
1445 (set <reg1>:cfa_temp.reg (ior <reg2>:cfa_temp.reg <const_int>))
1446 effects: cfa_temp.reg = <reg1>
1447 cfa_temp.offset |= <const_int>
1450 (set <reg> (high <exp>))
1454 (set <reg> (lo_sum <exp> <const_int>))
1455 effects: cfa_temp.reg = <reg>
1456 cfa_temp.offset = <const_int>
1459 (set (mem ({pre,post}_modify sp:cfa_store (???? <reg1> <const_int>))) <reg2>)
1460 effects: cfa_store.offset -= <const_int>
1461 cfa.offset = cfa_store.offset if cfa.reg == sp
1463 cfa.base_offset = -cfa_store.offset
1466 (set (mem ({pre_inc,pre_dec,post_dec} sp:cfa_store.reg)) <reg>)
1467 effects: cfa_store.offset += -/+ mode_size(mem)
1468 cfa.offset = cfa_store.offset if cfa.reg == sp
1470 cfa.base_offset = -cfa_store.offset
1473 (set (mem ({minus,plus,losum} <reg1>:{cfa_store,cfa_temp} <const_int>))
1476 effects: cfa.reg = <reg1>
1477 cfa.base_offset = -/+ <const_int> - {cfa_store,cfa_temp}.offset
1480 (set (mem <reg1>:{cfa_store,cfa_temp}) <reg2>)
1481 effects: cfa.reg = <reg1>
1482 cfa.base_offset = -{cfa_store,cfa_temp}.offset
1485 (set (mem (post_inc <reg1>:cfa_temp <const_int>)) <reg2>)
1486 effects: cfa.reg = <reg1>
1487 cfa.base_offset = -cfa_temp.offset
1488 cfa_temp.offset -= mode_size(mem)
1491 (set <reg> {unspec, unspec_volatile})
1492 effects: target-dependent
1495 (set sp (and: sp <const_int>))
1496 constraints: cfa_store.reg == sp
1497 effects: cfun->fde.stack_realign = 1
1498 cfa_store.offset = 0
1499 fde->drap_reg = cfa.reg if cfa.reg != sp and cfa.reg != fp
1502 (set (mem ({pre_inc, pre_dec} sp)) (mem (plus (cfa.reg) (const_int))))
1503 effects: cfa_store.offset += -/+ mode_size(mem)
1506 (set (mem ({pre_inc, pre_dec} sp)) fp)
1507 constraints: fde->stack_realign == 1
1508 effects: cfa_store.offset = 0
1509 cfa.reg != HARD_FRAME_POINTER_REGNUM
1512 (set (mem ({pre_inc, pre_dec} sp)) cfa.reg)
1513 constraints: fde->stack_realign == 1
1515 && cfa.indirect == 0
1516 && cfa.reg != HARD_FRAME_POINTER_REGNUM
1517 effects: Use DW_CFA_def_cfa_expression to define cfa
1518 cfa.reg == fde->drap_reg */
1521 dwarf2out_frame_debug_expr (rtx expr
)
1523 rtx src
, dest
, span
;
1524 HOST_WIDE_INT offset
;
1527 /* If RTX_FRAME_RELATED_P is set on a PARALLEL, process each member of
1528 the PARALLEL independently. The first element is always processed if
1529 it is a SET. This is for backward compatibility. Other elements
1530 are processed only if they are SETs and the RTX_FRAME_RELATED_P
1531 flag is set in them. */
1532 if (GET_CODE (expr
) == PARALLEL
|| GET_CODE (expr
) == SEQUENCE
)
1535 int limit
= XVECLEN (expr
, 0);
1538 /* PARALLELs have strict read-modify-write semantics, so we
1539 ought to evaluate every rvalue before changing any lvalue.
1540 It's cumbersome to do that in general, but there's an
1541 easy approximation that is enough for all current users:
1542 handle register saves before register assignments. */
1543 if (GET_CODE (expr
) == PARALLEL
)
1544 for (par_index
= 0; par_index
< limit
; par_index
++)
1546 elem
= XVECEXP (expr
, 0, par_index
);
1547 if (GET_CODE (elem
) == SET
1548 && MEM_P (SET_DEST (elem
))
1549 && (RTX_FRAME_RELATED_P (elem
) || par_index
== 0))
1550 dwarf2out_frame_debug_expr (elem
);
1553 for (par_index
= 0; par_index
< limit
; par_index
++)
1555 elem
= XVECEXP (expr
, 0, par_index
);
1556 if (GET_CODE (elem
) == SET
1557 && (!MEM_P (SET_DEST (elem
)) || GET_CODE (expr
) == SEQUENCE
)
1558 && (RTX_FRAME_RELATED_P (elem
) || par_index
== 0))
1559 dwarf2out_frame_debug_expr (elem
);
1560 else if (GET_CODE (elem
) == SET
1562 && !RTX_FRAME_RELATED_P (elem
))
1564 /* Stack adjustment combining might combine some post-prologue
1565 stack adjustment into a prologue stack adjustment. */
1566 HOST_WIDE_INT offset
= stack_adjust_offset (elem
, args_size
, 0);
1569 dwarf2out_stack_adjust (offset
);
1575 gcc_assert (GET_CODE (expr
) == SET
);
1577 src
= SET_SRC (expr
);
1578 dest
= SET_DEST (expr
);
1582 rtx rsi
= reg_saved_in (src
);
1589 switch (GET_CODE (dest
))
1592 switch (GET_CODE (src
))
1594 /* Setting FP from SP. */
1596 if (cfa
.reg
== dwf_regno (src
))
1599 /* Update the CFA rule wrt SP or FP. Make sure src is
1600 relative to the current CFA register.
1602 We used to require that dest be either SP or FP, but the
1603 ARM copies SP to a temporary register, and from there to
1604 FP. So we just rely on the backends to only set
1605 RTX_FRAME_RELATED_P on appropriate insns. */
1606 cfa
.reg
= dwf_regno (dest
);
1607 cfa_temp
.reg
= cfa
.reg
;
1608 cfa_temp
.offset
= cfa
.offset
;
1612 /* Saving a register in a register. */
1613 gcc_assert (!fixed_regs
[REGNO (dest
)]
1614 /* For the SPARC and its register window. */
1615 || (dwf_regno (src
) == DWARF_FRAME_RETURN_COLUMN
));
1617 /* After stack is aligned, we can only save SP in FP
1618 if drap register is used. In this case, we have
1619 to restore stack pointer with the CFA value and we
1620 don't generate this DWARF information. */
1622 && fde
->stack_realign
1623 && REGNO (src
) == STACK_POINTER_REGNUM
)
1624 gcc_assert (REGNO (dest
) == HARD_FRAME_POINTER_REGNUM
1625 && fde
->drap_reg
!= INVALID_REGNUM
1626 && cfa
.reg
!= dwf_regno (src
));
1628 queue_reg_save (src
, dest
, 0);
1635 if (dest
== stack_pointer_rtx
)
1639 switch (GET_CODE (XEXP (src
, 1)))
1642 offset
= INTVAL (XEXP (src
, 1));
1645 gcc_assert (dwf_regno (XEXP (src
, 1)) == cfa_temp
.reg
);
1646 offset
= cfa_temp
.offset
;
1652 if (XEXP (src
, 0) == hard_frame_pointer_rtx
)
1654 /* Restoring SP from FP in the epilogue. */
1655 gcc_assert (cfa
.reg
== dw_frame_pointer_regnum
);
1656 cfa
.reg
= dw_stack_pointer_regnum
;
1658 else if (GET_CODE (src
) == LO_SUM
)
1659 /* Assume we've set the source reg of the LO_SUM from sp. */
1662 gcc_assert (XEXP (src
, 0) == stack_pointer_rtx
);
1664 if (GET_CODE (src
) != MINUS
)
1666 if (cfa
.reg
== dw_stack_pointer_regnum
)
1667 cfa
.offset
+= offset
;
1668 if (cfa_store
.reg
== dw_stack_pointer_regnum
)
1669 cfa_store
.offset
+= offset
;
1671 else if (dest
== hard_frame_pointer_rtx
)
1674 /* Either setting the FP from an offset of the SP,
1675 or adjusting the FP */
1676 gcc_assert (frame_pointer_needed
);
1678 gcc_assert (REG_P (XEXP (src
, 0))
1679 && dwf_regno (XEXP (src
, 0)) == cfa
.reg
1680 && CONST_INT_P (XEXP (src
, 1)));
1681 offset
= INTVAL (XEXP (src
, 1));
1682 if (GET_CODE (src
) != MINUS
)
1684 cfa
.offset
+= offset
;
1685 cfa
.reg
= dw_frame_pointer_regnum
;
1689 gcc_assert (GET_CODE (src
) != MINUS
);
1692 if (REG_P (XEXP (src
, 0))
1693 && dwf_regno (XEXP (src
, 0)) == cfa
.reg
1694 && CONST_INT_P (XEXP (src
, 1)))
1696 /* Setting a temporary CFA register that will be copied
1697 into the FP later on. */
1698 offset
= - INTVAL (XEXP (src
, 1));
1699 cfa
.offset
+= offset
;
1700 cfa
.reg
= dwf_regno (dest
);
1701 /* Or used to save regs to the stack. */
1702 cfa_temp
.reg
= cfa
.reg
;
1703 cfa_temp
.offset
= cfa
.offset
;
1707 else if (REG_P (XEXP (src
, 0))
1708 && dwf_regno (XEXP (src
, 0)) == cfa_temp
.reg
1709 && XEXP (src
, 1) == stack_pointer_rtx
)
1711 /* Setting a scratch register that we will use instead
1712 of SP for saving registers to the stack. */
1713 gcc_assert (cfa
.reg
== dw_stack_pointer_regnum
);
1714 cfa_store
.reg
= dwf_regno (dest
);
1715 cfa_store
.offset
= cfa
.offset
- cfa_temp
.offset
;
1719 else if (GET_CODE (src
) == LO_SUM
1720 && CONST_INT_P (XEXP (src
, 1)))
1722 cfa_temp
.reg
= dwf_regno (dest
);
1723 cfa_temp
.offset
= INTVAL (XEXP (src
, 1));
1732 cfa_temp
.reg
= dwf_regno (dest
);
1733 cfa_temp
.offset
= INTVAL (src
);
1738 gcc_assert (REG_P (XEXP (src
, 0))
1739 && dwf_regno (XEXP (src
, 0)) == cfa_temp
.reg
1740 && CONST_INT_P (XEXP (src
, 1)));
1742 cfa_temp
.reg
= dwf_regno (dest
);
1743 cfa_temp
.offset
|= INTVAL (XEXP (src
, 1));
1746 /* Skip over HIGH, assuming it will be followed by a LO_SUM,
1747 which will fill in all of the bits. */
1754 case UNSPEC_VOLATILE
:
1755 /* All unspecs should be represented by REG_CFA_* notes. */
1761 /* If this AND operation happens on stack pointer in prologue,
1762 we assume the stack is realigned and we extract the
1764 if (fde
&& XEXP (src
, 0) == stack_pointer_rtx
)
1766 /* We interpret reg_save differently with stack_realign set.
1767 Thus we must flush whatever we have queued first. */
1768 dwarf2out_flush_queued_reg_saves ();
1770 gcc_assert (cfa_store
.reg
== dwf_regno (XEXP (src
, 0)));
1771 fde
->stack_realign
= 1;
1772 fde
->stack_realignment
= INTVAL (XEXP (src
, 1));
1773 cfa_store
.offset
= 0;
1775 if (cfa
.reg
!= dw_stack_pointer_regnum
1776 && cfa
.reg
!= dw_frame_pointer_regnum
)
1777 fde
->drap_reg
= cfa
.reg
;
1790 /* Saving a register to the stack. Make sure dest is relative to the
1792 switch (GET_CODE (XEXP (dest
, 0)))
1798 /* We can't handle variable size modifications. */
1799 gcc_assert (GET_CODE (XEXP (XEXP (XEXP (dest
, 0), 1), 1))
1801 offset
= -INTVAL (XEXP (XEXP (XEXP (dest
, 0), 1), 1));
1803 gcc_assert (REGNO (XEXP (XEXP (dest
, 0), 0)) == STACK_POINTER_REGNUM
1804 && cfa_store
.reg
== dw_stack_pointer_regnum
);
1806 cfa_store
.offset
+= offset
;
1807 if (cfa
.reg
== dw_stack_pointer_regnum
)
1808 cfa
.offset
= cfa_store
.offset
;
1810 if (GET_CODE (XEXP (dest
, 0)) == POST_MODIFY
)
1811 offset
-= cfa_store
.offset
;
1813 offset
= -cfa_store
.offset
;
1820 offset
= GET_MODE_SIZE (GET_MODE (dest
));
1821 if (GET_CODE (XEXP (dest
, 0)) == PRE_INC
)
1824 gcc_assert ((REGNO (XEXP (XEXP (dest
, 0), 0))
1825 == STACK_POINTER_REGNUM
)
1826 && cfa_store
.reg
== dw_stack_pointer_regnum
);
1828 cfa_store
.offset
+= offset
;
1830 /* Rule 18: If stack is aligned, we will use FP as a
1831 reference to represent the address of the stored
1834 && fde
->stack_realign
1835 && src
== hard_frame_pointer_rtx
)
1837 gcc_assert (cfa
.reg
!= dw_frame_pointer_regnum
);
1838 cfa_store
.offset
= 0;
1841 if (cfa
.reg
== dw_stack_pointer_regnum
)
1842 cfa
.offset
= cfa_store
.offset
;
1844 if (GET_CODE (XEXP (dest
, 0)) == POST_DEC
)
1845 offset
+= -cfa_store
.offset
;
1847 offset
= -cfa_store
.offset
;
1851 /* With an offset. */
1858 gcc_assert (CONST_INT_P (XEXP (XEXP (dest
, 0), 1))
1859 && REG_P (XEXP (XEXP (dest
, 0), 0)));
1860 offset
= INTVAL (XEXP (XEXP (dest
, 0), 1));
1861 if (GET_CODE (XEXP (dest
, 0)) == MINUS
)
1864 regno
= dwf_regno (XEXP (XEXP (dest
, 0), 0));
1866 if (cfa
.reg
== regno
)
1867 offset
-= cfa
.offset
;
1868 else if (cfa_store
.reg
== regno
)
1869 offset
-= cfa_store
.offset
;
1872 gcc_assert (cfa_temp
.reg
== regno
);
1873 offset
-= cfa_temp
.offset
;
1879 /* Without an offset. */
1882 unsigned int regno
= dwf_regno (XEXP (dest
, 0));
1884 if (cfa
.reg
== regno
)
1885 offset
= -cfa
.offset
;
1886 else if (cfa_store
.reg
== regno
)
1887 offset
= -cfa_store
.offset
;
1890 gcc_assert (cfa_temp
.reg
== regno
);
1891 offset
= -cfa_temp
.offset
;
1898 gcc_assert (cfa_temp
.reg
== dwf_regno (XEXP (XEXP (dest
, 0), 0)));
1899 offset
= -cfa_temp
.offset
;
1900 cfa_temp
.offset
-= GET_MODE_SIZE (GET_MODE (dest
));
1908 /* If the source operand of this MEM operation is a memory,
1909 we only care how much stack grew. */
1914 && REGNO (src
) != STACK_POINTER_REGNUM
1915 && REGNO (src
) != HARD_FRAME_POINTER_REGNUM
1916 && dwf_regno (src
) == cfa
.reg
)
1918 /* We're storing the current CFA reg into the stack. */
1920 if (cfa
.offset
== 0)
1923 /* If stack is aligned, putting CFA reg into stack means
1924 we can no longer use reg + offset to represent CFA.
1925 Here we use DW_CFA_def_cfa_expression instead. The
1926 result of this expression equals to the original CFA
1929 && fde
->stack_realign
1930 && cfa
.indirect
== 0
1931 && cfa
.reg
!= dw_frame_pointer_regnum
)
1933 dw_cfa_location cfa_exp
;
1935 gcc_assert (fde
->drap_reg
== cfa
.reg
);
1937 cfa_exp
.indirect
= 1;
1938 cfa_exp
.reg
= dw_frame_pointer_regnum
;
1939 cfa_exp
.base_offset
= offset
;
1942 fde
->drap_reg_saved
= 1;
1944 def_cfa_1 (&cfa_exp
);
1948 /* If the source register is exactly the CFA, assume
1949 we're saving SP like any other register; this happens
1952 queue_reg_save (stack_pointer_rtx
, NULL_RTX
, offset
);
1957 /* Otherwise, we'll need to look in the stack to
1958 calculate the CFA. */
1959 rtx x
= XEXP (dest
, 0);
1963 gcc_assert (REG_P (x
));
1965 cfa
.reg
= dwf_regno (x
);
1966 cfa
.base_offset
= offset
;
1977 span
= targetm
.dwarf_register_span (src
);
1979 queue_reg_save (src
, NULL_RTX
, offset
);
1982 /* We have a PARALLEL describing where the contents of SRC live.
1983 Queue register saves for each piece of the PARALLEL. */
1986 HOST_WIDE_INT span_offset
= offset
;
1988 gcc_assert (GET_CODE (span
) == PARALLEL
);
1990 limit
= XVECLEN (span
, 0);
1991 for (par_index
= 0; par_index
< limit
; par_index
++)
1993 rtx elem
= XVECEXP (span
, 0, par_index
);
1994 queue_reg_save (elem
, NULL_RTX
, span_offset
);
1995 span_offset
+= GET_MODE_SIZE (GET_MODE (elem
));
2005 /* Record call frame debugging information for INSN, which either
2006 sets SP or FP (adjusting how we calculate the frame address) or saves a
2007 register to the stack. If INSN is NULL_RTX, initialize our state.
2009 If AFTER_P is false, we're being called before the insn is emitted,
2010 otherwise after. Call instructions get invoked twice. */
2013 dwarf2out_frame_debug (rtx insn
, bool after_p
)
2016 bool handled_one
= false;
2017 bool need_flush
= false;
2019 if (!NONJUMP_INSN_P (insn
) || clobbers_queued_reg_save (insn
))
2020 dwarf2out_flush_queued_reg_saves ();
2022 if (!RTX_FRAME_RELATED_P (insn
))
2024 /* ??? This should be done unconditionally since stack adjustments
2025 matter if the stack pointer is not the CFA register anymore but
2026 is still used to save registers. */
2027 if (!ACCUMULATE_OUTGOING_ARGS
)
2028 dwarf2out_notice_stack_adjust (insn
, after_p
);
2032 any_cfis_emitted
= false;
2034 for (note
= REG_NOTES (insn
); note
; note
= XEXP (note
, 1))
2035 switch (REG_NOTE_KIND (note
))
2037 case REG_FRAME_RELATED_EXPR
:
2038 insn
= XEXP (note
, 0);
2041 case REG_CFA_DEF_CFA
:
2042 dwarf2out_frame_debug_def_cfa (XEXP (note
, 0));
2046 case REG_CFA_ADJUST_CFA
:
2051 if (GET_CODE (n
) == PARALLEL
)
2052 n
= XVECEXP (n
, 0, 0);
2054 dwarf2out_frame_debug_adjust_cfa (n
);
2058 case REG_CFA_OFFSET
:
2061 n
= single_set (insn
);
2062 dwarf2out_frame_debug_cfa_offset (n
);
2066 case REG_CFA_REGISTER
:
2071 if (GET_CODE (n
) == PARALLEL
)
2072 n
= XVECEXP (n
, 0, 0);
2074 dwarf2out_frame_debug_cfa_register (n
);
2078 case REG_CFA_EXPRESSION
:
2081 n
= single_set (insn
);
2082 dwarf2out_frame_debug_cfa_expression (n
);
2086 case REG_CFA_RESTORE
:
2091 if (GET_CODE (n
) == PARALLEL
)
2092 n
= XVECEXP (n
, 0, 0);
2095 dwarf2out_frame_debug_cfa_restore (n
);
2099 case REG_CFA_SET_VDRAP
:
2103 dw_fde_ref fde
= cfun
->fde
;
2106 gcc_assert (fde
->vdrap_reg
== INVALID_REGNUM
);
2108 fde
->vdrap_reg
= dwf_regno (n
);
2114 case REG_CFA_WINDOW_SAVE
:
2115 dwarf2out_frame_debug_cfa_window_save ();
2119 case REG_CFA_FLUSH_QUEUE
:
2120 /* The actual flush happens below. */
2131 /* Minimize the number of advances by emitting the entire queue
2132 once anything is emitted. */
2133 need_flush
|= any_cfis_emitted
;
2137 insn
= PATTERN (insn
);
2139 dwarf2out_frame_debug_expr (insn
);
2141 /* Check again. A parallel can save and update the same register.
2142 We could probably check just once, here, but this is safer than
2143 removing the check at the start of the function. */
2144 if (any_cfis_emitted
|| clobbers_queued_reg_save (insn
))
2149 dwarf2out_flush_queued_reg_saves ();
2152 /* Examine CFI and return true if a cfi label and set_loc is needed
2153 beforehand. Even when generating CFI assembler instructions, we
2154 still have to add the cfi to the list so that lookup_cfa_1 works
2155 later on. When -g2 and above we even need to force emitting of
2156 CFI labels and add to list a DW_CFA_set_loc for convert_cfa_to_fb_loc_list
2157 purposes. If we're generating DWARF3 output we use DW_OP_call_frame_cfa
2158 and so don't use convert_cfa_to_fb_loc_list. */
2161 cfi_label_required_p (dw_cfi_ref cfi
)
2163 if (!dwarf2out_do_cfi_asm ())
2166 if (dwarf_version
== 2
2167 && debug_info_level
> DINFO_LEVEL_TERSE
2168 && (write_symbols
== DWARF2_DEBUG
2169 || write_symbols
== VMS_AND_DWARF2_DEBUG
))
2171 switch (cfi
->dw_cfi_opc
)
2173 case DW_CFA_def_cfa_offset
:
2174 case DW_CFA_def_cfa_offset_sf
:
2175 case DW_CFA_def_cfa_register
:
2176 case DW_CFA_def_cfa
:
2177 case DW_CFA_def_cfa_sf
:
2178 case DW_CFA_def_cfa_expression
:
2179 case DW_CFA_restore_state
:
2188 /* Walk the function, looking for NOTE_INSN_CFI notes. Add the CFIs to the
2189 function's FDE, adding CFI labels and set_loc/advance_loc opcodes as
2192 add_cfis_to_fde (void)
2194 dw_fde_ref fde
= cfun
->fde
;
2196 /* We always start with a function_begin label. */
2199 for (insn
= get_insns (); insn
; insn
= next
)
2201 next
= NEXT_INSN (insn
);
2203 if (NOTE_P (insn
) && NOTE_KIND (insn
) == NOTE_INSN_SWITCH_TEXT_SECTIONS
)
2205 /* Don't attempt to advance_loc4 between labels
2206 in different sections. */
2210 if (NOTE_P (insn
) && NOTE_KIND (insn
) == NOTE_INSN_CFI
)
2212 bool required
= cfi_label_required_p (NOTE_CFI (insn
));
2213 while (next
&& NOTE_P (next
) && NOTE_KIND (next
) == NOTE_INSN_CFI
)
2215 required
|= cfi_label_required_p (NOTE_CFI (next
));
2216 next
= NEXT_INSN (next
);
2220 int num
= dwarf2out_cfi_label_num
;
2221 const char *label
= dwarf2out_cfi_label ();
2225 /* Set the location counter to the new label. */
2227 xcfi
->dw_cfi_opc
= (first
? DW_CFA_set_loc
2228 : DW_CFA_advance_loc4
);
2229 xcfi
->dw_cfi_oprnd1
.dw_cfi_addr
= label
;
2230 VEC_safe_push (dw_cfi_ref
, gc
, fde
->dw_fde_cfi
, xcfi
);
2232 tmp
= emit_note_before (NOTE_INSN_CFI_LABEL
, insn
);
2233 NOTE_LABEL_NUMBER (tmp
) = num
;
2238 VEC_safe_push (dw_cfi_ref
, gc
, fde
->dw_fde_cfi
, NOTE_CFI (insn
));
2239 insn
= NEXT_INSN (insn
);
2241 while (insn
!= next
);
2247 /* Scan the function and create the initial set of CFI notes. */
2250 create_cfi_notes (void)
2254 for (insn
= get_insns (); insn
; insn
= NEXT_INSN (insn
))
2258 cfi_insn
= PREV_INSN (insn
);
2260 if (BARRIER_P (insn
))
2262 dwarf2out_frame_debug (insn
, false);
2268 switch (NOTE_KIND (insn
))
2270 case NOTE_INSN_PROLOGUE_END
:
2271 dwarf2out_flush_queued_reg_saves ();
2274 case NOTE_INSN_EPILOGUE_BEG
:
2275 #if defined(HAVE_epilogue)
2276 dwarf2out_cfi_begin_epilogue (insn
);
2280 case NOTE_INSN_CFA_RESTORE_STATE
:
2282 dwarf2out_frame_debug_restore_state ();
2288 if (!NONDEBUG_INSN_P (insn
))
2291 pat
= PATTERN (insn
);
2292 if (asm_noperands (pat
) >= 0)
2294 dwarf2out_frame_debug (insn
, false);
2298 if (GET_CODE (pat
) == SEQUENCE
)
2300 int i
, n
= XVECLEN (pat
, 0);
2301 for (i
= 1; i
< n
; ++i
)
2302 dwarf2out_frame_debug (XVECEXP (pat
, 0, i
), false);
2306 || find_reg_note (insn
, REG_CFA_FLUSH_QUEUE
, NULL
))
2307 dwarf2out_frame_debug (insn
, false);
2309 /* Do not separate tablejump insns from their ADDR_DIFF_VEC.
2310 Putting the note after the VEC should be ok. */
2311 if (!tablejump_p (insn
, NULL
, &cfi_insn
))
2314 dwarf2out_frame_debug (insn
, true);
2320 /* Determine if we need to save and restore CFI information around this
2321 epilogue. If SIBCALL is true, then this is a sibcall epilogue. If
2322 we do need to save/restore, then emit the save now, and insert a
2323 NOTE_INSN_CFA_RESTORE_STATE at the appropriate place in the stream. */
2326 dwarf2out_cfi_begin_epilogue (rtx insn
)
2328 bool saw_frp
= false;
2331 /* Scan forward to the return insn, noticing if there are possible
2332 frame related insns. */
2333 for (i
= NEXT_INSN (insn
); i
; i
= NEXT_INSN (i
))
2338 /* Look for both regular and sibcalls to end the block. */
2339 if (returnjump_p (i
))
2341 if (CALL_P (i
) && SIBLING_CALL_P (i
))
2344 if (GET_CODE (PATTERN (i
)) == SEQUENCE
)
2347 rtx seq
= PATTERN (i
);
2349 if (returnjump_p (XVECEXP (seq
, 0, 0)))
2351 if (CALL_P (XVECEXP (seq
, 0, 0))
2352 && SIBLING_CALL_P (XVECEXP (seq
, 0, 0)))
2355 for (idx
= 0; idx
< XVECLEN (seq
, 0); idx
++)
2356 if (RTX_FRAME_RELATED_P (XVECEXP (seq
, 0, idx
)))
2360 if (RTX_FRAME_RELATED_P (i
))
2364 /* If the port doesn't emit epilogue unwind info, we don't need a
2365 save/restore pair. */
2369 /* Otherwise, search forward to see if the return insn was the last
2370 basic block of the function. If so, we don't need save/restore. */
2371 gcc_assert (i
!= NULL
);
2372 i
= next_real_insn (i
);
2376 /* Insert the restore before that next real insn in the stream, and before
2377 a potential NOTE_INSN_EPILOGUE_BEG -- we do need these notes to be
2378 properly nested. This should be after any label or alignment. This
2379 will be pushed into the CFI stream by the function below. */
2382 rtx p
= PREV_INSN (i
);
2385 if (NOTE_KIND (p
) == NOTE_INSN_BASIC_BLOCK
)
2389 emit_note_before (NOTE_INSN_CFA_RESTORE_STATE
, i
);
2391 emit_cfa_remember
= true;
2393 /* And emulate the state save. */
2394 gcc_assert (!cfa_remember
.in_use
);
2396 old_cfa_remember
= old_cfa
;
2397 cfa_remember
.in_use
= 1;
2400 /* A "subroutine" of dwarf2out_cfi_begin_epilogue. Emit the restore
2404 dwarf2out_frame_debug_restore_state (void)
2406 dw_cfi_ref cfi
= new_cfi ();
2408 cfi
->dw_cfi_opc
= DW_CFA_restore_state
;
2411 gcc_assert (cfa_remember
.in_use
);
2413 old_cfa
= old_cfa_remember
;
2414 cfa_remember
.in_use
= 0;
2417 /* Record the initial position of the return address. RTL is
2418 INCOMING_RETURN_ADDR_RTX. */
2421 initial_return_save (rtx rtl
)
2423 unsigned int reg
= INVALID_REGNUM
;
2424 HOST_WIDE_INT offset
= 0;
2426 switch (GET_CODE (rtl
))
2429 /* RA is in a register. */
2430 reg
= dwf_regno (rtl
);
2434 /* RA is on the stack. */
2435 rtl
= XEXP (rtl
, 0);
2436 switch (GET_CODE (rtl
))
2439 gcc_assert (REGNO (rtl
) == STACK_POINTER_REGNUM
);
2444 gcc_assert (REGNO (XEXP (rtl
, 0)) == STACK_POINTER_REGNUM
);
2445 offset
= INTVAL (XEXP (rtl
, 1));
2449 gcc_assert (REGNO (XEXP (rtl
, 0)) == STACK_POINTER_REGNUM
);
2450 offset
= -INTVAL (XEXP (rtl
, 1));
2460 /* The return address is at some offset from any value we can
2461 actually load. For instance, on the SPARC it is in %i7+8. Just
2462 ignore the offset for now; it doesn't matter for unwinding frames. */
2463 gcc_assert (CONST_INT_P (XEXP (rtl
, 1)));
2464 initial_return_save (XEXP (rtl
, 0));
2471 if (reg
!= DWARF_FRAME_RETURN_COLUMN
)
2473 if (reg
!= INVALID_REGNUM
)
2474 record_reg_saved_in_reg (rtl
, pc_rtx
);
2475 reg_save (DWARF_FRAME_RETURN_COLUMN
, reg
, offset
- cfa
.offset
);
2479 /* Annotate the function with NOTE_INSN_CFI notes to record the CFI
2480 state at each location within the function. These notes will be
2481 emitted during pass_final. */
2484 execute_dwarf2_frame (void)
2486 /* The first time we're called, compute the incoming frame state. */
2487 if (cie_cfi_vec
== NULL
)
2489 dw_cfa_location loc
;
2491 dw_stack_pointer_regnum
= DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM
);
2492 dw_frame_pointer_regnum
= DWARF_FRAME_REGNUM (HARD_FRAME_POINTER_REGNUM
);
2494 add_cfi_vec
= &cie_cfi_vec
;
2496 memset (&old_cfa
, 0, sizeof (old_cfa
));
2497 old_cfa
.reg
= INVALID_REGNUM
;
2499 /* On entry, the Canonical Frame Address is at SP. */
2500 memset(&loc
, 0, sizeof (loc
));
2501 loc
.reg
= dw_stack_pointer_regnum
;
2502 loc
.offset
= INCOMING_FRAME_SP_OFFSET
;
2505 if (targetm
.debug_unwind_info () == UI_DWARF2
2506 || targetm_common
.except_unwind_info (&global_options
) == UI_DWARF2
)
2508 initial_return_save (INCOMING_RETURN_ADDR_RTX
);
2510 /* For a few targets, we have the return address incoming into a
2511 register, but choose a different return column. This will result
2512 in a DW_CFA_register for the return, and an entry in
2513 regs_saved_in_regs to match. If the target later stores that
2514 return address register to the stack, we want to be able to emit
2515 the DW_CFA_offset against the return column, not the intermediate
2516 save register. Save the contents of regs_saved_in_regs so that
2517 we can re-initialize it at the start of each function. */
2518 switch (VEC_length (reg_saved_in_data
, regs_saved_in_regs
))
2523 cie_return_save
= ggc_alloc_reg_saved_in_data ();
2524 *cie_return_save
= *VEC_index (reg_saved_in_data
,
2525 regs_saved_in_regs
, 0);
2526 regs_saved_in_regs
= NULL
;
2536 /* Set up state for generating call frame debug info. */
2537 gcc_checking_assert (queued_reg_saves
== NULL
);
2538 gcc_checking_assert (regs_saved_in_regs
== NULL
);
2540 memset (&cfa
, 0, sizeof(cfa
));
2541 cfa
.reg
= dw_stack_pointer_regnum
;
2542 cfa
.offset
= INCOMING_FRAME_SP_OFFSET
;
2547 memset (&cfa_temp
, 0, sizeof(cfa_temp
));
2548 cfa_temp
.reg
= INVALID_REGNUM
;
2550 if (cie_return_save
)
2551 VEC_safe_push (reg_saved_in_data
, gc
, regs_saved_in_regs
, cie_return_save
);
2553 dwarf2out_alloc_current_fde ();
2556 create_cfi_notes ();
2559 /* Reset all function-specific information, particularly for GC. */
2560 XDELETEVEC (barrier_args_size
);
2561 barrier_args_size
= NULL
;
2562 regs_saved_in_regs
= NULL
;
2563 queued_reg_saves
= NULL
;
2568 /* Convert a DWARF call frame info. operation to its string name */
2571 dwarf_cfi_name (unsigned int cfi_opc
)
2575 case DW_CFA_advance_loc
:
2576 return "DW_CFA_advance_loc";
2578 return "DW_CFA_offset";
2579 case DW_CFA_restore
:
2580 return "DW_CFA_restore";
2582 return "DW_CFA_nop";
2583 case DW_CFA_set_loc
:
2584 return "DW_CFA_set_loc";
2585 case DW_CFA_advance_loc1
:
2586 return "DW_CFA_advance_loc1";
2587 case DW_CFA_advance_loc2
:
2588 return "DW_CFA_advance_loc2";
2589 case DW_CFA_advance_loc4
:
2590 return "DW_CFA_advance_loc4";
2591 case DW_CFA_offset_extended
:
2592 return "DW_CFA_offset_extended";
2593 case DW_CFA_restore_extended
:
2594 return "DW_CFA_restore_extended";
2595 case DW_CFA_undefined
:
2596 return "DW_CFA_undefined";
2597 case DW_CFA_same_value
:
2598 return "DW_CFA_same_value";
2599 case DW_CFA_register
:
2600 return "DW_CFA_register";
2601 case DW_CFA_remember_state
:
2602 return "DW_CFA_remember_state";
2603 case DW_CFA_restore_state
:
2604 return "DW_CFA_restore_state";
2605 case DW_CFA_def_cfa
:
2606 return "DW_CFA_def_cfa";
2607 case DW_CFA_def_cfa_register
:
2608 return "DW_CFA_def_cfa_register";
2609 case DW_CFA_def_cfa_offset
:
2610 return "DW_CFA_def_cfa_offset";
2613 case DW_CFA_def_cfa_expression
:
2614 return "DW_CFA_def_cfa_expression";
2615 case DW_CFA_expression
:
2616 return "DW_CFA_expression";
2617 case DW_CFA_offset_extended_sf
:
2618 return "DW_CFA_offset_extended_sf";
2619 case DW_CFA_def_cfa_sf
:
2620 return "DW_CFA_def_cfa_sf";
2621 case DW_CFA_def_cfa_offset_sf
:
2622 return "DW_CFA_def_cfa_offset_sf";
2624 /* SGI/MIPS specific */
2625 case DW_CFA_MIPS_advance_loc8
:
2626 return "DW_CFA_MIPS_advance_loc8";
2628 /* GNU extensions */
2629 case DW_CFA_GNU_window_save
:
2630 return "DW_CFA_GNU_window_save";
2631 case DW_CFA_GNU_args_size
:
2632 return "DW_CFA_GNU_args_size";
2633 case DW_CFA_GNU_negative_offset_extended
:
2634 return "DW_CFA_GNU_negative_offset_extended";
2637 return "DW_CFA_<unknown>";
2641 /* This routine will generate the correct assembly data for a location
2642 description based on a cfi entry with a complex address. */
2645 output_cfa_loc (dw_cfi_ref cfi
, int for_eh
)
2647 dw_loc_descr_ref loc
;
2650 if (cfi
->dw_cfi_opc
== DW_CFA_expression
)
2653 DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, for_eh
);
2654 dw2_asm_output_data (1, r
, NULL
);
2655 loc
= cfi
->dw_cfi_oprnd2
.dw_cfi_loc
;
2658 loc
= cfi
->dw_cfi_oprnd1
.dw_cfi_loc
;
2660 /* Output the size of the block. */
2661 size
= size_of_locs (loc
);
2662 dw2_asm_output_data_uleb128 (size
, NULL
);
2664 /* Now output the operations themselves. */
2665 output_loc_sequence (loc
, for_eh
);
2668 /* Similar, but used for .cfi_escape. */
2671 output_cfa_loc_raw (dw_cfi_ref cfi
)
2673 dw_loc_descr_ref loc
;
2676 if (cfi
->dw_cfi_opc
== DW_CFA_expression
)
2679 DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, 1);
2680 fprintf (asm_out_file
, "%#x,", r
);
2681 loc
= cfi
->dw_cfi_oprnd2
.dw_cfi_loc
;
2684 loc
= cfi
->dw_cfi_oprnd1
.dw_cfi_loc
;
2686 /* Output the size of the block. */
2687 size
= size_of_locs (loc
);
2688 dw2_asm_output_data_uleb128_raw (size
);
2689 fputc (',', asm_out_file
);
2691 /* Now output the operations themselves. */
2692 output_loc_sequence_raw (loc
);
2695 /* Output a Call Frame Information opcode and its operand(s). */
2698 output_cfi (dw_cfi_ref cfi
, dw_fde_ref fde
, int for_eh
)
2703 if (cfi
->dw_cfi_opc
== DW_CFA_advance_loc
)
2704 dw2_asm_output_data (1, (cfi
->dw_cfi_opc
2705 | (cfi
->dw_cfi_oprnd1
.dw_cfi_offset
& 0x3f)),
2706 "DW_CFA_advance_loc " HOST_WIDE_INT_PRINT_HEX
,
2707 ((unsigned HOST_WIDE_INT
)
2708 cfi
->dw_cfi_oprnd1
.dw_cfi_offset
));
2709 else if (cfi
->dw_cfi_opc
== DW_CFA_offset
)
2711 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, for_eh
);
2712 dw2_asm_output_data (1, (cfi
->dw_cfi_opc
| (r
& 0x3f)),
2713 "DW_CFA_offset, column %#lx", r
);
2714 off
= div_data_align (cfi
->dw_cfi_oprnd2
.dw_cfi_offset
);
2715 dw2_asm_output_data_uleb128 (off
, NULL
);
2717 else if (cfi
->dw_cfi_opc
== DW_CFA_restore
)
2719 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, for_eh
);
2720 dw2_asm_output_data (1, (cfi
->dw_cfi_opc
| (r
& 0x3f)),
2721 "DW_CFA_restore, column %#lx", r
);
2725 dw2_asm_output_data (1, cfi
->dw_cfi_opc
,
2726 "%s", dwarf_cfi_name (cfi
->dw_cfi_opc
));
2728 switch (cfi
->dw_cfi_opc
)
2730 case DW_CFA_set_loc
:
2732 dw2_asm_output_encoded_addr_rtx (
2733 ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/1, /*global=*/0),
2734 gen_rtx_SYMBOL_REF (Pmode
, cfi
->dw_cfi_oprnd1
.dw_cfi_addr
),
2737 dw2_asm_output_addr (DWARF2_ADDR_SIZE
,
2738 cfi
->dw_cfi_oprnd1
.dw_cfi_addr
, NULL
);
2739 fde
->dw_fde_current_label
= cfi
->dw_cfi_oprnd1
.dw_cfi_addr
;
2742 case DW_CFA_advance_loc1
:
2743 dw2_asm_output_delta (1, cfi
->dw_cfi_oprnd1
.dw_cfi_addr
,
2744 fde
->dw_fde_current_label
, NULL
);
2745 fde
->dw_fde_current_label
= cfi
->dw_cfi_oprnd1
.dw_cfi_addr
;
2748 case DW_CFA_advance_loc2
:
2749 dw2_asm_output_delta (2, cfi
->dw_cfi_oprnd1
.dw_cfi_addr
,
2750 fde
->dw_fde_current_label
, NULL
);
2751 fde
->dw_fde_current_label
= cfi
->dw_cfi_oprnd1
.dw_cfi_addr
;
2754 case DW_CFA_advance_loc4
:
2755 dw2_asm_output_delta (4, cfi
->dw_cfi_oprnd1
.dw_cfi_addr
,
2756 fde
->dw_fde_current_label
, NULL
);
2757 fde
->dw_fde_current_label
= cfi
->dw_cfi_oprnd1
.dw_cfi_addr
;
2760 case DW_CFA_MIPS_advance_loc8
:
2761 dw2_asm_output_delta (8, cfi
->dw_cfi_oprnd1
.dw_cfi_addr
,
2762 fde
->dw_fde_current_label
, NULL
);
2763 fde
->dw_fde_current_label
= cfi
->dw_cfi_oprnd1
.dw_cfi_addr
;
2766 case DW_CFA_offset_extended
:
2767 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, for_eh
);
2768 dw2_asm_output_data_uleb128 (r
, NULL
);
2769 off
= div_data_align (cfi
->dw_cfi_oprnd2
.dw_cfi_offset
);
2770 dw2_asm_output_data_uleb128 (off
, NULL
);
2773 case DW_CFA_def_cfa
:
2774 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, for_eh
);
2775 dw2_asm_output_data_uleb128 (r
, NULL
);
2776 dw2_asm_output_data_uleb128 (cfi
->dw_cfi_oprnd2
.dw_cfi_offset
, NULL
);
2779 case DW_CFA_offset_extended_sf
:
2780 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, for_eh
);
2781 dw2_asm_output_data_uleb128 (r
, NULL
);
2782 off
= div_data_align (cfi
->dw_cfi_oprnd2
.dw_cfi_offset
);
2783 dw2_asm_output_data_sleb128 (off
, NULL
);
2786 case DW_CFA_def_cfa_sf
:
2787 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, for_eh
);
2788 dw2_asm_output_data_uleb128 (r
, NULL
);
2789 off
= div_data_align (cfi
->dw_cfi_oprnd2
.dw_cfi_offset
);
2790 dw2_asm_output_data_sleb128 (off
, NULL
);
2793 case DW_CFA_restore_extended
:
2794 case DW_CFA_undefined
:
2795 case DW_CFA_same_value
:
2796 case DW_CFA_def_cfa_register
:
2797 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, for_eh
);
2798 dw2_asm_output_data_uleb128 (r
, NULL
);
2801 case DW_CFA_register
:
2802 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, for_eh
);
2803 dw2_asm_output_data_uleb128 (r
, NULL
);
2804 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd2
.dw_cfi_reg_num
, for_eh
);
2805 dw2_asm_output_data_uleb128 (r
, NULL
);
2808 case DW_CFA_def_cfa_offset
:
2809 case DW_CFA_GNU_args_size
:
2810 dw2_asm_output_data_uleb128 (cfi
->dw_cfi_oprnd1
.dw_cfi_offset
, NULL
);
2813 case DW_CFA_def_cfa_offset_sf
:
2814 off
= div_data_align (cfi
->dw_cfi_oprnd1
.dw_cfi_offset
);
2815 dw2_asm_output_data_sleb128 (off
, NULL
);
2818 case DW_CFA_GNU_window_save
:
2821 case DW_CFA_def_cfa_expression
:
2822 case DW_CFA_expression
:
2823 output_cfa_loc (cfi
, for_eh
);
2826 case DW_CFA_GNU_negative_offset_extended
:
2827 /* Obsoleted by DW_CFA_offset_extended_sf. */
2836 /* Similar, but do it via assembler directives instead. */
2839 output_cfi_directive (FILE *f
, dw_cfi_ref cfi
)
2841 unsigned long r
, r2
;
2843 switch (cfi
->dw_cfi_opc
)
2845 case DW_CFA_advance_loc
:
2846 case DW_CFA_advance_loc1
:
2847 case DW_CFA_advance_loc2
:
2848 case DW_CFA_advance_loc4
:
2849 case DW_CFA_MIPS_advance_loc8
:
2850 case DW_CFA_set_loc
:
2851 /* Should only be created in a code path not followed when emitting
2852 via directives. The assembler is going to take care of this for
2853 us. But this routines is also used for debugging dumps, so
2855 gcc_assert (f
!= asm_out_file
);
2856 fprintf (f
, "\t.cfi_advance_loc\n");
2860 case DW_CFA_offset_extended
:
2861 case DW_CFA_offset_extended_sf
:
2862 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, 1);
2863 fprintf (f
, "\t.cfi_offset %lu, "HOST_WIDE_INT_PRINT_DEC
"\n",
2864 r
, cfi
->dw_cfi_oprnd2
.dw_cfi_offset
);
2867 case DW_CFA_restore
:
2868 case DW_CFA_restore_extended
:
2869 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, 1);
2870 fprintf (f
, "\t.cfi_restore %lu\n", r
);
2873 case DW_CFA_undefined
:
2874 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, 1);
2875 fprintf (f
, "\t.cfi_undefined %lu\n", r
);
2878 case DW_CFA_same_value
:
2879 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, 1);
2880 fprintf (f
, "\t.cfi_same_value %lu\n", r
);
2883 case DW_CFA_def_cfa
:
2884 case DW_CFA_def_cfa_sf
:
2885 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, 1);
2886 fprintf (f
, "\t.cfi_def_cfa %lu, "HOST_WIDE_INT_PRINT_DEC
"\n",
2887 r
, cfi
->dw_cfi_oprnd2
.dw_cfi_offset
);
2890 case DW_CFA_def_cfa_register
:
2891 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, 1);
2892 fprintf (f
, "\t.cfi_def_cfa_register %lu\n", r
);
2895 case DW_CFA_register
:
2896 r
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
, 1);
2897 r2
= DWARF2_FRAME_REG_OUT (cfi
->dw_cfi_oprnd2
.dw_cfi_reg_num
, 1);
2898 fprintf (f
, "\t.cfi_register %lu, %lu\n", r
, r2
);
2901 case DW_CFA_def_cfa_offset
:
2902 case DW_CFA_def_cfa_offset_sf
:
2903 fprintf (f
, "\t.cfi_def_cfa_offset "
2904 HOST_WIDE_INT_PRINT_DEC
"\n",
2905 cfi
->dw_cfi_oprnd1
.dw_cfi_offset
);
2908 case DW_CFA_remember_state
:
2909 fprintf (f
, "\t.cfi_remember_state\n");
2911 case DW_CFA_restore_state
:
2912 fprintf (f
, "\t.cfi_restore_state\n");
2915 case DW_CFA_GNU_args_size
:
2916 if (f
== asm_out_file
)
2918 fprintf (f
, "\t.cfi_escape %#x,", DW_CFA_GNU_args_size
);
2919 dw2_asm_output_data_uleb128_raw (cfi
->dw_cfi_oprnd1
.dw_cfi_offset
);
2921 fprintf (f
, "\t%s args_size "HOST_WIDE_INT_PRINT_DEC
,
2922 ASM_COMMENT_START
, cfi
->dw_cfi_oprnd1
.dw_cfi_offset
);
2927 fprintf (f
, "\t.cfi_GNU_args_size "HOST_WIDE_INT_PRINT_DEC
"\n",
2928 cfi
->dw_cfi_oprnd1
.dw_cfi_offset
);
2932 case DW_CFA_GNU_window_save
:
2933 fprintf (f
, "\t.cfi_window_save\n");
2936 case DW_CFA_def_cfa_expression
:
2937 if (f
!= asm_out_file
)
2939 fprintf (f
, "\t.cfi_def_cfa_expression ...\n");
2943 case DW_CFA_expression
:
2944 if (f
!= asm_out_file
)
2946 fprintf (f
, "\t.cfi_cfa_expression ...\n");
2949 fprintf (f
, "\t.cfi_escape %#x,", cfi
->dw_cfi_opc
);
2950 output_cfa_loc_raw (cfi
);
2960 dwarf2out_emit_cfi (dw_cfi_ref cfi
)
2962 if (dwarf2out_do_cfi_asm ())
2963 output_cfi_directive (asm_out_file
, cfi
);
2966 /* Output CFIs from VEC, up to index UPTO, to bring current FDE to the
2967 same state as after executing CFIs in CFI chain. DO_CFI_ASM is
2968 true if .cfi_* directives shall be emitted, false otherwise. If it
2969 is false, FDE and FOR_EH are the other arguments to pass to
2973 output_cfis (cfi_vec vec
, int upto
, bool do_cfi_asm
,
2974 dw_fde_ref fde
, bool for_eh
)
2977 struct dw_cfi_struct cfi_buf
;
2979 dw_cfi_ref cfi_args_size
= NULL
, cfi_cfa
= NULL
, cfi_cfa_offset
= NULL
;
2980 VEC(dw_cfi_ref
, heap
) *regs
= VEC_alloc (dw_cfi_ref
, heap
, 32);
2981 unsigned int len
, idx
;
2983 for (ix
= 0; ix
< upto
+ 1; ix
++)
2985 dw_cfi_ref cfi
= ix
< upto
? VEC_index (dw_cfi_ref
, vec
, ix
) : NULL
;
2986 switch (cfi
? cfi
->dw_cfi_opc
: DW_CFA_nop
)
2988 case DW_CFA_advance_loc
:
2989 case DW_CFA_advance_loc1
:
2990 case DW_CFA_advance_loc2
:
2991 case DW_CFA_advance_loc4
:
2992 case DW_CFA_MIPS_advance_loc8
:
2993 case DW_CFA_set_loc
:
2994 /* All advances should be ignored. */
2996 case DW_CFA_remember_state
:
2998 dw_cfi_ref args_size
= cfi_args_size
;
3000 /* Skip everything between .cfi_remember_state and
3001 .cfi_restore_state. */
3006 for (; ix
< upto
; ix
++)
3008 cfi2
= VEC_index (dw_cfi_ref
, vec
, ix
);
3009 if (cfi2
->dw_cfi_opc
== DW_CFA_restore_state
)
3011 else if (cfi2
->dw_cfi_opc
== DW_CFA_GNU_args_size
)
3014 gcc_assert (cfi2
->dw_cfi_opc
!= DW_CFA_remember_state
);
3017 cfi_args_size
= args_size
;
3020 case DW_CFA_GNU_args_size
:
3021 cfi_args_size
= cfi
;
3023 case DW_CFA_GNU_window_save
:
3026 case DW_CFA_offset_extended
:
3027 case DW_CFA_offset_extended_sf
:
3028 case DW_CFA_restore
:
3029 case DW_CFA_restore_extended
:
3030 case DW_CFA_undefined
:
3031 case DW_CFA_same_value
:
3032 case DW_CFA_register
:
3033 case DW_CFA_val_offset
:
3034 case DW_CFA_val_offset_sf
:
3035 case DW_CFA_expression
:
3036 case DW_CFA_val_expression
:
3037 case DW_CFA_GNU_negative_offset_extended
:
3038 if (VEC_length (dw_cfi_ref
, regs
)
3039 <= cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
)
3040 VEC_safe_grow_cleared (dw_cfi_ref
, heap
, regs
,
3041 cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
+ 1);
3042 VEC_replace (dw_cfi_ref
, regs
, cfi
->dw_cfi_oprnd1
.dw_cfi_reg_num
,
3045 case DW_CFA_def_cfa
:
3046 case DW_CFA_def_cfa_sf
:
3047 case DW_CFA_def_cfa_expression
:
3049 cfi_cfa_offset
= cfi
;
3051 case DW_CFA_def_cfa_register
:
3054 case DW_CFA_def_cfa_offset
:
3055 case DW_CFA_def_cfa_offset_sf
:
3056 cfi_cfa_offset
= cfi
;
3059 gcc_assert (cfi
== NULL
);
3061 len
= VEC_length (dw_cfi_ref
, regs
);
3062 for (idx
= 0; idx
< len
; idx
++)
3064 cfi2
= VEC_replace (dw_cfi_ref
, regs
, idx
, NULL
);
3066 && cfi2
->dw_cfi_opc
!= DW_CFA_restore
3067 && cfi2
->dw_cfi_opc
!= DW_CFA_restore_extended
)
3070 output_cfi_directive (asm_out_file
, cfi2
);
3072 output_cfi (cfi2
, fde
, for_eh
);
3075 if (cfi_cfa
&& cfi_cfa_offset
&& cfi_cfa_offset
!= cfi_cfa
)
3077 gcc_assert (cfi_cfa
->dw_cfi_opc
!= DW_CFA_def_cfa_expression
);
3079 switch (cfi_cfa_offset
->dw_cfi_opc
)
3081 case DW_CFA_def_cfa_offset
:
3082 cfi_buf
.dw_cfi_opc
= DW_CFA_def_cfa
;
3083 cfi_buf
.dw_cfi_oprnd2
= cfi_cfa_offset
->dw_cfi_oprnd1
;
3085 case DW_CFA_def_cfa_offset_sf
:
3086 cfi_buf
.dw_cfi_opc
= DW_CFA_def_cfa_sf
;
3087 cfi_buf
.dw_cfi_oprnd2
= cfi_cfa_offset
->dw_cfi_oprnd1
;
3089 case DW_CFA_def_cfa
:
3090 case DW_CFA_def_cfa_sf
:
3091 cfi_buf
.dw_cfi_opc
= cfi_cfa_offset
->dw_cfi_opc
;
3092 cfi_buf
.dw_cfi_oprnd2
= cfi_cfa_offset
->dw_cfi_oprnd2
;
3099 else if (cfi_cfa_offset
)
3100 cfi_cfa
= cfi_cfa_offset
;
3104 output_cfi_directive (asm_out_file
, cfi_cfa
);
3106 output_cfi (cfi_cfa
, fde
, for_eh
);
3109 cfi_cfa_offset
= NULL
;
3111 && cfi_args_size
->dw_cfi_oprnd1
.dw_cfi_offset
)
3114 output_cfi_directive (asm_out_file
, cfi_args_size
);
3116 output_cfi (cfi_args_size
, fde
, for_eh
);
3118 cfi_args_size
= NULL
;
3121 VEC_free (dw_cfi_ref
, heap
, regs
);
3124 else if (do_cfi_asm
)
3125 output_cfi_directive (asm_out_file
, cfi
);
3127 output_cfi (cfi
, fde
, for_eh
);
3136 /* Save the result of dwarf2out_do_frame across PCH.
3137 This variable is tri-state, with 0 unset, >0 true, <0 false. */
3138 static GTY(()) signed char saved_do_cfi_asm
= 0;
3140 /* Decide whether we want to emit frame unwind information for the current
3141 translation unit. */
3144 dwarf2out_do_frame (void)
3146 /* We want to emit correct CFA location expressions or lists, so we
3147 have to return true if we're going to output debug info, even if
3148 we're not going to output frame or unwind info. */
3149 if (write_symbols
== DWARF2_DEBUG
|| write_symbols
== VMS_AND_DWARF2_DEBUG
)
3152 if (saved_do_cfi_asm
> 0)
3155 if (targetm
.debug_unwind_info () == UI_DWARF2
)
3158 if ((flag_unwind_tables
|| flag_exceptions
)
3159 && targetm_common
.except_unwind_info (&global_options
) == UI_DWARF2
)
3165 /* Decide whether to emit frame unwind via assembler directives. */
3168 dwarf2out_do_cfi_asm (void)
3172 #ifdef MIPS_DEBUGGING_INFO
3176 if (saved_do_cfi_asm
!= 0)
3177 return saved_do_cfi_asm
> 0;
3179 /* Assume failure for a moment. */
3180 saved_do_cfi_asm
= -1;
3182 if (!flag_dwarf2_cfi_asm
|| !dwarf2out_do_frame ())
3184 if (!HAVE_GAS_CFI_PERSONALITY_DIRECTIVE
)
3187 /* Make sure the personality encoding is one the assembler can support.
3188 In particular, aligned addresses can't be handled. */
3189 enc
= ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/2,/*global=*/1);
3190 if ((enc
& 0x70) != 0 && (enc
& 0x70) != DW_EH_PE_pcrel
)
3192 enc
= ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0,/*global=*/0);
3193 if ((enc
& 0x70) != 0 && (enc
& 0x70) != DW_EH_PE_pcrel
)
3196 /* If we can't get the assembler to emit only .debug_frame, and we don't need
3197 dwarf2 unwind info for exceptions, then emit .debug_frame by hand. */
3198 if (!HAVE_GAS_CFI_SECTIONS_DIRECTIVE
3199 && !flag_unwind_tables
&& !flag_exceptions
3200 && targetm_common
.except_unwind_info (&global_options
) != UI_DWARF2
)
3204 saved_do_cfi_asm
= 1;
3209 gate_dwarf2_frame (void)
3211 #ifndef HAVE_prologue
3212 /* Targets which still implement the prologue in assembler text
3213 cannot use the generic dwarf2 unwinding. */
3217 /* ??? What to do for UI_TARGET unwinding? They might be able to benefit
3218 from the optimized shrink-wrapping annotations that we will compute.
3219 For now, only produce the CFI notes for dwarf2. */
3220 return dwarf2out_do_frame ();
3223 struct rtl_opt_pass pass_dwarf2_frame
=
3227 "dwarf2", /* name */
3228 gate_dwarf2_frame
, /* gate */
3229 execute_dwarf2_frame
, /* execute */
3232 0, /* static_pass_number */
3233 TV_FINAL
, /* tv_id */
3234 0, /* properties_required */
3235 0, /* properties_provided */
3236 0, /* properties_destroyed */
3237 0, /* todo_flags_start */
3238 0 /* todo_flags_finish */
3242 #include "gt-dwarf2cfi.h"