#include "flags.h"
#include "rtl.h"
#include "function.h"
+#include "basic-block.h"
#include "dwarf2.h"
#include "dwarf2out.h"
#include "dwarf2asm.h"
/* Since we no longer have a proper CFG, we're going to create a facsimile
of one on the fly while processing the frame-related insns.
- We create dw_trace structures for each instruction trace beginning at
- at a label following a barrier (or beginning of the function), and
- ending at a barrier (or the end of the function).
+ We create dw_trace_info structures for each extended basic block beginning
+ and ending at a "save point". Save points are labels, barriers, certain
+ notes, and of course the beginning and end of the function.
As we encounter control transfer insns, we propagate the "current"
- row state across the edges to the starts of traces. If an edge goes
- to a label that is not the start of a trace, we ignore it. This
- assumes that previous compiler transformations were correct, and that
- we will reach the same row state from any source. (We can perform some
- limited validation of this assumption, but without the full CFG we
- cannot be sure of full validation coverage. It is expensive, so we
- only do so with checking enabled.)
+ row state across the edges to the starts of traces. When checking is
+ enabled, we validate that we propagate the same data from all sources.
All traces are members of the TRACE_INFO array, in the order in which
they appear in the instruction stream.
- All labels are given an LUID that indexes the LABEL_INFO array. If
- the label is the start of a trace, the TRACE pointer will be non-NULL
- and point into the TRACE_INFO array. */
+ All save points are present in the TRACE_INDEX hash, mapping the insn
+ starting a trace to the dw_trace_info describing the trace. */
typedef struct
{
- /* The label that begins the trace. This will be NULL for the first
- trace beginning at function entry. */
- rtx label;
+ /* The insn that begins the trace. */
+ rtx head;
/* The row state at the beginning and end of the trace. */
- dw_cfi_row *enter_row, *exit_row;
+ dw_cfi_row *beg_row, *end_row;
+
+ /* True if this trace immediately follows NOTE_INSN_SWITCH_TEXT_SECTIONS. */
+ bool switch_sections;
/* The following variables contain data used in interpreting frame related
expressions. These are not part of the "real" row state as defined by
DEF_VEC_O (dw_trace_info);
DEF_VEC_ALLOC_O (dw_trace_info, heap);
-typedef struct
-{
- dw_trace_info *trace;
-
-#ifdef ENABLE_CHECKING
- dw_cfi_row *check_row;
-#endif
-} dw_label_info;
+typedef dw_trace_info *dw_trace_info_ref;
-DEF_VEC_O (dw_label_info);
-DEF_VEC_ALLOC_O (dw_label_info, heap);
+DEF_VEC_P (dw_trace_info_ref);
+DEF_VEC_ALLOC_P (dw_trace_info_ref, heap);
/* The variables making up the pseudo-cfg, as described above. */
-#if 0
-static VEC (int, heap) *uid_luid;
-static VEC (dw_label_info, heap) *label_info;
static VEC (dw_trace_info, heap) *trace_info;
-#endif
+static VEC (dw_trace_info_ref, heap) *trace_work_list;
+static htab_t trace_index;
/* A vector of call frame insns for the CIE. */
cfi_vec cie_cfi_vec;
/* The current, i.e. most recently generated, row of the CFI table. */
static dw_cfi_row *cur_row;
-/* The row state from a preceeding DW_CFA_remember_state. */
-static dw_cfi_row *remember_row;
-
/* We delay emitting a register save until either (a) we reach the end
of the prologue or (b) the register is clobbered. This clusters
register saves so that there are fewer pc advances. */
emitting this data, i.e. updating CUR_ROW, without async unwind. */
static HOST_WIDE_INT queued_args_size;
-/* True if remember_state should be emitted before following CFI directive. */
-static bool emit_cfa_remember;
-
/* True if any CFI directives were emitted at the current insn. */
static bool any_cfis_emitted;
/* Short-hand for commonly used register numbers. */
static unsigned dw_stack_pointer_regnum;
static unsigned dw_frame_pointer_regnum;
-\f
-
-static void dwarf2out_cfi_begin_epilogue (rtx insn);
-static void dwarf2out_frame_debug_restore_state (void);
-
\f
/* Hook used by __throw. */
targetm.init_dwarf_reg_sizes_extra (address);
}
+\f
+/* Hash function for TRACE_INDEX: a trace is identified by the UID of
+   the insn that heads it.  */
+
+static hashval_t
+dw_trace_info_hash (const void *ptr)
+{
+ const dw_trace_info *ti = (const dw_trace_info *) ptr;
+ return INSN_UID (ti->head);
+}
+
+/* Equality function for TRACE_INDEX: two traces are the same iff they
+   begin at the same insn.  */
+
+static int
+dw_trace_info_eq (const void *ptr_a, const void *ptr_b)
+{
+ const dw_trace_info *a = (const dw_trace_info *) ptr_a;
+ const dw_trace_info *b = (const dw_trace_info *) ptr_b;
+ return a->head == b->head;
+}
+
+/* Return the index of TRACE within the TRACE_INFO vector, computed by
+   pointer arithmetic from the vector base.  NOTE(review): only valid
+   while TRACE is a pointer into TRACE_INFO's current storage, i.e.
+   after the vector has stopped being resized.  */
+
+static unsigned
+get_trace_index (dw_trace_info *trace)
+{
+ return trace - VEC_address (dw_trace_info, trace_info);
+}
+
+/* Look up the dw_trace_info for the trace beginning at INSN via the
+   TRACE_INDEX hash; returns NULL if INSN does not begin a trace.  */
+
+static dw_trace_info *
+get_trace_info (rtx insn)
+{
+ dw_trace_info dummy;
+ dummy.head = insn;
+ return (dw_trace_info *)
+ htab_find_with_hash (trace_index, &dummy, INSN_UID (insn));
+}
+
+/* Return true if INSN is a "save point", i.e. a potential trace
+   boundary as described in the block comment at the top of the file:
+   a label inside a basic block, or a prologue-end/epilogue-begin
+   note.  (Barriers and function entry/exit are handled by the
+   callers.)  */
+
+static bool
+save_point_p (rtx insn)
+{
+ /* Labels, except those that are really jump tables. */
+ if (LABEL_P (insn))
+ return inside_basic_block_p (insn);
+
+ /* We split traces at the prologue/epilogue notes because those
+ are points at which the unwind info is usually stable. This
+ makes it easier to find spots with identical unwind info so
+ that we can use remember/restore_state opcodes. */
+ if (NOTE_P (insn))
+ switch (NOTE_KIND (insn))
+ {
+ case NOTE_INSN_PROLOGUE_END:
+ case NOTE_INSN_EPILOGUE_BEG:
+ return true;
+ }
+
+ return false;
+}
+
/* Divide OFF by DWARF_CIE_DATA_ALIGNMENT, asserting no remainder. */
static inline HOST_WIDE_INT
return dst;
}
-/* Free an allocated CFI row. */
-
-static void
-free_cfi_row (dw_cfi_row *row)
-{
- if (row != NULL)
- {
- VEC_free (dw_cfi_ref, gc, row->reg_save);
- ggc_free (row);
- }
-}
-
/* Generate a new label for the CFI info to refer to. */
static char *
static void
add_cfi (dw_cfi_ref cfi)
{
- if (emit_cfa_remember)
- {
- dw_cfi_ref cfi_remember;
-
- /* Emit the state save. */
- emit_cfa_remember = false;
- cfi_remember = new_cfi ();
- cfi_remember->dw_cfi_opc = DW_CFA_remember_state;
- add_cfi (cfi_remember);
- }
-
any_cfis_emitted = true;
if (add_cfi_insn != NULL)
&a->dw_cfi_oprnd2, &b->dw_cfi_oprnd2));
}
+/* Determine if two CFI_ROW structures are identical.  Compares the
+   CFA (via the explicit CFI if present, else the computed form),
+   the argument size, and the per-register save vectors.  */
+
+static bool
+cfi_row_equal_p (dw_cfi_row *a, dw_cfi_row *b)
+{
+ size_t i, n_a, n_b, n_max;
+
+ if (a->cfa_cfi)
+ {
+ if (!cfi_equal_p (a->cfa_cfi, b->cfa_cfi))
+ return false;
+ }
+ else if (!cfa_equal_p (&a->cfa, &b->cfa))
+ return false;
+
+ if (a->args_size != b->args_size)
+ return false;
+
+ n_a = VEC_length (dw_cfi_ref, a->reg_save);
+ n_b = VEC_length (dw_cfi_ref, b->reg_save);
+ n_max = MAX (n_a, n_b);
+
+ /* Iterate to the longer length, padding the shorter vector with
+ NULL entries; NOTE(review): this assumes cfi_equal_p treats two
+ NULLs as equal and NULL vs. non-NULL as unequal -- confirm
+ against its definition.  */
+ for (i = 0; i < n_max; ++i)
+ {
+ dw_cfi_ref r_a = NULL, r_b = NULL;
+
+ if (i < n_a)
+ r_a = VEC_index (dw_cfi_ref, a->reg_save, i);
+ if (i < n_b)
+ r_b = VEC_index (dw_cfi_ref, b->reg_save, i);
+
+ if (!cfi_equal_p (r_a, r_b))
+ return false;
+ }
+
+ return true;
+}
+
/* The CFA is now calculated from NEW_CFA. Consider OLD_CFA in determining
what opcode to emit. Returns the CFI opcode to effect the change, or
NULL if NEW_CFA == OLD_CFA. */
return offset;
}
-/* Precomputed args_size for CODE_LABELs and BARRIERs preceeding them,
- indexed by INSN_UID. */
-
-static HOST_WIDE_INT *barrier_args_size;
-
-/* Helper function for compute_barrier_args_size. Handle one insn. */
-
-static HOST_WIDE_INT
-compute_barrier_args_size_1 (rtx insn, HOST_WIDE_INT cur_args_size,
- VEC (rtx, heap) **next)
-{
- HOST_WIDE_INT offset = 0;
- int i;
-
- if (! RTX_FRAME_RELATED_P (insn))
- {
- if (prologue_epilogue_contains (insn))
- /* Nothing */;
- else if (GET_CODE (PATTERN (insn)) == SET)
- offset = stack_adjust_offset (PATTERN (insn), cur_args_size, 0);
- else if (GET_CODE (PATTERN (insn)) == PARALLEL
- || GET_CODE (PATTERN (insn)) == SEQUENCE)
- {
- /* There may be stack adjustments inside compound insns. Search
- for them. */
- for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
- if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
- offset += stack_adjust_offset (XVECEXP (PATTERN (insn), 0, i),
- cur_args_size, offset);
- }
- }
- else
- {
- rtx expr = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);
-
- if (expr)
- {
- expr = XEXP (expr, 0);
- if (GET_CODE (expr) == PARALLEL
- || GET_CODE (expr) == SEQUENCE)
- for (i = 1; i < XVECLEN (expr, 0); i++)
- {
- rtx elem = XVECEXP (expr, 0, i);
-
- if (GET_CODE (elem) == SET && !RTX_FRAME_RELATED_P (elem))
- offset += stack_adjust_offset (elem, cur_args_size, offset);
- }
- }
- }
-
-#ifndef STACK_GROWS_DOWNWARD
- offset = -offset;
-#endif
-
- cur_args_size += offset;
- if (cur_args_size < 0)
- cur_args_size = 0;
-
- if (JUMP_P (insn))
- {
- rtx dest = JUMP_LABEL (insn);
-
- if (dest)
- {
- if (barrier_args_size [INSN_UID (dest)] < 0)
- {
- barrier_args_size [INSN_UID (dest)] = cur_args_size;
- VEC_safe_push (rtx, heap, *next, dest);
- }
- }
- }
-
- return cur_args_size;
-}
-
-/* Walk the whole function and compute args_size on BARRIERs. */
-
-static void
-compute_barrier_args_size (void)
-{
- int max_uid = get_max_uid (), i;
- rtx insn;
- VEC (rtx, heap) *worklist, *next, *tmp;
-
- barrier_args_size = XNEWVEC (HOST_WIDE_INT, max_uid);
- for (i = 0; i < max_uid; i++)
- barrier_args_size[i] = -1;
-
- worklist = VEC_alloc (rtx, heap, 20);
- next = VEC_alloc (rtx, heap, 20);
- insn = get_insns ();
- barrier_args_size[INSN_UID (insn)] = 0;
- VEC_quick_push (rtx, worklist, insn);
- for (;;)
- {
- while (!VEC_empty (rtx, worklist))
- {
- rtx prev, body, first_insn;
- HOST_WIDE_INT cur_args_size;
-
- first_insn = insn = VEC_pop (rtx, worklist);
- cur_args_size = barrier_args_size[INSN_UID (insn)];
- prev = prev_nonnote_insn (insn);
- if (prev && BARRIER_P (prev))
- barrier_args_size[INSN_UID (prev)] = cur_args_size;
-
- for (; insn; insn = NEXT_INSN (insn))
- {
- if (INSN_DELETED_P (insn) || NOTE_P (insn))
- continue;
- if (BARRIER_P (insn))
- break;
-
- if (LABEL_P (insn))
- {
- if (insn == first_insn)
- continue;
- else if (barrier_args_size[INSN_UID (insn)] < 0)
- {
- barrier_args_size[INSN_UID (insn)] = cur_args_size;
- continue;
- }
- else
- {
- /* The insns starting with this label have been
- already scanned or are in the worklist. */
- break;
- }
- }
-
- body = PATTERN (insn);
- if (GET_CODE (body) == SEQUENCE)
- {
- HOST_WIDE_INT dest_args_size = cur_args_size;
- for (i = 1; i < XVECLEN (body, 0); i++)
- if (INSN_ANNULLED_BRANCH_P (XVECEXP (body, 0, 0))
- && INSN_FROM_TARGET_P (XVECEXP (body, 0, i)))
- dest_args_size
- = compute_barrier_args_size_1 (XVECEXP (body, 0, i),
- dest_args_size, &next);
- else
- cur_args_size
- = compute_barrier_args_size_1 (XVECEXP (body, 0, i),
- cur_args_size, &next);
-
- if (INSN_ANNULLED_BRANCH_P (XVECEXP (body, 0, 0)))
- compute_barrier_args_size_1 (XVECEXP (body, 0, 0),
- dest_args_size, &next);
- else
- cur_args_size
- = compute_barrier_args_size_1 (XVECEXP (body, 0, 0),
- cur_args_size, &next);
- }
- else
- cur_args_size
- = compute_barrier_args_size_1 (insn, cur_args_size, &next);
- }
- }
-
- if (VEC_empty (rtx, next))
- break;
-
- /* Swap WORKLIST with NEXT and truncate NEXT for next iteration. */
- tmp = next;
- next = worklist;
- worklist = tmp;
- VEC_truncate (rtx, next, 0);
- }
-
- VEC_free (rtx, heap, worklist);
- VEC_free (rtx, heap, next);
-}
-
/* Add a CFI to update the running total of the size of arguments
pushed onto the stack. */
return;
}
else if (BARRIER_P (insn))
- {
- /* Don't call compute_barrier_args_size () if the only
- BARRIER is at the end of function. */
- if (barrier_args_size == NULL && next_nonnote_insn (insn))
- compute_barrier_args_size ();
- if (barrier_args_size == NULL)
- offset = 0;
- else
- {
- offset = barrier_args_size[INSN_UID (insn)];
- if (offset < 0)
- offset = 0;
- }
-
- offset -= queued_args_size;
-#ifndef STACK_GROWS_DOWNWARD
- offset = -offset;
-#endif
- }
+ return;
else if (GET_CODE (PATTERN (insn)) == SET)
offset = stack_adjust_offset (PATTERN (insn), queued_args_size, 0);
else if (GET_CODE (PATTERN (insn)) == PARALLEL
}
}
-/* Scan the function and create the initial set of CFI notes. */
+/* If START is the start of a trace, then initialize the state of that
+ trace from CUR_TRACE and CUR_ROW.  ORIGIN is the control transfer
+ insn through which the edge is reached, or NULL for fallthru. */
static void
-create_cfi_notes (void)
+maybe_record_trace_start (rtx start, rtx origin)
{
- rtx insn;
+ dw_trace_info *ti;
+
+ /* Sync queued data before propagating to a destination,
+ lest we propagate out-of-date data. */
+ dwarf2out_flush_queued_reg_saves ();
+ dwarf2out_args_size (queued_args_size);
+
+ ti = get_trace_info (start);
+ gcc_assert (ti != NULL);
- for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
+ if (dump_file)
{
- rtx pat;
+ fprintf (dump_file, " saw edge from trace %u to %u (via %s %d)\n",
+ get_trace_index (cur_trace), get_trace_index (ti),
+ (origin ? rtx_name[(int) GET_CODE (origin)] : "fallthru"),
+ (origin ? INSN_UID (origin) : 0));
+ }
- add_cfi_insn = PREV_INSN (insn);
+ if (ti->beg_row == NULL)
+ {
+ /* This is the first time we've encountered this trace. Propagate
+ state across the edge and push the trace onto the work list. */
+ ti->beg_row = copy_cfi_row (cur_row);
+ ti->cfa_store = cur_trace->cfa_store;
+ ti->cfa_temp = cur_trace->cfa_temp;
+ ti->regs_saved_in_regs = VEC_copy (reg_saved_in_data, heap,
+ cur_trace->regs_saved_in_regs);
+
+ VEC_safe_push (dw_trace_info_ref, heap, trace_work_list, ti);
+
+ if (dump_file)
+ fprintf (dump_file, "\tpush trace %u to worklist\n",
+ get_trace_index (ti));
+ }
+ else
+ {
+ /* We ought to have the same state incoming to a given trace no
+ matter how we arrive at the trace. Anything else means we've
+ got some kind of optimization error. */
+ gcc_checking_assert (cfi_row_equal_p (cur_row, ti->beg_row));
+ }
+}
- if (BARRIER_P (insn))
+/* Propagate CUR_TRACE state to the destinations implied by INSN. */
+/* ??? Sadly, this is in large part a duplicate of make_edges. */
+
+static void
+create_trace_edges (rtx insn)
+{
+ rtx tmp, lab;
+ int i, n;
+
+ if (JUMP_P (insn))
+ {
+ if (find_reg_note (insn, REG_NON_LOCAL_GOTO, NULL_RTX))
+ ;
+ else if (tablejump_p (insn, NULL, &tmp))
{
- dwarf2out_frame_debug (insn, false);
- continue;
- }
+ rtvec vec;
- if (NOTE_P (insn))
+ tmp = PATTERN (tmp);
+ vec = XVEC (tmp, GET_CODE (tmp) == ADDR_DIFF_VEC);
+
+ n = GET_NUM_ELEM (vec);
+ for (i = 0; i < n; ++i)
+ {
+ lab = XEXP (RTVEC_ELT (vec, i), 0);
+ maybe_record_trace_start (lab, insn);
+ }
+ }
+ else if (computed_jump_p (insn))
{
- switch (NOTE_KIND (insn))
+ for (lab = forced_labels; lab; lab = XEXP (lab, 1))
+ maybe_record_trace_start (XEXP (lab, 0), insn);
+ }
+ else if (returnjump_p (insn))
+ ;
+ else if ((tmp = extract_asm_operands (PATTERN (insn))) != NULL)
+ {
+ n = ASM_OPERANDS_LABEL_LENGTH (tmp);
+ for (i = 0; i < n; ++i)
{
- case NOTE_INSN_PROLOGUE_END:
- dwarf2out_flush_queued_reg_saves ();
- break;
+ lab = XEXP (ASM_OPERANDS_LABEL (tmp, i), 0);
+ maybe_record_trace_start (lab, insn);
+ }
+ }
+ else
+ {
+ lab = JUMP_LABEL (insn);
+ gcc_assert (lab != NULL);
+ maybe_record_trace_start (lab, insn);
+ }
+ }
+ else if (CALL_P (insn))
+ {
+ /* Sibling calls don't have edges inside this function. */
+ if (SIBLING_CALL_P (insn))
+ return;
- case NOTE_INSN_EPILOGUE_BEG:
-#if defined(HAVE_epilogue)
- dwarf2out_cfi_begin_epilogue (insn);
-#endif
- break;
+ /* Process non-local goto edges. */
+ if (can_nonlocal_goto (insn))
+ for (lab = nonlocal_goto_handler_labels; lab; lab = XEXP (lab, 1))
+ maybe_record_trace_start (XEXP (lab, 0), insn);
+ }
- case NOTE_INSN_CFA_RESTORE_STATE:
- add_cfi_insn = insn;
- dwarf2out_frame_debug_restore_state ();
- break;
+ /* Process EH edges. */
+ if (CALL_P (insn) || cfun->can_throw_non_call_exceptions)
+ {
+ eh_landing_pad lp = get_eh_landing_pad_from_rtx (insn);
+ if (lp)
+ maybe_record_trace_start (lp->landing_pad, insn);
+ }
+}
- case NOTE_INSN_SWITCH_TEXT_SECTIONS:
- /* In dwarf2out_switch_text_section, we'll begin a new FDE
- for the portion of the function in the alternate text
- section. The row state at the very beginning of that
- new FDE will be exactly the row state from the CIE.
- Emit whatever CFIs are necessary to make CUR_ROW current. */
- add_cfi_insn = insn;
- change_cfi_row (cie_cfi_row, cur_row);
- break;
- }
- continue;
+/* Scan the trace beginning at INSN and create the CFI notes for the
+ instructions therein. */
+
+static void
+scan_trace (dw_trace_info *trace)
+{
+ rtx insn = trace->head;
+
+ if (dump_file)
+ fprintf (dump_file, "Processing trace %u : start at %s %d\n",
+ get_trace_index (trace), rtx_name[(int) GET_CODE (insn)],
+ INSN_UID (insn));
+
+ trace->end_row = copy_cfi_row (trace->beg_row);
+
+ cur_trace = trace;
+ cur_row = trace->end_row;
+ queued_args_size = cur_row->args_size;
+
+ for (insn = NEXT_INSN (insn); insn ; insn = NEXT_INSN (insn))
+ {
+ rtx pat;
+
+ add_cfi_insn = PREV_INSN (insn);
+
+ /* Notice the end of a trace. */
+ if (BARRIER_P (insn) || save_point_p (insn))
+ {
+ dwarf2out_flush_queued_reg_saves ();
+ dwarf2out_args_size (queued_args_size);
+
+ /* Propagate across fallthru edges. */
+ if (!BARRIER_P (insn))
+ maybe_record_trace_start (insn, NULL);
+ break;
}
- if (!NONDEBUG_INSN_P (insn))
+ if (DEBUG_INSN_P (insn) || !inside_basic_block_p (insn))
continue;
pat = PATTERN (insn);
if (asm_noperands (pat) >= 0)
{
dwarf2out_frame_debug (insn, false);
- continue;
+ add_cfi_insn = insn;
}
-
- if (GET_CODE (pat) == SEQUENCE)
+ else
{
- int i, n = XVECLEN (pat, 0);
- for (i = 1; i < n; ++i)
- dwarf2out_frame_debug (XVECEXP (pat, 0, i), false);
- }
+ if (GET_CODE (pat) == SEQUENCE)
+ {
+ int i, n = XVECLEN (pat, 0);
+ for (i = 1; i < n; ++i)
+ dwarf2out_frame_debug (XVECEXP (pat, 0, i), false);
+ }
+
+ if (CALL_P (insn))
+ dwarf2out_frame_debug (insn, false);
+ else if (find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL)
+ || (cfun->can_throw_non_call_exceptions
+ && can_throw_internal (insn)))
+ dwarf2out_flush_queued_reg_saves ();
- if (CALL_P (insn)
- || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
- dwarf2out_frame_debug (insn, false);
+ /* Do not separate tablejump insns from their ADDR_DIFF_VEC.
+ Putting the note after the VEC should be ok. */
+ if (!tablejump_p (insn, NULL, &add_cfi_insn))
+ add_cfi_insn = insn;
- /* Do not separate tablejump insns from their ADDR_DIFF_VEC.
- Putting the note after the VEC should be ok. */
- if (!tablejump_p (insn, NULL, &add_cfi_insn))
- add_cfi_insn = insn;
+ dwarf2out_frame_debug (insn, true);
+ }
- dwarf2out_frame_debug (insn, true);
+ /* Note that a test for control_flow_insn_p does exactly the
+ same tests as are done to actually create the edges. So
+ always call the routine and let it not create edges for
+ non-control-flow insns. */
+ create_trace_edges (insn);
}
add_cfi_insn = NULL;
+ cur_row = NULL;
+ cur_trace = NULL;
}
-/* Determine if we need to save and restore CFI information around this
- epilogue. If SIBCALL is true, then this is a sibcall epilogue. If
- we do need to save/restore, then emit the save now, and insert a
- NOTE_INSN_CFA_RESTORE_STATE at the appropriate place in the stream. */
+/* Scan the function and create the initial set of CFI notes. */
static void
-dwarf2out_cfi_begin_epilogue (rtx insn)
+create_cfi_notes (void)
{
- bool saw_frp = false;
- rtx i;
+ dw_trace_info *ti;
- /* Scan forward to the return insn, noticing if there are possible
- frame related insns. */
- for (i = NEXT_INSN (insn); i ; i = NEXT_INSN (i))
- {
- if (!INSN_P (i))
- continue;
-
- /* Look for both regular and sibcalls to end the block. */
- if (returnjump_p (i))
- break;
- if (CALL_P (i) && SIBLING_CALL_P (i))
- break;
+ gcc_checking_assert (queued_reg_saves == NULL);
+ gcc_checking_assert (trace_work_list == NULL);
- if (GET_CODE (PATTERN (i)) == SEQUENCE)
- {
- int idx;
- rtx seq = PATTERN (i);
-
- if (returnjump_p (XVECEXP (seq, 0, 0)))
- break;
- if (CALL_P (XVECEXP (seq, 0, 0))
- && SIBLING_CALL_P (XVECEXP (seq, 0, 0)))
- break;
-
- for (idx = 0; idx < XVECLEN (seq, 0); idx++)
- if (RTX_FRAME_RELATED_P (XVECEXP (seq, 0, idx)))
- saw_frp = true;
- }
+ /* Always begin at the entry trace. */
+ ti = VEC_index (dw_trace_info, trace_info, 0);
+ scan_trace (ti);
- if (RTX_FRAME_RELATED_P (i))
- saw_frp = true;
+ while (!VEC_empty (dw_trace_info_ref, trace_work_list))
+ {
+ ti = VEC_pop (dw_trace_info_ref, trace_work_list);
+ scan_trace (ti);
}
- /* If the port doesn't emit epilogue unwind info, we don't need a
- save/restore pair. */
- if (!saw_frp)
- return;
+ VEC_free (queued_reg_save, heap, queued_reg_saves);
+ VEC_free (dw_trace_info_ref, heap, trace_work_list);
+}
- /* Otherwise, search forward to see if the return insn was the last
- basic block of the function. If so, we don't need save/restore. */
- gcc_assert (i != NULL);
- i = next_real_insn (i);
- if (i == NULL)
- return;
+/* Insert CFI notes between traces to properly change state between them. */
+/* ??? TODO: Make use of remember/restore_state. */
- /* Insert the restore before that next real insn in the stream, and before
- a potential NOTE_INSN_EPILOGUE_BEG -- we do need these notes to be
- properly nested. This should be after any label or alignment. This
- will be pushed into the CFI stream by the function below. */
- while (1)
+static void
+connect_traces (void)
+{
+ unsigned i, n = VEC_length (dw_trace_info, trace_info);
+ dw_trace_info *prev_ti, *ti;
+
+ prev_ti = VEC_index (dw_trace_info, trace_info, 0);
+
+ for (i = 1; i < n; ++i, prev_ti = ti)
{
- rtx p = PREV_INSN (i);
- if (!NOTE_P (p))
- break;
- if (NOTE_KIND (p) == NOTE_INSN_BASIC_BLOCK)
- break;
- i = p;
- }
- emit_note_before (NOTE_INSN_CFA_RESTORE_STATE, i);
+ dw_cfi_row *old_row;
- emit_cfa_remember = true;
+ ti = VEC_index (dw_trace_info, trace_info, i);
- /* And emulate the state save. */
- gcc_assert (remember_row == NULL);
- remember_row = copy_cfi_row (cur_row);
+ /* We must have both queued and processed every trace. */
+ gcc_assert (ti->beg_row && ti->end_row);
+
+ /* In dwarf2out_switch_text_section, we'll begin a new FDE
+ for the portion of the function in the alternate text
+ section. The row state at the very beginning of that
+ new FDE will be exactly the row state from the CIE. */
+ if (ti->switch_sections)
+ old_row = cie_cfi_row;
+ else
+ old_row = prev_ti->end_row;
+
+ add_cfi_insn = ti->head;
+ change_cfi_row (old_row, ti->beg_row);
+
+ if (dump_file && add_cfi_insn != ti->head)
+ {
+ rtx note;
+
+ fprintf (dump_file, "Fixup between trace %u and %u:\n", i - 1, i);
+
+ note = ti->head;
+ do
+ {
+ note = NEXT_INSN (note);
+ gcc_assert (NOTE_P (note) && NOTE_KIND (note) == NOTE_INSN_CFI);
+ output_cfi_directive (dump_file, NOTE_CFI (note));
+ }
+ while (note != add_cfi_insn);
+ }
+ }
}
-/* A "subroutine" of dwarf2out_cfi_begin_epilogue. Emit the restore
- required. */
+/* Set up the pseudo-cfg of instruction traces, as described at the
+ block comment at the top of the file. */
static void
-dwarf2out_frame_debug_restore_state (void)
+create_pseudo_cfg (void)
{
- dw_cfi_ref cfi = new_cfi ();
+ bool saw_barrier, switch_sections;
+ dw_trace_info *ti;
+ rtx insn;
+ unsigned i;
+
+ /* The first trace begins at the start of the function,
+ and begins with the CIE row state. */
+ trace_info = VEC_alloc (dw_trace_info, heap, 16);
+ ti = VEC_quick_push (dw_trace_info, trace_info, NULL);
+
+ memset (ti, 0, sizeof (*ti));
+ ti->head = get_insns ();
+ ti->beg_row = cie_cfi_row;
+ ti->cfa_store = cie_cfi_row->cfa;
+ ti->cfa_temp.reg = INVALID_REGNUM;
+ if (cie_return_save)
+ VEC_safe_push (reg_saved_in_data, heap,
+ ti->regs_saved_in_regs, cie_return_save);
- cfi->dw_cfi_opc = DW_CFA_restore_state;
- add_cfi (cfi);
+ /* Walk all the insns, collecting start of trace locations. */
+ saw_barrier = false;
+ switch_sections = false;
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ {
+ if (BARRIER_P (insn))
+ saw_barrier = true;
+ else if (NOTE_P (insn)
+ && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
+ {
+ /* We should have just seen a barrier. */
+ gcc_assert (saw_barrier);
+ switch_sections = true;
+ }
+ /* Watch out for save_point notes between basic blocks.
+ In particular, a note after a barrier. Do not record these,
+ delaying trace creation until the label. */
+ else if (save_point_p (insn)
+ && (LABEL_P (insn) || !saw_barrier))
+ {
+ ti = VEC_safe_push (dw_trace_info, heap, trace_info, NULL);
+ memset (ti, 0, sizeof (*ti));
+ ti->head = insn;
+ ti->switch_sections = switch_sections;
+
+ saw_barrier = false;
+ switch_sections = false;
+ }
+ }
+
+ /* Create the trace index after we've finished building trace_info,
+ avoiding stale pointer problems due to reallocation. */
+ trace_index = htab_create (VEC_length (dw_trace_info, trace_info),
+ dw_trace_info_hash, dw_trace_info_eq, NULL);
+ FOR_EACH_VEC_ELT (dw_trace_info, trace_info, i, ti)
+ {
+ void **slot;
- gcc_assert (remember_row != NULL);
- free_cfi_row (cur_row);
- cur_row = remember_row;
- remember_row = NULL;
+ if (dump_file)
+ fprintf (dump_file, "Creating trace %u : start at %s %d%s\n", i,
+ rtx_name[(int) GET_CODE (ti->head)], INSN_UID (ti->head),
+ ti->switch_sections ? " (section switch)" : "");
+
+ slot = htab_find_slot_with_hash (trace_index, ti,
+ INSN_UID (ti->head), INSERT);
+ gcc_assert (*slot == NULL);
+ *slot = (void *) ti;
+ }
}
-\f
+
/* Record the initial position of the return address. RTL is
INCOMING_RETURN_ADDR_RTX. */
static unsigned int
execute_dwarf2_frame (void)
{
- dw_trace_info dummy_trace;
-
- gcc_checking_assert (queued_reg_saves == NULL);
-
/* The first time we're called, compute the incoming frame state. */
if (cie_cfi_vec == NULL)
create_cie_data ();
- memset (&dummy_trace, 0, sizeof(dummy_trace));
- cur_trace = &dummy_trace;
-
- /* Set up state for generating call frame debug info. */
- cur_row = copy_cfi_row (cie_cfi_row);
- if (cie_return_save)
- VEC_safe_push (reg_saved_in_data, heap,
- cur_trace->regs_saved_in_regs, cie_return_save);
-
- cur_trace->cfa_store = cur_row->cfa;
- cur_trace->cfa_temp.reg = INVALID_REGNUM;
- queued_args_size = 0;
-
dwarf2out_alloc_current_fde ();
+ create_pseudo_cfg ();
+
/* Do the work. */
create_cfi_notes ();
+ connect_traces ();
add_cfis_to_fde ();
- /* Reset all function-specific information, particularly for GC. */
- XDELETEVEC (barrier_args_size);
- barrier_args_size = NULL;
- VEC_free (reg_saved_in_data, heap, cur_trace->regs_saved_in_regs);
- VEC_free (queued_reg_save, heap, queued_reg_saves);
+ /* Free all the data we allocated. */
+ {
+ size_t i;
+ dw_trace_info *ti;
- free_cfi_row (cur_row);
- cur_row = NULL;
- cur_trace = NULL;
+ FOR_EACH_VEC_ELT (dw_trace_info, trace_info, i, ti)
+ VEC_free (reg_saved_in_data, heap, ti->regs_saved_in_regs);
+ }
+ VEC_free (dw_trace_info, heap, trace_info);
+
+ htab_delete (trace_index);
+ trace_index = NULL;
return 0;
}