2017-12-12  Alexandre Oliva  <aoliva@redhat.com>
+ * cfgcleanup.c (delete_unreachable_blocks): Use alternate
+ block removal order if MAY_HAVE_DEBUG_BIND_INSNS.
+ * cfgexpand.c (label_rtx_for_bb): Skip debug insns.
+ * cfgrtl.c (try_redirect_by_replacing_jump): Skip debug insns.
+ (rtl_tidy_fallthru_edge): Likewise.
+ (rtl_verify_fallthru): Likewise.
+ (rtl_verify_bb_layout): Likewise.
+ (skip_insns_after_block): Likewise.
+ (duplicate_insn_chain): Use DEBUG_BIND_INSN_P.
+ * dwarf2out.c: Include print-rtl.h.
+ (dwarf2out_next_real_insn): New.
+ (dwarf2out_var_location): Call it. Disregard begin stmt markers.
+ Dump debug binds in asm comments.
+ * gimple-iterator.c (gimple_find_edge_insert_loc): Skip debug stmts.
+ * gimple-iterator.h (gsi_start_bb_nondebug): Remove; adjust
+ callers to use gsi_start_nondebug_bb instead.
+ (gsi_after_labels): Skip gimple debug stmts.
+ (gsi_start_nondebug): New.
+ * gimple-loop-interchange.cc (find_deps_in_bb_for_stmt): Adjust.
+ (proper_loop_form_for_interchange): Likewise.
+ * gimple-low.c (gimple_seq_may_fallthru): Take last nondebug stmt.
+ * gimple.h (gimple_seq_last_nondebug_stmt): New.
+ * gimplify.c (last_stmt_in_scope): Skip debug stmts.
+ (collect_fallthrough_labels): Likewise.
+ (should_warn_for_implicit_fallthrough): Likewise.
+ (warn_implicit_fallthrough_r): Likewise.
+ (expand_FALLTHROUGH_r): Likewise.
+ * graphite-isl-ast-to-gimple.c (gsi_insert_earliest): Adjust.
+ (graphite_copy_stmts_from_block): Skip nonbind markers.
+ * haifa-sched.c (sched_extend_bb): Skip debug insns.
+ * ipa-icf-gimple.c (func_checker::compare_bb): Adjust.
+ * jump.c (cleanup_barriers): Skip debug insns.
+ * omp-expand.c (expand_parallel_call): Skip debug insns.
+ (expand_task_call): Likewise.
+ (remove_exit_barrier): Likewise.
+ (expand_omp_taskreg): Likewise.
+ (expand_omp_for_init_counts): Likewise.
+ (expand_omp_for_generic): Likewise.
+ (expand_omp_for_static_nochunk): Likewise.
+ (expand_omp_for_static_chunk): Likewise.
+ (expand_omp_simd): Likewise.
+ (expand_omp_taskloop_for_outer): Likewise.
+ (expand_omp_taskloop_for_inner): Likewise.
+ (expand_oacc_for): Likewise.
+ (expand_omp_sections): Likewise.
+ (expand_omp_single): Likewise.
+ (expand_omp_synch): Likewise.
+ (expand_omp_atomic_load): Likewise.
+ (expand_omp_atomic_store): Likewise.
+ (expand_omp_atomic_fetch_op): Likewise.
+ (expand_omp_atomic_pipeline): Likewise.
+ (expand_omp_atomic_mutex): Likewise.
+ (expand_omp_target): Likewise.
+ (grid_expand_omp_for_loop): Likewise.
+ (grid_expand_target_grid_body): Likewise.
+ (build_omp_regions_1): Likewise.
+ * omp-low.c (check_combined_parallel): Skip debug stmts.
+ * postreload.c (fixup_debug_insns): Skip nonbind debug insns.
+ * regcprop.c (find_oldest_value_reg): Ensure REGNO is not a pseudo.
+ * sese.c (sese_trivially_empty_bb_p): Use is_gimple_debug
+ instead of comparing against GIMPLE_DEBUG.
+ * tree-cfg.c (make_blocks_1): Skip debug stmts.
+ (make_edges): Likewise.
+ (cleanup_dead_labels): Likewise.
+ (gimple_can_merge_blocks_p): Likewise.
+ (stmt_starts_bb_p): Likewise.
+ (gimple_block_label): Likewise.
+ (gimple_redirect_edge_and_branch): Likewise.
+ * tree-cfgcleanup.c (remove_forwarder_block): Rearrange skipping
+ of debug stmts.
+ (execute_cleanup_cfg_post_optimizing): Dump enumerated decls with
+ TDF_SLIM.
+ * tree-pretty-print.c (print_declaration): Omit initializer in slim
+ dumps.
+ * tree-ssa-dce.c (mark_stmt_if_obviously_necessary): Mark begin stmt
+ markers.
+ (eliminate_unnecessary_stmts): Stabilize block removal order.
+ * tree-ssa-tail-merge.c (find_duplicate): Skip debug stmts.
+ * var-tracking.c (get_first_insn): New.
+ (vt_emit_notes): Call it.
+ (vt_initialize): Walk any insns before the first BB.
+ (delete_debug_insns): Likewise.
+
* gimple.h (enum gimple_debug_subcode): Add
GIMPLE_DEBUG_BEGIN_STMT.
(gimple_debug_begin_stmt_p): New.
if (debug_insn && code != CODE_LABEL && code != BARRIER)
prev = PREV_INSN (debug_insn);
fallthru = split_block (bb, prev);
+
if (flow_transfer_insn)
{
BB_END (bb) = flow_transfer_insn;
find_unreachable_blocks ();
- /* When we're in GIMPLE mode and there may be debug insns, we should
- delete blocks in reverse dominator order, so as to get a chance
- to substitute all released DEFs into debug stmts. If we don't
- have dominators information, walking blocks backward gets us a
- better chance of retaining most debug information than
+ /* When we're in GIMPLE mode and there may be debug bind insns, we
+ should delete blocks in reverse dominator order, so as to get a
+ chance to substitute all released DEFs into debug bind stmts. If
+ we don't have dominators information, walking blocks backward
+ gets us a better chance of retaining most debug information than
otherwise. */
- if (MAY_HAVE_DEBUG_INSNS && current_ir_type () == IR_GIMPLE
+ if (MAY_HAVE_DEBUG_BIND_INSNS && current_ir_type () == IR_GIMPLE
&& dom_info_available_p (CDI_DOMINATORS))
{
for (b = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
{
glabel *lab_stmt;
+ if (is_gimple_debug (gsi_stmt (gsi)))
+ continue;
+
lab_stmt = dyn_cast <glabel *> (gsi_stmt (gsi));
if (!lab_stmt)
break;
gimple_stmt_iterator gsi;
gimple_seq stmts;
gimple *stmt = NULL;
- rtx_note *note;
+ rtx_note *note = NULL;
rtx_insn *last;
edge e;
edge_iterator ei;
}
}
- gsi = gsi_start (stmts);
+ gsi = gsi_start_nondebug (stmts);
if (!gsi_end_p (gsi))
{
stmt = gsi_stmt (gsi);
if (gimple_code (stmt) != GIMPLE_LABEL)
stmt = NULL;
}
+ gsi = gsi_start (stmts);
+ gimple *label_stmt = stmt;
rtx_code_label **elt = lab_rtx_for_bb->get (bb);
- if (stmt || elt)
+ if (stmt)
+ /* We'll get to it in the loop below, and get back to
+ emit_label_and_note then. */
+ ;
+ else if (stmt || elt)
{
+ emit_label_and_note:
+ gcc_checking_assert (!note);
last = get_last_insn ();
if (stmt)
BB_HEAD (bb) = NEXT_INSN (last);
if (NOTE_P (BB_HEAD (bb)))
BB_HEAD (bb) = NEXT_INSN (BB_HEAD (bb));
+ gcc_assert (LABEL_P (BB_HEAD (bb)));
note = emit_note_after (NOTE_INSN_BASIC_BLOCK, BB_HEAD (bb));
maybe_dump_rtl_for_gimple_stmt (stmt, last);
else
BB_HEAD (bb) = note = emit_note (NOTE_INSN_BASIC_BLOCK);
- NOTE_BASIC_BLOCK (note) = bb;
+ if (note)
+ NOTE_BASIC_BLOCK (note) = bb;
for (; !gsi_end_p (gsi); gsi_next (&gsi))
{
stmt = gsi_stmt (gsi);
+ if (stmt == label_stmt)
+ goto emit_label_and_note;
+
/* If this statement is a non-debug one, and we generate debug
insns, then this one might be the last real use of a TERed
SSA_NAME, but where there are still some debug uses further
if (tablejump_p (insn, &label, &table))
delete_insn_chain (label, table, false);
- barrier = next_nonnote_insn (BB_END (src));
+ barrier = next_nonnote_nondebug_insn (BB_END (src));
if (!barrier || !BARRIER_P (barrier))
emit_barrier_after (BB_END (src));
else
the head of block C and assert that we really do fall through. */
for (q = NEXT_INSN (BB_END (b)); q != BB_HEAD (c); q = NEXT_INSN (q))
- if (INSN_P (q))
+ if (NONDEBUG_INSN_P (q))
return;
/* Remove what will soon cease being the jump insn from the source block.
else
for (insn = NEXT_INSN (BB_END (e->src)); insn != BB_HEAD (e->dest);
insn = NEXT_INSN (insn))
- if (BARRIER_P (insn) || INSN_P (insn))
+ if (BARRIER_P (insn) || NONDEBUG_INSN_P (insn))
{
error ("verify_flow_info: Incorrect fallthru %i->%i",
e->src->index, e->dest->index);
{
basic_block bb;
int err = 0;
- rtx_insn *x;
+ rtx_insn *x, *y;
int num_bb_notes;
rtx_insn * const rtx_first = get_insns ();
basic_block last_bb_seen = ENTRY_BLOCK_PTR_FOR_FN (cfun), curr_bb = NULL;
{
case BARRIER:
case NOTE:
+ case DEBUG_INSN:
break;
case CODE_LABEL:
if (JUMP_P (x)
&& returnjump_p (x) && ! condjump_p (x)
- && ! (next_nonnote_insn (x) && BARRIER_P (next_nonnote_insn (x))))
+ && ! ((y = next_nonnote_nondebug_insn (x))
+ && BARRIER_P (y)))
fatal_insn ("return not followed by barrier", x);
if (curr_bb && x == BB_END (curr_bb))
last_insn = insn;
continue;
+ case DEBUG_INSN:
+ continue;
+
case NOTE:
switch (NOTE_KIND (insn))
{
{
case DEBUG_INSN:
/* Don't duplicate label debug insns. */
- if (TREE_CODE (INSN_VAR_LOCATION_DECL (insn)) == LABEL_DECL)
+ if (DEBUG_BIND_INSN_P (insn)
+ && TREE_CODE (INSN_VAR_LOCATION_DECL (insn)) == LABEL_DECL)
break;
/* FALLTHRU */
case INSN:
#include "toplev.h"
#include "md5.h"
#include "tree-pretty-print.h"
+#include "print-rtl.h"
#include "debug.h"
#include "common/common-target.h"
#include "langhooks.h"
/* One above highest N where .LVLN label might be equal to .Ltext0 label. */
static unsigned int first_loclabel_num_not_at_text_label;
+/* Look ahead for a real insn, or for a begin stmt marker. */
+
+static rtx_insn *
+dwarf2out_next_real_insn (rtx_insn *loc_note)
+{
+ rtx_insn *next_real = NEXT_INSN (loc_note);
+
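+ /* Stop at the first INSN_P, debug insns included, so that begin
+ stmt markers are not skipped over.  */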
+ while (next_real)
+ if (INSN_P (next_real))
+ break;
+ else
+ next_real = NEXT_INSN (next_real);
+
+ return next_real;
+}
+
/* Called by the final INSN scan whenever we see a var location. We
use it to drop labels in the right places, and throw the location in
our lookup table. */
loc_note = NULL;
var_loc_p = false;
- next_real = next_real_insn (call_insn);
+ next_real = dwarf2out_next_real_insn (call_insn);
next_note = NULL;
cached_next_real_insn = NULL;
goto create_label;
|| next_note->deleted ()
|| ! NOTE_P (next_note)
|| (NOTE_KIND (next_note) != NOTE_INSN_VAR_LOCATION
+ && NOTE_KIND (next_note) != NOTE_INSN_BEGIN_STMT
&& NOTE_KIND (next_note) != NOTE_INSN_CALL_ARG_LOCATION))
next_note = NULL;
if (! next_real)
- next_real = next_real_insn (loc_note);
+ next_real = dwarf2out_next_real_insn (loc_note);
if (next_note)
{
newloc->label = last_postcall_label;
}
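+ /* Under -dA (flag_debug_asm), dump the bound variable's name and
+ its location pattern as an assembler comment, or RESET when the
+ binding is being dropped.  */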
+ if (var_loc_p && flag_debug_asm)
+ {
+ const char *name = NULL, *sep = " => ", *patstr = NULL;
+ if (decl && DECL_NAME (decl))
+ name = IDENTIFIER_POINTER (DECL_NAME (decl));
+ if (NOTE_VAR_LOCATION_LOC (loc_note))
+ patstr = str_pattern_slim (NOTE_VAR_LOCATION_LOC (loc_note));
+ else
+ {
+ sep = " ";
+ patstr = "RESET";
+ }
+ fprintf (asm_out_file, "\t%s DEBUG %s%s%s\n", ASM_COMMENT_START,
+ name, sep, patstr);
+ }
+
last_var_location_insn = next_real;
last_in_cold_section_p = in_cold_section_p;
}
if (gsi_end_p (*gsi))
return true;
- /* Make sure we insert after any leading labels. */
+ /* Make sure we insert after any leading labels. We have to
+ skip debug stmts before or among them, though. We didn't
+ have to skip debug stmts after the last label, but it
+ shouldn't hurt if we do. */
tmp = gsi_stmt (*gsi);
- while (gimple_code (tmp) == GIMPLE_LABEL)
+ while (gimple_code (tmp) == GIMPLE_LABEL
+ || is_gimple_debug (tmp))
{
gsi_next (gsi);
if (gsi_end_p (*gsi))
return true;
tmp = gsi_stmt (*gsi);
- if (!stmt_ends_bb_p (tmp))
+ if (is_gimple_debug (tmp))
+ {
+ gimple_stmt_iterator si = *gsi;
+ gsi_prev_nondebug (&si);
+ if (!gsi_end_p (si))
+ tmp = gsi_stmt (si);
+ /* If we don't have a BB-ending nondebug stmt, we want to
+ insert after the trailing debug stmts. Otherwise, we may
+ insert before the BB-ending nondebug stmt, or split the
+ edge. */
+ if (!stmt_ends_bb_p (tmp))
+ return true;
+ *gsi = si;
+ }
+ else if (!stmt_ends_bb_p (tmp))
return true;
switch (gimple_code (tmp))
return i.ptr;
}
-/* Return a new iterator pointing to the first non-debug statement
- in basic block BB. */
-
-static inline gimple_stmt_iterator
-gsi_start_bb_nondebug (basic_block bb)
-{
- gimple_stmt_iterator gsi = gsi_start_bb (bb);
- while (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
- gsi_next (&gsi);
-
- return gsi;
-}
-
-/* Return a block statement iterator that points to the first non-label
- statement in block BB. */
+/* Return a block statement iterator that points to the first
+ non-label statement in block BB. Skip debug stmts only if they
+ precede labels. */
static inline gimple_stmt_iterator
gsi_after_labels (basic_block bb)
{
gimple_stmt_iterator gsi = gsi_start_bb (bb);
- while (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) == GIMPLE_LABEL)
- gsi_next (&gsi);
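+ /* Advance GSI past any leading run of labels, using GSKIP to look
+ across debug stmts interspersed among them; a debug stmt that
+ follows the last label is not skipped, so GSI may end up
+ pointing at it.  */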
+ for (gimple_stmt_iterator gskip = gsi;
+ !gsi_end_p (gskip); )
+ {
+ if (is_gimple_debug (gsi_stmt (gskip)))
+ gsi_next (&gskip);
+ else if (gimple_code (gsi_stmt (gskip)) == GIMPLE_LABEL)
+ {
+ gsi_next (&gskip);
+ gsi = gskip;
+ }
+ else
+ break;
+ }
return gsi;
}
while (!gsi_end_p (*i) && is_gimple_debug (gsi_stmt (*i)));
}
+/* Return a new iterator pointing to the first non-debug statement in
+ SEQ. */
+
+static inline gimple_stmt_iterator
+gsi_start_nondebug (gimple_seq seq)
+{
+ gimple_stmt_iterator gsi = gsi_start (seq);
+ if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
+ gsi_next_nondebug (&gsi);
+
+ return gsi;
+}
+
/* Return a new iterator pointing to the first non-debug statement in
basic block BB. */
}
gimple_set_plf (stmt, GF_PLF_1, true);
}
- for (gsi = gsi_start_bb_nondebug (bb);
+ for (gsi = gsi_start_nondebug_bb (bb);
!gsi_end_p (gsi) && (stmt = gsi_stmt (gsi)) != consumer;)
{
/* Move dep stmts to sequence STMTS. */
|| dominated_by_p (CDI_DOMINATORS, exit->src, bb))
continue;
- for (gimple_stmt_iterator gsi = gsi_start_bb_nondebug (bb);
+ for (gimple_stmt_iterator gsi = gsi_start_nondebug_bb (bb);
!gsi_end_p (gsi); gsi_next_nondebug (&gsi))
if (gimple_vuse (gsi_stmt (gsi)))
{
bool
gimple_seq_may_fallthru (gimple_seq seq)
{
- return gimple_stmt_may_fallthru (gimple_seq_last_stmt (seq));
+ return gimple_stmt_may_fallthru (gimple_seq_last_nondebug_stmt (seq));
}
return gimple_code (gs) == GIMPLE_DEBUG;
}
+
+/* Return the last nondebug statement in GIMPLE sequence S. */
+
+static inline gimple *
+gimple_seq_last_nondebug_stmt (gimple_seq s)
+{
+ gimple_seq_node n;
+ for (n = gimple_seq_last (s);
+ n && is_gimple_debug (n);
+ n = n->prev)
+ if (n->prev == s)
+ return NULL;
+ return n;
+}
+
+
/* Return true if S is a GIMPLE_DEBUG BIND statement. */
static inline bool
return false;
}
-/* Find the last statement in a scope STMT. */
+/* Find the last nondebug statement in a scope STMT. */
static gimple *
last_stmt_in_scope (gimple *stmt)
case GIMPLE_BIND:
{
gbind *bind = as_a <gbind *> (stmt);
- stmt = gimple_seq_last_stmt (gimple_bind_body (bind));
+ stmt = gimple_seq_last_nondebug_stmt (gimple_bind_body (bind));
return last_stmt_in_scope (stmt);
}
case GIMPLE_TRY:
{
gtry *try_stmt = as_a <gtry *> (stmt);
- stmt = gimple_seq_last_stmt (gimple_try_eval (try_stmt));
+ stmt = gimple_seq_last_nondebug_stmt (gimple_try_eval (try_stmt));
gimple *last_eval = last_stmt_in_scope (stmt);
if (gimple_stmt_may_fallthru (last_eval)
&& (last_eval == NULL
|| !gimple_call_internal_p (last_eval, IFN_FALLTHROUGH))
&& gimple_try_kind (try_stmt) == GIMPLE_TRY_FINALLY)
{
- stmt = gimple_seq_last_stmt (gimple_try_cleanup (try_stmt));
+ stmt = gimple_seq_last_nondebug_stmt (gimple_try_cleanup (try_stmt));
return last_stmt_in_scope (stmt);
}
else
return last_eval;
}
+ case GIMPLE_DEBUG:
+ gcc_unreachable ();
+
default:
return stmt;
}
}
else if (gimple_call_internal_p (gsi_stmt (*gsi_p), IFN_ASAN_MARK))
;
- else
+ else if (!is_gimple_debug (gsi_stmt (*gsi_p)))
prev = gsi_stmt (*gsi_p);
gsi_next (gsi_p);
}
&& gimple_code (gsi_stmt (gsi)) == GIMPLE_LABEL
&& (l = gimple_label_label (as_a <glabel *> (gsi_stmt (gsi))))
&& !case_label_p (&gimplify_ctxp->case_labels, l))
- gsi_next (&gsi);
+ gsi_next_nondebug (&gsi);
if (gsi_end_p (gsi) || gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
return false;
}
while (!gsi_end_p (gsi)
&& (gimple_code (gsi_stmt (gsi)) == GIMPLE_LABEL
|| gimple_code (gsi_stmt (gsi)) == GIMPLE_PREDICT))
- gsi_next (&gsi);
+ gsi_next_nondebug (&gsi);
/* { ... something; default:; } */
if (gsi_end_p (gsi)
/* Found a label. Skip all immediately following labels. */
while (!gsi_end_p (*gsi_p)
&& gimple_code (gsi_stmt (*gsi_p)) == GIMPLE_LABEL)
- gsi_next (gsi_p);
+ gsi_next_nondebug (gsi_p);
/* There might be no more statements. */
if (gsi_end_p (*gsi_p))
}
else if (gimple_call_internal_p (stmt, IFN_ASAN_MARK))
;
- else
- /* Something other is not expected. */
+ else if (!is_gimple_debug (stmt))
+ /* Anything else is not expected. */
break;
gsi_next (&gsi2);
}
FOR_EACH_VEC_ELT (stmts, i, use_stmt)
{
gcc_assert (gimple_code (use_stmt) != GIMPLE_PHI);
- gimple_stmt_iterator gsi_def_stmt = gsi_start_bb_nondebug (begin_bb);
+ gimple_stmt_iterator gsi_def_stmt = gsi_start_nondebug_bb (begin_bb);
use_operand_p use_p;
ssa_op_iter op_iter;
else if (gimple_code (gsi_stmt (gsi_def_stmt)) == GIMPLE_PHI)
{
gimple_stmt_iterator bsi
- = gsi_start_bb_nondebug (gsi_bb (gsi_def_stmt));
+ = gsi_start_nondebug_bb (gsi_bb (gsi_def_stmt));
/* Insert right after the PHI statements. */
gsi_insert_before (&bsi, use_stmt, GSI_NEW_STMT);
}
{
if (gimple_debug_bind_p (copy))
gimple_debug_bind_reset_value (copy);
- else if (gimple_debug_source_bind_p (copy))
+ else if (gimple_debug_source_bind_p (copy)
+ || gimple_debug_nonbind_marker_p (copy))
;
else
gcc_unreachable ();
|| (!NOTE_P (insn)
&& !LABEL_P (insn)
/* Don't emit a NOTE if it would end up before a BARRIER. */
- && !BARRIER_P (NEXT_INSN (end))))
+ && !BARRIER_P (next_nondebug_insn (end))))
{
rtx_note *note = emit_note_after (NOTE_INSN_DELETED, end);
/* Make note appear outside BB. */
gimple_stmt_iterator gsi1, gsi2;
gimple *s1, *s2;
- gsi1 = gsi_start_bb_nondebug (bb1->bb);
- gsi2 = gsi_start_bb_nondebug (bb2->bb);
+ gsi1 = gsi_start_nondebug_bb (bb1->bb);
+ gsi2 = gsi_start_nondebug_bb (bb2->bb);
while (!gsi_end_p (gsi1))
{
{
if (BARRIER_P (insn))
{
- rtx_insn *prev = prev_nonnote_insn (insn);
+ rtx_insn *prev = prev_nonnote_nondebug_insn (insn);
if (!prev)
continue;
false, GSI_CONTINUE_LINKING);
}
- gsi = gsi_last_bb (bb);
+ gsi = gsi_last_nondebug_bb (bb);
t = gimple_omp_parallel_data_arg (entry_stmt);
if (t == NULL)
t1 = null_pointer_node;
else
priority = integer_zero_node;
- gsi = gsi_last_bb (bb);
+ gsi = gsi_last_nondebug_bb (bb);
tree t = gimple_omp_task_data_arg (entry_stmt);
if (t == NULL)
t2 = null_pointer_node;
statements that can appear in between are extremely limited -- no
memory operations at all. Here, we allow nothing at all, so the
only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */
- gsi = gsi_last_bb (exit_bb);
+ gsi = gsi_last_nondebug_bb (exit_bb);
gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
- gsi_prev (&gsi);
+ gsi_prev_nondebug (&gsi);
if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
return;
FOR_EACH_EDGE (e, ei, exit_bb->preds)
{
- gsi = gsi_last_bb (e->src);
+ gsi = gsi_last_nondebug_bb (e->src);
if (gsi_end_p (gsi))
continue;
stmt = gsi_stmt (gsi);
entry_succ_e = single_succ_edge (entry_bb);
- gsi = gsi_last_bb (entry_bb);
+ gsi = gsi_last_nondebug_bb (entry_bb);
gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
|| gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
gsi_remove (&gsi, true);
/* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
so that it can be moved to the child function. */
- gsi = gsi_last_bb (entry_bb);
+ gsi = gsi_last_nondebug_bb (entry_bb);
stmt = gsi_stmt (gsi);
gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
|| gimple_code (stmt) == GIMPLE_OMP_TASK));
gcc_assert (e2->dest == region->exit);
remove_edge (BRANCH_EDGE (entry_bb));
set_immediate_dominator (CDI_DOMINATORS, e2->dest, e->src);
- gsi = gsi_last_bb (region->exit);
+ gsi = gsi_last_nondebug_bb (region->exit);
gcc_assert (!gsi_end_p (gsi)
&& gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
gsi_remove (&gsi, true);
/* Convert GIMPLE_OMP_{RETURN,CONTINUE} into a RETURN_EXPR. */
if (exit_bb)
{
- gsi = gsi_last_bb (exit_bb);
+ gsi = gsi_last_nondebug_bb (exit_bb);
gcc_assert (!gsi_end_p (gsi)
&& (gimple_code (gsi_stmt (gsi))
== (e2 ? GIMPLE_OMP_CONTINUE : GIMPLE_OMP_RETURN)));
if (l2_dom_bb == NULL)
l2_dom_bb = entry_bb;
entry_bb = e->dest;
- *gsi = gsi_last_bb (entry_bb);
+ *gsi = gsi_last_nondebug_bb (entry_bb);
}
if (POINTER_TYPE_P (itype))
l3_bb = BRANCH_EDGE (entry_bb)->dest;
exit_bb = region->exit;
- gsi = gsi_last_bb (entry_bb);
+ gsi = gsi_last_nondebug_bb (entry_bb);
gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
if (fd->ordered
e = split_block (entry_bb, gsi_stmt (gsi));
entry_bb = e->dest;
make_edge (zero_iter1_bb, entry_bb, EDGE_FALLTHRU);
- gsi = gsi_last_bb (entry_bb);
+ gsi = gsi_last_nondebug_bb (entry_bb);
set_immediate_dominator (CDI_DOMINATORS, entry_bb,
get_immediate_dominator (CDI_DOMINATORS,
zero_iter1_bb));
e = split_block (entry_bb, gsi_stmt (gsi));
entry_bb = e->dest;
make_edge (zero_iter2_bb, entry_bb, EDGE_FALLTHRU);
- gsi = gsi_last_bb (entry_bb);
+ gsi = gsi_last_nondebug_bb (entry_bb);
set_immediate_dominator (CDI_DOMINATORS, entry_bb,
get_immediate_dominator
(CDI_DOMINATORS, zero_iter2_bb));
{
/* Code to control the increment and predicate for the sequential
loop goes in the CONT_BB. */
- gsi = gsi_last_bb (cont_bb);
+ gsi = gsi_last_nondebug_bb (cont_bb);
gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi));
gcc_assert (gimple_code (cont_stmt) == GIMPLE_OMP_CONTINUE);
vmain = gimple_omp_continue_control_use (cont_stmt);
}
/* Add the loop cleanup function. */
- gsi = gsi_last_bb (exit_bb);
+ gsi = gsi_last_nondebug_bb (exit_bb);
if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT);
else if (gimple_omp_return_lhs (gsi_stmt (gsi)))
exit_bb = region->exit;
/* Iteration space partitioning goes in ENTRY_BB. */
- gsi = gsi_last_bb (entry_bb);
+ gsi = gsi_last_nondebug_bb (entry_bb);
gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
if (fd->collapse > 1)
gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT);
second_bb = split_block (entry_bb, cond_stmt)->dest;
- gsi = gsi_last_bb (second_bb);
+ gsi = gsi_last_nondebug_bb (second_bb);
gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
gsi_insert_before (&gsi, gimple_build_assign (tt, build_int_cst (itype, 0)),
gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
third_bb = split_block (second_bb, assign_stmt)->dest;
- gsi = gsi_last_bb (third_bb);
+ gsi = gsi_last_nondebug_bb (third_bb);
gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
t = build2 (MULT_EXPR, itype, q, threadid);
{
/* The code controlling the sequential loop replaces the
GIMPLE_OMP_CONTINUE. */
- gsi = gsi_last_bb (cont_bb);
+ gsi = gsi_last_nondebug_bb (cont_bb);
gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi));
gcc_assert (gimple_code (cont_stmt) == GIMPLE_OMP_CONTINUE);
vmain = gimple_omp_continue_control_use (cont_stmt);
}
/* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
- gsi = gsi_last_bb (exit_bb);
+ gsi = gsi_last_nondebug_bb (exit_bb);
if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
{
t = gimple_omp_return_lhs (gsi_stmt (gsi));
exit_bb = region->exit;
/* Trip and adjustment setup goes in ENTRY_BB. */
- gsi = gsi_last_bb (entry_bb);
+ gsi = gsi_last_nondebug_bb (entry_bb);
gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
if (fd->collapse > 1)
{
/* The code controlling the sequential loop goes in CONT_BB,
replacing the GIMPLE_OMP_CONTINUE. */
- gsi = gsi_last_bb (cont_bb);
+ gsi = gsi_last_nondebug_bb (cont_bb);
gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi));
vmain = gimple_omp_continue_control_use (cont_stmt);
vback = gimple_omp_continue_control_def (cont_stmt);
}
/* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
- gsi = gsi_last_bb (exit_bb);
+ gsi = gsi_last_nondebug_bb (exit_bb);
if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
{
t = gimple_omp_return_lhs (gsi_stmt (gsi));
exit_bb = region->exit;
l2_dom_bb = NULL;
- gsi = gsi_last_bb (entry_bb);
+ gsi = gsi_last_nondebug_bb (entry_bb);
gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
/* Not needed in SSA form right now. */
if (!broken_loop)
{
/* Code to control the increment goes in the CONT_BB. */
- gsi = gsi_last_bb (cont_bb);
+ gsi = gsi_last_nondebug_bb (cont_bb);
stmt = gsi_stmt (gsi);
gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
}
/* Remove GIMPLE_OMP_RETURN. */
- gsi = gsi_last_bb (exit_bb);
+ gsi = gsi_last_nondebug_bb (exit_bb);
gsi_remove (&gsi, true);
/* Connect the new blocks. */
gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
exit_bb = region->exit;
- gsi = gsi_last_bb (entry_bb);
+ gsi = gsi_last_nondebug_bb (entry_bb);
gimple *for_stmt = gsi_stmt (gsi);
gcc_assert (gimple_code (for_stmt) == GIMPLE_OMP_FOR);
if (fd->collapse > 1)
gsi = gsi_for_stmt (for_stmt);
gsi_remove (&gsi, true);
- gsi = gsi_last_bb (cont_bb);
+ gsi = gsi_last_nondebug_bb (cont_bb);
gsi_remove (&gsi, true);
- gsi = gsi_last_bb (exit_bb);
+ gsi = gsi_last_nondebug_bb (exit_bb);
gsi_remove (&gsi, true);
FALLTHRU_EDGE (entry_bb)->probability = profile_probability::always ();
exit_bb = region->exit;
/* Iteration space partitioning goes in ENTRY_BB. */
- gsi = gsi_last_bb (entry_bb);
+ gsi = gsi_last_nondebug_bb (entry_bb);
gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
if (fd->collapse > 1)
{
/* The code controlling the sequential loop replaces the
GIMPLE_OMP_CONTINUE. */
- gsi = gsi_last_bb (cont_bb);
+ gsi = gsi_last_nondebug_bb (cont_bb);
gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi));
gcc_assert (gimple_code (cont_stmt) == GIMPLE_OMP_CONTINUE);
vmain = gimple_omp_continue_control_use (cont_stmt);
gsi_remove (&gsi, true);
/* Remove the GIMPLE_OMP_RETURN statement. */
- gsi = gsi_last_bb (exit_bb);
+ gsi = gsi_last_nondebug_bb (exit_bb);
gsi_remove (&gsi, true);
FALLTHRU_EDGE (entry_bb)->probability = profile_probability::always ();
entry_bb = split->src;
/* Chunk setup goes at end of entry_bb, replacing the omp_for. */
- gsi = gsi_last_bb (entry_bb);
+ gsi = gsi_last_nondebug_bb (entry_bb);
gomp_for *for_stmt = as_a <gomp_for *> (gsi_stmt (gsi));
loc = gimple_location (for_stmt);
if (gimple_in_ssa_p (cfun))
{
- gsi = gsi_last_bb (cont_bb);
+ gsi = gsi_last_nondebug_bb (cont_bb);
gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi));
offset = gimple_omp_continue_control_use (cont_stmt);
occur, especially when noreturn routines are involved. */
if (cont_bb)
{
- gsi = gsi_last_bb (cont_bb);
+ gsi = gsi_last_nondebug_bb (cont_bb);
gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi));
loc = gimple_location (cont_stmt);
}
}
- gsi = gsi_last_bb (exit_bb);
+ gsi = gsi_last_nondebug_bb (exit_bb);
gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
loc = gimple_location (gsi_stmt (gsi));
len = EDGE_COUNT (l0_bb->succs);
gcc_assert (len > 0);
e = EDGE_SUCC (l0_bb, len - 1);
- si = gsi_last_bb (e->dest);
+ si = gsi_last_nondebug_bb (e->dest);
l2 = NULL_TREE;
if (gsi_end_p (si)
|| gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
else
FOR_EACH_EDGE (e, ei, l0_bb->succs)
{
- si = gsi_last_bb (e->dest);
+ si = gsi_last_nondebug_bb (e->dest);
if (gsi_end_p (si)
|| gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
{
/* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
GIMPLE_OMP_SECTIONS statement. */
- si = gsi_last_bb (entry_bb);
+ si = gsi_last_nondebug_bb (entry_bb);
sections_stmt = as_a <gomp_sections *> (gsi_stmt (si));
gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
vin = gimple_omp_sections_control (sections_stmt);
/* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
L0_BB. */
- switch_si = gsi_last_bb (l0_bb);
+ switch_si = gsi_last_nondebug_bb (l0_bb);
gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
if (exit_reachable)
{
u = build_case_label (u, NULL, t);
label_vec.quick_push (u);
- si = gsi_last_bb (s_entry_bb);
+ si = gsi_last_nondebug_bb (s_entry_bb);
gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
gsi_remove (&si, true);
if (s_exit_bb == NULL)
continue;
- si = gsi_last_bb (s_exit_bb);
+ si = gsi_last_nondebug_bb (s_exit_bb);
gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
gsi_remove (&si, true);
tree bfn_decl;
/* Code to get the next section goes in L1_BB. */
- si = gsi_last_bb (l1_bb);
+ si = gsi_last_nondebug_bb (l1_bb);
gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
}
/* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
- si = gsi_last_bb (l2_bb);
+ si = gsi_last_nondebug_bb (l2_bb);
if (gimple_omp_return_nowait_p (gsi_stmt (si)))
t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_NOWAIT);
else if (gimple_omp_return_lhs (gsi_stmt (si)))
entry_bb = region->entry;
exit_bb = region->exit;
- si = gsi_last_bb (entry_bb);
+ si = gsi_last_nondebug_bb (entry_bb);
gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
gsi_remove (&si, true);
single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
- si = gsi_last_bb (exit_bb);
+ si = gsi_last_nondebug_bb (exit_bb);
if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
{
tree t = gimple_omp_return_lhs (gsi_stmt (si));
entry_bb = region->entry;
exit_bb = region->exit;
- si = gsi_last_bb (entry_bb);
+ si = gsi_last_nondebug_bb (entry_bb);
gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
|| gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
|| gimple_code (gsi_stmt (si)) == GIMPLE_OMP_TASKGROUP
if (exit_bb)
{
- si = gsi_last_bb (exit_bb);
+ si = gsi_last_nondebug_bb (exit_bb);
gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
gsi_remove (&si, true);
single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
gimple *stmt;
tree decl, call, type, itype;
- gsi = gsi_last_bb (load_bb);
+ gsi = gsi_last_nondebug_bb (load_bb);
stmt = gsi_stmt (gsi);
gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
loc = gimple_location (stmt);
gsi_remove (&gsi, true);
store_bb = single_succ (load_bb);
- gsi = gsi_last_bb (store_bb);
+ gsi = gsi_last_nondebug_bb (store_bb);
gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
gsi_remove (&gsi, true);
machine_mode imode;
bool exchange;
- gsi = gsi_last_bb (load_bb);
+ gsi = gsi_last_nondebug_bb (load_bb);
stmt = gsi_stmt (gsi);
gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
/* If the load value is needed, then this isn't a store but an exchange. */
exchange = gimple_omp_atomic_need_value_p (stmt);
- gsi = gsi_last_bb (store_bb);
+ gsi = gsi_last_nondebug_bb (store_bb);
stmt = gsi_stmt (gsi);
gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE);
loc = gimple_location (stmt);
gsi_remove (&gsi, true);
/* Remove the GIMPLE_OMP_ATOMIC_LOAD that we verified above. */
- gsi = gsi_last_bb (load_bb);
+ gsi = gsi_last_nondebug_bb (load_bb);
gsi_remove (&gsi, true);
if (gimple_in_ssa_p (cfun))
gsi = gsi_after_labels (store_bb);
stmt = gsi_stmt (gsi);
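+ /* gsi_after_labels may leave us at a debug stmt; skip to the
+ first real statement, giving up if there is none.  */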
+ if (is_gimple_debug (stmt))
+ {
+ gsi_next_nondebug (&gsi);
+ if (gsi_end_p (gsi))
+ return false;
+ stmt = gsi_stmt (gsi);
+ }
loc = gimple_location (stmt);
if (!is_gimple_assign (stmt))
return false;
- gsi_next (&gsi);
+ gsi_next_nondebug (&gsi);
if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
return false;
need_new = gimple_omp_atomic_need_value_p (gsi_stmt (gsi));
if (!can_compare_and_swap_p (imode, true) || !can_atomic_load_p (imode))
return false;
- gsi = gsi_last_bb (load_bb);
+ gsi = gsi_last_nondebug_bb (load_bb);
gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
/* OpenMP does not imply any barrier-like semantics on its atomic ops.
force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
gsi_remove (&gsi, true);
- gsi = gsi_last_bb (store_bb);
+ gsi = gsi_last_nondebug_bb (store_bb);
gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
gsi_remove (&gsi, true);
- gsi = gsi_last_bb (store_bb);
+ gsi = gsi_last_nondebug_bb (store_bb);
stmt = gsi_stmt (gsi);
gsi_remove (&gsi, true);
return false;
/* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
- si = gsi_last_bb (load_bb);
+ si = gsi_last_nondebug_bb (load_bb);
gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
/* For floating-point values, we'll need to view-convert them to integers
}
gsi_remove (&si, true);
- si = gsi_last_bb (store_bb);
+ si = gsi_last_nondebug_bb (store_bb);
gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
if (iaddr == addr)
gassign *stmt;
tree t;
- si = gsi_last_bb (load_bb);
+ si = gsi_last_nondebug_bb (load_bb);
gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START);
gsi_insert_before (&si, stmt, GSI_SAME_STMT);
gsi_remove (&si, true);
- si = gsi_last_bb (store_bb);
+ si = gsi_last_nondebug_bb (store_bb);
gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
stmt = gimple_build_assign (build_simple_mem_ref (unshare_expr (addr)),
/* Split ENTRY_BB at GIMPLE_*,
so that it can be moved to the child function. */
- gsi = gsi_last_bb (entry_bb);
+ gsi = gsi_last_nondebug_bb (entry_bb);
stmt = gsi_stmt (gsi);
gcc_assert (stmt
&& gimple_code (stmt) == gimple_code (entry_stmt));
/* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
if (exit_bb)
{
- gsi = gsi_last_bb (exit_bb);
+ gsi = gsi_last_nondebug_bb (exit_bb);
gcc_assert (!gsi_end_p (gsi)
&& gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
stmt = gimple_build_return (NULL);
e = split_block_after_labels (new_bb);
else
{
- gsi = gsi_last_bb (new_bb);
+ gsi = gsi_last_nondebug_bb (new_bb);
gsi_prev (&gsi);
e = split_block (new_bb, gsi_stmt (gsi));
}
make_edge (else_bb, new_bb, EDGE_FALLTHRU);
device = tmp_var;
- gsi = gsi_last_bb (new_bb);
+ gsi = gsi_last_nondebug_bb (new_bb);
}
else
{
- gsi = gsi_last_bb (new_bb);
+ gsi = gsi_last_nondebug_bb (new_bb);
device = force_gimple_operand_gsi (&gsi, device, true, NULL_TREE,
true, GSI_SAME_STMT);
}
}
if (data_region && region->exit)
{
- gsi = gsi_last_bb (region->exit);
+ gsi = gsi_last_nondebug_bb (region->exit);
g = gsi_stmt (gsi);
gcc_assert (g && gimple_code (g) == GIMPLE_OMP_RETURN);
gsi_remove (&gsi, true);
gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
}
/* Remove the omp for statement. */
- gsi = gsi_last_bb (kfor->entry);
+ gsi = gsi_last_nondebug_bb (kfor->entry);
gsi_remove (&gsi, true);
/* Remove the GIMPLE_OMP_CONTINUE statement. */
- gsi = gsi_last_bb (kfor->cont);
+ gsi = gsi_last_nondebug_bb (kfor->cont);
gcc_assert (!gsi_end_p (gsi)
&& gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_CONTINUE);
gsi_remove (&gsi, true);
/* Replace the GIMPLE_OMP_RETURN with a barrier, if necessary. */
- gsi = gsi_last_bb (kfor->exit);
+ gsi = gsi_last_nondebug_bb (kfor->exit);
gcc_assert (!gsi_end_p (gsi)
&& gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
if (intra_group)
grid_expand_omp_for_loop (kfor, false);
/* Remove the omp for statement. */
- gimple_stmt_iterator gsi = gsi_last_bb (gpukernel->entry);
+ gimple_stmt_iterator gsi = gsi_last_nondebug_bb (gpukernel->entry);
gsi_remove (&gsi, true);
/* Replace the GIMPLE_OMP_RETURN at the end of the kernel region with a real
return. */
- gsi = gsi_last_bb (gpukernel->exit);
+ gsi = gsi_last_nondebug_bb (gpukernel->exit);
gcc_assert (!gsi_end_p (gsi)
&& gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
gimple *ret_stmt = gimple_build_return (NULL);
gimple *stmt;
basic_block son;
- gsi = gsi_last_bb (bb);
+ gsi = gsi_last_nondebug_bb (bb);
if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
{
struct omp_region *region;
{
WALK_SUBSTMTS;
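+ /* Debug stmts must not change whether the body is recognized as
+ a combined parallel, so ignore them here.  */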
+ case GIMPLE_DEBUG:
+ break;
case GIMPLE_OMP_FOR:
case GIMPLE_OMP_SECTIONS:
*info = *info == 0 ? 1 : -1;
{
rtx t;
- if (!DEBUG_INSN_P (insn))
+ if (!DEBUG_BIND_INSN_P (insn))
continue;
t = INSN_VAR_LOCATION_LOC (insn);
machine_mode mode = GET_MODE (reg);
unsigned int i;
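+ /* This pass only propagates values among hard registers, so
+ REGNO must not refer to a pseudo here.  */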
+ gcc_assert (regno < FIRST_PSEUDO_REGISTER);
+
/* If we are accessing REG in some mode other that what we set it in,
make sure that the replacement is valid. In particular, consider
(set (reg:DI r11) (...))
gimple_stmt_iterator gsi;
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
- if (gimple_code (gsi_stmt (gsi)) != GIMPLE_DEBUG
+ if (!is_gimple_debug (gsi_stmt (gsi))
&& gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
return false;
{
gimple_stmt_iterator i = gsi_start (seq);
gimple *stmt = NULL;
+ gimple *prev_stmt = NULL;
bool start_new_block = true;
bool first_stmt_of_seq = true;
while (!gsi_end_p (i))
{
- gimple *prev_stmt;
-
- prev_stmt = stmt;
+ /* PREV_STMT should only be set to a debug stmt if the debug
+ stmt is before nondebug stmts. Once stmt reaches a nondebug
+ nonlabel, prev_stmt will be set to it, so that
+ stmt_starts_bb_p will know to start a new block if a label is
+ found. However, if stmt was a label after debug stmts only,
+ keep the label in prev_stmt even if we find further debug
+ stmts, for there may be other labels after them, and they
+ should land in the same block. */
+ if (!prev_stmt || !stmt || !is_gimple_debug (stmt))
+ prev_stmt = stmt;
stmt = gsi_stmt (i);
if (stmt && is_gimple_call (stmt))
gsi_split_seq_before (&i, &seq);
bb = create_basic_block (seq, bb);
start_new_block = false;
+ prev_stmt = NULL;
}
/* Now add STMT to BB and create the subgraphs for special statement
tree target;
if (!label_stmt)
- break;
+ {
+ if (is_gimple_debug (gsi_stmt (gsi)))
+ continue;
+ break;
+ }
target = gimple_label_label (label_stmt);
for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
{
+ if (is_gimple_debug (gsi_stmt (i)))
+ continue;
+
tree label;
glabel *label_stmt = dyn_cast <glabel *> (gsi_stmt (i));
for (i = gsi_start_bb (bb); !gsi_end_p (i); )
{
+ if (is_gimple_debug (gsi_stmt (i)))
+ {
+ gsi_next (&i);
+ continue;
+ }
+
tree label;
glabel *label_stmt = dyn_cast <glabel *> (gsi_stmt (i));
gsi_next (&gsi))
{
tree lab;
+ if (is_gimple_debug (gsi_stmt (gsi)))
+ continue;
glabel *label_stmt = dyn_cast <glabel *> (gsi_stmt (gsi));
if (!label_stmt)
break;
if (stmt == NULL)
return false;
+ /* PREV_STMT is only set to a debug stmt if the debug stmt is before
+ any nondebug stmts in the block. We don't want to start another
+ block in this case: the debug stmt will already have started the
+ one STMT would start if we weren't outputting debug stmts. */
+ if (prev_stmt && is_gimple_debug (prev_stmt))
+ return false;
+
/* Labels start a new basic block only if the preceding statement
wasn't a label of the same type. This prevents the creation of
consecutive blocks that have nothing but a single label. */
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
tree label;
+
+ if (is_gimple_debug (gsi_stmt (gsi)))
+ continue;
+
gimple *prev_stmt = stmt;
stmt = gsi_stmt (gsi);
}
}
- gsi = gsi_last_bb (bb);
+ gsi = gsi_last_nondebug_bb (bb);
if (gsi_end_p (gsi))
continue;
tree label;
glabel *stmt;
- for (i = s; !gsi_end_p (i); first = false, gsi_next (&i))
+ for (i = s; !gsi_end_p (i); gsi_next (&i))
{
+ if (is_gimple_debug (gsi_stmt (i)))
+ continue;
stmt = dyn_cast <glabel *> (gsi_stmt (i));
if (!stmt)
break;
gsi_move_before (&i, &s);
return label;
}
+ first = false;
}
label = create_artificial_label (UNKNOWN_LOCATION);
return ret;
}
- gsi = gsi_last_bb (bb);
+ gsi = gsi_last_nondebug_bb (bb);
stmt = gsi_end_p (gsi) ? NULL : gsi_stmt (gsi);
switch (stmt ? gimple_code (stmt) : GIMPLE_ERROR_MARK)
{
tree decl;
label = gsi_stmt (gsi);
- if (is_gimple_debug (label))
- break;
- decl = gimple_label_label (as_a <glabel *> (label));
- if (EH_LANDING_PAD_NR (decl) != 0
- || DECL_NONLOCAL (decl)
- || FORCED_LABEL (decl)
- || !DECL_ARTIFICIAL (decl))
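+ /* Move to DEST any debug stmts (when we may move them at all)
+ and any labels that cannot simply be discarded: EH landing
+ pads, nonlocal or forced labels, and user labels.  */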
+ if (is_gimple_debug (label)
+ ? can_move_debug_stmts
+ : ((decl = gimple_label_label (as_a <glabel *> (label))),
+ EH_LANDING_PAD_NR (decl) != 0
+ || DECL_NONLOCAL (decl)
+ || FORCED_LABEL (decl)
+ || !DECL_ARTIFICIAL (decl)))
{
gsi_remove (&gsi, false);
gsi_insert_before (&gsi_to, label, GSI_SAME_STMT);
gsi_next (&gsi);
}
- /* Move debug statements if the destination has a single predecessor. */
- if (can_move_debug_stmts)
- {
- gsi_to = gsi_after_labels (dest);
- for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); )
- {
- gimple *debug = gsi_stmt (gsi);
- if (!is_gimple_debug (debug))
- break;
- gsi_remove (&gsi, false);
- gsi_insert_before (&gsi_to, debug, GSI_SAME_STMT);
- }
- }
-
bitmap_set_bit (cfgcleanup_altered_bbs, dest->index);
/* Update the dominators. */
flag_dump_noaddr = flag_dump_unnumbered = 1;
fprintf (final_output, "\n");
- dump_enumerated_decls (final_output, dump_flags | TDF_NOUID);
+ dump_enumerated_decls (final_output,
+ dump_flags | TDF_SLIM | TDF_NOUID);
flag_dump_noaddr = save_noaddr;
flag_dump_unnumbered = save_unnumbered;
if (fclose (final_output))
pp_space (pp);
pp_equal (pp);
pp_space (pp);
- dump_generic_node (pp, DECL_INITIAL (t), spc, flags, false);
+ if (!(flags & TDF_SLIM))
+ dump_generic_node (pp, DECL_INITIAL (t), spc, flags, false);
+ else
+ pp_string (pp, "<<< omitted >>>");
}
}
easily locate the debug temp bind stmt for a use thereof,
we could refrain from marking all debug temps here, and
mark them only if they're used. */
- if (!gimple_debug_bind_p (stmt)
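+ /* Nonbind (begin stmt) markers are always marked necessary, so
+ they are never removed as dead.  */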
+ if (gimple_debug_nonbind_marker_p (stmt)
+ || !gimple_debug_bind_p (stmt)
|| gimple_debug_bind_has_value_p (stmt)
|| TREE_CODE (gimple_debug_bind_get_var (stmt)) != DEBUG_EXPR_DECL)
mark_stmt_necessary (stmt, false);
dominate others. Walking backwards, this should
be the common case. ??? Do we need to recompute
dominators because of cfg_altered? */
- if (!MAY_HAVE_DEBUG_STMTS
- || !first_dom_son (CDI_DOMINATORS, bb))
+ if (!first_dom_son (CDI_DOMINATORS, bb))
delete_basic_block (bb);
else
{
tree label = gimple_label_label (as_a <glabel *> (gsi_stmt (gsi1)));
if (DECL_NONLOCAL (label) || FORCED_LABEL (label))
return;
- gsi_prev (&gsi1);
+ gsi_prev_nondebug (&gsi1);
}
while (!gsi_end_p (gsi2) && gimple_code (gsi_stmt (gsi2)) == GIMPLE_LABEL)
{
tree label = gimple_label_label (as_a <glabel *> (gsi_stmt (gsi2)));
if (DECL_NONLOCAL (label) || FORCED_LABEL (label))
return;
- gsi_prev (&gsi2);
+ gsi_prev_nondebug (&gsi2);
}
if (!(gsi_end_p (gsi1) && gsi_end_p (gsi2)))
return;
}
}
+/* Return BB's head, unless BB is the block that succeeds ENTRY_BLOCK,
+ in which case it searches back from BB's head for the very first
+ insn. Use [get_first_insn (bb), BB_HEAD (bb->next_bb)[ as a range
+ to iterate over all insns of a function while iterating over its
+ BBs. */
+
+static rtx_insn *
+get_first_insn (basic_block bb)
+{
+ rtx_insn *insn = BB_HEAD (bb);
+
+ if (bb->prev_bb == ENTRY_BLOCK_PTR_FOR_FN (cfun))
+ while (rtx_insn *prev = PREV_INSN (insn))
+ insn = prev;
+
+ return insn;
+}
+
/* Emit notes for the whole function. */
static void
{
/* Emit the notes for changes of variable locations between two
subsequent basic blocks. */
- emit_notes_for_differences (BB_HEAD (bb), &cur, &VTI (bb)->in);
+ emit_notes_for_differences (get_first_insn (bb),
+ &cur, &VTI (bb)->in);
if (MAY_HAVE_DEBUG_BIND_INSNS)
local_get_addr_cache = new hash_map<rtx, rtx>;
{
HOST_WIDE_INT offset = VTI (bb)->out.stack_adjust;
VTI (bb)->out.stack_adjust = VTI (bb)->in.stack_adjust;
- for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb));
- insn = NEXT_INSN (insn))
+
+ /* If we are walking the first basic block, walk any HEADER
+ insns that might be before it too. Unfortunately,
+ BB_HEADER and BB_FOOTER are not set while we run this
+ pass. */
+ insn = get_first_insn (bb);
+ for (rtx_insn *next;
+ insn != BB_HEAD (bb->next_bb)
+ ? next = NEXT_INSN (insn), true : false;
+ insn = next)
{
if (INSN_P (insn))
{
+ basic_block save_bb = BLOCK_FOR_INSN (insn);
+ if (!BLOCK_FOR_INSN (insn))
+ {
+ BLOCK_FOR_INSN (insn) = bb;
+ gcc_assert (DEBUG_INSN_P (insn));
+ /* Reset debug insns between basic blocks.
+ Their location is not reliable, because they
+ were probably not maintained up to date. */
+ if (DEBUG_BIND_INSN_P (insn))
+ INSN_VAR_LOCATION_LOC (insn)
+ = gen_rtx_UNKNOWN_VAR_LOC ();
+ }
+ else
+ gcc_assert (BLOCK_FOR_INSN (insn) == bb);
+
if (!frame_pointer_needed)
{
insn_stack_adjust_offset_pre_post (insn, &pre, &post);
}
}
}
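+ /* Restore the insn's original BLOCK_FOR_INSN, which is NULL for
+ the pre-BB insns we temporarily attached to BB above.  */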
+ BLOCK_FOR_INSN (insn) = save_bb;
}
}
gcc_assert (offset == VTI (bb)->out.stack_adjust);
FOR_EACH_BB_FN (bb, cfun)
{
- FOR_BB_INSNS_SAFE (bb, insn, next)
+ for (insn = get_first_insn (bb);
+ insn != BB_HEAD (bb->next_bb)
+ ? next = NEXT_INSN (insn), true : false;
+ insn = next)
if (DEBUG_INSN_P (insn))
{
tree decl = INSN_VAR_LOCATION_DECL (insn);