+2004-09-07 Nathan Sidwell <nathan@codesourcery.com>
+
+ * cfganal.c (flow_depth_first_order_compute,
+ dfs_enumerate_from): Use gcc_assert or gcc_unreachable.
+ * cfgbuild.c (inside_basic_block_p, control_flow_insn_p,
+ make_label_edge, make_edges, find_basic_blocks_1): Likewise.
+ * cfg.c (clear_edges, initialize_bb_rbi, compact_blocks,
+ remove_edge, alloc_aux_for_blocks, free_aux_for_blocks,
+ alloc_aux_for_edges, free_aux_for_edges): Likewise.
+ * cfgcleanup.c (try_forward_edges,
+ merge_blocks_move_predecessor_nojumps,
+ merge_blocks_move_successor_nojumps): Likewise.
+ * cfgexpand.c (expand_gimple_cond_expr,
+ expand_gimple_tailcall): Likewise.
+ * cfghooks.c (duplicate_block): Likewise.
+ * cfglayout.c (record_effective_endpoints,
+ insn_locators_initialize, change_scope, fixup_reorder_chain,
+ verify_insn_chain, fixup_fallthru_exit_predecessor,
+ duplicate_insn_chain, cfg_layout_finalize): Likewise.
+ * cfgloopanal.c (check_irred): Likewise.
+ * cfgloop.c (superloop_at_depth, flow_loops_free,
+ flow_loop_entry_edges_find, flow_loops_find,
+ flow_loop_outside_edge_p, get_loop_body,
+ get_loop_body_in_dom_order, get_loop_body_in_bfs_order,
+ get_loop_exit_edges, num_loop_branches, cancel_loop,
+ verify_loop_structure): Likewise.
+ * cfgloopmanip.c (find_path, remove_path, loop_delete_branch_edge,
+ duplicate_loop_to_header_edge, create_preheader,
+ create_loop_notes): Likewise.
+ * cfgrtl.c (delete_insn, try_redirect_by_replacing_jump,
+ redirect_branch_edge, force_nonfallthru_and_redirect,
+ rtl_split_edge, insert_insn_on_edge, commit_one_edge_insertion,
+ commit_edge_insertions, commit_edge_insertions_watch_calls,
+ purge_dead_edges, cfg_layout_redirect_edge_and_branch,
+ cfg_layout_redirect_edge_and_branch_force,
+ cfg_layout_merge_blocks, rtl_flow_call_edges_add): Likewise.
+ * cgraph.c (cgraph_node, cgraph_create_edge, cgraph_remove_edge,
+ cgraph_redirect_edge_callee, cgraph_global_info, cgraph_rtl_info,
+ cgraph_varpool_node): Likewise.
+ * cgraphunit.c (cgraph_finalize_function,
+ cgraph_finalize_compilation_unit, cgraph_mark_functions_to_output,
+ cgraph_expand_function, cgraph_remove_unreachable_nodes,
+ cgraph_clone_inlined_nodes, cgraph_mark_inline_edge,
+ cgraph_mark_inline, cgraph_expand_all_functions,
+ cgraph_build_static_cdtor): Likewise.
+ * combine.c (do_SUBST, try_combine, subst, combine_simplify_rtx,
+ simplify_logical, distribute_notes, insn_cuid): Likewise.
+ * conflict.c (conflict_graph_add, print_conflict): Likewise.
+ * coverage.c (rtl_coverage_counter_ref, tree_coverage_counter_ref,
+ coverage_checksum_string): Likewise.
+ * cse.c (make_new_qty, make_regs_eqv, insert, invalidate,
+ hash_rtx, exp_equiv_p, cse_basic_block, count_reg_usage,
+ cse_cc_succs, cse_condition_code_reg): Likewise.
+ * cselib.c (entry_and_rtx_equal_p, remove_useless_values,
+ rtx_equal_for_cselib_p, wrap_constant, cselib_hash_rtx,
+ new_cselib_val, cselib_subst_to_values, cselib_invalidate_regno,
+ cselib_record_set): Likewise.
+
2004-09-07 Jan Hubicka <jh@suse.cz>

 * tree-ssa-loop-ivopts.c (iv_value): Avoid invalid sharing on niter.
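
Every hunk below applies the same mechanical idiom: a guard of the form
"if (bad) abort ();" becomes "gcc_assert (!bad);", a compound guard
"if (a || b) abort ();" is inverted through De Morgan into
"gcc_assert (!a && !b);" (or split into two asserts), and an abort ()
sitting on an impossible path, typically a switch default, becomes
"gcc_unreachable ();".  A minimal sketch of the pattern; validate_kind
and KIND_NORMAL are hypothetical names used purely for illustration:

    /* Before: open-coded aborts.  */
    static void
    validate_kind_old (int kind, void *payload)
    {
      if (payload == NULL || kind < 0)
        abort ();
      switch (kind)
        {
        case KIND_NORMAL:	/* Hypothetical constant.  */
          break;
        default:
          abort ();
        }
    }

    /* After: the guard is inverted so the assertion states the invariant
       that must hold; the impossible switch arm is marked explicitly.  */
    static void
    validate_kind_new (int kind, void *payload)
    {
      gcc_assert (payload != NULL && kind >= 0);
      switch (kind)
        {
        case KIND_NORMAL:
          break;
        default:
          gcc_unreachable ();
        }
    }

Note that gcc_assert is itself compiled out when assertion checking is
disabled, so an #ifdef ENABLE_CHECKING wrapper around it is redundant;
several hunks keep a pre-existing wrapper, but the new_cselib_val hunk
near the end drops it.
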
EXIT_BLOCK_PTR->pred = NULL;
ENTRY_BLOCK_PTR->succ = NULL;
- if (n_edges)
- abort ();
+ gcc_assert (!n_edges);
}
\f
/* Allocate memory for basic_block. */
void
initialize_bb_rbi (basic_block bb)
{
- if (bb->rbi)
- abort ();
+ gcc_assert (!bb->rbi);
bb->rbi = pool_alloc (rbi_pool);
memset (bb->rbi, 0, sizeof (struct reorder_block_def));
}
i++;
}
- if (i != n_basic_blocks)
- abort ();
+ gcc_assert (i == n_basic_blocks);
for (; i < last_basic_block; i++)
BASIC_BLOCK (i) = NULL;
for (tmp = src->succ; tmp && tmp != e; tmp = tmp->succ_next)
last_succ = tmp;
- if (!tmp)
- abort ();
+ gcc_assert (tmp);
if (last_succ)
last_succ->succ_next = e->succ_next;
else
for (tmp = dest->pred; tmp && tmp != e; tmp = tmp->pred_next)
last_pred = tmp;
- if (!tmp)
- abort ();
+ gcc_assert (tmp);
if (last_pred)
last_pred->pred_next = e->pred_next;
else
alloc_aux_for_block (basic_block bb, int size)
{
/* Verify that aux field is clear. */
- if (bb->aux || !first_block_aux_obj)
- abort ();
+ gcc_assert (!bb->aux && first_block_aux_obj);
bb->aux = obstack_alloc (&block_aux_obstack, size);
memset (bb->aux, 0, size);
}
gcc_obstack_init (&block_aux_obstack);
initialized = 1;
}
-
- /* Check whether AUX data are still allocated. */
- else if (first_block_aux_obj)
- abort ();
+ else
+ /* Check whether AUX data are still allocated. */
+ gcc_assert (!first_block_aux_obj);
+
first_block_aux_obj = obstack_alloc (&block_aux_obstack, 0);
if (size)
{
void
free_aux_for_blocks (void)
{
- if (!first_block_aux_obj)
- abort ();
+ gcc_assert (first_block_aux_obj);
obstack_free (&block_aux_obstack, first_block_aux_obj);
first_block_aux_obj = NULL;
alloc_aux_for_edge (edge e, int size)
{
/* Verify that aux field is clear. */
- if (e->aux || !first_edge_aux_obj)
- abort ();
+ gcc_assert (!e->aux && first_edge_aux_obj);
e->aux = obstack_alloc (&edge_aux_obstack, size);
memset (e->aux, 0, size);
}
gcc_obstack_init (&edge_aux_obstack);
initialized = 1;
}
-
- /* Check whether AUX data are still allocated. */
- else if (first_edge_aux_obj)
- abort ();
+ else
+ /* Check whether AUX data are still allocated. */
+ gcc_assert (!first_edge_aux_obj);
first_edge_aux_obj = obstack_alloc (&edge_aux_obstack, 0);
if (size)
void
free_aux_for_edges (void)
{
- if (!first_edge_aux_obj)
- abort ();
+ gcc_assert (first_edge_aux_obj);
obstack_free (&edge_aux_obstack, first_edge_aux_obj);
first_edge_aux_obj = NULL;
free (stack);
sbitmap_free (visited);
- /* The number of nodes visited should not be greater than
- n_basic_blocks. */
- if (dfsnum > n_basic_blocks)
- abort ();
-
- /* There are some nodes left in the CFG that are unreachable. */
- if (dfsnum < n_basic_blocks)
- abort ();
+ /* The number of nodes visited should be the number of blocks. */
+ gcc_assert (dfsnum == n_basic_blocks);
return dfsnum;
}
for (e = lbb->pred; e; e = e->pred_next)
if (!(e->src->flags & BB_VISITED) && predicate (e->src, data))
{
- if (tv == rslt_max)
- abort ();
+ gcc_assert (tv != rslt_max);
rslt[tv++] = st[sp++] = e->src;
e->src->flags |= BB_VISITED;
}
for (e = lbb->succ; e; e = e->succ_next)
if (!(e->dest->flags & BB_VISITED) && predicate (e->dest, data))
{
- if (tv == rslt_max)
- abort ();
+ gcc_assert (tv != rslt_max);
rslt[tv++] = st[sp++] = e->dest;
e->dest->flags |= BB_VISITED;
}
return false;
default:
- abort ();
+ gcc_unreachable ();
}
}
return false;
default:
- abort ();
+ gcc_unreachable ();
}
}
static void
make_label_edge (sbitmap *edge_cache, basic_block src, rtx label, int flags)
{
- if (!LABEL_P (label))
- abort ();
+ gcc_assert (LABEL_P (label));
/* If the label was never emitted, this insn is junk, but avoid a
crash trying to refer to BLOCK_FOR_INSN (label). This can happen
/* Otherwise, we have a plain conditional or unconditional jump. */
else
{
- if (! JUMP_LABEL (insn))
- abort ();
+ gcc_assert (JUMP_LABEL (insn));
make_label_edge (edge_cache, bb, JUMP_LABEL (insn), 0);
}
}
break;
default:
- abort ();
+ gcc_unreachable ();
}
}
else if (bb_note)
delete_insn (bb_note);
- if (last_basic_block != n_basic_blocks)
- abort ();
+ gcc_assert (last_basic_block == n_basic_blocks);
clear_aux_for_blocks ();
}
if (t->dest == b)
break;
- if (nthreaded_edges >= n_basic_blocks)
- abort ();
+ gcc_assert (nthreaded_edges < n_basic_blocks);
threaded_edges[nthreaded_edges++] = t;
new_target = t->dest;
{
edge e;
int prob;
- if (n >= nthreaded_edges)
- abort ();
+
+ gcc_assert (n < nthreaded_edges);
t = threaded_edges [n++];
- if (t->src != first)
- abort ();
+ gcc_assert (t->src == first);
if (first->frequency)
prob = edge_frequency * REG_BR_PROB_BASE / first->frequency;
else
merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
{
rtx barrier;
+ bool only_notes;
/* If we are partitioning hot/cold basic blocks, we don't want to
mess up unconditional or indirect jumps that cross between hot
return;
barrier = next_nonnote_insn (BB_END (a));
- if (!BARRIER_P (barrier))
- abort ();
+ gcc_assert (BARRIER_P (barrier));
delete_insn (barrier);
/* Move block and loop notes out of the chain so that we do not
and adjust the block trees appropriately. Even better would be to have
a tighter connection between block trees and rtl so that this is not
necessary. */
- if (squeeze_notes (&BB_HEAD (a), &BB_END (a)))
- abort ();
+ only_notes = squeeze_notes (&BB_HEAD (a), &BB_END (a));
+ gcc_assert (!only_notes);
/* Scramble the insn chain. */
if (BB_END (a) != PREV_INSN (BB_HEAD (b)))
{
rtx barrier, real_b_end;
rtx label, table;
+ bool only_notes;
/* If we are partitioning hot/cold basic blocks, we don't want to
mess up unconditional or indirect jumps that cross between hot
and adjust the block trees appropriately. Even better would be to have
a tighter connection between block trees and rtl so that this is not
necessary. */
- if (squeeze_notes (&BB_HEAD (b), &BB_END (b)))
- abort ();
+ only_notes = squeeze_notes (&BB_HEAD (b), &BB_END (b));
+ gcc_assert (!only_notes);
+
/* Scramble the insn chain. */
reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a));
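
The only_notes temporary introduced above (and its cousins redirected,
deleted, and scan in later hunks) is the second idiom of this patch: a
call with side effects is hoisted out of the assertion rather than
written as gcc_assert (!squeeze_notes (...)).  Assuming the system.h
definition of the period, a build with assertion checking disabled
expands gcc_assert (EXPR) to roughly ((void)(0 && (EXPR))), so the
argument is parsed but never evaluated and the call would silently
disappear.  A minimal sketch of the wrong and right forms:

    /* Wrong: with assertion checking disabled, squeeze_notes is never
       called at all, and the notes are left in place.  */
    gcc_assert (!squeeze_notes (&BB_HEAD (a), &BB_END (a)));

    /* Right: the call always executes; only the check may compile away.  */
    only_notes = squeeze_notes (&BB_HEAD (a), &BB_END (a));
    gcc_assert (!only_notes);
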
jumpifnot (pred, label_rtx (GOTO_DESTINATION (else_exp)));
return NULL;
}
- if (TREE_CODE (then_exp) != GOTO_EXPR || TREE_CODE (else_exp) != GOTO_EXPR)
- abort ();
+ gcc_assert (TREE_CODE (then_exp) == GOTO_EXPR
+ && TREE_CODE (else_exp) == GOTO_EXPR);
jumpif (pred, label_rtx (GOTO_DESTINATION (then_exp)));
last = get_last_insn ();
after the sibcall (to perform the function return). These confuse the
find_sub_basic_blocks code, so we need to get rid of these. */
last = NEXT_INSN (last);
- if (!BARRIER_P (last))
- abort ();
+ gcc_assert (BARRIER_P (last));
*can_fallthru = false;
while (NEXT_INSN (last))
if (bb->count < new_count)
new_count = bb->count;
- if (!bb->pred)
- abort ();
+ gcc_assert (bb->pred);
#ifdef ENABLE_CHECKING
- if (!can_duplicate_block_p (bb))
- abort ();
+ gcc_assert (can_duplicate_block_p (bb));
#endif
new_bb = cfg_hooks->duplicate_block (bb);
&& NOTE_LINE_NUMBER (insn) != NOTE_INSN_BASIC_BLOCK;
insn = NEXT_INSN (insn))
continue;
- if (!insn)
- abort (); /* No basic blocks at all? */
+ /* No basic blocks at all? */
+ gcc_assert (insn);
+
if (PREV_INSN (insn))
cfg_layout_function_header =
unlink_insn_chain (get_insns (), PREV_INSN (insn));
if (NOTE_P (insn))
{
- switch (NOTE_LINE_NUMBER (insn))
+ gcc_assert (NOTE_LINE_NUMBER (insn) != NOTE_INSN_BLOCK_BEG
+ && NOTE_LINE_NUMBER (insn) != NOTE_INSN_BLOCK_END);
+ if (NOTE_LINE_NUMBER (insn) > 0)
{
- case NOTE_INSN_BLOCK_BEG:
- case NOTE_INSN_BLOCK_END:
- abort ();
-
- default:
- if (NOTE_LINE_NUMBER (insn) > 0)
- {
- expanded_location xloc;
- NOTE_EXPANDED_LOCATION (xloc, insn);
- line_number = xloc.line;
- file_name = xloc.file;
- }
- break;
+ expanded_location xloc;
+ NOTE_EXPANDED_LOCATION (xloc, insn);
+ line_number = xloc.line;
+ file_name = xloc.file;
}
}
else
while (ts1 != ts2)
{
- if (ts1 == NULL || ts2 == NULL)
- abort ();
+ gcc_assert (ts1 && ts2);
if (BLOCK_NUMBER (ts1) > BLOCK_NUMBER (ts2))
ts1 = BLOCK_SUPERCONTEXT (ts1);
else if (BLOCK_NUMBER (ts1) < BLOCK_NUMBER (ts2))
}
}
- if (index != n_basic_blocks)
- abort ();
+ gcc_assert (index == n_basic_blocks);
NEXT_INSN (insn) = cfg_layout_function_footer;
if (cfg_layout_function_footer)
{
rtx note;
edge e_fake;
+ bool redirected;
e_fake = unchecked_make_edge (bb, e_fall->dest, 0);
- if (!redirect_jump (BB_END (bb), block_label (bb), 0))
- abort ();
+ redirected = redirect_jump (BB_END (bb),
+ block_label (bb), 0);
+ gcc_assert (redirected);
+
note = find_reg_note (BB_END (bb), REG_BR_PROB, NULL_RTX);
if (note)
{
{
e_fall->flags &= ~EDGE_FALLTHRU;
#ifdef ENABLE_CHECKING
- if (!could_fall_through (e_taken->src, e_taken->dest))
- abort ();
+ gcc_assert (could_fall_through
+ (e_taken->src, e_taken->dest));
#endif
e_taken->flags |= EDGE_FALLTHRU;
update_br_prob_note (bb);
{
e_fall->flags &= ~EDGE_FALLTHRU;
#ifdef ENABLE_CHECKING
- if (!could_fall_through (e_taken->src, e_taken->dest))
- abort ();
+ gcc_assert (could_fall_through
+ (e_taken->src, e_taken->dest));
#endif
e_taken->flags |= EDGE_FALLTHRU;
update_br_prob_note (bb);
continue;
}
}
- else if (returnjump_p (bb_end_insn))
- continue;
else
{
- /* Otherwise we have some switch or computed jump. In the
- 99% case, there should not have been a fallthru edge. */
- if (! e_fall)
+#ifndef CASE_DROPS_THROUGH
+ /* Otherwise we have some return, switch or computed
+ jump. In the 99% case, there should not have been a
+ fallthru edge. */
+ gcc_assert (returnjump_p (bb_end_insn) || !e_fall);
+ continue;
+#else
+ if (returnjump_p (bb_end_insn) || !e_fall)
continue;
-
-#ifdef CASE_DROPS_THROUGH
/* Except for VAX. Since we didn't have predication for the
tablejump, the fallthru block should not have moved. */
if (bb->rbi->next == e_fall->dest)
continue;
bb_end_insn = skip_insns_after_block (bb);
-#else
- abort ();
#endif
}
}
for (prevx = NULL, insn_cnt1 = 1, x = get_insns ();
x != 0;
prevx = x, insn_cnt1++, x = NEXT_INSN (x))
- if (PREV_INSN (x) != prevx)
- abort ();
+ gcc_assert (PREV_INSN (x) == prevx);
- if (prevx != get_last_insn ())
- abort ();
+ gcc_assert (prevx == get_last_insn ());
for (nextx = NULL, insn_cnt2 = 1, x = get_last_insn ();
x != 0;
nextx = x, insn_cnt2++, x = PREV_INSN (x))
- if (NEXT_INSN (x) != nextx)
- abort ();
+ gcc_assert (NEXT_INSN (x) == nextx);
- if (insn_cnt1 != insn_cnt2)
- abort ();
+ gcc_assert (insn_cnt1 == insn_cnt2);
}
\f
/* If we have assembler epilogues, the block falling through to exit must
edge e;
basic_block bb = NULL;
- /* This transformation is not valid before reload, because we might separate
- a call from the instruction that copies the return value. */
- if (! reload_completed)
- abort ();
+ /* This transformation is not valid before reload, because we might
+ separate a call from the instruction that copies the return
+ value. */
+ gcc_assert (reload_completed);
for (e = EXIT_BLOCK_PTR->pred; e; e = e->pred_next)
if (e->flags & EDGE_FALLTHRU)
case NOTE_INSN_BASIC_BLOCK:
break;
- /* There is no purpose to duplicate prologue. */
- case NOTE_INSN_BLOCK_BEG:
- case NOTE_INSN_BLOCK_END:
- /* The BLOCK_BEG/BLOCK_END notes should be eliminated when BB
- reordering is in the progress. */
- case NOTE_INSN_EH_REGION_BEG:
- case NOTE_INSN_EH_REGION_END:
- /* Should never exist at BB duplication time. */
- abort ();
- break;
case NOTE_INSN_REPEATED_LINE_NUMBER:
case NOTE_INSN_UNLIKELY_EXECUTED_CODE:
emit_note_copy (insn);
break;
default:
- if (NOTE_LINE_NUMBER (insn) < 0)
- abort ();
+ /* All other notes should have already been eliminated. */
+ gcc_assert (NOTE_LINE_NUMBER (insn) >= 0);
+
/* It is possible that no_line_number is set and the note
won't be emitted. */
emit_note_copy (insn);
}
break;
default:
- abort ();
+ gcc_unreachable ();
}
}
insn = NEXT_INSN (last);
#ifdef ENABLE_CHECKING
verify_insn_chain ();
#endif
-
+
free_rbi_pool ();
FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
bb->rbi = NULL;
struct loop *
superloop_at_depth (struct loop *loop, unsigned depth)
{
- if (depth > (unsigned) loop->depth)
- abort ();
+ gcc_assert (depth <= (unsigned) loop->depth);
if (depth == (unsigned) loop->depth)
return loop;
{
unsigned i;
- if (! loops->num)
- abort ();
+ gcc_assert (loops->num);
/* Free the loop descriptors. */
for (i = 0; i < loops->num; i++)
num_entries++;
}
- if (! num_entries)
- abort ();
+ gcc_assert (num_entries);
loop->entry_edges = xmalloc (num_entries * sizeof (edge *));
/* This function cannot be repeatedly called with different
flags to build up the loop information. The loop tree
must always be built if this function is called. */
- if (! (flags & LOOP_TREE))
- abort ();
+ gcc_assert (flags & LOOP_TREE);
memset (loops, 0, sizeof *loops);
{
basic_block latch = e->src;
- if (e->flags & EDGE_ABNORMAL)
- abort ();
+ gcc_assert (!(e->flags & EDGE_ABNORMAL));
/* Look for back edges where a predecessor is dominated
by this block. A natural loop has a single entry
&& dominated_by_p (CDI_DOMINATORS, latch, header))
{
/* Shared headers should be eliminated by now. */
- if (more_latches)
- abort ();
+ gcc_assert (!more_latches);
more_latches = 1;
SET_BIT (headers, header->index);
num_loops++;
bool
flow_loop_outside_edge_p (const struct loop *loop, edge e)
{
- if (e->dest != loop->header)
- abort ();
+ gcc_assert (e->dest == loop->header);
return !flow_bb_inside_loop_p (loop, e->src);
}
basic_block *tovisit, bb;
unsigned tv = 0;
- if (!loop->num_nodes)
- abort ();
+ gcc_assert (loop->num_nodes);
tovisit = xcalloc (loop->num_nodes, sizeof (basic_block));
tovisit[tv++] = loop->header;
if (loop->latch == EXIT_BLOCK_PTR)
{
/* There may be blocks unreachable from EXIT_BLOCK. */
- if (loop->num_nodes != (unsigned) n_basic_blocks + 2)
- abort ();
+ gcc_assert (loop->num_nodes == (unsigned) n_basic_blocks + 2);
FOR_EACH_BB (bb)
tovisit[tv++] = bb;
tovisit[tv++] = EXIT_BLOCK_PTR;
loop->header) + 1;
}
- if (tv != loop->num_nodes)
- abort ();
+ gcc_assert (tv == loop->num_nodes);
return tovisit;
}
basic_block *tovisit;
int tv;
- if (!loop->num_nodes)
- abort ();
+ gcc_assert (loop->num_nodes);
tovisit = xcalloc (loop->num_nodes, sizeof (basic_block));
- if (loop->latch == EXIT_BLOCK_PTR)
- abort ();
+ gcc_assert (loop->latch != EXIT_BLOCK_PTR);
tv = 0;
fill_sons_in_loop (loop, loop->header, tovisit, &tv);
- if (tv != (int) loop->num_nodes)
- abort ();
+ gcc_assert (tv == (int) loop->num_nodes);
return tovisit;
}
unsigned int i = 0;
unsigned int vc = 1;
- if (!loop->num_nodes)
- abort ();
-
- if (loop->latch == EXIT_BLOCK_PTR)
- abort ();
+ gcc_assert (loop->num_nodes);
+ gcc_assert (loop->latch != EXIT_BLOCK_PTR);
blocks = xcalloc (loop->num_nodes, sizeof (basic_block));
visited = BITMAP_XMALLOC ();
}
}
- if (i < vc)
- abort ();
+ gcc_assert (i >= vc);
bb = blocks[vc++];
}
unsigned i, n;
basic_block * body;
- if (loop->latch == EXIT_BLOCK_PTR)
- abort ();
+ gcc_assert (loop->latch != EXIT_BLOCK_PTR);
body = get_loop_body (loop);
n = 0;
unsigned i, n;
basic_block * body;
- if (loop->latch == EXIT_BLOCK_PTR)
- abort ();
+ gcc_assert (loop->latch != EXIT_BLOCK_PTR);
body = get_loop_body (loop);
n = 0;
basic_block *bbs;
unsigned i;
- if (loop->inner)
- abort ();
+ gcc_assert (!loop->inner);
/* Move blocks up one level (they should be removed as soon as possible). */
bbs = get_loop_body (loop);
}
}
- if (err)
- abort ();
+ gcc_assert (!err);
free (sizes);
}
/* All edges should lead from a component with higher number to the
one with lower one. */
- if (g->vertices[e->src].component < g->vertices[e->dest].component)
- abort ();
+ gcc_assert (g->vertices[e->src].component
+ >= g->vertices[e->dest].component);
if (g->vertices[e->src].component != g->vertices[e->dest].component)
return;
static int
find_path (edge e, basic_block **bbs)
{
- if (e->dest->pred->pred_next)
- abort ();
+ gcc_assert (!e->dest->pred->pred_next);
/* Find bbs in the path. */
*bbs = xcalloc (n_basic_blocks, sizeof (basic_block));
basic_block *rem_bbs, *bord_bbs, *dom_bbs, from, bb;
int i, nrem, n_bord_bbs, n_dom_bbs;
sbitmap seen;
+ bool deleted;
if (!loop_delete_branch_edge (e, 0))
return false;
/* Remove the path. */
from = e->src;
- if (!loop_delete_branch_edge (e, 1))
- abort ();
+ deleted = loop_delete_branch_edge (e, 1);
+ gcc_assert (deleted);
dom_bbs = xcalloc (n_basic_blocks, sizeof (basic_block));
/* Cancel loops contained in the path. */
loop_delete_branch_edge (edge e, int really_delete)
{
basic_block src = e->src;
+ basic_block newdest;
int irr;
edge snd;
- if (src->succ->succ_next)
- {
- basic_block newdest;
-
- /* Cannot handle more than two exit edges. */
- if (src->succ->succ_next->succ_next)
- return false;
- /* And it must be just a simple branch. */
- if (!any_condjump_p (BB_END (src)))
- return false;
-
- snd = e == src->succ ? src->succ->succ_next : src->succ;
- newdest = snd->dest;
- if (newdest == EXIT_BLOCK_PTR)
- return false;
-
- /* Hopefully the above conditions should suffice. */
- if (!really_delete)
- return true;
+ gcc_assert (src->succ->succ_next);
+
+ /* Cannot handle more than two exit edges. */
+ if (src->succ->succ_next->succ_next)
+ return false;
+ /* And it must be just a simple branch. */
+ if (!any_condjump_p (BB_END (src)))
+ return false;
- /* Redirecting behaves wrongly wrto this flag. */
- irr = snd->flags & EDGE_IRREDUCIBLE_LOOP;
+ snd = e == src->succ ? src->succ->succ_next : src->succ;
+ newdest = snd->dest;
+ if (newdest == EXIT_BLOCK_PTR)
+ return false;
- if (!redirect_edge_and_branch (e, newdest))
- return false;
- src->succ->flags &= ~EDGE_IRREDUCIBLE_LOOP;
- src->succ->flags |= irr;
+ /* Hopefully the above conditions should suffice. */
+ if (!really_delete)
+ return true;
- return true;
- }
- else
- {
- /* Cannot happen -- we are using this only to remove an edge
- from branch. */
- abort ();
- }
+ /* Redirecting behaves wrongly with respect to this flag. */
+ irr = snd->flags & EDGE_IRREDUCIBLE_LOOP;
- return false; /* To avoid warning, cannot get here. */
+ if (!redirect_edge_and_branch (e, newdest))
+ return false;
+ src->succ->flags &= ~EDGE_IRREDUCIBLE_LOOP;
+ src->succ->flags |= irr;
+
+ return true;
}
/* Check whether LOOP's body can be duplicated. */
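
The loop_delete_branch_edge rewrite just above is the one hunk where the
conversion reshapes control flow rather than a single guard: the old
code nested all of the useful work under if (src->succ->succ_next),
carried the impossible case as an else { abort (); }, and ended with a
dead return false; kept only to silence warnings.  Asserting the
condition once at the top lets the body be written flat.  A schematic
sketch of the same shape; struct item and unlink_item are hypothetical
stand-ins for the real CFG manipulation:

    struct item { int marked; };
    extern void unlink_item (struct item *);

    /* Before: the work nested under the guard, dead code at the end.  */
    static bool
    remove_marked_old (struct item *it)
    {
      if (it->marked)
        {
          unlink_item (it);
          return true;
        }
      else
        abort ();	/* Cannot happen.  */
      return false;	/* Unreachable; avoids a missing-return warning.  */
    }

    /* After: state the invariant up front, then write the work flat.  */
    static bool
    remove_marked_new (struct item *it)
    {
      gcc_assert (it->marked);
      unlink_item (it);
      return true;
    }
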
int prob_pass_thru, prob_pass_wont_exit, prob_pass_main;
int add_irreducible_flag;
- if (e->dest != loop->header)
- abort ();
- if (ndupl <= 0)
- abort ();
+ gcc_assert (e->dest == loop->header);
+ gcc_assert (ndupl > 0);
if (orig)
{
/* Orig must be edge out of the loop. */
- if (!flow_bb_inside_loop_p (loop, orig->src))
- abort ();
- if (flow_bb_inside_loop_p (loop, orig->dest))
- abort ();
+ gcc_assert (flow_bb_inside_loop_p (loop, orig->src));
+ gcc_assert (!flow_bb_inside_loop_p (loop, orig->dest));
}
bbs = get_loop_body (loop);
/* In case we are doing loop peeling and the loop is in the middle of
irreducible region, the peeled copies will be inside it too. */
add_irreducible_flag = e->flags & EDGE_IRREDUCIBLE_LOOP;
- if (is_latch && add_irreducible_flag)
- abort ();
+ gcc_assert (!is_latch || !add_irreducible_flag);
/* Find edge from latch. */
latch_edge = loop_latch_edge (loop);
scale_act = REG_BR_PROB_BASE - prob_pass_thru;
}
for (i = 0; i < ndupl; i++)
- if (scale_step[i] < 0 || scale_step[i] > REG_BR_PROB_BASE)
- abort ();
- if (scale_main < 0 || scale_main > REG_BR_PROB_BASE
- || scale_act < 0 || scale_act > REG_BR_PROB_BASE)
- abort ();
+ gcc_assert (scale_step[i] >= 0 && scale_step[i] <= REG_BR_PROB_BASE);
+ gcc_assert (scale_main >= 0 && scale_main <= REG_BR_PROB_BASE
+ && scale_act >= 0 && scale_act <= REG_BR_PROB_BASE);
}
/* Loop the new bbs will belong to. */
irred |= (e->flags & EDGE_IRREDUCIBLE_LOOP) != 0;
nentry++;
}
- if (!nentry)
- abort ();
+ gcc_assert (nentry);
if (nentry == 1)
{
for (e = loop->header->pred; e->src == loop->latch; e = e->pred_next);
#ifdef ENABLE_CHECKING
/* Verify that there really are no loop notes. */
for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
- if (NOTE_P (insn)
- && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
- abort ();
+ gcc_assert (!NOTE_P (insn)
+ || NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
#endif
flow_loops_find (&loops, LOOP_TREE);
&& onlyjump_p (insn))
{
pbb = BLOCK_FOR_INSN (insn);
- if (!pbb || !pbb->succ || pbb->succ->succ_next)
- abort ();
+ gcc_assert (pbb && pbb->succ && !pbb->succ->succ_next);
if (!flow_bb_inside_loop_p (loop, pbb->succ->dest))
insn = BB_HEAD (first[loop->num]);
if (really_delete)
{
/* If this insn has already been deleted, something is very wrong. */
- if (INSN_DELETED_P (insn))
- abort ();
+ gcc_assert (!INSN_DELETED_P (insn));
remove_insn (insn);
INSN_DELETED_P (insn) = 1;
}
INSN_UID (insn), e->dest->index, target->index);
if (!redirect_jump (insn, block_label (target), 0))
{
- if (target == EXIT_BLOCK_PTR)
- return NULL;
- abort ();
+ gcc_assert (target == EXIT_BLOCK_PTR);
+ return NULL;
}
}
return NULL;
/* If the insn doesn't go where we think, we're confused. */
- if (JUMP_LABEL (insn) != old_label)
- abort ();
+ gcc_assert (JUMP_LABEL (insn) == old_label);
/* If the substitution doesn't succeed, die. This can happen
if the back end emitted unrecognizable instructions or if
target is exit block on some arches. */
if (!redirect_jump (insn, block_label (target), 0))
{
- if (target == EXIT_BLOCK_PTR)
- return NULL;
- abort ();
+ gcc_assert (target == EXIT_BLOCK_PTR);
+ return NULL;
}
}
{
rtx note;
edge b = unchecked_make_edge (e->src, target, 0);
+ bool redirected;
- if (!redirect_jump (BB_END (e->src), block_label (target), 0))
- abort ();
+ redirected = redirect_jump (BB_END (e->src), block_label (target), 0);
+ gcc_assert (redirected);
+
note = find_reg_note (BB_END (e->src), REG_BR_PROB, NULL_RTX);
if (note)
{
We can't redirect abnormal edge, but we still can split the fallthru
one and create separate abnormal edge to original destination.
This allows bb-reorder to make such edge non-fallthru. */
- if (e->dest != target)
- abort ();
+ gcc_assert (e->dest == target);
abnormal_edge_flags = e->flags & ~(EDGE_FALLTHRU | EDGE_CAN_FALLTHRU);
e->flags &= EDGE_FALLTHRU | EDGE_CAN_FALLTHRU;
}
- else if (!(e->flags & EDGE_FALLTHRU))
- abort ();
- else if (e->src == ENTRY_BLOCK_PTR)
+ else
{
- /* We can't redirect the entry block. Create an empty block at the
- start of the function which we use to add the new jump. */
- edge *pe1;
- basic_block bb = create_basic_block (BB_HEAD (e->dest), NULL, ENTRY_BLOCK_PTR);
-
- /* Change the existing edge's source to be the new block, and add
- a new edge from the entry block to the new block. */
- e->src = bb;
- for (pe1 = &ENTRY_BLOCK_PTR->succ; *pe1; pe1 = &(*pe1)->succ_next)
- if (*pe1 == e)
- {
- *pe1 = e->succ_next;
- break;
- }
- e->succ_next = 0;
- bb->succ = e;
- make_single_succ_edge (ENTRY_BLOCK_PTR, bb, EDGE_FALLTHRU);
+ gcc_assert (e->flags & EDGE_FALLTHRU);
+ if (e->src == ENTRY_BLOCK_PTR)
+ {
+ /* We can't redirect the entry block. Create an empty block
+ at the start of the function which we use to add the new
+ jump. */
+ edge *pe1;
+ basic_block bb
+ = create_basic_block (BB_HEAD (e->dest), NULL, ENTRY_BLOCK_PTR);
+
+ /* Change the existing edge's source to be the new block, and add
+ a new edge from the entry block to the new block. */
+ e->src = bb;
+ for (pe1 = &ENTRY_BLOCK_PTR->succ; *pe1; pe1 = &(*pe1)->succ_next)
+ if (*pe1 == e)
+ {
+ *pe1 = e->succ_next;
+ break;
+ }
+ e->succ_next = 0;
+ bb->succ = e;
+ make_single_succ_edge (ENTRY_BLOCK_PTR, bb, EDGE_FALLTHRU);
+ }
}
if (e->src->succ->succ_next || abnormal_edge_flags)
#ifdef HAVE_return
emit_jump_insn_after (gen_return (), BB_END (jump_block));
#else
- abort ();
+ gcc_unreachable ();
#endif
}
else
rtx before;
/* Abnormal edges cannot be split. */
- if ((edge_in->flags & EDGE_ABNORMAL) != 0)
- abort ();
+ gcc_assert (!(edge_in->flags & EDGE_ABNORMAL));
/* We are going to place the new block in front of edge destination.
Avoid existence of fallthru predecessors. */
jump instruction to target our new block. */
if ((edge_in->flags & EDGE_FALLTHRU) == 0)
{
- if (!redirect_edge_and_branch (edge_in, bb))
- abort ();
+ edge redirected = redirect_edge_and_branch (edge_in, bb);
+ gcc_assert (redirected);
}
else
redirect_edge_succ (edge_in, bb);
{
/* We cannot insert instructions on an abnormal critical edge.
It will be easier to find the culprit if we die now. */
- if ((e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e))
- abort ();
+ gcc_assert (!((e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e)));
if (e->insns.r == NULL_RTX)
start_sequence ();
;
else
{
- /* We'd better be fallthru, or we've lost track of what's what. */
- if ((e->flags & EDGE_FALLTHRU) == 0)
- abort ();
+ /* We'd better be fallthru, or we've lost track of
+ what's what. */
+ gcc_assert (e->flags & EDGE_FALLTHRU);
after = BB_END (bb);
}
to EXIT. */
e = bb->succ;
- if (e->dest != EXIT_BLOCK_PTR
- || e->succ_next != NULL || (e->flags & EDGE_FALLTHRU) == 0)
- abort ();
+ gcc_assert (e->dest == EXIT_BLOCK_PTR
+ && !e->succ_next && (e->flags & EDGE_FALLTHRU));
e->flags &= ~EDGE_FALLTHRU;
emit_barrier_after (last);
if (before)
delete_insn (before);
}
- else if (JUMP_P (last))
- abort ();
+ else
+ gcc_assert (!JUMP_P (last));
/* Mark the basic block for find_sub_basic_blocks. */
bb->aux = &bb->aux;
SET_BIT (blocks, bb->index);
/* Check for forgotten bb->aux values before commit_edge_insertions
call. */
- if (bb->aux != &bb->aux)
- abort ();
+ gcc_assert (bb->aux == &bb->aux);
bb->aux = NULL;
}
find_many_sub_basic_blocks (blocks);
SET_BIT (blocks, bb->index);
/* Check for forgotten bb->aux values before commit_edge_insertions
call. */
- if (bb->aux != &bb->aux)
- abort ();
+ gcc_assert (bb->aux == &bb->aux);
bb->aux = NULL;
}
find_many_sub_basic_blocks (blocks);
from non-local gotos and the like. If there were, we shouldn't
have created the sibcall in the first place. Second, there
should of course never have been a fallthru edge. */
- if (!bb->succ || bb->succ->succ_next)
- abort ();
- if (bb->succ->flags != (EDGE_SIBCALL | EDGE_ABNORMAL))
- abort ();
+ gcc_assert (bb->succ && !bb->succ->succ_next);
+ gcc_assert (bb->succ->flags == (EDGE_SIBCALL | EDGE_ABNORMAL));
return 0;
}
}
}
- if (!bb->succ || bb->succ->succ_next)
- abort ();
+ gcc_assert (bb->succ && !bb->succ->succ_next);
bb->succ->probability = REG_BR_PROB_BASE;
bb->succ->count = bb->count;
&& label_is_jump_target_p (BB_HEAD (e->dest),
BB_END (src)))
{
+ edge redirected;
+
if (dump_file)
fprintf (dump_file, "Fallthru edge unified with branch "
"%i->%i redirected to %i\n",
e->src->index, e->dest->index, dest->index);
e->flags &= ~EDGE_FALLTHRU;
- if (!redirect_branch_edge (e, dest))
- abort ();
+ redirected = redirect_branch_edge (e, dest);
+ gcc_assert (redirected);
e->flags |= EDGE_FALLTHRU;
e->src->flags |= BB_DIRTY;
return e;
ret = redirect_branch_edge (e, dest);
/* We don't want simplejumps in the insn stream during cfglayout. */
- if (simplejump_p (BB_END (src)))
- abort ();
+ gcc_assert (!simplejump_p (BB_END (src)));
src->flags |= BB_DIRTY;
return ret;
static basic_block
cfg_layout_redirect_edge_and_branch_force (edge e, basic_block dest)
{
- if (!cfg_layout_redirect_edge_and_branch (e, dest))
- abort ();
+ edge redirected = cfg_layout_redirect_edge_and_branch (e, dest);
+
+ gcc_assert (redirected);
return NULL;
}
cfg_layout_merge_blocks (basic_block a, basic_block b)
{
#ifdef ENABLE_CHECKING
- if (!cfg_layout_can_merge_blocks_p (a, b))
- abort ();
+ gcc_assert (cfg_layout_can_merge_blocks_p (a, b));
#endif
/* If there was a CODE_LABEL beginning B, delete it. */
it cleaned up. */
if (JUMP_P (BB_END (a)))
try_redirect_by_replacing_jump (a->succ, b, true);
- if (JUMP_P (BB_END (a)))
- abort ();
+ gcc_assert (!JUMP_P (BB_END (a)));
/* Possible line number notes should appear in between. */
if (b->rbi->header)
/* Skip possible DELETED_LABEL insn. */
if (!NOTE_INSN_BASIC_BLOCK_P (first))
first = NEXT_INSN (first);
- if (!NOTE_INSN_BASIC_BLOCK_P (first))
- abort ();
+ gcc_assert (NOTE_INSN_BASIC_BLOCK_P (first));
BB_HEAD (b) = NULL;
delete_insn (first);
}
/* Skip possible DELETED_LABEL insn. */
if (!NOTE_INSN_BASIC_BLOCK_P (insn))
insn = NEXT_INSN (insn);
- if (!NOTE_INSN_BASIC_BLOCK_P (insn))
- abort ();
+ gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
BB_HEAD (b) = NULL;
BB_END (a) = BB_END (b);
delete_insn (insn);
#ifdef ENABLE_CHECKING
if (split_at_insn == BB_END (bb))
for (e = bb->succ; e; e = e->succ_next)
- if (e->dest == EXIT_BLOCK_PTR)
- abort ();
+ gcc_assert (e->dest != EXIT_BLOCK_PTR);
#endif
/* Note that the following may create a new basic block
{
struct cgraph_node key, *node, **slot;
- if (TREE_CODE (decl) != FUNCTION_DECL)
- abort ();
+ gcc_assert (TREE_CODE (decl) == FUNCTION_DECL);
if (!cgraph_hash)
cgraph_hash = htab_create_ggc (10, hash_node, eq_node, NULL);
struct cgraph_edge *e;
for (e = caller->callees; e; e = e->next_callee)
- if (e->call_expr == call_expr)
- abort ();
+ gcc_assert (e->call_expr != call_expr);
#endif
- if (TREE_CODE (call_expr) != CALL_EXPR)
- abort ();
+ gcc_assert (TREE_CODE (call_expr) == CALL_EXPR);
if (!DECL_SAVED_TREE (callee->decl))
edge->inline_failed = N_("function body not available");
for (edge = &e->callee->callers; *edge && *edge != e;
edge = &((*edge)->next_caller))
continue;
- if (!*edge)
- abort ();
+ gcc_assert (*edge);
*edge = (*edge)->next_caller;
for (edge2 = &e->caller->callees; *edge2 && *edge2 != e;
edge2 = &(*edge2)->next_callee)
continue;
- if (!*edge2)
- abort ();
+ gcc_assert (*edge2);
*edge2 = (*edge2)->next_callee;
}
for (edge = &e->callee->callers; *edge && *edge != e;
edge = &((*edge)->next_caller))
continue;
- if (!*edge)
- abort ();
+ gcc_assert (*edge);
*edge = (*edge)->next_caller;
e->callee = n;
e->next_caller = n->callers;
cgraph_local_info (tree decl)
{
struct cgraph_node *node;
- if (TREE_CODE (decl) != FUNCTION_DECL)
- abort ();
+
+ gcc_assert (TREE_CODE (decl) == FUNCTION_DECL);
node = cgraph_node (decl);
return &node->local;
}
cgraph_global_info (tree decl)
{
struct cgraph_node *node;
- if (TREE_CODE (decl) != FUNCTION_DECL || !cgraph_global_info_ready)
- abort ();
+
+ gcc_assert (TREE_CODE (decl) == FUNCTION_DECL && cgraph_global_info_ready);
node = cgraph_node (decl);
return &node->global;
}
cgraph_rtl_info (tree decl)
{
struct cgraph_node *node;
- if (TREE_CODE (decl) != FUNCTION_DECL)
- abort ();
+
+ gcc_assert (TREE_CODE (decl) == FUNCTION_DECL);
node = cgraph_node (decl);
if (decl != current_function_decl
&& !TREE_ASM_WRITTEN (node->decl))
{
struct cgraph_varpool_node key, *node, **slot;
- if (!DECL_P (decl) || TREE_CODE (decl) == FUNCTION_DECL)
- abort ();
+ gcc_assert (DECL_P (decl) && TREE_CODE (decl) != FUNCTION_DECL);
if (!cgraph_varpool_hash)
cgraph_varpool_hash = htab_create_ggc (10, hash_varpool_node,
case can be sort-of legitimately seen with real function
redefinition errors. I would argue that the front end should
never present us with such a case, but don't enforce that for now. */
- if (node->output)
- abort ();
+ gcc_assert (!node->output);
/* Reset our data structures so we can analyze the function again. */
memset (&node->local, 0, sizeof (node->local));
if (!DECL_SAVED_TREE (decl))
continue;
- if (node->analyzed || !node->reachable || !DECL_SAVED_TREE (decl))
- abort ();
+ gcc_assert (!node->analyzed && node->reachable);
+ gcc_assert (DECL_SAVED_TREE (decl));
cgraph_analyze_function (node);
{
tree decl = node->decl;
struct cgraph_edge *e;
- if (node->output)
- abort ();
+
+ gcc_assert (!node->output);
for (e = node->callers; e; e = e->next_caller)
if (e->inline_failed)
&& !TREE_ASM_WRITTEN (decl)
&& !DECL_EXTERNAL (decl))
node->output = 1;
- /* We should've reclaimed all functions that are not needed. */
- else if (!node->global.inlined_to && DECL_SAVED_TREE (decl)
- && !DECL_EXTERNAL (decl))
- {
- dump_cgraph_node (stderr, node);
- abort ();
- }
+ else
+ /* We should've reclaimed all functions that are not needed. */
+ gcc_assert (node->global.inlined_to || !DECL_SAVED_TREE (decl)
+ || DECL_EXTERNAL (decl));
}
}
tree decl = node->decl;
/* We ought to not compile any inline clones. */
- if (node->global.inlined_to)
- abort ();
+ gcc_assert (!node->global.inlined_to);
if (flag_unit_at_a_time)
announce_function (decl);
/* Make sure that BE didn't give up on compiling. */
/* ??? Can happen with nested function of extern inline. */
- if (!TREE_ASM_WRITTEN (node->decl))
- abort ();
+ gcc_assert (TREE_ASM_WRITTEN (node->decl));
current_function_decl = NULL;
if (DECL_SAVED_TREE (node->decl)
fprintf (cgraph_dump_file, "\nReclaiming functions:");
#ifdef ENABLE_CHECKING
for (node = cgraph_nodes; node; node = node->next)
- if (node->aux)
- abort ();
+ gcc_assert (!node->aux);
#endif
for (node = cgraph_nodes; node; node = node->next)
if (node->needed && !node->global.inlined_to
node->aux = first;
first = node;
}
- else if (node->aux)
- abort ();
+ else
+ gcc_assert (!node->aux);
/* Perform reachability analysis. As a special case do not consider
extern inline functions not inlined as live because we won't output
&& duplicate
&& flag_unit_at_a_time)
{
- if (e->callee->global.inlined_to)
- abort ();
+ gcc_assert (!e->callee->global.inlined_to);
if (!DECL_EXTERNAL (e->callee->decl))
overall_insns -= e->callee->global.insns, nfunctions_inlined++;
duplicate = 0;
int old_insns = 0, new_insns = 0;
struct cgraph_node *to = NULL, *what;
- if (!e->inline_failed)
- abort ();
+ gcc_assert (e->inline_failed);
e->inline_failed = NULL;
if (!e->callee->global.inlined && flag_unit_at_a_time)
old_insns = e->caller->global.insns;
new_insns = cgraph_estimate_size_after_inlining (1, e->caller,
what);
- if (new_insns < 0)
- abort ();
+ gcc_assert (new_insns >= 0);
to = e->caller;
to->global.insns = new_insns;
}
- if (what->global.inlined_to != to)
- abort ();
+ gcc_assert (what->global.inlined_to == to);
overall_insns += new_insns - old_insns;
ncalls_inlined++;
}
cgraph_mark_inline_edge (e);
if (e == edge)
edge = next;
- times ++;
+ times++;
}
}
- if (!times)
- abort ();
+ gcc_assert (times);
return edge;
}
cgraph_mark_functions_to_output ();
order_pos = cgraph_postorder (order);
- if (order_pos != cgraph_n_nodes)
- abort ();
+ gcc_assert (order_pos == cgraph_n_nodes);
/* Garbage collector may remove inline clones we eliminate during
optimization. So we must be sure to not reference them. */
node = order[i];
if (node->output)
{
- if (!node->reachable)
- abort ();
+ gcc_assert (node->reachable);
node->output = 0;
cgraph_expand_function (node);
}
DECL_SOURCE_LOCATION (decl) = input_location;
cfun->function_end_locus = input_location;
- if (which == 'I')
- DECL_STATIC_CONSTRUCTOR (decl) = 1;
- else if (which == 'D')
- DECL_STATIC_DESTRUCTOR (decl) = 1;
- else
- abort ();
+ switch (which)
+ {
+ case 'I':
+ DECL_STATIC_CONSTRUCTOR (decl) = 1;
+ break;
+ case 'D':
+ DECL_STATIC_DESTRUCTOR (decl) = 1;
+ break;
+ default:
+ gcc_unreachable ();
+ }
gimplify_function_tree (decl);
{
/* Sanity check that we're replacing oldval with a CONST_INT
that is a valid sign-extension for the original mode. */
- if (INTVAL (newval) != trunc_int_for_mode (INTVAL (newval),
- GET_MODE (oldval)))
- abort ();
+ gcc_assert (INTVAL (newval)
+ == trunc_int_for_mode (INTVAL (newval), GET_MODE (oldval)));
/* Replacing the operand of a SUBREG or a ZERO_EXTEND with a
CONST_INT is not valid, because after the replacement, the
when do_SUBST is called to replace the operand thereof, so we
perform this test on oldval instead, checking whether an
invalid replacement took place before we got here. */
- if ((GET_CODE (oldval) == SUBREG
- && GET_CODE (SUBREG_REG (oldval)) == CONST_INT)
- || (GET_CODE (oldval) == ZERO_EXTEND
- && GET_CODE (XEXP (oldval, 0)) == CONST_INT))
- abort ();
+ gcc_assert (!(GET_CODE (oldval) == SUBREG
+ && GET_CODE (SUBREG_REG (oldval)) == CONST_INT));
+ gcc_assert (!(GET_CODE (oldval) == ZERO_EXTEND
+ && GET_CODE (XEXP (oldval, 0)) == CONST_INT));
}
if (undobuf.frees)
{
/* We don't handle the case of the target word being wider
than a host wide int. */
- if (HOST_BITS_PER_WIDE_INT < BITS_PER_WORD)
- abort ();
+ gcc_assert (HOST_BITS_PER_WIDE_INT >= BITS_PER_WORD);
lo &= ~(UWIDE_SHIFT_LEFT_BY_BITS_PER_WORD (1) - 1);
lo |= (INTVAL (SET_SRC (PATTERN (i3)))
else
/* We don't handle the case of the higher word not fitting
entirely in either hi or lo. */
- abort ();
+ gcc_unreachable ();
combine_merges++;
subst_insn = i3;
{
x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
new, GET_MODE (XEXP (x, 0)));
- if (! x)
- abort ();
+ gcc_assert (x);
}
else
SUBST (XEXP (x, i), new);
rtx op1 = XEXP (x, 1);
int len;
- if (GET_CODE (op1) != PARALLEL)
- abort ();
+ gcc_assert (GET_CODE (op1) == PARALLEL);
len = XVECLEN (op1, 0);
if (len == 1
&& GET_CODE (XVECEXP (op1, 0, 0)) == CONST_INT
break;
default:
- abort ();
+ gcc_unreachable ();
}
return x;
case REG_NON_LOCAL_GOTO:
if (JUMP_P (i3))
place = i3;
- else if (i2 && JUMP_P (i2))
- place = i2;
else
- abort ();
+ {
+ gcc_assert (i2 && JUMP_P (i2));
+ place = i2;
+ }
break;
case REG_EH_REGION:
place = i3;
else if (i2 && CALL_P (i2))
place = i2;
- else if (flag_non_call_exceptions)
+ else
{
+ gcc_assert (flag_non_call_exceptions);
if (may_trap_p (i3))
place = i3;
else if (i2 && may_trap_p (i2))
can now prove that the instructions can't trap. Drop the
note in this case. */
}
- else
- abort ();
break;
case REG_ALWAYS_RETURN:
possible for both I2 and I3 to be a call. */
if (CALL_P (i3))
place = i3;
- else if (i2 && CALL_P (i2))
- place = i2;
else
- abort ();
+ {
+ gcc_assert (i2 && CALL_P (i2));
+ place = i2;
+ }
break;
case REG_UNUSED:
a JUMP_LABEL instead or decrement LABEL_NUSES. */
if (place && JUMP_P (place))
{
- if (!JUMP_LABEL (place))
+ rtx label = JUMP_LABEL (place);
+
+ if (!label)
JUMP_LABEL (place) = XEXP (note, 0);
- else if (JUMP_LABEL (place) != XEXP (note, 0))
- abort ();
- else if (LABEL_P (JUMP_LABEL (place)))
- LABEL_NUSES (JUMP_LABEL (place))--;
+ else
+ {
+ gcc_assert (label == XEXP (note, 0));
+ if (LABEL_P (label))
+ LABEL_NUSES (label)--;
+ }
place = 0;
}
if (place2 && JUMP_P (place2))
{
- if (!JUMP_LABEL (place2))
+ rtx label = JUMP_LABEL (place2);
+
+ if (!label)
JUMP_LABEL (place2) = XEXP (note, 0);
- else if (JUMP_LABEL (place2) != XEXP (note, 0))
- abort ();
- else if (LABEL_P (JUMP_LABEL (place2)))
- LABEL_NUSES (JUMP_LABEL (place2))--;
+ else
+ {
+ gcc_assert (label == XEXP (note, 0));
+ if (LABEL_P (label))
+ LABEL_NUSES (label)--;
+ }
place2 = 0;
}
break;
default:
/* Any other notes should not be present at this point in the
compilation. */
- abort ();
+ gcc_unreachable ();
}
if (place)
&& NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == USE)
insn = NEXT_INSN (insn);
- if (INSN_UID (insn) > max_uid_cuid)
- abort ();
+ gcc_assert (INSN_UID (insn) <= max_uid_cuid);
return INSN_CUID (insn);
}
void **slot;
/* A reg cannot conflict with itself. */
- if (reg1 == reg2)
- abort ();
+ gcc_assert (reg1 != reg2);
dummy.smaller = smaller;
dummy.larger = larger;
is the interesting one. */
if (reg1 == context->reg)
reg = reg2;
- else if (reg2 == context->reg)
- reg = reg1;
else
- abort ();
+ {
+ gcc_assert (reg2 == context->reg);
+ reg = reg1;
+ }
/* Print the conflict. */
fprintf (context->fp, " %d", reg);
enum machine_mode mode = mode_for_size (gcov_size, MODE_INT, 0);
rtx ref;
- if (no >= fn_n_ctrs[counter] - fn_b_ctrs[counter])
- abort ();
+ gcc_assert (no < fn_n_ctrs[counter] - fn_b_ctrs[counter]);
no += prg_n_ctrs[counter] + fn_b_ctrs[counter];
if (!ctr_labels[counter])
{
{
tree domain_type = TYPE_DOMAIN (TREE_TYPE (tree_ctr_tables[counter]));
- if (no >= fn_n_ctrs[counter] - fn_b_ctrs[counter])
- abort ();
+ gcc_assert (no < fn_n_ctrs[counter] - fn_b_ctrs[counter]);
no += prg_n_ctrs[counter] + fn_b_ctrs[counter];
/* "no" here is an array index, scaled to bytes later. */
{
int y;
unsigned seed;
+ int scan;
for (y = 1; y < 9; y++)
if (!(string[i + y] >= '0' && string[i + y] <= '9')
break;
if (y != 18)
continue;
- if (!sscanf (string + i + 10, "%X", &seed))
- abort ();
+ scan = sscanf (string + i + 10, "%X", &seed);
+ gcc_assert (scan);
if (seed != crc32_string (0, flag_random_seed))
continue;
string = dup = xstrdup (string);
struct qty_table_elem *ent;
struct reg_eqv_elem *eqv;
- if (next_qty >= max_qty)
- abort ();
+ gcc_assert (next_qty < max_qty);
q = REG_QTY (reg) = next_qty++;
ent = &qty_table[q];
ent = &qty_table[q];
/* Nothing should become eqv until it has a "non-invalid" qty number. */
- if (! REGNO_QTY_VALID_P (old))
- abort ();
+ gcc_assert (REGNO_QTY_VALID_P (old));
REG_QTY (new) = q;
firstr = ent->first_reg;
/* If X is a register and we haven't made a quantity for it,
something is wrong. */
- if (REG_P (x) && ! REGNO_QTY_VALID_P (REGNO (x)))
- abort ();
+ gcc_assert (!REG_P (x) || REGNO_QTY_VALID_P (REGNO (x)));
/* If X is a hard register, show it is being put in the table. */
if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
return;
default:
- abort ();
+ gcc_unreachable ();
}
}
\f
fmt = GET_RTX_FORMAT (code);
for (; i >= 0; i--)
{
- if (fmt[i] == 'e')
+ switch (fmt[i])
{
+ case 'e':
/* If we are about to do the last recursive call
needed at this level, change it into iteration.
This function is called enough to be worth it. */
hash += hash_rtx (XEXP (x, i), 0, do_not_record_p,
hash_arg_in_memory_p, have_reg_qty);
- }
+ break;
- else if (fmt[i] == 'E')
- for (j = 0; j < XVECLEN (x, i); j++)
- {
+ case 'E':
+ for (j = 0; j < XVECLEN (x, i); j++)
hash += hash_rtx (XVECEXP (x, i, j), 0, do_not_record_p,
hash_arg_in_memory_p, have_reg_qty);
- }
+ break;
- else if (fmt[i] == 's')
- hash += hash_rtx_string (XSTR (x, i));
- else if (fmt[i] == 'i')
- hash += (unsigned int) XINT (x, i);
- else if (fmt[i] == '0' || fmt[i] == 't')
- /* Unused. */
- ;
- else
- abort ();
+ case 's':
+ hash += hash_rtx_string (XSTR (x, i));
+ break;
+
+ case 'i':
+ hash += (unsigned int) XINT (x, i);
+ break;
+
+ case '0': case 't':
+ /* Unused. */
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
}
return hash;
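
hash_rtx above (and cselib_hash_rtx further down) gets a slightly larger
rewrite in the same spirit: the chain of if (fmt[i] == 'e') ... else if
... else abort (); over RTL format letters becomes a switch whose
default is gcc_unreachable (), making the set of handled letters
explicit.  Stripped of the hashing work, the resulting shape is:

    switch (fmt[i])
      {
      case 'e':		/* rtx operand: hash recursively.  */
        break;
      case 'E':		/* rtx vector: hash each element.  */
        break;
      case 's':		/* String operand.  */
        break;
      case 'i':		/* Integer operand.  */
        break;
      case '0': case 't':	/* Unused in the hash.  */
        break;
      default:
        gcc_unreachable ();
      }
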
break;
default:
- abort ();
+ gcc_unreachable ();
}
}
}
}
- if (next_qty > max_qty)
- abort ();
+ gcc_assert (next_qty <= max_qty);
free (qty_table + max_reg);
return;
case INSN_LIST:
- abort ();
+ gcc_unreachable ();
default:
break;
if (mode != comp_mode)
{
- if (! can_change_mode)
- abort ();
+ gcc_assert (can_change_mode);
mode = comp_mode;
PUT_MODE (cc_src, mode);
}
submode = cse_cc_succs (e->dest, cc_reg, cc_src, false);
if (submode != VOIDmode)
{
- if (submode != mode)
- abort ();
+ gcc_assert (submode == mode);
found_equiv = true;
can_change_mode = false;
}
mode = cse_cc_succs (bb, cc_reg, cc_src, true);
if (mode != VOIDmode)
{
- if (mode != GET_MODE (cc_src))
- abort ();
+ gcc_assert (mode == GET_MODE (cc_src));
if (mode != orig_mode)
{
rtx newreg = gen_rtx_REG (mode, REGNO (cc_reg));
rtx x = (rtx) x_arg;
enum machine_mode mode = GET_MODE (x);
- if (GET_CODE (x) == CONST_INT
- || (mode == VOIDmode && GET_CODE (x) == CONST_DOUBLE))
- abort ();
+ gcc_assert (GET_CODE (x) != CONST_INT
+ && (mode != VOIDmode || GET_CODE (x) != CONST_DOUBLE));
+
if (mode != GET_MODE (v->u.val_rtx))
return 0;
htab_traverse (hash_table, discard_useless_values, 0);
- if (n_useless_values != 0)
- abort ();
+ gcc_assert (!n_useless_values);
}
/* Return the mode in which a register was last set. If X is not a
contain anything but integers and other rtx's,
except for within LABEL_REFs and SYMBOL_REFs. */
default:
- abort ();
+ gcc_unreachable ();
}
}
return 1;
if (GET_CODE (x) != CONST_INT
&& (GET_CODE (x) != CONST_DOUBLE || GET_MODE (x) != VOIDmode))
return x;
- if (mode == VOIDmode)
- abort ();
+ gcc_assert (mode != VOIDmode);
return gen_rtx_CONST (mode, x);
}
fmt = GET_RTX_FORMAT (code);
for (; i >= 0; i--)
{
- if (fmt[i] == 'e')
+ switch (fmt[i])
{
- rtx tem = XEXP (x, i);
- unsigned int tem_hash = cselib_hash_rtx (tem, 0, create);
-
- if (tem_hash == 0)
- return 0;
-
- hash += tem_hash;
- }
- else if (fmt[i] == 'E')
- for (j = 0; j < XVECLEN (x, i); j++)
+ case 'e':
{
- unsigned int tem_hash = cselib_hash_rtx (XVECEXP (x, i, j), 0, create);
-
+ rtx tem = XEXP (x, i);
+ unsigned int tem_hash = cselib_hash_rtx (tem, 0, create);
+
if (tem_hash == 0)
return 0;
-
+
hash += tem_hash;
}
- else if (fmt[i] == 's')
- {
- const unsigned char *p = (const unsigned char *) XSTR (x, i);
+ break;
+ case 'E':
+ for (j = 0; j < XVECLEN (x, i); j++)
+ {
+ unsigned int tem_hash
+ = cselib_hash_rtx (XVECEXP (x, i, j), 0, create);
+
+ if (tem_hash == 0)
+ return 0;
+
+ hash += tem_hash;
+ }
+ break;
+
+ case 's':
+ {
+ const unsigned char *p = (const unsigned char *) XSTR (x, i);
+
+ if (p)
+ while (*p)
+ hash += *p++;
+ break;
+ }
+
+ case 'i':
+ hash += XINT (x, i);
+ break;
- if (p)
- while (*p)
- hash += *p++;
+ case '0':
+ case 't':
+ /* Unused. */
+ break;
+
+ default:
+ gcc_unreachable ();
}
- else if (fmt[i] == 'i')
- hash += XINT (x, i);
- else if (fmt[i] == '0' || fmt[i] == 't')
- /* unused */;
- else
- abort ();
}
return hash ? hash : 1 + (unsigned int) GET_CODE (x);
{
cselib_val *e = pool_alloc (cselib_val_pool);
-#ifdef ENABLE_CHECKING
- if (value == 0)
- abort ();
-#endif
+ gcc_assert (value);
e->value = value;
/* We use custom method to allocate this RTL construct because it accounts
if (GET_MODE (l->elt->u.val_rtx) == GET_MODE (x))
return l->elt->u.val_rtx;
- abort ();
+ gcc_unreachable ();
case MEM:
e = cselib_lookup_mem (x, 0);
unsigned int i;
/* If we see pseudos after reload, something is _wrong_. */
- if (reload_completed && regno >= FIRST_PSEUDO_REGISTER
- && reg_renumber[regno] >= 0)
- abort ();
+ gcc_assert (!reload_completed || regno < FIRST_PSEUDO_REGISTER
+ || reg_renumber[regno] < 0);
/* Determine the range of registers that must be invalidated. For
pseudos, only REGNO is affected. For hard regs, we must take MODE
if they contain values that overlap REGNO. */
if (regno < FIRST_PSEUDO_REGISTER)
{
- if (mode == VOIDmode)
- abort ();
+ gcc_assert (mode != VOIDmode);
if (regno < max_value_regs)
i = 0;
}
else
{
- if (REG_VALUES (dreg)->elt == 0)
- REG_VALUES (dreg)->elt = src_elt;
- else
- /* The register should have been invalidated. */
- abort ();
+ /* The register should have been invalidated. */
+ gcc_assert (REG_VALUES (dreg)->elt == 0);
+ REG_VALUES (dreg)->elt = src_elt;
}
if (src_elt->locs == 0)