+2004-09-08 Nathan Sidwell <nathan@codesourcery.com>
+
+ * gcse.c (INSN_CUID, insert_set_in_table, find_avail_set,
+ cprop_insn, do_local_cprop, local_cprop_pass, find_bypass_set,
+ process_insert_insn, insert_insn_end_bb, pre_insert_copy_insn,
+ hoist_code, extract_mentioned_regs_helper, compute_store_table,
+ insert_store): Use gcc_assert and gcc_unreachable.
+ * ggc-common.c (ggc_splay_alloc, ggc_splay_dont_free,
+ gt_pch_note_object, gt_pch_note_reorder, relocate_ptrs,
+ ggc_record_overhead): Likewise.
+ * ggc-page.c (alloc_page, free_page, ggc_set_mark, ggc_marked_p,
+ init_ggc, ggc_push_context, ggc_recalculate_in_use_p,
+ ggc_pop_context, clear_marks, validate_free_objects,
+ ggc_pch_read): Likewise.
+ * ggc-zone.c (ggc_allocated_p, free_chunk, ggc_set_mark,
+ ggc_marked_p, ggc_get_size, init_ggc, destroy_ggc_zone,
+ ggc_push_context, check_cookies, ggc_collect,
+ ggc_print_statistics): Likewise.
+ * gimple-low.c (lower_function_body, lower_stmt,
+ lower_bind_expr): Likewise.
+ * gimplify.c (gimple_tree_eq, push_gimplify_context,
+ pop_gimplify_context, gimple_pop_condition, create_tmp_var,
+ declare_tmp_vars, gimple_add_tmp_var, annotate_all_with_locus,
+ mostly_copy_tree_r, gimplify_return_expr, gimplify_switch_expr,
+ gimplify_case_label_expr, gimplify_exit_block_expr,
+ canonicalize_component_ref, gimplify_compound_lval,
+ gimplify_self_mod_expr, gimplify_call_expr,
+ gimplify_init_ctor_eval, gimplify_init_constructor,
+ gimplify_modify_expr, gimplify_save_expr, gimplify_target_expr,
+ gimplify_expr, check_pointer_types_r,
+ force_gimple_operand): Likewise.
+ * global.c (global_alloc, build_insn_chain): Likewise.
+ * graph.c (clean_graph_dump_file,
+ finish_graph_dump_file): Likewise.
+ * gcov-io.c (gcov_open): Use GCOV_CHECK.
+
2004-09-09 Richard Sandiford <rsandifo@redhat.com>
* config/frv/frv.c (acc_operand, accg_operand): Use REGNO.
2004-09-09 Jan Hubicka <jh@suse.cz>
middle-end/17128
- * tree-inline.c (expand_call_inline): Make overactive sanity check happy.
+ * tree-inline.c (expand_call_inline): Make overactive sanity check
+ happy.
2004-09-09 Jan Hubicka <jh@suse.cz>
* config/i386/xmmintrin.h: Include <mm_malloc.h>.
2004-08-03 H.J. Lu <hongjiu.lu@intel.com>
- Tanguy FautrÃ\83 <tfautre@pandora.be>
+ Tanguy Fautré <tfautre@pandora.be>
* config/i386/pmm_malloc.h: New file.
s_flock.l_pid = getpid ();
#endif
- if (gcov_var.file)
- abort ();
+ GCOV_CHECK (!gcov_var.file);
gcov_var.start = 0;
gcov_var.offset = gcov_var.length = 0;
gcov_var.overread = -1u;
/* Get the cuid of an insn. */
#ifdef ENABLE_CHECKING
-#define INSN_CUID(INSN) (INSN_UID (INSN) > max_uid ? (abort (), 0) : uid_cuid[INSN_UID (INSN)])
+#define INSN_CUID(INSN) \
+ (gcc_assert (INSN_UID (INSN) <= max_uid), uid_cuid[INSN_UID (INSN)])
#else
#define INSN_CUID(INSN) (uid_cuid[INSN_UID (INSN)])
#endif
struct expr *cur_expr, *last_expr = NULL;
struct occr *cur_occr, *last_occr = NULL;
- if (GET_CODE (x) != SET
- || ! REG_P (SET_DEST (x)))
- abort ();
+ gcc_assert (GET_CODE (x) == SET && REG_P (SET_DEST (x)));
hash = hash_set (REGNO (SET_DEST (x)), table->size);
if (set == 0)
break;
- if (GET_CODE (set->expr) != SET)
- abort ();
+ gcc_assert (GET_CODE (set->expr) == SET);
src = SET_SRC (set->expr);
pat = set->expr;
/* ??? We might be able to handle PARALLELs. Later. */
- if (GET_CODE (pat) != SET)
- abort ();
+ gcc_assert (GET_CODE (pat) == SET);
src = SET_SRC (pat);
or fix delete_trivially_dead_insns to preserve the setting insn,
 or make it delete the REG_EQUAL note, and fix up all passes that
require the REG_EQUAL note there. */
- if (!adjust_libcall_notes (x, newcnst, insn, libcall_sp))
- abort ();
+ bool adjusted;
+
+ adjusted = adjust_libcall_notes (x, newcnst, insn, libcall_sp);
+ gcc_assert (adjusted);
+
if (gcse_file != NULL)
{
fprintf (gcse_file, "LOCAL CONST-PROP: Replacing reg %d in ",
if (note)
{
- if (libcall_sp == libcall_stack)
- abort ();
+ gcc_assert (libcall_sp != libcall_stack);
*--libcall_sp = XEXP (note, 0);
}
note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
if (set == 0)
break;
- if (GET_CODE (set->expr) != SET)
- abort ();
+ gcc_assert (GET_CODE (set->expr) == SET);
src = SET_SRC (set->expr);
if (gcse_constant_p (src))
/* Otherwise, make a new insn to compute this expression and make sure the
insn will be recognized (this also adds any needed CLOBBERs). Copy the
expression to make sure we don't have any sharing issues. */
- else if (insn_invalid_p (emit_insn (gen_rtx_SET (VOIDmode, reg, exp))))
- abort ();
+ else
+ {
+ rtx insn = emit_insn (gen_rtx_SET (VOIDmode, reg, exp));
+
+ gcc_assert (!insn_invalid_p (insn));
+ }
+
pat = get_insns ();
end_sequence ();
rtx pat, pat_end;
pat = process_insert_insn (expr);
- if (pat == NULL_RTX || ! INSN_P (pat))
- abort ();
+ gcc_assert (pat && INSN_P (pat));
pat_end = pat;
while (NEXT_INSN (pat_end) != NULL_RTX)
/* It should always be the case that we can put these instructions
anywhere in the basic block with performing PRE optimizations.
Check this. */
- if (NONJUMP_INSN_P (insn) && pre
- && !TEST_BIT (antloc[bb->index], expr->bitmap_index)
- && !TEST_BIT (transp[bb->index], expr->bitmap_index))
- abort ();
+ gcc_assert (!NONJUMP_INSN_P (insn) || !pre
+ || TEST_BIT (antloc[bb->index], expr->bitmap_index)
+ || TEST_BIT (transp[bb->index], expr->bitmap_index));
/* If this is a jump table, then we can't insert stuff here. Since
we know the previous real insn must be the tablejump, we insert
anywhere in the basic block with performing PRE optimizations.
Check this. */
- if (pre
- && !TEST_BIT (antloc[bb->index], expr->bitmap_index)
- && !TEST_BIT (transp[bb->index], expr->bitmap_index))
- abort ();
+ gcc_assert (!pre
+ || TEST_BIT (antloc[bb->index], expr->bitmap_index)
+ || TEST_BIT (transp[bb->index], expr->bitmap_index));
/* Since different machines initialize their parameter registers
in different orders, assume nothing. Collect the set of all
int i;
/* This block matches the logic in hash_scan_insn. */
- if (GET_CODE (pat) == SET)
- set = pat;
- else if (GET_CODE (pat) == PARALLEL)
+ switch (GET_CODE (pat))
{
+ case SET:
+ set = pat;
+ break;
+
+ case PARALLEL:
/* Search through the parallel looking for the set whose
source was the expression that we're interested in. */
set = NULL_RTX;
break;
}
}
+ break;
+
+ default:
+ gcc_unreachable ();
}
- else
- abort ();
if (REG_P (SET_DEST (set)))
{
while (BLOCK_FOR_INSN (occr->insn) != dominated && occr)
occr = occr->next;
- /* Should never happen. */
- if (!occr)
- abort ();
-
+ gcc_assert (occr);
insn = occr->insn;
-
set = single_set (insn);
- if (! set)
- abort ();
+ gcc_assert (set);
/* Create a pseudo-reg to store the result of reaching
expressions into. Get the mode for the new pseudo
case POST_DEC:
case POST_INC:
/* We do not run this function with arguments having side effects. */
- abort ();
+ gcc_unreachable ();
case PC:
case CC0: /*FIXME*/
#ifdef ENABLE_CHECKING
/* last_set_in should now be all-zero. */
for (regno = 0; regno < max_gcse_regno; regno++)
- if (last_set_in[regno] != 0)
- abort ();
+ gcc_assert (!last_set_in[regno]);
#endif
/* Clear temporary marks. */
if (!(tmp->flags & EDGE_FAKE))
{
int index = EDGE_INDEX (edge_list, tmp->src, tmp->dest);
- if (index == EDGE_INDEX_NO_EDGE)
- abort ();
+
+ gcc_assert (index != EDGE_INDEX_NO_EDGE);
if (! TEST_BIT (pre_insert_map[index], expr->index))
break;
}
void *
ggc_splay_alloc (int sz, void *nl)
{
- if (nl != NULL)
- abort ();
+ gcc_assert (!nl);
return ggc_alloc (sz);
}
void
ggc_splay_dont_free (void * x ATTRIBUTE_UNUSED, void *nl)
{
- if (nl != NULL)
- abort ();
+ gcc_assert (!nl);
}
/* Print statistics that are independent of the collector in use. */
INSERT);
if (*slot != NULL)
{
- if ((*slot)->note_ptr_fn != note_ptr_fn
- || (*slot)->note_ptr_cookie != note_ptr_cookie)
- abort ();
+ gcc_assert ((*slot)->note_ptr_fn == note_ptr_fn
+ && (*slot)->note_ptr_cookie == note_ptr_cookie);
return 0;
}
return;
data = htab_find_with_hash (saving_htab, obj, POINTER_HASH (obj));
- if (data == NULL
- || data->note_ptr_cookie != note_ptr_cookie)
- abort ();
+ gcc_assert (data && data->note_ptr_cookie == note_ptr_cookie);
data->reorder_fn = reorder_fn;
}
return;
result = htab_find_with_hash (saving_htab, *ptr, POINTER_HASH (*ptr));
- if (result == NULL)
- abort ();
+ gcc_assert (result);
*ptr = result->new_addr;
}
if (!ptr_hash)
ptr_hash = htab_create (10, hash_ptr, eq_ptr, NULL);
slot = htab_find_slot_with_hash (ptr_hash, ptr, htab_hash_pointer (ptr), INSERT);
- if (*slot)
- abort ();
+ gcc_assert (!*slot);
*slot = p;
loc->times++;
enda -= G.pagesize;
tail_slop += G.pagesize;
}
- if (tail_slop < sizeof (page_group))
- abort ();
+ gcc_assert (tail_slop >= sizeof (page_group));
group = (page_group *)enda;
tail_slop -= sizeof (page_group);
}
if (G.by_depth_in_use > 1)
{
page_entry *top = G.by_depth[G.by_depth_in_use-1];
-
- /* If they are at the same depth, put top element into freed
- slot. */
- if (entry->context_depth == top->context_depth)
- {
- int i = entry->index_by_depth;
- G.by_depth[i] = top;
- G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
- top->index_by_depth = i;
- }
- else
- {
- /* We cannot free a page from a context deeper than the
- current one. */
- abort ();
- }
+ int i = entry->index_by_depth;
+
+ /* We cannot free a page from a context deeper than the current
+ one. */
+ gcc_assert (entry->context_depth == top->context_depth);
+
+ /* Put top element into freed slot. */
+ G.by_depth[i] = top;
+ G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
+ top->index_by_depth = i;
}
--G.by_depth_in_use;
/* Look up the page on which the object is alloced. If the object
wasn't allocated by the collector, we'll probably die. */
entry = lookup_page_table_entry (p);
-#ifdef ENABLE_CHECKING
- if (entry == NULL)
- abort ();
-#endif
+ gcc_assert (entry);
/* Calculate the index of the object on the page; this is its bit
position in the in_use_p bitmap. */
/* Look up the page on which the object is alloced. If the object
wasn't allocated by the collector, we'll probably die. */
entry = lookup_page_table_entry (p);
-#ifdef ENABLE_CHECKING
- if (entry == NULL)
- abort ();
-#endif
+ gcc_assert (entry);
/* Calculate the index of the object on the page; this is its bit
position in the in_use_p bitmap. */
can't get something useful, give up. */
p = alloc_anon (NULL, G.pagesize);
- if ((size_t)p & (G.pagesize - 1))
- abort ();
+ gcc_assert (!((size_t)p & (G.pagesize - 1)));
}
/* We have a good page, might as well hold onto it... */
++G.context_depth;
/* Die on wrap. */
- if (G.context_depth >= HOST_BITS_PER_LONG)
- abort ();
+ gcc_assert (G.context_depth < HOST_BITS_PER_LONG);
}
/* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
p->num_free_objects -= (j & 1);
}
- if (p->num_free_objects >= num_objects)
- abort ();
+ gcc_assert (p->num_free_objects < num_objects);
}
/* Decrement the `GC context'. All objects allocated since the
recalculate the in use bits. */
for (i = G.depth[depth]; i < e; ++i)
{
- page_entry *p;
-
-#ifdef ENABLE_CHECKING
- p = G.by_depth[i];
+ page_entry *p = G.by_depth[i];
/* Check that all of the pages really are at the depth that
we expect. */
- if (p->context_depth != depth)
- abort ();
- if (p->index_by_depth != i)
- abort ();
-#endif
+ gcc_assert (p->context_depth == depth);
+ gcc_assert (p->index_by_depth == i);
prefetch (&save_in_use_p_i (i+8));
prefetch (&save_in_use_p_i (i+16));
/* Check that all of the pages really are at the depth we
expect. */
-#ifdef ENABLE_CHECKING
- if (p->context_depth <= depth)
- abort ();
- if (p->index_by_depth != i)
- abort ();
-#endif
+ gcc_assert (p->context_depth > depth);
+ gcc_assert (p->index_by_depth == i);
p->context_depth = depth;
}
page_entry *p;
for (p = G.pages[order]; p != NULL; p = p->next)
- {
- if (p->context_depth > depth)
- abort ();
- else if (p->context_depth == depth && save_in_use_p (p))
- abort ();
- }
+ gcc_assert (p->context_depth < depth ||
+ (p->context_depth == depth && !save_in_use_p (p)));
}
#endif
}
size_t num_objects = OBJECTS_IN_PAGE (p);
size_t bitmap_size = BITMAP_SIZE (num_objects + 1);
-#ifdef ENABLE_CHECKING
/* The data should be page-aligned. */
- if ((size_t) p->page & (G.pagesize - 1))
- abort ();
-#endif
+ gcc_assert (!((size_t) p->page & (G.pagesize - 1)));
/* Pages that aren't in the topmost context are not collected;
nevertheless, we need their in-use bit vectors to store GC
/* Make certain it isn't visible from any root. Notice that we
do this check before sweep_pages merges save_in_use_p. */
- if (pe->in_use_p[word] & (1UL << bit))
- abort ();
+ gcc_assert (!(pe->in_use_p[word] & (1UL << bit)));
/* If the object comes from an outer context, then retain the
free_object entry, so that we can verify that the address
/* No object read from a PCH file should ever be freed. So, set the
context depth to 1, and set the depth of all the currently-allocated
pages to be 1 too. PCH pages will have depth 0. */
- if (G.context_depth != 0)
- abort ();
+ gcc_assert (!G.context_depth);
G.context_depth = 1;
for (i = 0; i < NUM_ORDERS; i++)
{
struct alloc_chunk *chunk;
chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
#ifdef COOKIE_CHECKING
- if (chunk->magic != CHUNK_MAGIC)
- abort ();
+ gcc_assert (chunk->magic == CHUNK_MAGIC);
#endif
if (chunk->type == 1)
return true;
size_t bin = 0;
bin = SIZE_BIN_DOWN (size);
- if (bin == 0)
- abort ();
+ gcc_assert (bin);
if (bin > NUM_FREE_BINS)
bin = 0;
#ifdef COOKIE_CHECKING
- if (chunk->magic != CHUNK_MAGIC && chunk->magic != DEADCHUNK_MAGIC)
- abort ();
+ gcc_assert (chunk->magic == CHUNK_MAGIC || chunk->magic == DEADCHUNK_MAGIC);
chunk->magic = DEADCHUNK_MAGIC;
#endif
chunk->u.next_free = zone->free_chunks[bin];
chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
#ifdef COOKIE_CHECKING
- if (chunk->magic != CHUNK_MAGIC)
- abort ();
+ gcc_assert (chunk->magic == CHUNK_MAGIC);
#endif
if (chunk->mark)
return 1;
chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
#ifdef COOKIE_CHECKING
- if (chunk->magic != CHUNK_MAGIC)
- abort ();
+ gcc_assert (chunk->magic == CHUNK_MAGIC);
#endif
return chunk->mark;
}
chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
#ifdef COOKIE_CHECKING
- if (chunk->magic != CHUNK_MAGIC)
- abort ();
+ gcc_assert (chunk->magic == CHUNK_MAGIC);
#endif
if (chunk->large)
return chunk->size * 1024;
G.lg_pagesize = exact_log2 (G.pagesize);
#ifdef HAVE_MMAP_DEV_ZERO
G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
- if (G.dev_zero_fd == -1)
- abort ();
+ gcc_assert (G.dev_zero_fd != -1);
#endif
#if 0
can't get something useful, give up. */
p = alloc_anon (NULL, G.pagesize, &main_zone);
- if ((size_t)p & (G.pagesize - 1))
- abort ();
+ gcc_assert (!((size_t)p & (G.pagesize - 1)));
}
/* We have a good page, might as well hold onto it... */
struct alloc_zone *z;
for (z = G.zones; z && z->next_zone != dead_zone; z = z->next_zone)
- /* Just find that zone. */ ;
+ /* Just find that zone. */
+ continue;
-#ifdef ENABLE_CHECKING
/* We should have found the zone in the list. Anything else is fatal. */
- if (!z)
- abort ();
-#endif
+ gcc_assert (z);
/* z is dead, baby. z is dead. */
z->dead= true;
for (zone = G.zones; zone; zone = zone->next_zone)
++(zone->context_depth);
/* Die on wrap. */
- if (main_zone.context_depth >= HOST_BITS_PER_LONG)
- abort ();
+ gcc_assert (main_zone.context_depth < HOST_BITS_PER_LONG);
}
/* Decrement the `GC context'. All objects allocated since the
struct alloc_chunk *end = (struct alloc_chunk *)(p->page + G.pagesize);
do
{
- if (chunk->magic != CHUNK_MAGIC && chunk->magic != DEADCHUNK_MAGIC)
- abort ();
+ gcc_assert (chunk->magic == CHUNK_MAGIC
+ || chunk->magic == DEADCHUNK_MAGIC);
chunk = (struct alloc_chunk *)(chunk->u.data + chunk->size);
}
while (chunk < end);
printf ("Zone `%s' is dead and will be freed.\n", dead_zone->name);
/* The zone must be empty. */
- if (dead_zone->allocated != 0)
- abort ();
+ gcc_assert (!dead_zone->allocated);
/* Unchain the dead zone, release all its pages and free it. */
zone->next_zone = zone->next_zone->next_zone;
in_use += p->bytes - CHUNK_OVERHEAD;
chunk = (struct alloc_chunk *) p->page;
overhead += CHUNK_OVERHEAD;
- if (!chunk->type)
- abort ();
- if (chunk->mark)
- abort ();
+ gcc_assert (chunk->type && !chunk->mark);
continue;
}
overhead += CHUNK_OVERHEAD;
if (chunk->type)
in_use += chunk->size;
- if (chunk->mark)
- abort ();
+ gcc_assert (!chunk->mark);
}
}
fprintf (stderr, "%20s %10lu%c %10lu%c %10lu%c\n",
SCALE (in_use), LABEL (in_use),
SCALE (overhead), LABEL (overhead));
- if (in_use != zone->allocated)
- abort ();
+ gcc_assert (in_use == zone->allocated);
total_overhead += overhead;
total_allocated += zone->allocated;
tree_stmt_iterator i;
tree t, x;
- if (TREE_CODE (bind) != BIND_EXPR)
- abort ();
+ gcc_assert (TREE_CODE (bind) == BIND_EXPR);
data.block = DECL_INITIAL (current_function_decl);
BLOCK_SUBBLOCKS (data.block) = NULL_TREE;
tsi_link_after (&i, x, TSI_CONTINUE_LINKING);
}
- if (data.block != DECL_INITIAL (current_function_decl))
- abort ();
+ gcc_assert (data.block == DECL_INITIAL (current_function_decl));
BLOCK_SUBBLOCKS (data.block)
= blocks_nreverse (BLOCK_SUBBLOCKS (data.block));
break;
default:
+#ifdef ENABLE_CHECKING
print_node_brief (stderr, "", stmt, 0);
+ internal_error ("unexpected node");
+#endif
case COMPOUND_EXPR:
- abort ();
+ gcc_unreachable ();
}
tsi_next (tsi);
/* The outermost block of the original function may not be the
outermost statement chain of the gimplified function. So we
may see the outermost block just inside the function. */
- if (new_block != DECL_INITIAL (current_function_decl))
- abort ();
+ gcc_assert (new_block == DECL_INITIAL (current_function_decl));
new_block = NULL;
}
else
{
/* We do not expect to handle duplicate blocks. */
- if (TREE_ASM_WRITTEN (new_block))
- abort ();
+ gcc_assert (!TREE_ASM_WRITTEN (new_block));
TREE_ASM_WRITTEN (new_block) = 1;
/* Block tree may get clobbered by inlining. Normally this would
if (new_block)
{
- if (data->block != new_block)
- abort ();
+ gcc_assert (data->block == new_block);
BLOCK_SUBBLOCKS (new_block)
= blocks_nreverse (BLOCK_SUBBLOCKS (new_block));
/* Only allow them to compare equal if they also hash equal; otherwise
results are nondeterminate, and we fail bootstrap comparison. */
- if (gimple_tree_hash (p1) != gimple_tree_hash (p2))
- abort ();
+ gcc_assert (gimple_tree_hash (p1) == gimple_tree_hash (p2));
return 1;
}
void
push_gimplify_context (void)
{
- if (gimplify_ctxp)
- abort ();
+ gcc_assert (!gimplify_ctxp);
gimplify_ctxp
= (struct gimplify_ctx *) xcalloc (1, sizeof (struct gimplify_ctx));
if (optimize)
{
tree t;
- if (!gimplify_ctxp || gimplify_ctxp->current_bind_expr)
- abort ();
+ gcc_assert (gimplify_ctxp && !gimplify_ctxp->current_bind_expr);
for (t = gimplify_ctxp->temps; t ; t = TREE_CHAIN (t))
DECL_GIMPLE_FORMAL_TEMP_P (t) = 0;
{
int conds = --(gimplify_ctxp->conditions);
+ gcc_assert (conds >= 0);
if (conds == 0)
{
append_to_statement_list (gimplify_ctxp->conditional_cleanups, pre_p);
gimplify_ctxp->conditional_cleanups = NULL_TREE;
}
- else if (conds < 0)
- abort ();
}
/* A subroutine of append_to_statement_list{,_force}. */
{
tree tmp_var;
-#if defined ENABLE_CHECKING
/* We don't allow types that are addressable (meaning we can't make copies),
incomplete, or of variable size. */
- if (TREE_ADDRESSABLE (type)
- || !COMPLETE_TYPE_P (type)
- || TREE_CODE (TYPE_SIZE_UNIT (type)) != INTEGER_CST)
- abort ();
-#endif
+ gcc_assert (!TREE_ADDRESSABLE (type)
+ && COMPLETE_TYPE_P (type)
+ && TREE_CODE (TYPE_SIZE_UNIT (type)) == INTEGER_CST);
tmp_var = create_tmp_var_raw (type, prefix);
gimple_add_tmp_var (tmp_var);
while (TREE_CODE (scope) == COMPOUND_EXPR)
scope = TREE_OPERAND (scope, 0);
- if (TREE_CODE (scope) != BIND_EXPR)
- abort ();
+ gcc_assert (TREE_CODE (scope) == BIND_EXPR);
temps = nreverse (last);
TREE_CHAIN (last) = BIND_EXPR_VARS (scope);
void
gimple_add_tmp_var (tree tmp)
{
- if (TREE_CHAIN (tmp) || DECL_SEEN_IN_BIND_EXPR_P (tmp))
- abort ();
+ gcc_assert (!TREE_CHAIN (tmp) && !DECL_SEEN_IN_BIND_EXPR_P (tmp));
DECL_CONTEXT (tmp) = current_function_decl;
DECL_SEEN_IN_BIND_EXPR_P (tmp) = 1;
{
tree t = tsi_stmt (i);
-#ifdef ENABLE_CHECKING
- /* Assuming we've already been gimplified, we shouldn't
- see nested chaining constructs anymore. */
- if (TREE_CODE (t) == STATEMENT_LIST
- || TREE_CODE (t) == COMPOUND_EXPR)
- abort ();
-#endif
+ /* Assuming we've already been gimplified, we shouldn't
+ see nested chaining constructs anymore. */
+ gcc_assert (TREE_CODE (t) != STATEMENT_LIST
+ && TREE_CODE (t) != COMPOUND_EXPR);
annotate_one_with_locus (t, locus);
}
uses. So just avert our eyes and cross our fingers. Silly Java. */
|| code == BLOCK)
*walk_subtrees = 0;
- else if (code == BIND_EXPR)
- abort ();
else
- copy_tree_r (tp, walk_subtrees, data);
+ {
+ gcc_assert (code != BIND_EXPR);
+ copy_tree_r (tp, walk_subtrees, data);
+ }
return NULL_TREE;
}
if (TREE_CODE (result_decl) == INDIRECT_REF)
/* See through a return by reference. */
result_decl = TREE_OPERAND (result_decl, 0);
-#ifdef ENABLE_CHECKING
- if ((TREE_CODE (ret_expr) != MODIFY_EXPR
- && TREE_CODE (ret_expr) != INIT_EXPR)
- || TREE_CODE (result_decl) != RESULT_DECL)
- abort ();
-#endif
+
+ gcc_assert ((TREE_CODE (ret_expr) == MODIFY_EXPR
+ || TREE_CODE (ret_expr) == INIT_EXPR)
+ && TREE_CODE (result_decl) == RESULT_DECL);
}
/* If aggregate_value_p is true, then we can return the bare RESULT_DECL.
/* If someone can be bothered to fill in the labels, they can
be bothered to null out the body too. */
- if (SWITCH_LABELS (switch_expr))
- abort ();
+ gcc_assert (!SWITCH_LABELS (switch_expr));
saved_labels = gimplify_ctxp->case_labels;
VARRAY_TREE_INIT (gimplify_ctxp->case_labels, 8, "case_labels");
SWITCH_BODY (switch_expr) = NULL;
}
- else if (!SWITCH_LABELS (switch_expr))
- abort ();
+ else
+ gcc_assert (SWITCH_LABELS (switch_expr));
return ret;
}
gimplify_case_label_expr (tree *expr_p)
{
tree expr = *expr_p;
- if (gimplify_ctxp->case_labels)
- VARRAY_PUSH_TREE (gimplify_ctxp->case_labels, expr);
- else
- abort ();
+
+ gcc_assert (gimplify_ctxp->case_labels);
+ VARRAY_PUSH_TREE (gimplify_ctxp->case_labels, expr);
*expr_p = build (LABEL_EXPR, void_type_node, CASE_LABEL (expr));
return GS_ALL_DONE;
}
/* First operand must be a LABELED_BLOCK_EXPR, which should
already be lowered (or partially lowered) when we get here. */
-#if defined ENABLE_CHECKING
- if (TREE_CODE (labeled_block) != LABELED_BLOCK_EXPR)
- abort ();
-#endif
+ gcc_assert (TREE_CODE (labeled_block) == LABELED_BLOCK_EXPR);
label = LABELED_BLOCK_LABEL (labeled_block);
*expr_p = build1 (GOTO_EXPR, void_type_node, label);
tree expr = *expr_p;
tree type;
- if (TREE_CODE (expr) != COMPONENT_REF)
- abort ();
+ gcc_assert (TREE_CODE (expr) == COMPONENT_REF);
if (INTEGRAL_TYPE_P (TREE_TYPE (expr)))
type = TREE_TYPE (get_unwidened (expr, NULL_TREE));
p = &TREE_OPERAND (*p, 0))
VARRAY_PUSH_GENERIC_PTR_NOGC (stack, *p);
-#if defined ENABLE_CHECKING
- if (VARRAY_ACTIVE_SIZE (stack) == 0)
- abort ();
-#endif
+ gcc_assert (VARRAY_ACTIVE_SIZE (stack));
/* Now STACK is a stack of pointers to all the refs we've walked through
and P points to the innermost expression.
code = TREE_CODE (*expr_p);
-#if defined ENABLE_CHECKING
- if (code != POSTINCREMENT_EXPR
- && code != POSTDECREMENT_EXPR
- && code != PREINCREMENT_EXPR
- && code != PREDECREMENT_EXPR)
- abort ();
-#endif
+ gcc_assert (code == POSTINCREMENT_EXPR || code == POSTDECREMENT_EXPR
+ || code == PREINCREMENT_EXPR || code == PREDECREMENT_EXPR);
/* Prefix or postfix? */
if (code == POSTINCREMENT_EXPR || code == POSTDECREMENT_EXPR)
tree arglist;
enum gimplify_status ret;
-#if defined ENABLE_CHECKING
- if (TREE_CODE (*expr_p) != CALL_EXPR)
- abort ();
-#endif
+ gcc_assert (TREE_CODE (*expr_p) == CALL_EXPR);
/* For reliable diagnostics during inlining, it is necessary that
every call_expr be annotated with file and line. */
{
/* ??? Here's to hoping the front end fills in all of the indicies,
so we don't have to figure out what's missing ourselves. */
- if (!purpose)
- abort ();
+ gcc_assert (purpose);
/* ??? Need to handle this. */
- if (TREE_CODE (purpose) == RANGE_EXPR)
- abort ();
+ gcc_assert (TREE_CODE (purpose) != RANGE_EXPR);
cref = build (ARRAY_REF, array_elt_type, unshare_expr (object),
purpose, NULL_TREE, NULL_TREE);
if (elt_list)
{
i = TREE_VALUE (elt_list);
- if (TREE_CHAIN (elt_list))
- abort ();
+ gcc_assert (!TREE_CHAIN (elt_list));
}
}
if (r == NULL || i == NULL)
default:
/* So how did we get a CONSTRUCTOR for a scalar type? */
- abort ();
+ gcc_unreachable ();
}
if (ret == GS_ERROR)
tree *to_p = &TREE_OPERAND (*expr_p, 0);
enum gimplify_status ret = GS_UNHANDLED;
-#if defined ENABLE_CHECKING
- if (TREE_CODE (*expr_p) != MODIFY_EXPR && TREE_CODE (*expr_p) != INIT_EXPR)
- abort ();
-#endif
+ gcc_assert (TREE_CODE (*expr_p) == MODIFY_EXPR
+ || TREE_CODE (*expr_p) == INIT_EXPR);
/* The distinction between MODIFY_EXPR and INIT_EXPR is no longer useful. */
if (TREE_CODE (*expr_p) == INIT_EXPR)
{
/* If we've somehow already got an SSA_NAME on the LHS, then
we're probably modifying it twice. Not good. */
- if (TREE_CODE (*to_p) == SSA_NAME)
- abort ();
+ gcc_assert (TREE_CODE (*to_p) != SSA_NAME);
*to_p = make_ssa_name (*to_p, *expr_p);
}
enum gimplify_status ret = GS_ALL_DONE;
tree val;
-#if defined ENABLE_CHECKING
- if (TREE_CODE (*expr_p) != SAVE_EXPR)
- abort ();
-#endif
-
+ gcc_assert (TREE_CODE (*expr_p) == SAVE_EXPR);
val = TREE_OPERAND (*expr_p, 0);
/* If the SAVE_EXPR has not been resolved, then evaluate it once. */
TREE_OPERAND (targ, 3) = init;
TARGET_EXPR_INITIAL (targ) = NULL_TREE;
}
- else if (!DECL_SEEN_IN_BIND_EXPR_P (temp))
+ else
/* We should have expanded this before. */
- abort ();
+ gcc_assert (DECL_SEEN_IN_BIND_EXPR_P (temp));
*expr_p = temp;
return GS_OK;
break;
case TREE_LIST:
- abort ();
+ gcc_unreachable ();
case COMPOUND_EXPR:
ret = gimplify_compound_expr (expr_p, pre_p, fallback != fb_none);
case LABEL_EXPR:
ret = GS_ALL_DONE;
-#ifdef ENABLE_CHECKING
- if (decl_function_context (LABEL_EXPR_LABEL (*expr_p)) != current_function_decl)
- abort ();
-#endif
+ gcc_assert (decl_function_context (LABEL_EXPR_LABEL (*expr_p))
+ == current_function_decl);
break;
case CASE_LABEL_EXPR:
case NON_LVALUE_EXPR:
/* This should have been stripped above. */
- abort ();
- break;
+ gcc_unreachable ();
case ASM_EXPR:
ret = gimplify_asm_expr (expr_p, pre_p, post_p);
&& decl_function_context (tmp) == current_function_decl
&& !DECL_SEEN_IN_BIND_EXPR_P (tmp))
{
-#ifdef ENABLE_CHECKING
- if (!errorcount && !sorrycount)
- abort ();
-#endif
+ gcc_assert (errorcount || sorrycount);
ret = GS_ERROR;
break;
}
break;
default:
- /* If this is a comparison of objects of aggregate type, handle
- it specially (by converting to a call to memcmp). It would be
- nice to only have to do this for variable-sized objects, but
- then we'd have to allow the same nest of reference nodes we
- allow for MODIFY_EXPR and that's too complex. */
- if (TREE_CODE_CLASS (TREE_CODE (*expr_p)) == '<'
- && (AGGREGATE_TYPE_P (TREE_TYPE (TREE_OPERAND (*expr_p, 1)))))
- ret = gimplify_variable_sized_compare (expr_p);
-
- /* If *EXPR_P does not need to be special-cased, handle it
- according to its class. */
- else if (TREE_CODE_CLASS (TREE_CODE (*expr_p)) == '1')
- ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p,
- post_p, is_gimple_val, fb_rvalue);
- else if (TREE_CODE_CLASS (TREE_CODE (*expr_p)) == '2'
- || TREE_CODE_CLASS (TREE_CODE (*expr_p)) == '<'
- || TREE_CODE (*expr_p) == TRUTH_AND_EXPR
- || TREE_CODE (*expr_p) == TRUTH_OR_EXPR
- || TREE_CODE (*expr_p) == TRUTH_XOR_EXPR)
+ switch (TREE_CODE_CLASS (TREE_CODE (*expr_p)))
{
- enum gimplify_status r0, r1;
-
- r0 = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p,
- post_p, is_gimple_val, fb_rvalue);
- r1 = gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p,
- post_p, is_gimple_val, fb_rvalue);
+ case '<':
+ /* If this is a comparison of objects of aggregate type,
+ handle it specially (by converting to a call to
+ memcmp). It would be nice to only have to do this
+ for variable-sized objects, but then we'd have to
+ allow the same nest of reference nodes we allow for
+ MODIFY_EXPR and that's too complex. */
+ if (!AGGREGATE_TYPE_P (TREE_TYPE (TREE_OPERAND (*expr_p, 1))))
+ goto expr_2;
+ ret = gimplify_variable_sized_compare (expr_p);
+ break;
+
+ /* If *EXPR_P does not need to be special-cased, handle it
+ according to its class. */
+ case '1':
+ ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p,
+ post_p, is_gimple_val, fb_rvalue);
+ break;
- ret = MIN (r0, r1);
- }
- else if (TREE_CODE_CLASS (TREE_CODE (*expr_p)) == 'd'
- || TREE_CODE_CLASS (TREE_CODE (*expr_p)) == 'c')
- {
+ case '2':
+ expr_2:
+ {
+ enum gimplify_status r0, r1;
+
+ r0 = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p,
+ post_p, is_gimple_val, fb_rvalue);
+ r1 = gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p,
+ post_p, is_gimple_val, fb_rvalue);
+
+ ret = MIN (r0, r1);
+ break;
+ }
+
+ case 'd':
+ case 'c':
ret = GS_ALL_DONE;
- break;
+ goto dont_recalculate;
+
+ default:
+ gcc_assert (TREE_CODE (*expr_p) == TRUTH_AND_EXPR
+ || TREE_CODE (*expr_p) == TRUTH_OR_EXPR
+ || TREE_CODE (*expr_p) == TRUTH_XOR_EXPR);
+ goto expr_2;
}
- else
- /* Fail if we don't know how to handle this tree code. */
- abort ();
recalculate_side_effects (*expr_p);
+ dont_recalculate:
break;
}
-
+
/* If we replaced *expr_p, gimplify again. */
if (ret == GS_OK && (*expr_p == NULL || *expr_p == save_expr))
ret = GS_ALL_DONE;
goto out;
}
-#ifdef ENABLE_CHECKING
/* This was only valid as a return value from the langhook, which
we handled. Make sure it doesn't escape from any other context. */
- if (ret == GS_UNHANDLED)
- abort ();
-#endif
+ gcc_assert (ret != GS_UNHANDLED);
if (fallback == fb_none && *expr_p && !is_gimple_stmt (*expr_p))
{
has side effects. Recurse through the operands to find it. */
enum tree_code code = TREE_CODE (*expr_p);
- if (code == COMPONENT_REF
- || code == REALPART_EXPR || code == IMAGPART_EXPR)
- gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
- gimple_test_f, fallback);
- else if (code == ARRAY_REF || code == ARRAY_RANGE_REF)
+ switch (code)
{
+ case COMPONENT_REF:
+ case REALPART_EXPR: case IMAGPART_EXPR:
+ gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
+ gimple_test_f, fallback);
+ break;
+
+ case ARRAY_REF: case ARRAY_RANGE_REF:
gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
gimple_test_f, fallback);
gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p, post_p,
- gimple_test_f, fallback);
+ gimple_test_f, fallback);
+ break;
+
+ default:
+ /* Anything else with side-effects must be converted to
+ a valid statement before we get here. */
+ gcc_unreachable ();
}
- else
- /* Anything else with side-effects
- must be converted to a valid statement before we get here. */
- abort ();
*expr_p = NULL;
}
}
else if ((fallback & fb_rvalue) && is_gimple_formal_tmp_rhs (*expr_p))
{
-#if defined ENABLE_CHECKING
- if (VOID_TYPE_P (TREE_TYPE (*expr_p)))
- abort ();
-#endif
+ gcc_assert (!VOID_TYPE_P (TREE_TYPE (*expr_p)));
/* An rvalue will do. Assign the gimplified expression into a new
temporary TMP and replace the original expression with TMP. */
if (TREE_CODE (*expr_p) != SSA_NAME)
DECL_GIMPLE_FORMAL_TEMP_P (*expr_p) = 1;
}
- else if (fallback & fb_mayfail)
+ else
{
- /* If this is an asm statement, and the user asked for the impossible,
- don't abort. Fail and let gimplify_asm_expr issue an error. */
+#ifdef ENABLE_CHECKING
+ if (!(fallback & fb_mayfail))
+ {
+ fprintf (stderr, "gimplification failed:\n");
+ print_generic_expr (stderr, *expr_p, 0);
+ debug_tree (*expr_p);
+ internal_error ("gimplification failed");
+ }
+#endif
+ gcc_assert (fallback & fb_mayfail);
+ /* If this is an asm statement, and the user asked for the
+ impossible, don't abort. Fail and let gimplify_asm_expr
+ issue an error. */
ret = GS_ERROR;
goto out;
}
- else
- {
- fprintf (stderr, "gimplification failed:\n");
- print_generic_expr (stderr, *expr_p, 0);
- debug_tree (*expr_p);
- abort ();
- }
-#if defined ENABLE_CHECKING
/* Make sure the temporary matches our predicate. */
- if (!(*gimple_test_f) (*expr_p))
- abort ();
-#endif
+ gcc_assert ((*gimple_test_f) (*expr_p));
if (internal_post)
{
otype = TREE_TYPE (t);
ptype = TREE_TYPE (TREE_OPERAND (t, 0));
dtype = TREE_TYPE (ptype);
- if (!cpt_same_type (otype, dtype))
- abort ();
+ gcc_assert (cpt_same_type (otype, dtype));
break;
case ADDR_EXPR:
a pointer to the array type. We must allow this in order to
properly represent assigning the address of an array in C into
pointer to the element type. */
- if (TREE_CODE (otype) == ARRAY_TYPE
- && POINTER_TYPE_P (ptype)
- && cpt_same_type (TREE_TYPE (otype), dtype))
- break;
- abort ();
+ gcc_assert (TREE_CODE (otype) == ARRAY_TYPE
+ && POINTER_TYPE_P (ptype)
+ && cpt_same_type (TREE_TYPE (otype), dtype));
+ break;
}
break;
ret = gimplify_expr (&expr, stmts, NULL,
gimple_test_f, fb_rvalue);
- if (ret == GS_ERROR)
- abort ();
+ gcc_assert (ret != GS_ERROR);
for (t = gimplify_ctxp->temps; t ; t = TREE_CHAIN (t))
add_referenced_tmp_var (t);
&& (! current_function_has_nonlocal_label
|| REG_N_CALLS_CROSSED (i) == 0))
{
- if (reg_renumber[i] < 0 && reg_may_share[i] && reg_allocno[reg_may_share[i]] >= 0)
+ if (reg_renumber[i] < 0
+ && reg_may_share[i] && reg_allocno[reg_may_share[i]] >= 0)
reg_allocno[i] = reg_allocno[reg_may_share[i]];
else
reg_allocno[i] = max_allocno++;
- if (REG_LIVE_LENGTH (i) == 0)
- abort ();
+ gcc_assert (REG_LIVE_LENGTH (i));
}
else
reg_allocno[i] = -1;
the previous real insn is a JUMP_INSN. */
if (b == EXIT_BLOCK_PTR)
{
- for (first = NEXT_INSN (first) ; first; first = NEXT_INSN (first))
- if (INSN_P (first)
- && GET_CODE (PATTERN (first)) != USE
- && ! ((GET_CODE (PATTERN (first)) == ADDR_VEC
- || GET_CODE (PATTERN (first)) == ADDR_DIFF_VEC)
- && prev_real_insn (first) != 0
- && JUMP_P (prev_real_insn (first))))
- abort ();
+#ifdef ENABLE_CHECKING
+ for (first = NEXT_INSN (first); first; first = NEXT_INSN (first))
+ gcc_assert (!INSN_P (first)
+ || GET_CODE (PATTERN (first)) == USE
+ || ((GET_CODE (PATTERN (first)) == ADDR_VEC
+ || GET_CODE (PATTERN (first)) == ADDR_DIFF_VEC)
+ && prev_real_insn (first) != 0
+ && JUMP_P (prev_real_insn (first))));
+#endif
break;
}
}
if (fp == NULL)
fatal_error ("can't open %s: %m", buf);
- switch (graph_dump_format)
- {
- case vcg:
- fputs ("graph: {\nport_sharing: no\n", fp);
- break;
- case no_graph:
- abort ();
- }
+ gcc_assert (graph_dump_format == vcg);
+ fputs ("graph: {\nport_sharing: no\n", fp);
fclose (fp);
}
fp = fopen (buf, "a");
if (fp != NULL)
{
- switch (graph_dump_format)
- {
- case vcg:
- fputs ("}\n", fp);
- break;
- case no_graph:
- abort ();
- }
-
+ gcc_assert (graph_dump_format == vcg);
+ fputs ("}\n", fp);
fclose (fp);
}
}