static inline int
mems_in_disjoint_alias_sets_p (rtx mem1, rtx mem2)
{
-#ifdef ENABLE_CHECKING
/* Perform a basic sanity check. Namely, that there are no alias sets
if we're not using strict aliasing. This helps to catch bugs
whereby someone uses PUT_CODE but doesn't clear MEM_ALIAS_SET, or
uses gen_rtx_MEM without clearing MEM_ALIAS_SET. If we begin to
use alias sets to indicate that spilled registers cannot alias each
other, we might need to remove this check. */
- if (! flag_strict_aliasing
- && (MEM_ALIAS_SET (mem1) != 0 || MEM_ALIAS_SET (mem2) != 0))
- abort ();
-#endif
+ gcc_assert (flag_strict_aliasing
+ || (!MEM_ALIAS_SET (mem1) && !MEM_ALIAS_SET (mem2)));
return ! alias_sets_conflict_p (MEM_ALIAS_SET (mem1), MEM_ALIAS_SET (mem2));
}
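For context, the gcc_assert and gcc_unreachable macros that replace these open-coded aborts are thin wrappers around fancy_abort, which reports the failing file, line, and function. Note the pattern used throughout: each `if (cond) abort ();` becomes `gcc_assert (!cond)` with the condition inverted. A minimal sketch of the macros, assuming the usual system.h shape; the real definitions may differ in detail:

extern void fancy_abort (const char *, int, const char *)
     ATTRIBUTE_NORETURN;

/* Evaluate EXPR; if it is false, report the location and abort.  */
#define gcc_assert(EXPR) \
  ((void) (!(EXPR) ? fancy_abort (__FILE__, __LINE__, __FUNCTION__), 0 : 0))

/* Mark a code path that can never be executed.  */
#define gcc_unreachable() fancy_abort (__FILE__, __LINE__, __FUNCTION__)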
if (superset == subset)
return;
- if (superset == 0)
- abort ();
+ gcc_assert (superset);
superset_entry = get_alias_set_entry (superset);
if (superset_entry == 0)
regno = REGNO (dest);
- if (regno >= VARRAY_SIZE (reg_base_value))
- abort ();
+ gcc_assert (regno < VARRAY_SIZE (reg_base_value));
/* If this spans multiple hard registers, then we must indicate that every
register has an unusable value. */
contain anything but integers and other rtx's,
except for within LABEL_REFs and SYMBOL_REFs. */
default:
- abort ();
+ gcc_unreachable ();
}
}
return 1;
}
/* Now propagate values from new_reg_base_value to reg_base_value. */
- if (maxreg != (unsigned int) max_reg_num())
- abort ();
+ gcc_assert (maxreg == (unsigned int) max_reg_num ());
+
for (ui = 0; ui < maxreg; ui++)
{
if (new_reg_base_value[ui]
#include "alloc-pool.h"
#include "hashtab.h"
-/* Redefine abort to report an internal error w/o coredump, and
- reporting the location of the error in the source file. This logic
- is duplicated in rtl.h and tree.h because every file that needs the
- special abort includes one or both. toplev.h gets too few files,
- system.h gets too many. */
-
-extern void fancy_abort (const char *, int, const char *)
- ATTRIBUTE_NORETURN;
-#define abort() fancy_abort (__FILE__, __LINE__, __FUNCTION__)
-
#define align_eight(x) (((x+7) >> 3) << 3)
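As an aside, align_eight rounds its argument up to the next multiple of eight: adding 7 and then shifting right and left by three clears the low bits. Two worked values:

/* align_eight (13) => ((13 + 7) >> 3) << 3 => (20 >> 3) << 3 => 16
   align_eight (16) => ((16 + 7) >> 3) << 3 => (23 >> 3) << 3 => 16  */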
/* The internal allocation object. */
struct alloc_pool_descriptor *desc;
#endif
- if (!name)
- abort ();
+ gcc_assert (name);
/* Make size large enough to store the list header. */
if (size < sizeof (alloc_pool_list))
#endif
/* Um, we can't really allocate 0 elements per block. */
- if (num == 0)
- abort ();
+ gcc_assert (num);
/* Find the size of the pool structure, and the name. */
pool_size = sizeof (struct alloc_pool_def);
struct alloc_pool_descriptor *desc = alloc_pool_descriptor (pool->name);
#endif
-#ifdef ENABLE_CHECKING
- if (!pool)
- abort ();
-#endif
+ gcc_assert (pool);
/* Free each block allocated to the pool. */
for (block = pool->block_list; block != NULL; block = next_block)
desc->allocated+=pool->elt_size;
#endif
-#ifdef ENABLE_CHECKING
- if (!pool)
- abort ();
-#endif
+ gcc_assert (pool);
/* If there are no more free elements, make some more!  */
if (!pool->free_list)
{
alloc_pool_list header;
-#ifdef ENABLE_CHECKING
- if (!ptr)
- abort ();
+ gcc_assert (ptr);
+#ifdef ENABLE_CHECKING
memset (ptr, 0xaf, pool->elt_size - offsetof (allocation_object, u.data));
/* Check whether the PTR was allocated from POOL. */
- if (pool->id != ALLOCATION_OBJECT_PTR_FROM_USER_PTR (ptr)->id)
- abort ();
+ gcc_assert (pool->id == ALLOCATION_OBJECT_PTR_FROM_USER_PTR (ptr)->id);
/* Mark the element to be free. */
ALLOCATION_OBJECT_PTR_FROM_USER_PTR (ptr)->id = 0;
#else
/* Check if we free more than we allocated, which is Bad (TM). */
- if (pool->elts_free + 1 > pool->elts_allocated)
- abort ();
+ gcc_assert (pool->elts_free < pool->elts_allocated);
#endif
header = (alloc_pool_list) ptr;
/* The name must not begin and end with __. */
const char *name = attribute_tables[i][j].name;
int len = strlen (name);
- if (name[0] == '_' && name[1] == '_'
- && name[len - 1] == '_' && name[len - 2] == '_')
- abort ();
+
+ gcc_assert (!(name[0] == '_' && name[1] == '_'
+ && name[len - 1] == '_' && name[len - 2] == '_'));
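+ /* E.g. "noreturn" is a valid table entry, while "__noreturn__" would
+    fire this assertion: attribute lookup already accepts the underscored
+    spelling, so the canonical table names must not use it.  */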
+
/* The minimum and maximum lengths must be consistent. */
- if (attribute_tables[i][j].min_length < 0)
- abort ();
- if (attribute_tables[i][j].max_length != -1
- && (attribute_tables[i][j].max_length
- < attribute_tables[i][j].min_length))
- abort ();
+ gcc_assert (attribute_tables[i][j].min_length >= 0);
+
+ gcc_assert (attribute_tables[i][j].max_length == -1
+ || (attribute_tables[i][j].max_length
+ >= attribute_tables[i][j].min_length));
+
/* An attribute cannot require both a DECL and a TYPE. */
- if (attribute_tables[i][j].decl_required
- && attribute_tables[i][j].type_required)
- abort ();
+ gcc_assert (!attribute_tables[i][j].decl_required
+ || !attribute_tables[i][j].type_required);
+
/* If an attribute requires a function type, in particular
it requires a type. */
- if (attribute_tables[i][j].function_type_required
- && !attribute_tables[i][j].type_required)
- abort ();
+ gcc_assert (!attribute_tables[i][j].function_type_required
+ || attribute_tables[i][j].type_required);
}
}
int j, k;
for (j = 0; attribute_tables[i][j].name != NULL; j++)
for (k = j + 1; attribute_tables[i][k].name != NULL; k++)
- if (!strcmp (attribute_tables[i][j].name,
- attribute_tables[i][k].name))
- abort ();
+ gcc_assert (strcmp (attribute_tables[i][j].name,
+ attribute_tables[i][k].name));
}
/* Check that no name occurs in more than one table. */
for (i = 0; i < ARRAY_SIZE (attribute_tables); i++)
for (j = i + 1; j < ARRAY_SIZE (attribute_tables); j++)
for (k = 0; attribute_tables[i][k].name != NULL; k++)
for (l = 0; attribute_tables[j][l].name != NULL; l++)
- if (!strcmp (attribute_tables[i][k].name,
- attribute_tables[j][l].name))
- abort ();
+ gcc_assert (strcmp (attribute_tables[i][k].name,
+ attribute_tables[j][l].name));
}
#endif
fn_ptr_tmp = build_pointer_type (fn_ptr_tmp);
if (DECL_P (*node))
TREE_TYPE (*node) = fn_ptr_tmp;
- else if (TREE_CODE (*node) == POINTER_TYPE)
- *node = fn_ptr_tmp;
else
- abort ();
+ {
+ gcc_assert (TREE_CODE (*node) == POINTER_TYPE);
+ *node = fn_ptr_tmp;
+ }
}
}
#define GET_ARRAY_SIZE(X) ((((X) / 4) + 1) * 5)
/* Free the memory and set the pointer to NULL. */
-#define FREE(P) \
- do { if (P) { free (P); P = 0; } else { abort (); } } while (0)
+#define FREE(P) (gcc_assert (P), free (P), P = 0)
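The rewritten FREE stays a single expression, so it drops into statement position exactly as the old do-while form did; the comma operator evaluates left to right. A hypothetical use (p and the xmalloc call are illustrative only):

int *p = xmalloc (sizeof (int));
FREE (p);   /* Asserts p is non-null, frees it, then zeroes it.  */
/* A second FREE (p) would now trip the assertion, catching the
   double free that previously fell into the abort branch.  */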
/* Structure for holding information about a trace. */
struct trace
/* Select the successor that will be placed after BB. */
for (e = bb->succ; e; e = e->succ_next)
{
-#ifdef ENABLE_CHECKING
- if (e->flags & EDGE_FAKE)
- abort ();
-#endif
+ gcc_assert (!(e->flags & EDGE_FAKE));
if (e->dest == EXIT_BLOCK_PTR)
continue;
new_bb = duplicate_block (old_bb, e);
BB_COPY_PARTITION (new_bb, old_bb);
- if (e->dest != new_bb)
- abort ();
- if (e->dest->rbi->visited)
- abort ();
+ gcc_assert (e->dest == new_bb);
+ gcc_assert (!e->dest->rbi->visited);
+
if (dump_file)
fprintf (dump_file,
"Duplicated bb %d (created bb %d)\n",
/* If basic block does not contain a NOTE_INSN_BASIC_BLOCK, there is
a major problem. */
-
- if (!insert_insn)
- abort ();
+ gcc_assert (insert_insn);
/* Insert note and assign basic block number to it. */
/* bb just falls through. */
{
/* Make sure there's only one successor.  */
- if (src->succ && (src->succ->succ_next == NULL))
- {
- /* Find label in dest block. */
- label = block_label (dest);
-
- new_jump = emit_jump_insn_after (gen_jump (label),
- BB_END (src));
- barrier = emit_barrier_after (new_jump);
- JUMP_LABEL (new_jump) = label;
- LABEL_NUSES (label) += 1;
- src->rbi->footer = unlink_insn_chain (barrier,
- barrier);
- /* Mark edge as non-fallthru. */
- crossing_edges[i]->flags &= ~EDGE_FALLTHRU;
- }
- else
- {
- /* Basic block has two successors, but
- doesn't end in a jump; something is wrong
- here! */
- abort();
- }
+ gcc_assert (src->succ && !src->succ->succ_next);
+
+ /* Find label in dest block. */
+ label = block_label (dest);
+
+ new_jump = emit_jump_insn_after (gen_jump (label),
+ BB_END (src));
+ barrier = emit_barrier_after (new_jump);
+ JUMP_LABEL (new_jump) = label;
+ LABEL_NUSES (label) += 1;
+ src->rbi->footer = unlink_insn_chain (barrier, barrier);
+ /* Mark edge as non-fallthru. */
+ crossing_edges[i]->flags &= ~EDGE_FALLTHRU;
} /* end: 'if (GET_CODE ... ' */
} /* end: 'if (src && src->index...' */
} /* end: 'if (dest && dest->index...' */
(old_label),
BB_END (new_bb));
}
- else if (HAVE_return
- && GET_CODE (old_label) == RETURN)
- new_jump = emit_jump_insn_after (gen_return (),
- BB_END (new_bb));
else
- abort ();
+ {
+ gcc_assert (HAVE_return
+ && GET_CODE (old_label) == RETURN);
+ new_jump = emit_jump_insn_after (gen_return (),
+ BB_END (new_bb));
+ }
barrier = emit_barrier_after (new_jump);
JUMP_LABEL (new_jump) = old_label;
{
rtx label2, table;
- if (any_condjump_p (last_insn))
- abort ();
+ gcc_assert (!any_condjump_p (last_insn));
/* Make sure the jump is not already an indirect or table jump. */
- else if (!computed_jump_p (last_insn)
- && !tablejump_p (last_insn, &label2, &table))
+ if (!computed_jump_p (last_insn)
+ && !tablejump_p (last_insn, &label2, &table))
{
/* We have found a "crossing" unconditional branch. Now
we must convert it to an indirect jump. First create
for (word_num = 0; word_num < BITMAP_ELEMENT_WORDS; ++word_num)
if ((word = ptr->bits[word_num]) != 0)
goto word_found;
- abort ();
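+ /* Empty elements are deleted from the bitmap as bits are cleared, so
+    this element must contain at least one nonzero word; the loop cannot
+    fall through.  */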
+ gcc_unreachable ();
word_found:
#endif
for (word_num = BITMAP_ELEMENT_WORDS; word_num-- > 0; )
if ((word = ptr->bits[word_num]) != 0)
goto word_found;
- abort ();
+ gcc_unreachable ();
word_found:
#endif
switch (operation)
{
default:
- abort ();
+ gcc_unreachable ();
case BITMAP_AND:
DOIT (&);
if (REG_P (dest)
&& TEST_HARD_REG_BIT (all_btrs, REGNO (dest)))
{
- if (btr_referenced_p (src, NULL))
- abort();
+ gcc_assert (!btr_referenced_p (src, NULL));
+
if (!check_const || CONSTANT_P (src))
{
if (regno)
if (dominated_by_p (CDI_DOMINATORS, new_bb, head_bb))
*tos++ = new_bb;
- else if (dominated_by_p (CDI_DOMINATORS, head_bb, new_bb))
+ else
{
edge e;
int new_block = new_bb->index;
+
+ gcc_assert (dominated_by_p (CDI_DOMINATORS, head_bb, new_bb));
+
bitmap_set_bit (live_range, new_block);
if (flag_btr_bb_exclusive)
IOR_HARD_REG_SET (*btrs_live_in_range, btrs_live[new_block]);
for (e = head_bb->pred; e; e = e->pred_next)
*tos++ = e->src;
}
- else
- abort();
while (tos != worklist)
{
{
insp = BB_END (b);
for (insp = BB_END (b); ! INSN_P (insp); insp = PREV_INSN (insp))
- if (insp == BB_HEAD (b))
- abort ();
+ gcc_assert (insp != BB_HEAD (b));
+
if (JUMP_P (insp) || can_throw_internal (insp))
insp = PREV_INSN (insp);
}
HOST_WIDE_INT ch;
unsigned int i, j;
- if (GET_MODE_CLASS (mode) != MODE_INT)
- abort ();
+ gcc_assert (GET_MODE_CLASS (mode) == MODE_INT);
+
c[0] = 0;
c[1] = 0;
ch = 1;
&& GET_MODE_SIZE (mode) > UNITS_PER_WORD)
j = j + UNITS_PER_WORD - 2 * (j % UNITS_PER_WORD) - 1;
j *= BITS_PER_UNIT;
- if (j > 2 * HOST_BITS_PER_WIDE_INT)
- abort ();
+ gcc_assert (j <= 2 * HOST_BITS_PER_WIDE_INT);
+
if (ch)
ch = (unsigned char) str[i];
c[j / HOST_BITS_PER_WIDE_INT] |= ch << (j % HOST_BITS_PER_WIDE_INT);
a second argument of 1, because that is what builtin_setjmp will
return. This also makes EH slightly more efficient, since we are no
longer copying around a value that we don't care about. */
- if (value != const1_rtx)
- abort ();
+ gcc_assert (value == const1_rtx);
current_function_calls_longjmp = 1;
internal exception handling use only. */
for (insn = get_last_insn (); insn; insn = PREV_INSN (insn))
{
- if (insn == last)
- abort ();
+ gcc_assert (insn != last);
+
if (JUMP_P (insn))
{
REG_NOTES (insn) = alloc_EXPR_LIST (REG_NON_LOCAL_GOTO, const0_rtx,
{
mode = reg_raw_mode[regno];
- if (mode == VOIDmode)
- abort ();
+ gcc_assert (mode != VOIDmode);
align = GET_MODE_ALIGNMENT (mode) / BITS_PER_UNIT;
if (size % align != 0)
{
mode = reg_raw_mode[regno];
- if (mode == VOIDmode)
- abort ();
+ gcc_assert (mode != VOIDmode);
align = GET_MODE_ALIGNMENT (mode) / BITS_PER_UNIT;
if (size % align != 0)
for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
if ((mode = apply_result_mode[regno]) != VOIDmode)
{
- if (valreg)
- abort (); /* HAVE_untyped_call required. */
+ gcc_assert (!valreg); /* HAVE_untyped_call required. */
+
valreg = gen_rtx_REG (mode, regno);
}
}
else
#endif
- abort ();
+ gcc_unreachable ();
/* Find the CALL insn we just emitted, and attach the register usage
information. */
case BUILT_IN_NEARBYINTL:
builtin_optab = nearbyint_optab; break;
default:
- abort ();
+ gcc_unreachable ();
}
/* Make a suitable register to place result in. */
case BUILT_IN_DREML:
builtin_optab = drem_optab; break;
default:
- abort ();
+ gcc_unreachable ();
}
/* Make a suitable register to place result in. */
case BUILT_IN_COSL:
builtin_optab = sincos_optab; break;
default:
- abort ();
+ gcc_unreachable ();
}
/* Make a suitable register to place result in. */
case BUILT_IN_COSL:
builtin_optab = cos_optab; break;
default:
- abort();
+ gcc_unreachable ();
}
}
Set TARGET to wherever the result comes back. */
if (builtin_optab == sincos_optab)
{
+ int result;
+
switch (DECL_FUNCTION_CODE (fndecl))
{
case BUILT_IN_SIN:
case BUILT_IN_SINF:
case BUILT_IN_SINL:
- if (!expand_twoval_unop (builtin_optab, op0, 0, target, 0))
- abort();
+ result = expand_twoval_unop (builtin_optab, op0, 0, target, 0);
break;
case BUILT_IN_COS:
case BUILT_IN_COSF:
case BUILT_IN_COSL:
- if (!expand_twoval_unop (builtin_optab, op0, target, 0, 0))
- abort();
+ result = expand_twoval_unop (builtin_optab, op0, target, 0, 0);
break;
default:
- abort();
+ gcc_unreachable ();
}
+ gcc_assert (result);
}
else
{
{
const char *str = (const char *) data;
- if (offset < 0
- || ((unsigned HOST_WIDE_INT) offset + GET_MODE_SIZE (mode)
- > strlen (str) + 1))
- abort (); /* Attempt to read past the end of constant string. */
+ gcc_assert (offset >= 0
+ && ((unsigned HOST_WIDE_INT) offset + GET_MODE_SIZE (mode)
+ <= strlen (str) + 1));
return c_readstr (str + offset, mode);
}
insn = data->genfun (end, dest_mem, src_mem);
- if (insn == 0)
- abort ();
+ gcc_assert (insn);
emit_insn (insn);
ret = emit_move_insn (target,
plus_constant (ret,
INTVAL (len_rtx)));
- if (! ret)
- abort ();
+ gcc_assert (ret);
return target;
}
GEN_INT (MIN (arg1_align, arg2_align)));
else
#endif
- abort ();
+ gcc_unreachable ();
if (insn)
emit_insn (insn);
int nwords = sizeof (CUMULATIVE_ARGS) / sizeof (int);
int *word_ptr = (int *) &current_function_args_info;
- if (sizeof (CUMULATIVE_ARGS) % sizeof (int) != 0)
- abort ();
+ gcc_assert (sizeof (CUMULATIVE_ARGS) % sizeof (int) == 0);
if (arglist != 0)
{
/* All of the alignment and movement below is for args-grow-up machines.
As of 2004, there are only 3 ARGS_GROW_DOWNWARD targets, and they all
implement their own specialized gimplify_va_arg_expr routines. */
- abort ();
+ gcc_unreachable ();
#endif
indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
Set TARGET to wherever the result comes back. */
target = expand_unop (TYPE_MODE (TREE_TYPE (TREE_VALUE (arglist))),
op_optab, op0, target, 1);
- if (target == 0)
- abort ();
-
+ gcc_assert (target);
+
return convert_to_mode (target_mode, target, 0);
}
break;
}
default:
- abort ();
+ gcc_unreachable ();
}
return expand_expr (build_function_call_expr (fn, arglist),
rtx this, which;
this = DECL_RTL (current_function_decl);
- if (MEM_P (this))
- this = XEXP (this, 0);
- else
- abort ();
+ gcc_assert (MEM_P (this));
+ this = XEXP (this, 0);
if (exitp)
which = profile_function_exit_libfunc;
break;
default:
- abort ();
+ gcc_unreachable ();
}
decl = build_decl (FUNCTION_DECL, id, TREE_TYPE (fn));
break;
default:
- abort();
+ gcc_unreachable ();
}
return build_int_cst (TREE_TYPE (exp), result);
return fold (build2 (UNORDERED_EXPR, type, arg, arg));
default:
- abort ();
+ gcc_unreachable ();
}
}
break;
}
default:
- abort ();
+ gcc_unreachable ();
}
/* These optimizations are only performed when the result is ignored,