+2004-09-09 Giovanni Bajo <giovannibajo@gcc.gnu.org>
+
+ * ra-build.c (copy_insn_p, remember_move, defuse_overlap_p_1,
+ live_out_1, prune_hardregs_for_mode, init_one_web_common,
+ reinit_one_web, add_subweb, add_subweb_2, init_web_parts,
+ record_conflict, compare_and_free_webs, init_webs_defs_uses,
+ parts_to_webs_1, parts_to_webs, reset_conflicts,
+ check_conflict_numbers, remember_web_was_spilled, handle_asm_insn,
+ ra_build_free): Use gcc_assert and gcc_unreachable instead of abort.
+ * ra-colorize.c (push_list, put_web, reset_lists, put_web_at_end,
+ put_move, remove_move, combine, select_spill, colorize_one_web,
+ try_recolor_web, insert_coalesced_conflicts, check_colors,
+ break_precolored_alias, restore_conflicts_from_coalesce,
+ sort_and_combine_web_pairs, check_uncoalesced_moves): Likewise.
+ * ra-rewrite.c (spill_coalescing, slots_overlap_p, emit_loads,
+ reloads_to_loads, rewrite_program2, emit_colors): Likewise.
+ * ra.c (first_hard_reg, create_insn_info, find_subweb, init_ra,
+ check_df): Likewise.
+ * real.c (do_add, do_multiply, do_divide, do_compare, do_fix_trunc,
+ real_arithmetic, real_compare, real_exponent, real_ldexp,
+ real_identical, real_to_integer, real_to_integer2, real_to_decimal,
+ real_to_hexadecimal, real_from_integer, ten_to_ptwo, ten_to_mptwo,
+ real_digit, real_nan, real_maxval, round_for_format, real_convert,
+ real_to_target, real_from_target, real_hash, encode_ieee_single,
+ encode_ieee_double, encode_ieee_extended, encode_ieee_quad,
+ encode_vax_f, encode_vax_d, encode_vax_g, encode_i370_single,
+ encode_i370_double, encode_c4x_single, encode_c4x_extended): Likewise.
+ * recog.c (validate_change, validate_replace_rtx_1, asm_operand_ok,
+ extract_insn, peep2_next_insn, peep2_reg_dead_p,
+ peep2_find_free_register, peephole2_optimize, store_data_bypass_p,
+ if_test_bypass_p): Likewise.
+ * reg-stack.c (record_label_references, get_asm_operand_n_inputs,
+ stack_result, remove_regno_note, get_hard_regnum, emit_pop_insn,
+ emit_swap_insn, swap_to_top, move_for_stack_reg,
+ subst_stack_regs_pat, subst_asm_stack_regs, change_stack,
+ compensate_edge, convert_regs_1): Likewise.
+ * regclass.c (init_reg_sets, init_reg_sets_1,
+ memory_move_secondary_cost): Likewise.
+ * regrename.c (note_sets, clear_dead_regs, scan_rtx_reg, scan_rtx):
+ Likewise.
+ * reload.c (push_secondary_reload, find_valid_class, push_reload,
+ operands_match_p, decompose, immune_p, find_reloads,
+ find_reloads_toplev, find_reloads_address_1, subst_reloads,
+ copy_replacements, refers_to_regno_for_reload_p,
+ reg_overlap_mentioned_for_reload_p): Likewise.
+ * reload1.c (compute_use_by_pseudos, replace_pseudos_in, reload,
+ count_pseudo, find_reg, eliminate_regs, eliminate_regs_in_insn,
+ verify_initial_elim_offsets, finish_spills, clear_reload_reg_in_use,
+ reload_reg_free_p, reload_reg_reaches_end_p, reloads_conflict,
+ choose_reload_regs, merge_assigned_reloads, emit_input_reload_insns,
+ do_output_reload, fixup_abnormal_edges): Likewise.
+ * reorg.c (stop_search_p, emit_delay_sequence, get_jump_flags,
+ fill_slots_from_thread, relax_delay_slots): Likewise.
+ * resource.c (mark_referenced_resources, mark_set_resources):
+ Likewise.
+ * rtl.c (copy_rtx, rtx_equal_p): Likewise.
+ * rtlanal.c (insn_dependent_p, reg_overlap_mentioned_p,
+ dead_or_set_p, find_reg_fusage, remove_note, replace_rtx,
+ subreg_lsb_1, subreg_regno_offset, subreg_offset_representable_p,
+ find_first_parameter_load, can_hoist_insn_p, hoist_update_store,
+ hoist_insn_after, hoist_insn_to_edge, nonzero_bits1): Likewise.
+ * rtlhooks.c (gen_lowpart_general): Likewise.
+ * sbitmap.c (sbitmap_difference): Likewise.
+ * sched-deps.c (add_dependence, sched_analyze_1, sched_analyze_2,
+ sched_analyze, add_forward_dependence): Likewise.
+ * sched-ebb.c (fix_basic_block_boundaries, schedule_ebb): Likewise.
+ * sched-rgn.c (debug_regions, compute_trg_info, schedule_region,
+ schedule_insns): Likewise.
+ * sched-vis.c (print_pattern): Likewise.
+ * sdbout.c (sdbout_symbol, sdbout_toplevel_data): Likewise.
+ * simplify-rtx.c (simplify_unary_operation, simplify_binary_operation,
+ simplify_const_relational_operation, simplify_ternary_operation,
+ simplify_immed_subreg, simplify_subreg, simplify_gen_subreg):
+ Likewise.
+ * sreal.c (copy, sreal_sub, sreal_div): Likewise.
+ * stmt.c (force_label_rtx, expand_goto, expand_asm_operands,
+ resolve_operand_name_1, expand_return, expand_decl,
+ expand_anon_union_decl, emit_case_bit_tests, expand_case): Likewise.
+ * stor-layout.c (put_pending_size, smallest_mode_for_size,
+ int_mode_for_mode, layout_decl, finish_builtin_struct, layout_type,
+ set_sizetype, get_mode_bounds): Likewise.
+
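Every hunk below applies the same mechanical idiom: a guarded `if (cond) abort ();` becomes `gcc_assert (!cond);`, and an abort in a branch that should never execute becomes `gcc_unreachable ();`. A minimal compilable sketch of the idiom follows; the two macros here only approximate GCC's definitions in system.h (the real ones route through fancy_abort), and sign/step are invented examples, not code from this patch.

#include <stdio.h>
#include <stdlib.h>

/* Rough stand-ins for the macros in GCC's system.h.  */
#define gcc_assert(EXPR) \
  ((EXPR) ? (void) 0 : (fprintf (stderr, "assert failed: %s\n", #EXPR), \
                        abort ()))
#define gcc_unreachable() (abort ())

enum dir { UP, DOWN };

static int
step (enum dir d)
{
  switch (d)
    {
    case UP:
      return 1;
    case DOWN:
      return -1;
    default:
      gcc_unreachable ();       /* was: abort ();  */
    }
}

static int
sign (int x)
{
  gcc_assert (x != 0);          /* was: if (x == 0) abort ();  */
  return x > 0 ? step (UP) : step (DOWN);
}

int
main (void)
{
  return sign (3) == 1 && sign (-7) == -1 ? 0 : 1;
}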
2004-09-09 Zack Weinberg <zack@codesourcery.com>
* defaults.h (MULTIPLE_SYMBOL_SPACES): Provide default.
* config/ia64/ia64.c (ia64_gimplify_va_arg): Ditto.
* tree.h: Declare new function.
-2004-09-08 Nathan Sidwell <nathan@codesourcery.com>
+2004-09-09 Nathan Sidwell <nathan@codesourcery.com>
* cgraphunit.c (cgraph_mark_functions_to_output): Re-enable node
dumping for development builds.
* tree.c (iterative_hash_expr): Replace gcc_unreachable with
gcc_assert.
-2004-09-08 Nathan Sidwell <nathan@codesourcery.com>
+2004-09-09 Nathan Sidwell <nathan@codesourcery.com>
* gcse.c (INSN_CUID, insert_set_in_table, find_avail_set,
cprop_insn, do_local_cprop, local_cprop_pass, find_bypass_set,
unsigned int d_regno, s_regno;
int uid = INSN_UID (insn);
- if (!INSN_P (insn))
- abort ();
+ gcc_assert (INSN_P (insn));
/* First look, if we already saw this insn. */
if (copy_cache[uid].seen)
if (!TEST_BIT (move_handled, INSN_UID (insn)))
{
rtx s, d;
+ int ret;
+ struct df_link *slink = DF_INSN_USES (df, insn);
+ struct df_link *link = DF_INSN_DEFS (df, insn);
+
SET_BIT (move_handled, INSN_UID (insn));
- if (copy_insn_p (insn, &s, &d))
- {
- /* Some sanity test for the copy insn. */
- struct df_link *slink = DF_INSN_USES (df, insn);
- struct df_link *link = DF_INSN_DEFS (df, insn);
- if (!link || !link->ref || !slink || !slink->ref)
- abort ();
- /* The following (link->next != 0) happens when a hardreg
- is used in wider mode (REG:DI %eax). Then df.* creates
- a def/use for each hardreg contained therein. We only
- allow hardregs here. */
- if (link->next
- && DF_REF_REGNO (link->next->ref) >= FIRST_PSEUDO_REGISTER)
- abort ();
- }
- else
- abort ();
+ ret = copy_insn_p (insn, &s, &d);
+ gcc_assert (ret);
+
+ /* Some sanity test for the copy insn. */
+ gcc_assert (link && link->ref);
+ gcc_assert (slink && slink->ref);
+ /* The following (link->next != 0) happens when a hardreg
+ is used in wider mode (REG:DI %eax). Then df.* creates
+ a def/use for each hardreg contained therein. We only
+ allow hardregs here. */
+ gcc_assert (!link->next
+ || DF_REF_REGNO (link->next->ref)
+ < FIRST_PSEUDO_REGISTER);
+
/* XXX for now we don't remember move insns involving any subregs.
Those would be difficult to coalesce (we would need to implement
handling of all the subwebs in the allocator, including that such
return (old_u != use->undefined) ? 4 : -1;
}
default:
- abort ();
+ gcc_unreachable ();
}
}
{
/* If this insn doesn't completely define the USE, increment also
it's spanned deaths count (if this insn contains a death). */
- if (uid >= death_insns_max_uid)
- abort ();
+ gcc_assert (uid < death_insns_max_uid);
if (TEST_BIT (insns_with_deaths, uid))
wp->spanned_deaths++;
use->undefined = final_undef;
static void
init_one_web_common (struct web *web, rtx reg)
{
- if (!REG_P (reg))
- abort ();
+ gcc_assert (REG_P (reg));
/* web->id isn't initialized here. */
web->regno = REGNO (reg);
web->orig_x = reg;
#endif
web->num_freedom = hard_regs_count (web->usable_regs);
web->num_freedom -= web->add_hardregs;
- if (!web->num_freedom)
- abort();
+ gcc_assert (web->num_freedom);
}
COPY_HARD_REG_SET (web->orig_usable_regs, web->usable_regs);
}
web->stack_slot = NULL;
web->pattern = NULL;
web->alias = NULL;
- if (web->moves)
- abort ();
- if (!web->useless_conflicts)
- abort ();
+ gcc_assert (!web->moves);
+ gcc_assert (web->useless_conflicts);
}
/* Insert and returns a subweb corresponding to REG into WEB (which
add_subweb (struct web *web, rtx reg)
{
struct web *w;
- if (GET_CODE (reg) != SUBREG)
- abort ();
+ gcc_assert (GET_CODE (reg) == SUBREG);
w = xmalloc (sizeof (struct web));
/* Copy most content from parent-web. */
*w = *web;
mode = mode_for_size (size, GET_MODE_CLASS (GET_MODE (ref_rtx)), 0);
if (mode == BLKmode)
mode = mode_for_size (size, MODE_INT, 0);
- if (mode == BLKmode)
- abort ();
+ gcc_assert (mode != BLKmode);
web = add_subweb (web, gen_rtx_SUBREG (mode, web->orig_x,
BYTE_BEGIN (size_word)));
web->artificial = 1;
{
if (df->defs[no])
{
- if (no < last_def_id && web_parts[no].ref != df->defs[no])
- abort ();
+ gcc_assert (no >= last_def_id || web_parts[no].ref == df->defs[no]);
web_parts[no].ref = df->defs[no];
/* Uplink might be set from the last iteration. */
if (!web_parts[no].uplink)
{
if (df->uses[no])
{
- if (no < last_use_id
- && web_parts[no + df->def_id].ref != df->uses[no])
- abort ();
+ gcc_assert (no >= last_use_id
+ || web_parts[no + df->def_id].ref == df->uses[no]);
web_parts[no + df->def_id].ref = df->uses[no];
if (!web_parts[no + df->def_id].uplink)
num_webs++;
copy_conflict_list (struct web *web)
{
struct conflict_link *cl;
- if (web->orig_conflict_list || web->have_orig_conflicts)
- abort ();
+ gcc_assert (!web->orig_conflict_list);
+ gcc_assert (!web->have_orig_conflicts);
web->have_orig_conflicts = 1;
for (cl = web->conflict_list; cl; cl = cl->next)
{
/* Trivial non-conflict or already recorded conflict. */
if (web1 == web2 || TEST_BIT (igraph, index))
return;
- if (id1 == id2)
- abort ();
+ gcc_assert (id1 != id2);
/* As fixed_regs are no targets for allocation, conflicts with them
are pointless. */
if ((web1->regno < FIRST_PSEUDO_REGISTER && fixed_regs[web1->regno])
{
struct web *web1 = wl->web;
struct web *web2 = ID2WEB (web1->id);
- if (web1->regno != web2->regno
- || web1->mode_changed != web2->mode_changed
- || !rtx_equal_p (web1->orig_x, web2->orig_x)
- || web1->type != web2->type
- /* Only compare num_defs/num_uses with non-hardreg webs.
- E.g. the number of uses of the framepointer changes due to
- inserting spill code. */
- || (web1->type != PRECOLORED
- && (web1->num_uses != web2->num_uses
- || web1->num_defs != web2->num_defs))
- /* Similarly, if the framepointer was unreferenced originally
- but we added spills, these fields may not match. */
- || (web1->type != PRECOLORED
- && web1->crosses_call != web2->crosses_call)
- || (web1->type != PRECOLORED
- && web1->live_over_abnormal != web2->live_over_abnormal))
- abort ();
+ gcc_assert (web1->regno == web2->regno);
+ gcc_assert (web1->mode_changed == web2->mode_changed);
+ gcc_assert (rtx_equal_p (web1->orig_x, web2->orig_x));
+ gcc_assert (web1->type == web2->type);
if (web1->type != PRECOLORED)
{
unsigned int i;
+
+ /* Only compare num_defs/num_uses with non-hardreg webs.
+ E.g. the number of uses of the framepointer changes due to
+ inserting spill code. */
+ gcc_assert (web1->num_uses == web2->num_uses);
+ gcc_assert (web1->num_defs == web2->num_defs);
+ /* Similarly, if the framepointer was unreferenced originally
+ but we added spills, these fields may not match. */
+ gcc_assert (web1->crosses_call == web2->crosses_call);
+ gcc_assert (web1->live_over_abnormal == web2->live_over_abnormal);
for (i = 0; i < web1->num_defs; i++)
- if (web1->defs[i] != web2->defs[i])
- abort ();
+ gcc_assert (web1->defs[i] == web2->defs[i]);
for (i = 0; i < web1->num_uses; i++)
- if (web1->uses[i] != web2->uses[i])
- abort ();
+ gcc_assert (web1->uses[i] == web2->uses[i]);
}
if (web1->type == PRECOLORED)
{
web->uses[use_i++] = link->ref;
}
web->temp_refs = NULL;
- if (def_i != web->num_defs || use_i != web->num_uses)
- abort ();
+ gcc_assert (def_i == web->num_defs);
+ gcc_assert (use_i == web->num_uses);
}
}
web->id = newid;
web->temp_refs = NULL;
webnum++;
- if (web->regno < FIRST_PSEUDO_REGISTER && !hardreg2web[web->regno])
- hardreg2web[web->regno] = web;
- else if (web->regno < FIRST_PSEUDO_REGISTER
- && hardreg2web[web->regno] != web)
- abort ();
+ if (web->regno < FIRST_PSEUDO_REGISTER)
+ {
+ if (!hardreg2web[web->regno])
+ hardreg2web[web->regno] = web;
+ else
+ gcc_assert (hardreg2web[web->regno] == web);
+ }
}
/* If this reference already had a web assigned, we are done.
web->live_over_abnormal = 1;
/* And check, that it's not a newly allocated web. This would be
an inconsistency. */
- if (!web->old_web || web->type == PRECOLORED)
- abort ();
+ gcc_assert (web->old_web);
+ gcc_assert (web->type != PRECOLORED);
continue;
}
/* In case this was no web part root, we need to initialize WEB
/* And the test, that if def2web[i] was NULL above, that we are _not_
an old web. */
- if (web->old_web && web->type != PRECOLORED)
- abort ();
+ gcc_assert (!web->old_web || web->type == PRECOLORED);
/* Possible create a subweb, if this ref was a subreg. */
if (GET_CODE (reg) == SUBREG)
if (!subweb)
{
subweb = add_subweb (web, reg);
- if (web->old_web)
- abort ();
+ gcc_assert (!web->old_web);
}
}
else
{
struct web *compare = def2web[i];
if (i < last_def_id)
- {
- if (web->old_web && compare != subweb)
- abort ();
- }
- if (!web->old_web && compare)
- abort ();
- if (compare && compare != subweb)
- abort ();
+ gcc_assert (!web->old_web || compare == subweb);
+ gcc_assert (web->old_web || !compare);
+ gcc_assert (!compare || compare == subweb);
}
def2web[i] = subweb;
web->num_defs++;
if (ra_pass > 1)
{
struct web *compare = use2web[ref_id];
- if (ref_id < last_use_id)
- {
- if (web->old_web && compare != subweb)
- abort ();
- }
- if (!web->old_web && compare)
- abort ();
- if (compare && compare != subweb)
- abort ();
+
+ gcc_assert (ref_id >= last_use_id
+ || !web->old_web || compare == subweb);
+ gcc_assert (web->old_web || !compare);
+ gcc_assert (!compare || compare == subweb);
}
use2web[ref_id] = subweb;
web->num_uses++;
}
/* We better now have exactly as many webs as we had web part roots. */
- if (webnum != num_webs)
- abort ();
+ gcc_assert (webnum == num_webs);
return webnum;
}
struct web *web;
if (wp->uplink || !wp->ref)
{
- if (wp->sub_conflicts)
- abort ();
+ gcc_assert (!wp->sub_conflicts);
continue;
}
web = def2web[i];
web->conflict_list = web->orig_conflict_list;
web->orig_conflict_list = NULL;
}
- if (web->orig_conflict_list)
- abort ();
+ gcc_assert (!web->orig_conflict_list);
/* New non-precolored webs, have no conflict list. */
if (web->type != PRECOLORED && !web->old_web)
/* Useless conflicts will be rebuilt completely. But check
for cleanliness, as the web might have come from the
free list. */
- if (bitmap_first_set_bit (web->useless_conflicts) >= 0)
- abort ();
+ gcc_assert (bitmap_first_set_bit (web->useless_conflicts) < 0);
}
else
{
for (cl = web->conflict_list; cl; cl = cl->next)
if (cl->t->type != SELECT && cl->t->type != COALESCED)
new_conf += 1 + cl->t->add_hardregs;
- if (web->type != PRECOLORED && new_conf != web->num_conflicts)
- abort ();
+ gcc_assert (web->type == PRECOLORED || new_conf == web->num_conflicts);
}
}
#endif
AND_COMPL_HARD_REG_SET (web->usable_regs, invalid_mode_change_regs);
#endif
web->num_freedom = hard_regs_count (web->usable_regs);
- if (!web->num_freedom)
- abort();
+ gcc_assert (web->num_freedom);
COPY_HARD_REG_SET (web->orig_usable_regs, web->usable_regs);
/* Now look for a class, which is subset of our constraints, to
setup add_hardregs, and regclass for debug output. */
web->add_hardregs =
CLASS_MAX_NREGS (web->regclass, PSEUDO_REGNO_MODE (web->regno)) - 1;
web->num_freedom -= web->add_hardregs;
- if (!web->num_freedom)
- abort();
+ gcc_assert (web->num_freedom);
adjust -= 0 * web->add_hardregs;
web->num_conflicts -= adjust;
}
link = link->next;
if (!link || !link->ref)
{
- if (in_output)
- in_output = 0;
- else
- abort ();
+ gcc_assert (in_output);
+ in_output = 0;
}
else
break;
for (i = 0; i < num_webs; i++)
{
struct web *web = ID2WEB (i);
- if (!web)
- abort ();
- if (i >= num_webs - num_subwebs
- && (web->conflict_list || web->orig_conflict_list))
- abort ();
+ gcc_assert (web);
+ gcc_assert (i < num_webs - num_subwebs
+ || (!web->conflict_list && !web->orig_conflict_list));
web->moves = NULL;
}
/* All webs in the free list have no defs or uses anymore. */
static void
push_list (struct dlist *x, struct dlist **list)
{
- if (x->next || x->prev)
- abort ();
+ gcc_assert (!x->next);
+ gcc_assert (!x->prev);
x->next = *list;
if (*list)
(*list)->prev = x;
static void
push_list_end (struct dlist *x, struct dlist **list)
{
- if (x->prev || x->next)
- abort ();
+ gcc_assert (!x->prev);
+ gcc_assert (!x->next);
if (!*list)
{
*list = x;
push_list (web->dlink, &WEBS(SIMPLIFY));
break;
default:
- abort ();
+ gcc_unreachable ();
}
web->type = type;
}
{
struct dlist *d;
unsigned int i;
- if (WEBS(SIMPLIFY) || WEBS(SIMPLIFY_SPILL) || WEBS(SIMPLIFY_FAT)
- || WEBS(FREEZE) || WEBS(SPILL) || WEBS(SELECT))
- abort ();
+
+ gcc_assert (!WEBS(SIMPLIFY));
+ gcc_assert (!WEBS(SIMPLIFY_SPILL));
+ gcc_assert (!WEBS(SIMPLIFY_FAT));
+ gcc_assert (!WEBS(FREEZE));
+ gcc_assert (!WEBS(SPILL));
+ gcc_assert (!WEBS(SELECT));
while ((d = pop_list (&WEBS(COALESCED))) != NULL)
{
web->useless_conflicts = NULL;
}
- /* Sanity check, that we only have free, initial or precolored webs. */
+#ifdef ENABLE_CHECKING
+ /* Sanity check, that we only have free, initial or precolored webs. */
for (i = 0; i < num_webs; i++)
{
struct web *web = ID2WEB (i);
- if (web->type != INITIAL && web->type != FREE && web->type != PRECOLORED)
- abort ();
+
+ gcc_assert (web->type == INITIAL || web->type == FREE
+ || web->type == PRECOLORED);
}
+#endif
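The reset_lists hunk above shows the one asymmetry in the conversion: individual invariants become bare gcc_assert calls, but the whole-array sanity walk gains an #ifdef ENABLE_CHECKING fence so non-checking builds do not pay for the O(num_webs) loop; a disabled gcc_assert would skip each test but still leave the loop in place. A hedged sketch of that split, with check_web_types and the enum as illustrative stand-ins rather than GCC's types:

#include <stdlib.h>

#define gcc_assert(EXPR) ((EXPR) ? (void) 0 : abort ())

enum web_type { INITIAL, FREE, PRECOLORED, COLORED };

/* Cheap invariants stay as bare asserts; the O(num_webs) walk is
   compiled out of non-checking builds entirely.  */
static void
check_web_types (const enum web_type *types, unsigned int num_webs)
{
  gcc_assert (types != NULL);
#ifdef ENABLE_CHECKING
  {
    unsigned int i;
    for (i = 0; i < num_webs; i++)
      gcc_assert (types[i] == INITIAL || types[i] == FREE
                  || types[i] == PRECOLORED);
  }
#else
  (void) num_webs;
#endif
}

int
main (void)
{
  enum web_type t[3] = { INITIAL, FREE, PRECOLORED };
  check_web_types (t, 3);
  return 0;
}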
free_dlist (&mv_worklist);
free_dlist (&mv_coalesced);
free_dlist (&mv_constrained);
{
if (type == PRECOLORED)
type = INITIAL;
- else if (type == SIMPLIFY)
- abort ();
+ else
+ gcc_assert (type != SIMPLIFY);
push_list_end (web->dlink, &WEBS(type));
web->type = type;
}
push_list (move->dlink, &mv_active);
break;
default:
- abort ();
+ gcc_unreachable ();
}
move->type = type;
}
struct move_list *ml;
remove_move_1 (web, move);
for (ml = web->moves; ml; ml = ml->next)
- if (ml->move == move)
- abort ();
+ gcc_assert (ml->move != move);
}
/* Merge the moves for the two webs into the first web's movelist. */
{
int i;
struct conflict_link *wl;
- if (u == v || v->type == COALESCED)
- abort ();
- if ((u->regno >= max_normal_pseudo) != (v->regno >= max_normal_pseudo))
- abort ();
+ gcc_assert (u != v);
+ gcc_assert (v->type != COALESCED);
+ gcc_assert ((u->regno >= max_normal_pseudo)
+ == (v->regno >= max_normal_pseudo));
remove_web_from_list (v);
put_web (v, COALESCED);
v->alias = u;
conflicts. */
u->num_freedom = hard_regs_count (u->usable_regs);
u->num_freedom -= u->add_hardregs;
- /* The next would mean an invalid coalesced move (both webs have no
- possible hardreg in common), so abort. */
- if (!u->num_freedom)
- abort();
+ /* The assert below catches an invalid coalesced move (both webs
+ must have a possible hardreg in common). */
+ gcc_assert (u->num_freedom);
if (u->num_conflicts >= NUM_REGS (u)
&& (u->type == FREEZE || simplify_p (u->type)))
bestd = bestd2;
best = best2;
}
- if (!bestd)
- abort ();
+ gcc_assert (bestd);
/* Note the potential spill. */
DLIST_WEB (bestd)->was_spilled = 1;
if (c < 0)
{
/* Guard against a simplified node being spilled. */
- /* Don't abort. This can happen, when e.g. enough registers
+ /* Don't assert. This can happen when, e.g., enough registers
are available in colors, but they are not consecutive. This is a
very serious issue if this web is a short live one, because
even if we spill this one here, the situation won't become better
again. That's why we try to find a neighbor which spans more
instructions than ourselves, and got a color, and try to spill _that_.
- if (DLIST_WEB (d)->was_spilled < 0)
- abort (); */
+ gcc_assert (DLIST_WEB (d)->was_spilled >= 0); */
if (hard && (!web->was_spilled || web->spill_temp))
{
unsigned int loop;
int old_c = try->color;
if (try->type == COALESCED)
{
- if (alias (try)->type != PRECOLORED)
- abort ();
+ gcc_assert (alias (try)->type == PRECOLORED);
ra_debug_msg (DUMP_COLORIZE, " breaking alias %d -> %d\n",
try->id, alias (try)->id);
break_precolored_alias (try);
above what happens, when wide webs are involved, and why in that
case there might actually be some webs spilled although thought to
be colorable. */
- if (cost > cost_neighbors[newcol]
- && nregs == 1 && !TEST_HARD_REG_BIT (wide_seen, newcol))
- abort ();
+ gcc_assert (cost <= cost_neighbors[newcol]
+ || nregs != 1 || TEST_HARD_REG_BIT (wide_seen, newcol));
/* But if the new spill-cost is higher than our own, then really loose.
Respill us and recolor neighbors as before. */
if (cost > web->spill_cost)
struct web *web2 = alias (wl->t);
if (old_colors[web2->id])
{
- if (web2->type == SPILLED)
+ switch (web2->type)
{
+ case SPILLED:
remove_list (web2->dlink, &WEBS(SPILLED));
web2->color = old_colors[web2->id] - 1;
put_web (web2, COLORED);
+ break;
+ case COLORED:
+ web2->color = old_colors[web2->id] - 1;
+ break;
+ case SELECT:
+ /* This means, that WEB2 once was a part of a coalesced
+ web, which got spilled in the above colorize_one_web()
+ call, and whose parts then got split and put back
+ onto the SELECT stack. As the cause for that splitting
+ (the coloring of WEB) was worthless, we should again
+ coalesce the parts, as they were before. For now we
+ simply leave them SELECTed, for our caller to take
+ care. */
+ break;
+ default:
+ gcc_unreachable ();
}
- else if (web2->type == COLORED)
- web2->color = old_colors[web2->id] - 1;
- else if (web2->type == SELECT)
- /* This means, that WEB2 once was a part of a coalesced
- web, which got spilled in the above colorize_one_web()
- call, and whose parts then got split and put back
- onto the SELECT stack. As the cause for that splitting
- (the coloring of WEB) was worthless, we should again
- coalesce the parts, as they were before. For now we
- simply leave them SELECTed, for our caller to take
- care. */
- ;
- else
- abort ();
}
}
}
when first some webs were coalesced and conflicts
propagated, then some combining narrowed usable_regs and
further coalescing ignored those conflicts. Now there are
- some edges to COALESCED webs but not to it's alias.
- So abort only when they really should conflict. */
- if ((!(tweb->type == PRECOLORED
- || TEST_BIT (sup_igraph, tweb->id * num_webs + wl->t->id))
- || !(wl->t->type == PRECOLORED
- || TEST_BIT (sup_igraph,
- wl->t->id * num_webs + tweb->id)))
- && hard_regs_intersect_p (&tweb->usable_regs,
- &wl->t->usable_regs))
- abort ();
+ some edges to COALESCED webs but not to its alias.
+ So assert they really do not conflict. */
+ gcc_assert (((tweb->type == PRECOLORED
+ || TEST_BIT (sup_igraph,
+ tweb->id * num_webs + wl->t->id))
+ && (wl->t->type == PRECOLORED
+ || TEST_BIT (sup_igraph,
+ wl->t->id * num_webs + tweb->id)))
+ || !hard_regs_intersect_p (&tweb->usable_regs,
+ &wl->t->usable_regs));
/*if (wl->sub == NULL)
record_conflict (tweb, wl->t);
else
struct web *aweb = alias (web);
struct conflict_link *wl;
int nregs, c;
- if (aweb->type == SPILLED || web->regno >= max_normal_pseudo)
+
+ if (web->regno >= max_normal_pseudo)
continue;
- else if (aweb->type == COLORED)
- nregs = hard_regno_nregs[aweb->color][GET_MODE (web->orig_x)];
- else if (aweb->type == PRECOLORED)
- nregs = 1;
- else
- abort ();
+
+ switch (aweb->type)
+ {
+ case SPILLED:
+ continue;
+
+ case COLORED:
+ nregs = hard_regno_nregs[aweb->color][GET_MODE (web->orig_x)];
+ break;
+
+ case PRECOLORED:
+ nregs = 1;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+#ifdef ENABLE_CHECKING
/* The color must be valid for the original usable_regs. */
for (c = 0; c < nregs; c++)
- if (!TEST_HARD_REG_BIT (web->usable_regs, aweb->color + c))
- abort ();
+ gcc_assert (TEST_HARD_REG_BIT (web->usable_regs, aweb->color + c));
+#endif
/* Search the original (pre-coalesce) conflict list. In the current
one some imprecise conflicts may be noted (due to combine() or
insert_coalesced_conflicts() relocating partial conflicts) making
nregs2 = 1;
else
continue;
- if (aweb->color >= web2->color + nregs2
- || web2->color >= aweb->color + nregs)
- continue;
- abort ();
+ gcc_assert (aweb->color >= web2->color + nregs2
+ || web2->color >= aweb->color + nregs);
+ continue;
}
else
{
&& GET_MODE_SIZE (GET_MODE (sl->s->orig_x))
>= UNITS_PER_WORD)
sofs = (SUBREG_BYTE (sl->s->orig_x) / UNITS_PER_WORD);
- if ((tcol + tofs >= scol + sofs + ssize)
- || (scol + sofs >= tcol + tofs + tsize))
- continue;
- abort ();
+ gcc_assert ((tcol + tofs >= scol + sofs + ssize)
+ || (scol + sofs >= tcol + tofs + tsize));
+ continue;
}
}
}
break_aliases_to_web (struct web *web)
{
struct dlist *d, *d_next;
- if (web->type != SPILLED)
- abort ();
+ gcc_assert (web->type == SPILLED);
for (d = WEBS(COALESCED); d; d = d_next)
{
struct web *other = DLIST_WEB (d);
struct conflict_link *wl;
unsigned int c = pre->color;
unsigned int nregs = hard_regno_nregs[c][GET_MODE (web->orig_x)];
- if (pre->type != PRECOLORED)
- abort ();
+ gcc_assert (pre->type == PRECOLORED);
unalias_web (web);
/* Now we need to look at each conflict X of WEB, if it conflicts
with [PRE, PRE+nregs), and remove such conflicts, of X has not other
struct sub_conflict *sl;
wl = *pcl;
*pcl = wl->next;
- if (!other->have_orig_conflicts && other->type != PRECOLORED)
- abort ();
+ gcc_assert (other->have_orig_conflicts
+ || other->type == PRECOLORED);
for (owl = other->orig_conflict_list; owl; owl = owl->next)
if (owl->t == web)
break;
- if (owl)
- abort ();
+ gcc_assert (!owl);
opcl = &(other->conflict_list);
while (*opcl)
{
opcl = &((*opcl)->next);
}
}
- if (!owl && other->type != PRECOLORED)
- abort ();
+ gcc_assert (owl || other->type == PRECOLORED);
/* wl and owl contain the edge data to be deleted. */
RESET_BIT (sup_igraph, web->id * num_webs + other->id);
RESET_BIT (sup_igraph, other->id * num_webs + web->id);
sorted = xmalloc (num_web_pairs * sizeof (sorted[0]));
for (p = web_pair_list, i = 0; p; p = p->next_list)
sorted[i++] = p;
- if (i != num_web_pairs)
- abort ();
+ gcc_assert (i == num_web_pairs);
qsort (sorted, num_web_pairs, sizeof (sorted[0]), comp_web_pairs);
/* After combining one pair, we actually should adjust the savings
s = t;
t = h;
}
- if (s != t
- && m->type != CONSTRAINED
- /* Following can happen when a move was coalesced, but later
- broken up again. Then s!=t, but m is still MV_COALESCED. */
- && m->type != MV_COALESCED
- && t->type != PRECOLORED
- && ((s->type == PRECOLORED && ok (t, s))
- || s->type != PRECOLORED)
- && !TEST_BIT (sup_igraph, s->id * num_webs + t->id)
- && !TEST_BIT (sup_igraph, t->id * num_webs + s->id))
- abort ();
+ gcc_assert (s == t
+ || m->type == CONSTRAINED
+ /* Following can happen when a move was coalesced, but
+ later broken up again. Then s!=t, but m is still
+ MV_COALESCED. */
+ || m->type == MV_COALESCED
+ || t->type == PRECOLORED
+ || (s->type == PRECOLORED && !ok (t, s))
+ || TEST_BIT (sup_igraph, s->id * num_webs + t->id)
+ || TEST_BIT (sup_igraph, t->id * num_webs + s->id));
}
}
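The check_uncoalesced_moves hunk above is the most involved of the conversions that fold a multi-clause abort condition into a single gcc_assert: by De Morgan's laws the assert takes the exact negation of the old condition, so && chains flip to || chains and each leaf test inverts (for instance, `ok (t, s)` under the old guard becomes `!ok (t, s)` in the new one). A small self-check of that equivalence over all boolean inputs, with stand-in flags rather than the web fields:

#include <assert.h>

/* Old form:  if (a && b && !c) abort ();
   New form:  gcc_assert (!a || !b || c);
   The assert takes the negation of the abort condition, so De Morgan
   flips the connectives and inverts every leaf test.  */
int
main (void)
{
  int a, b, c;

  for (a = 0; a <= 1; a++)
    for (b = 0; b <= 1; b++)
      for (c = 0; c <= 1; c++)
        assert ((a && b && !c) == !(!a || !b || c));
  return 0;
}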
T from the web which was coalesced into T, which at the time
of combine() were not already on the SELECT stack or were
itself coalesced to something other. */
- if (t->type != SPILLED || s->type != SPILLED)
- abort ();
+ gcc_assert (t->type == SPILLED
+ && s->type == SPILLED);
remove_list (t->dlink, &WEBS(SPILLED));
put_web (t, COALESCED);
t->alias = s;
return 0;
return 1;
}
- if (!MEM_P (s1) || GET_CODE (s2) != MEM)
- abort ();
+ gcc_assert (MEM_P (s1) && GET_CODE (s2) == MEM);
s1 = XEXP (s1, 0);
s2 = XEXP (s2, 0);
if (GET_CODE (s1) != PLUS || !REG_P (XEXP (s1, 0))
if (!web)
continue;
supweb = find_web_for_subweb (web);
- if (supweb->regno >= max_normal_pseudo)
- abort ();
+ gcc_assert (supweb->regno < max_normal_pseudo);
/* Check for web being a spilltemp, if we only want to
load spilltemps. Also remember, that we emitted that
load, which we don't need to do when we have a death,
(at least then disallow spilling them, which we already ensure
when flag_ra_break_aliases), or not take the pattern but a
stackslot. */
- if (aweb != supweb)
- abort ();
+ gcc_assert (aweb == supweb);
slot = copy_rtx (supweb->pattern);
reg = copy_rtx (supweb->orig_x);
/* Sanity check. orig_x should be a REG rtx, which should be
shared over all RTL, so copy_rtx should have no effect. */
- if (reg != supweb->orig_x)
- abort ();
+ gcc_assert (reg == supweb->orig_x);
}
else
{
{
struct web *web2 = ID2WEB (j);
struct web *aweb2 = alias (find_web_for_subweb (web2));
- if (spill_is_free (&(ri->colors_in_use), aweb2) == 0)
- abort ();
+ gcc_assert (spill_is_free (&(ri->colors_in_use), aweb2) != 0);
if (spill_same_color_p (supweb, aweb2)
/* && interfere (web, web2) */)
{
ri.need_load = 1;
emit_loads (&ri, nl_first_reload, last_block_insn);
- if (ri.nl_size != 0 /*|| ri.num_reloads != 0*/)
- abort ();
+ gcc_assert (ri.nl_size == 0);
if (!insn)
break;
}
continue;
if (web->type == COALESCED && alias (web)->type == COLORED)
continue;
- if (web->reg_rtx || web->regno < FIRST_PSEUDO_REGISTER)
- abort ();
+ gcc_assert (!web->reg_rtx);
+ gcc_assert (web->regno >= FIRST_PSEUDO_REGISTER);
if (web->regno >= max_normal_pseudo)
{
first_hard_reg (HARD_REG_SET rs)
{
int c;
- for (c = 0; c < FIRST_PSEUDO_REGISTER && !TEST_HARD_REG_BIT (rs, c); c++)
- if (c == FIRST_PSEUDO_REGISTER)
- abort();
+
+ for (c = 0; c < FIRST_PSEUDO_REGISTER; c++)
+ if (TEST_HARD_REG_BIT (rs, c))
+ break;
+ gcc_assert (c < FIRST_PSEUDO_REGISTER);
return c;
}
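The first_hard_reg rewrite also repairs latent dead code: the old abort sat inside the loop body, but the loop condition had already excluded c == FIRST_PSEUDO_REGISTER, so it could never trigger. The new shape searches first and asserts afterwards, sketched here with a plain bit mask standing in for HARD_REG_SET:

#include <stdlib.h>

#define gcc_assert(EXPR) ((EXPR) ? (void) 0 : abort ())

#define N_REGS 32

/* Find the lowest set bit, then assert one was found.  The assert
   sits after the loop where it is reachable; the old in-loop abort
   was guarded by a condition the loop bound had already excluded.  */
static int
first_hard_reg (unsigned int mask)
{
  int c;

  for (c = 0; c < N_REGS; c++)
    if (mask & (1u << c))
      break;
  gcc_assert (c < N_REGS);
  return c;
}

int
main (void)
{
  return first_hard_reg (0x8) == 3 ? 0 : 1;
}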
act_refs += n;
insn_df[uid].num_uses = n;
}
- if (refs_for_insn_df + (df->def_id + df->use_id) < act_refs)
- abort ();
+ gcc_assert (refs_for_insn_df + (df->def_id + df->use_id) >= act_refs);
}
/* Free the insn_df structures. */
find_subweb (struct web *web, rtx reg)
{
struct web *w;
- if (GET_CODE (reg) != SUBREG)
- abort ();
+ gcc_assert (GET_CODE (reg) == SUBREG);
for (w = web->subreg_next; w; w = w->subreg_next)
if (GET_MODE (w->orig_x) == GET_MODE (reg)
&& SUBREG_BYTE (w->orig_x) == SUBREG_BYTE (reg))
an_unusable_color++)
if (TEST_HARD_REG_BIT (never_use_colors, an_unusable_color))
break;
- if (an_unusable_color == FIRST_PSEUDO_REGISTER)
- abort ();
+ gcc_assert (an_unusable_color != FIRST_PSEUDO_REGISTER);
orig_max_uid = get_max_uid ();
compute_bb_for_insn ();
gcc_obstack_init (&ra_obstack);
}
-/* Check the consistency of DF. This aborts if it violates some
+/* Check the consistency of DF. This asserts if it violates some
invariants we expect. */
static void
{
bitmap_clear (b);
for (link = DF_INSN_DEFS (df, insn); link; link = link->next)
- if (!link->ref || bitmap_bit_p (empty_defs, DF_REF_ID (link->ref))
- || bitmap_bit_p (b, DF_REF_ID (link->ref)))
- abort ();
- else
+ {
+ gcc_assert (link->ref);
+ gcc_assert (!bitmap_bit_p (empty_defs, DF_REF_ID (link->ref)));
+ gcc_assert (!bitmap_bit_p (b, DF_REF_ID (link->ref)));
bitmap_set_bit (b, DF_REF_ID (link->ref));
+ }
bitmap_clear (b);
for (link = DF_INSN_USES (df, insn); link; link = link->next)
- if (!link->ref || bitmap_bit_p (empty_uses, DF_REF_ID (link->ref))
- || bitmap_bit_p (b, DF_REF_ID (link->ref)))
- abort ();
- else
+ {
+ gcc_assert (link->ref);
+ gcc_assert (!bitmap_bit_p (empty_uses, DF_REF_ID (link->ref)));
+ gcc_assert (!bitmap_bit_p (b, DF_REF_ID (link->ref)));
bitmap_set_bit (b, DF_REF_ID (link->ref));
+ }
}
/* Now the same for the chains per register number. */
{
bitmap_clear (b);
for (link = df->regs[regno].defs; link; link = link->next)
- if (!link->ref || bitmap_bit_p (empty_defs, DF_REF_ID (link->ref))
- || bitmap_bit_p (b, DF_REF_ID (link->ref)))
- abort ();
- else
+ {
+ gcc_assert (link->ref);
+ gcc_assert (!bitmap_bit_p (empty_defs, DF_REF_ID (link->ref)));
+ gcc_assert (!bitmap_bit_p (b, DF_REF_ID (link->ref)));
bitmap_set_bit (b, DF_REF_ID (link->ref));
+ }
bitmap_clear (b);
for (link = df->regs[regno].uses; link; link = link->next)
- if (!link->ref || bitmap_bit_p (empty_uses, DF_REF_ID (link->ref))
- || bitmap_bit_p (b, DF_REF_ID (link->ref)))
- abort ();
- else
+ {
+ gcc_assert (link->ref);
+ gcc_assert (!bitmap_bit_p (empty_uses, DF_REF_ID (link->ref)));
+ gcc_assert (!bitmap_bit_p (b, DF_REF_ID (link->ref)));
bitmap_set_bit (b, DF_REF_ID (link->ref));
+ }
}
BITMAP_XFREE (empty_uses);
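The check_df hunks above also show why several compound aborts were split into one gcc_assert per clause: gcc_assert reports the text of the failing expression, so separate asserts pinpoint which invariant broke, where `if (x || y || z) abort ();` reported nothing. A sketch under the same approximated macro as before; record and its parameters are hypothetical:

#include <stdio.h>
#include <stdlib.h>

#define gcc_assert(EXPR) \
  ((EXPR) ? (void) 0 \
   : (fprintf (stderr, "%s:%d: assertion %s failed\n", \
               __FILE__, __LINE__, #EXPR), abort ()))

/* Before: if (!ref || seen_before || empty) abort ();
   After: one assert per invariant, each reporting its own text.  */
static void
record (int ref, int seen_before, int empty)
{
  gcc_assert (ref);
  gcc_assert (!seen_before);
  gcc_assert (!empty);
}

int
main (void)
{
  record (1, 0, 0);
  return 0;
}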
break;
default:
- abort ();
+ gcc_unreachable ();
}
/* Swap the arguments such that A has the larger exponent. */
break;
default:
- abort ();
+ gcc_unreachable ();
}
if (r == a || r == b)
break;
default:
- abort ();
+ gcc_unreachable ();
}
if (r == a || r == b)
break;
default:
- abort ();
+ gcc_unreachable ();
}
if (a->sign != b->sign)
break;
default:
- abort ();
+ gcc_unreachable ();
}
}
break;
default:
- abort ();
+ gcc_unreachable ();
}
}
return do_compare (op0, op1, 0) != 0;
default:
- abort ();
+ gcc_unreachable ();
}
}
case rvc_normal:
return REAL_EXP (r);
default:
- abort ();
+ gcc_unreachable ();
}
}
break;
default:
- abort ();
+ gcc_unreachable ();
}
}
break;
default:
- abort ();
+ gcc_unreachable ();
}
for (i = 0; i < SIGSZ; ++i)
if (HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_LONG)
i = r->sig[SIGSZ-1];
- else if (HOST_BITS_PER_WIDE_INT == 2*HOST_BITS_PER_LONG)
+ else
{
+ gcc_assert (HOST_BITS_PER_WIDE_INT == 2 * HOST_BITS_PER_LONG);
i = r->sig[SIGSZ-1];
i = i << (HOST_BITS_PER_LONG - 1) << 1;
i |= r->sig[SIGSZ-2];
}
- else
- abort ();
i >>= HOST_BITS_PER_WIDE_INT - REAL_EXP (r);
return i;
default:
- abort ();
+ gcc_unreachable ();
}
}
high = t.sig[SIGSZ-1];
low = t.sig[SIGSZ-2];
}
- else if (HOST_BITS_PER_WIDE_INT == 2*HOST_BITS_PER_LONG)
+ else
{
+ gcc_assert (HOST_BITS_PER_WIDE_INT == 2*HOST_BITS_PER_LONG);
high = t.sig[SIGSZ-1];
high = high << (HOST_BITS_PER_LONG - 1) << 1;
high |= t.sig[SIGSZ-2];
low = low << (HOST_BITS_PER_LONG - 1) << 1;
low |= t.sig[SIGSZ-4];
}
- else
- abort ();
if (r->sign)
{
break;
default:
- abort ();
+ gcc_unreachable ();
}
*plow = low;
strcpy (str, (r.sign ? "-NaN" : "+NaN"));
return;
default:
- abort ();
+ gcc_unreachable ();
}
/* Bound the number of digits printed by the size of the representation. */
/* Bound the number of digits printed by the size of the output buffer. */
max_digits = buf_size - 1 - 1 - 2 - max_digits - 1;
- if (max_digits > buf_size)
- abort ();
+ gcc_assert (max_digits <= buf_size);
if (digits > max_digits)
digits = max_digits;
do_multiply (&r, &r, ten);
digit = rtd_divmod (&r, &pten);
dec_exp -= 1;
- if (digit == 0)
- abort ();
+ gcc_assert (digit != 0);
}
/* ... or overflow. */
*p++ = '0';
dec_exp += 1;
}
- else if (digit > 10)
- abort ();
else
- *p++ = digit + '0';
+ {
+ gcc_assert (digit <= 10);
+ *p++ = digit + '0';
+ }
/* Generate subsequent digits. */
while (--digits > 0)
strcpy (str, (r->sign ? "-NaN" : "+NaN"));
return;
default:
- abort ();
+ gcc_unreachable ();
}
if (digits == 0)
sprintf (exp_buf, "p%+d", exp);
max_digits = buf_size - strlen (exp_buf) - r->sign - 4 - 1;
- if (max_digits > buf_size)
- abort ();
+ gcc_assert (max_digits <= buf_size);
if (digits > max_digits)
digits = max_digits;
r->sig[SIGSZ-2] = low;
memset (r->sig, 0, sizeof(long)*(SIGSZ-2));
}
- else if (HOST_BITS_PER_LONG*2 == HOST_BITS_PER_WIDE_INT)
+ else
{
+ gcc_assert (HOST_BITS_PER_LONG*2 == HOST_BITS_PER_WIDE_INT);
r->sig[SIGSZ-1] = high >> (HOST_BITS_PER_LONG - 1) >> 1;
r->sig[SIGSZ-2] = high;
r->sig[SIGSZ-3] = low >> (HOST_BITS_PER_LONG - 1) >> 1;
if (SIGSZ > 4)
memset (r->sig, 0, sizeof(long)*(SIGSZ-4));
}
- else
- abort ();
normalize (r);
}
{
static REAL_VALUE_TYPE tens[EXP_BITS];
- if (n < 0 || n >= EXP_BITS)
- abort ();
+ gcc_assert (n >= 0);
+ gcc_assert (n < EXP_BITS);
if (tens[n].cl == rvc_zero)
{
{
static REAL_VALUE_TYPE tens[EXP_BITS];
- if (n < 0 || n >= EXP_BITS)
- abort ();
+ gcc_assert (n >= 0);
+ gcc_assert (n < EXP_BITS);
if (tens[n].cl == rvc_zero)
do_divide (&tens[n], real_digit (1), ten_to_ptwo (n));
{
static REAL_VALUE_TYPE num[10];
- if (n < 0 || n > 9)
- abort ();
+ gcc_assert (n >= 0);
+ gcc_assert (n <= 9);
if (n > 0 && num[n].cl == rvc_zero)
real_from_integer (&num[n], VOIDmode, n, 0, 1);
const struct real_format *fmt;
fmt = REAL_MODE_FORMAT (mode);
- if (fmt == NULL)
- abort ();
+ gcc_assert (fmt);
if (*str == 0)
{
add_significands (r, r, &u);
break;
default:
- abort ();
+ gcc_unreachable ();
}
get_zero (&u, 0);
int np2;
fmt = REAL_MODE_FORMAT (mode);
- if (fmt == NULL)
- abort ();
+ gcc_assert (fmt);
r->cl = rvc_normal;
r->sign = sign;
break;
default:
- abort ();
+ gcc_unreachable ();
}
/* If we're not base2, normalize the exponent to a multiple of
const struct real_format *fmt;
fmt = REAL_MODE_FORMAT (mode);
- if (fmt == NULL)
- abort ();
+ gcc_assert (fmt);
*r = *a;
round_for_format (fmt, r);
const struct real_format *fmt;
fmt = REAL_MODE_FORMAT (mode);
- if (fmt == NULL)
- abort ();
+ gcc_assert (fmt);
return real_to_target_fmt (buf, r, fmt);
}
const struct real_format *fmt;
fmt = REAL_MODE_FORMAT (mode);
- if (fmt == NULL)
- abort ();
+ gcc_assert (fmt);
(*fmt->decode) (fmt, r, buf);
}
break;
default:
- abort ();
+ gcc_unreachable ();
}
if (sizeof(unsigned long) > sizeof(unsigned int))
break;
default:
- abort ();
+ gcc_unreachable ();
}
buf[0] = image;
break;
default:
- abort ();
+ gcc_unreachable ();
}
if (FLOAT_WORDS_BIG_ENDIAN)
else
{
exp += 16383 - 1;
- if (exp < 0)
- abort ();
+ gcc_assert (exp >= 0);
}
image_hi |= exp;
break;
default:
- abort ();
+ gcc_unreachable ();
}
buf[0] = sig_lo, buf[1] = sig_hi, buf[2] = image_hi;
break;
default:
- abort ();
+ gcc_unreachable ();
}
if (FLOAT_WORDS_BIG_ENDIAN)
break;
default:
- abort ();
+ gcc_unreachable ();
}
buf[0] = image;
break;
default:
- abort ();
+ gcc_unreachable ();
}
if (FLOAT_WORDS_BIG_ENDIAN)
break;
default:
- abort ();
+ gcc_unreachable ();
}
if (FLOAT_WORDS_BIG_ENDIAN)
break;
default:
- abort ();
+ gcc_unreachable ();
}
buf[0] = image;
break;
default:
- abort ();
+ gcc_unreachable ();
}
if (FLOAT_WORDS_BIG_ENDIAN)
break;
default:
- abort ();
+ gcc_unreachable ();
}
image = ((exp & 0xff) << 24) | (sig & 0xffffff);
break;
default:
- abort ();
+ gcc_unreachable ();
}
exp = (exp & 0xff) << 24;
if (old == new || rtx_equal_p (old, new))
return 1;
- if (in_group == 0 && num_changes != 0)
- abort ();
+ gcc_assert (in_group != 0 || num_changes == 0);
*loc = new;
&& GET_CODE (SET_SRC (XVECEXP (x, 0, j))) == ASM_OPERANDS)
{
/* Verify that operands are really shared. */
- if (ASM_OPERANDS_INPUT_VEC (SET_SRC (XVECEXP (x, 0, 0))) !=
- ASM_OPERANDS_INPUT_VEC (SET_SRC (XVECEXP (x, 0, j))))
- abort ();
+ gcc_assert (ASM_OPERANDS_INPUT_VEC (SET_SRC (XVECEXP (x, 0, 0)))
+ == ASM_OPERANDS_INPUT_VEC (SET_SRC (XVECEXP
+ (x, 0, j))));
validate_replace_rtx_1 (&SET_DEST (XVECEXP (x, 0, j)),
from, to, object);
}
int result = 0;
/* Use constrain_operands after reload. */
- if (reload_completed)
- abort ();
+ gcc_assert (!reload_completed);
while (*constraint)
{
/* This insn is an `asm' with operands. */
/* expand_asm_operands makes sure there aren't too many operands. */
- if (noperands > MAX_RECOG_OPERANDS)
- abort ();
+ gcc_assert (noperands <= MAX_RECOG_OPERANDS);
/* Now get the operand values and constraints out of the insn. */
decode_asm_operands (body, recog_data.operand,
: recog_data.constraints[i][0] == '+' ? OP_INOUT
: OP_IN);
- if (recog_data.n_alternatives > MAX_RECOG_ALTERNATIVES)
- abort ();
+ gcc_assert (recog_data.n_alternatives <= MAX_RECOG_ALTERNATIVES);
}
/* After calling extract_insn, you can use this function to extract some
rtx
peep2_next_insn (int n)
{
- if (n >= MAX_INSNS_PER_PEEP2 + 1)
- abort ();
+ gcc_assert (n < MAX_INSNS_PER_PEEP2 + 1);
n += peep2_current;
if (n >= MAX_INSNS_PER_PEEP2 + 1)
int
peep2_regno_dead_p (int ofs, int regno)
{
- if (ofs >= MAX_INSNS_PER_PEEP2 + 1)
- abort ();
+ gcc_assert (ofs < MAX_INSNS_PER_PEEP2 + 1);
ofs += peep2_current;
if (ofs >= MAX_INSNS_PER_PEEP2 + 1)
ofs -= MAX_INSNS_PER_PEEP2 + 1;
- if (peep2_insn_data[ofs].insn == NULL_RTX)
- abort ();
+ gcc_assert (peep2_insn_data[ofs].insn != NULL_RTX);
return ! REGNO_REG_SET_P (peep2_insn_data[ofs].live_before, regno);
}
{
int regno, n;
- if (ofs >= MAX_INSNS_PER_PEEP2 + 1)
- abort ();
+ gcc_assert (ofs < MAX_INSNS_PER_PEEP2 + 1);
ofs += peep2_current;
if (ofs >= MAX_INSNS_PER_PEEP2 + 1)
ofs -= MAX_INSNS_PER_PEEP2 + 1;
- if (peep2_insn_data[ofs].insn == NULL_RTX)
- abort ();
+ gcc_assert (peep2_insn_data[ofs].insn != NULL_RTX);
regno = REGNO (reg);
n = hard_regno_nregs[regno][GET_MODE (reg)];
HARD_REG_SET live;
int i;
- if (from >= MAX_INSNS_PER_PEEP2 + 1 || to >= MAX_INSNS_PER_PEEP2 + 1)
- abort ();
+ gcc_assert (from < MAX_INSNS_PER_PEEP2 + 1);
+ gcc_assert (to < MAX_INSNS_PER_PEEP2 + 1);
from += peep2_current;
if (from >= MAX_INSNS_PER_PEEP2 + 1)
if (to >= MAX_INSNS_PER_PEEP2 + 1)
to -= MAX_INSNS_PER_PEEP2 + 1;
- if (peep2_insn_data[from].insn == NULL_RTX)
- abort ();
+ gcc_assert (peep2_insn_data[from].insn != NULL_RTX);
REG_SET_TO_HARD_REG_SET (live, peep2_insn_data[from].live_before);
while (from != to)
if (++from >= MAX_INSNS_PER_PEEP2 + 1)
from = 0;
- if (peep2_insn_data[from].insn == NULL_RTX)
- abort ();
+ gcc_assert (peep2_insn_data[from].insn != NULL_RTX);
REG_SET_TO_HARD_REG_SET (this_live, peep2_insn_data[from].live_before);
IOR_HARD_REG_SET (live, this_live);
}
new_insn = NEXT_INSN (new_insn);
}
- if (new_insn == NULL_RTX)
- abort ();
+ gcc_assert (new_insn != NULL_RTX);
CALL_INSN_FUNCTION_USAGE (new_insn)
= CALL_INSN_FUNCTION_USAGE (old_insn);
if (j >= MAX_INSNS_PER_PEEP2 + 1)
j -= MAX_INSNS_PER_PEEP2 + 1;
old_insn = peep2_insn_data[j].insn;
- if (CALL_P (old_insn))
- abort ();
+ gcc_assert (!CALL_P (old_insn));
}
break;
}
rtx out_set, in_set;
in_set = single_set (in_insn);
- if (! in_set)
- abort ();
+ gcc_assert (in_set);
if (!MEM_P (SET_DEST (in_set)))
return false;
int i;
out_pat = PATTERN (out_insn);
- if (GET_CODE (out_pat) != PARALLEL)
- abort ();
+ gcc_assert (GET_CODE (out_pat) == PARALLEL);
for (i = 0; i < XVECLEN (out_pat, 0); i++)
{
if (GET_CODE (exp) == CLOBBER)
continue;
- if (GET_CODE (exp) != SET)
- abort ();
+ gcc_assert (GET_CODE (exp) == SET);
if (reg_mentioned_p (SET_DEST (exp), SET_DEST (in_set)))
return false;
in_set = single_set (in_insn);
if (! in_set)
{
- if (JUMP_P (in_insn) || CALL_P (in_insn))
- return false;
- abort ();
+ gcc_assert (JUMP_P (in_insn) || CALL_P (in_insn));
+ return false;
}
if (GET_CODE (SET_SRC (in_set)) != IF_THEN_ELSE)
int i;
out_pat = PATTERN (out_insn);
- if (GET_CODE (out_pat) != PARALLEL)
- abort ();
+ gcc_assert (GET_CODE (out_pat) == PARALLEL);
for (i = 0; i < XVECLEN (out_pat, 0); i++)
{
if (GET_CODE (exp) == CLOBBER)
continue;
- if (GET_CODE (exp) != SET)
- abort ();
+ gcc_assert (GET_CODE (exp) == SET);
if (reg_mentioned_p (SET_DEST (out_set), XEXP (in_set, 1))
|| reg_mentioned_p (SET_DEST (out_set), XEXP (in_set, 2)))
rtx label = XEXP (pat, 0);
rtx ref;
- if (!LABEL_P (label))
- abort ();
+ gcc_assert (LABEL_P (label));
/* If this is an undefined label, LABEL_REFS (label) contains
garbage. */
static int
get_asm_operand_n_inputs (rtx body)
{
- if (GET_CODE (body) == SET && GET_CODE (SET_SRC (body)) == ASM_OPERANDS)
- return ASM_OPERANDS_INPUT_LENGTH (SET_SRC (body));
-
- else if (GET_CODE (body) == ASM_OPERANDS)
- return ASM_OPERANDS_INPUT_LENGTH (body);
-
- else if (GET_CODE (body) == PARALLEL
- && GET_CODE (XVECEXP (body, 0, 0)) == SET)
- return ASM_OPERANDS_INPUT_LENGTH (SET_SRC (XVECEXP (body, 0, 0)));
-
- else if (GET_CODE (body) == PARALLEL
- && GET_CODE (XVECEXP (body, 0, 0)) == ASM_OPERANDS)
- return ASM_OPERANDS_INPUT_LENGTH (XVECEXP (body, 0, 0));
-
- abort ();
+ switch (GET_CODE (body))
+ {
+ case SET:
+ gcc_assert (GET_CODE (SET_SRC (body)) == ASM_OPERANDS);
+ return ASM_OPERANDS_INPUT_LENGTH (SET_SRC (body));
+
+ case ASM_OPERANDS:
+ return ASM_OPERANDS_INPUT_LENGTH (body);
+
+ case PARALLEL:
+ return get_asm_operand_n_inputs (XVECEXP (body, 0, 0));
+
+ default:
+ gcc_unreachable ();
+ }
}
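get_asm_operand_n_inputs is the one place the patch restructures control flow rather than just swapping abort for an assert: the two old PARALLEL arms differed only in the code of the first vector element, so the new switch recurses on that element, and gcc_unreachable documents that the cases are exhaustive. A toy model of that shape; node and kind are invented illustrations, not the rtx API:

#include <stdlib.h>

#define gcc_assert(EXPR) ((EXPR) ? (void) 0 : abort ())
#define gcc_unreachable() (abort ())

enum kind { SET_K, ASM_OPERANDS_K, PARALLEL_K };

struct node
{
  enum kind k;
  struct node *inner;           /* SET source, or first vector element.  */
  int n_inputs;                 /* payload of an ASM_OPERANDS_K node.  */
};

static int
asm_n_inputs (const struct node *body)
{
  switch (body->k)
    {
    case SET_K:
      gcc_assert (body->inner->k == ASM_OPERANDS_K);
      return body->inner->n_inputs;
    case ASM_OPERANDS_K:
      return body->n_inputs;
    case PARALLEL_K:
      /* The first element is itself a SET or ASM_OPERANDS: recurse
         instead of spelling out both sub-cases as the old code did.  */
      return asm_n_inputs (body->inner);
    default:
      gcc_unreachable ();
    }
}

int
main (void)
{
  struct node asmop = { ASM_OPERANDS_K, NULL, 2 };
  struct node set = { SET_K, &asmop, 0 };
  struct node par = { PARALLEL_K, &set, 0 };
  return asm_n_inputs (&par) == 2 ? 0 : 1;
}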
/* If current function returns its result in an fp stack register,
static void
replace_reg (rtx *reg, int regno)
{
- if (regno < FIRST_STACK_REG || regno > LAST_STACK_REG
- || ! STACK_REG_P (*reg))
- abort ();
+ gcc_assert (regno >= FIRST_STACK_REG);
+ gcc_assert (regno <= LAST_STACK_REG);
+ gcc_assert (STACK_REG_P (*reg));
- switch (GET_MODE_CLASS (GET_MODE (*reg)))
- {
- default: abort ();
- case MODE_FLOAT:
- case MODE_COMPLEX_FLOAT:;
- }
+ gcc_assert (GET_MODE_CLASS (GET_MODE (*reg)) == MODE_FLOAT
+ || GET_MODE_CLASS (GET_MODE (*reg)) == MODE_COMPLEX_FLOAT);
*reg = FP_MODE_REG (regno, GET_MODE (*reg));
}
else
note_link = &XEXP (this, 1);
- abort ();
+ gcc_unreachable ();
}
/* Find the hard register number of virtual register REG in REGSTACK.
{
int i;
- if (! STACK_REG_P (reg))
- abort ();
+ gcc_assert (STACK_REG_P (reg));
for (i = regstack->top; i >= 0; i--)
if (regstack->reg[i] == REGNO (reg))
pop_insn = emit_pop_insn (insn, regstack, reg1, where);
if (get_hard_regnum (regstack, reg2) >= 0)
pop_insn = emit_pop_insn (insn, regstack, reg2, where);
- if (!pop_insn)
- abort ();
+ gcc_assert (pop_insn);
return pop_insn;
}
hard_regno = get_hard_regnum (regstack, reg);
- if (hard_regno < FIRST_STACK_REG)
- abort ();
+ gcc_assert (hard_regno >= FIRST_STACK_REG);
pop_rtx = gen_rtx_SET (VOIDmode, FP_MODE_REG (hard_regno, DFmode),
FP_MODE_REG (FIRST_STACK_REG, DFmode));
hard_regno = get_hard_regnum (regstack, reg);
- if (hard_regno < FIRST_STACK_REG)
- abort ();
+ gcc_assert (hard_regno >= FIRST_STACK_REG);
if (hard_regno == FIRST_STACK_REG)
return;
/* Place operand 1 at the top of stack. */
regno = get_hard_regnum (&temp_stack, src1);
- if (regno < 0)
- abort ();
+ gcc_assert (regno >= 0);
if (regno != FIRST_STACK_REG)
{
k = temp_stack.top - (regno - FIRST_STACK_REG);
/* Place operand 2 next on the stack. */
regno = get_hard_regnum (&temp_stack, src2);
- if (regno < 0)
- abort ();
+ gcc_assert (regno >= 0);
if (regno != FIRST_STACK_REG + 1)
{
k = temp_stack.top - (regno - FIRST_STACK_REG);
int i;
/* If this is a no-op move, there must not be a REG_DEAD note. */
- if (REGNO (src) == REGNO (dest))
- abort ();
+ gcc_assert (REGNO (src) != REGNO (dest));
for (i = regstack->top; i >= 0; i--)
if (regstack->reg[i] == REGNO (src))
break;
/* The source must be live, and the dest must be dead. */
- if (i < 0 || get_hard_regnum (regstack, dest) >= FIRST_STACK_REG)
- abort ();
+ gcc_assert (i >= 0);
+ gcc_assert (get_hard_regnum (regstack, dest) < FIRST_STACK_REG);
/* It is possible that the dest is unused after this insn.
If so, just pop the src. */
}
/* The destination ought to be dead. */
- if (get_hard_regnum (regstack, dest) >= FIRST_STACK_REG)
- abort ();
+ gcc_assert (get_hard_regnum (regstack, dest) < FIRST_STACK_REG);
replace_reg (psrc, get_hard_regnum (regstack, src));
replace_reg (psrc, FIRST_STACK_REG);
}
- else if (STACK_REG_P (dest))
+ else
{
+ gcc_assert (STACK_REG_P (dest));
+
/* Load from MEM, or possibly integer REG or constant, into the
stack regs. The actual target is always the top of the
stack. The stack mapping is changed to reflect that DEST is
now at top of stack. */
/* The destination ought to be dead. */
- if (get_hard_regnum (regstack, dest) >= FIRST_STACK_REG)
- abort ();
+ gcc_assert (get_hard_regnum (regstack, dest) < FIRST_STACK_REG);
- if (regstack->top >= REG_STACK_SIZE)
- abort ();
+ gcc_assert (regstack->top < REG_STACK_SIZE);
regstack->reg[++regstack->top] = REGNO (dest);
SET_HARD_REG_BIT (regstack->reg_set, REGNO (dest));
replace_reg (pdest, FIRST_STACK_REG);
}
- else
- abort ();
return control_flow_insn_deleted;
}
return control_flow_insn_deleted;
}
/* ??? Uninitialized USE should not happen. */
- else if (get_hard_regnum (regstack, *src) == -1)
- abort ();
+ else
+ gcc_assert (get_hard_regnum (regstack, *src) != -1);
break;
case CLOBBER:
else
{
note = find_reg_note (insn, REG_UNUSED, *dest);
- if (!note)
- abort ();
+ gcc_assert (note);
}
remove_note (insn, note);
replace_reg (dest, FIRST_STACK_REG + 1);
case REG:
/* This is a `tstM2' case. */
- if (*dest != cc0_rtx)
- abort ();
+ gcc_assert (*dest == cc0_rtx);
src1 = src;
/* Fall through. */
src1_hard_regnum = get_hard_regnum (regstack, *src1);
src2_hard_regnum = get_hard_regnum (regstack, *src2);
- if (src1_hard_regnum == -1 || src2_hard_regnum == -1)
- abort ();
+ gcc_assert (src1_hard_regnum != -1);
+ gcc_assert (src2_hard_regnum != -1);
if (src1_hard_regnum != FIRST_STACK_REG
&& src2_hard_regnum != FIRST_STACK_REG)
/* Input should never die, it is
replaced with output. */
src1_note = find_regno_note (insn, REG_DEAD, REGNO (*src1));
- if (src1_note)
- abort();
+ gcc_assert (!src1_note);
if (STACK_REG_P (*dest))
replace_reg (dest, FIRST_STACK_REG);
/* Inputs should never die, they are
replaced with outputs. */
- if ((src1_note) || (src2_note))
- abort();
+ gcc_assert (!src1_note);
+ gcc_assert (!src2_note);
swap_to_top (insn, regstack, *src1, *src2);
/* Inputs should never die, they are
replaced with outputs. */
- if ((src1_note) || (src2_note))
- abort();
+ gcc_assert (!src1_note);
+ gcc_assert (!src2_note);
swap_to_top (insn, regstack, *src1, *src2);
/* Input should never die, it is
replaced with output. */
src1_note = find_regno_note (insn, REG_DEAD, REGNO (*src1));
- if (src1_note)
- abort();
+ gcc_assert (!src1_note);
/* Push the result back onto stack. Empty stack slot
will be filled in second part of insn. */
/* Input should never die, it is
replaced with output. */
src1_note = find_regno_note (insn, REG_DEAD, REGNO (*src1));
- if (src1_note)
- abort();
+ gcc_assert (!src1_note);
/* Push the result back onto stack. Fill empty slot from
first part of insn and fix top of stack pointer. */
The combination matches the PPRO fcomi instruction. */
pat_src = XVECEXP (pat_src, 0, 0);
- if (GET_CODE (pat_src) != UNSPEC
- || XINT (pat_src, 1) != UNSPEC_FNSTSW)
- abort ();
+ gcc_assert (GET_CODE (pat_src) == UNSPEC);
+ gcc_assert (XINT (pat_src, 1) == UNSPEC_FNSTSW);
/* Fall through. */
case UNSPEC_FNSTSW:
up before now. */
pat_src = XVECEXP (pat_src, 0, 0);
- if (GET_CODE (pat_src) != COMPARE)
- abort ();
+ gcc_assert (GET_CODE (pat_src) == COMPARE);
compare_for_stack_reg (insn, regstack, pat_src);
break;
default:
- abort ();
+ gcc_unreachable ();
}
break;
int regno = REGNO (XEXP (src_note[i], 0));
/* If the register that dies is not at the top of
- stack, then move the top of stack to the dead reg */
- if (regno != regstack->reg[regstack->top])
- {
- remove_regno_note (insn, REG_DEAD, regno);
- emit_pop_insn (insn, regstack, XEXP (src_note[i], 0),
- EMIT_AFTER);
- }
- else
- /* Top of stack never dies, as it is the
- destination. */
- abort ();
+ stack, then move the top of stack to the dead reg.
+ Top of stack should never die, as it is the
+ destination. */
+ gcc_assert (regno != regstack->reg[regstack->top]);
+ remove_regno_note (insn, REG_DEAD, regno);
+ emit_pop_insn (insn, regstack, XEXP (src_note[i], 0),
+ EMIT_AFTER);
}
}
break;
default:
- abort ();
+ gcc_unreachable ();
}
break;
}
n_inputs = get_asm_operand_n_inputs (body);
n_outputs = recog_data.n_operands - n_inputs;
- if (alt < 0)
- abort ();
+ gcc_assert (alt >= 0);
/* Strip SUBREGs here to make the following code simpler. */
for (i = 0; i < recog_data.n_operands; i++)
int regno = get_hard_regnum (&temp_stack, recog_data.operand[i]);
- if (regno < 0)
- abort ();
+ gcc_assert (regno >= 0);
if ((unsigned int) regno != REGNO (recog_data.operand[i]))
{
{
int regnum = get_hard_regnum (regstack, recog_data.operand[i]);
- if (regnum < 0)
- abort ();
+ gcc_assert (regnum >= 0);
replace_reg (recog_data.operand_loc[i], regnum);
}
{
int regnum = get_hard_regnum (regstack, note_reg[i]);
- if (regnum < 0)
- abort ();
+ gcc_assert (regnum >= 0);
replace_reg (note_loc[i], regnum);
}
if (regnum >= 0)
{
/* Sigh - clobbers always have QImode. But replace_reg knows
- that these regs can't be MODE_INT and will abort. Just put
+ that these regs can't be MODE_INT and will assert. Just put
the right reg there without calling replace_reg. */
*clobber_loc[i] = FP_MODE_REG (regnum, DFmode);
not their depth or liveness. */
GO_IF_HARD_REG_EQUAL (old->reg_set, new->reg_set, win);
- abort ();
+ gcc_unreachable ();
win:
- if (old->top != new->top)
- abort ();
+ gcc_assert (old->top == new->top);
/* If the stack is not empty (new->top != -1), loop here emitting
swaps until the stack is correct.
if (new->reg[reg] == old->reg[old->top])
break;
- if (reg == -1)
- abort ();
+ gcc_assert (reg != -1);
emit_swap_insn (insn, old,
FP_MODE_REG (old->reg[reg], DFmode));
/* At this point there must be no differences. */
for (reg = old->top; reg >= 0; reg--)
- if (old->reg[reg] != new->reg[reg])
- abort ();
+ gcc_assert (old->reg[reg] == new->reg[reg]);
}
if (update_end)
CLEAR_HARD_REG_SET (tmp);
GO_IF_HARD_REG_EQUAL (target_stack->reg_set, tmp, eh1);
- abort ();
+ gcc_unreachable ();
eh1:
/* We are sure that there is st(0) live, otherwise we won't compensate.
if (TEST_HARD_REG_BIT (regstack.reg_set, FIRST_STACK_REG + 1))
SET_HARD_REG_BIT (tmp, FIRST_STACK_REG + 1);
GO_IF_HARD_REG_EQUAL (regstack.reg_set, tmp, eh2);
- abort ();
+ gcc_unreachable ();
eh2:
target_stack->top = -1;
/* We don't support abnormal edges. Global takes care to
avoid any live register across them, so we should never
have to insert instructions on such edges. */
- if (e->flags & EDGE_ABNORMAL)
- abort ();
+ gcc_assert (!(e->flags & EDGE_ABNORMAL));
current_block = NULL;
start_sequence ();
next = NEXT_INSN (insn);
/* Ensure we have not missed a block boundary. */
- if (next == NULL)
- abort ();
+ gcc_assert (next);
if (insn == BB_END (block))
next = NULL;
asms, we zapped the instruction itself, but that didn't produce the
same pattern of register kills as before. */
GO_IF_HARD_REG_EQUAL (regstack.reg_set, bi->out_reg_set, win);
- if (!any_malformed_asm)
- abort ();
+ gcc_assert (any_malformed_asm);
win:
bi->stack_out = regstack;
if (e->flags & EDGE_DFS_BACK
|| (e->dest == EXIT_BLOCK_PTR))
{
- if (!BLOCK_INFO (e->dest)->done
- && e->dest != block)
- abort ();
+ gcc_assert (BLOCK_INFO (e->dest)->done
+ || e->dest == block);
inserted |= compensate_edge (e, file);
}
}
if (e != beste && !(e->flags & EDGE_DFS_BACK)
&& e->src != ENTRY_BLOCK_PTR)
{
- if (!BLOCK_INFO (e->src)->done)
- abort ();
+ gcc_assert (BLOCK_INFO (e->src)->done);
inserted |= compensate_edge (e, file);
}
}
/* Sanity check: make sure the target macros FIXED_REGISTERS and
CALL_USED_REGISTERS had the right number of initializers. */
- if (sizeof fixed_regs != sizeof initial_fixed_regs
- || sizeof call_used_regs != sizeof initial_call_used_regs)
- abort();
+ gcc_assert (sizeof fixed_regs == sizeof initial_fixed_regs);
+ gcc_assert (sizeof call_used_regs == sizeof initial_call_used_regs);
memcpy (fixed_regs, initial_fixed_regs, sizeof fixed_regs);
memcpy (call_used_regs, initial_call_used_regs, sizeof call_used_regs);
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
{
-#ifdef ENABLE_CHECKING
/* call_used_regs must include fixed_regs. */
- if (fixed_regs[i] && !call_used_regs[i])
- abort ();
+ gcc_assert (!fixed_regs[i] || call_used_regs[i]);
#ifdef CALL_REALLY_USED_REGISTERS
/* call_used_regs must include call_really_used_regs. */
- if (call_really_used_regs[i] && !call_used_regs[i])
- abort ();
-#endif
+ gcc_assert (!call_really_used_regs[i] || call_used_regs[i]);
#endif
if (fixed_regs[i])
what it is, so MEMORY_MOVE_COST really ought not to be calling
here in that case.
- I'm tempted to put in an abort here, but returning this will
+ I'm tempted to put in an assert here, but returning this will
probably only give poor estimates, which is what we would've
had before this code anyways. */
return partial_cost;
nregs = hard_regno_nregs[regno][GET_MODE (x)];
/* There must not be pseudos at this point. */
- if (regno + nregs > FIRST_PSEUDO_REGISTER)
- abort ();
+ gcc_assert (regno + nregs <= FIRST_PSEUDO_REGISTER);
while (nregs-- > 0)
SET_HARD_REG_BIT (*pset, regno + nregs);
int nregs = hard_regno_nregs[regno][GET_MODE (reg)];
/* There must not be pseudos at this point. */
- if (regno + nregs > FIRST_PSEUDO_REGISTER)
- abort ();
+ gcc_assert (regno + nregs <= FIRST_PSEUDO_REGISTER);
while (nregs-- > 0)
CLEAR_HARD_REG_BIT (*pset, regno + nregs);
if (action == mark_read)
{
- if (! exact_match)
- abort ();
+ gcc_assert (exact_match);
/* ??? Class NO_REGS can happen if the md file makes use of
EXTRA_CONSTRAINTS to match registers. Which is arguably
case POST_MODIFY:
case PRE_MODIFY:
/* Should only happen inside MEM. */
- abort ();
+ gcc_unreachable ();
case CLOBBER:
scan_rtx (insn, &SET_DEST (x), cl, action, OP_OUT, 1);
: REG_CLASS_FROM_CONSTRAINT ((unsigned char) insn_letter,
insn_constraint));
- if (insn_class == NO_REGS)
- abort ();
- if (in_p
- && insn_data[(int) icode].operand[!in_p].constraint[0] != '=')
- abort ();
+ gcc_assert (insn_class != NO_REGS);
+ gcc_assert (!in_p
+ || insn_data[(int) icode].operand[!in_p].constraint[0]
+ == '=');
}
/* The scratch register's constraint must start with "=&". */
- if (insn_data[(int) icode].operand[2].constraint[0] != '='
- || insn_data[(int) icode].operand[2].constraint[1] != '&')
- abort ();
+ gcc_assert (insn_data[(int) icode].operand[2].constraint[0] == '='
+ && insn_data[(int) icode].operand[2].constraint[1] == '&');
if (reg_class_subset_p (reload_class, insn_class))
mode = insn_data[(int) icode].operand[2].mode;
Allow this when a reload_in/out pattern is being used. I.e. assume
that the generated code handles this case. */
- if (in_p && class == reload_class && icode == CODE_FOR_nothing
- && t_icode == CODE_FOR_nothing)
- abort ();
+ gcc_assert (!in_p || class != reload_class || icode != CODE_FOR_nothing
+ || t_icode != CODE_FOR_nothing);
/* If we need a tertiary reload, see if we have one we can reuse or else
make a new one. */
}
}
- if (best_size == 0)
- abort ();
+ gcc_assert (best_size != 0);
return best_class;
}
if (MEM_P (in))
/* This is supposed to happen only for paradoxical subregs made by
combine.c. (SUBREG (MEM)) isn't supposed to occur other ways. */
- if (GET_MODE_SIZE (GET_MODE (in)) > GET_MODE_SIZE (inmode))
- abort ();
+ gcc_assert (GET_MODE_SIZE (GET_MODE (in)) <= GET_MODE_SIZE (inmode));
#endif
inmode = GET_MODE (in);
}
outloc = &SUBREG_REG (out);
out = *outloc;
#if ! defined (LOAD_EXTEND_OP) && ! defined (WORD_REGISTER_OPERATIONS)
- if (MEM_P (out)
- && GET_MODE_SIZE (GET_MODE (out)) > GET_MODE_SIZE (outmode))
- abort ();
+ gcc_assert (!MEM_P (out)
+ || GET_MODE_SIZE (GET_MODE (out))
+ <= GET_MODE_SIZE (outmode));
#endif
outmode = GET_MODE (out);
}
/* Optional output reloads are always OK even if we have no register class,
since the function of these reloads is only to have spill_reg_store etc.
set, so that the storing insn can be deleted later. */
- if (class == NO_REGS
- && (optional == 0 || type != RELOAD_FOR_OUTPUT))
- abort ();
+ gcc_assert (class != NO_REGS
+ || (optional != 0 && type == RELOAD_FOR_OUTPUT));
i = find_reusable_reload (&in, out, class, type, opnum, dont_share);
/* If we did not find a nonzero amount-to-increment-by,
that contradicts the belief that IN is being incremented
in an address in this insn. */
- if (rld[i].inc == 0)
- abort ();
+ gcc_assert (rld[i].inc != 0);
}
#endif
contain anything but integers and other rtx's,
except for within LABEL_REFs and SYMBOL_REFs. */
default:
- abort ();
+ gcc_unreachable ();
}
}
return 1 + success_2;
memset (&val, 0, sizeof (val));
- if (MEM_P (x))
- {
- rtx base = NULL_RTX, offset = 0;
- rtx addr = XEXP (x, 0);
-
- if (GET_CODE (addr) == PRE_DEC || GET_CODE (addr) == PRE_INC
- || GET_CODE (addr) == POST_DEC || GET_CODE (addr) == POST_INC)
- {
- val.base = XEXP (addr, 0);
- val.start = -GET_MODE_SIZE (GET_MODE (x));
- val.end = GET_MODE_SIZE (GET_MODE (x));
- val.safe = REGNO (val.base) == STACK_POINTER_REGNUM;
- return val;
- }
-
- if (GET_CODE (addr) == PRE_MODIFY || GET_CODE (addr) == POST_MODIFY)
- {
- if (GET_CODE (XEXP (addr, 1)) == PLUS
- && XEXP (addr, 0) == XEXP (XEXP (addr, 1), 0)
- && CONSTANT_P (XEXP (XEXP (addr, 1), 1)))
- {
- val.base = XEXP (addr, 0);
- val.start = -INTVAL (XEXP (XEXP (addr, 1), 1));
- val.end = INTVAL (XEXP (XEXP (addr, 1), 1));
- val.safe = REGNO (val.base) == STACK_POINTER_REGNUM;
- return val;
- }
- }
-
- if (GET_CODE (addr) == CONST)
- {
- addr = XEXP (addr, 0);
- all_const = 1;
- }
- if (GET_CODE (addr) == PLUS)
- {
- if (CONSTANT_P (XEXP (addr, 0)))
- {
- base = XEXP (addr, 1);
- offset = XEXP (addr, 0);
- }
- else if (CONSTANT_P (XEXP (addr, 1)))
- {
- base = XEXP (addr, 0);
- offset = XEXP (addr, 1);
- }
- }
-
- if (offset == 0)
- {
- base = addr;
- offset = const0_rtx;
- }
- if (GET_CODE (offset) == CONST)
- offset = XEXP (offset, 0);
- if (GET_CODE (offset) == PLUS)
- {
- if (GET_CODE (XEXP (offset, 0)) == CONST_INT)
- {
- base = gen_rtx_PLUS (GET_MODE (base), base, XEXP (offset, 1));
- offset = XEXP (offset, 0);
- }
- else if (GET_CODE (XEXP (offset, 1)) == CONST_INT)
- {
- base = gen_rtx_PLUS (GET_MODE (base), base, XEXP (offset, 0));
- offset = XEXP (offset, 1);
- }
- else
- {
- base = gen_rtx_PLUS (GET_MODE (base), base, offset);
- offset = const0_rtx;
- }
- }
- else if (GET_CODE (offset) != CONST_INT)
- {
- base = gen_rtx_PLUS (GET_MODE (base), base, offset);
- offset = const0_rtx;
- }
-
- if (all_const && GET_CODE (base) == PLUS)
- base = gen_rtx_CONST (GET_MODE (base), base);
-
- if (GET_CODE (offset) != CONST_INT)
- abort ();
-
- val.start = INTVAL (offset);
- val.end = val.start + GET_MODE_SIZE (GET_MODE (x));
- val.base = base;
- return val;
- }
- else if (REG_P (x))
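+ /* Dispatch on the rtx code; anything not handled below must be
+ a constant. */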
+ switch (GET_CODE (x))
{
+ case MEM:
+ {
+ rtx base = NULL_RTX, offset = 0;
+ rtx addr = XEXP (x, 0);
+
+ if (GET_CODE (addr) == PRE_DEC || GET_CODE (addr) == PRE_INC
+ || GET_CODE (addr) == POST_DEC || GET_CODE (addr) == POST_INC)
+ {
+ val.base = XEXP (addr, 0);
+ val.start = -GET_MODE_SIZE (GET_MODE (x));
+ val.end = GET_MODE_SIZE (GET_MODE (x));
+ val.safe = REGNO (val.base) == STACK_POINTER_REGNUM;
+ return val;
+ }
+
+ if (GET_CODE (addr) == PRE_MODIFY || GET_CODE (addr) == POST_MODIFY)
+ {
+ if (GET_CODE (XEXP (addr, 1)) == PLUS
+ && XEXP (addr, 0) == XEXP (XEXP (addr, 1), 0)
+ && CONSTANT_P (XEXP (XEXP (addr, 1), 1)))
+ {
+ val.base = XEXP (addr, 0);
+ val.start = -INTVAL (XEXP (XEXP (addr, 1), 1));
+ val.end = INTVAL (XEXP (XEXP (addr, 1), 1));
+ val.safe = REGNO (val.base) == STACK_POINTER_REGNUM;
+ return val;
+ }
+ }
+
+ if (GET_CODE (addr) == CONST)
+ {
+ addr = XEXP (addr, 0);
+ all_const = 1;
+ }
+ if (GET_CODE (addr) == PLUS)
+ {
+ if (CONSTANT_P (XEXP (addr, 0)))
+ {
+ base = XEXP (addr, 1);
+ offset = XEXP (addr, 0);
+ }
+ else if (CONSTANT_P (XEXP (addr, 1)))
+ {
+ base = XEXP (addr, 0);
+ offset = XEXP (addr, 1);
+ }
+ }
+
+ if (offset == 0)
+ {
+ base = addr;
+ offset = const0_rtx;
+ }
+ if (GET_CODE (offset) == CONST)
+ offset = XEXP (offset, 0);
+ if (GET_CODE (offset) == PLUS)
+ {
+ if (GET_CODE (XEXP (offset, 0)) == CONST_INT)
+ {
+ base = gen_rtx_PLUS (GET_MODE (base), base, XEXP (offset, 1));
+ offset = XEXP (offset, 0);
+ }
+ else if (GET_CODE (XEXP (offset, 1)) == CONST_INT)
+ {
+ base = gen_rtx_PLUS (GET_MODE (base), base, XEXP (offset, 0));
+ offset = XEXP (offset, 1);
+ }
+ else
+ {
+ base = gen_rtx_PLUS (GET_MODE (base), base, offset);
+ offset = const0_rtx;
+ }
+ }
+ else if (GET_CODE (offset) != CONST_INT)
+ {
+ base = gen_rtx_PLUS (GET_MODE (base), base, offset);
+ offset = const0_rtx;
+ }
+
+ if (all_const && GET_CODE (base) == PLUS)
+ base = gen_rtx_CONST (GET_MODE (base), base);
+
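+ /* Everything non-constant has been folded into BASE by now, so
+ only a CONST_INT offset can be left. */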
+ gcc_assert (GET_CODE (offset) == CONST_INT);
+
+ val.start = INTVAL (offset);
+ val.end = val.start + GET_MODE_SIZE (GET_MODE (x));
+ val.base = base;
+ }
+ break;
+
+ case REG:
val.reg_flag = 1;
val.start = true_regnum (x);
if (val.start < 0)
else
/* A hard reg. */
val.end = val.start + hard_regno_nregs[val.start][GET_MODE (x)];
- }
- else if (GET_CODE (x) == SUBREG)
- {
+ break;
+
+ case SUBREG:
if (!REG_P (SUBREG_REG (x)))
/* This could be more precise, but it's good enough. */
return decompose (SUBREG_REG (x));
else
/* A hard reg. */
val.end = val.start + hard_regno_nregs[val.start][GET_MODE (x)];
+ break;
+
+ case SCRATCH:
+ /* This hasn't been assigned yet, so it can't conflict yet. */
+ val.safe = 1;
+ break;
+
+ default:
+ gcc_assert (CONSTANT_P (x));
+ val.safe = 1;
+ break;
}
- else if (CONSTANT_P (x)
- /* This hasn't been assigned yet, so it can't conflict yet. */
- || GET_CODE (x) == SCRATCH)
- val.safe = 1;
- else
- abort ();
return val;
}
if (ydata.safe)
return 1;
- if (!MEM_P (y))
- abort ();
+ gcc_assert (MEM_P (y));
/* If Y is memory and X is not, Y can't affect X. */
if (!MEM_P (x))
return 1;
case '%':
{
/* The last operand should not be marked commutative. */
- if (i == noperands - 1)
- abort ();
+ gcc_assert (i != noperands - 1);
/* We currently only support one commutative pair of
operands. Some existing asm code currently uses more
future we may handle it correctly. */
if (commutative < 0)
commutative = i;
- else if (!this_insn_is_asm)
- abort ();
+ else
+ gcc_assert (this_insn_is_asm);
}
break;
/* Use of ISDIGIT is tempting here, but it may get expensive because
recog_data.operand[i]);
/* An operand may not match itself. */
- if (c == i)
- abort ();
+ gcc_assert (c != i);
/* If C can be commuted with C+1, and C might need to match I,
then C+1 might also need to match I. */
early_data = decompose (recog_data.operand[i]);
- if (modified[i] == RELOAD_READ)
- abort ();
+ gcc_assert (modified[i] != RELOAD_READ);
if (this_alternative[i] == NO_REGS)
{
this_alternative_earlyclobber[i] = 0;
- if (this_insn_is_asm)
- error_for_asm (this_insn,
- "`&' constraint used with no register class");
- else
- abort ();
+ gcc_assert (this_insn_is_asm);
+ error_for_asm (this_insn,
+ "`&' constraint used with no register class");
}
for (j = 0; j < noperands; j++)
0, 0, i, RELOAD_OTHER);
operand_reloadnum[i] = output_reloadnum;
}
- else if (insn_code_number >= 0)
- abort ();
else
{
+ gcc_assert (insn_code_number < 0);
error_for_asm (insn, "inconsistent operand constraints in an `asm'");
/* Avoid further trouble with this insn. */
PATTERN (insn) = gen_rtx_USE (VOIDmode, const0_rtx);
do after the insn (such as for output addresses) are fine. */
if (no_input_reloads)
for (i = 0; i < n_reloads; i++)
- if (rld[i].in != 0
- && rld[i].when_needed != RELOAD_FOR_OUTADDR_ADDRESS
- && rld[i].when_needed != RELOAD_FOR_OUTPUT_ADDRESS)
- abort ();
+ gcc_assert (rld[i].in == 0
+ || rld[i].when_needed == RELOAD_FOR_OUTADDR_ADDRESS
+ || rld[i].when_needed == RELOAD_FOR_OUTPUT_ADDRESS);
#endif
/* Compute reload_mode and reload_nregs. */
tem =
simplify_gen_subreg (GET_MODE (x), reg_equiv_constant[regno],
GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
- if (!tem)
- abort ();
+ gcc_assert (tem);
return tem;
}
{
rtx op0 = XEXP (x, 0);
rtx op1 = XEXP (x, 1);
+ int regno;
+ int reloadnum;
if (GET_CODE (op1) != PLUS && GET_CODE (op1) != MINUS)
return 0;
where a base register is {inc,dec}remented by the contents
of another register or by a constant value. Thus, these
operands must match. */
- if (op0 != XEXP (op1, 0))
- abort ();
+ gcc_assert (op0 == XEXP (op1, 0));
/* Require index register (or constant). Let's just handle the
register case in the meantime... If the target allows
find_reloads_address_1 (mode, XEXP (op1, 1), 1, &XEXP (op1, 1),
opnum, type, ind_levels, insn);
- if (REG_P (XEXP (op1, 0)))
- {
- int regno = REGNO (XEXP (op1, 0));
- int reloadnum;
-
- /* A register that is incremented cannot be constant! */
- if (regno >= FIRST_PSEUDO_REGISTER
- && reg_equiv_constant[regno] != 0)
- abort ();
-
- /* Handle a register that is equivalent to a memory location
- which cannot be addressed directly. */
- if (reg_equiv_memory_loc[regno] != 0
- && (reg_equiv_address[regno] != 0
- || num_not_at_initial_offset))
- {
- rtx tem = make_memloc (XEXP (x, 0), regno);
+ gcc_assert (REG_P (XEXP (op1, 0)));
- if (reg_equiv_address[regno]
- || ! rtx_equal_p (tem, reg_equiv_mem[regno]))
- {
- /* First reload the memory location's address.
- We can't use ADDR_TYPE (type) here, because we need to
- write back the value after reading it, hence we actually
- need two registers. */
- find_reloads_address (GET_MODE (tem), &tem, XEXP (tem, 0),
- &XEXP (tem, 0), opnum,
- RELOAD_OTHER,
- ind_levels, insn);
-
- /* Then reload the memory location into a base
- register. */
- reloadnum = push_reload (tem, tem, &XEXP (x, 0),
- &XEXP (op1, 0),
- MODE_BASE_REG_CLASS (mode),
- GET_MODE (x), GET_MODE (x), 0,
- 0, opnum, RELOAD_OTHER);
-
- update_auto_inc_notes (this_insn, regno, reloadnum);
- return 0;
- }
- }
+ regno = REGNO (XEXP (op1, 0));
+
+ /* A register that is incremented cannot be constant! */
+ gcc_assert (regno < FIRST_PSEUDO_REGISTER
+ || reg_equiv_constant[regno] == 0);
- if (reg_renumber[regno] >= 0)
- regno = reg_renumber[regno];
+ /* Handle a register that is equivalent to a memory location
+ which cannot be addressed directly. */
+ if (reg_equiv_memory_loc[regno] != 0
+ && (reg_equiv_address[regno] != 0
+ || num_not_at_initial_offset))
+ {
+ rtx tem = make_memloc (XEXP (x, 0), regno);
- /* We require a base register here... */
- if (!REGNO_MODE_OK_FOR_BASE_P (regno, GET_MODE (x)))
+ if (reg_equiv_address[regno]
+ || ! rtx_equal_p (tem, reg_equiv_mem[regno]))
{
- reloadnum = push_reload (XEXP (op1, 0), XEXP (x, 0),
- &XEXP (op1, 0), &XEXP (x, 0),
- MODE_BASE_REG_CLASS (mode),
- GET_MODE (x), GET_MODE (x), 0, 0,
- opnum, RELOAD_OTHER);
+ /* First reload the memory location's address.
+ We can't use ADDR_TYPE (type) here, because we need to
+ write back the value after reading it, hence we actually
+ need two registers. */
+ find_reloads_address (GET_MODE (tem), &tem, XEXP (tem, 0),
+ &XEXP (tem, 0), opnum,
+ RELOAD_OTHER,
+ ind_levels, insn);
+
+ /* Then reload the memory location into a base
+ register. */
+ reloadnum = push_reload (tem, tem, &XEXP (x, 0),
+ &XEXP (op1, 0),
+ MODE_BASE_REG_CLASS (mode),
+ GET_MODE (x), GET_MODE (x), 0,
+ 0, opnum, RELOAD_OTHER);
update_auto_inc_notes (this_insn, regno, reloadnum);
return 0;
}
}
- else
- abort ();
+
+ if (reg_renumber[regno] >= 0)
+ regno = reg_renumber[regno];
+
+ /* We require a base register here... */
+ if (!REGNO_MODE_OK_FOR_BASE_P (regno, GET_MODE (x)))
+ {
+ reloadnum = push_reload (XEXP (op1, 0), XEXP (x, 0),
+ &XEXP (op1, 0), &XEXP (x, 0),
+ MODE_BASE_REG_CLASS (mode),
+ GET_MODE (x), GET_MODE (x), 0, 0,
+ opnum, RELOAD_OTHER);
+
+ update_auto_inc_notes (this_insn, regno, reloadnum);
+ return 0;
+ }
}
return 0;
rtx x_orig = x;
/* A register that is incremented cannot be constant! */
- if (regno >= FIRST_PSEUDO_REGISTER
- && reg_equiv_constant[regno] != 0)
- abort ();
+ gcc_assert (regno < FIRST_PSEUDO_REGISTER
+ || reg_equiv_constant[regno] == 0);
/* Handle a register that is equivalent to a memory location
which cannot be addressed directly. */
for (check_regno = 0; check_regno < max_regno; check_regno++)
{
#define CHECK_MODF(ARRAY) \
- if (ARRAY[check_regno] \
- && loc_mentioned_in_p (r->where, \
- ARRAY[check_regno])) \
- abort ()
+ gcc_assert (!ARRAY[check_regno] \
+ || !loc_mentioned_in_p (r->where, \
+ ARRAY[check_regno]))
CHECK_MODF (reg_equiv_constant);
CHECK_MODF (reg_equiv_memory_loc);
*r->where = reloadreg;
}
/* If reload got no reg and isn't optional, something's wrong. */
- else if (! rld[r->what].optional)
- abort ();
+ else
+ gcc_assert (rld[r->what].optional);
}
}
\f
{
/* We can't support X being a SUBREG because we might then need to know its
location if something inside it was replaced. */
- if (GET_CODE (x) == SUBREG)
- abort ();
+ gcc_assert (GET_CODE (x) != SUBREG);
copy_replacements_1 (&x, &y, n_replacements);
}
reg_equiv_memory_loc[r],
(rtx*) 0);
- if (reg_equiv_constant[r])
- return 0;
-
- abort ();
+ gcc_assert (reg_equiv_constant[r]);
+ return 0;
}
return (endregno > r
{
if (reg_equiv_memory_loc[regno])
return refers_to_mem_for_reload_p (in);
- else if (reg_equiv_constant[regno])
- return 0;
- abort ();
+ gcc_assert (reg_equiv_constant[regno]);
+ return 0;
}
}
else if (MEM_P (x))
else if (GET_CODE (x) == SCRATCH || GET_CODE (x) == PC
|| GET_CODE (x) == CC0)
return reg_mentioned_p (x, in);
- else if (GET_CODE (x) == PLUS)
+ else
{
+ gcc_assert (GET_CODE (x) == PLUS);
+
/* We actually want to know if X is mentioned somewhere inside IN.
We must not say that (plus (sp) (const_int 124)) is in
(plus (sp) (const_int 64)), since that can lead to incorrect reload
else return (reg_overlap_mentioned_for_reload_p (XEXP (x, 0), in)
|| reg_overlap_mentioned_for_reload_p (XEXP (x, 1), in));
}
- else
- abort ();
endregno = regno + (regno < FIRST_PSEUDO_REGISTER
? hard_regno_nregs[regno][GET_MODE (x)] : 1);
BASIC_BLOCK->global_live_at_start, which might still
contain registers that have not actually been allocated
since they have an equivalence. */
- if (! reload_completed)
- abort ();
+ gcc_assert (reload_completed);
}
else
{
*loc = reg_equiv_mem[regno];
else if (reg_equiv_address[regno])
*loc = gen_rtx_MEM (GET_MODE (x), reg_equiv_address[regno]);
- else if (!REG_P (regno_reg_rtx[regno])
- || REGNO (regno_reg_rtx[regno]) != regno)
- *loc = regno_reg_rtx[regno];
else
- abort ();
+ {
+ gcc_assert (!REG_P (regno_reg_rtx[regno])
+ || REGNO (regno_reg_rtx[regno]) != regno);
+ *loc = regno_reg_rtx[regno];
+ }
return;
}
reload_as_needed (global);
- if (old_frame_size != get_frame_size ())
- abort ();
+ gcc_assert (old_frame_size == get_frame_size ());
if (num_eliminable)
verify_initial_elim_offsets ();
SET_REGNO_REG_SET (&pseudos_counted, reg);
- if (r < 0)
- abort ();
+ gcc_assert (r >= 0);
spill_add_cost[r] += freq;
for (i = 0; i < rl->nregs; i++)
{
- if (spill_cost[best_reg + i] != 0
- || spill_add_cost[best_reg + i] != 0)
- abort ();
+ gcc_assert (spill_cost[best_reg + i] == 0);
+ gcc_assert (spill_add_cost[best_reg + i] == 0);
SET_HARD_REG_BIT (used_spill_regs_local, best_reg + i);
}
return 1;
case CLOBBER:
case ASM_OPERANDS:
case SET:
- abort ();
+ gcc_unreachable ();
default:
break;
if (! insn_is_asm && icode < 0)
{
- if (GET_CODE (PATTERN (insn)) == USE
- || GET_CODE (PATTERN (insn)) == CLOBBER
- || GET_CODE (PATTERN (insn)) == ADDR_VEC
- || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
- || GET_CODE (PATTERN (insn)) == ASM_INPUT)
- return 0;
- abort ();
+ gcc_assert (GET_CODE (PATTERN (insn)) == USE
+ || GET_CODE (PATTERN (insn)) == CLOBBER
+ || GET_CODE (PATTERN (insn)) == ADDR_VEC
+ || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
+ || GET_CODE (PATTERN (insn)) == ASM_INPUT);
+ return 0;
}
if (old_set != 0 && REG_P (SET_DEST (old_set))
PATTERN (insn) = gen_rtx_PARALLEL (VOIDmode, vec);
add_clobbers (PATTERN (insn), INSN_CODE (insn));
}
- if (INSN_CODE (insn) < 0)
- abort ();
+ gcc_assert (INSN_CODE (insn) >= 0);
}
/* If we have a nonzero offset, and the source is already
a simple REG, the following transformation would
for (ep = reg_eliminate; ep < ®_eliminate[NUM_ELIMINABLE_REGS]; ep++)
{
INITIAL_ELIMINATION_OFFSET (ep->from, ep->to, t);
- if (t != ep->initial_offset)
- abort ();
+ gcc_assert (t == ep->initial_offset);
}
#else
INITIAL_FRAME_POINTER_OFFSET (t);
- if (t != reg_eliminate[0].initial_offset)
- abort ();
+ gcc_assert (t == reg_eliminate[0].initial_offset);
#endif
}
/* Record the current hard register the pseudo is allocated to in
pseudo_previous_regs so we avoid reallocating it to the same
hard reg in a later pass. */
- if (reg_renumber[i] < 0)
- abort ();
+ gcc_assert (reg_renumber[i] >= 0);
SET_HARD_REG_BIT (pseudo_previous_regs[i], reg_renumber[i]);
/* Mark it as no longer having a hard register home. */
/* Make sure we only enlarge the set. */
GO_IF_HARD_REG_SUBSET (used_by_pseudos2, chain->used_spill_regs, ok);
- abort ();
+ gcc_unreachable ();
ok:;
}
}
used_in_set = &reload_reg_used_in_insn;
break;
default:
- abort ();
+ gcc_unreachable ();
}
/* We resolve conflicts with remaining reloads of the same type by
excluding the intervals of reload registers by them from the
case RELOAD_FOR_OTHER_ADDRESS:
return ! TEST_HARD_REG_BIT (reload_reg_used_in_other_addr, regno);
+
+ default:
+ gcc_unreachable ();
}
- abort ();
}
/* Return 1 if the value in reload reg REGNO, as used by a reload
return 0;
return 1;
- }
- abort ();
+ default:
+ gcc_unreachable ();
+ }
}
\f
/* Return 1 if the reloads denoted by R1 and R2 cannot share a register.
return 1;
default:
- abort ();
+ gcc_unreachable ();
}
}
\f
{
if (REG_P (equiv))
regno = REGNO (equiv);
- else if (GET_CODE (equiv) == SUBREG)
+ else
{
/* This must be a SUBREG of a hard register.
Make a new REG since this might be used in an
address and not all machines support SUBREGs
there. */
+ gcc_assert (GET_CODE (equiv) == SUBREG);
regno = subreg_regno (equiv);
equiv = gen_rtx_REG (rld[r].mode, regno);
}
- else
- abort ();
}
/* If we found a spill reg, reject it unless it is free
/* Some sanity tests to verify that the reloads found in the first
pass are identical to the ones we have now. */
- if (chain->n_reloads != n_reloads)
- abort ();
+ gcc_assert (chain->n_reloads == n_reloads);
for (i = 0; i < n_reloads; i++)
{
if (chain->rld[i].regno < 0 || chain->rld[i].reg_rtx != 0)
continue;
- if (chain->rld[i].when_needed != rld[i].when_needed)
- abort ();
+ gcc_assert (chain->rld[i].when_needed == rld[i].when_needed);
for (j = 0; j < n_spills; j++)
if (spill_regs[j] == chain->rld[i].regno)
if (! set_reload_reg (j, i))
SET_HARD_REG_BIT (reg_is_output_reload, i + nr);
}
- if (rld[r].when_needed != RELOAD_OTHER
- && rld[r].when_needed != RELOAD_FOR_OUTPUT
- && rld[r].when_needed != RELOAD_FOR_INSN)
- abort ();
+ gcc_assert (rld[r].when_needed == RELOAD_OTHER
+ || rld[r].when_needed == RELOAD_FOR_OUTPUT
+ || rld[r].when_needed == RELOAD_FOR_INSN);
}
}
}
so abort. */
if (rld[j].reg_rtx)
for (k = 0; k < j; k++)
- if (rld[k].in != 0 && rld[k].reg_rtx != 0
- && rld[k].when_needed == rld[j].when_needed
- && rtx_equal_p (rld[k].reg_rtx, rld[j].reg_rtx)
- && ! rtx_equal_p (rld[k].in, rld[j].in))
- abort ();
+ gcc_assert (rld[k].in == 0 || rld[k].reg_rtx == 0
+ || rld[k].when_needed != rld[j].when_needed
+ || !rtx_equal_p (rld[k].reg_rtx,
+ rld[j].reg_rtx)
+ || rtx_equal_p (rld[k].in,
+ rld[j].in));
}
}
}
where = &other_input_address_reload_insns;
break;
default:
- abort ();
+ gcc_unreachable ();
}
push_to_sequence (*where);
/* We are not going to bother supporting the case where a
incremented register can't be copied directly from
OLDEQUIV since this seems highly unlikely. */
- if (rl->secondary_in_reload >= 0)
- abort ();
+ gcc_assert (rl->secondary_in_reload < 0);
if (reload_inherited[j])
oldequiv = reloadreg;
return;
/* If is a JUMP_INSN, we can't support output reloads yet. */
- if (JUMP_P (insn))
- abort ();
+ gcc_assert (!JUMP_P (insn));
emit_output_reload_insns (chain, rld + j, j);
}
&& !can_throw_internal (insn)
&& insn != BB_HEAD (bb))
insn = PREV_INSN (insn);
- if (!CALL_P (insn) && !can_throw_internal (insn))
- abort ();
+ gcc_assert (CALL_P (insn) || can_throw_internal (insn));
BB_END (bb) = insn;
inserted = true;
insn = NEXT_INSN (insn);
|| asm_noperands (PATTERN (insn)) >= 0);
default:
- abort ();
+ gcc_unreachable ();
}
}
\f
if (had_barrier)
emit_barrier_after (seq_insn);
- if (i != length + 1)
- abort ();
+ gcc_assert (i == length + 1);
return seq_insn;
}
break;
default:
- abort ();
+ gcc_unreachable ();
}
}
else
int flags;
/* Validate our arguments. */
- if ((condition == const_true_rtx && ! thread_if_true)
- || (! own_thread && ! thread_if_true))
- abort ();
+ gcc_assert (condition != const_true_rtx || thread_if_true);
+ gcc_assert (own_thread || thread_if_true);
flags = get_jump_flags (insn, JUMP_LABEL (insn));
{
rtx label;
- if (! thread_if_true)
- abort ();
+ gcc_assert (thread_if_true);
if (new_thread && JUMP_P (new_thread)
&& (simplejump_p (new_thread)
trial = PREV_INSN (insn);
delete_related_insns (insn);
- if (GET_CODE (pat) != SEQUENCE)
- abort ();
+ gcc_assert (GET_CODE (pat) == SEQUENCE);
after = trial;
for (i = 0; i < XVECLEN (pat, 0); i++)
{
trial = PREV_INSN (insn);
delete_related_insns (insn);
- if (GET_CODE (pat) != SEQUENCE)
- abort ();
+ gcc_assert (GET_CODE (pat) == SEQUENCE);
after = trial;
for (i = 0; i < XVECLEN (pat, 0); i++)
{
unsigned int last_regno
= regno + hard_regno_nregs[regno][GET_MODE (x)];
- if (last_regno > FIRST_PSEUDO_REGISTER)
- abort ();
+ gcc_assert (last_regno <= FIRST_PSEUDO_REGISTER);
for (r = regno; r < last_regno; r++)
SET_HARD_REG_BIT (res->regs, r);
}
unsigned int last_regno
= regno + hard_regno_nregs[regno][GET_MODE (x)];
- if (last_regno > FIRST_PSEUDO_REGISTER)
- abort ();
+ gcc_assert (last_regno <= FIRST_PSEUDO_REGISTER);
for (r = regno; r < last_regno; r++)
SET_HARD_REG_BIT (res->regs, r);
}
{
sequence = PATTERN (NEXT_INSN (insn));
seq_size = XVECLEN (sequence, 0);
- if (GET_CODE (sequence) != SEQUENCE)
- abort ();
+ gcc_assert (GET_CODE (sequence) == SEQUENCE);
}
res->memory = 1;
unsigned int last_regno
= regno + hard_regno_nregs[regno][GET_MODE (x)];
- if (last_regno > FIRST_PSEUDO_REGISTER)
- abort ();
+ gcc_assert (last_regno <= FIRST_PSEUDO_REGISTER);
for (r = regno; r < last_regno; r++)
SET_HARD_REG_BIT (res->regs, r);
}
unsigned int last_regno
= regno + hard_regno_nregs[regno][GET_MODE (x)];
- if (last_regno > FIRST_PSEUDO_REGISTER)
- abort ();
+ gcc_assert (last_regno <= FIRST_PSEUDO_REGISTER);
for (r = regno; r < last_regno; r++)
SET_HARD_REG_BIT (res->regs, r);
}
break;
default:
- abort ();
+ gcc_unreachable ();
}
}
return copy;
contain anything but integers and other rtx's,
except for within LABEL_REFs and SYMBOL_REFs. */
default:
- abort ();
+ gcc_unreachable ();
}
}
return 1;
{
rtx tmp;
- if (! INSN_P (x) || ! INSN_P (y))
- abort ();
+ gcc_assert (INSN_P (x));
+ gcc_assert (INSN_P (y));
tmp = PATTERN (y);
note_stores (PATTERN (x), insn_dependent_p_1, &tmp);
}
default:
-#ifdef ENABLE_CHECKING
- if (!CONSTANT_P (x))
- abort ();
-#endif
-
+ gcc_assert (CONSTANT_P (x));
return 0;
}
}
if (GET_CODE (x) == CC0)
return 1;
- if (!REG_P (x))
- abort ();
+ gcc_assert (REG_P (x));
regno = REGNO (x);
last_regno = (regno >= FIRST_PSEUDO_REGISTER ? regno
if (!CALL_P (insn))
return 0;
- if (! datum)
- abort ();
+ gcc_assert (datum);
if (!REG_P (datum))
{
return;
}
- abort ();
+ gcc_unreachable ();
}
/* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
x = simplify_subreg (GET_MODE (x), new,
GET_MODE (SUBREG_REG (x)),
SUBREG_BYTE (x));
- if (! x)
- abort ();
+ gcc_assert (x);
}
else
SUBREG_REG (x) = new;
{
x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
new, GET_MODE (XEXP (x, 0)));
- if (! x)
- abort ();
+ gcc_assert (x);
}
else
XEXP (x, 0) = new;
if (WORDS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
/* If the subreg crosses a word boundary ensure that
it also begins and ends on a word boundary. */
- if ((subreg_byte % UNITS_PER_WORD
- + GET_MODE_SIZE (outer_mode)) > UNITS_PER_WORD
- && (subreg_byte % UNITS_PER_WORD
- || GET_MODE_SIZE (outer_mode) % UNITS_PER_WORD))
- abort ();
+ gcc_assert (!((subreg_byte % UNITS_PER_WORD
+ + GET_MODE_SIZE (outer_mode)) > UNITS_PER_WORD
+ && (subreg_byte % UNITS_PER_WORD
+ || GET_MODE_SIZE (outer_mode) % UNITS_PER_WORD)));
if (WORDS_BIG_ENDIAN)
word = (GET_MODE_SIZE (inner_mode)
int mode_multiple, nregs_multiple;
int y_offset;
- if (xregno >= FIRST_PSEUDO_REGISTER)
- abort ();
+ gcc_assert (xregno < FIRST_PSEUDO_REGISTER);
nregs_xmode = hard_regno_nregs[xregno][xmode];
nregs_ymode = hard_regno_nregs[xregno][ymode];
/* size of ymode must not be greater than the size of xmode. */
mode_multiple = GET_MODE_SIZE (xmode) / GET_MODE_SIZE (ymode);
- if (mode_multiple == 0)
- abort ();
+ gcc_assert (mode_multiple != 0);
y_offset = offset / GET_MODE_SIZE (ymode);
nregs_multiple = nregs_xmode / nregs_ymode;
int mode_multiple, nregs_multiple;
int y_offset;
- if (xregno >= FIRST_PSEUDO_REGISTER)
- abort ();
+ gcc_assert (xregno < FIRST_PSEUDO_REGISTER);
nregs_xmode = hard_regno_nregs[xregno][xmode];
nregs_ymode = hard_regno_nregs[xregno][ymode];
if (offset == subreg_lowpart_offset (ymode, xmode))
return true;
-#ifdef ENABLE_CHECKING
/* This should always pass, otherwise we don't know how to verify the
constraint. These conditions may be relaxed but subreg_offset would
need to be redesigned. */
- if (GET_MODE_SIZE (xmode) % GET_MODE_SIZE (ymode)
- || GET_MODE_SIZE (ymode) % nregs_ymode
- || nregs_xmode % nregs_ymode)
- abort ();
-#endif
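+ /* For instance, on a 32-bit target a DImode register pair seen as
+ two SImode registers gives nregs_xmode == 2 and nregs_ymode == 1,
+ satisfying all three divisibility checks. */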
+ gcc_assert ((GET_MODE_SIZE (xmode) % GET_MODE_SIZE (ymode)) == 0);
+ gcc_assert ((GET_MODE_SIZE (ymode) % nregs_ymode) == 0);
+ gcc_assert ((nregs_xmode % nregs_ymode) == 0);
/* The XMODE value can be seen as a vector of NREGS_XMODE
values. The subreg must represent a lowpart of given field.
/* size of ymode must not be greater than the size of xmode. */
mode_multiple = GET_MODE_SIZE (xmode) / GET_MODE_SIZE (ymode);
- if (mode_multiple == 0)
- abort ();
+ gcc_assert (mode_multiple != 0);
y_offset = offset / GET_MODE_SIZE (ymode);
nregs_multiple = nregs_xmode / nregs_ymode;
-#ifdef ENABLE_CHECKING
- if (offset % GET_MODE_SIZE (ymode)
- || mode_multiple % nregs_multiple)
- abort ();
-#endif
+
+ gcc_assert ((offset % GET_MODE_SIZE (ymode)) == 0);
+ gcc_assert ((mode_multiple % nregs_multiple) == 0);
+
return (!(y_offset % (mode_multiple / nregs_multiple)));
}
if (GET_CODE (XEXP (p, 0)) == USE
&& REG_P (XEXP (XEXP (p, 0), 0)))
{
- if (REGNO (XEXP (XEXP (p, 0), 0)) >= FIRST_PSEUDO_REGISTER)
- abort ();
+ gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);
/* We only care about registers which can hold function
arguments. */
CODE_LABEL. */
if (LABEL_P (before))
{
- if (before != boundary)
- abort ();
+ gcc_assert (before == boundary);
break;
}
}
break;
default:
- abort ();
+ gcc_unreachable ();
}
return true;
}
x = *xp;
}
- if (!REG_P (x))
- abort ();
+ gcc_assert (REG_P (x));
/* We've verified that hard registers are dead, so we may keep the side
effect. Otherwise replace it by new pseudo. */
rtx pat;
int i;
rtx note;
+ int applied;
insn = emit_copy_of_insn_after (insn, after);
pat = PATTERN (insn);
}
break;
default:
- abort ();
+ gcc_unreachable ();
}
- if (!apply_change_group ())
- abort ();
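+ /* apply_change_group must be called for its side effects, so keep
+ the call outside gcc_assert, which may not evaluate its argument
+ when assertion checking is disabled. */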
+ applied = apply_change_group ();
+ gcc_assert (applied);
return insn;
}
/* We cannot insert instructions on an abnormal critical edge.
It will be easier to find the culprit if we die now. */
- if ((e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e))
- abort ();
+ gcc_assert (!(e->flags & EDGE_ABNORMAL) || !EDGE_CRITICAL_P (e));
/* Do not use emit_insn_on_edge as we want to preserve notes and similar
stuff. We also emit CALL_INSNS and friends. */
result_low = MIN (low0, low1);
break;
default:
- abort ();
+ gcc_unreachable ();
}
if (result_width < mode_width)
{
/* Must be a hard reg that's not valid in MODE. */
result = gen_lowpart_common (mode, copy_to_reg (x));
- if (result == 0)
- abort ();
+ gcc_assert (result != 0);
return result;
}
- else if (MEM_P (x))
+ else
{
- /* The only additional case we can do is MEM. */
int offset = 0;
+ /* The only additional case we can do is MEM. */
+ gcc_assert (MEM_P (x));
+
/* The following exposes the use of "x" to CSE. */
if (GET_MODE_SIZE (GET_MODE (x)) <= UNITS_PER_WORD
&& SCALAR_INT_MODE_P (GET_MODE (x))
return adjust_address (x, mode, offset);
}
- else
- abort ();
}
rtx
sbitmap_ptr bp = b->elms;
/* A should be at least as large as DEST, to have a defined source. */
- if (a->size < dst_size)
- abort ();
+ gcc_assert (a->size >= dst_size);
/* If minuend is smaller, we simply pretend it to be zero bits, i.e.
only copy the subtrahend into dest. */
if (b->size < min_size)
{
enum reg_note present_dep_type = 0;
- if (anti_dependency_cache == NULL || output_dependency_cache == NULL)
- abort ();
+ gcc_assert (anti_dependency_cache);
+ gcc_assert (output_dependency_cache);
if (bitmap_bit_p (&true_dependency_cache[INSN_LUID (insn)],
INSN_LUID (elem)))
/* Do nothing (present_set_type is already 0). */
may be changed. */
if (true_dependency_cache != NULL)
{
- if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
- bitmap_clear_bit (&anti_dependency_cache[INSN_LUID (insn)],
- INSN_LUID (elem));
- else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT
- && output_dependency_cache)
- bitmap_clear_bit (&output_dependency_cache[INSN_LUID (insn)],
- INSN_LUID (elem));
- else
- abort ();
+ enum reg_note kind = REG_NOTE_KIND (link);
+ switch (kind)
+ {
+ case REG_DEP_ANTI:
+ bitmap_clear_bit (&anti_dependency_cache[INSN_LUID (insn)],
+ INSN_LUID (elem));
+ break;
+ case REG_DEP_OUTPUT:
+ gcc_assert (output_dependency_cache);
+ bitmap_clear_bit (&output_dependency_cache[INSN_LUID (insn)],
+ INSN_LUID (elem));
+ break;
+ default:
+ gcc_unreachable ();
+ }
}
#endif
purpose already. */
else if (regno >= deps->max_reg)
{
- if (GET_CODE (PATTERN (insn)) != USE
- && GET_CODE (PATTERN (insn)) != CLOBBER)
- abort ();
+ gcc_assert (GET_CODE (PATTERN (insn)) == USE
+ || GET_CODE (PATTERN (insn)) == CLOBBER);
}
else
{
purpose already. */
else if (regno >= deps->max_reg)
{
- if (GET_CODE (PATTERN (insn)) != USE
- && GET_CODE (PATTERN (insn)) != CLOBBER)
- abort ();
+ gcc_assert (GET_CODE (PATTERN (insn)) == USE
+ || GET_CODE (PATTERN (insn)) == CLOBBER);
}
else
{
return;
}
}
- abort ();
+ gcc_unreachable ();
}
\f
However, if we have enabled checking we might as well go
ahead and verify that add_dependence worked properly. */
- if (NOTE_P (from)
- || INSN_DELETED_P (from)
- || (forward_dependency_cache != NULL
- && bitmap_bit_p (&forward_dependency_cache[INSN_LUID (from)],
- INSN_LUID (to)))
- || (forward_dependency_cache == NULL
- && find_insn_list (to, INSN_DEPEND (from))))
- abort ();
+ gcc_assert (!NOTE_P (from));
+ gcc_assert (!INSN_DELETED_P (from));
+ if (forward_dependency_cache)
+ gcc_assert (!bitmap_bit_p (&forward_dependency_cache[INSN_LUID (from)],
+ INSN_LUID (to)));
+ else
+ gcc_assert (!find_insn_list (to, INSN_DEPEND (from)));
+
+ /* ??? If bitmap_bit_p is a predicate, what is this supposed to do? */
if (forward_dependency_cache != NULL)
bitmap_bit_p (&forward_dependency_cache[INSN_LUID (from)],
INSN_LUID (to));
for (; insn != aftertail; insn = NEXT_INSN (insn))
{
- if (LABEL_P (insn))
- abort ();
+ gcc_assert (!LABEL_P (insn));
/* Create new basic blocks just before first insn. */
if (inside_basic_block_p (insn))
{
schedule_block (-1, n_insns);
/* Sanity check: verify that all region insns were scheduled. */
- if (sched_n_insns != n_insns)
- abort ();
+ gcc_assert (sched_n_insns == n_insns);
head = current_sched_info->head;
tail = current_sched_info->tail;
{
current_blocks = RGN_BLOCKS (rgn);
- if (bb != BLOCK_TO_BB (BB_TO_BLOCK (bb)))
- abort ();
-
+ gcc_assert (bb == BLOCK_TO_BB (BB_TO_BLOCK (bb)));
fprintf (sched_dump, " %d/%d ", bb, BB_TO_BLOCK (bb));
}
sp->update_bbs.nr_members = update_idx;
/* Make sure we didn't overrun the end of bblst_table. */
- if (bblst_last > bblst_size)
- abort ();
+ gcc_assert (bblst_last <= bblst_size);
}
else
{
}
/* Sanity check: verify that all region insns were scheduled. */
- if (sched_rgn_n_insns != rgn_n_insns)
- abort ();
+ gcc_assert (sched_rgn_n_insns == rgn_n_insns);
/* Restore line notes. */
if (write_symbols != NO_DEBUG)
sbitmap_zero (blocks);
SET_BIT (blocks, rgn_bb_table[RGN_BLOCKS (rgn)]);
- if (deaths_in_region[rgn]
- != count_or_remove_death_notes (blocks, 0))
- abort ();
+ gcc_assert (deaths_in_region[rgn]
+ == count_or_remove_death_notes (blocks, 0));
}
free (deaths_in_region);
}
nr_inter, nr_spec);
}
else
- {
- if (nr_inter > 0)
- abort ();
- }
+ gcc_assert (nr_inter <= 0);
fprintf (sched_dump, "\n\n");
}
break;
case SEQUENCE:
/* Should never see SEQUENCE codes until after reorg. */
- abort ();
- break;
+ gcc_unreachable ();
case ASM_INPUT:
sprintf (buf, "asm {%s}", XSTR (x, 0));
break;
case PARM_DECL:
/* Parm decls go in their own separate chains
and are output by sdbout_reg_parms and sdbout_parms. */
- abort ();
+ gcc_unreachable ();
case VAR_DECL:
/* Don't mention a variable that is external.
if (DECL_IGNORED_P (decl))
return;
- if (! (TREE_CODE (decl) == VAR_DECL
- && MEM_P (DECL_RTL (decl))
- && DECL_INITIAL (decl)))
- abort ();
+ gcc_assert (TREE_CODE (decl) == VAR_DECL);
+ gcc_assert (MEM_P (DECL_RTL (decl)));
+ gcc_assert (DECL_INITIAL (decl));
PUT_SDB_DEF (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)));
PUT_SDB_VAL (XEXP (DECL_RTL (decl), 0));
if (code == VEC_DUPLICATE)
{
- if (!VECTOR_MODE_P (mode))
- abort ();
- if (GET_MODE (trueop) != VOIDmode
- && !VECTOR_MODE_P (GET_MODE (trueop))
- && GET_MODE_INNER (mode) != GET_MODE (trueop))
- abort ();
- if (GET_MODE (trueop) != VOIDmode
- && VECTOR_MODE_P (GET_MODE (trueop))
- && GET_MODE_INNER (mode) != GET_MODE_INNER (GET_MODE (trueop)))
- abort ();
+ gcc_assert (VECTOR_MODE_P (mode));
+ if (GET_MODE (trueop) != VOIDmode)
+ {
+ if (!VECTOR_MODE_P (GET_MODE (trueop)))
+ gcc_assert (GET_MODE_INNER (mode) == GET_MODE (trueop));
+ else
+ gcc_assert (GET_MODE_INNER (mode)
+ == GET_MODE_INNER (GET_MODE (trueop)));
+ }
if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
|| GET_CODE (trueop) == CONST_VECTOR)
{
int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
- if (in_n_elts >= n_elts || n_elts % in_n_elts)
- abort ();
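+ /* The result must be a whole number of copies of the input
+ vector. */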
+ gcc_assert (in_n_elts < n_elts);
+ gcc_assert ((n_elts % in_n_elts) == 0);
for (i = 0; i < n_elts; i++)
RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
}
rtvec v = rtvec_alloc (n_elts);
unsigned int i;
- if (op_n_elts != n_elts)
- abort ();
-
+ gcc_assert (op_n_elts == n_elts);
for (i = 0; i < n_elts; i++)
{
rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
case ZERO_EXTEND:
/* When zero-extending a CONST_INT, we need to know its
original mode. */
- if (op_mode == VOIDmode)
- abort ();
+ gcc_assert (op_mode != VOIDmode);
if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
{
/* If we were really extending the mode,
we would have to distinguish between zero-extension
and sign-extension. */
- if (width != GET_MODE_BITSIZE (op_mode))
- abort ();
+ gcc_assert (width == GET_MODE_BITSIZE (op_mode));
val = arg0;
}
else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
/* If we were really extending the mode,
we would have to distinguish between zero-extension
and sign-extension. */
- if (width != GET_MODE_BITSIZE (op_mode))
- abort ();
+ gcc_assert (width == GET_MODE_BITSIZE (op_mode));
val = arg0;
}
else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
return 0;
default:
- abort ();
+ gcc_unreachable ();
}
val = trunc_int_for_mode (val, mode);
break;
case ZERO_EXTEND:
- if (op_mode == VOIDmode)
- abort ();
+ gcc_assert (op_mode != VOIDmode);
if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
return 0;
real_from_target (&d, tmp, mode);
}
default:
- abort ();
+ gcc_unreachable ();
}
return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
}
break;
default:
- abort ();
+ gcc_unreachable ();
}
return immed_double_const (xl, xh, mode);
}
rtx trueop0, trueop1;
rtx tem;
-#ifdef ENABLE_CHECKING
/* Relational operations don't work here. We must know the mode
of the operands in order to do the comparison correctly.
Assuming a full word can give incorrect results.
Consider comparing 128 with -128 in QImode. */
-
- if (GET_RTX_CLASS (code) == RTX_COMPARE
- || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
- abort ();
-#endif
+ gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
+ gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
/* Make sure the constant is second. */
if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
rtvec v = rtvec_alloc (n_elts);
unsigned int i;
- if (op0_n_elts != n_elts || op1_n_elts != n_elts)
- abort ();
-
+ gcc_assert (op0_n_elts == n_elts);
+ gcc_assert (op1_n_elts == n_elts);
for (i = 0; i < n_elts; i++)
{
rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
GET_MODE (op1));
for (i = 0; i < 4; i++)
{
- if (code == AND)
+ switch (code)
+ {
+ case AND:
tmp0[i] &= tmp1[i];
- else if (code == IOR)
+ break;
+ case IOR:
tmp0[i] |= tmp1[i];
- else if (code == XOR)
+ break;
+ case XOR:
tmp0[i] ^= tmp1[i];
- else
- abort ();
+ break;
+ default:
+ gcc_unreachable ();
+ }
}
real_from_target (&r, tmp0, mode);
return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
case VEC_SELECT:
if (!VECTOR_MODE_P (mode))
{
- if (!VECTOR_MODE_P (GET_MODE (trueop0))
- || (mode
- != GET_MODE_INNER (GET_MODE (trueop0)))
- || GET_CODE (trueop1) != PARALLEL
- || XVECLEN (trueop1, 0) != 1
- || GET_CODE (XVECEXP (trueop1, 0, 0)) != CONST_INT)
- abort ();
+ gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
+ gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
+ gcc_assert (GET_CODE (trueop1) == PARALLEL);
+ gcc_assert (XVECLEN (trueop1, 0) == 1);
+ gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
if (GET_CODE (trueop0) == CONST_VECTOR)
- return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP (trueop1, 0, 0)));
+ return CONST_VECTOR_ELT (trueop0,
+ INTVAL (XVECEXP (trueop1, 0, 0)));
}
else
{
- if (!VECTOR_MODE_P (GET_MODE (trueop0))
- || (GET_MODE_INNER (mode)
- != GET_MODE_INNER (GET_MODE (trueop0)))
- || GET_CODE (trueop1) != PARALLEL)
- abort ();
+ gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
+ gcc_assert (GET_MODE_INNER (mode)
+ == GET_MODE_INNER (GET_MODE (trueop0)));
+ gcc_assert (GET_CODE (trueop1) == PARALLEL);
if (GET_CODE (trueop0) == CONST_VECTOR)
{
rtvec v = rtvec_alloc (n_elts);
unsigned int i;
- if (XVECLEN (trueop1, 0) != (int) n_elts)
- abort ();
+ gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
for (i = 0; i < n_elts; i++)
{
rtx x = XVECEXP (trueop1, 0, i);
- if (GET_CODE (x) != CONST_INT)
- abort ();
- RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, INTVAL (x));
+ gcc_assert (GET_CODE (x) == CONST_INT);
+ RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
+ INTVAL (x));
}
return gen_rtx_CONST_VECTOR (mode, v);
? GET_MODE (trueop1)
: GET_MODE_INNER (mode));
- if (!VECTOR_MODE_P (mode)
- || (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
- != GET_MODE_SIZE (mode)))
- abort ();
-
- if ((VECTOR_MODE_P (op0_mode)
- && (GET_MODE_INNER (mode)
- != GET_MODE_INNER (op0_mode)))
- || (!VECTOR_MODE_P (op0_mode)
- && GET_MODE_INNER (mode) != op0_mode))
- abort ();
-
- if ((VECTOR_MODE_P (op1_mode)
- && (GET_MODE_INNER (mode)
- != GET_MODE_INNER (op1_mode)))
- || (!VECTOR_MODE_P (op1_mode)
- && GET_MODE_INNER (mode) != op1_mode))
- abort ();
+ gcc_assert (VECTOR_MODE_P (mode));
+ gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
+ == GET_MODE_SIZE (mode));
+
+ if (VECTOR_MODE_P (op0_mode))
+ gcc_assert (GET_MODE_INNER (mode)
+ == GET_MODE_INNER (op0_mode));
+ else
+ gcc_assert (GET_MODE_INNER (mode) == op0_mode);
+
+ if (VECTOR_MODE_P (op1_mode))
+ gcc_assert (GET_MODE_INNER (mode)
+ == GET_MODE_INNER (op1_mode));
+ else
+ gcc_assert (GET_MODE_INNER (mode) == op1_mode);
if ((GET_CODE (trueop0) == CONST_VECTOR
|| GET_CODE (trueop0) == CONST_INT
return 0;
default:
- abort ();
+ gcc_unreachable ();
}
return 0;
return 0;
default:
- abort ();
+ gcc_unreachable ();
}
val = trunc_int_for_mode (val, mode);
rtx trueop0;
rtx trueop1;
- if (mode == VOIDmode
- && (GET_MODE (op0) != VOIDmode
- || GET_MODE (op1) != VOIDmode))
- abort ();
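+ /* The comparison mode may only be VOIDmode when both operands
+ already have VOIDmode. */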
+ gcc_assert (mode != VOIDmode
+ || (GET_MODE (op0) == VOIDmode
+ && GET_MODE (op1) == VOIDmode));
/* If op0 is a compare, extract the comparison arguments from it. */
if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
case UNORDERED:
return const0_rtx;
default:
- abort ();
+ gcc_unreachable ();
}
}
\f
break;
case VEC_MERGE:
- if (GET_MODE (op0) != mode
- || GET_MODE (op1) != mode
- || !VECTOR_MODE_P (mode))
- abort ();
+ gcc_assert (GET_MODE (op0) == mode);
+ gcc_assert (GET_MODE (op1) == mode);
+ gcc_assert (VECTOR_MODE_P (mode));
op2 = avoid_constant_pool_reference (op2);
if (GET_CODE (op2) == CONST_INT)
{
break;
default:
- abort ();
+ gcc_unreachable ();
}
return 0;
elems = &op;
elem_bitsize = max_bitsize;
}
-
- if (BITS_PER_UNIT % value_bit != 0)
- abort (); /* Too complicated; reducing value_bit may help. */
- if (elem_bitsize % BITS_PER_UNIT != 0)
- abort (); /* I don't know how to handle endianness of sub-units. */
+ /* If this assertion fails, the situation is too complicated;
+ reducing value_bit may help. */
+ gcc_assert (BITS_PER_UNIT % value_bit == 0);
+ /* I don't know how to handle endianness of sub-units. */
+ gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
for (elem = 0; elem < num_elem; elem++)
{
{
/* If this triggers, someone should have generated a
CONST_INT instead. */
- if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
- abort ();
+ gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
*vp++ = CONST_DOUBLE_LOW (el) >> i;
for (; i < max_bitsize; i += value_bit)
*vp++ = 0;
}
- else if (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT)
+ else
{
long tmp[max_bitsize / 32];
int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
-
- if (bitsize > elem_bitsize)
- abort ();
- if (bitsize % value_bit != 0)
- abort ();
+
+ gcc_assert (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT);
+ gcc_assert (bitsize <= elem_bitsize);
+ gcc_assert (bitsize % value_bit == 0);
real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
GET_MODE (el));
for (; i < elem_bitsize; i += value_bit)
*vp++ = 0;
}
- else
- abort ();
break;
default:
- abort ();
+ gcc_unreachable ();
}
}
/* BYTE should still be inside OP. (Note that BYTE is unsigned,
so if it's become negative it will instead be very large.) */
- if (byte >= GET_MODE_SIZE (innermode))
- abort ();
+ gcc_assert (byte < GET_MODE_SIZE (innermode));
/* Convert from bytes to chunks of size value_bit. */
value_start = byte * (BITS_PER_UNIT / value_bit);
outer_class = GET_MODE_CLASS (outer_submode);
elem_bitsize = GET_MODE_BITSIZE (outer_submode);
- if (elem_bitsize % value_bit != 0)
- abort ();
- if (elem_bitsize + value_start * value_bit > max_bitsize)
- abort ();
+ gcc_assert (elem_bitsize % value_bit == 0);
+ gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
for (elem = 0; elem < num_elem; elem++)
{
break;
default:
- abort ();
+ gcc_unreachable ();
}
}
if (VECTOR_MODE_P (outermode))
enum machine_mode innermode, unsigned int byte)
{
/* Little bit of sanity checking. */
- if (innermode == VOIDmode || outermode == VOIDmode
- || innermode == BLKmode || outermode == BLKmode)
- abort ();
+ gcc_assert (innermode != VOIDmode);
+ gcc_assert (outermode != VOIDmode);
+ gcc_assert (innermode != BLKmode);
+ gcc_assert (outermode != BLKmode);
- if (GET_MODE (op) != innermode
- && GET_MODE (op) != VOIDmode)
- abort ();
+ gcc_assert (GET_MODE (op) == innermode
+ || GET_MODE (op) == VOIDmode);
- if (byte % GET_MODE_SIZE (outermode)
- || byte >= GET_MODE_SIZE (innermode))
- abort ();
+ gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
+ gcc_assert (byte < GET_MODE_SIZE (innermode));
if (outermode == innermode && !byte)
return op;
{
rtx newx;
/* Little bit of sanity checking. */
- if (innermode == VOIDmode || outermode == VOIDmode
- || innermode == BLKmode || outermode == BLKmode)
- abort ();
+ gcc_assert (innermode != VOIDmode);
+ gcc_assert (outermode != VOIDmode);
+ gcc_assert (innermode != BLKmode);
+ gcc_assert (outermode != BLKmode);
- if (GET_MODE (op) != innermode
- && GET_MODE (op) != VOIDmode)
- abort ();
+ gcc_assert (GET_MODE (op) == innermode
+ || GET_MODE (op) == VOIDmode);
- if (byte % GET_MODE_SIZE (outermode)
- || byte >= GET_MODE_SIZE (innermode))
- abort ();
+ gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
+ gcc_assert (byte < GET_MODE_SIZE (innermode));
newx = simplify_subreg (outermode, op, innermode, byte);
if (newx)
static inline void
shift_right (sreal *x, int s)
{
-#ifdef ENABLE_CHECKING
- if (s <= 0 || s > SREAL_BITS)
- abort ();
- if (x->exp + s > SREAL_MAX_EXP)
- {
- /* Exponent should never be so large because shift_right is used only by
- sreal_add and sreal_sub ant thus the number cannot be shifted out from
- exponent range. */
- abort ();
- }
-#endif
+ gcc_assert (s > 0);
+ gcc_assert (s <= SREAL_BITS);
+ /* Exponent should never be so large because shift_right is used only by
+ sreal_add and sreal_sub and thus the number cannot be shifted out of the
+ exponent range. */
+ gcc_assert (x->exp + s <= SREAL_MAX_EXP);
x->exp += s;
sreal tmp;
sreal *bb;
- if (sreal_compare (a, b) < 0)
- {
- abort ();
- }
+ gcc_assert (sreal_compare (a, b) >= 0);
dexp = a->exp - b->exp;
r->exp = a->exp;
#if SREAL_PART_BITS < 32
unsigned HOST_WIDE_INT tmp, tmp1, tmp2;
- if (b->sig_hi < SREAL_MIN_SIG)
- {
- abort ();
- }
- else if (a->sig_hi < SREAL_MIN_SIG)
+ gcc_assert (b->sig_hi >= SREAL_MIN_SIG);
+ if (a->sig_hi < SREAL_MIN_SIG)
{
r->sig_hi = 0;
r->sig_lo = 0;
normalize (r);
}
#else
- if (b->sig == 0)
- {
- abort ();
- }
- else
- {
- r->sig = (a->sig << SREAL_PART_BITS) / b->sig;
- r->exp = a->exp - b->exp - SREAL_PART_BITS;
- normalize (r);
- }
+ gcc_assert (b->sig != 0);
+ r->sig = (a->sig << SREAL_PART_BITS) / b->sig;
+ r->exp = a->exp - b->exp - SREAL_PART_BITS;
+ normalize (r);
#endif
return r;
}
rtx
label_rtx (tree label)
{
- if (TREE_CODE (label) != LABEL_DECL)
- abort ();
+ gcc_assert (TREE_CODE (label) == LABEL_DECL);
if (!DECL_RTL_SET_P (label))
{
tree function = decl_function_context (label);
struct function *p;
- if (!function)
- abort ();
+ gcc_assert (function);
if (function != current_function_decl)
p = find_function_data (function);
/* Check for a nonlocal goto to a containing function. Should have
gotten translated to __builtin_nonlocal_goto. */
tree context = decl_function_context (label);
- if (context != 0 && context != current_function_decl)
- abort ();
+ gcc_assert (!context || context == current_function_decl);
#endif
emit_jump (label_rtx (label));
bool allows_reg;
bool allows_mem;
rtx op;
+ bool ok;
- if (!parse_output_constraint (&constraints[i], i, ninputs,
+ ok = parse_output_constraint (&constraints[i], i, ninputs,
noutputs, &allows_mem, &allows_reg,
- &is_inout))
- abort ();
+ &is_inout);
+ gcc_assert (ok);
/* If an output operand is not a decl or indirect ref and our constraint
allows a register, make a temporary to act as an intermediate.
const char *constraint;
tree val, type;
rtx op;
+ bool ok;
constraint = constraints[i + noutputs];
- if (! parse_input_constraint (&constraint, i, ninputs, noutputs, ninout,
- constraints, &allows_mem, &allows_reg))
- abort ();
+ ok = parse_input_constraint (&constraint, i, ninputs, noutputs, ninout,
+ constraints, &allows_mem, &allows_reg);
+ gcc_assert (ok);
generating_concat_p = 0;
p = strchr (p, '\0');
/* Verify the no extra buffer space assumption. */
- if (p > q)
- abort ();
+ gcc_assert (p <= q);
/* Shift the rest of the buffer down to fill the gap. */
memmove (p, q + 1, strlen (q + 1) + 1);
if (GET_MODE_SIZE (tmpmode) >= bytes)
break;
- /* No suitable mode found. */
- if (tmpmode == VOIDmode)
- abort ();
+ /* A suitable mode should have been found. */
+ gcc_assert (tmpmode != VOIDmode);
PUT_MODE (result_rtl, tmpmode);
}
to the proper address. */
if (DECL_RTL_SET_P (decl))
{
- if (!MEM_P (DECL_RTL (decl))
- || !REG_P (XEXP (DECL_RTL (decl), 0)))
- abort ();
+ gcc_assert (MEM_P (DECL_RTL (decl)));
+ gcc_assert (REG_P (XEXP (DECL_RTL (decl), 0)));
oldaddr = XEXP (DECL_RTL (decl), 0);
}
{
tree decl_elt = TREE_VALUE (t);
enum machine_mode mode = TYPE_MODE (TREE_TYPE (decl_elt));
+ rtx decl_rtl;
/* If any of the elements are addressable, so is the entire
union. */
DECL_MODE (decl_elt) = mode
= mode_for_size_tree (DECL_SIZE (decl_elt), MODE_INT, 1);
- /* (SUBREG (MEM ...)) at RTL generation time is invalid, so we
- instead create a new MEM rtx with the proper mode. */
- if (MEM_P (x))
+ if (mode == GET_MODE (x))
+ decl_rtl = x;
+ else if (MEM_P (x))
+ /* (SUBREG (MEM ...)) at RTL generation time is invalid, so we
+ instead create a new MEM rtx with the proper mode. */
+ decl_rtl = adjust_address_nv (x, mode, 0);
+ else
{
- if (mode == GET_MODE (x))
- SET_DECL_RTL (decl_elt, x);
- else
- SET_DECL_RTL (decl_elt, adjust_address_nv (x, mode, 0));
+ gcc_assert (REG_P (x));
+ decl_rtl = gen_lowpart_SUBREG (mode, x);
}
- else if (REG_P (x))
- {
- if (mode == GET_MODE (x))
- SET_DECL_RTL (decl_elt, x);
- else
- SET_DECL_RTL (decl_elt, gen_lowpart_SUBREG (mode, x));
- }
- else
- abort ();
+ SET_DECL_RTL (decl_elt, decl_rtl);
}
}
\f
if (i == count)
{
- if (count >= MAX_CASE_BIT_TESTS)
- abort ();
- test[i].hi = 0;
- test[i].lo = 0;
+ gcc_assert (count < MAX_CASE_BIT_TESTS);
+ test[i].hi = 0;
+ test[i].lo = 0;
test[i].label = label;
test[i].bits = 1;
count++;
/* The switch body is lowered in gimplify.c, we should never have
switches with a non-NULL SWITCH_BODY here. */
- if (SWITCH_BODY (exp) || !SWITCH_LABELS (exp))
- abort ();
+ gcc_assert (!SWITCH_BODY (exp));
+ gcc_assert (SWITCH_LABELS (exp));
for (i = TREE_VEC_LENGTH (vec); --i >= 0; )
{
/* Handle default labels specially. */
if (!CASE_HIGH (elt) && !CASE_LOW (elt))
{
-#ifdef ENABLE_CHECKING
- if (default_label_decl != 0)
- abort ();
-#endif
- default_label_decl = CASE_LABEL (elt);
+ gcc_assert (!default_label_decl);
+ default_label_decl = CASE_LABEL (elt);
}
else
case_list = add_case_node (case_list, CASE_LOW (elt), CASE_HIGH (elt),
- CASE_LABEL (elt));
+ CASE_LABEL (elt));
}
do_pending_stack_adjust ();
/* An ERROR_MARK occurs for various reasons including invalid data type. */
if (index_type != error_mark_node)
{
+ int fail;
+
/* If we don't have a default-label, create one here,
after the body of the switch. */
if (default_label_decl == 0)
for (n = case_list; n; n = n->right)
{
/* Check low and high label values are integers. */
- if (TREE_CODE (n->low) != INTEGER_CST)
- abort ();
- if (TREE_CODE (n->high) != INTEGER_CST)
- abort ();
+ gcc_assert (TREE_CODE (n->low) == INTEGER_CST);
+ gcc_assert (TREE_CODE (n->high) == INTEGER_CST);
n->low = convert (index_type, n->low);
n->high = convert (index_type, n->high);
if (! try_casesi (index_type, index_expr, minval, range,
table_label, default_label))
{
+ bool ok;
index_type = integer_type_node;
/* Index jumptables from zero for suitable values of
range = maxval;
}
- if (! try_tablejump (index_type, index_expr, minval, range,
- table_label, default_label))
- abort ();
+ ok = try_tablejump (index_type, index_expr, minval, range,
+ table_label, default_label);
+ gcc_assert (ok);
}
/* Get table of labels to jump to, in order of case index. */
before_case = NEXT_INSN (before_case);
end = get_last_insn ();
- if (squeeze_notes (&before_case, &end))
- abort ();
+ fail = squeeze_notes (&before_case, &end);
+ gcc_assert (!fail);
reorder_insns (before_case, end, start);
}
void
put_pending_sizes (tree chain)
{
- if (pending_sizes)
- abort ();
-
+ gcc_assert (!pending_sizes);
pending_sizes = chain;
}
if (GET_MODE_PRECISION (mode) >= size)
return mode;
- abort ();
+ gcc_unreachable ();
}
/* Find an integer mode of the exact same size, or BLKmode on failure. */
case MODE_CC:
default:
- abort ();
+ gcc_unreachable ();
}
return mode;
if (code == CONST_DECL)
return;
- else if (code != VAR_DECL && code != PARM_DECL && code != RESULT_DECL
- && code != TYPE_DECL && code != FIELD_DECL)
- abort ();
-
+
+ gcc_assert (code == VAR_DECL || code == PARM_DECL || code == RESULT_DECL
+ || code == TYPE_DECL || code == FIELD_DECL);
+
rtl = DECL_RTL_IF_SET (decl);
if (type == error_mark_node)
void
layout_type (tree type)
{
- if (type == 0)
- abort ();
+ gcc_assert (type);
if (type == error_mark_node)
return;
case LANG_TYPE:
/* This kind of type is the responsibility
of the language-specific code. */
- abort ();
+ gcc_unreachable ();
case BOOLEAN_TYPE: /* Used for Java, Pascal, and Chill. */
if (TYPE_PRECISION (type) == 0)
tree nunits_tree = build_int_cst (NULL_TREE, nunits);
tree innertype = TREE_TYPE (type);
- if (nunits & (nunits - 1))
- abort ();
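+ /* Vector types must have a power-of-two number of elements. */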
+ gcc_assert (!(nunits & (nunits - 1)));
/* Find an appropriate mode for the vector type. */
if (TYPE_MODE (type) == VOIDmode)
break;
case SET_TYPE: /* Used by Chill and Pascal. */
- if (TREE_CODE (TYPE_MAX_VALUE (TYPE_DOMAIN (type))) != INTEGER_CST
- || TREE_CODE (TYPE_MIN_VALUE (TYPE_DOMAIN (type))) != INTEGER_CST)
- abort ();
- else
- {
+ {
+ unsigned int alignment;
+ HOST_WIDE_INT size_in_bits;
+ HOST_WIDE_INT rounded_size;
+
+ gcc_assert (TREE_CODE (TYPE_MAX_VALUE (TYPE_DOMAIN (type)))
+ == INTEGER_CST);
+ gcc_assert (TREE_CODE (TYPE_MIN_VALUE (TYPE_DOMAIN (type)))
+ == INTEGER_CST);
+
#ifndef SET_WORD_SIZE
#define SET_WORD_SIZE BITS_PER_WORD
#endif
- unsigned int alignment
- = set_alignment ? set_alignment : SET_WORD_SIZE;
- HOST_WIDE_INT size_in_bits
- = (tree_low_cst (TYPE_MAX_VALUE (TYPE_DOMAIN (type)), 0)
- - tree_low_cst (TYPE_MIN_VALUE (TYPE_DOMAIN (type)), 0) + 1);
- HOST_WIDE_INT rounded_size
- = ((size_in_bits + alignment - 1) / alignment) * alignment;
-
- if (rounded_size > (int) alignment)
- TYPE_MODE (type) = BLKmode;
- else
- TYPE_MODE (type) = mode_for_size (alignment, MODE_INT, 1);
-
- TYPE_SIZE (type) = bitsize_int (rounded_size);
- TYPE_SIZE_UNIT (type) = size_int (rounded_size / BITS_PER_UNIT);
- TYPE_ALIGN (type) = alignment;
- TYPE_USER_ALIGN (type) = 0;
- TYPE_PRECISION (type) = size_in_bits;
- }
+ alignment = set_alignment ? set_alignment : SET_WORD_SIZE;
+ size_in_bits
+ = (tree_low_cst (TYPE_MAX_VALUE (TYPE_DOMAIN (type)), 0)
+ - tree_low_cst (TYPE_MIN_VALUE (TYPE_DOMAIN (type)), 0) + 1);
+ rounded_size
+ = ((size_in_bits + alignment - 1) / alignment) * alignment;
+
+ if (rounded_size > (int) alignment)
+ TYPE_MODE (type) = BLKmode;
+ else
+ TYPE_MODE (type) = mode_for_size (alignment, MODE_INT, 1);
+
+ TYPE_SIZE (type) = bitsize_int (rounded_size);
+ TYPE_SIZE_UNIT (type) = size_int (rounded_size / BITS_PER_UNIT);
+ TYPE_ALIGN (type) = alignment;
+ TYPE_USER_ALIGN (type) = 0;
+ TYPE_PRECISION (type) = size_in_bits;
+ }
break;
case FILE_TYPE:
break;
default:
- abort ();
+ gcc_unreachable ();
}
/* Compute the final TYPE_SIZE, TYPE_ALIGN, etc. for TYPE. For
2 * HOST_BITS_PER_WIDE_INT);
tree t;
- if (TYPE_UNSIGNED (type) != TYPE_UNSIGNED (sizetype))
- abort ();
+ gcc_assert (TYPE_UNSIGNED (type) == TYPE_UNSIGNED (sizetype));
t = build_distinct_type_copy (type);
/* We do want to use sizetype's cache, as we will be replacing that
unsigned size = GET_MODE_BITSIZE (mode);
unsigned HOST_WIDE_INT min_val, max_val;
- if (size > HOST_BITS_PER_WIDE_INT)
- abort ();
+ gcc_assert (size <= HOST_BITS_PER_WIDE_INT);
if (sign)
{