+2015-04-21 Trevor Saunders <tbsaunde+gcc@tbsaunde.org>
+
+ * genconfig.c (main): Always define HAVE_cc0.
+ * caller-save.c (insert_one_insn): Change #ifdef HAVE_cc0 to #if
+ HAVE_cc0; likewise #ifndef HAVE_cc0 to #if !HAVE_cc0 throughout.
+ * cfgcleanup.c (flow_find_cross_jump): Likewise.
+ (flow_find_head_matching_sequence): Likewise.
+ (try_head_merge_bb): Likewise.
+ * cfgrtl.c (rtl_merge_blocks): Likewise.
+ (try_redirect_by_replacing_jump): Likewise.
+ (rtl_tidy_fallthru_edge): Likewise.
+ * combine.c (do_SUBST_MODE): Likewise.
+ (insn_a_feeds_b): Likewise.
+ (combine_instructions): Likewise.
+ (can_combine_p): Likewise.
+ (try_combine): Likewise.
+ (find_split_point): Likewise.
+ (subst): Likewise.
+ (simplify_set): Likewise.
+ (distribute_notes): Likewise.
+ * cprop.c (cprop_jump): Likewise.
+ * cse.c (cse_extended_basic_block): Likewise.
+ * df-problems.c (can_move_insns_across): Likewise.
+ * final.c (final): Likewise.
+ (final_scan_insn): Likewise.
+ * function.c (emit_use_return_register_into_block): Likewise.
+ * gcse.c (insert_insn_end_basic_block): Likewise.
+ * haifa-sched.c (sched_init): Likewise.
+ * ira.c (find_moveable_pseudos): Likewise.
+ * loop-invariant.c (find_invariant_insn): Likewise.
+ * lra-constraints.c (curr_insn_transform): Likewise.
+ * optabs.c (prepare_cmp_insn): Likewise.
+ * postreload.c (reload_combine_recognize_const_pattern): Likewise.
+ * reload.c (find_reloads): Likewise.
+ (find_reloads_address_1): Likewise.
+ * reorg.c (delete_scheduled_jump): Likewise.
+ (steal_delay_list_from_target): Likewise.
+ (steal_delay_list_from_fallthrough): Likewise.
+ (try_merge_delay_insns): Likewise.
+ (redundant_insn): Likewise.
+ (fill_simple_delay_slots): Likewise.
+ (fill_slots_from_thread): Likewise.
+ (delete_computation): Likewise.
+ (relax_delay_slots): Likewise.
+ * sched-deps.c (sched_analyze_2): Likewise.
+ * sched-rgn.c (add_branch_dependences): Likewise.
+
2015-04-21 Trevor Saunders <tbsaunde+gcc@tbsaunde.org>
* combine.c (find_single_use): Remove HAVE_cc0 ifdef for code
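
(Not part of the patch: a minimal, self-contained sketch of the idiom this
series introduces.  The fallback #define below stands in for the definition
genconfig now always writes into the generated insn-config.h; everything
else here is illustrative and can be compiled and run on its own.)

#include <stdio.h>

/* Stand-in for the definition genconfig now always emits: 1 on cc0
   targets, 0 everywhere else.  */
#ifndef HAVE_cc0
#define HAVE_cc0 0
#endif

int
main (void)
{
#if HAVE_cc0
  /* The preprocessor always sees a defined macro, so #if is well-formed
     on every target; this arm is simply dropped when HAVE_cc0 is 0.  */
  puts ("cc0 target");
#else
  puts ("non-cc0 target");
#endif

  /* Because the macro is always 0 or 1, a later cleanup can hoist the
     test into plain C; the dead arm still gets parsed and type-checked,
     then discarded by the optimizer.  */
  if (HAVE_cc0)
    puts ("cc0-specific handling would run here");

  return 0;
}
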
rtx_insn *insn = chain->insn;
struct insn_chain *new_chain;
-#ifdef HAVE_cc0
+#if HAVE_cc0
/* If INSN references CC0, put our insns in front of the insn that sets
CC0. This is always safe, since the only way we could be passed an
insn that references CC0 is for a restore, and doing a restore earlier
i2 = PREV_INSN (i2);
}
-#ifdef HAVE_cc0
+#if HAVE_cc0
/* Don't allow the insn after a compare to be shared by
cross-jumping unless the compare is also shared. */
if (ninsns && reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
i2 = NEXT_INSN (i2);
}
-#ifdef HAVE_cc0
+#if HAVE_cc0
/* Don't allow a compare to be shared by cross-jumping unless the insn
after the compare is also shared. */
if (ninsns && reg_mentioned_p (cc0_rtx, last1) && sets_cc0_p (last1))
cond = get_condition (jump, &move_before, true, false);
if (cond == NULL_RTX)
{
-#ifdef HAVE_cc0
+#if HAVE_cc0
if (reg_mentioned_p (cc0_rtx, jump))
move_before = prev_nonnote_nondebug_insn (jump);
else
cond = get_condition (jump, &move_before, true, false);
if (cond == NULL_RTX)
{
-#ifdef HAVE_cc0
+#if HAVE_cc0
if (reg_mentioned_p (cc0_rtx, jump))
move_before = prev_nonnote_nondebug_insn (jump);
else
/* Try again, using a different insertion point. */
move_before = jump;
-#ifdef HAVE_cc0
+#if HAVE_cc0
/* Don't try moving before a cc0 user, as that may invalidate
the cc0. */
if (reg_mentioned_p (cc0_rtx, jump))
/* For the unmerged insns, try a different insertion point. */
move_before = jump;
-#ifdef HAVE_cc0
+#if HAVE_cc0
/* Don't try moving before a cc0 user, as that may invalidate
the cc0. */
if (reg_mentioned_p (cc0_rtx, jump))
del_first = a_end;
-#ifdef HAVE_cc0
+#if HAVE_cc0
/* If this was a conditional jump, we need to also delete
the insn that set cc0. */
if (only_sets_cc0_p (prev))
/* In case we zap a conditional jump, we'll need to kill
the cc0 setter too. */
kill_from = insn;
-#ifdef HAVE_cc0
+#if HAVE_cc0
if (reg_mentioned_p (cc0_rtx, PATTERN (insn))
&& only_sets_cc0_p (PREV_INSN (insn)))
kill_from = PREV_INSN (insn);
delete_insn (table);
}
-#ifdef HAVE_cc0
+#if HAVE_cc0
/* If this was a conditional jump, we need to also delete
the insn that set cc0. */
if (any_condjump_p (q) && only_sets_cc0_p (PREV_INSN (q)))
#define SUBST_MODE(INTO, NEWVAL) do_SUBST_MODE (&(INTO), (NEWVAL))
-#ifndef HAVE_cc0
+#if !HAVE_cc0
/* Similar to SUBST, but NEWVAL is a LOG_LINKS expression. */
static void
FOR_EACH_LOG_LINK (links, b)
if (links->insn == a)
return true;
-#ifdef HAVE_cc0
+#if HAVE_cc0
if (sets_cc0_p (a))
return true;
#endif
combine_instructions (rtx_insn *f, unsigned int nregs)
{
rtx_insn *insn, *next;
-#ifdef HAVE_cc0
+#if HAVE_cc0
rtx_insn *prev;
#endif
struct insn_link *links, *nextlinks;
}
}
-#ifdef HAVE_cc0
+#if HAVE_cc0
/* Try to combine a jump insn that uses CC0
with a preceding insn that sets CC0, and maybe with its
logical predecessor as well.
return 0;
#endif
-#ifdef HAVE_cc0
+#if HAVE_cc0
/* Don't combine an insn that follows a CC0-setting insn.
An insn that uses CC0 must not be separated from the one that sets it.
We do, however, allow I2 to follow a CC0-setting insn if that insn
return true;
}
-#ifndef HAVE_cc0
+#if !HAVE_cc0
/* Return whether INSN, a PARALLEL of N register SETs (and maybe some
CLOBBERs), can be split into individual SETs in that order, without
changing semantics. */
}
}
-#ifndef HAVE_cc0
+#if !HAVE_cc0
/* If we have no I1 and I2 looks like:
(parallel [(set (reg:CC X) (compare:CC OP (const_int 0)))
(set Y OP)])
subst_insn = i3;
-#ifndef HAVE_cc0
+#if !HAVE_cc0
/* Many machines that don't use CC0 have insns that can both perform an
arithmetic operation and set the condition code. These operations will
be represented as a PARALLEL with the first element of the vector
are set between I2 and I3. */
if (insn_code_number < 0
&& (split = find_split_point (&newpat, i3, false)) != 0
-#ifdef HAVE_cc0
+#if HAVE_cc0
&& REG_P (i2dest)
#endif
/* We need I2DEST in the proper mode. If it is a hard register
&& !(GET_CODE (SET_DEST (set1)) == SUBREG
&& find_reg_note (i2, REG_DEAD,
SUBREG_REG (SET_DEST (set1))))
-#ifdef HAVE_cc0
+#if HAVE_cc0
&& !reg_referenced_p (cc0_rtx, set0)
#endif
/* If I3 is a jump, ensure that set0 is a jump so that
&& !(GET_CODE (SET_DEST (set0)) == SUBREG
&& find_reg_note (i2, REG_DEAD,
SUBREG_REG (SET_DEST (set0))))
-#ifdef HAVE_cc0
+#if HAVE_cc0
&& !reg_referenced_p (cc0_rtx, set1)
#endif
/* If I3 is a jump, ensure that set1 is a jump so that
}
}
-#ifdef HAVE_cc0
+#if HAVE_cc0
/* If I2 is the CC0 setter and I3 is the CC0 user then check whether
they are adjacent to each other or not. */
{
break;
case SET:
-#ifdef HAVE_cc0
+#if HAVE_cc0
/* If SET_DEST is CC0 and SET_SRC is not an operand, a COMPARE, or a
ZERO_EXTRACT, the most likely reason why this doesn't match is that
we need to put the operand into a register. So split at that
&& ! (code == SUBREG
&& MODES_TIEABLE_P (GET_MODE (x),
GET_MODE (SUBREG_REG (to))))
-#ifdef HAVE_cc0
+#if HAVE_cc0
&& ! (code == SET && i == 1 && XEXP (x, 0) == cc0_rtx)
#endif
)
else
compare_mode = SELECT_CC_MODE (new_code, op0, op1);
-#ifndef HAVE_cc0
+#if !HAVE_cc0
/* If the mode changed, we have to change SET_DEST, the mode in the
compare, and the mode in the place SET_DEST is used. If SET_DEST is
a hard register, just build new versions with the proper mode. If it
{
rtx set = single_set (tem_insn);
rtx inner_dest = 0;
-#ifdef HAVE_cc0
+#if HAVE_cc0
rtx_insn *cc0_setter = NULL;
#endif
if (set != 0 && ! side_effects_p (SET_SRC (set))
&& rtx_equal_p (XEXP (note, 0), inner_dest)
-#ifdef HAVE_cc0
+#if HAVE_cc0
&& (! reg_mentioned_p (cc0_rtx, SET_SRC (set))
|| ((cc0_setter = prev_cc0_setter (tem_insn)) != NULL
&& sets_cc0_p (PATTERN (cc0_setter)) > 0))
if (tem_insn == i2)
i2 = NULL;
-#ifdef HAVE_cc0
+#if HAVE_cc0
/* Delete the setter too. */
if (cc0_setter)
{
remove_note (jump, note);
}
-#ifdef HAVE_cc0
+#if HAVE_cc0
/* Delete the cc0 setter. */
if (setcc != NULL && CC0_P (SET_DEST (single_set (setcc))))
delete_insn (setcc);
&& check_for_label_ref (insn))
recorded_label_ref = true;
-#ifdef HAVE_cc0
+#if HAVE_cc0
if (NONDEBUG_INSN_P (insn))
{
/* If the previous insn sets CC0 and this insn no
if (bitmap_intersect_p (merge_set, test_use)
|| bitmap_intersect_p (merge_use, test_set))
break;
-#ifdef HAVE_cc0
+#if HAVE_cc0
if (!sets_cc0_p (insn))
#endif
max_to = insn;
if (NONDEBUG_INSN_P (insn))
{
if (!bitmap_intersect_p (test_set, local_merge_live)
-#ifdef HAVE_cc0
+#if HAVE_cc0
&& !sets_cc0_p (insn)
#endif
)
#ifdef LEAF_REGISTERS
static void leaf_renumber_regs (rtx_insn *);
#endif
-#ifdef HAVE_cc0
+#if HAVE_cc0
static int alter_cond (rtx);
#endif
#ifndef ADDR_VEC_ALIGN
last_ignored_compare = 0;
-#ifdef HAVE_cc0
+#if HAVE_cc0
for (insn = first; insn; insn = NEXT_INSN (insn))
{
/* If CC tracking across branches is enabled, record the insn which
final_scan_insn (rtx_insn *insn, FILE *file, int optimize_p ATTRIBUTE_UNUSED,
int nopeepholes ATTRIBUTE_UNUSED, int *seen)
{
-#ifdef HAVE_cc0
+#if HAVE_cc0
rtx set;
#endif
rtx_insn *next;
|| GET_CODE (body) == CLOBBER)
break;
-#ifdef HAVE_cc0
+#if HAVE_cc0
{
/* If there is a REG_CC_SETTER note on this insn, it means that
the setting of the condition code was done in the delay slot
body = PATTERN (insn);
-#ifdef HAVE_cc0
+#if HAVE_cc0
set = single_set (insn);
/* Check for redundant test and compare instructions
&& GET_CODE (PATTERN (insn)) == COND_EXEC)
current_insn_predicate = COND_EXEC_TEST (PATTERN (insn));
-#ifdef HAVE_cc0
+#if HAVE_cc0
cc_prev_status = cc_status;
/* Update `cc_status' for this instruction.
return *xp;
}
\f
-#ifdef HAVE_cc0
+#if HAVE_cc0
/* Given BODY, the body of a jump instruction, alter the jump condition
as required by the bits that are set in cc_status.flags.
seq = get_insns ();
end_sequence ();
insn = BB_END (bb);
-#ifdef HAVE_cc0
+#if HAVE_cc0
if (reg_mentioned_p (cc0_rtx, PATTERN (insn)))
insn = prev_cc0_setter (insn);
#endif
&& (!single_succ_p (bb)
|| single_succ_edge (bb)->flags & EDGE_ABNORMAL)))
{
-#ifdef HAVE_cc0
+#if HAVE_cc0
/* FIXME: 'twould be nice to call prev_cc0_setter here but it aborts
if cc0 isn't set. */
rtx note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);
{
/* We output CC0_P this way to make sure that X is declared
somewhere. */
+ printf ("#define HAVE_cc0 0\n");
printf ("#define CC0_P(X) ((X) ? 0 : 0)\n");
}
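
(For reference: the hunk above is the non-cc0 branch of genconfig's main,
so on such targets the generated insn-config.h now carries both lines
below; cc0 targets already received "#define HAVE_cc0 1" from the other
branch.  Either way the macro is defined, which is what makes
#if HAVE_cc0 well-formed everywhere.)

/* Generated insn-config.h excerpt on a non-cc0 target (sketch).  */
#define HAVE_cc0 0
#define CC0_P(X) ((X) ? 0 : 0)
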
sched_init (void)
{
/* Disable speculative loads in their presence if cc0 defined. */
-#ifdef HAVE_cc0
+#if HAVE_cc0
flag_schedule_speculative_load = 0;
#endif
? " (no unique first use)" : "");
continue;
}
-#ifdef HAVE_cc0
+#if HAVE_cc0
if (reg_referenced_p (cc0_rtx, PATTERN (closest_use)))
{
if (dump_file)
{
if (bitmap_bit_p (def_bb_moveable, regno)
&& !control_flow_insn_p (use_insn)
-#ifdef HAVE_cc0
+#if HAVE_cc0
&& !sets_cc0_p (use_insn)
#endif
)
bool simple = true;
struct invariant *inv;
-#ifdef HAVE_cc0
+#if HAVE_cc0
/* We can't move a CC0 setter without the user. */
if (sets_cc0_p (insn))
return;
if (JUMP_P (curr_insn) || CALL_P (curr_insn))
no_output_reloads_p = true;
-#ifdef HAVE_cc0
+#if HAVE_cc0
if (reg_referenced_p (cc0_rtx, PATTERN (curr_insn)))
no_input_reloads_p = true;
if (reg_set_p (cc0_rtx, PATTERN (curr_insn)))
> COSTS_N_INSNS (1)))
y = force_reg (mode, y);
-#ifdef HAVE_cc0
+#if HAVE_cc0
/* Make sure if we have a canonical comparison. The RTL
documentation states that canonical comparisons are required only
for targets which have cc0. */
&& reg_state[clobbered_regno].real_store_ruid >= use_ruid)
break;
-#ifdef HAVE_cc0
+#if HAVE_cc0
/* Do not separate cc0 setter and cc0 user on HAVE_cc0 targets. */
if (must_move_add && sets_cc0_p (PATTERN (use_insn)))
break;
if (JUMP_P (insn) || CALL_P (insn))
no_output_reloads = 1;
-#ifdef HAVE_cc0
+#if HAVE_cc0
if (reg_referenced_p (cc0_rtx, PATTERN (insn)))
no_input_reloads = 1;
if (reg_set_p (cc0_rtx, PATTERN (insn)))
rld[j].in = 0;
}
-#ifdef HAVE_cc0
+#if HAVE_cc0
/* If we made any reloads for addresses, see if they violate a
"no input reloads" requirement for this insn. But loads that we
do after the insn (such as for output addresses) are fine. */
enum insn_code icode = optab_handler (add_optab, GET_MODE (x));
if (insn && NONJUMP_INSN_P (insn) && equiv
&& memory_operand (equiv, GET_MODE (equiv))
-#ifdef HAVE_cc0
+#if HAVE_cc0
&& ! sets_cc0_p (PATTERN (insn))
#endif
&& ! (icode != CODE_FOR_nothing
return label;
}
-#ifdef HAVE_cc0
+#if HAVE_cc0
/* INSN uses CC0 and is being moved into a delay slot. Set up REG_CC_SETTER
and REG_CC_USER notes so we can find it. */
be other insns that became dead anyway, which we wouldn't know to
delete. */
-#ifdef HAVE_cc0
+#if HAVE_cc0
if (reg_mentioned_p (cc0_rtx, insn))
{
rtx note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);
if (insn_references_resource_p (trial, sets, false)
|| insn_sets_resource_p (trial, needed, false)
|| insn_sets_resource_p (trial, sets, false)
-#ifdef HAVE_cc0
+#if HAVE_cc0
/* If TRIAL sets CC0, we can't copy it, so we can't steal this
delay list. */
|| find_reg_note (trial, REG_CC_USER, NULL_RTX)
if (insn_references_resource_p (trial, sets, false)
|| insn_sets_resource_p (trial, needed, false)
|| insn_sets_resource_p (trial, sets, false)
-#ifdef HAVE_cc0
+#if HAVE_cc0
|| sets_cc0_p (PATTERN (trial))
#endif
)
continue;
if (GET_CODE (next_to_match) == GET_CODE (trial)
-#ifdef HAVE_cc0
+#if HAVE_cc0
/* We can't share an insn that sets cc0. */
&& ! sets_cc0_p (pat)
#endif
if (! insn_references_resource_p (dtrial, &set, true)
&& ! insn_sets_resource_p (dtrial, &set, true)
&& ! insn_sets_resource_p (dtrial, &needed, true)
-#ifdef HAVE_cc0
+#if HAVE_cc0
&& ! sets_cc0_p (PATTERN (dtrial))
#endif
&& rtx_equal_p (PATTERN (next_to_match), PATTERN (dtrial))
target_main = XVECEXP (PATTERN (target), 0, 0);
if (resource_conflicts_p (&needed, &set)
-#ifdef HAVE_cc0
+#if HAVE_cc0
|| reg_mentioned_p (cc0_rtx, ipat)
#endif
/* The insn requiring the delay may not set anything needed or set by
filter_flags ? &fset : &set,
true)
&& ! insn_sets_resource_p (trial, &needed, true)
-#ifdef HAVE_cc0
+#if HAVE_cc0
/* Can't separate set of cc0 from its use. */
&& ! (reg_mentioned_p (cc0_rtx, pat) && ! sets_cc0_p (pat))
#endif
&& ! insn_references_resource_p (trial, &set, true)
&& ! insn_sets_resource_p (trial, &set, true)
&& ! insn_sets_resource_p (trial, &needed, true)
-#ifdef HAVE_cc0
+#if HAVE_cc0
&& ! (reg_mentioned_p (cc0_rtx, pat) && ! sets_cc0_p (pat))
#endif
&& ! (maybe_never && may_trap_or_fault_p (pat))
{
next_trial = next_nonnote_insn (trial);
delay_list = add_to_delay_list (trial, delay_list);
-#ifdef HAVE_cc0
+#if HAVE_cc0
if (reg_mentioned_p (cc0_rtx, pat))
link_cc0_insns (trial);
#endif
&& ! insn_references_resource_p (next_trial, &set, true)
&& ! insn_sets_resource_p (next_trial, &set, true)
&& ! insn_sets_resource_p (next_trial, &needed, true)
-#ifdef HAVE_cc0
+#if HAVE_cc0
&& ! reg_mentioned_p (cc0_rtx, PATTERN (next_trial))
#endif
&& ! (maybe_never && may_trap_or_fault_p (PATTERN (next_trial)))
if (! insn_references_resource_p (trial, &set, true)
&& ! insn_sets_resource_p (trial, &set, true)
&& ! insn_sets_resource_p (trial, &needed, true)
-#ifdef HAVE_cc0
+#if HAVE_cc0
&& ! (reg_mentioned_p (cc0_rtx, pat)
&& (! own_thread || ! sets_cc0_p (pat)))
#endif
must_annul = 1;
winner:
-#ifdef HAVE_cc0
+#if HAVE_cc0
if (reg_mentioned_p (cc0_rtx, pat))
link_cc0_insns (trial);
#endif
{
rtx note, next;
-#ifdef HAVE_cc0
+#if HAVE_cc0
if (reg_referenced_p (cc0_rtx, PATTERN (insn)))
{
rtx prev = prev_nonnote_insn (insn);
&& ! condjump_in_parallel_p (delay_insn)
&& prev_active_insn (target_label) == insn
&& ! BARRIER_P (prev_nonnote_insn (target_label))
-#ifdef HAVE_cc0
+#if HAVE_cc0
/* If the last insn in the delay slot sets CC0 for some insn,
various code assumes that it is in a delay slot. We could
put it back where it belonged and delete the register notes,
return;
case CC0:
-#ifndef HAVE_cc0
+#if !HAVE_cc0
gcc_unreachable ();
#endif
/* User of CC0 depends on immediately preceding insn. */
&& (GET_CODE (PATTERN (insn)) == USE
|| GET_CODE (PATTERN (insn)) == CLOBBER
|| can_throw_internal (insn)
-#ifdef HAVE_cc0
+#if HAVE_cc0
|| sets_cc0_p (PATTERN (insn))
#endif
|| (!reload_completed