+2015-04-21 Trevor Saunders <tbsaunde+gcc@tbsaunde.org>
+
+ * cfgrtl.c (rtl_merge_blocks): Change #if HAVE_cc0 to if (HAVE_cc0).
+ (try_redirect_by_replacing_jump): Likewise.
+ (rtl_tidy_fallthru_edge): Likewise.
+ * combine.c (insn_a_feeds_b): Likewise.
+ (find_split_point): Likewise.
+ (simplify_set): Likewise.
+ * cprop.c (cprop_jump): Likewise.
+ * cse.c (cse_extended_basic_block): Likewise.
+ * df-problems.c (can_move_insns_across): Likewise.
+ * function.c (emit_use_return_register_into_block): Likewise.
+ * haifa-sched.c (sched_init): Likewise.
+ * ira.c (find_moveable_pseudos): Likewise.
+ * loop-invariant.c (find_invariant_insn): Likewise.
+ * lra-constraints.c (curr_insn_transform): Likewise.
+ * postreload.c (reload_combine_recognize_const_pattern): Likewise.
+ * reload.c (find_reloads): Likewise.
+ * reorg.c (delete_scheduled_jump): Likewise.
+ (steal_delay_list_from_target): Likewise.
+ (steal_delay_list_from_fallthrough): Likewise.
+ (redundant_insn): Likewise.
+ (fill_simple_delay_slots): Likewise.
+ (fill_slots_from_thread): Likewise.
+ (delete_computation): Likewise.
+ * sched-rgn.c (add_branch_dependences): Likewise.
+
2015-04-21 Trevor Saunders <tbsaunde+gcc@tbsaunde.org>

 * genconfig.c (main): Always define HAVE_cc0.
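
For reference, a minimal standalone sketch of the conversion this patch applies
throughout: since genconfig.c now always defines HAVE_cc0, each preprocessor
guard can become an ordinary condition that is parsed and type-checked on every
target and folded away by the compiler when HAVE_cc0 is 0.  The sets_cc0_p
below is a stand-in for illustration only, not GCC's real predicate.

  #include <stdio.h>

  #ifndef HAVE_cc0
  #define HAVE_cc0 0   /* always defined; 1 only on cc0 targets */
  #endif

  static int
  sets_cc0_p (int insn)   /* stand-in predicate, not GCC's */
  {
    return insn & 1;
  }

  static void
  old_style (int insn)
  {
    (void) insn;   /* silence unused-parameter warning when HAVE_cc0 is 0 */
  #if HAVE_cc0
    /* Not even parsed when HAVE_cc0 is 0, so it can bit-rot unnoticed.  */
    if (sets_cc0_p (insn))
      printf ("cc0 setter\n");
  #endif
  }

  static void
  new_style (int insn)
  {
    /* Always compiled; the whole test folds to nothing when HAVE_cc0 is 0.  */
    if (HAVE_cc0 && sets_cc0_p (insn))
      printf ("cc0 setter\n");
  }

  int
  main (void)
  {
    old_style (1);
    new_style (1);
    return 0;
  }

The runtime form keeps the guarded code visible to the compiler even on
non-cc0 targets, which is the point of the change.
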
del_first = a_end;
-#if HAVE_cc0
/* If this was a conditional jump, we need to also delete
the insn that set cc0. */
- if (only_sets_cc0_p (prev))
+ if (HAVE_cc0 && only_sets_cc0_p (prev))
{
rtx_insn *tmp = prev;
prev = BB_HEAD (a);
del_first = tmp;
}
-#endif
a_end = PREV_INSN (del_first);
}
/* In case we zap a conditional jump, we'll need to kill
the cc0 setter too. */
kill_from = insn;
-#if HAVE_cc0
- if (reg_mentioned_p (cc0_rtx, PATTERN (insn))
+ if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, PATTERN (insn))
&& only_sets_cc0_p (PREV_INSN (insn)))
kill_from = PREV_INSN (insn);
-#endif
/* See if we can create the fallthru edge. */
if (in_cfglayout || can_fallthru (src, target))
delete_insn (table);
}
-#if HAVE_cc0
/* If this was a conditional jump, we need to also delete
the insn that set cc0. */
- if (any_condjump_p (q) && only_sets_cc0_p (PREV_INSN (q)))
+ if (HAVE_cc0 && any_condjump_p (q) && only_sets_cc0_p (PREV_INSN (q)))
q = PREV_INSN (q);
-#endif
q = PREV_INSN (q);
}
FOR_EACH_LOG_LINK (links, b)
if (links->insn == a)
return true;
-#if HAVE_cc0
- if (sets_cc0_p (a))
+ if (HAVE_cc0 && sets_cc0_p (a))
return true;
-#endif
return false;
}
\f
break;
case SET:
-#if HAVE_cc0
/* If SET_DEST is CC0 and SET_SRC is not an operand, a COMPARE, or a
ZERO_EXTRACT, the most likely reason why this doesn't match is that
we need to put the operand into a register. So split at that
&& ! (GET_CODE (SET_SRC (x)) == SUBREG
&& OBJECT_P (SUBREG_REG (SET_SRC (x)))))
return &SET_SRC (x);
-#endif
/* See if we can split SET_SRC as it stands. */
split = find_split_point (&SET_SRC (x), insn, true);
else
compare_mode = SELECT_CC_MODE (new_code, op0, op1);
-#if !HAVE_cc0
/* If the mode changed, we have to change SET_DEST, the mode in the
compare, and the mode in the place SET_DEST is used. If SET_DEST is
a hard register, just build new versions with the proper mode. If it
is a pseudo, we lose unless it is the only time we set the pseudo, in
which case we can safely change its mode. */
- if (compare_mode != GET_MODE (dest))
+ if (!HAVE_cc0 && compare_mode != GET_MODE (dest))
{
if (can_change_dest_mode (dest, 0, compare_mode))
{
dest = new_dest;
}
}
-#endif /* cc0 */
#endif /* SELECT_CC_MODE */
/* If the code changed, we have to build a new comparison in
remove_note (jump, note);
}
-#if HAVE_cc0
/* Delete the cc0 setter. */
- if (setcc != NULL && CC0_P (SET_DEST (single_set (setcc))))
+ if (HAVE_cc0 && setcc != NULL && CC0_P (SET_DEST (single_set (setcc))))
delete_insn (setcc);
-#endif
global_const_prop_count++;
if (dump_file != NULL)
&& check_for_label_ref (insn))
recorded_label_ref = true;
-#if HAVE_cc0
- if (NONDEBUG_INSN_P (insn))
+ if (HAVE_cc0 && NONDEBUG_INSN_P (insn))
{
/* If the previous insn sets CC0 and this insn no
longer references CC0, delete the previous insn.
prev_insn_cc0_mode = this_insn_cc0_mode;
}
}
-#endif
}
}
if (bitmap_intersect_p (merge_set, test_use)
|| bitmap_intersect_p (merge_use, test_set))
break;
-#if HAVE_cc0
- if (!sets_cc0_p (insn))
-#endif
+ if (!HAVE_cc0 || !sets_cc0_p (insn))
max_to = insn;
}
next = NEXT_INSN (insn);
seq = get_insns ();
end_sequence ();
insn = BB_END (bb);
-#if HAVE_cc0
- if (reg_mentioned_p (cc0_rtx, PATTERN (insn)))
+ if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, PATTERN (insn)))
insn = prev_cc0_setter (insn);
-#endif
+
emit_insn_before (seq, insn);
}
sched_init (void)
{
/* Disable speculative loads if cc0 is defined.  */
-#if HAVE_cc0
+ if (HAVE_cc0)
flag_schedule_speculative_load = 0;
-#endif
if (targetm.sched.dispatch (NULL, IS_DISPATCH_ON))
targetm.sched.dispatch_do (NULL, DISPATCH_INIT);
? " (no unique first use)" : "");
continue;
}
-#if HAVE_cc0
- if (reg_referenced_p (cc0_rtx, PATTERN (closest_use)))
+ if (HAVE_cc0 && reg_referenced_p (cc0_rtx, PATTERN (closest_use)))
{
if (dump_file)
fprintf (dump_file, "Reg %d: closest user uses cc0\n",
regno);
continue;
}
-#endif
+
bitmap_set_bit (&interesting, regno);
/* If we get here, we know closest_use is a non-NULL insn
(as opposed to const_0_rtx). */
bool simple = true;
struct invariant *inv;
-#if HAVE_cc0
/* We can't move a CC0 setter without the user. */
- if (sets_cc0_p (insn))
+ if (HAVE_cc0 && sets_cc0_p (insn))
return;
-#endif
set = single_set (insn);
if (!set)
if (JUMP_P (curr_insn) || CALL_P (curr_insn))
no_output_reloads_p = true;
-#if HAVE_cc0
- if (reg_referenced_p (cc0_rtx, PATTERN (curr_insn)))
+ if (HAVE_cc0 && reg_referenced_p (cc0_rtx, PATTERN (curr_insn)))
no_input_reloads_p = true;
- if (reg_set_p (cc0_rtx, PATTERN (curr_insn)))
+ if (HAVE_cc0 && reg_set_p (cc0_rtx, PATTERN (curr_insn)))
no_output_reloads_p = true;
-#endif
n_operands = curr_static_id->n_operands;
n_alternatives = curr_static_id->n_alternatives;
&& reg_state[clobbered_regno].real_store_ruid >= use_ruid)
break;
-#if HAVE_cc0
/* Do not separate cc0 setter and cc0 user on HAVE_cc0 targets. */
- if (must_move_add && sets_cc0_p (PATTERN (use_insn)))
+ if (HAVE_cc0 && must_move_add && sets_cc0_p (PATTERN (use_insn)))
break;
-#endif
gcc_assert (reg_state[regno].store_ruid <= use_ruid);
/* Avoid moving a use of ADDREG past a point where it is stored. */
if (JUMP_P (insn) || CALL_P (insn))
no_output_reloads = 1;
-#if HAVE_cc0
- if (reg_referenced_p (cc0_rtx, PATTERN (insn)))
+ if (HAVE_cc0 && reg_referenced_p (cc0_rtx, PATTERN (insn)))
no_input_reloads = 1;
- if (reg_set_p (cc0_rtx, PATTERN (insn)))
+ if (HAVE_cc0 && reg_set_p (cc0_rtx, PATTERN (insn)))
no_output_reloads = 1;
-#endif
#ifdef SECONDARY_MEMORY_NEEDED
/* The eliminated forms of any secondary memory locations are per-insn, so
rld[j].in = 0;
}
-#if HAVE_cc0
/* If we made any reloads for addresses, see if they violate a
"no input reloads" requirement for this insn. But loads that we
do after the insn (such as for output addresses) are fine. */
- if (no_input_reloads)
+ if (HAVE_cc0 && no_input_reloads)
for (i = 0; i < n_reloads; i++)
gcc_assert (rld[i].in == 0
|| rld[i].when_needed == RELOAD_FOR_OUTADDR_ADDRESS
|| rld[i].when_needed == RELOAD_FOR_OUTPUT_ADDRESS);
-#endif
/* Compute reload_mode and reload_nregs. */
for (i = 0; i < n_reloads; i++)
return label;
}
-#if HAVE_cc0
/* INSN uses CC0 and is being moved into a delay slot. Set up REG_CC_SETTER
and REG_CC_USER notes so we can find it. */
add_reg_note (user, REG_CC_SETTER, insn);
add_reg_note (insn, REG_CC_USER, user);
}
-#endif
\f
/* Insns which have delay slots that have not yet been filled. */
be other insns that became dead anyway, which we wouldn't know to
delete. */
-#if HAVE_cc0
- if (reg_mentioned_p (cc0_rtx, insn))
+ if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, insn))
{
rtx note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);
delete_from_delay_slot (trial);
}
}
-#endif
delete_related_insns (insn);
}
if (insn_references_resource_p (trial, sets, false)
|| insn_sets_resource_p (trial, needed, false)
|| insn_sets_resource_p (trial, sets, false)
-#if HAVE_cc0
/* If TRIAL sets CC0, we can't copy it, so we can't steal this
delay list. */
- || find_reg_note (trial, REG_CC_USER, NULL_RTX)
-#endif
+ || (HAVE_cc0 && find_reg_note (trial, REG_CC_USER, NULL_RTX))
/* If TRIAL is from the fallthrough code of an annulled branch insn
in SEQ, we cannot use it. */
|| (INSN_ANNULLED_BRANCH_P (seq->insn (0))
if (insn_references_resource_p (trial, sets, false)
|| insn_sets_resource_p (trial, needed, false)
|| insn_sets_resource_p (trial, sets, false)
-#if HAVE_cc0
- || sets_cc0_p (PATTERN (trial))
-#endif
- )
+ || (HAVE_cc0 && sets_cc0_p (PATTERN (trial))))
break;
target_main = XVECEXP (PATTERN (target), 0, 0);
if (resource_conflicts_p (&needed, &set)
-#if HAVE_cc0
- || reg_mentioned_p (cc0_rtx, ipat)
-#endif
+ || (HAVE_cc0 && reg_mentioned_p (cc0_rtx, ipat))
/* The insn requiring the delay may not set anything needed or set by
INSN. */
|| insn_sets_resource_p (target_main, &needed, true)
{
next_trial = next_nonnote_insn (trial);
delay_list = add_to_delay_list (trial, delay_list);
-#if HAVE_cc0
- if (reg_mentioned_p (cc0_rtx, pat))
+ if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, pat))
link_cc0_insns (trial);
-#endif
+
delete_related_insns (trial);
if (slots_to_fill == ++slots_filled)
break;
must_annul = 1;
winner:
-#if HAVE_cc0
- if (reg_mentioned_p (cc0_rtx, pat))
+ if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, pat))
link_cc0_insns (trial);
-#endif
/* If we own this thread, delete the insn. If this is the
destination of a branch, show that a basic block status
{
rtx note, next;
-#if HAVE_cc0
- if (reg_referenced_p (cc0_rtx, PATTERN (insn)))
+ if (HAVE_cc0 && reg_referenced_p (cc0_rtx, PATTERN (insn)))
{
rtx prev = prev_nonnote_insn (insn);
/* We assume that at this stage
add_reg_note (prev, REG_UNUSED, cc0_rtx);
}
}
-#endif
for (note = REG_NOTES (insn); note; note = next)
{
&& (GET_CODE (PATTERN (insn)) == USE
|| GET_CODE (PATTERN (insn)) == CLOBBER
|| can_throw_internal (insn)
-#if HAVE_cc0
- || sets_cc0_p (PATTERN (insn))
-#endif
+ || (HAVE_cc0 && sets_cc0_p (PATTERN (insn)))
|| (!reload_completed
&& sets_likely_spilled (PATTERN (insn)))))
|| NOTE_P (insn)