+2015-04-21 Trevor Saunders <tbsaunde+gcc@tbsaunde.org>
+
+	* caller-save.c (insert_one_insn): Remove #if HAVE_cc0.
+ * cfgcleanup.c (flow_find_cross_jump): Likewise.
+ (flow_find_head_matching_sequence): Likewise.
+ (try_head_merge_bb): Likewise.
+ * combine.c (can_combine_p): Likewise.
+ (try_combine): Likewise.
+ (distribute_notes): Likewise.
+ * df-problems.c (can_move_insns_across): Likewise.
+ * final.c (final): Likewise.
+ * gcse.c (insert_insn_end_basic_block): Likewise.
+ * ira.c (find_moveable_pseudos): Likewise.
+ * reorg.c (try_merge_delay_insns): Likewise.
+ (fill_simple_delay_slots): Likewise.
+ (fill_slots_from_thread): Likewise.
+ * sched-deps.c (sched_analyze_2): Likewise.
+
2015-04-21 Trevor Saunders <tbsaunde+gcc@tbsaunde.org>
* df-scan.c (df_get_entry_block_def_set): Remove #ifdef
rtx_insn *insn = chain->insn;
struct insn_chain *new_chain;
-#if HAVE_cc0
/* If INSN references CC0, put our insns in front of the insn that sets
CC0. This is always safe, since the only way we could be passed an
insn that references CC0 is for a restore, and doing a restore earlier
isn't a problem. We do, however, assume here that CALL_INSNs don't
reference CC0. Guard against non-INSN's like CODE_LABEL. */
- if ((NONJUMP_INSN_P (insn) || JUMP_P (insn))
+ if (HAVE_cc0 && (NONJUMP_INSN_P (insn) || JUMP_P (insn))
&& before_p
&& reg_referenced_p (cc0_rtx, PATTERN (insn)))
chain = chain->prev, insn = chain->insn;
-#endif
new_chain = new_insn_chain ();
if (before_p)
i2 = PREV_INSN (i2);
}
-#if HAVE_cc0
/* Don't allow the insn after a compare to be shared by
cross-jumping unless the compare is also shared. */
- if (ninsns && reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
+ if (HAVE_cc0 && ninsns && reg_mentioned_p (cc0_rtx, last1)
+ && ! sets_cc0_p (last1))
last1 = afterlast1, last2 = afterlast2, last_dir = afterlast_dir, ninsns--;
-#endif
/* Include preceding notes and labels in the cross-jump. One,
this may bring us to the head of the blocks as requested above.
i2 = NEXT_INSN (i2);
}
-#if HAVE_cc0
/* Don't allow a compare to be shared by cross-jumping unless the insn
after the compare is also shared. */
- if (ninsns && reg_mentioned_p (cc0_rtx, last1) && sets_cc0_p (last1))
+ if (HAVE_cc0 && ninsns && reg_mentioned_p (cc0_rtx, last1)
+ && sets_cc0_p (last1))
last1 = beforelast1, last2 = beforelast2, ninsns--;
-#endif
if (ninsns)
{
cond = get_condition (jump, &move_before, true, false);
if (cond == NULL_RTX)
{
-#if HAVE_cc0
- if (reg_mentioned_p (cc0_rtx, jump))
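+	  /* On a cc0 target the compare that sets cc0 immediately
+	     precedes the jump; point move_before at it so the setter
+	     and its user are not separated.  */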
+ if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump))
move_before = prev_nonnote_nondebug_insn (jump);
else
-#endif
move_before = jump;
}
cond = get_condition (jump, &move_before, true, false);
if (cond == NULL_RTX)
{
-#if HAVE_cc0
- if (reg_mentioned_p (cc0_rtx, jump))
+ if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump))
move_before = prev_nonnote_nondebug_insn (jump);
else
-#endif
move_before = jump;
}
}
/* Try again, using a different insertion point. */
move_before = jump;
-#if HAVE_cc0
/* Don't try moving before a cc0 user, as that may invalidate
the cc0. */
- if (reg_mentioned_p (cc0_rtx, jump))
+ if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump))
break;
-#endif
continue;
}
/* For the unmerged insns, try a different insertion point. */
move_before = jump;
-#if HAVE_cc0
/* Don't try moving before a cc0 user, as that may invalidate
the cc0. */
- if (reg_mentioned_p (cc0_rtx, jump))
+ if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump))
break;
-#endif
for (ix = 0; ix < nedges; ix++)
currptr[ix] = headptr[ix] = nextptr[ix];
return 0;
#endif
-#if HAVE_cc0
/* Don't combine an insn that follows a CC0-setting insn.
An insn that uses CC0 must not be separated from the one that sets it.
We do, however, allow I2 to follow a CC0-setting insn if that insn
It would be more logical to test whether CC0 occurs inside I1 or I2,
but that would be much slower, and this ought to be equivalent. */
- p = prev_nonnote_insn (insn);
- if (p && p != pred && NONJUMP_INSN_P (p) && sets_cc0_p (PATTERN (p))
- && ! all_adjacent)
- return 0;
-#endif
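+  /* HAVE_cc0 is a constant expression on every target, so on targets
+     without cc0 the compiler folds this block away entirely.  */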
+ if (HAVE_cc0)
+ {
+ p = prev_nonnote_insn (insn);
+ if (p && p != pred && NONJUMP_INSN_P (p) && sets_cc0_p (PATTERN (p))
+ && ! all_adjacent)
+ return 0;
+ }
/* If we get here, we have passed all the tests and the combination is
to be allowed. */
subst_insn = i3;
-#if !HAVE_cc0
/* Many machines that don't use CC0 have insns that can both perform an
arithmetic operation and set the condition code. These operations will
be represented as a PARALLEL with the first element of the vector
needed, and make the PARALLEL by just replacing I2DEST in I3SRC with
I2SRC. Later we will make the PARALLEL that contains I2. */
- if (i1 == 0 && added_sets_2 && GET_CODE (PATTERN (i3)) == SET
+ if (!HAVE_cc0 && i1 == 0 && added_sets_2 && GET_CODE (PATTERN (i3)) == SET
&& GET_CODE (SET_SRC (PATTERN (i3))) == COMPARE
&& CONST_INT_P (XEXP (SET_SRC (PATTERN (i3)), 1))
&& rtx_equal_p (XEXP (SET_SRC (PATTERN (i3)), 0), i2dest))
i2_is_used = 1;
}
}
-#endif
if (i2_is_used == 0)
{
are set between I2 and I3. */
if (insn_code_number < 0
&& (split = find_split_point (&newpat, i3, false)) != 0
-#if HAVE_cc0
- && REG_P (i2dest)
-#endif
+ && (!HAVE_cc0 || REG_P (i2dest))
/* We need I2DEST in the proper mode. If it is a hard register
or the only use of a pseudo, we can change its mode.
Make sure we don't change a hard register to have a mode that
&& !(GET_CODE (SET_DEST (set1)) == SUBREG
&& find_reg_note (i2, REG_DEAD,
SUBREG_REG (SET_DEST (set1))))
-#if HAVE_cc0
- && !reg_referenced_p (cc0_rtx, set0)
-#endif
+ && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set0))
/* If I3 is a jump, ensure that set0 is a jump so that
we do not create invalid RTL. */
&& (!JUMP_P (i3) || SET_DEST (set0) == pc_rtx)
&& !(GET_CODE (SET_DEST (set0)) == SUBREG
&& find_reg_note (i2, REG_DEAD,
SUBREG_REG (SET_DEST (set0))))
-#if HAVE_cc0
- && !reg_referenced_p (cc0_rtx, set1)
-#endif
+ && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set1))
/* If I3 is a jump, ensure that set1 is a jump so that
we do not create invalid RTL. */
&& (!JUMP_P (i3) || SET_DEST (set1) == pc_rtx)
}
}
-#if HAVE_cc0
/* If I2 is the CC0 setter and I3 is the CC0 user then check whether
they are adjacent to each other or not. */
- {
- rtx_insn *p = prev_nonnote_insn (i3);
- if (p && p != i2 && NONJUMP_INSN_P (p) && newi2pat
- && sets_cc0_p (newi2pat))
- {
- undo_all ();
- return 0;
- }
- }
-#endif
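+  /* The user of cc0 must immediately follow its setter, so if an insn
+     intervenes, undo all substitutions and reject the combination.  */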
+ if (HAVE_cc0)
+ {
+ rtx_insn *p = prev_nonnote_insn (i3);
+ if (p && p != i2 && NONJUMP_INSN_P (p) && newi2pat
+ && sets_cc0_p (newi2pat))
+ {
+ undo_all ();
+ return 0;
+ }
+ }
/* Only allow this combination if insn_rtx_costs reports that the
replacement instructions are cheaper than the originals. */
{
rtx set = single_set (tem_insn);
rtx inner_dest = 0;
-#if HAVE_cc0
rtx_insn *cc0_setter = NULL;
-#endif
if (set != 0)
for (inner_dest = SET_DEST (set);
if (tem_insn == i2)
i2 = NULL;
-#if HAVE_cc0
/* Delete the setter too. */
if (cc0_setter)
{
if (cc0_setter == i2)
i2 = NULL;
}
-#endif
}
else
{
if (NONDEBUG_INSN_P (insn))
{
if (!bitmap_intersect_p (test_set, local_merge_live)
-#if HAVE_cc0
- && !sets_cc0_p (insn)
-#endif
- )
+ && (!HAVE_cc0 || !sets_cc0_p (insn)))
{
max_to = insn;
break;
last_ignored_compare = 0;
-#if HAVE_cc0
- for (insn = first; insn; insn = NEXT_INSN (insn))
- {
- /* If CC tracking across branches is enabled, record the insn which
- jumps to each branch only reached from one place. */
- if (optimize_p && JUMP_P (insn))
- {
- rtx lab = JUMP_LABEL (insn);
- if (lab && LABEL_P (lab) && LABEL_NUSES (lab) == 1)
- {
- LABEL_REFS (lab) = insn;
- }
- }
- }
-#endif
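+  /* Record labels reached by exactly one jump, so the cc status can be
+     tracked across that branch.  */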
+ if (HAVE_cc0)
+ for (insn = first; insn; insn = NEXT_INSN (insn))
+ {
+ /* If CC tracking across branches is enabled, record the insn which
+ jumps to each branch only reached from one place. */
+ if (optimize_p && JUMP_P (insn))
+ {
+ rtx lab = JUMP_LABEL (insn);
+ if (lab && LABEL_P (lab) && LABEL_NUSES (lab) == 1)
+ {
+ LABEL_REFS (lab) = insn;
+ }
+ }
+ }
init_recog ();
&& (!single_succ_p (bb)
|| single_succ_edge (bb)->flags & EDGE_ABNORMAL)))
{
-#if HAVE_cc0
/* FIXME: 'twould be nice to call prev_cc0_setter here but it aborts
if cc0 isn't set. */
- rtx note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);
- if (note)
- insn = safe_as_a <rtx_insn *> (XEXP (note, 0));
- else
+ if (HAVE_cc0)
{
- rtx_insn *maybe_cc0_setter = prev_nonnote_insn (insn);
- if (maybe_cc0_setter
- && INSN_P (maybe_cc0_setter)
- && sets_cc0_p (PATTERN (maybe_cc0_setter)))
- insn = maybe_cc0_setter;
+ rtx note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);
+ if (note)
+ insn = safe_as_a <rtx_insn *> (XEXP (note, 0));
+ else
+ {
+ rtx_insn *maybe_cc0_setter = prev_nonnote_insn (insn);
+ if (maybe_cc0_setter
+ && INSN_P (maybe_cc0_setter)
+ && sets_cc0_p (PATTERN (maybe_cc0_setter)))
+ insn = maybe_cc0_setter;
+ }
}
-#endif
+
/* FIXME: What if something in cc0/jump uses value set in new insn? */
new_insn = emit_insn_before_noloc (pat, insn, bb);
}
{
if (bitmap_bit_p (def_bb_moveable, regno)
&& !control_flow_insn_p (use_insn)
-#if HAVE_cc0
- && !sets_cc0_p (use_insn)
-#endif
- )
+ && (!HAVE_cc0 || !sets_cc0_p (use_insn)))
{
if (modified_between_p (DF_REF_REG (use), def_insn, use_insn))
{
continue;
if (GET_CODE (next_to_match) == GET_CODE (trial)
-#if HAVE_cc0
/* We can't share an insn that sets cc0. */
- && ! sets_cc0_p (pat)
-#endif
+ && (!HAVE_cc0 || ! sets_cc0_p (pat))
&& ! insn_references_resource_p (trial, &set, true)
&& ! insn_sets_resource_p (trial, &set, true)
&& ! insn_sets_resource_p (trial, &needed, true)
if (! insn_references_resource_p (dtrial, &set, true)
&& ! insn_sets_resource_p (dtrial, &set, true)
&& ! insn_sets_resource_p (dtrial, &needed, true)
-#if HAVE_cc0
- && ! sets_cc0_p (PATTERN (dtrial))
-#endif
+ && (!HAVE_cc0 || ! sets_cc0_p (PATTERN (dtrial)))
&& rtx_equal_p (PATTERN (next_to_match), PATTERN (dtrial))
/* Check that DTRIAL and NEXT_TO_MATCH does not reference a
resource modified between them (only dtrial is checked because
filter_flags ? &fset : &set,
true)
&& ! insn_sets_resource_p (trial, &needed, true)
-#if HAVE_cc0
/* Can't separate set of cc0 from its use. */
- && ! (reg_mentioned_p (cc0_rtx, pat) && ! sets_cc0_p (pat))
-#endif
+	      && (!HAVE_cc0
+		  || ! (reg_mentioned_p (cc0_rtx, pat) && ! sets_cc0_p (pat)))
&& ! can_throw_internal (trial))
{
trial = try_split (pat, trial, 1);
&& ! insn_references_resource_p (trial, &set, true)
&& ! insn_sets_resource_p (trial, &set, true)
&& ! insn_sets_resource_p (trial, &needed, true)
-#if HAVE_cc0
- && ! (reg_mentioned_p (cc0_rtx, pat) && ! sets_cc0_p (pat))
-#endif
+	      && (!HAVE_cc0
+		  || ! (reg_mentioned_p (cc0_rtx, pat) && ! sets_cc0_p (pat)))
&& ! (maybe_never && may_trap_or_fault_p (pat))
&& (trial = try_split (pat, trial, 0))
&& eligible_for_delay (insn, slots_filled, trial, flags)
&& ! insn_references_resource_p (next_trial, &set, true)
&& ! insn_sets_resource_p (next_trial, &set, true)
&& ! insn_sets_resource_p (next_trial, &needed, true)
-#if HAVE_cc0
- && ! reg_mentioned_p (cc0_rtx, PATTERN (next_trial))
-#endif
+ && (!HAVE_cc0 || ! reg_mentioned_p (cc0_rtx, PATTERN (next_trial)))
&& ! (maybe_never && may_trap_or_fault_p (PATTERN (next_trial)))
&& (next_trial = try_split (PATTERN (next_trial), next_trial, 0))
&& eligible_for_delay (insn, slots_filled, next_trial, flags)
if (! insn_references_resource_p (trial, &set, true)
&& ! insn_sets_resource_p (trial, &set, true)
&& ! insn_sets_resource_p (trial, &needed, true)
-#if HAVE_cc0
- && ! (reg_mentioned_p (cc0_rtx, pat)
- && (! own_thread || ! sets_cc0_p (pat)))
-#endif
+	      && (!HAVE_cc0
+		  || ! (reg_mentioned_p (cc0_rtx, pat)
+			&& (! own_thread || ! sets_cc0_p (pat))))
&& ! can_throw_internal (trial))
{
rtx prior_insn;
return;
case CC0:
-#if !HAVE_cc0
- gcc_unreachable ();
-#endif
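+      /* A CC0 rtx can only appear on targets that have cc0.  */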
+ if (!HAVE_cc0)
+ gcc_unreachable ();
+
/* User of CC0 depends on immediately preceding insn. */
SCHED_GROUP_P (insn) = 1;
/* Don't move CC0 setter to another block (it can set up the