From: Trevor Saunders Date: Wed, 22 Apr 2015 00:44:27 +0000 (+0000) Subject: always define HAVE_cc0 X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=f1e52ed6b2a91ff953156a63a5c4af70e17fb6a8;p=gcc.git always define HAVE_cc0 gcc/ChangeLog: 2015-04-21 Trevor Saunders * genconfig.c (main): Always define HAVE_cc0. * caller-save.c (insert_one_insn): Change ifdef HAVE_cc0 to #if HAVE_cc0. * cfgcleanup.c (flow_find_cross_jump): Likewise. (flow_find_head_matching_sequence): Likewise. (try_head_merge_bb): Likewise. * cfgrtl.c (rtl_merge_blocks): Likewise. (try_redirect_by_replacing_jump): Likewise. (rtl_tidy_fallthru_edge): Likewise. * combine.c (do_SUBST_MODE): Likewise. (insn_a_feeds_b): Likewise. (combine_instructions): Likewise. (can_combine_p): Likewise. (try_combine): Likewise. (find_split_point): Likewise. (subst): Likewise. (simplify_set): Likewise. (distribute_notes): Likewise. * cprop.c (cprop_jump): Likewise. * cse.c (cse_extended_basic_block): Likewise. * df-problems.c (can_move_insns_across): Likewise. * final.c (final): Likewise. (final_scan_insn): Likewise. * function.c (emit_use_return_register_into_block): Likewise. * gcse.c (insert_insn_end_basic_block): Likewise. * haifa-sched.c (sched_init): Likewise. * ira.c (find_moveable_pseudos): Likewise. * loop-invariant.c (find_invariant_insn): Likewise. * lra-constraints.c (curr_insn_transform): Likewise. * optabs.c (prepare_cmp_insn): Likewise. * postreload.c (reload_combine_recognize_const_pattern): Likewise. * reload.c (find_reloads): Likewise. (find_reloads_address_1): Likewise. * reorg.c (delete_scheduled_jump): Likewise. (steal_delay_list_from_target): Likewise. (steal_delay_list_from_fallthrough): Likewise. (try_merge_delay_insns): Likewise. (redundant_insn): Likewise. (fill_simple_delay_slots): Likewise. (fill_slots_from_thread): Likewise. (delete_computation): Likewise. (relax_delay_slots): Likewise. * sched-deps.c (sched_analyze_2): Likewise. * sched-rgn.c (add_branch_dependences): Likewise. 
From-SVN: r222296 --- diff --git a/gcc/ChangeLog b/gcc/ChangeLog index 9d51cd6f6a2..69e4990d100 100644 --- a/gcc/ChangeLog +++ b/gcc/ChangeLog @@ -1,3 +1,51 @@ +2015-04-21 Trevor Saunders + + * genconfig.c (main): Always define HAVE_cc0. + * caller-save.c (insert_one_insn): Change ifdef HAVE_cc0 to #if + HAVE_cc0. + * cfgcleanup.c (flow_find_cross_jump): Likewise. + (flow_find_head_matching_sequence): Likewise. + (try_head_merge_bb): Likewise. + * cfgrtl.c (rtl_merge_blocks): Likewise. + (try_redirect_by_replacing_jump): Likewise. + (rtl_tidy_fallthru_edge): Likewise. + * combine.c (do_SUBST_MODE): Likewise. + (insn_a_feeds_b): Likewise. + (combine_instructions): Likewise. + (can_combine_p): Likewise. + (try_combine): Likewise. + (find_split_point): Likewise. + (subst): Likewise. + (simplify_set): Likewise. + (distribute_notes): Likewise. + * cprop.c (cprop_jump): Likewise. + * cse.c (cse_extended_basic_block): Likewise. + * df-problems.c (can_move_insns_across): Likewise. + * final.c (final): Likewise. + (final_scan_insn): Likewise. + * function.c (emit_use_return_register_into_block): Likewise. + * gcse.c (insert_insn_end_basic_block): Likewise. + * haifa-sched.c (sched_init): Likewise. + * ira.c (find_moveable_pseudos): Likewise. + * loop-invariant.c (find_invariant_insn): Likewise. + * lra-constraints.c (curr_insn_transform): Likewise. + * optabs.c (prepare_cmp_insn): Likewise. + * postreload.c (reload_combine_recognize_const_pattern): + * Likewise. + * reload.c (find_reloads): Likewise. + (find_reloads_address_1): Likewise. + * reorg.c (delete_scheduled_jump): Likewise. + (steal_delay_list_from_target): Likewise. + (steal_delay_list_from_fallthrough): Likewise. + (try_merge_delay_insns): Likewise. + (redundant_insn): Likewise. + (fill_simple_delay_slots): Likewise. + (fill_slots_from_thread): Likewise. + (delete_computation): Likewise. + (relax_delay_slots): Likewise. + * sched-deps.c (sched_analyze_2): Likewise. 
+ * sched-rgn.c (add_branch_dependences): Likewise. + 2015-04-21 Trevor Saunders * combine.c (find_single_use): Remove HAVE_cc0 ifdef for code diff --git a/gcc/caller-save.c b/gcc/caller-save.c index 3b01941d116..fc575ebb724 100644 --- a/gcc/caller-save.c +++ b/gcc/caller-save.c @@ -1400,7 +1400,7 @@ insert_one_insn (struct insn_chain *chain, int before_p, int code, rtx pat) rtx_insn *insn = chain->insn; struct insn_chain *new_chain; -#ifdef HAVE_cc0 +#if HAVE_cc0 /* If INSN references CC0, put our insns in front of the insn that sets CC0. This is always safe, since the only way we could be passed an insn that references CC0 is for a restore, and doing a restore earlier diff --git a/gcc/cfgcleanup.c b/gcc/cfgcleanup.c index 93f682f6847..17cf023561b 100644 --- a/gcc/cfgcleanup.c +++ b/gcc/cfgcleanup.c @@ -1456,7 +1456,7 @@ flow_find_cross_jump (basic_block bb1, basic_block bb2, rtx_insn **f1, i2 = PREV_INSN (i2); } -#ifdef HAVE_cc0 +#if HAVE_cc0 /* Don't allow the insn after a compare to be shared by cross-jumping unless the compare is also shared. */ if (ninsns && reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1)) @@ -1579,7 +1579,7 @@ flow_find_head_matching_sequence (basic_block bb1, basic_block bb2, rtx_insn **f i2 = NEXT_INSN (i2); } -#ifdef HAVE_cc0 +#if HAVE_cc0 /* Don't allow a compare to be shared by cross-jumping unless the insn after the compare is also shared. 
*/ if (ninsns && reg_mentioned_p (cc0_rtx, last1) && sets_cc0_p (last1)) @@ -2370,7 +2370,7 @@ try_head_merge_bb (basic_block bb) cond = get_condition (jump, &move_before, true, false); if (cond == NULL_RTX) { -#ifdef HAVE_cc0 +#if HAVE_cc0 if (reg_mentioned_p (cc0_rtx, jump)) move_before = prev_nonnote_nondebug_insn (jump); else @@ -2539,7 +2539,7 @@ try_head_merge_bb (basic_block bb) cond = get_condition (jump, &move_before, true, false); if (cond == NULL_RTX) { -#ifdef HAVE_cc0 +#if HAVE_cc0 if (reg_mentioned_p (cc0_rtx, jump)) move_before = prev_nonnote_nondebug_insn (jump); else @@ -2562,7 +2562,7 @@ try_head_merge_bb (basic_block bb) /* Try again, using a different insertion point. */ move_before = jump; -#ifdef HAVE_cc0 +#if HAVE_cc0 /* Don't try moving before a cc0 user, as that may invalidate the cc0. */ if (reg_mentioned_p (cc0_rtx, jump)) @@ -2622,7 +2622,7 @@ try_head_merge_bb (basic_block bb) /* For the unmerged insns, try a different insertion point. */ move_before = jump; -#ifdef HAVE_cc0 +#if HAVE_cc0 /* Don't try moving before a cc0 user, as that may invalidate the cc0. */ if (reg_mentioned_p (cc0_rtx, jump)) diff --git a/gcc/cfgrtl.c b/gcc/cfgrtl.c index 633c7626de6..4712cd4c221 100644 --- a/gcc/cfgrtl.c +++ b/gcc/cfgrtl.c @@ -893,7 +893,7 @@ rtl_merge_blocks (basic_block a, basic_block b) del_first = a_end; -#ifdef HAVE_cc0 +#if HAVE_cc0 /* If this was a conditional jump, we need to also delete the insn that set cc0. */ if (only_sets_cc0_p (prev)) @@ -1064,7 +1064,7 @@ try_redirect_by_replacing_jump (edge e, basic_block target, bool in_cfglayout) /* In case we zap a conditional jump, we'll need to kill the cc0 setter too. 
*/ kill_from = insn; -#ifdef HAVE_cc0 +#if HAVE_cc0 if (reg_mentioned_p (cc0_rtx, PATTERN (insn)) && only_sets_cc0_p (PREV_INSN (insn))) kill_from = PREV_INSN (insn); @@ -1825,7 +1825,7 @@ rtl_tidy_fallthru_edge (edge e) delete_insn (table); } -#ifdef HAVE_cc0 +#if HAVE_cc0 /* If this was a conditional jump, we need to also delete the insn that set cc0. */ if (any_condjump_p (q) && only_sets_cc0_p (PREV_INSN (q))) diff --git a/gcc/combine.c b/gcc/combine.c index 0a35b8f5300..430084eb3c8 100644 --- a/gcc/combine.c +++ b/gcc/combine.c @@ -836,7 +836,7 @@ do_SUBST_MODE (rtx *into, machine_mode newval) #define SUBST_MODE(INTO, NEWVAL) do_SUBST_MODE (&(INTO), (NEWVAL)) -#ifndef HAVE_cc0 +#if !HAVE_cc0 /* Similar to SUBST, but NEWVAL is a LOG_LINKS expression. */ static void @@ -1141,7 +1141,7 @@ insn_a_feeds_b (rtx_insn *a, rtx_insn *b) FOR_EACH_LOG_LINK (links, b) if (links->insn == a) return true; -#ifdef HAVE_cc0 +#if HAVE_cc0 if (sets_cc0_p (a)) return true; #endif @@ -1157,7 +1157,7 @@ static int combine_instructions (rtx_insn *f, unsigned int nregs) { rtx_insn *insn, *next; -#ifdef HAVE_cc0 +#if HAVE_cc0 rtx_insn *prev; #endif struct insn_link *links, *nextlinks; @@ -1334,7 +1334,7 @@ combine_instructions (rtx_insn *f, unsigned int nregs) } } -#ifdef HAVE_cc0 +#if HAVE_cc0 /* Try to combine a jump insn that uses CC0 with a preceding insn that sets CC0, and maybe with its logical predecessor as well. @@ -2068,7 +2068,7 @@ can_combine_p (rtx_insn *insn, rtx_insn *i3, rtx_insn *pred ATTRIBUTE_UNUSED, return 0; #endif -#ifdef HAVE_cc0 +#if HAVE_cc0 /* Don't combine an insn that follows a CC0-setting insn. An insn that uses CC0 must not be separated from the one that sets it. 
We do, however, allow I2 to follow a CC0-setting insn if that insn @@ -2514,7 +2514,7 @@ is_parallel_of_n_reg_sets (rtx pat, int n) return true; } -#ifndef HAVE_cc0 +#if !HAVE_cc0 /* Return whether INSN, a PARALLEL of N register SETs (and maybe some CLOBBERs), can be split into individual SETs in that order, without changing semantics. */ @@ -2888,7 +2888,7 @@ try_combine (rtx_insn *i3, rtx_insn *i2, rtx_insn *i1, rtx_insn *i0, } } -#ifndef HAVE_cc0 +#if !HAVE_cc0 /* If we have no I1 and I2 looks like: (parallel [(set (reg:CC X) (compare:CC OP (const_int 0))) (set Y OP)]) @@ -3116,7 +3116,7 @@ try_combine (rtx_insn *i3, rtx_insn *i2, rtx_insn *i1, rtx_insn *i0, subst_insn = i3; -#ifndef HAVE_cc0 +#if !HAVE_cc0 /* Many machines that don't use CC0 have insns that can both perform an arithmetic operation and set the condition code. These operations will be represented as a PARALLEL with the first element of the vector @@ -3646,7 +3646,7 @@ try_combine (rtx_insn *i3, rtx_insn *i2, rtx_insn *i1, rtx_insn *i0, are set between I2 and I3. */ if (insn_code_number < 0 && (split = find_split_point (&newpat, i3, false)) != 0 -#ifdef HAVE_cc0 +#if HAVE_cc0 && REG_P (i2dest) #endif /* We need I2DEST in the proper mode. 
If it is a hard register @@ -3918,7 +3918,7 @@ try_combine (rtx_insn *i3, rtx_insn *i2, rtx_insn *i1, rtx_insn *i0, && !(GET_CODE (SET_DEST (set1)) == SUBREG && find_reg_note (i2, REG_DEAD, SUBREG_REG (SET_DEST (set1)))) -#ifdef HAVE_cc0 +#if HAVE_cc0 && !reg_referenced_p (cc0_rtx, set0) #endif /* If I3 is a jump, ensure that set0 is a jump so that @@ -3935,7 +3935,7 @@ try_combine (rtx_insn *i3, rtx_insn *i2, rtx_insn *i1, rtx_insn *i0, && !(GET_CODE (SET_DEST (set0)) == SUBREG && find_reg_note (i2, REG_DEAD, SUBREG_REG (SET_DEST (set0)))) -#ifdef HAVE_cc0 +#if HAVE_cc0 && !reg_referenced_p (cc0_rtx, set1) #endif /* If I3 is a jump, ensure that set1 is a jump so that @@ -4002,7 +4002,7 @@ try_combine (rtx_insn *i3, rtx_insn *i2, rtx_insn *i1, rtx_insn *i0, } } -#ifdef HAVE_cc0 +#if HAVE_cc0 /* If I2 is the CC0 setter and I3 is the CC0 user then check whether they are adjacent to each other or not. */ { @@ -4816,7 +4816,7 @@ find_split_point (rtx *loc, rtx_insn *insn, bool set_src) break; case SET: -#ifdef HAVE_cc0 +#if HAVE_cc0 /* If SET_DEST is CC0 and SET_SRC is not an operand, a COMPARE, or a ZERO_EXTRACT, the most likely reason why this doesn't match is that we need to put the operand into a register. So split at that @@ -5331,7 +5331,7 @@ subst (rtx x, rtx from, rtx to, int in_dest, int in_cond, int unique_copy) && ! (code == SUBREG && MODES_TIEABLE_P (GET_MODE (x), GET_MODE (SUBREG_REG (to)))) -#ifdef HAVE_cc0 +#if HAVE_cc0 && ! (code == SET && i == 1 && XEXP (x, 0) == cc0_rtx) #endif ) @@ -6582,7 +6582,7 @@ simplify_set (rtx x) else compare_mode = SELECT_CC_MODE (new_code, op0, op1); -#ifndef HAVE_cc0 +#if !HAVE_cc0 /* If the mode changed, we have to change SET_DEST, the mode in the compare, and the mode in the place SET_DEST is used. If SET_DEST is a hard register, just build new versions with the proper mode. 
If it @@ -13802,7 +13802,7 @@ distribute_notes (rtx notes, rtx_insn *from_insn, rtx_insn *i3, rtx_insn *i2, { rtx set = single_set (tem_insn); rtx inner_dest = 0; -#ifdef HAVE_cc0 +#if HAVE_cc0 rtx_insn *cc0_setter = NULL; #endif @@ -13824,7 +13824,7 @@ distribute_notes (rtx notes, rtx_insn *from_insn, rtx_insn *i3, rtx_insn *i2, if (set != 0 && ! side_effects_p (SET_SRC (set)) && rtx_equal_p (XEXP (note, 0), inner_dest) -#ifdef HAVE_cc0 +#if HAVE_cc0 && (! reg_mentioned_p (cc0_rtx, SET_SRC (set)) || ((cc0_setter = prev_cc0_setter (tem_insn)) != NULL && sets_cc0_p (PATTERN (cc0_setter)) > 0)) @@ -13848,7 +13848,7 @@ distribute_notes (rtx notes, rtx_insn *from_insn, rtx_insn *i3, rtx_insn *i2, if (tem_insn == i2) i2 = NULL; -#ifdef HAVE_cc0 +#if HAVE_cc0 /* Delete the setter too. */ if (cc0_setter) { diff --git a/gcc/cprop.c b/gcc/cprop.c index c9fb2fc5129..b1caabb09fc 100644 --- a/gcc/cprop.c +++ b/gcc/cprop.c @@ -965,7 +965,7 @@ cprop_jump (basic_block bb, rtx_insn *setcc, rtx_insn *jump, rtx from, rtx src) remove_note (jump, note); } -#ifdef HAVE_cc0 +#if HAVE_cc0 /* Delete the cc0 setter. 
*/ if (setcc != NULL && CC0_P (SET_DEST (single_set (setcc)))) delete_insn (setcc); diff --git a/gcc/cse.c b/gcc/cse.c index ef5eb8cebef..94f5c33aa2b 100644 --- a/gcc/cse.c +++ b/gcc/cse.c @@ -6524,7 +6524,7 @@ cse_extended_basic_block (struct cse_basic_block_data *ebb_data) && check_for_label_ref (insn)) recorded_label_ref = true; -#ifdef HAVE_cc0 +#if HAVE_cc0 if (NONDEBUG_INSN_P (insn)) { /* If the previous insn sets CC0 and this insn no diff --git a/gcc/df-problems.c b/gcc/df-problems.c index 3f4aacd20f5..d2134558e65 100644 --- a/gcc/df-problems.c +++ b/gcc/df-problems.c @@ -3820,7 +3820,7 @@ can_move_insns_across (rtx_insn *from, rtx_insn *to, if (bitmap_intersect_p (merge_set, test_use) || bitmap_intersect_p (merge_use, test_set)) break; -#ifdef HAVE_cc0 +#if HAVE_cc0 if (!sets_cc0_p (insn)) #endif max_to = insn; @@ -3861,7 +3861,7 @@ can_move_insns_across (rtx_insn *from, rtx_insn *to, if (NONDEBUG_INSN_P (insn)) { if (!bitmap_intersect_p (test_set, local_merge_live) -#ifdef HAVE_cc0 +#if HAVE_cc0 && !sets_cc0_p (insn) #endif ) diff --git a/gcc/final.c b/gcc/final.c index 41f6bd9fe04..dc7126ef212 100644 --- a/gcc/final.c +++ b/gcc/final.c @@ -242,7 +242,7 @@ static void output_asm_operand_names (rtx *, int *, int); #ifdef LEAF_REGISTERS static void leaf_renumber_regs (rtx_insn *); #endif -#ifdef HAVE_cc0 +#if HAVE_cc0 static int alter_cond (rtx); #endif #ifndef ADDR_VEC_ALIGN @@ -2029,7 +2029,7 @@ final (rtx_insn *first, FILE *file, int optimize_p) last_ignored_compare = 0; -#ifdef HAVE_cc0 +#if HAVE_cc0 for (insn = first; insn; insn = NEXT_INSN (insn)) { /* If CC tracking across branches is enabled, record the insn which @@ -2198,7 +2198,7 @@ rtx_insn * final_scan_insn (rtx_insn *insn, FILE *file, int optimize_p ATTRIBUTE_UNUSED, int nopeepholes ATTRIBUTE_UNUSED, int *seen) { -#ifdef HAVE_cc0 +#if HAVE_cc0 rtx set; #endif rtx_insn *next; @@ -2505,7 +2505,7 @@ final_scan_insn (rtx_insn *insn, FILE *file, int optimize_p ATTRIBUTE_UNUSED, || GET_CODE (body) == 
CLOBBER) break; -#ifdef HAVE_cc0 +#if HAVE_cc0 { /* If there is a REG_CC_SETTER note on this insn, it means that the setting of the condition code was done in the delay slot @@ -2722,7 +2722,7 @@ final_scan_insn (rtx_insn *insn, FILE *file, int optimize_p ATTRIBUTE_UNUSED, body = PATTERN (insn); -#ifdef HAVE_cc0 +#if HAVE_cc0 set = single_set (insn); /* Check for redundant test and compare instructions @@ -2967,7 +2967,7 @@ final_scan_insn (rtx_insn *insn, FILE *file, int optimize_p ATTRIBUTE_UNUSED, && GET_CODE (PATTERN (insn)) == COND_EXEC) current_insn_predicate = COND_EXEC_TEST (PATTERN (insn)); -#ifdef HAVE_cc0 +#if HAVE_cc0 cc_prev_status = cc_status; /* Update `cc_status' for this instruction. @@ -3256,7 +3256,7 @@ walk_alter_subreg (rtx *xp, bool *changed) return *xp; } -#ifdef HAVE_cc0 +#if HAVE_cc0 /* Given BODY, the body of a jump instruction, alter the jump condition as required by the bits that are set in cc_status.flags. diff --git a/gcc/function.c b/gcc/function.c index 7d4df92f40e..4963e5220b0 100644 --- a/gcc/function.c +++ b/gcc/function.c @@ -5661,7 +5661,7 @@ emit_use_return_register_into_block (basic_block bb) seq = get_insns (); end_sequence (); insn = BB_END (bb); -#ifdef HAVE_cc0 +#if HAVE_cc0 if (reg_mentioned_p (cc0_rtx, PATTERN (insn))) insn = prev_cc0_setter (insn); #endif diff --git a/gcc/gcse.c b/gcc/gcse.c index 4be3f36a773..151da065a0d 100644 --- a/gcc/gcse.c +++ b/gcc/gcse.c @@ -2048,7 +2048,7 @@ insert_insn_end_basic_block (struct gcse_expr *expr, basic_block bb) && (!single_succ_p (bb) || single_succ_edge (bb)->flags & EDGE_ABNORMAL))) { -#ifdef HAVE_cc0 +#if HAVE_cc0 /* FIXME: 'twould be nice to call prev_cc0_setter here but it aborts if cc0 isn't set. 
*/ rtx note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX); diff --git a/gcc/genconfig.c b/gcc/genconfig.c index da3922d835a..2247eef13c7 100644 --- a/gcc/genconfig.c +++ b/gcc/genconfig.c @@ -346,6 +346,7 @@ main (int argc, char **argv) { /* We output CC0_P this way to make sure that X is declared somewhere. */ + printf ("#define HAVE_cc0 0\n"); printf ("#define CC0_P(X) ((X) ? 0 : 0)\n"); } diff --git a/gcc/haifa-sched.c b/gcc/haifa-sched.c index 6b231c2505e..8dcc67dadbf 100644 --- a/gcc/haifa-sched.c +++ b/gcc/haifa-sched.c @@ -7184,7 +7184,7 @@ void sched_init (void) { /* Disable speculative loads in their presence if cc0 defined. */ -#ifdef HAVE_cc0 +#if HAVE_cc0 flag_schedule_speculative_load = 0; #endif diff --git a/gcc/ira.c b/gcc/ira.c index ea2b69fee76..819d702267a 100644 --- a/gcc/ira.c +++ b/gcc/ira.c @@ -4641,7 +4641,7 @@ find_moveable_pseudos (void) ? " (no unique first use)" : ""); continue; } -#ifdef HAVE_cc0 +#if HAVE_cc0 if (reg_referenced_p (cc0_rtx, PATTERN (closest_use))) { if (dump_file) @@ -4724,7 +4724,7 @@ find_moveable_pseudos (void) { if (bitmap_bit_p (def_bb_moveable, regno) && !control_flow_insn_p (use_insn) -#ifdef HAVE_cc0 +#if HAVE_cc0 && !sets_cc0_p (use_insn) #endif ) diff --git a/gcc/loop-invariant.c b/gcc/loop-invariant.c index 52ecc0574e2..77bb85daeb4 100644 --- a/gcc/loop-invariant.c +++ b/gcc/loop-invariant.c @@ -923,7 +923,7 @@ find_invariant_insn (rtx_insn *insn, bool always_reached, bool always_executed) bool simple = true; struct invariant *inv; -#ifdef HAVE_cc0 +#if HAVE_cc0 /* We can't move a CC0 setter without the user. 
*/ if (sets_cc0_p (insn)) return; diff --git a/gcc/lra-constraints.c b/gcc/lra-constraints.c index 497d8db2d1f..451a14bbd83 100644 --- a/gcc/lra-constraints.c +++ b/gcc/lra-constraints.c @@ -3354,7 +3354,7 @@ curr_insn_transform (bool check_only_p) if (JUMP_P (curr_insn) || CALL_P (curr_insn)) no_output_reloads_p = true; -#ifdef HAVE_cc0 +#if HAVE_cc0 if (reg_referenced_p (cc0_rtx, PATTERN (curr_insn))) no_input_reloads_p = true; if (reg_set_p (cc0_rtx, PATTERN (curr_insn))) diff --git a/gcc/optabs.c b/gcc/optabs.c index e9dc7981c63..983c8d9c64a 100644 --- a/gcc/optabs.c +++ b/gcc/optabs.c @@ -4088,7 +4088,7 @@ prepare_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size, > COSTS_N_INSNS (1))) y = force_reg (mode, y); -#ifdef HAVE_cc0 +#if HAVE_cc0 /* Make sure if we have a canonical comparison. The RTL documentation states that canonical comparisons are required only for targets which have cc0. */ diff --git a/gcc/postreload.c b/gcc/postreload.c index 30fa4498dc7..68443abda25 100644 --- a/gcc/postreload.c +++ b/gcc/postreload.c @@ -1032,7 +1032,7 @@ reload_combine_recognize_const_pattern (rtx_insn *insn) && reg_state[clobbered_regno].real_store_ruid >= use_ruid) break; -#ifdef HAVE_cc0 +#if HAVE_cc0 /* Do not separate cc0 setter and cc0 user on HAVE_cc0 targets. 
*/ if (must_move_add && sets_cc0_p (PATTERN (use_insn))) break; diff --git a/gcc/reload.c b/gcc/reload.c index 70b86a9f674..8b253b83430 100644 --- a/gcc/reload.c +++ b/gcc/reload.c @@ -2706,7 +2706,7 @@ find_reloads (rtx_insn *insn, int replace, int ind_levels, int live_known, if (JUMP_P (insn) || CALL_P (insn)) no_output_reloads = 1; -#ifdef HAVE_cc0 +#if HAVE_cc0 if (reg_referenced_p (cc0_rtx, PATTERN (insn))) no_input_reloads = 1; if (reg_set_p (cc0_rtx, PATTERN (insn))) @@ -4579,7 +4579,7 @@ find_reloads (rtx_insn *insn, int replace, int ind_levels, int live_known, rld[j].in = 0; } -#ifdef HAVE_cc0 +#if HAVE_cc0 /* If we made any reloads for addresses, see if they violate a "no input reloads" requirement for this insn. But loads that we do after the insn (such as for output addresses) are fine. */ @@ -5873,7 +5873,7 @@ find_reloads_address_1 (machine_mode mode, addr_space_t as, enum insn_code icode = optab_handler (add_optab, GET_MODE (x)); if (insn && NONJUMP_INSN_P (insn) && equiv && memory_operand (equiv, GET_MODE (equiv)) -#ifdef HAVE_cc0 +#if HAVE_cc0 && ! sets_cc0_p (PATTERN (insn)) #endif && ! (icode != CODE_FOR_nothing diff --git a/gcc/reorg.c b/gcc/reorg.c index c734a78571c..68e991c0a0d 100644 --- a/gcc/reorg.c +++ b/gcc/reorg.c @@ -182,7 +182,7 @@ skip_consecutive_labels (rtx label_or_return) return label; } -#ifdef HAVE_cc0 +#if HAVE_cc0 /* INSN uses CC0 and is being moved into a delay slot. Set up REG_CC_SETTER and REG_CC_USER notes so we can find it. */ @@ -699,7 +699,7 @@ delete_scheduled_jump (rtx_insn *insn) be other insns that became dead anyway, which we wouldn't know to delete. 
*/ -#ifdef HAVE_cc0 +#if HAVE_cc0 if (reg_mentioned_p (cc0_rtx, insn)) { rtx note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX); @@ -1171,7 +1171,7 @@ steal_delay_list_from_target (rtx_insn *insn, rtx condition, rtx_sequence *seq, if (insn_references_resource_p (trial, sets, false) || insn_sets_resource_p (trial, needed, false) || insn_sets_resource_p (trial, sets, false) -#ifdef HAVE_cc0 +#if HAVE_cc0 /* If TRIAL sets CC0, we can't copy it, so we can't steal this delay list. */ || find_reg_note (trial, REG_CC_USER, NULL_RTX) @@ -1279,7 +1279,7 @@ steal_delay_list_from_fallthrough (rtx_insn *insn, rtx condition, if (insn_references_resource_p (trial, sets, false) || insn_sets_resource_p (trial, needed, false) || insn_sets_resource_p (trial, sets, false) -#ifdef HAVE_cc0 +#if HAVE_cc0 || sets_cc0_p (PATTERN (trial)) #endif ) @@ -1373,7 +1373,7 @@ try_merge_delay_insns (rtx insn, rtx_insn *thread) continue; if (GET_CODE (next_to_match) == GET_CODE (trial) -#ifdef HAVE_cc0 +#if HAVE_cc0 /* We can't share an insn that sets cc0. */ && ! sets_cc0_p (pat) #endif @@ -1446,7 +1446,7 @@ try_merge_delay_insns (rtx insn, rtx_insn *thread) if (! insn_references_resource_p (dtrial, &set, true) && ! insn_sets_resource_p (dtrial, &set, true) && ! insn_sets_resource_p (dtrial, &needed, true) -#ifdef HAVE_cc0 +#if HAVE_cc0 && ! sets_cc0_p (PATTERN (dtrial)) #endif && rtx_equal_p (PATTERN (next_to_match), PATTERN (dtrial)) @@ -1629,7 +1629,7 @@ redundant_insn (rtx insn, rtx_insn *target, rtx delay_list) target_main = XVECEXP (PATTERN (target), 0, 0); if (resource_conflicts_p (&needed, &set) -#ifdef HAVE_cc0 +#if HAVE_cc0 || reg_mentioned_p (cc0_rtx, ipat) #endif /* The insn requiring the delay may not set anything needed or set by @@ -2125,7 +2125,7 @@ fill_simple_delay_slots (int non_jumps_p) filter_flags ? &fset : &set, true) && ! insn_sets_resource_p (trial, &needed, true) -#ifdef HAVE_cc0 +#if HAVE_cc0 /* Can't separate set of cc0 from its use. */ && ! 
(reg_mentioned_p (cc0_rtx, pat) && ! sets_cc0_p (pat)) #endif @@ -2260,7 +2260,7 @@ fill_simple_delay_slots (int non_jumps_p) && ! insn_references_resource_p (trial, &set, true) && ! insn_sets_resource_p (trial, &set, true) && ! insn_sets_resource_p (trial, &needed, true) -#ifdef HAVE_cc0 +#if HAVE_cc0 && ! (reg_mentioned_p (cc0_rtx, pat) && ! sets_cc0_p (pat)) #endif && ! (maybe_never && may_trap_or_fault_p (pat)) @@ -2270,7 +2270,7 @@ fill_simple_delay_slots (int non_jumps_p) { next_trial = next_nonnote_insn (trial); delay_list = add_to_delay_list (trial, delay_list); -#ifdef HAVE_cc0 +#if HAVE_cc0 if (reg_mentioned_p (cc0_rtx, pat)) link_cc0_insns (trial); #endif @@ -2309,7 +2309,7 @@ fill_simple_delay_slots (int non_jumps_p) && ! insn_references_resource_p (next_trial, &set, true) && ! insn_sets_resource_p (next_trial, &set, true) && ! insn_sets_resource_p (next_trial, &needed, true) -#ifdef HAVE_cc0 +#if HAVE_cc0 && ! reg_mentioned_p (cc0_rtx, PATTERN (next_trial)) #endif && ! (maybe_never && may_trap_or_fault_p (PATTERN (next_trial))) @@ -2522,7 +2522,7 @@ fill_slots_from_thread (rtx_insn *insn, rtx condition, rtx thread_or_return, if (! insn_references_resource_p (trial, &set, true) && ! insn_sets_resource_p (trial, &set, true) && ! insn_sets_resource_p (trial, &needed, true) -#ifdef HAVE_cc0 +#if HAVE_cc0 && ! (reg_mentioned_p (cc0_rtx, pat) && (! own_thread || ! sets_cc0_p (pat))) #endif @@ -2605,7 +2605,7 @@ fill_slots_from_thread (rtx_insn *insn, rtx condition, rtx thread_or_return, must_annul = 1; winner: -#ifdef HAVE_cc0 +#if HAVE_cc0 if (reg_mentioned_p (cc0_rtx, pat)) link_cc0_insns (trial); #endif @@ -3161,7 +3161,7 @@ delete_computation (rtx insn) { rtx note, next; -#ifdef HAVE_cc0 +#if HAVE_cc0 if (reg_referenced_p (cc0_rtx, PATTERN (insn))) { rtx prev = prev_nonnote_insn (insn); @@ -3498,7 +3498,7 @@ relax_delay_slots (rtx_insn *first) && ! condjump_in_parallel_p (delay_insn) && prev_active_insn (target_label) == insn && ! 
BARRIER_P (prev_nonnote_insn (target_label)) -#ifdef HAVE_cc0 +#if HAVE_cc0 /* If the last insn in the delay slot sets CC0 for some insn, various code assumes that it is in a delay slot. We could put it back where it belonged and delete the register notes, diff --git a/gcc/sched-deps.c b/gcc/sched-deps.c index 6fd6774c45f..7d2719fb69d 100644 --- a/gcc/sched-deps.c +++ b/gcc/sched-deps.c @@ -2609,7 +2609,7 @@ sched_analyze_2 (struct deps_desc *deps, rtx x, rtx_insn *insn) return; case CC0: -#ifndef HAVE_cc0 +#if !HAVE_cc0 gcc_unreachable (); #endif /* User of CC0 depends on immediately preceding insn. */ diff --git a/gcc/sched-rgn.c b/gcc/sched-rgn.c index 76f78dfb504..33261fc6bca 100644 --- a/gcc/sched-rgn.c +++ b/gcc/sched-rgn.c @@ -2487,7 +2487,7 @@ add_branch_dependences (rtx_insn *head, rtx_insn *tail) && (GET_CODE (PATTERN (insn)) == USE || GET_CODE (PATTERN (insn)) == CLOBBER || can_throw_internal (insn) -#ifdef HAVE_cc0 +#if HAVE_cc0 || sets_cc0_p (PATTERN (insn)) #endif || (!reload_completed