From: David Malcolm <dmalcolm@redhat.com>
Date: Mon, 25 Aug 2014 19:32:54 +0000 (+0000)
Subject: config/spu/spu.c: Use rtx_insn
X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=23c39aaa85b6d09b36b639fa98a2c46b229120eb;p=gcc.git

config/spu/spu.c: Use rtx_insn

gcc/
	* config/spu/spu.c (frame_emit_store): Strengthen return type from
	rtx to rtx_insn *.
	(frame_emit_load): Likewise.
	(frame_emit_add_imm): Likewise, also for local "insn".
	(spu_expand_prologue): Likewise for local "insn".
	(struct spu_bb_info): Likewise for field "prop_jump".
	(emit_nop_for_insn): Likewise for param "insn" and local
	"new_insn".
	(pad_bb): Likewise for locals "insn", "next_insn", "prev_insn",
	"hbr_insn".
	(spu_emit_branch_hint): Likewise for params "before", "branch" and
	locals "hint", "insn".
	(get_branch_target): Likewise for param "branch".
	(insn_clobbers_hbr): Likewise for param "insn".
	(insert_hbrp_for_ilb_runout): Likewise for param "first" and
	locals "insn", "before_4", "before_16".
	(insert_hbrp): Likewise for local "insn".
	(spu_machine_dependent_reorg): Likewise for locals "branch",
	"insn", "next", "bbend".
	(uses_ls_unit): Likewise for param "insn".
	(get_pipe): Likewise.
	(spu_sched_variable_issue): Rename param "insn" to "uncast_insn",
	introducing a checked cast.
	(spu_sched_adjust_cost): Likewise for params "insn" and
	"dep_insn".
	(ea_load_store_inline): Strengthen local "insn" from rtx to rtx_insn *.
	(spu_sms_res_mii): Likewise.

From-SVN: r214463
---
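
Note (not part of the commit): the two scheduler hooks touched below still
receive a plain "rtx" from target-independent code, so the patch renames
those parameters to "uncast_insn" / "uncast_dep_insn" and converts them once
at the top of each hook with GCC's checked cast, as_a (from gcc/is-a.h),
which in checking builds asserts that the value really is an insn before any
insn-only accessors are used.  A minimal sketch of the idiom, with the real
hook body elided:

    static int
    spu_sched_variable_issue (FILE *file ATTRIBUTE_UNUSED,
                              int verbose ATTRIBUTE_UNUSED,
                              rtx uncast_insn, int more)
    {
      /* Checked downcast: trips a checking assert if UNCAST_INSN is not
         actually an insn.  */
      rtx_insn *insn = as_a <rtx_insn *> (uncast_insn);
      /* ... real body elided; it can now treat INSN as an rtx_insn *,
         e.g. call get_attr_length (insn).  Placeholder return only.  */
      return more;
    }

Everything else in the patch is mechanical strengthening of locals,
parameters, fields and return types from rtx to rtx_insn *.
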
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index cff88efdb7e..c865df151d4 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,33 @@
+2014-08-25  David Malcolm  <dmalcolm@redhat.com>
+
+	* config/spu/spu.c (frame_emit_store): Strengthen return type from
+	rtx to rtx_insn *.
+	(frame_emit_load): Likewise.
+	(frame_emit_add_imm): Likewise, also for local "insn".
+	(spu_expand_prologue): Likewise for local "insn".
+	(struct spu_bb_info): Likewise for field "prop_jump".
+	(emit_nop_for_insn): Likewise for param "insn" and local
+	"new_insn".
+	(pad_bb): Likewise for locals "insn", "next_insn", "prev_insn",
+	"hbr_insn".
+	(spu_emit_branch_hint): Likewise for params "before", "branch" and
+	locals "hint", "insn".
+	(get_branch_target): Likewise for param "branch".
+	(insn_clobbers_hbr): Likewise for param "insn".
+	(insert_hbrp_for_ilb_runout): Likewise for param "first" and
+	locals "insn", "before_4", "before_16".
+	(insert_hbrp): Likewise for local "insn".
+	(spu_machine_dependent_reorg): Likewise for locals "branch",
+	"insn", "next", "bbend".
+	(uses_ls_unit): Likewise for param "insn".
+	(get_pipe): Likewise.
+	(spu_sched_variable_issue): Rename param "insn" to "uncast_insn",
+	introducing a checked cast.
+	(spu_sched_adjust_cost): Likewise for params "insn" and
+	"dep_insn".
+	(ea_load_store_inline): Strengthen local "insn" from rtx to rtx_insn *.
+	(spu_sms_res_mii): Likewise.
+
 2014-08-25  David Malcolm  <dmalcolm@redhat.com>
 
 	* config/sparc/sparc-protos.h (output_ubranch): Strengthen param 2
diff --git a/gcc/config/spu/spu.c b/gcc/config/spu/spu.c
index 8816371cc34..b974b2362ac 100644
--- a/gcc/config/spu/spu.c
+++ b/gcc/config/spu/spu.c
@@ -160,7 +160,7 @@ static struct spu_builtin_range spu_builtin_range[] = {
 char regs_ever_allocated[FIRST_PSEUDO_REGISTER];
 
 /*  Prototypes and external defs.  */
-static int get_pipe (rtx insn);
+static int get_pipe (rtx_insn *insn);
 static int spu_naked_function_p (tree func);
 static int mem_is_padded_component_ref (rtx x);
 static void fix_range (const char *);
@@ -1632,7 +1632,7 @@ spu_saved_regs_size (void)
   return reg_save_size;
 }
 
-static rtx
+static rtx_insn *
 frame_emit_store (int regno, rtx addr, HOST_WIDE_INT offset)
 {
   rtx reg = gen_rtx_REG (V4SImode, regno);
@@ -1641,7 +1641,7 @@ frame_emit_store (int regno, rtx addr, HOST_WIDE_INT offset)
   return emit_insn (gen_movv4si (mem, reg));
 }
 
-static rtx
+static rtx_insn *
 frame_emit_load (int regno, rtx addr, HOST_WIDE_INT offset)
 {
   rtx reg = gen_rtx_REG (V4SImode, regno);
@@ -1651,10 +1651,10 @@ frame_emit_load (int regno, rtx addr, HOST_WIDE_INT offset)
 }
 
 /* This happens after reload, so we need to expand it.  */
-static rtx
+static rtx_insn *
 frame_emit_add_imm (rtx dst, rtx src, HOST_WIDE_INT imm, rtx scratch)
 {
-  rtx insn;
+  rtx_insn *insn;
   if (satisfies_constraint_K (GEN_INT (imm)))
     {
       insn = emit_insn (gen_addsi3 (dst, src, GEN_INT (imm)));
@@ -1725,7 +1725,8 @@ spu_expand_prologue (void)
   HOST_WIDE_INT saved_regs_size;
   rtx sp_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
   rtx scratch_reg_0, scratch_reg_1;
-  rtx insn, real;
+  rtx_insn *insn;
+  rtx real;
 
   if (flag_pic && optimize == 0 && !cfun->machine->pic_reg)
     cfun->machine->pic_reg = pic_offset_table_rtx;
@@ -1969,7 +1970,7 @@ spu_const_from_ints(enum machine_mode mode, int a, int b, int c, int d)
 /* An array of these is used to propagate hints to predecessor blocks. */
 struct spu_bb_info
 {
-  rtx prop_jump; /* propagated from another block */
+  rtx_insn *prop_jump; /* propagated from another block */
   int bb_index;  /* the original block. */
 };
 static struct spu_bb_info *spu_bb_info;
@@ -1993,10 +1994,10 @@ static struct spu_bb_info *spu_bb_info;
    We check for TImode to handle a MULTI1 insn which has dual issued its
    first instruction. get_pipe returns -1 for MULTI0 or inline asm.  */
 static void
-emit_nop_for_insn (rtx insn)
+emit_nop_for_insn (rtx_insn *insn)
 {
   int p;
-  rtx new_insn;
+  rtx_insn *new_insn;
 
   /* We need to handle JUMP_TABLE_DATA separately.  */
   if (JUMP_TABLE_DATA_P (insn))
@@ -2028,7 +2029,7 @@
 static void
 pad_bb(void)
 {
-  rtx insn, next_insn, prev_insn, hbr_insn = 0;
+  rtx_insn *insn, *next_insn, *prev_insn, *hbr_insn = 0;
   int length;
   int addr;
 
@@ -2098,12 +2099,12 @@ pad_bb(void)
 /* Routines for branch hints. */
 
 static void
-spu_emit_branch_hint (rtx before, rtx branch, rtx target,
+spu_emit_branch_hint (rtx_insn *before, rtx_insn *branch, rtx target,
                       int distance, sbitmap blocks)
 {
   rtx branch_label = 0;
-  rtx hint;
-  rtx insn;
+  rtx_insn *hint;
+  rtx_insn *insn;
   rtx_jump_table_data *table;
 
   if (before == 0 || branch == 0 || target == 0)
@@ -2183,7 +2184,7 @@ spu_emit_branch_hint (rtx before, rtx branch, rtx target,
 /* Returns 0 if we don't want a hint for this branch.  Otherwise return the
    rtx for the branch target. */
 static rtx
-get_branch_target (rtx branch)
+get_branch_target (rtx_insn *branch)
 {
   if (JUMP_P (branch))
     {
@@ -2251,7 +2252,7 @@ get_branch_target (rtx branch)
    should only be used in a clobber, and this function searches for insns
    which clobber it.  */
 static bool
-insn_clobbers_hbr (rtx insn)
+insn_clobbers_hbr (rtx_insn *insn)
 {
   if (INSN_P (insn)
       && GET_CODE (PATTERN (insn)) == PARALLEL)
@@ -2282,9 +2283,9 @@ insn_clobbers_hbr (rtx insn)
    and an hbrp within 16 instructions of FIRST.
  */
 static void
-insert_hbrp_for_ilb_runout (rtx first)
+insert_hbrp_for_ilb_runout (rtx_insn *first)
 {
-  rtx insn, before_4 = 0, before_16 = 0;
+  rtx_insn *insn, *before_4 = 0, *before_16 = 0;
   int addr = 0, length, first_addr = -1;
   int hbrp_addr0 = 128 * 4, hbrp_addr1 = 128 * 4;
   int insert_lnop_after = 0;
@@ -2414,7 +2415,7 @@ insert_hbrp_for_ilb_runout (rtx first)
 static void
 insert_hbrp (void)
 {
-  rtx insn;
+  rtx_insn *insn;
   if (TARGET_SAFE_HINTS)
     {
       shorten_branches (get_insns ());
@@ -2451,7 +2452,7 @@ spu_machine_dependent_reorg (void)
 {
   sbitmap blocks;
   basic_block bb;
-  rtx branch, insn;
+  rtx_insn *branch, *insn;
   rtx branch_target = 0;
   int branch_addr = 0, insn_addr, required_dist = 0;
   int i;
@@ -2523,7 +2524,7 @@ spu_machine_dependent_reorg (void)
                   || insn_clobbers_hbr (insn)
                   || branch_addr - insn_addr > 600))
                 {
-                  rtx next = NEXT_INSN (insn);
+                  rtx_insn *next = NEXT_INSN (insn);
                   int next_addr = INSN_ADDRESSES (INSN_UID (next));
                   if (insn != BB_END (bb)
                       && branch_addr - next_addr >= required_dist)
@@ -2562,7 +2563,7 @@ spu_machine_dependent_reorg (void)
           /* If we haven't emitted a hint for this branch yet, it might be
              profitable to emit it in one of the predecessor blocks, especially
              for loops.  */
-          rtx bbend;
+          rtx_insn *bbend;
           basic_block prev = 0, prop = 0, prev2 = 0;
           int loop_exit = 0, simple_loop = 0;
           int next_addr = INSN_ADDRESSES (INSN_UID (NEXT_INSN (insn)));
@@ -2693,7 +2694,7 @@ spu_sched_issue_rate (void)
 }
 
 static int
-uses_ls_unit(rtx insn)
+uses_ls_unit(rtx_insn *insn)
 {
   rtx set = single_set (insn);
   if (set != 0
@@ -2704,7 +2705,7 @@ uses_ls_unit(rtx insn)
 }
 
 static int
-get_pipe (rtx insn)
+get_pipe (rtx_insn *insn)
 {
   enum attr_type t;
   /* Handle inline asm */
@@ -2800,10 +2801,12 @@ spu_sched_init (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
 
 static int
 spu_sched_variable_issue (FILE *file ATTRIBUTE_UNUSED,
-                          int verbose ATTRIBUTE_UNUSED, rtx insn, int more)
+                          int verbose ATTRIBUTE_UNUSED,
+                          rtx uncast_insn, int more)
 {
   int len;
   int p;
+  rtx_insn *insn = as_a <rtx_insn *> (uncast_insn);
   if (GET_CODE (PATTERN (insn)) == USE
       || GET_CODE (PATTERN (insn)) == CLOBBER
       || (len = get_attr_length (insn)) == 0)
@@ -2997,9 +3000,11 @@ spu_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
 
 /* INSN is dependent on DEP_INSN. */
 static int
-spu_sched_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
+spu_sched_adjust_cost (rtx uncast_insn, rtx link, rtx uncast_dep_insn, int cost)
 {
   rtx set;
+  rtx_insn *insn = as_a <rtx_insn *> (uncast_insn);
+  rtx_insn *dep_insn = as_a <rtx_insn *> (uncast_dep_insn);
 
   /* The blockage pattern is used to prevent instructions from being
      moved across it and has no cost. */
@@ -4272,7 +4277,8 @@ ea_load_store_inline (rtx mem, bool is_store, rtx ea_addr, rtx data_addr)
   rtx tag_eq_pack = gen_reg_rtx (V4SImode);
   rtx tag_eq_pack_si = gen_reg_rtx (SImode);
   rtx eq_index = gen_reg_rtx (SImode);
-  rtx bcomp, hit_label, hit_ref, cont_label, insn;
+  rtx bcomp, hit_label, hit_ref, cont_label;
+  rtx_insn *insn;
 
   if (spu_ea_model != 32)
     {
@@ -6839,7 +6845,7 @@ spu_sms_res_mii (struct ddg *g)
 
   for (i = 0; i < g->num_nodes; i++)
     {
-      rtx insn = g->nodes[i].insn;
+      rtx_insn *insn = g->nodes[i].insn;
       int p = get_pipe (insn) + 2;
 
       gcc_assert (p >= 0);