+2014-08-25  David Malcolm  <dmalcolm@redhat.com>
+
+	* config/spu/spu.c (frame_emit_store): Strengthen return type from
+	rtx to rtx_insn *.
+	(frame_emit_load): Likewise.
+	(frame_emit_add_imm): Likewise, also for local "insn".
+	(spu_expand_prologue): Likewise for local "insn".
+	(struct spu_bb_info): Likewise for field "prop_jump".
+	(emit_nop_for_insn): Likewise for param "insn" and local
+	"new_insn".
+	(pad_bb): Likewise for locals "insn", "next_insn", "prev_insn",
+	"hbr_insn".
+	(spu_emit_branch_hint): Likewise for params "before", "branch" and
+	locals "hint", "insn".
+	(get_branch_target): Likewise for param "branch".
+	(insn_clobbers_hbr): Likewise for param "insn".
+	(insert_hbrp_for_ilb_runout): Likewise for param "first" and
+	locals "insn", "before_4", "before_16".
+	(insert_hbrp): Likewise for local "insn".
+	(spu_machine_dependent_reorg): Likewise for locals "branch",
+	"insn", "next", "bbend".
+	(uses_ls_unit): Likewise for param "insn".
+	(get_pipe): Likewise.
+	(spu_sched_variable_issue): Rename param "insn" to "uncast_insn",
+	introducing a checked cast.
+	(spu_sched_adjust_cost): Likewise for params "insn" and
+	"dep_insn".
+	(ea_load_store_inline): Strengthen local "insn" from rtx to
+	rtx_insn *.
+	(spu_sms_res_mii): Likewise.
+
2014-08-25  David Malcolm  <dmalcolm@redhat.com>

	* config/sparc/sparc-protos.h (output_ubranch): Strengthen param 2
char regs_ever_allocated[FIRST_PSEUDO_REGISTER];
/* Prototypes and external defs. */
-static int get_pipe (rtx insn);
+static int get_pipe (rtx_insn *insn);
static int spu_naked_function_p (tree func);
static int mem_is_padded_component_ref (rtx x);
static void fix_range (const char *);
return reg_save_size;
}
-static rtx
+static rtx_insn *
frame_emit_store (int regno, rtx addr, HOST_WIDE_INT offset)
{
rtx reg = gen_rtx_REG (V4SImode, regno);
return emit_insn (gen_movv4si (mem, reg));
}
-static rtx
+static rtx_insn *
frame_emit_load (int regno, rtx addr, HOST_WIDE_INT offset)
{
rtx reg = gen_rtx_REG (V4SImode, regno);
}
/* This happens after reload, so we need to expand it. */
-static rtx
+static rtx_insn *
frame_emit_add_imm (rtx dst, rtx src, HOST_WIDE_INT imm, rtx scratch)
{
- rtx insn;
+ rtx_insn *insn;
if (satisfies_constraint_K (GEN_INT (imm)))
{
insn = emit_insn (gen_addsi3 (dst, src, GEN_INT (imm)));
HOST_WIDE_INT saved_regs_size;
rtx sp_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
rtx scratch_reg_0, scratch_reg_1;
- rtx insn, real;
+ rtx_insn *insn;
+ rtx real;
if (flag_pic && optimize == 0 && !cfun->machine->pic_reg)
cfun->machine->pic_reg = pic_offset_table_rtx;
/* An array of these is used to propagate hints to predecessor blocks. */
struct spu_bb_info
{
- rtx prop_jump; /* propagated from another block */
+ rtx_insn *prop_jump; /* propagated from another block */
int bb_index; /* the original block. */
};
static struct spu_bb_info *spu_bb_info;
We check for TImode to handle a MULTI1 insn which has dual issued its
first instruction. get_pipe returns -1 for MULTI0 or inline asm. */
static void
-emit_nop_for_insn (rtx insn)
+emit_nop_for_insn (rtx_insn *insn)
{
int p;
- rtx new_insn;
+ rtx_insn *new_insn;
/* We need to handle JUMP_TABLE_DATA separately. */
if (JUMP_TABLE_DATA_P (insn))
static void
pad_bb(void)
{
- rtx insn, next_insn, prev_insn, hbr_insn = 0;
+ rtx_insn *insn, *next_insn, *prev_insn, *hbr_insn = 0;
int length;
int addr;
/* Routines for branch hints. */
static void
-spu_emit_branch_hint (rtx before, rtx branch, rtx target,
+spu_emit_branch_hint (rtx_insn *before, rtx_insn *branch, rtx target,
int distance, sbitmap blocks)
{
rtx branch_label = 0;
- rtx hint;
- rtx insn;
+ rtx_insn *hint;
+ rtx_insn *insn;
rtx_jump_table_data *table;
if (before == 0 || branch == 0 || target == 0)
/* Returns 0 if we don't want a hint for this branch. Otherwise return
the rtx for the branch target. */
static rtx
-get_branch_target (rtx branch)
+get_branch_target (rtx_insn *branch)
{
if (JUMP_P (branch))
{
should only be used in a clobber, and this function searches for
insns which clobber it. */
static bool
-insn_clobbers_hbr (rtx insn)
+insn_clobbers_hbr (rtx_insn *insn)
{
if (INSN_P (insn)
&& GET_CODE (PATTERN (insn)) == PARALLEL)
and an hbrp within 16 instructions of FIRST.
*/
static void
-insert_hbrp_for_ilb_runout (rtx first)
+insert_hbrp_for_ilb_runout (rtx_insn *first)
{
- rtx insn, before_4 = 0, before_16 = 0;
+ rtx_insn *insn, *before_4 = 0, *before_16 = 0;
int addr = 0, length, first_addr = -1;
int hbrp_addr0 = 128 * 4, hbrp_addr1 = 128 * 4;
int insert_lnop_after = 0;
static void
insert_hbrp (void)
{
- rtx insn;
+ rtx_insn *insn;
if (TARGET_SAFE_HINTS)
{
shorten_branches (get_insns ());
{
sbitmap blocks;
basic_block bb;
- rtx branch, insn;
+ rtx_insn *branch, *insn;
rtx branch_target = 0;
int branch_addr = 0, insn_addr, required_dist = 0;
int i;
|| insn_clobbers_hbr (insn)
|| branch_addr - insn_addr > 600))
{
- rtx next = NEXT_INSN (insn);
+ rtx_insn *next = NEXT_INSN (insn);
int next_addr = INSN_ADDRESSES (INSN_UID (next));
if (insn != BB_END (bb)
&& branch_addr - next_addr >= required_dist)
/* If we haven't emitted a hint for this branch yet, it might
be profitable to emit it in one of the predecessor blocks,
especially for loops. */
- rtx bbend;
+ rtx_insn *bbend;
basic_block prev = 0, prop = 0, prev2 = 0;
int loop_exit = 0, simple_loop = 0;
int next_addr = INSN_ADDRESSES (INSN_UID (NEXT_INSN (insn)));
}
static int
-uses_ls_unit(rtx insn)
+uses_ls_unit(rtx_insn *insn)
{
rtx set = single_set (insn);
if (set != 0
}
static int
-get_pipe (rtx insn)
+get_pipe (rtx_insn *insn)
{
enum attr_type t;
/* Handle inline asm */
static int
spu_sched_variable_issue (FILE *file ATTRIBUTE_UNUSED,
- int verbose ATTRIBUTE_UNUSED, rtx insn, int more)
+ int verbose ATTRIBUTE_UNUSED,
+ rtx uncast_insn, int more)
{
int len;
int p;
+ rtx_insn *insn = as_a <rtx_insn *> (uncast_insn);
if (GET_CODE (PATTERN (insn)) == USE
|| GET_CODE (PATTERN (insn)) == CLOBBER
|| (len = get_attr_length (insn)) == 0)
/* INSN is dependent on DEP_INSN. */
static int
-spu_sched_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
+spu_sched_adjust_cost (rtx uncast_insn, rtx link, rtx uncast_dep_insn, int cost)
{
rtx set;
+ rtx_insn *insn = as_a <rtx_insn *> (uncast_insn);
+ rtx_insn *dep_insn = as_a <rtx_insn *> (uncast_dep_insn);
/* The blockage pattern is used to prevent instructions from being
moved across it and has no cost. */
rtx tag_eq_pack = gen_reg_rtx (V4SImode);
rtx tag_eq_pack_si = gen_reg_rtx (SImode);
rtx eq_index = gen_reg_rtx (SImode);
- rtx bcomp, hit_label, hit_ref, cont_label, insn;
+ rtx bcomp, hit_label, hit_ref, cont_label;
+ rtx_insn *insn;
if (spu_ea_model != 32)
{
for (i = 0; i < g->num_nodes; i++)
{
- rtx insn = g->nodes[i].insn;
+ rtx_insn *insn = g->nodes[i].insn;
int p = get_pipe (insn) + 2;
gcc_assert (p >= 0);
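
For readers new to this series: the "checked cast" named in the ChangeLog
is GCC's as_a <rtx_insn *> from gcc/is-a.h, which asserts at runtime that
an rtx really is an insn before yielding the stronger pointer type.  Below
is a minimal, self-contained sketch of the idiom; the types and helpers in
it are hypothetical stand-ins, not the real definitions from rtl.h and
is-a.h.

    /* Sketch only: miniature stand-ins for GCC's rtx_def base and its
       rtx_insn subclass.  The real classes live in gcc/rtl.h.  */

    #include <cassert>

    struct rtx_def
    {
      int code;		/* discriminator, like GET_CODE (x)  */
    };
    typedef rtx_def *rtx;

    struct rtx_insn : rtx_def { };

    enum { CODE_INSN = 1 };

    /* Mimics is_a <rtx_insn *> (x): a cheap runtime predicate.  */
    static bool
    is_a_insn (const rtx_def *x)
    {
      return x->code == CODE_INSN;
    }

    /* Mimics as_a <rtx_insn *> (x): assert the predicate holds, then
       hand back the stronger pointer type.  */
    static rtx_insn *
    as_a_insn (rtx x)
    {
      assert (is_a_insn (x));
      return static_cast <rtx_insn *> (x);
    }

    /* The idiom the ChangeLog describes: a hook whose published
       signature still takes a plain rtx renames its param
       "uncast_insn" and converts once, up front, so the body works
       with the stronger type throughout.  */
    static int
    sched_variable_issue_sketch (rtx uncast_insn, int more)
    {
      rtx_insn *insn = as_a_insn (uncast_insn);
      (void) insn;	/* the body would use insn's stronger type  */
      return more - 1;
    }

    int
    main ()
    {
      rtx_insn i;
      i.code = CODE_INSN;
      return sched_variable_issue_sketch (&i, 1);
    }

In the patch itself the conversion happens exactly once, at the hook
boundary (spu_sched_variable_issue, spu_sched_adjust_cost), so every later
use of "insn" and "dep_insn" is checked at compile time rather than relying
on callers to pass an insn.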