+2014-09-09 David Malcolm <dmalcolm@redhat.com>
+
+ * caller-save.c (saveinsn): Strengthen this variable from rtx
+ to rtx_insn *.
+ (restinsn): Likewise.
+ * config/aarch64/aarch64-protos.h (aarch64_simd_attr_length_move):
+ Likewise for param.
+ * config/aarch64/aarch64.c (aarch64_simd_attr_length_move):
+ Likewise.
+ * config/arc/arc-protos.h (arc_adjust_insn_length): Likewise for
+ first param.
+ (arc_hazard): Likewise for both params.
+ * config/arc/arc.c (arc600_corereg_hazard): Likewise, adding
+ checked casts to rtx_sequence * and uses of the insn method for
+ type-safety.
+ (arc_hazard): Strengthen both params from rtx to rtx_insn *.
+ (arc_adjust_insn_length): Likewise for param "insn".
+ (struct insn_length_parameters_s): Likewise for first param of
+ "get_variants" callback field.
+ (arc_get_insn_variants): Likewise for first param and local
+ "inner". Replace a check of GET_CODE with a dyn_cast to
+ rtx_sequence *, using methods for type-safety and clarity.
+ * config/arc/arc.h (ADJUST_INSN_LENGTH): Use checked casts to
+ rtx_sequence * and the insn method for type-safety when invoking
+ arc_adjust_insn_length.
+ * config/arm/arm-protos.h (arm_attr_length_move_neon): Likewise
+ for param.
+ (arm_address_offset_is_imm): Likewise.
+ (struct tune_params): Likewise for params 1 and 3 of the
+ "sched_adjust_cost" callback field.
+ * config/arm/arm.c (cortex_a9_sched_adjust_cost): Likewise for
+ params 1 and 3 ("insn" and "dep").
+ (xscale_sched_adjust_cost): Likewise.
+ (fa726te_sched_adjust_cost): Likewise.
+ (cortexa7_older_only): Likewise for param "insn".
+ (cortexa7_younger): Likewise.
+ (arm_attr_length_move_neon): Likewise.
+ (arm_address_offset_is_imm): Likewise.
+ * config/avr/avr-protos.h (avr_notice_update_cc): Likewise.
+ * config/avr/avr.c (avr_notice_update_cc): Likewise.
+ * config/bfin/bfin.c (hwloop_pattern_reg): Likewise.
+ (workaround_speculation): Likewise for local "last_condjump".
+ * config/c6x/c6x.c (shadow_p): Likewise for param "insn".
+ (shadow_or_blockage_p): Likewise.
+ (get_unit_reqs): Likewise.
+ (get_unit_operand_masks): Likewise.
+ (c6x_registers_update): Likewise.
+ (returning_call_p): Likewise.
+ (can_use_callp): Likewise.
+ (convert_to_callp): Likewise.
+ (find_last_same_clock): Likewise for local "t".
+ (reorg_split_calls): Likewise for local "shadow".
+ (hwloop_pattern_reg): Likewise for param "insn".
+ * config/frv/frv-protos.h (frv_final_prescan_insn): Likewise.
+ * config/frv/frv.c (frv_final_prescan_insn): Likewise.
+ (frv_extract_membar): Likewise.
+ (frv_optimize_membar_local): Strengthen param "last_membar" from
+ rtx * to rtx_insn **.
+ (frv_optimize_membar_global): Strengthen param "membar" from rtx
+ to rtx_insn *.
+ (frv_optimize_membar): Strengthen local "last_membar" from rtx *
+ to rtx_insn **.
+ * config/ia64/ia64-protos.h (ia64_st_address_bypass_p): Strengthen
+ both params from rtx to rtx_insn *.
+ (ia64_ld_address_bypass_p): Likewise.
+ * config/ia64/ia64.c (ia64_safe_itanium_class): Likewise for param
+ "insn".
+ (ia64_safe_type): Likewise.
+ (group_barrier_needed): Likewise.
+ (safe_group_barrier_needed): Likewise.
+ (ia64_single_set): Likewise.
+ (is_load_p): Likewise.
+ (record_memory_reference): Likewise.
+ (get_mode_no_for_insn): Likewise.
+ (important_for_bundling_p): Likewise.
+ (unknown_for_bundling_p): Likewise.
+ (ia64_st_address_bypass_p): Likewise for both params.
+ (ia64_ld_address_bypass_p): Likewise.
+ (expand_vselect): Introduce new local rtx_insn * "insn", using it
+ in place of rtx "x" after the emit_insn call.
+ * config/i386/i386-protos.h (x86_extended_QIreg_mentioned_p):
+ Strengthen param from rtx to rtx_insn *.
+ (ix86_agi_dependent): Likewise for both params.
+ (ix86_attr_length_immediate_default): Likewise for param 1.
+ (ix86_attr_length_address_default): Likewise for param.
+ (ix86_attr_length_vex_default): Likewise for param 1.
+ * config/i386/i386.c (ix86_attr_length_immediate_default):
+ Likewise for param "insn".
+ (ix86_attr_length_address_default): Likewise.
+ (ix86_attr_length_vex_default): Likewise.
+ (ix86_agi_dependent): Likewise for both params.
+ (x86_extended_QIreg_mentioned_p): Likewise for param "insn".
+ (vselect_insn): Likewise for this variable.
+ * config/m68k/m68k-protos.h (m68k_sched_attr_opx_type): Likewise
+ for param 1.
+ (m68k_sched_attr_opy_type): Likewise.
+ * config/m68k/m68k.c (sched_get_operand): Likewise.
+ (sched_attr_op_type): Likewise.
+ (m68k_sched_attr_opx_type): Likewise.
+ (m68k_sched_attr_opy_type): Likewise.
+ (sched_get_reg_operand): Likewise.
+ (sched_get_mem_operand): Likewise.
+ (m68k_sched_address_bypass_p): Likewise for both params.
+ (sched_get_indexed_address_scale): Likewise.
+ (m68k_sched_indexed_address_bypass_p): Likewise.
+ * config/m68k/m68k.h (m68k_sched_address_bypass_p): Likewise.
+ (m68k_sched_indexed_address_bypass_p): Likewise.
+ * config/mep/mep.c (mep_jmp_return_reorg): Strengthen locals
+ "label", "ret" from rtx to rtx_insn *, adding a checked cast and
+ removing another.
+ * config/mips/mips-protos.h (mips_linked_madd_p): Strengthen both
+ params from rtx to rtx_insn *.
+ (mips_fmadd_bypass): Likewise.
+ * config/mips/mips.c (mips_fmadd_bypass): Likewise.
+ (mips_linked_madd_p): Likewise.
+ (mips_macc_chains_last_hilo): Likewise for this variable.
+ (mips_macc_chains_record): Likewise for param.
+ (vr4130_last_insn): Likewise for this variable.
+ (vr4130_swap_insns_p): Likewise for both params.
+ (mips_ls2_variable_issue): Likewise for param.
+ (mips_need_noat_wrapper_p): Likewise for param "insn".
+ (mips_expand_vselect): Add a new local rtx_insn * "insn", using it
+ in place of "x" after the emit_insn call.
+ * config/pa/pa-protos.h (pa_fpstore_bypass_p): Strengthen both
+ params from rtx to rtx_insn *.
+ * config/pa/pa.c (pa_fpstore_bypass_p): Likewise.
+ (pa_combine_instructions): Introduce local "par" for the result
+ of gen_rtx_PARALLEL, moving the decl and usage of new_rtx to
+ after the call to make_insn_raw and strengthening it to
+ rtx_insn *.
+ (pa_can_combine_p): Strengthen param "new_rtx" from rtx to rtx_insn *.
+ * config/rl78/rl78.c (insn_ok_now): Likewise for param "insn".
+ (rl78_alloc_physical_registers_op1): Likewise.
+ (rl78_alloc_physical_registers_op2): Likewise.
+ (rl78_alloc_physical_registers_ro1): Likewise.
+ (rl78_alloc_physical_registers_cmp): Likewise.
+ (rl78_alloc_physical_registers_umul): Likewise.
+ (rl78_alloc_address_registers_macax): Likewise.
+ (rl78_alloc_physical_registers): Likewise for locals "insn", "curr".
+ * config/s390/predicates.md (execute_operation): Likewise for
+ local "insn".
+ * config/s390/s390-protos.h (s390_agen_dep_p): Likewise for both
+ params.
+ * config/s390/s390.c (s390_safe_attr_type): Likewise for param.
+ (addr_generation_dependency_p): Likewise for param "insn".
+ (s390_agen_dep_p): Likewise for both params.
+ (s390_fpload_toreg): Likewise for param "insn".
+ * config/sh/sh-protos.h (sh_loop_align): Likewise for param.
+ * config/sh/sh.c (sh_loop_align): Likewise for param and local
+ "next".
+ * config/sh/sh.md (define_peephole2): Likewise for local "insn2".
+ * config/sh/sh_treg_combine.cc
+ (sh_treg_combine::make_inv_ccreg_insn): Likewise for return type
+ and local "i".
+ (sh_treg_combine::try_eliminate_cstores): Likewise for local "i".
+ * config/stormy16/stormy16.c (combine_bnp): Likewise for locals
+ "and_insn", "load", "shift".
+ * config/tilegx/tilegx.c (match_pcrel_step2): Likewise for param
+ "insn".
+ * final.c (final_scan_insn): Introduce local rtx_insn * "other"
+ for XEXP (note, 0) of the REG_CC_SETTER note.
+ (cleanup_subreg_operands): Strengthen param "insn" from rtx to
+ rtx_insn *, eliminating a checked cast made redundant by this.
+ * gcse.c (process_insert_insn): Strengthen local "insn" from rtx
+ to rtx_insn *.
+ * genattr.c (main): When writing out the prototype for
+ const_num_delay_slots, strengthen the param from rtx to
+ rtx_insn *.
+ * genattrtab.c (write_const_num_delay_slots): Likewise when
+ writing out the implementation of const_num_delay_slots.
+ * hw-doloop.h (struct hw_doloop_hooks): Strengthen the param
+ "insn" of callback field "end_pattern_reg" from rtx to rtx_insn *.
+ * ifcvt.c (noce_emit_store_flag): Eliminate local rtx "tmp" in
+ favor of new rtx locals "src" and "set" and new rtx_insn * locals
+ "insn" and "seq".
+ (noce_emit_move_insn): Strengthen locals "seq" and "insn" from rtx
+ to rtx_insn *.
+ (noce_emit_cmove): Eliminate local rtx "tmp" in favor of new rtx
+ locals "cond", "if_then_else", "set" and new rtx_insn * locals
+ "insn" and "seq".
+ (noce_try_cmove_arith): Strengthen locals "insn_a", "insn_b" and
+ "last" from rtx to rtx_insn *. Likewise for a local "tmp",
+ renaming it to "tmp_insn". Eliminate the other local rtx "tmp"
+ from the top-level scope, replacing it with more tightly-scoped
+ rtx locals "reg", "pat", "mem", rtx_insn * locals "insn",
+ "copy_of_a", "new_insn", "copy_of_insn_b" and a new top-level
+ rtx_insn * "ifcvt_seq", and make local rtx "set" more
+ tightly-scoped.
+ * ira-int.h (ira_setup_alts): Strengthen param "insn" from rtx to
+ rtx_insn *.
+ * ira.c (setup_prohibited_mode_move_regs): Likewise for local
+ "move_insn".
+ (ira_setup_alts): Likewise for param "insn".
+ * lra-constraints.c (emit_inc): Likewise for local "add_insn".
+ * lra.c (emit_add3_insn): Split local rtx "insn" in two, an rtx
+ and an rtx_insn *.
+ (lra_emit_add): Eliminate top-level local rtx "insn" in favor of
+ new more-tightly scoped rtx locals "add3_insn", "insn",
+ "add2_insn" and rtx_insn * "move_insn".
+ * postreload-gcse.c (eliminate_partially_redundant_load): Add
+ checked cast on result of gen_move_insn when invoking
+ extract_insn.
+ * recog.c (insn_invalid_p): Strengthen param "insn" from rtx to
+ rtx_insn *.
+ (verify_changes): Add a checked cast on "object" when invoking
+ insn_invalid_p.
+ (extract_insn_cached): Strengthen param "insn" from rtx to
+ rtx_insn *.
+ (extract_constrain_insn_cached): Likewise.
+ (extract_insn): Likewise.
+ * recog.h (insn_invalid_p): Likewise for param 1.
+ (recog_memoized): Likewise for param.
+ (extract_insn): Likewise.
+ (extract_constrain_insn_cached): Likewise.
+ (extract_insn_cached): Likewise.
+ * reload.c (can_reload_into): Likewise for local "test_insn".
+ * reload.h (cleanup_subreg_operands): Likewise for param.
+ * reload1.c (emit_insn_if_valid_for_reload): Rename param from
+ "insn" to "pat", reintroducing "insn" as an rtx_insn * on the
+ result of emit_insn. Remove a checked cast made redundant by this
+ change.
+ * sel-sched-ir.c (sel_insn_rtx_cost): Strengthen param "insn" from
+ rtx to rtx_insn *.
+ * sel-sched.c (get_reg_class): Likewise.
+
2014-09-09 Marcus Shawcroft <marcus.shawcroft@arm.com>
Ramana Radhakrishnan <ramana.radhakrishnan@arm.com>
static GTY(()) rtx restpat;
static GTY(()) rtx test_reg;
static GTY(()) rtx test_mem;
-static GTY(()) rtx saveinsn;
-static GTY(()) rtx restinsn;
+static GTY(()) rtx_insn *saveinsn;
+static GTY(()) rtx_insn *restinsn;
/* Return the INSN_CODE used to save register REG in mode MODE. */
static int
enum machine_mode);
int aarch64_hard_regno_mode_ok (unsigned, enum machine_mode);
int aarch64_hard_regno_nregs (unsigned, enum machine_mode);
-int aarch64_simd_attr_length_move (rtx);
+int aarch64_simd_attr_length_move (rtx_insn *);
int aarch64_uxt_size (int, HOST_WIDE_INT);
rtx aarch64_final_eh_return_addr (void);
rtx aarch64_legitimize_reload_address (rtx *, enum machine_mode, int, int, int);
/* Compute and return the length of aarch64_simd_mov<mode>, where <mode> is
one of VSTRUCT modes: OI, CI or XI. */
int
-aarch64_simd_attr_length_move (rtx insn)
+aarch64_simd_attr_length_move (rtx_insn *insn)
{
enum machine_mode mode;
extern int arc_register_move_cost (enum machine_mode, enum reg_class,
enum reg_class);
extern rtx disi_highpart (rtx);
-extern int arc_adjust_insn_length (rtx, int, bool);
+extern int arc_adjust_insn_length (rtx_insn *, int, bool);
extern int arc_corereg_hazard (rtx, rtx);
-extern int arc_hazard (rtx, rtx);
+extern int arc_hazard (rtx_insn *, rtx_insn *);
extern int arc_write_ext_corereg (rtx);
extern rtx gen_acc1 (void);
extern rtx gen_acc2 (void);
between PRED and SUCC to prevent a hazard. */
static int
-arc600_corereg_hazard (rtx pred, rtx succ)
+arc600_corereg_hazard (rtx_insn *pred, rtx_insn *succ)
{
if (!TARGET_ARC600)
return 0;
if (recog_memoized (succ) == CODE_FOR_doloop_begin_i)
return 0;
if (GET_CODE (PATTERN (pred)) == SEQUENCE)
- pred = XVECEXP (PATTERN (pred), 0, 1);
+ pred = as_a <rtx_sequence *> (PATTERN (pred))->insn (1);
if (GET_CODE (PATTERN (succ)) == SEQUENCE)
- succ = XVECEXP (PATTERN (succ), 0, 0);
+ succ = as_a <rtx_sequence *> (PATTERN (succ))->insn (0);
if (recog_memoized (pred) == CODE_FOR_mulsi_600
|| recog_memoized (pred) == CODE_FOR_umul_600
|| recog_memoized (pred) == CODE_FOR_mac_600
between PRED and SUCC to prevent a hazard. */
int
-arc_hazard (rtx pred, rtx succ)
+arc_hazard (rtx_insn *pred, rtx_insn *succ)
{
if (!TARGET_ARC600)
return 0;
/* Return length adjustment for INSN. */
int
-arc_adjust_insn_length (rtx insn, int len, bool)
+arc_adjust_insn_length (rtx_insn *insn, int len, bool)
{
if (!INSN_P (insn))
return len;
int align_unit_log;
int align_base_log;
int max_variants;
- int (*get_variants) (rtx, int, bool, bool, insn_length_variant_t *);
+ int (*get_variants) (rtx_insn *, int, bool, bool, insn_length_variant_t *);
} insn_length_parameters_t;
static void
#endif
static int
-arc_get_insn_variants (rtx insn, int len, bool, bool target_p,
+arc_get_insn_variants (rtx_insn *insn, int len, bool, bool target_p,
insn_length_variant_t *ilv)
{
if (!NONDEBUG_INSN_P (insn))
get_variants mechanism, so turn this off for now. */
if (optimize_size)
return 0;
- if (GET_CODE (PATTERN (insn)) == SEQUENCE)
+ if (rtx_sequence *pat = dyn_cast <rtx_sequence *> (PATTERN (insn)))
{
/* The interaction of a short delay slot insn with a short branch is
too weird for shorten_branches to piece together, so describe the
entire SEQUENCE. */
- rtx pat, inner;
+ rtx_insn *inner;
if (TARGET_UPSIZE_DBR
- && get_attr_length (XVECEXP ((pat = PATTERN (insn)), 0, 1)) <= 2
- && (((type = get_attr_type (inner = XVECEXP (pat, 0, 0)))
+ && get_attr_length (XVECEXP (pat, 0, 1)) <= 2
+ && (((type = get_attr_type (inner = pat->insn (0)))
== TYPE_UNCOND_BRANCH)
|| type == TYPE_BRANCH)
&& get_attr_delay_slot_filled (inner) == DELAY_SLOT_FILLED_YES)
((LENGTH) \
= (GET_CODE (PATTERN (X)) == SEQUENCE \
? ((LENGTH) \
- + arc_adjust_insn_length (XVECEXP (PATTERN (X), 0, 0), \
+ + arc_adjust_insn_length (as_a <rtx_sequence *> (PATTERN (X))->insn (0), \
get_attr_length (XVECEXP (PATTERN (X), \
0, 0)), \
true) \
- get_attr_length (XVECEXP (PATTERN (X), 0, 0)) \
- + arc_adjust_insn_length (XVECEXP (PATTERN (X), 0, 1), \
+ + arc_adjust_insn_length (as_a <rtx_sequence *> (PATTERN (X))->insn (1), \
get_attr_length (XVECEXP (PATTERN (X), \
0, 1)), \
true) \
extern int arm_count_output_move_double_insns (rtx *);
extern const char *output_move_vfp (rtx *operands);
extern const char *output_move_neon (rtx *operands);
-extern int arm_attr_length_move_neon (rtx);
-extern int arm_address_offset_is_imm (rtx);
+extern int arm_attr_length_move_neon (rtx_insn *);
+extern int arm_address_offset_is_imm (rtx_insn *);
extern const char *output_add_immediate (rtx *);
extern const char *arithmetic_instr (rtx, int);
extern void output_ascii_pseudo_op (FILE *, const unsigned char *, int);
{
bool (*rtx_costs) (rtx, RTX_CODE, RTX_CODE, int *, bool);
const struct cpu_cost_table *insn_extra_cost;
- bool (*sched_adjust_cost) (rtx, rtx, rtx, int *);
+ bool (*sched_adjust_cost) (rtx_insn *, rtx, rtx_insn *, int *);
int constant_limit;
/* Maximum number of instructions to conditionalise. */
int max_insns_skipped;
static void arm_trampoline_init (rtx, tree, rtx);
static rtx arm_trampoline_adjust_address (rtx);
static rtx arm_pic_static_addr (rtx orig, rtx reg);
-static bool cortex_a9_sched_adjust_cost (rtx, rtx, rtx, int *);
-static bool xscale_sched_adjust_cost (rtx, rtx, rtx, int *);
-static bool fa726te_sched_adjust_cost (rtx, rtx, rtx, int *);
+static bool cortex_a9_sched_adjust_cost (rtx_insn *, rtx, rtx_insn *, int *);
+static bool xscale_sched_adjust_cost (rtx_insn *, rtx, rtx_insn *, int *);
+static bool fa726te_sched_adjust_cost (rtx_insn *, rtx, rtx_insn *, int *);
static bool arm_array_mode_supported_p (enum machine_mode,
unsigned HOST_WIDE_INT);
static enum machine_mode arm_preferred_simd_mode (enum machine_mode);
/* Adjust cost hook for XScale. */
static bool
-xscale_sched_adjust_cost (rtx insn, rtx link, rtx dep, int * cost)
+xscale_sched_adjust_cost (rtx_insn *insn, rtx link, rtx_insn *dep, int * cost)
{
/* Some true dependencies can have a higher cost depending
on precisely how certain input operands are used. */
/* Adjust cost hook for Cortex A9. */
static bool
-cortex_a9_sched_adjust_cost (rtx insn, rtx link, rtx dep, int * cost)
+cortex_a9_sched_adjust_cost (rtx_insn *insn, rtx link, rtx_insn *dep, int * cost)
{
switch (REG_NOTE_KIND (link))
{
/* Adjust cost hook for FA726TE. */
static bool
-fa726te_sched_adjust_cost (rtx insn, rtx link, rtx dep, int * cost)
+fa726te_sched_adjust_cost (rtx_insn *insn, rtx link, rtx_insn *dep, int * cost)
{
/* For FA726TE, true dependency on CPSR (i.e. set cond followed by predicated)
have penalty of 3. */
/* Return true if and only if this insn can dual-issue only as older. */
static bool
-cortexa7_older_only (rtx insn)
+cortexa7_older_only (rtx_insn *insn)
{
if (recog_memoized (insn) < 0)
return false;
/* Return true if and only if this insn can dual-issue as younger. */
static bool
-cortexa7_younger (FILE *file, int verbose, rtx insn)
+cortexa7_younger (FILE *file, int verbose, rtx_insn *insn)
{
if (recog_memoized (insn) < 0)
{
/* Compute and return the length of neon_mov<mode>, where <mode> is
one of VSTRUCT modes: EI, OI, CI or XI. */
int
-arm_attr_length_move_neon (rtx insn)
+arm_attr_length_move_neon (rtx_insn *insn)
{
rtx reg, mem, addr;
int load;
return zero. */
int
-arm_address_offset_is_imm (rtx insn)
+arm_address_offset_is_imm (rtx_insn *insn)
{
rtx mem, addr;
extern const char* output_reload_insisf (rtx*, rtx, int*);
extern const char* avr_out_reload_inpsi (rtx*, rtx, int*);
extern const char* avr_out_lpm (rtx_insn *, rtx*, int*);
-extern void avr_notice_update_cc (rtx body, rtx insn);
+extern void avr_notice_update_cc (rtx body, rtx_insn *insn);
extern int reg_unused_after (rtx_insn *insn, rtx reg);
extern int _reg_unused_after (rtx_insn *insn, rtx reg);
extern int avr_jump_mode (rtx x, rtx_insn *insn);
/* Update the condition code in the INSN. */
void
-avr_notice_update_cc (rtx body ATTRIBUTE_UNUSED, rtx insn)
+avr_notice_update_cc (rtx body ATTRIBUTE_UNUSED, rtx_insn *insn)
{
rtx set;
enum attr_cc cc = get_attr_cc (insn);
loop counter. Otherwise, return NULL_RTX. */
static rtx
-hwloop_pattern_reg (rtx insn)
+hwloop_pattern_reg (rtx_insn *insn)
{
rtx reg;
workaround_speculation (void)
{
rtx_insn *insn, *next;
- rtx last_condjump = NULL_RTX;
+ rtx_insn *last_condjump = NULL;
int cycles_since_jump = INT_MAX;
int delay_added = 0;
/* Return true iff INSN is a shadow pattern. */
static bool
-shadow_p (rtx insn)
+shadow_p (rtx_insn *insn)
{
if (!NONDEBUG_INSN_P (insn) || recog_memoized (insn) < 0)
return false;
/* Return true iff INSN is a shadow or blockage pattern. */
static bool
-shadow_or_blockage_p (rtx insn)
+shadow_or_blockage_p (rtx_insn *insn)
{
enum attr_type type;
if (!NONDEBUG_INSN_P (insn) || recog_memoized (insn) < 0)
instructions reservation, e.g. UNIT_REQ_DL. REQ2 is used to either
describe a cross path, or for loads/stores, the T unit. */
static int
-get_unit_reqs (rtx insn, int *req1, int *side1, int *req2, int *side2)
+get_unit_reqs (rtx_insn *insn, int *req1, int *side1, int *req2, int *side2)
{
enum attr_units units;
enum attr_cross cross;
found by get_unit_reqs. Return true if we did this successfully, false
if we couldn't identify what to do with INSN. */
static bool
-get_unit_operand_masks (rtx insn, unsigned int *pmask1, unsigned int *pmask2)
+get_unit_operand_masks (rtx_insn *insn, unsigned int *pmask1,
+ unsigned int *pmask2)
{
enum attr_op_pattern op_pat;
next cycle. */
static bool
-c6x_registers_update (rtx insn)
+c6x_registers_update (rtx_insn *insn)
{
enum attr_cross cross;
enum attr_dest_regfile destrf;
placed. */
static bool
-returning_call_p (rtx insn)
+returning_call_p (rtx_insn *insn)
{
if (CALL_P (insn))
return (!SIBLING_CALL_P (insn)
/* Determine whether INSN's pattern can be converted to use callp. */
static bool
-can_use_callp (rtx insn)
+can_use_callp (rtx_insn *insn)
{
int icode = recog_memoized (insn);
if (!TARGET_INSNS_64PLUS
/* Convert the pattern of INSN, which must be a CALL_INSN, into a callp. */
static void
-convert_to_callp (rtx insn)
+convert_to_callp (rtx_insn *insn)
{
rtx lab;
extract_insn (insn);
find_last_same_clock (rtx insn)
{
rtx retval = insn;
- rtx t = next_real_insn (insn);
+ rtx_insn *t = next_real_insn (insn);
while (t && GET_MODE (t) != TImode)
{
/* Find the first insn of the next execute packet. If it
is the shadow insn corresponding to this call, we may
use a CALLP insn. */
- rtx shadow = next_nonnote_nondebug_insn (last_same_clock);
+ rtx_insn *shadow =
+ next_nonnote_nondebug_insn (last_same_clock);
if (CALL_P (shadow)
&& insn_get_clock (shadow) == this_clock + 5)
loop counter. Otherwise, return NULL_RTX. */
static rtx
-hwloop_pattern_reg (rtx insn)
+hwloop_pattern_reg (rtx_insn *insn)
{
rtx pat, reg;
extern rtx frv_index_memory (rtx, enum machine_mode, int);
extern const char *frv_asm_output_opcode
(FILE *, const char *);
-extern void frv_final_prescan_insn (rtx, rtx *, int);
+extern void frv_final_prescan_insn (rtx_insn *, rtx *, int);
extern void frv_emit_move (enum machine_mode, rtx, rtx);
extern int frv_emit_movsi (rtx, rtx);
extern const char *output_move_single (rtx *, rtx);
function is not called for asm insns. */
void
-frv_final_prescan_insn (rtx insn, rtx *opvec,
+frv_final_prescan_insn (rtx_insn *insn, rtx *opvec,
int noperands ATTRIBUTE_UNUSED)
{
if (INSN_P (insn))
membar instruction INSN. */
static void
-frv_extract_membar (struct frv_io *io, rtx insn)
+frv_extract_membar (struct frv_io *io, rtx_insn *insn)
{
extract_insn (insn);
io->type = (enum frv_io_type) INTVAL (recog_data.operand[2]);
static void
frv_optimize_membar_local (basic_block bb, struct frv_io *next_io,
- rtx *last_membar)
+ rtx_insn **last_membar)
{
HARD_REG_SET used_regs;
rtx next_membar, set;
static void
frv_optimize_membar_global (basic_block bb, struct frv_io *first_io,
- rtx membar)
+ rtx_insn *membar)
{
struct frv_io this_io, next_io;
edge succ;
{
basic_block bb;
struct frv_io *first_io;
- rtx *last_membar;
+ rtx_insn **last_membar;
compute_bb_for_insn ();
first_io = XCNEWVEC (struct frv_io, last_basic_block_for_fn (cfun));
- last_membar = XCNEWVEC (rtx, last_basic_block_for_fn (cfun));
+ last_membar = XCNEWVEC (rtx_insn *, last_basic_block_for_fn (cfun));
FOR_EACH_BB_FN (bb, cfun)
frv_optimize_membar_local (bb, &first_io[bb->index],
extern const char *standard_sse_constant_opcode (rtx, rtx);
extern bool symbolic_reference_mentioned_p (rtx);
extern bool extended_reg_mentioned_p (rtx);
-extern bool x86_extended_QIreg_mentioned_p (rtx);
+extern bool x86_extended_QIreg_mentioned_p (rtx_insn *);
extern bool x86_extended_reg_mentioned_p (rtx);
extern bool x86_maybe_negate_const_int (rtx *, enum machine_mode);
extern enum machine_mode ix86_cc_mode (enum rtx_code, rtx, rtx);
extern bool ix86_lea_for_add_ok (rtx_insn *, rtx[]);
extern bool ix86_vec_interleave_v2df_operator_ok (rtx operands[3], bool high);
extern bool ix86_dep_by_shift_count (const_rtx set_insn, const_rtx use_insn);
-extern bool ix86_agi_dependent (rtx set_insn, rtx use_insn);
+extern bool ix86_agi_dependent (rtx_insn *set_insn, rtx_insn *use_insn);
extern void ix86_expand_unary_operator (enum rtx_code, enum machine_mode,
rtx[]);
extern rtx ix86_build_const_vector (enum machine_mode, bool, rtx);
extern bool ix86_emit_cfi ();
extern rtx assign_386_stack_local (enum machine_mode, enum ix86_stack_slot);
-extern int ix86_attr_length_immediate_default (rtx, bool);
-extern int ix86_attr_length_address_default (rtx);
-extern int ix86_attr_length_vex_default (rtx, bool, bool);
+extern int ix86_attr_length_immediate_default (rtx_insn *, bool);
+extern int ix86_attr_length_address_default (rtx_insn *);
+extern int ix86_attr_length_vex_default (rtx_insn *, bool, bool);
extern enum machine_mode ix86_fp_compare_mode (enum rtx_code);
/* Compute default value for "length_immediate" attribute. When SHORTFORM
is set, expect that insn have 8bit immediate alternative. */
int
-ix86_attr_length_immediate_default (rtx insn, bool shortform)
+ix86_attr_length_immediate_default (rtx_insn *insn, bool shortform)
{
int len = 0;
int i;
/* Compute default value for "length_address" attribute. */
int
-ix86_attr_length_address_default (rtx insn)
+ix86_attr_length_address_default (rtx_insn *insn)
{
int i;
2 or 3 byte VEX prefix and 1 opcode byte. */
int
-ix86_attr_length_vex_default (rtx insn, bool has_0f_opcode, bool has_vex_w)
+ix86_attr_length_vex_default (rtx_insn *insn, bool has_0f_opcode,
+ bool has_vex_w)
{
int i;
SET_INSN. */
bool
-ix86_agi_dependent (rtx set_insn, rtx use_insn)
+ix86_agi_dependent (rtx_insn *set_insn, rtx_insn *use_insn)
{
int i;
extract_insn_cached (use_insn);
/* Return nonzero when QImode register that must be represented via REX prefix
is used. */
bool
-x86_extended_QIreg_mentioned_p (rtx insn)
+x86_extended_QIreg_mentioned_p (rtx_insn *insn)
{
int i;
extract_insn_cached (insn);
insn, so that expand_vselect{,_vconcat} doesn't have to create a fresh
insn every time. */
-static GTY(()) rtx vselect_insn;
+static GTY(()) rtx_insn *vselect_insn;
/* Initialize vselect_insn. */
extern int bundling_p;
#ifdef RTX_CODE
-extern int ia64_st_address_bypass_p (rtx, rtx);
-extern int ia64_ld_address_bypass_p (rtx, rtx);
+extern int ia64_st_address_bypass_p (rtx_insn *, rtx_insn *);
+extern int ia64_ld_address_bypass_p (rtx_insn *, rtx_insn *);
extern int ia64_produce_address_p (rtx);
extern rtx ia64_expand_move (rtx, rtx);
static int get_template (state_t, int);
static rtx_insn *get_next_important_insn (rtx_insn *, rtx_insn *);
-static bool important_for_bundling_p (rtx);
-static bool unknown_for_bundling_p (rtx);
+static bool important_for_bundling_p (rtx_insn *);
+static bool unknown_for_bundling_p (rtx_insn *);
static void bundling (FILE *, int, rtx_insn *, rtx_insn *);
static void ia64_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
return ggc_cleared_alloc<machine_function> ();
}
\f
-static enum attr_itanium_class ia64_safe_itanium_class (rtx);
-static enum attr_type ia64_safe_type (rtx);
+static enum attr_itanium_class ia64_safe_itanium_class (rtx_insn *);
+static enum attr_type ia64_safe_type (rtx_insn *);
static enum attr_itanium_class
-ia64_safe_itanium_class (rtx insn)
+ia64_safe_itanium_class (rtx_insn *insn)
{
if (recog_memoized (insn) >= 0)
return get_attr_itanium_class (insn);
}
static enum attr_type
-ia64_safe_type (rtx insn)
+ia64_safe_type (rtx_insn *insn)
{
if (recog_memoized (insn) >= 0)
return get_attr_type (insn);
static int set_src_needs_barrier (rtx, struct reg_flags, int);
static int rtx_needs_barrier (rtx, struct reg_flags, int);
static void init_insn_group_barriers (void);
-static int group_barrier_needed (rtx);
-static int safe_group_barrier_needed (rtx);
+static int group_barrier_needed (rtx_insn *);
+static int safe_group_barrier_needed (rtx_insn *);
static int in_safe_group_barrier;
/* Update *RWS for REGNO, which is being written by the current instruction,
include the effects of INSN as a side-effect. */
static int
-group_barrier_needed (rtx insn)
+group_barrier_needed (rtx_insn *insn)
{
rtx pat;
int need_barrier = 0;
/* Like group_barrier_needed, but do not clobber the current state. */
static int
-safe_group_barrier_needed (rtx insn)
+safe_group_barrier_needed (rtx_insn *insn)
{
int saved_first_instruction;
int t;
/* Number of current processor cycle (from scheduler's point of view). */
static int current_cycle;
-static rtx ia64_single_set (rtx);
+static rtx ia64_single_set (rtx_insn *);
static void ia64_emit_insn_before (rtx, rtx);
/* Map a bundle number to its pseudo-op. */
/* Helper function - like single_set, but look inside COND_EXEC. */
static rtx
-ia64_single_set (rtx insn)
+ia64_single_set (rtx_insn *insn)
{
rtx x = PATTERN (insn), ret;
if (GET_CODE (x) == COND_EXEC)
/* Return TRUE if INSN is a load (either normal or speculative, but not a
speculation check), FALSE otherwise. */
static bool
-is_load_p (rtx insn)
+is_load_p (rtx_insn *insn)
{
enum attr_itanium_class insn_class = ia64_safe_itanium_class (insn);
Itanium 2 Reference Manual for Software Development and Optimization,
6.7.3.1). */
static void
-record_memory_reference (rtx insn)
+record_memory_reference (rtx_insn *insn)
{
enum attr_itanium_class insn_class = ia64_safe_itanium_class (insn);
/* If INSN is an appropriate load return its mode.
Return -1 otherwise. */
static int
-get_mode_no_for_insn (rtx insn)
+get_mode_no_for_insn (rtx_insn *insn)
{
rtx reg, mem, mode_rtx;
int mode_no;
/* True when INSN is important for bundling. */
static bool
-important_for_bundling_p (rtx insn)
+important_for_bundling_p (rtx_insn *insn)
{
return (INSN_P (insn)
&& ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
/* True when INSN is unknown, but important, for bundling. */
static bool
-unknown_for_bundling_p (rtx insn)
+unknown_for_bundling_p (rtx_insn *insn)
{
return (INSN_P (insn)
&& ia64_safe_itanium_class (insn) == ITANIUM_CLASS_UNKNOWN
ld) produces address for CONSUMER (of type st or stf). */
int
-ia64_st_address_bypass_p (rtx producer, rtx consumer)
+ia64_st_address_bypass_p (rtx_insn *producer, rtx_insn *consumer)
{
rtx dest, reg, mem;
ld) produces address for CONSUMER (of type ld or fld). */
int
-ia64_ld_address_bypass_p (rtx producer, rtx consumer)
+ia64_ld_address_bypass_p (rtx_insn *producer, rtx_insn *consumer)
{
rtx dest, src, reg, mem;
x = gen_rtx_VEC_SELECT (GET_MODE (target), op0, x);
x = gen_rtx_SET (VOIDmode, target, x);
- x = emit_insn (x);
- if (recog_memoized (x) < 0)
+ rtx_insn *insn = emit_insn (x);
+ if (recog_memoized (insn) < 0)
{
- remove_insn (x);
+ remove_insn (insn);
return false;
}
return true;
extern enum attr_cpu m68k_sched_cpu;
extern enum attr_mac m68k_sched_mac;
-extern enum attr_opx_type m68k_sched_attr_opx_type (rtx, int);
-extern enum attr_opy_type m68k_sched_attr_opy_type (rtx, int);
+extern enum attr_opx_type m68k_sched_attr_opx_type (rtx_insn *, int);
+extern enum attr_opy_type m68k_sched_attr_opy_type (rtx_insn *, int);
extern enum attr_size m68k_sched_attr_size (rtx);
extern enum attr_op_mem m68k_sched_attr_op_mem (rtx);
#endif /* HAVE_ATTR_cpu */
/* Return X or Y (depending on OPX_P) operand of INSN. */
static rtx
-sched_get_operand (rtx insn, bool opx_p)
+sched_get_operand (rtx_insn *insn, bool opx_p)
{
int i;
/* Return type of INSN's operand X (if OPX_P) or operand Y (if !OPX_P).
If ADDRESS_P is true, return type of memory location operand refers to. */
static enum attr_op_type
-sched_attr_op_type (rtx insn, bool opx_p, bool address_p)
+sched_attr_op_type (rtx_insn *insn, bool opx_p, bool address_p)
{
rtx op;
Return type of INSN's operand X.
If ADDRESS_P is true, return type of memory location operand refers to. */
enum attr_opx_type
-m68k_sched_attr_opx_type (rtx insn, int address_p)
+m68k_sched_attr_opx_type (rtx_insn *insn, int address_p)
{
switch (sched_attr_op_type (insn, true, address_p != 0))
{
Return type of INSN's operand Y.
If ADDRESS_P is true, return type of memory location operand refers to. */
enum attr_opy_type
-m68k_sched_attr_opy_type (rtx insn, int address_p)
+m68k_sched_attr_opy_type (rtx_insn *insn, int address_p)
{
switch (sched_attr_op_type (insn, false, address_p != 0))
{
/* Return X or Y (depending on OPX_P) operand of INSN,
if it is an integer register, or NULL overwise. */
static rtx
-sched_get_reg_operand (rtx insn, bool opx_p)
+sched_get_reg_operand (rtx_insn *insn, bool opx_p)
{
rtx op = NULL;
/* Return X or Y (depending on OPX_P) operand of INSN,
if it is a MEM, or NULL overwise. */
static rtx
-sched_get_mem_operand (rtx insn, bool must_read_p, bool must_write_p)
+sched_get_mem_operand (rtx_insn *insn, bool must_read_p, bool must_write_p)
{
bool opx_p;
bool opy_p;
/* Return non-zero if PRO modifies register used as part of
address in CON. */
int
-m68k_sched_address_bypass_p (rtx pro, rtx con)
+m68k_sched_address_bypass_p (rtx_insn *pro, rtx_insn *con)
{
rtx pro_x;
rtx con_mem_read;
if PRO modifies register used as index in CON,
return scale of indexed memory access in CON. Return zero overwise. */
static int
-sched_get_indexed_address_scale (rtx pro, rtx con)
+sched_get_indexed_address_scale (rtx_insn *pro, rtx_insn *con)
{
rtx reg;
rtx mem;
/* Return non-zero if PRO modifies register used
as index with scale 2 or 4 in CON. */
int
-m68k_sched_indexed_address_bypass_p (rtx pro, rtx con)
+m68k_sched_indexed_address_bypass_p (rtx_insn *pro, rtx_insn *con)
{
gcc_assert (sched_cfv4_bypass_data.pro == NULL
&& sched_cfv4_bypass_data.con == NULL
extern void m68k_emit_move_double (rtx [2]);
-extern int m68k_sched_address_bypass_p (rtx, rtx);
-extern int m68k_sched_indexed_address_bypass_p (rtx, rtx);
+extern int m68k_sched_address_bypass_p (rtx_insn *, rtx_insn *);
+extern int m68k_sched_indexed_address_bypass_p (rtx_insn *, rtx_insn *);
#define CPU_UNITS_QUERY 1
static void
mep_jmp_return_reorg (rtx_insn *insns)
{
- rtx_insn *insn;
- rtx label, ret;
+ rtx_insn *insn, *label, *ret;
int ret_code;
for (insn = insns; insn; insn = NEXT_INSN (insn))
if (simplejump_p (insn))
{
/* Find the fist real insn the jump jumps to. */
- label = ret = JUMP_LABEL (insn);
+ label = ret = safe_as_a <rtx_insn *> (JUMP_LABEL (insn));
while (ret
&& (NOTE_P (ret)
|| LABEL_P (ret)
|| GET_CODE (PATTERN (ret)) == USE))
- ret = NEXT_INSN (as_a <rtx_insn *> (ret));
+ ret = NEXT_INSN (ret);
if (ret)
{
extern const char *mips_output_division (const char *, rtx *);
extern const char *mips_output_probe_stack_range (rtx, rtx);
extern unsigned int mips_hard_regno_nregs (int, enum machine_mode);
-extern bool mips_linked_madd_p (rtx, rtx);
+extern bool mips_linked_madd_p (rtx_insn *, rtx_insn *);
extern bool mips_store_data_bypass_p (rtx, rtx);
extern int mips_dspalu_bypass_p (rtx, rtx);
extern rtx mips_prefetch_cookie (rtx, rtx);
extern bool mask_low_and_shift_p (enum machine_mode, rtx, rtx, int);
extern int mask_low_and_shift_len (enum machine_mode, rtx, rtx);
extern bool and_operands_ok (enum machine_mode, rtx, rtx);
-extern bool mips_fmadd_bypass (rtx, rtx);
+extern bool mips_fmadd_bypass (rtx_insn *, rtx_insn *);
union mips_gen_fn_ptrs
{
madd.s a, dst, b, c */
bool
-mips_fmadd_bypass (rtx out_insn, rtx in_insn)
+mips_fmadd_bypass (rtx_insn *out_insn, rtx_insn *in_insn)
{
int dst_reg, src_reg;
instruction and if OUT_INSN assigns to the accumulator operand. */
bool
-mips_linked_madd_p (rtx out_insn, rtx in_insn)
+mips_linked_madd_p (rtx_insn *out_insn, rtx_insn *in_insn)
{
enum attr_accum_in accum_in;
int accum_in_opnum;
\f
/* Used by TUNE_MACC_CHAINS to record the last scheduled instruction
that may clobber hi or lo. */
-static rtx mips_macc_chains_last_hilo;
+static rtx_insn *mips_macc_chains_last_hilo;
/* A TUNE_MACC_CHAINS helper function. Record that instruction INSN has
been scheduled, updating mips_macc_chains_last_hilo appropriately. */
static void
-mips_macc_chains_record (rtx insn)
+mips_macc_chains_record (rtx_insn *insn)
{
if (get_attr_may_clobber_hilo (insn))
mips_macc_chains_last_hilo = insn;
}
\f
/* The last instruction to be scheduled. */
-static rtx vr4130_last_insn;
+static rtx_insn *vr4130_last_insn;
/* A note_stores callback used by vr4130_true_reg_dependence_p. DATA
points to an rtx that is initially an instruction. Nullify the rtx
alignment than (INSN1, INSN2). See 4130.md for more details. */
static bool
-vr4130_swap_insns_p (rtx insn1, rtx insn2)
+vr4130_swap_insns_p (rtx_insn *insn1, rtx_insn *insn2)
{
sd_iterator_def sd_it;
dep_t dep;
/* Update round-robin counters for ALU1/2 and FALU1/2. */
static void
-mips_ls2_variable_issue (rtx insn)
+mips_ls2_variable_issue (rtx_insn *insn)
{
if (mips_ls2.alu1_turn_p)
{
INSN has NOPERANDS operands, stored in OPVEC. */
static bool
-mips_need_noat_wrapper_p (rtx insn, rtx *opvec, int noperands)
+mips_need_noat_wrapper_p (rtx_insn *insn, rtx *opvec, int noperands)
{
int i;
const unsigned char *perm, unsigned nelt)
{
rtx rperm[MAX_VECT_LEN], x;
+ rtx_insn *insn;
unsigned i;
for (i = 0; i < nelt; ++i)
x = gen_rtx_VEC_SELECT (GET_MODE (target), op0, x);
x = gen_rtx_SET (VOIDmode, target, x);
- x = emit_insn (x);
- if (recog_memoized (x) < 0)
+ insn = emit_insn (x);
+ if (recog_memoized (insn) < 0)
{
- remove_insn (x);
+ remove_insn (insn);
return false;
}
return true;
extern int pa_emit_hpdiv_const (rtx *, int);
extern int pa_is_function_label_plus_const (rtx);
extern int pa_jump_in_call_delay (rtx_insn *);
-extern int pa_fpstore_bypass_p (rtx, rtx);
+extern int pa_fpstore_bypass_p (rtx_insn *, rtx_insn *);
extern int pa_attr_length_millicode_call (rtx_insn *);
extern int pa_attr_length_call (rtx_insn *, int);
extern int pa_attr_length_indirect_call (rtx_insn *);
/* Return nonzero if there is a bypass for the output of
OUT_INSN and the fp store IN_INSN. */
int
-pa_fpstore_bypass_p (rtx out_insn, rtx in_insn)
+pa_fpstore_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
{
enum machine_mode store_mode;
enum machine_mode other_mode;
static inline rtx force_mode (enum machine_mode, rtx);
static void pa_reorg (void);
static void pa_combine_instructions (void);
-static int pa_can_combine_p (rtx, rtx_insn *, rtx_insn *, int, rtx, rtx, rtx);
+static int pa_can_combine_p (rtx_insn *, rtx_insn *, rtx_insn *, int, rtx,
+ rtx, rtx);
static bool forward_branch_p (rtx_insn *);
static void compute_zdepwi_operands (unsigned HOST_WIDE_INT, unsigned *);
static void compute_zdepdi_operands (unsigned HOST_WIDE_INT, unsigned *);
pa_combine_instructions (void)
{
rtx_insn *anchor;
- rtx new_rtx;
/* This can get expensive since the basic algorithm is on the
order of O(n^2) (or worse). Only do it for -O2 or higher
may be combined with "floating" insns. As the name implies,
"anchor" instructions don't move, while "floating" insns may
move around. */
- new_rtx = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, NULL_RTX, NULL_RTX));
- new_rtx = make_insn_raw (new_rtx);
+ rtx par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, NULL_RTX, NULL_RTX));
+ rtx_insn *new_rtx = make_insn_raw (par);
for (anchor = get_insns (); anchor; anchor = NEXT_INSN (anchor))
{
}
static int
-pa_can_combine_p (rtx new_rtx, rtx_insn *anchor, rtx_insn *floater,
+pa_can_combine_p (rtx_insn *new_rtx, rtx_insn *anchor, rtx_insn *floater,
int reversed, rtx dest,
rtx src1, rtx src2)
{
carefully to ensure that all the constraint information is accurate
for the newly matched insn. */
static bool
-insn_ok_now (rtx insn)
+insn_ok_now (rtx_insn *insn)
{
rtx pattern = PATTERN (insn);
int i;
/* Devirtualize an insn of the form (SET (op) (unop (op))). */
static void
-rl78_alloc_physical_registers_op1 (rtx insn)
+rl78_alloc_physical_registers_op1 (rtx_insn *insn)
{
/* op[0] = func op[1] */
/* Devirtualize an insn of the form (SET (op) (binop (op) (op))). */
static void
-rl78_alloc_physical_registers_op2 (rtx insn)
+rl78_alloc_physical_registers_op2 (rtx_insn *insn)
{
rtx prev;
rtx first;
/* Devirtualize an insn of the form SET (PC) (MEM/REG). */
static void
-rl78_alloc_physical_registers_ro1 (rtx insn)
+rl78_alloc_physical_registers_ro1 (rtx_insn *insn)
{
OP (0) = transcode_memory_rtx (OP (0), BC, insn);
/* Devirtualize a compare insn. */
static void
-rl78_alloc_physical_registers_cmp (rtx insn)
+rl78_alloc_physical_registers_cmp (rtx_insn *insn)
{
int tmp_id;
rtx saved_op1;
/* Like op2, but AX = A * X. */
static void
-rl78_alloc_physical_registers_umul (rtx insn)
+rl78_alloc_physical_registers_umul (rtx_insn *insn)
{
rtx prev = prev_nonnote_nondebug_insn (insn);
rtx first;
}
static void
-rl78_alloc_address_registers_macax (rtx insn)
+rl78_alloc_address_registers_macax (rtx_insn *insn)
{
int which, op;
bool replace_in_op0 = false;
registers. At this point, we need to assign physical registers
to the vitual ones, and copy in/out as needed. */
- rtx insn, curr;
+ rtx_insn *insn, *curr;
enum attr_valloc valloc_method;
for (insn = get_insns (); insn; insn = curr)
(match_code "parallel")
{
rtx pattern = op;
- rtx insn;
+ rtx_insn *insn;
int icode;
/* This is redundant but since this predicate is evaluated
extern void print_operand (FILE *, rtx, int);
extern void s390_output_pool_entry (rtx, enum machine_mode, unsigned int);
extern int s390_label_align (rtx);
-extern int s390_agen_dep_p (rtx, rtx);
+extern int s390_agen_dep_p (rtx_insn *, rtx_insn *);
extern rtx_insn *s390_load_got (void);
extern rtx s390_get_thread_pointer (void);
extern void s390_emit_tpf_eh_return (rtx);
/* Return attribute type of insn. */
static enum attr_type
-s390_safe_attr_type (rtx insn)
+s390_safe_attr_type (rtx_insn *insn)
{
if (recog_memoized (insn) >= 0)
return get_attr_type (insn);
used by instruction INSN to address memory. */
static bool
-addr_generation_dependency_p (rtx dep_rtx, rtx insn)
+addr_generation_dependency_p (rtx dep_rtx, rtx_insn *insn)
{
rtx target, pat;
/* Return 1, if dep_insn sets register used in insn in the agen unit. */
int
-s390_agen_dep_p (rtx dep_insn, rtx insn)
+s390_agen_dep_p (rtx_insn *dep_insn, rtx_insn *insn)
{
rtx dep_rtx = PATTERN (dep_insn);
int i;
/* Return true if INSN is a fp load insn writing register REGNO. */
static inline bool
-s390_fpload_toreg (rtx insn, unsigned int regno)
+s390_fpload_toreg (rtx_insn *insn, unsigned int regno)
{
rtx set;
enum attr_type flag = s390_safe_attr_type (insn);
extern rtx sfunc_uses_reg (rtx);
extern int barrier_align (rtx_insn *);
-extern int sh_loop_align (rtx);
+extern int sh_loop_align (rtx_insn *);
extern bool fp_zero_operand (rtx);
extern bool fp_one_operand (rtx);
extern rtx get_fpscr_rtx (void);
Applying loop alignment to small constant or switch tables is a waste
of space, so we suppress this too. */
int
-sh_loop_align (rtx label)
+sh_loop_align (rtx_insn *label)
{
- rtx next = label;
+ rtx_insn *next = label;
if (! optimize || optimize_size)
return 0;
[(set (match_dup 0) (match_dup 3))
(set (match_dup 4) (match_dup 5))]
{
- rtx set1, set2, insn2;
+ rtx set1, set2;
+ rtx_insn *insn2;
rtx replacements[4];
/* We want to replace occurrences of operands[0] with operands[1] and
rtx make_not_reg_insn (rtx dst_reg, rtx src_reg) const;
// Create an insn rtx that inverts the ccreg.
- rtx make_inv_ccreg_insn (void) const;
+ rtx_insn *make_inv_ccreg_insn (void) const;
// Adds the specified insn to the set of modified or newly added insns that
// might need splitting at the end of the pass.
return i;
}
-rtx
+rtx_insn *
sh_treg_combine::make_inv_ccreg_insn (void) const
{
start_sequence ();
- rtx i = emit_insn (gen_rtx_SET (VOIDmode, m_ccreg,
- gen_rtx_fmt_ee (XOR, GET_MODE (m_ccreg),
- m_ccreg, const1_rtx)));
+ rtx_insn *i = emit_insn (gen_rtx_SET (VOIDmode, m_ccreg,
+ gen_rtx_fmt_ee (XOR, GET_MODE (m_ccreg),
+ m_ccreg, const1_rtx)));
end_sequence ();
return i;
}
// invert the ccreg as a replacement for one of them.
if (cstore_count != 0 && inv_cstore_count != 0)
{
- rtx i = make_inv_ccreg_insn ();
+ rtx_insn *i = make_inv_ccreg_insn ();
if (recog_memoized (i) < 0)
{
log_msg ("failed to match ccreg inversion insn:\n");
{
int insn_code, regno, need_extend;
unsigned int mask;
- rtx cond, reg, and_insn, load, qireg, mem;
+ rtx cond, reg, qireg, mem;
+ rtx_insn *and_insn, *load;
enum machine_mode load_mode = QImode;
enum machine_mode and_mode = QImode;
- rtx shift = NULL_RTX;
+ rtx_insn *shift = NULL;
insn_code = recog_memoized (insn);
if (insn_code != CODE_FOR_cbranchhi
if (reg_mentioned_p (reg, shift)
|| (! NOTE_P (shift) && ! NONJUMP_INSN_P (shift)))
{
- shift = NULL_RTX;
+ shift = NULL;
break;
}
}
/* Returns true if INSN is the second instruction of a pc-relative
address compuatation. */
static bool
-match_pcrel_step2 (rtx insn)
+match_pcrel_step2 (rtx_insn *insn)
{
rtx unspec;
rtx addr;
rtx note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);
if (note)
{
- NOTICE_UPDATE_CC (PATTERN (XEXP (note, 0)), XEXP (note, 0));
+ rtx_insn *other = as_a <rtx_insn *> (XEXP (note, 0));
+ NOTICE_UPDATE_CC (PATTERN (other), other);
cc_prev_status = cc_status;
}
}
directly to the desired hard register. */
void
-cleanup_subreg_operands (rtx insn)
+cleanup_subreg_operands (rtx_insn *insn)
{
int i;
bool changed = false;
*recog_data.dup_loc[i] = walk_alter_subreg (recog_data.dup_loc[i], &changed);
}
if (changed)
- df_insn_rescan (as_a <rtx_insn *> (insn));
+ df_insn_rescan (insn);
}
/* If X is a SUBREG, try to replace it with a REG or a MEM, based on
insn will be recognized (this also adds any needed CLOBBERs). */
else
{
- rtx insn = emit_insn (gen_rtx_SET (VOIDmode, reg, exp));
+ rtx_insn *insn = emit_insn (gen_rtx_SET (VOIDmode, reg, exp));
if (insn_invalid_p (insn, false))
gcc_unreachable ();
{
printf ("extern int num_delay_slots (rtx);\n");
printf ("extern int eligible_for_delay (rtx_insn *, int, rtx_insn *, int);\n\n");
- printf ("extern int const_num_delay_slots (rtx);\n\n");
+ printf ("extern int const_num_delay_slots (rtx_insn *);\n\n");
have_delay = 1;
}
if (attr)
{
- fprintf (outf, "int\nconst_num_delay_slots (rtx insn)\n");
+ fprintf (outf, "int\nconst_num_delay_slots (rtx_insn *insn)\n");
fprintf (outf, "{\n");
fprintf (outf, " switch (recog_memoized (insn))\n");
fprintf (outf, " {\n");
/* Examine INSN. If it is a suitable doloop_end pattern, return the
iteration register, which should be a single hard register.
Otherwise, return NULL_RTX. */
- rtx (*end_pattern_reg) (rtx insn);
+ rtx (*end_pattern_reg) (rtx_insn *insn);
/* Optimize LOOP. The target should perform any additional analysis
(e.g. checking that the loop isn't too long), and then perform
its transformations. Return true if successful, false if the
if ((if_info->cond_earliest == if_info->jump || cond_complex)
&& (normalize == 0 || STORE_FLAG_VALUE == normalize))
{
- rtx tmp;
-
- tmp = gen_rtx_fmt_ee (code, GET_MODE (x), XEXP (cond, 0),
+ rtx src = gen_rtx_fmt_ee (code, GET_MODE (x), XEXP (cond, 0),
XEXP (cond, 1));
- tmp = gen_rtx_SET (VOIDmode, x, tmp);
+ rtx set = gen_rtx_SET (VOIDmode, x, src);
start_sequence ();
- tmp = emit_insn (tmp);
+ rtx_insn *insn = emit_insn (set);
- if (recog_memoized (tmp) >= 0)
+ if (recog_memoized (insn) >= 0)
{
- tmp = get_insns ();
+ rtx_insn *seq = get_insns ();
end_sequence ();
- emit_insn (tmp);
+ emit_insn (seq);
if_info->cond_earliest = if_info->jump;
if (GET_CODE (x) != STRICT_LOW_PART)
{
- rtx seq, insn, target;
+ rtx_insn *seq, *insn;
+ rtx target;
optab ot;
start_sequence ();
if (if_info->cond_earliest == if_info->jump)
{
- rtx tmp;
-
- tmp = gen_rtx_fmt_ee (code, GET_MODE (if_info->cond), cmp_a, cmp_b);
- tmp = gen_rtx_IF_THEN_ELSE (GET_MODE (x), tmp, vtrue, vfalse);
- tmp = gen_rtx_SET (VOIDmode, x, tmp);
+ rtx cond = gen_rtx_fmt_ee (code, GET_MODE (if_info->cond), cmp_a, cmp_b);
+ rtx if_then_else = gen_rtx_IF_THEN_ELSE (GET_MODE (x),
+ cond, vtrue, vfalse);
+ rtx set = gen_rtx_SET (VOIDmode, x, if_then_else);
start_sequence ();
- tmp = emit_insn (tmp);
+ rtx_insn *insn = emit_insn (set);
- if (recog_memoized (tmp) >= 0)
+ if (recog_memoized (insn) >= 0)
{
- tmp = get_insns ();
+ rtx_insn *seq = get_insns ();
end_sequence ();
- emit_insn (tmp);
+ emit_insn (seq);
return x;
}
rtx b = if_info->b;
rtx x = if_info->x;
rtx orig_a, orig_b;
- rtx insn_a, insn_b;
- rtx tmp, target;
+ rtx_insn *insn_a, *insn_b;
+ rtx target;
int is_mem = 0;
int insn_cost;
enum rtx_code code;
+ rtx_insn *ifcvt_seq;
/* A conditional move from two memory sources is equivalent to a
conditional on their addresses followed by a load. Don't do this
if (reversep)
{
+ rtx tmp;
+ rtx_insn *tmp_insn;
code = reversed_comparison_code (if_info->cond, if_info->jump);
tmp = a, a = b, b = tmp;
- tmp = insn_a, insn_a = insn_b, insn_b = tmp;
+ tmp_insn = insn_a, insn_a = insn_b, insn_b = tmp_insn;
}
}
This is of course not possible in the IS_MEM case. */
if (! general_operand (a, GET_MODE (a)))
{
- rtx set;
+ rtx_insn *insn;
if (is_mem)
{
- tmp = gen_reg_rtx (GET_MODE (a));
- tmp = emit_insn (gen_rtx_SET (VOIDmode, tmp, a));
+ rtx reg = gen_reg_rtx (GET_MODE (a));
+ insn = emit_insn (gen_rtx_SET (VOIDmode, reg, a));
}
else if (! insn_a)
goto end_seq_and_fail;
else
{
a = gen_reg_rtx (GET_MODE (a));
- tmp = copy_rtx (insn_a);
- set = single_set (tmp);
+ rtx_insn *copy_of_a = as_a <rtx_insn *> (copy_rtx (insn_a));
+ rtx set = single_set (copy_of_a);
SET_DEST (set) = a;
- tmp = emit_insn (PATTERN (tmp));
+ insn = emit_insn (PATTERN (copy_of_a));
}
- if (recog_memoized (tmp) < 0)
+ if (recog_memoized (insn) < 0)
goto end_seq_and_fail;
}
if (! general_operand (b, GET_MODE (b)))
{
- rtx set, last;
+ rtx pat;
+ rtx_insn *last;
+ rtx_insn *new_insn;
if (is_mem)
{
- tmp = gen_reg_rtx (GET_MODE (b));
- tmp = gen_rtx_SET (VOIDmode, tmp, b);
+ rtx reg = gen_reg_rtx (GET_MODE (b));
+ pat = gen_rtx_SET (VOIDmode, reg, b);
}
else if (! insn_b)
goto end_seq_and_fail;
else
{
b = gen_reg_rtx (GET_MODE (b));
- tmp = copy_rtx (insn_b);
- set = single_set (tmp);
+ rtx_insn *copy_of_insn_b = as_a <rtx_insn *> (copy_rtx (insn_b));
+ rtx set = single_set (copy_of_insn_b);
SET_DEST (set) = b;
- tmp = PATTERN (tmp);
+ pat = PATTERN (copy_of_insn_b);
}
/* If insn to set up A clobbers any registers B depends on, try to
last = get_last_insn ();
if (last && modified_in_p (orig_b, last))
{
- tmp = emit_insn_before (tmp, get_insns ());
- if (modified_in_p (orig_a, tmp))
+ new_insn = emit_insn_before (pat, get_insns ());
+ if (modified_in_p (orig_a, new_insn))
goto end_seq_and_fail;
}
else
- tmp = emit_insn (tmp);
+ new_insn = emit_insn (pat);
- if (recog_memoized (tmp) < 0)
+ if (recog_memoized (new_insn) < 0)
goto end_seq_and_fail;
}
/* If we're handling a memory for above, emit the load now. */
if (is_mem)
{
- tmp = gen_rtx_MEM (GET_MODE (if_info->x), target);
+ rtx mem = gen_rtx_MEM (GET_MODE (if_info->x), target);
/* Copy over flags as appropriate. */
if (MEM_VOLATILE_P (if_info->a) || MEM_VOLATILE_P (if_info->b))
- MEM_VOLATILE_P (tmp) = 1;
+ MEM_VOLATILE_P (mem) = 1;
if (MEM_ALIAS_SET (if_info->a) == MEM_ALIAS_SET (if_info->b))
- set_mem_alias_set (tmp, MEM_ALIAS_SET (if_info->a));
- set_mem_align (tmp,
+ set_mem_alias_set (mem, MEM_ALIAS_SET (if_info->a));
+ set_mem_align (mem,
MIN (MEM_ALIGN (if_info->a), MEM_ALIGN (if_info->b)));
gcc_assert (MEM_ADDR_SPACE (if_info->a) == MEM_ADDR_SPACE (if_info->b));
- set_mem_addr_space (tmp, MEM_ADDR_SPACE (if_info->a));
+ set_mem_addr_space (mem, MEM_ADDR_SPACE (if_info->a));
- noce_emit_move_insn (if_info->x, tmp);
+ noce_emit_move_insn (if_info->x, mem);
}
else if (target != x)
noce_emit_move_insn (x, target);
- tmp = end_ifcvt_sequence (if_info);
- if (!tmp)
+ ifcvt_seq = end_ifcvt_sequence (if_info);
+ if (!ifcvt_seq)
return FALSE;
- emit_insn_before_setloc (tmp, if_info->jump, INSN_LOCATION (if_info->insn_a));
+ emit_insn_before_setloc (ifcvt_seq, if_info->jump,
+ INSN_LOCATION (if_info->insn_a));
return TRUE;
end_seq_and_fail:
extern void ira_debug_disposition (void);
extern void ira_debug_allocno_classes (void);
extern void ira_init_register_move_cost (enum machine_mode);
-extern void ira_setup_alts (rtx insn, HARD_REG_SET &alts);
+extern void ira_setup_alts (rtx_insn *insn, HARD_REG_SET &alts);
extern int ira_get_dup_out_num (int op_num, HARD_REG_SET &alts);
/* ira-build.c */
setup_prohibited_mode_move_regs (void)
{
int i, j;
- rtx test_reg1, test_reg2, move_pat, move_insn;
+ rtx test_reg1, test_reg2, move_pat;
+ rtx_insn *move_insn;
if (ira_prohibited_mode_move_regs_initialized_p)
return;
/* Setup possible alternatives in ALTS for INSN. */
void
-ira_setup_alts (rtx insn, HARD_REG_SET &alts)
+ira_setup_alts (rtx_insn *insn, HARD_REG_SET &alts)
{
/* MAP nalt * nop -> start of constraints for given operand and
alternative */
|| GET_CODE (value) == POST_MODIFY);
rtx_insn *last;
rtx inc;
- rtx add_insn;
+ rtx_insn *add_insn;
int code;
rtx real_in = in == value ? incloc : in;
rtx result;
emit_add3_insn (rtx x, rtx y, rtx z)
{
rtx_insn *last;
- rtx insn;
last = get_last_insn ();
if (have_addptr3_insn (x, y, z))
{
- insn = gen_addptr3_insn (x, y, z);
+ rtx insn = gen_addptr3_insn (x, y, z);
/* If the target provides an "addptr" pattern it hopefully does
for a reason. So falling back to the normal add would be
return insn;
}
- insn = emit_insn (gen_rtx_SET (VOIDmode, x,
- gen_rtx_PLUS (GET_MODE (y), y, z)));
+ rtx_insn *insn = emit_insn (gen_rtx_SET (VOIDmode, x,
+ gen_rtx_PLUS (GET_MODE (y), y, z)));
if (recog_memoized (insn) < 0)
{
delete_insns_since (last);
- insn = NULL_RTX;
+ insn = NULL;
}
return insn;
}
lra_emit_add (rtx x, rtx y, rtx z)
{
int old;
- rtx insn;
rtx_insn *last;
rtx a1, a2, base, index, disp, scale, index_scale;
bool ok_p;
- insn = emit_add3_insn (x, y, z);
+ rtx add3_insn = emit_add3_insn (x, y, z);
old = max_reg_num ();
- if (insn != NULL_RTX)
+ if (add3_insn != NULL)
;
else
{
adding the address segment to register. */
lra_assert (x != y && x != z);
emit_move_insn (x, y);
- insn = emit_add2_insn (x, z);
+ rtx insn = emit_add2_insn (x, z);
lra_assert (insn != NULL_RTX);
}
else
/* Generate x = index_scale; x = x + base. */
lra_assert (index_scale != NULL_RTX && base != NULL_RTX);
emit_move_insn (x, index_scale);
- insn = emit_add2_insn (x, base);
+ rtx insn = emit_add2_insn (x, base);
lra_assert (insn != NULL_RTX);
}
else if (scale == NULL_RTX)
/* Try x = base + disp. */
lra_assert (base != NULL_RTX);
last = get_last_insn ();
- insn = emit_move_insn (x, gen_rtx_PLUS (GET_MODE (base),
- base, disp));
- if (recog_memoized (insn) < 0)
+ rtx_insn *move_insn =
+ emit_move_insn (x, gen_rtx_PLUS (GET_MODE (base), base, disp));
+ if (recog_memoized (move_insn) < 0)
{
delete_insns_since (last);
/* Generate x = disp; x = x + base. */
emit_move_insn (x, disp);
- insn = emit_add2_insn (x, base);
- lra_assert (insn != NULL_RTX);
+ rtx add2_insn = emit_add2_insn (x, base);
+ lra_assert (add2_insn != NULL_RTX);
}
/* Generate x = x + index. */
if (index != NULL_RTX)
{
- insn = emit_add2_insn (x, index);
+ rtx insn = emit_add2_insn (x, index);
lra_assert (insn != NULL_RTX);
}
}
{
/* Try x = index_scale; x = x + disp; x = x + base. */
last = get_last_insn ();
- insn = emit_move_insn (x, index_scale);
+ rtx_insn *move_insn = emit_move_insn (x, index_scale);
ok_p = false;
- if (recog_memoized (insn) >= 0)
+ if (recog_memoized (move_insn) >= 0)
{
- insn = emit_add2_insn (x, disp);
+ rtx insn = emit_add2_insn (x, disp);
if (insn != NULL_RTX)
{
insn = emit_add2_insn (x, base);
delete_insns_since (last);
/* Generate x = disp; x = x + base; x = x + index_scale. */
emit_move_insn (x, disp);
- insn = emit_add2_insn (x, base);
+ rtx insn = emit_add2_insn (x, base);
lra_assert (insn != NULL_RTX);
insn = emit_add2_insn (x, index_scale);
lra_assert (insn != NULL_RTX);
/* Make sure we can generate a move from register avail_reg to
dest. */
- extract_insn (gen_move_insn (copy_rtx (dest),
- copy_rtx (avail_reg)));
+ extract_insn (as_a <rtx_insn *> (
+ gen_move_insn (copy_rtx (dest),
+ copy_rtx (avail_reg))));
if (! constrain_operands (1)
|| reg_killed_on_edge (avail_reg, pred)
|| reg_used_on_edge (dest, pred))
Otherwise the changes will take effect immediately. */
int
-insn_invalid_p (rtx insn, bool in_group)
+insn_invalid_p (rtx_insn *insn, bool in_group)
{
rtx pat = PATTERN (insn);
int num_clobbers = 0;
}
else if (DEBUG_INSN_P (object))
continue;
- else if (insn_invalid_p (object, true))
+ else if (insn_invalid_p (as_a <rtx_insn *> (object), true))
{
rtx pat = PATTERN (object);
valid information. This is used primary by gen_attr infrastructure that
often does extract insn again and again. */
void
-extract_insn_cached (rtx insn)
+extract_insn_cached (rtx_insn *insn)
{
if (recog_data.insn == insn && INSN_CODE (insn) >= 0)
return;
/* Do cached extract_insn, constrain_operands and complain about failures.
Used by insn_attrtab. */
void
-extract_constrain_insn_cached (rtx insn)
+extract_constrain_insn_cached (rtx_insn *insn)
{
extract_insn_cached (insn);
if (which_alternative == -1
/* Analyze INSN and fill in recog_data. */
void
-extract_insn (rtx insn)
+extract_insn (rtx_insn *insn)
{
int i;
int icode;
extern bool validate_change (rtx, rtx *, rtx, bool);
extern bool validate_unshare_change (rtx, rtx *, rtx, bool);
extern bool canonicalize_change_group (rtx insn, rtx x);
-extern int insn_invalid_p (rtx, bool);
+extern int insn_invalid_p (rtx_insn *, bool);
extern int verify_changes (int);
extern void confirm_change_group (void);
extern int apply_change_group (void);
extern int recog (rtx, rtx, int *);
#ifndef GENERATOR_FILE
-static inline int recog_memoized (rtx insn);
+static inline int recog_memoized (rtx_insn *insn);
#endif
extern void add_clobbers (rtx, int);
extern int added_clobbers_hard_reg_p (int);
extern void insn_extract (rtx);
-extern void extract_insn (rtx);
-extern void extract_constrain_insn_cached (rtx);
-extern void extract_insn_cached (rtx);
+extern void extract_insn (rtx_insn *);
+extern void extract_constrain_insn_cached (rtx_insn *);
+extern void extract_insn_cached (rtx_insn *);
extern void preprocess_constraints (int, int, const char **,
operand_alternative *);
extern const operand_alternative *preprocess_insn_constraints (int);
through this one. */
static inline int
-recog_memoized (rtx insn)
+recog_memoized (rtx_insn *insn)
{
if (INSN_CODE (insn) < 0)
INSN_CODE (insn) = recog (PATTERN (insn), insn, 0);
static int
can_reload_into (rtx in, int regno, enum machine_mode mode)
{
- rtx dst, test_insn;
+ rtx dst;
+ rtx_insn *test_insn;
int r = 0;
struct recog_data_d save_recog_data;
extern void save_call_clobbered_regs (void);
/* Replace (subreg (reg)) with the appropriate (reg) for any operands. */
-extern void cleanup_subreg_operands (rtx);
+extern void cleanup_subreg_operands (rtx_insn *);
/* Debugging support. */
extern void debug_reload_to_stream (FILE *);
Return the emitted insn if valid, else return NULL. */
static rtx_insn *
-emit_insn_if_valid_for_reload (rtx insn)
+emit_insn_if_valid_for_reload (rtx pat)
{
rtx_insn *last = get_last_insn ();
int code;
- insn = emit_insn (insn);
+ rtx_insn *insn = emit_insn (pat);
code = recog_memoized (insn);
if (code >= 0)
validity determination, i.e., the way it would after reload has
completed. */
if (constrain_operands (1))
- return as_a <rtx_insn *> (insn);
+ return insn;
}
delete_insns_since (last);
/* Return latency of INSN. */
static int
-sel_insn_rtx_cost (rtx insn)
+sel_insn_rtx_cost (rtx_insn *insn)
{
int cost;
Code adopted from regrename.c::build_def_use. */
static enum reg_class
-get_reg_class (rtx insn)
+get_reg_class (rtx_insn *insn)
{
int i, n_ops;