+2014-08-27 David Malcolm <dmalcolm@redhat.com>
+
+ * target.def (unwind_emit): Strengthen param "insn" from rtx to
+ rtx_insn *.
+ (final_postscan_insn): Likewise.
+ (adjust_cost): Likewise.
+ (adjust_priority): Likewise.
+ (variable_issue): Likewise.
+ (macro_fusion_pair_p): Likewise.
+ (dfa_post_cycle_insn): Likewise.
+ (first_cycle_multipass_dfa_lookahead_guard): Likewise.
+ (first_cycle_multipass_issue): Likewise.
+ (dfa_new_cycle): Likewise.
+ (adjust_cost_2): Likewise for params "insn" and "dep_insn".
+ (speculate_insn): Likewise for param "insn".
+ (gen_spec_check): Likewise for params "insn" and "label".
+ (get_insn_spec_ds): Likewise for param "insn".
+ (get_insn_checked_ds): Likewise.
+ (dispatch_do): Likewise.
+ (dispatch): Likewise.
+ (cannot_copy_insn_p): Likewise.
+ (invalid_within_doloop): Likewise.
+ (legitimate_combined_insn): Likewise.
+ (needed): Likewise.
+ (after): Likewise.
+
+ * doc/tm.texi: Automatically updated to reflect changes to
+ target.def.
+
+ * haifa-sched.c (choose_ready): Convert NULL_RTX to NULL when
+ working with insn.
+ (schedule_block): Likewise.
+ (sched_init): Likewise.
+ (sched_speculate_insn): Strengthen param "insn" from rtx to
+ rtx_insn *.
+ (ready_remove_first_dispatch): Convert NULL_RTX to NULL when
+ working with insn.
+ * hooks.c (hook_bool_rtx_true): Rename to...
+ (hook_bool_rtx_insn_true): ...this, and strengthen first param from
+ rtx to rtx_insn *.
+ (hook_constcharptr_const_rtx_null): Rename to...
+ (hook_constcharptr_const_rtx_insn_null): ...this, and strengthen
+ first param from const_rtx to const rtx_insn *.
+ (hook_bool_rtx_int_false): Rename to...
+ (hook_bool_rtx_insn_int_false): ...this, and strengthen first
+ param from rtx to rtx_insn *.
+ (hook_void_rtx_int): Rename to...
+ (hook_void_rtx_insn_int): ...this, and strengthen first param from
+ rtx to rtx_insn *.
+
+ * hooks.h (hook_bool_rtx_true): Rename to...
+ (hook_bool_rtx_insn_true): ...this, and strengthen first param from
+ rtx to rtx_insn *.
+ (hook_bool_rtx_int_false): Rename to...
+ (hook_bool_rtx_insn_int_false): ...this, and strengthen first
+ param from rtx to rtx_insn *.
+ (hook_void_rtx_int): Rename to...
+ (hook_void_rtx_insn_int): ...this, and strengthen first param from
+ rtx to rtx_insn *.
+ (hook_constcharptr_const_rtx_null): Rename to...
+ (hook_constcharptr_const_rtx_insn_null): ...this, and strengthen
+ first param from const_rtx to const rtx_insn *.
+
+ * sched-deps.c (sched_macro_fuse_insns): Strengthen param "insn"
+ and local "prev" from rtx to rtx_insn *.
+
+ * sched-int.h (sched_speculate_insn): Strengthen first param from
+ rtx to rtx_insn *.
+
+ * sel-sched.c (create_speculation_check): Likewise for local "label".
+ * targhooks.c (default_invalid_within_doloop): Strengthen param
+ "insn" from const_rtx to const rtx_insn *.
+ * targhooks.h (default_invalid_within_doloop): Strengthen param
+ from const_rtx to const rtx_insn *.
+
+ * config/alpha/alpha.c (alpha_cannot_copy_insn_p): Likewise.
+ (alpha_adjust_cost): Likewise for params "insn", "dep_insn".
+
+ * config/arc/arc.c (arc_sched_adjust_priority): Likewise for param
+ "insn".
+ (arc_invalid_within_doloop): Likewise, with const.
+
+ * config/arm/arm.c (arm_adjust_cost): Likewise for params "insn", "dep".
+ (arm_cannot_copy_insn_p): Likewise for param "insn".
+ (arm_unwind_emit): Likewise.
+
+ * config/bfin/bfin.c (bfin_adjust_cost): Likewise for params "insn",
+ "dep_insn".
+
+ * config/c6x/c6x.c (c6x_dfa_new_cycle): Likewise for param "insn".
+ (c6x_variable_issue): Likewise.  Remove now-redundant checked
+ cast.
+ (c6x_adjust_cost): Likewise for params "insn", "dep_insn".
+
+ * config/epiphany/epiphany-protos.h (epiphany_mode_needed):
+ Likewise for param "insn".
+ (epiphany_mode_after): Likewise.
+ * config/epiphany/epiphany.c (epiphany_adjust_cost): Likewise for
+ params "insn", "dep_insn".
+ (epiphany_mode_needed): Likewise for param "insn".
+ (epiphany_mode_after): Likewise.
+
+ * config/i386/i386-protos.h (i386_pe_seh_unwind_emit): Likewise.
+ * config/i386/i386.c (ix86_legitimate_combined_insn): Likewise.
+ (ix86_avx_u128_mode_needed): Likewise.
+ (ix86_i387_mode_needed): Likewise.
+ (ix86_mode_needed): Likewise.
+ (ix86_avx_u128_mode_after): Likewise.
+ (ix86_mode_after): Likewise.
+ (ix86_adjust_cost): Likewise for params "insn", "dep_insn".
+ (ix86_macro_fusion_pair_p): Likewise for params "condgen", "condjmp".
+ (ix86_adjust_priority): Likewise for param "insn".
+ (core2i7_first_cycle_multipass_issue): Likewise for param "insn".
+ (do_dispatch): Likewise.
+ (has_dispatch): Likewise.
+ * config/i386/winnt.c (i386_pe_seh_unwind_emit): Likewise.
+
+ * config/ia64/ia64.c (TARGET_INVALID_WITHIN_DOLOOP): Update to
+ reflect renaming of default hook implementation from
+ hook_constcharptr_const_rtx_null to
+ hook_constcharptr_const_rtx_insn_null.
+ (ia64_adjust_cost_2): Strengthen params "insn", "dep_insn" from
+ rtx to rtx_insn *.
+ (ia64_variable_issue): Likewise for param "insn".
+ (ia64_first_cycle_multipass_dfa_lookahead_guard): Likewise.
+ (ia64_dfa_new_cycle): Likewise.
+ (ia64_get_insn_spec_ds): Likewise.
+ (ia64_get_insn_checked_ds): Likewise.
+ (ia64_speculate_insn): Likewise.
+ (ia64_gen_spec_check): Likewise for params "insn", "label".
+ (ia64_asm_unwind_emit): Likewise for param "insn".
+
+ * config/m32r/m32r.c (m32r_adjust_priority): Likewise.
+
+ * config/m68k/m68k.c (m68k_sched_adjust_cost): Likewise for params
+ "insn", "def_insn".
+ (m68k_sched_variable_issue): Likewise for param "insn".
+
+ * config/mep/mep.c (mep_adjust_cost): Likewise for params "insn",
+ "def_insn".
+
+ * config/microblaze/microblaze.c (microblaze_adjust_cost):
+ Likewise for params "insn", "dep".
+
+ * config/mips/mips.c (mips_adjust_cost): Likewise.
+ (mips_variable_issue): Likewise for param "insn".
+ (mips_final_postscan_insn): Likewise.
+
+ * config/mn10300/mn10300.c (mn10300_adjust_sched_cost): Likewise
+ for params "insn", "dep".
+
+ * config/pa/pa.c (pa_adjust_cost): Likewise for params "insn",
+ "dep_insn".
+ (pa_adjust_priority): Likewise for param "insn".
+
+ * config/picochip/picochip.c (picochip_sched_adjust_cost):
+ Likewise for params "insn", "dep_insn".
+
+ * config/rs6000/rs6000.c (rs6000_variable_issue_1): Likewise for
+ param "insn".
+ (rs6000_variable_issue): Likewise.
+ (rs6000_adjust_cost): Likewise for params "insn", "dep_insn".
+ (rs6000_debug_adjust_cost): Likewise.
+ (rs6000_adjust_priority): Likewise for param "insn".
+ (rs6000_use_sched_lookahead_guard): Likewise.
+ (get_next_active_insn): Likewise for return type and both params.
+ (redefine_groups): Likewise for params "prev_head_insn", "tail"
+ and locals "insn", "next_insn".
+ (pad_groups): Likewise.
+
+ * config/s390/s390.c (s390_adjust_priority): Likewise for param
+ "insn".
+ (s390_cannot_copy_insn_p): Likewise.
+ (s390_sched_variable_issue): Likewise for third param, eliminating
+ checked cast.
+ (TARGET_INVALID_WITHIN_DOLOOP): Update to reflect renaming of
+ default hook implementation from hook_constcharptr_const_rtx_null
+ to hook_constcharptr_const_rtx_insn_null.
+
+ * config/sh/sh.c (sh_cannot_copy_insn_p): Strengthen param "insn"
+ from rtx to rtx_insn *.
+ (sh_adjust_cost): Likewise for params "insn", "dep_insn".
+ (sh_variable_issue): Likewise for param "insn".
+ (sh_dfa_new_cycle): Likewise.
+ (sh_mode_needed): Likewise.
+ (sh_mode_after): Likewise.
+
+ * config/sparc/sparc.c (supersparc_adjust_cost): Likewise for
+ params "insn", "dep_insn".
+ (hypersparc_adjust_cost): Likewise.
+ (sparc_adjust_cost): Likewise.
+
+ * config/spu/spu.c (spu_sched_variable_issue): Likewise for third
+ param, eliminating checked cast.
+ (spu_sched_adjust_cost): Likewise for first and third params.
+
+ * config/tilegx/tilegx.c (tilegx_sched_adjust_cost): Strengthen
+ params "insn" and "dep_insn" from rtx to rtx_insn *.
+
+ * config/tilepro/tilepro.c (tilepro_sched_adjust_cost): Likewise.
+
2014-08-27 David Malcolm <dmalcolm@redhat.com>
* gcc/config/mn10300/mn10300.c (is_load_insn): Rename to...
containing the call and branch to the block containing the ldgp. */
static bool
-alpha_cannot_copy_insn_p (rtx insn)
+alpha_cannot_copy_insn_p (rtx_insn *insn)
{
if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
return false;
a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
static int
-alpha_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
+alpha_adjust_cost (rtx_insn *insn, rtx link, rtx_insn *dep_insn, int cost)
{
enum attr_type dep_insn_type;
static bool arc_can_use_doloop_p (const widest_int &, const widest_int &,
unsigned int, bool);
-static const char *arc_invalid_within_doloop (const_rtx);
+static const char *arc_invalid_within_doloop (const rtx_insn *);
static void output_short_suffix (FILE *file);
use the peephole2 pattern. */
static int
-arc_sched_adjust_priority (rtx insn, int priority)
+arc_sched_adjust_priority (rtx_insn *insn, int priority)
{
rtx set = single_set (insn);
if (set
Otherwise return why doloop cannot be applied. */
static const char *
-arc_invalid_within_doloop (const_rtx insn)
+arc_invalid_within_doloop (const rtx_insn *insn)
{
if (CALL_P (insn))
return "Function call in the loop.";
static void arm_output_function_prologue (FILE *, HOST_WIDE_INT);
static int arm_comp_type_attributes (const_tree, const_tree);
static void arm_set_default_type_attributes (tree);
-static int arm_adjust_cost (rtx, rtx, rtx, int);
+static int arm_adjust_cost (rtx_insn *, rtx, rtx_insn *, int);
static int arm_sched_reorder (FILE *, int, rtx_insn **, int *, int);
static int optimal_immediate_sequence (enum rtx_code code,
unsigned HOST_WIDE_INT val,
static bool arm_must_pass_in_stack (enum machine_mode, const_tree);
static bool arm_return_in_memory (const_tree, const_tree);
#if ARM_UNWIND_INFO
-static void arm_unwind_emit (FILE *, rtx);
+static void arm_unwind_emit (FILE *, rtx_insn *);
static bool arm_output_ttype (rtx);
static void arm_asm_emit_except_personality (rtx);
static void arm_asm_init_sections (void);
static tree arm_gimplify_va_arg_expr (tree, tree, gimple_seq *, gimple_seq *);
static void arm_option_override (void);
static unsigned HOST_WIDE_INT arm_shift_truncation_mask (enum machine_mode);
-static bool arm_cannot_copy_insn_p (rtx);
+static bool arm_cannot_copy_insn_p (rtx_insn *);
static int arm_issue_rate (void);
static void arm_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
static bool arm_output_addr_const_extra (FILE *, rtx);
adjust_cost function. Only put bits of code into arm_adjust_cost that
are common across all cores. */
static int
-arm_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
+arm_adjust_cost (rtx_insn *insn, rtx link, rtx_insn *dep, int cost)
{
rtx i_pat, d_pat;
}
static bool
-arm_cannot_copy_insn_p (rtx insn)
+arm_cannot_copy_insn_p (rtx_insn *insn)
{
/* The tls call insn cannot be copied, as it is paired with a data
word. */
/* Emit unwind directives for the given insn. */
static void
-arm_unwind_emit (FILE * asm_out_file, rtx insn)
+arm_unwind_emit (FILE * asm_out_file, rtx_insn *insn)
{
rtx note, pat;
bool handled_one = false;
}
static int
-bfin_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
+bfin_adjust_cost (rtx_insn *insn, rtx link, rtx_insn *dep_insn, int cost)
{
enum attr_type dep_insn_type;
int dep_insn_code_number;
static int
c6x_dfa_new_cycle (FILE *dump ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
- rtx insn ATTRIBUTE_UNUSED, int last_clock ATTRIBUTE_UNUSED,
+ rtx_insn *insn ATTRIBUTE_UNUSED,
+ int last_clock ATTRIBUTE_UNUSED,
int clock ATTRIBUTE_UNUSED, int *sort_p ATTRIBUTE_UNUSED)
{
if (clock != last_clock)
static int
c6x_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
int sched_verbose ATTRIBUTE_UNUSED,
- rtx insn, int can_issue_more ATTRIBUTE_UNUSED)
+ rtx_insn *insn, int can_issue_more ATTRIBUTE_UNUSED)
{
ss.last_scheduled_insn = insn;
if (INSN_UID (insn) < sploop_max_uid_iter0 && !JUMP_P (insn))
- ss.last_scheduled_iter0 = as_a <rtx_insn *> (insn);
+ ss.last_scheduled_iter0 = insn;
if (GET_CODE (PATTERN (insn)) != USE && GET_CODE (PATTERN (insn)) != CLOBBER)
ss.issued_this_cycle++;
if (insn_info.exists ())
anti- and output dependencies. */
static int
-c6x_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
+c6x_adjust_cost (rtx_insn *insn, rtx link, rtx_insn *dep_insn, int cost)
{
enum attr_type insn_type = TYPE_UNKNOWN, dep_insn_type = TYPE_UNKNOWN;
int dep_insn_code_number, insn_code_number;
#endif
extern void epiphany_insert_mode_switch_use (rtx insn, int, int);
extern void epiphany_expand_set_fp_mode (rtx *operands);
-extern int epiphany_mode_needed (int entity, rtx insn);
-extern int epiphany_mode_after (int entity, int last_mode, rtx insn);
+extern int epiphany_mode_needed (int entity, rtx_insn *insn);
+extern int epiphany_mode_after (int entity, int last_mode, rtx_insn *insn);
extern bool epiphany_epilogue_uses (int regno);
extern bool epiphany_optimize_mode_switching (int entity);
extern bool epiphany_is_interrupt_p (tree);
the same cost as a data-dependence. The return value should be
the new value for COST. */
static int
-epiphany_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
+epiphany_adjust_cost (rtx_insn *insn, rtx link, rtx_insn *dep_insn, int cost)
{
if (REG_NOTE_KIND (link) == 0)
{
}
int
-epiphany_mode_needed (int entity, rtx insn)
+epiphany_mode_needed (int entity, rtx_insn *insn)
{
enum attr_fp_mode mode;
}
int
-epiphany_mode_after (int entity, int last_mode, rtx insn)
+epiphany_mode_after (int entity, int last_mode, rtx_insn *insn)
{
/* We have too few call-saved registers to hope to keep the masks across
calls. */
extern void i386_pe_seh_init (FILE *);
extern void i386_pe_seh_end_prologue (FILE *);
-extern void i386_pe_seh_unwind_emit (FILE *, rtx);
+extern void i386_pe_seh_unwind_emit (FILE *, rtx_insn *);
extern void i386_pe_seh_emit_except_personality (rtx);
extern void i386_pe_seh_init_sections (void);
/* Implement the TARGET_LEGITIMATE_COMBINED_INSN hook. */
static bool
-ix86_legitimate_combined_insn (rtx insn)
+ix86_legitimate_combined_insn (rtx_insn *insn)
{
/* Check operand constraints in case hard registers were propagated
into insn pattern. This check prevents combine pass from
/* Return needed mode for entity in optimize_mode_switching pass. */
static int
-ix86_avx_u128_mode_needed (rtx insn)
+ix86_avx_u128_mode_needed (rtx_insn *insn)
{
if (CALL_P (insn))
{
prior to the execution of insn. */
static int
-ix86_i387_mode_needed (int entity, rtx insn)
+ix86_i387_mode_needed (int entity, rtx_insn *insn)
{
enum attr_i387_cw mode;
prior to the execution of insn. */
static int
-ix86_mode_needed (int entity, rtx insn)
+ix86_mode_needed (int entity, rtx_insn *insn)
{
switch (entity)
{
/* Calculate mode of upper 128bit AVX registers after the insn. */
static int
-ix86_avx_u128_mode_after (int mode, rtx insn)
+ix86_avx_u128_mode_after (int mode, rtx_insn *insn)
{
rtx pat = PATTERN (insn);
/* Return the mode that an insn results in. */
int
-ix86_mode_after (int entity, int mode, rtx insn)
+ix86_mode_after (int entity, int mode, rtx_insn *insn)
{
switch (entity)
{
}
static int
-ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
+ix86_adjust_cost (rtx_insn *insn, rtx link, rtx_insn *dep_insn, int cost)
{
enum attr_type insn_type, dep_insn_type;
enum attr_memory memory;
"Intel Architectures Optimization Reference Manual". */
static bool
-ix86_macro_fusion_pair_p (rtx condgen, rtx condjmp)
+ix86_macro_fusion_pair_p (rtx_insn *condgen, rtx_insn *condjmp)
{
rtx src, dest;
rtx single_set = single_set (condgen);
moves from function argument registers at the top of the function entry
and moves from function return value registers after call. */
static int
-ix86_adjust_priority (rtx insn, int priority)
+ix86_adjust_priority (rtx_insn *insn, int priority)
{
rtx set;
static void
core2i7_first_cycle_multipass_issue (void *_data,
signed char *ready_try, int n_ready,
- rtx insn, const void *_prev_data)
+ rtx_insn *insn, const void *_prev_data)
{
ix86_first_cycle_multipass_data_t data
= (ix86_first_cycle_multipass_data_t) _data;
/* This routine is the driver of the dispatch scheduler. */
static void
-do_dispatch (rtx insn, int mode)
+do_dispatch (rtx_insn *insn, int mode)
{
if (mode == DISPATCH_INIT)
init_dispatch_sched ();
/* Return TRUE if Dispatch Scheduling is supported. */
static bool
-has_dispatch (rtx insn, int action)
+has_dispatch (rtx_insn *insn, int action)
{
if ((TARGET_BDVER1 || TARGET_BDVER2 || TARGET_BDVER3 || TARGET_BDVER4)
&& flag_dispatch_scheduler)
required for unwind of this insn. */
void
-i386_pe_seh_unwind_emit (FILE *asm_out_file, rtx insn)
+i386_pe_seh_unwind_emit (FILE *asm_out_file, rtx_insn *insn)
{
rtx note, pat;
bool handled_one = false;
static void ia64_dependencies_evaluation_hook (rtx_insn *, rtx_insn *);
static void ia64_init_dfa_pre_cycle_insn (void);
static rtx ia64_dfa_pre_cycle_insn (void);
-static int ia64_first_cycle_multipass_dfa_lookahead_guard (rtx, int);
-static int ia64_dfa_new_cycle (FILE *, int, rtx, int, int, int *);
+static int ia64_first_cycle_multipass_dfa_lookahead_guard (rtx_insn *, int);
+static int ia64_dfa_new_cycle (FILE *, int, rtx_insn *, int, int, int *);
static void ia64_h_i_d_extended (void);
static void * ia64_alloc_sched_context (void);
static void ia64_init_sched_context (void *, bool);
static void ia64_free_sched_context (void *);
static int ia64_mode_to_int (enum machine_mode);
static void ia64_set_sched_flags (spec_info_t);
-static ds_t ia64_get_insn_spec_ds (rtx);
-static ds_t ia64_get_insn_checked_ds (rtx);
+static ds_t ia64_get_insn_spec_ds (rtx_insn *);
+static ds_t ia64_get_insn_checked_ds (rtx_insn *);
static bool ia64_skip_rtx_p (const_rtx);
-static int ia64_speculate_insn (rtx, ds_t, rtx *);
+static int ia64_speculate_insn (rtx_insn *, ds_t, rtx *);
static bool ia64_needs_block_p (ds_t);
-static rtx ia64_gen_spec_check (rtx, rtx, ds_t);
+static rtx ia64_gen_spec_check (rtx_insn *, rtx_insn *, ds_t);
static int ia64_spec_check_p (rtx);
static int ia64_spec_check_src_p (rtx);
static rtx gen_tls_get_addr (void);
static bool ia64_print_operand_punct_valid_p (unsigned char code);
static int ia64_issue_rate (void);
-static int ia64_adjust_cost_2 (rtx, int, rtx, int, dw_t);
+static int ia64_adjust_cost_2 (rtx_insn *, int, rtx_insn *, int, dw_t);
static void ia64_sched_init (FILE *, int, int);
static void ia64_sched_init_global (FILE *, int, int);
static void ia64_sched_finish_global (FILE *, int);
static int ia64_dfa_sched_reorder (FILE *, int, rtx_insn **, int *, int, int);
static int ia64_sched_reorder (FILE *, int, rtx_insn **, int *, int);
static int ia64_sched_reorder2 (FILE *, int, rtx_insn **, int *, int);
-static int ia64_variable_issue (FILE *, int, rtx, int);
+static int ia64_variable_issue (FILE *, int, rtx_insn *, int);
-static void ia64_asm_unwind_emit (FILE *, rtx);
+static void ia64_asm_unwind_emit (FILE *, rtx_insn *);
static void ia64_asm_emit_except_personality (rtx);
static void ia64_asm_init_sections (void);
#undef TARGET_CAN_USE_DOLOOP_P
#define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
#undef TARGET_INVALID_WITHIN_DOLOOP
-#define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_null
+#define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_insn_null
#undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
#define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE ia64_override_options_after_change
Return the new cost of a dependency of type DEP_TYPE or INSN on DEP_INSN.
COST is the current cost, DW is dependency weakness. */
static int
-ia64_adjust_cost_2 (rtx insn, int dep_type1, rtx dep_insn, int cost, dw_t dw)
+ia64_adjust_cost_2 (rtx_insn *insn, int dep_type1, rtx_insn *dep_insn,
+ int cost, dw_t dw)
{
enum reg_note dep_type = (enum reg_note) dep_type1;
enum attr_itanium_class dep_class;
static int
ia64_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
int sched_verbose ATTRIBUTE_UNUSED,
- rtx insn ATTRIBUTE_UNUSED,
+ rtx_insn *insn,
int can_issue_more ATTRIBUTE_UNUSED)
{
if (sched_deps_info->generate_spec_deps && !sel_sched_p ())
can be chosen. */
static int
-ia64_first_cycle_multipass_dfa_lookahead_guard (rtx insn, int ready_index)
+ia64_first_cycle_multipass_dfa_lookahead_guard (rtx_insn *insn, int ready_index)
{
gcc_assert (insn && INSN_P (insn));
the ready queue on the next clock start. */
static int
-ia64_dfa_new_cycle (FILE *dump, int verbose, rtx insn, int last_clock,
+ia64_dfa_new_cycle (FILE *dump, int verbose, rtx_insn *insn, int last_clock,
int clock, int *sort_p)
{
gcc_assert (insn && INSN_P (insn));
/* If INSN is a speculative load, return a ds with the speculation types.
Otherwise [if INSN is a normal instruction] return 0. */
static ds_t
-ia64_get_insn_spec_ds (rtx insn)
+ia64_get_insn_spec_ds (rtx_insn *insn)
{
int code = get_insn_spec_code (insn);
will be checked.
Otherwise [if INSN is a normal instruction] return 0. */
static ds_t
-ia64_get_insn_checked_ds (rtx insn)
+ia64_get_insn_checked_ds (rtx_insn *insn)
{
int code = get_insn_spec_code (insn);
If current pattern of the INSN already provides TS speculation,
return 0. */
static int
-ia64_speculate_insn (rtx insn, ds_t ts, rtx *new_pat)
+ia64_speculate_insn (rtx_insn *insn, ds_t ts, rtx *new_pat)
{
int mode_no;
int res;
/* Generate (or regenerate) a recovery check for INSN. */
static rtx
-ia64_gen_spec_check (rtx insn, rtx label, ds_t ds)
+ia64_gen_spec_check (rtx_insn *insn, rtx_insn *label, ds_t ds)
{
rtx op1, pat, check_pat;
gen_func_t gen_check;
required to unwind this insn. */
static void
-ia64_asm_unwind_emit (FILE *asm_out_file, rtx insn)
+ia64_asm_unwind_emit (FILE *asm_out_file, rtx_insn *insn)
{
bool unwind = ia64_except_unwind_info (&global_options) == UI_TARGET;
bool frame = dwarf2out_do_frame ();
static void m32r_file_start (void);
-static int m32r_adjust_priority (rtx, int);
+static int m32r_adjust_priority (rtx_insn *, int);
static int m32r_issue_rate (void);
static void m32r_encode_section_info (tree, rtx, int);
short instructions are scheduled ahead of the long ones. */
static int
-m32r_adjust_priority (rtx insn, int priority)
+m32r_adjust_priority (rtx_insn *insn, int priority)
{
if (m32r_is_insn (insn)
&& get_attr_insn_size (insn) != INSN_SIZE_SHORT)
int scale;
};
-static int m68k_sched_adjust_cost (rtx, rtx, rtx, int);
+static int m68k_sched_adjust_cost (rtx_insn *, rtx, rtx_insn *, int);
static int m68k_sched_issue_rate (void);
-static int m68k_sched_variable_issue (FILE *, int, rtx, int);
+static int m68k_sched_variable_issue (FILE *, int, rtx_insn *, int);
static void m68k_sched_md_init_global (FILE *, int, int);
static void m68k_sched_md_finish_global (FILE *, int);
static void m68k_sched_md_init (FILE *, int, int);
/* Implement adjust_cost scheduler hook.
Return adjusted COST of dependency LINK between DEF_INSN and INSN. */
static int
-m68k_sched_adjust_cost (rtx insn, rtx link ATTRIBUTE_UNUSED, rtx def_insn,
- int cost)
+m68k_sched_adjust_cost (rtx_insn *insn, rtx link ATTRIBUTE_UNUSED,
+ rtx_insn *def_insn, int cost)
{
int delay;
static int
m68k_sched_variable_issue (FILE *sched_dump ATTRIBUTE_UNUSED,
int sched_verbose ATTRIBUTE_UNUSED,
- rtx insn, int can_issue_more)
+ rtx_insn *insn, int can_issue_more)
{
int insn_size;
static rtx mep_legitimize_arg (const struct insn_operand_data *, rtx, int);
static void mep_incompatible_arg (const struct insn_operand_data *, rtx, int, tree);
static rtx mep_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
-static int mep_adjust_cost (rtx, rtx, rtx, int);
+static int mep_adjust_cost (rtx_insn *, rtx, rtx_insn *, int);
static int mep_issue_rate (void);
static rtx_insn *mep_find_ready_insn (rtx_insn **, int, enum attr_slot, int);
static void mep_move_ready_insn (rtx_insn **, int, rtx_insn *);
insns. Not implemented. */
static int
-mep_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
+mep_adjust_cost (rtx_insn *insn, rtx link, rtx_insn *dep_insn, int cost)
{
int cost_specified;
/* Implement TARGET_SCHED_ADJUST_COST. */
static int
-microblaze_adjust_cost (rtx insn ATTRIBUTE_UNUSED, rtx link,
- rtx dep ATTRIBUTE_UNUSED, int cost)
+microblaze_adjust_cost (rtx_insn *insn ATTRIBUTE_UNUSED, rtx link,
+ rtx_insn *dep ATTRIBUTE_UNUSED, int cost)
{
if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
return cost;
is treated like input-dependence. */
static int
-mips_adjust_cost (rtx insn ATTRIBUTE_UNUSED, rtx link,
- rtx dep ATTRIBUTE_UNUSED, int cost)
+mips_adjust_cost (rtx_insn *insn ATTRIBUTE_UNUSED, rtx link,
+ rtx_insn *dep ATTRIBUTE_UNUSED, int cost)
{
if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT
&& TUNE_20KC)
static int
mips_variable_issue (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
- rtx insn, int more)
+ rtx_insn *insn, int more)
{
/* Ignore USEs and CLOBBERs; don't count them against the issue rate. */
if (USEFUL_INSN_P (insn))
/* Implement TARGET_ASM_FINAL_POSTSCAN_INSN. */
static void
-mips_final_postscan_insn (FILE *file ATTRIBUTE_UNUSED, rtx insn,
+mips_final_postscan_insn (FILE *file ATTRIBUTE_UNUSED, rtx_insn *insn,
rtx *opvec, int noperands)
{
if (mips_need_noat_wrapper_p (insn, opvec, noperands))
COST is the current cycle cost for DEP. */
static int
-mn10300_adjust_sched_cost (rtx insn, rtx link, rtx dep, int cost)
+mn10300_adjust_sched_cost (rtx_insn *insn, rtx link, rtx_insn *dep, int cost)
{
rtx insn_set;
rtx dep_set;
static void pa_output_function_prologue (FILE *, HOST_WIDE_INT);
static void update_total_code_bytes (unsigned int);
static void pa_output_function_epilogue (FILE *, HOST_WIDE_INT);
-static int pa_adjust_cost (rtx, rtx, rtx, int);
-static int pa_adjust_priority (rtx, int);
+static int pa_adjust_cost (rtx_insn *, rtx, rtx_insn *, int);
+static int pa_adjust_priority (rtx_insn *, int);
static int pa_issue_rate (void);
static void pa_som_asm_init_sections (void) ATTRIBUTE_UNUSED;
static section *pa_som_tm_clone_table_section (void) ATTRIBUTE_UNUSED;
a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
static int
-pa_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
+pa_adjust_cost (rtx_insn *insn, rtx link, rtx_insn *dep_insn, int cost)
{
enum attr_type attr_type;
/* Adjust scheduling priorities. We use this to try and keep addil
and the next use of %r1 close together. */
static int
-pa_adjust_priority (rtx insn, int priority)
+pa_adjust_priority (rtx_insn *insn, int priority)
{
rtx set = single_set (insn);
rtx src, dest;
int picochip_sched_lookahead (void);
int picochip_sched_issue_rate (void);
-int picochip_sched_adjust_cost (rtx insn, rtx link,
- rtx dep_insn, int cost);
+int picochip_sched_adjust_cost (rtx_insn *insn, rtx link,
+ rtx_insn *dep_insn, int cost);
int picochip_sched_reorder (FILE * file, int verbose, rtx_insn ** ready,
int *n_readyp, int clock);
/* Adjust the scheduling cost between the two given instructions,
which have the given dependency. */
int
-picochip_sched_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
+picochip_sched_adjust_cost (rtx_insn *insn, rtx link, rtx_insn *dep_insn,
+ int cost)
{
if (TARGET_DEBUG)
static bool rs6000_debug_rtx_costs (rtx, int, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, enum machine_mode, addr_space_t,
bool);
-static int rs6000_debug_adjust_cost (rtx, rtx, rtx, int);
+static int rs6000_debug_adjust_cost (rtx_insn *, rtx, rtx_insn *, int);
static bool is_microcoded_insn (rtx);
static bool is_nonpipeline_insn (rtx);
static bool is_cracked_insn (rtx);
instructions to issue in this cycle. */
static int
-rs6000_variable_issue_1 (rtx insn, int more)
+rs6000_variable_issue_1 (rtx_insn *insn, int more)
{
last_scheduled_insn = insn;
if (GET_CODE (PATTERN (insn)) == USE
}
static int
-rs6000_variable_issue (FILE *stream, int verbose, rtx insn, int more)
+rs6000_variable_issue (FILE *stream, int verbose, rtx_insn *insn, int more)
{
int r = rs6000_variable_issue_1 (insn, more);
if (verbose)
a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
static int
-rs6000_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
+rs6000_adjust_cost (rtx_insn *insn, rtx link, rtx_insn *dep_insn, int cost)
{
enum attr_type attr_type;
/* Debug version of rs6000_adjust_cost. */
static int
-rs6000_debug_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
+rs6000_debug_adjust_cost (rtx_insn *insn, rtx link, rtx_insn *dep_insn,
+ int cost)
{
int ret = rs6000_adjust_cost (insn, link, dep_insn, cost);
priorities of insns. */
static int
-rs6000_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority)
+rs6000_adjust_priority (rtx_insn *insn ATTRIBUTE_UNUSED, int priority)
{
rtx load_mem, str_mem;
/* On machines (like the 750) which have asymmetric integer units,
/* We are choosing insn from the ready queue. Return zero if INSN can be
chosen. */
static int
-rs6000_use_sched_lookahead_guard (rtx insn, int ready_index)
+rs6000_use_sched_lookahead_guard (rtx_insn *insn, int ready_index)
{
if (ready_index == 0)
return 0;
skipping any "non-active" insns - insns that will not actually occupy
an issue slot. Return NULL_RTX if such an insn is not found. */
-static rtx
-get_next_active_insn (rtx insn, rtx tail)
+static rtx_insn *
+get_next_active_insn (rtx_insn *insn, rtx_insn *tail)
{
if (insn == NULL_RTX || insn == tail)
- return NULL_RTX;
+ return NULL;
while (1)
{
insn = NEXT_INSN (insn);
if (insn == NULL_RTX || insn == tail)
- return NULL_RTX;
+ return NULL;
if (CALL_P (insn)
|| JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
start a new group. */
static int
-redefine_groups (FILE *dump, int sched_verbose, rtx prev_head_insn, rtx tail)
+redefine_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
+ rtx_insn *tail)
{
- rtx insn, next_insn;
+ rtx_insn *insn, *next_insn;
int issue_rate;
int can_issue_more;
int slot, i;
returns the number of dispatch groups found. */
static int
-pad_groups (FILE *dump, int sched_verbose, rtx prev_head_insn, rtx tail)
+pad_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
+ rtx_insn *tail)
{
- rtx insn, next_insn;
+ rtx_insn *insn, *next_insn;
rtx nop;
int issue_rate;
int can_issue_more;
A STD instruction should be scheduled earlier,
in order to use the bypass. */
static int
-s390_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority)
+s390_adjust_priority (rtx_insn *insn, int priority)
{
if (! INSN_P (insn))
return priority;
execute insns that carry a unique label. */
static bool
-s390_cannot_copy_insn_p (rtx insn)
+s390_cannot_copy_insn_p (rtx_insn *insn)
{
rtx label = s390_execute_label (insn);
return label && label != const0_rtx;
last_scheduled_insn in order to make it available for
s390_sched_reorder. */
static int
-s390_sched_variable_issue (FILE *file, int verbose, rtx uncast_insn, int more)
+s390_sched_variable_issue (FILE *file, int verbose, rtx_insn *insn, int more)
{
- rtx_insn *insn = as_a <rtx_insn *> (uncast_insn);
last_scheduled_insn = insn;
if (s390_tune == PROCESSOR_2827_ZEC12
#define TARGET_CC_MODES_COMPATIBLE s390_cc_modes_compatible
#undef TARGET_INVALID_WITHIN_DOLOOP
-#define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_null
+#define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_insn_null
#ifdef HAVE_AS_TLS
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
static HOST_WIDE_INT rounded_frame_size (int);
static bool sh_frame_pointer_required (void);
static void sh_emit_mode_set (int, int, int, HARD_REG_SET);
-static int sh_mode_needed (int, rtx);
-static int sh_mode_after (int, int, rtx);
+static int sh_mode_needed (int, rtx_insn *);
+static int sh_mode_after (int, int, rtx_insn *);
static int sh_mode_entry (int);
static int sh_mode_exit (int);
static int sh_mode_priority (int entity, int n);
static void sh_insert_attributes (tree, tree *);
static const char *sh_check_pch_target_flags (int);
static int sh_register_move_cost (enum machine_mode, reg_class_t, reg_class_t);
-static int sh_adjust_cost (rtx, rtx, rtx, int);
+static int sh_adjust_cost (rtx_insn *, rtx, rtx_insn *, int);
static int sh_issue_rate (void);
-static int sh_dfa_new_cycle (FILE *, int, rtx, int, int, int *sort_p);
+static int sh_dfa_new_cycle (FILE *, int, rtx_insn *, int, int, int *sort_p);
static short find_set_regmode_weight (rtx, enum machine_mode);
static short find_insn_regmode_weight (rtx, enum machine_mode);
static void find_regmode_weight (basic_block, enum machine_mode);
static int sh_reorder (FILE *, int, rtx_insn **, int *, int);
static int sh_reorder2 (FILE *, int, rtx_insn **, int *, int);
static void sh_md_init (FILE *, int, int);
-static int sh_variable_issue (FILE *, int, rtx, int);
+static int sh_variable_issue (FILE *, int, rtx_insn *, int);
static bool sh_function_ok_for_sibcall (tree, tree);
static int addsubcosts (rtx);
static int multcosts (rtx);
static bool unspec_caller_rtx_p (rtx);
-static bool sh_cannot_copy_insn_p (rtx);
+static bool sh_cannot_copy_insn_p (rtx_insn *);
static bool sh_rtx_costs (rtx, int, int, int, int *, bool);
static int sh_address_cost (rtx, enum machine_mode, addr_space_t, bool);
static int sh_pr_n_sets (void);
/* Indicate that INSN cannot be duplicated. This is true for insn
that generates a unique label. */
static bool
-sh_cannot_copy_insn_p (rtx insn)
+sh_cannot_copy_insn_p (rtx_insn *insn)
{
rtx pat;
the same cost as a data-dependence. The return value should be
the new value for COST. */
static int
-sh_adjust_cost (rtx insn, rtx link ATTRIBUTE_UNUSED, rtx dep_insn, int cost)
+sh_adjust_cost (rtx_insn *insn, rtx link ATTRIBUTE_UNUSED,
+ rtx_insn *dep_insn, int cost)
{
rtx reg, use_pat;
static int
sh_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
int sched_verbose ATTRIBUTE_UNUSED,
- rtx insn,
+ rtx_insn *insn,
int can_issue_more)
{
if (GET_CODE (PATTERN (insn)) != USE
static int
sh_dfa_new_cycle (FILE *sched_dump ATTRIBUTE_UNUSED,
int sched_verbose ATTRIBUTE_UNUSED,
- rtx insn ATTRIBUTE_UNUSED,
+ rtx_insn *insn ATTRIBUTE_UNUSED,
int last_clock_var,
int clock_var,
int *sort_p)
}
static int
-sh_mode_needed (int entity ATTRIBUTE_UNUSED, rtx insn)
+sh_mode_needed (int entity ATTRIBUTE_UNUSED, rtx_insn *insn)
{
return recog_memoized (insn) >= 0 ? get_attr_fp_mode (insn) : FP_MODE_NONE;
}
static int
-sh_mode_after (int entity ATTRIBUTE_UNUSED, int mode, rtx insn)
+sh_mode_after (int entity ATTRIBUTE_UNUSED, int mode, rtx_insn *insn)
{
if (TARGET_HITACHI && recog_memoized (insn) >= 0 &&
get_attr_fp_set (insn) != FP_SET_NONE)
static int function_arg_slotno (const CUMULATIVE_ARGS *, enum machine_mode,
const_tree, bool, bool, int *, int *);
-static int supersparc_adjust_cost (rtx, rtx, rtx, int);
-static int hypersparc_adjust_cost (rtx, rtx, rtx, int);
+static int supersparc_adjust_cost (rtx_insn *, rtx, rtx_insn *, int);
+static int hypersparc_adjust_cost (rtx_insn *, rtx, rtx_insn *, int);
static void sparc_emit_set_const32 (rtx, rtx);
static void sparc_emit_set_const64 (rtx, rtx);
static void sparc_solaris_elf_asm_named_section (const char *, unsigned int,
tree) ATTRIBUTE_UNUSED;
#endif
-static int sparc_adjust_cost (rtx, rtx, rtx, int);
+static int sparc_adjust_cost (rtx_insn *, rtx, rtx_insn *, int);
static int sparc_issue_rate (void);
static void sparc_sched_init (FILE *, int, int);
static int sparc_use_sched_lookahead (void);
a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
static int
-supersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
+supersparc_adjust_cost (rtx_insn *insn, rtx link, rtx_insn *dep_insn, int cost)
{
enum attr_type insn_type;
}
static int
-hypersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
+hypersparc_adjust_cost (rtx_insn *insn, rtx link, rtx_insn *dep_insn, int cost)
{
enum attr_type insn_type, dep_type;
rtx pat = PATTERN(insn);
}
static int
-sparc_adjust_cost(rtx insn, rtx link, rtx dep, int cost)
+sparc_adjust_cost(rtx_insn *insn, rtx link, rtx_insn *dep, int cost)
{
switch (sparc_cpu)
{
static int
spu_sched_variable_issue (FILE *file ATTRIBUTE_UNUSED,
int verbose ATTRIBUTE_UNUSED,
- rtx uncast_insn, int more)
+ rtx_insn *insn, int more)
{
int len;
int p;
- rtx_insn *insn = as_a <rtx_insn *> (uncast_insn);
if (GET_CODE (PATTERN (insn)) == USE
|| GET_CODE (PATTERN (insn)) == CLOBBER
|| (len = get_attr_length (insn)) == 0)
/* INSN is dependent on DEP_INSN. */
static int
-spu_sched_adjust_cost (rtx uncast_insn, rtx link, rtx uncast_dep_insn, int cost)
+spu_sched_adjust_cost (rtx_insn *insn, rtx link, rtx_insn *dep_insn, int cost)
{
rtx set;
- rtx_insn *insn = as_a <rtx_insn *> (uncast_insn);
- rtx_insn *dep_insn = as_a <rtx_insn *> (uncast_dep_insn);
/* The blockage pattern is used to prevent instructions from being
moved across it and has no cost. */
/* Implement TARGET_SCHED_ADJUST_COST. */
static int
-tilegx_sched_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
+tilegx_sched_adjust_cost (rtx_insn *insn, rtx link, rtx_insn *dep_insn,
+ int cost)
{
/* If we have a true dependence, INSN is a call, and DEP_INSN
defines a register that is needed by the call (argument or stack
/* Implement TARGET_SCHED_ADJUST_COST. */
static int
-tilepro_sched_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
+tilepro_sched_adjust_cost (rtx_insn *insn, rtx link, rtx_insn *dep_insn,
+ int cost)
{
/* If we have a true dependence, INSN is a call, and DEP_INSN
defines a register that is needed by the call (argument or stack
@samp{TARGET_SCHED_VARIABLE_ISSUE}.
@end deftypefn
-@deftypefn {Target Hook} int TARGET_SCHED_VARIABLE_ISSUE (FILE *@var{file}, int @var{verbose}, rtx @var{insn}, int @var{more})
+@deftypefn {Target Hook} int TARGET_SCHED_VARIABLE_ISSUE (FILE *@var{file}, int @var{verbose}, rtx_insn *@var{insn}, int @var{more})
This hook is executed by the scheduler after it has scheduled an insn
from the ready list. It should return the number of insns which can
still be issued in the current cycle. The default is
was scheduled.
@end deftypefn
-@deftypefn {Target Hook} int TARGET_SCHED_ADJUST_COST (rtx @var{insn}, rtx @var{link}, rtx @var{dep_insn}, int @var{cost})
+@deftypefn {Target Hook} int TARGET_SCHED_ADJUST_COST (rtx_insn *@var{insn}, rtx @var{link}, rtx_insn *@var{dep_insn}, int @var{cost})
This function corrects the value of @var{cost} based on the
relationship between @var{insn} and @var{dep_insn} through the
dependence @var{link}. It should return the new value. The default
@pxref{Processor pipeline description}.
@end deftypefn
-@deftypefn {Target Hook} int TARGET_SCHED_ADJUST_PRIORITY (rtx @var{insn}, int @var{priority})
+@deftypefn {Target Hook} int TARGET_SCHED_ADJUST_PRIORITY (rtx_insn *@var{insn}, int @var{priority})
This hook adjusts the integer scheduling priority @var{priority} of
@var{insn}. It should return the new priority. Increase the priority to
execute @var{insn} earlier, reduce the priority to execute @var{insn}
This hook is used to check whether target platform supports macro fusion.
@end deftypefn
-@deftypefn {Target Hook} bool TARGET_SCHED_MACRO_FUSION_PAIR_P (rtx @var{condgen}, rtx @var{condjmp})
+@deftypefn {Target Hook} bool TARGET_SCHED_MACRO_FUSION_PAIR_P (rtx_insn *@var{condgen}, rtx_insn *@var{condjmp})
This hook is used to check whether two insns could be macro fused for
target microarchitecture. If this hook returns true for the given insn pair
(@var{condgen} and @var{condjmp}), scheduler will put them into a sched
The hook can be used to initialize data used by the previous hook.
@end deftypefn
-@deftypefn {Target Hook} rtx TARGET_SCHED_DFA_POST_CYCLE_INSN (void)
+@deftypefn {Target Hook} {rtx_insn *} TARGET_SCHED_DFA_POST_CYCLE_INSN (void)
The hook is analogous to @samp{TARGET_SCHED_DFA_PRE_CYCLE_INSN} but used
to change the state as if the insn were scheduled when the new
simulated processor cycle finishes.
The default is no multipass scheduling.
@end deftypefn
-@deftypefn {Target Hook} int TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD (rtx @var{insn}, int @var{ready_index})
+@deftypefn {Target Hook} int TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD (rtx_insn *@var{insn}, int @var{ready_index})
This hook controls what insns from the ready insn queue will be
considered for the multipass insn scheduling. If the hook returns
scheduling.
@end deftypefn
-@deftypefn {Target Hook} void TARGET_SCHED_FIRST_CYCLE_MULTIPASS_ISSUE (void *@var{data}, signed char *@var{ready_try}, int @var{n_ready}, rtx @var{insn}, const void *@var{prev_data})
+@deftypefn {Target Hook} void TARGET_SCHED_FIRST_CYCLE_MULTIPASS_ISSUE (void *@var{data}, signed char *@var{ready_try}, int @var{n_ready}, rtx_insn *@var{insn}, const void *@var{prev_data})
This hook is called when multipass scheduling evaluates instruction INSN.
@end deftypefn
This hook finalizes target-specific data used in multipass scheduling.
@end deftypefn
-@deftypefn {Target Hook} int TARGET_SCHED_DFA_NEW_CYCLE (FILE *@var{dump}, int @var{verbose}, rtx @var{insn}, int @var{last_clock}, int @var{clock}, int *@var{sort_p})
+@deftypefn {Target Hook} int TARGET_SCHED_DFA_NEW_CYCLE (FILE *@var{dump}, int @var{verbose}, rtx_insn *@var{insn}, int @var{last_clock}, int @var{clock}, int *@var{sort_p})
This hook is called by the insn scheduler before issuing @var{insn}
on cycle @var{clock}. If the hook returns nonzero,
@var{insn} is not issued on this processor cycle. Instead,
Deallocate a store for target scheduling context pointed to by @var{tc}.
@end deftypefn
-@deftypefn {Target Hook} int TARGET_SCHED_SPECULATE_INSN (rtx @var{insn}, unsigned int @var{dep_status}, rtx *@var{new_pat})
+@deftypefn {Target Hook} int TARGET_SCHED_SPECULATE_INSN (rtx_insn *@var{insn}, unsigned int @var{dep_status}, rtx *@var{new_pat})
This hook is called by the insn scheduler when @var{insn} has only
speculative dependencies and therefore can be scheduled speculatively.
The hook is used to check if the pattern of @var{insn} has a speculative
instruction should branch to recovery code, or @code{false} otherwise.
@end deftypefn
-@deftypefn {Target Hook} rtx TARGET_SCHED_GEN_SPEC_CHECK (rtx @var{insn}, rtx @var{label}, unsigned int @var{ds})
+@deftypefn {Target Hook} rtx TARGET_SCHED_GEN_SPEC_CHECK (rtx_insn *@var{insn}, rtx_insn *@var{label}, unsigned int @var{ds})
This hook is called by the insn scheduler to generate a pattern for recovery
check instruction. If @var{mutate_p} is zero, then @var{insn} is a
speculative instruction for which the check should be generated.
of instructions divided by the issue rate.
@end deftypefn
-@deftypefn {Target Hook} bool TARGET_SCHED_DISPATCH (rtx @var{insn}, int @var{x})
+@deftypefn {Target Hook} bool TARGET_SCHED_DISPATCH (rtx_insn *@var{insn}, int @var{x})
This hook is called by Haifa Scheduler. It returns true if dispatch scheduling
is supported in hardware and the condition specified in the parameter is true.
@end deftypefn
-@deftypefn {Target Hook} void TARGET_SCHED_DISPATCH_DO (rtx @var{insn}, int @var{x})
+@deftypefn {Target Hook} void TARGET_SCHED_DISPATCH_DO (rtx_insn *@var{insn}, int @var{x})
This hook is called by Haifa Scheduler. It performs the operation specified
in its second parameter.
@end deftypefn
If this macro is not defined, it is equivalent to a null statement.
@end defmac
-@deftypefn {Target Hook} void TARGET_ASM_FINAL_POSTSCAN_INSN (FILE *@var{file}, rtx @var{insn}, rtx *@var{opvec}, int @var{noperands})
+@deftypefn {Target Hook} void TARGET_ASM_FINAL_POSTSCAN_INSN (FILE *@var{file}, rtx_insn *@var{insn}, rtx *@var{opvec}, int @var{noperands})
If defined, this target hook is a function which is executed just after the
output of assembler code for @var{insn}, to change the mode of the assembler
if necessary.
If the target implements @code{TARGET_ASM_UNWIND_EMIT}, this hook may be used to emit a directive to install a personality hook into the unwind info. This hook should not be used if dwarf2 unwind info is used.
@end deftypefn
-@deftypefn {Target Hook} void TARGET_ASM_UNWIND_EMIT (FILE *@var{stream}, rtx @var{insn})
+@deftypefn {Target Hook} void TARGET_ASM_UNWIND_EMIT (FILE *@var{stream}, rtx_insn *@var{insn})
This target hook emits assembly directives required to unwind the
given instruction. This is only used when @code{TARGET_EXCEPT_UNWIND_INFO}
returns @code{UI_TARGET}.
Generate one or more insns to set @var{entity} to @var{mode}. @var{hard_reg_live} is the set of hard registers live at the point where the insn(s) are to be inserted. @var{prev_mode} indicates the mode to switch from. Sets of a lower numbered entity will be emitted before sets of a higher numbered entity to a mode of the same or lower priority.
@end deftypefn
-@deftypefn {Target Hook} int TARGET_MODE_NEEDED (int @var{entity}, rtx @var{insn})
+@deftypefn {Target Hook} int TARGET_MODE_NEEDED (int @var{entity}, rtx_insn *@var{insn})
@var{entity} is an integer specifying a mode-switched entity. If @code{OPTIMIZE_MODE_SWITCHING} is defined, you must define this macro to return an integer value not larger than the corresponding element in @code{NUM_MODES_FOR_MODE_SWITCHING}, to denote the mode that @var{entity} must be switched into prior to the execution of @var{insn}.
@end deftypefn
-@deftypefn {Target Hook} int TARGET_MODE_AFTER (int @var{entity}, int @var{mode}, rtx @var{insn})
+@deftypefn {Target Hook} int TARGET_MODE_AFTER (int @var{entity}, int @var{mode}, rtx_insn *@var{insn})
@var{entity} is an integer specifying a mode-switched entity. If this macro is defined, it is evaluated for every @var{insn} during mode switching. It determines the mode that an insn results in (if different from the incoming mode).
@end deftypefn
if the loop must be the innermost, and if there are no other restrictions.
@end deftypefn
-@deftypefn {Target Hook} {const char *} TARGET_INVALID_WITHIN_DOLOOP (const_rtx @var{insn})
+@deftypefn {Target Hook} {const char *} TARGET_INVALID_WITHIN_DOLOOP (const rtx_insn *@var{insn})
Take an instruction in @var{insn} and return NULL if it is valid within a
low-overhead loop, otherwise return a string explaining why doloop
loops containing function calls or branch on table instructions.
@end deftypefn
-@deftypefn {Target Hook} bool TARGET_LEGITIMATE_COMBINED_INSN (rtx @var{insn})
+@deftypefn {Target Hook} bool TARGET_LEGITIMATE_COMBINED_INSN (rtx_insn *@var{insn})
Take an instruction in @var{insn} and return @code{false} if the instruction is not appropriate as a combination of two or more instructions. The default is to accept all instructions.
@end deftypefn
if (lookahead <= 0 || SCHED_GROUP_P (ready_element (ready, 0))
|| DEBUG_INSN_P (ready_element (ready, 0)))
{
- if (targetm.sched.dispatch (NULL_RTX, IS_DISPATCH_ON))
+ if (targetm.sched.dispatch (NULL, IS_DISPATCH_ON))
*insn_ptr = ready_remove_first_dispatch (ready);
else
*insn_ptr = ready_remove_first (ready);
if (TODO_SPEC (insn) & SPECULATIVE)
generate_recovery_code (insn);
- if (targetm.sched.dispatch (NULL_RTX, IS_DISPATCH_ON))
+ if (targetm.sched.dispatch (NULL, IS_DISPATCH_ON))
targetm.sched.dispatch_do (insn, ADD_TO_DISPATCH_WINDOW);
/* Update counters, etc in the scheduler's front end. */
flag_schedule_speculative_load = 0;
#endif
- if (targetm.sched.dispatch (NULL_RTX, IS_DISPATCH_ON))
- targetm.sched.dispatch_do (NULL_RTX, DISPATCH_INIT);
+ if (targetm.sched.dispatch (NULL, IS_DISPATCH_ON))
+ targetm.sched.dispatch_do (NULL, DISPATCH_INIT);
if (live_range_shrinkage_p)
sched_pressure = SCHED_PRESSURE_WEIGHTED;
current instruction pattern,
1 - need to change pattern for *NEW_PAT to be speculative. */
int
-sched_speculate_insn (rtx insn, ds_t request, rtx *new_pat)
+sched_speculate_insn (rtx_insn *insn, ds_t request, rtx *new_pat)
{
gcc_assert (current_sched_info->flags & DO_SPECULATION
&& (request & SPECULATIVE)
}
}
- if (targetm.sched.dispatch (NULL_RTX, DISPATCH_VIOLATION))
+ if (targetm.sched.dispatch (NULL, DISPATCH_VIOLATION))
return ready_remove_first (ready);
for (i = 1; i < ready->n_ready; i++)
}
bool
-hook_bool_rtx_true (rtx a ATTRIBUTE_UNUSED)
+hook_bool_rtx_insn_true (rtx_insn *insn ATTRIBUTE_UNUSED)
{
return true;
}
return NULL;
}
-/* Generic hook that takes a rtx and returns a NULL string. */
+/* Generic hook that takes an rtx_insn * and returns a NULL string. */
const char *
-hook_constcharptr_const_rtx_null (const_rtx r ATTRIBUTE_UNUSED)
+hook_constcharptr_const_rtx_insn_null (const rtx_insn *insn ATTRIBUTE_UNUSED)
{
return NULL;
}
return NULL;
}
-/* Generic hook that takes a rtx and an int and returns a bool. */
+/* Generic hook that takes a rtx_insn * and an int and returns a bool. */
bool
-hook_bool_rtx_int_false (rtx insn ATTRIBUTE_UNUSED, int mode ATTRIBUTE_UNUSED)
+hook_bool_rtx_insn_int_false (rtx_insn *insn ATTRIBUTE_UNUSED,
+ int mode ATTRIBUTE_UNUSED)
{
return false;
}
-/* Generic hook that takes a rtx and an int and returns void. */
+/* Generic hook that takes a rtx_insn * and an int and returns void. */
void
-hook_void_rtx_int (rtx insn ATTRIBUTE_UNUSED, int mode ATTRIBUTE_UNUSED)
+hook_void_rtx_insn_int (rtx_insn *insn ATTRIBUTE_UNUSED,
+ int mode ATTRIBUTE_UNUSED)
{
}
HOST_WIDE_INT,
HOST_WIDE_INT,
const_tree);
-extern bool hook_bool_rtx_true (rtx);
+extern bool hook_bool_rtx_insn_true (rtx_insn *);
extern bool hook_bool_rtx_false (rtx);
-extern bool hook_bool_rtx_int_false (rtx, int);
+extern bool hook_bool_rtx_insn_int_false (rtx_insn *, int);
extern bool hook_bool_uintp_uintp_false (unsigned int *, unsigned int *);
extern bool hook_bool_rtx_int_int_int_intp_bool_false (rtx, int, int, int,
int *, bool);
extern void hook_void_void (void);
extern void hook_void_constcharptr (const char *);
-extern void hook_void_rtx_int (rtx, int);
+extern void hook_void_rtx_insn_int (rtx_insn *, int);
extern void hook_void_FILEptr_constcharptr (FILE *, const char *);
extern bool hook_bool_FILEptr_rtx_false (FILE *, rtx);
extern void hook_void_tree (tree);
extern const char *hook_constcharptr_void_null (void);
extern const char *hook_constcharptr_const_tree_null (const_tree);
-extern const char *hook_constcharptr_const_rtx_null (const_rtx);
+extern const char *hook_constcharptr_const_rtx_insn_null (const rtx_insn *);
extern const char *hook_constcharptr_const_tree_const_tree_null (const_tree, const_tree);
extern const char *hook_constcharptr_int_const_tree_null (int, const_tree);
extern const char *hook_constcharptr_int_const_tree_const_tree_null (int, const_tree, const_tree);
from scheduling them apart. */
static void
-sched_macro_fuse_insns (rtx insn)
+sched_macro_fuse_insns (rtx_insn *insn)
{
- rtx prev;
+ rtx_insn *prev;
if (any_condjump_p (insn))
{
extern void sched_extend_ready_list (int);
extern void sched_finish_ready_list (void);
extern void sched_change_pattern (rtx, rtx);
-extern int sched_speculate_insn (rtx, ds_t, rtx *);
+extern int sched_speculate_insn (rtx_insn *, ds_t, rtx *);
extern void unlink_bb_notes (basic_block, basic_block);
extern void add_block (basic_block, basic_block);
extern rtx_note *bb_note (basic_block);
rtx_insn *insn_rtx;
insn_t insn;
basic_block recovery_block;
- rtx label;
+ rtx_insn *label;
/* Create a recovery block if target is going to emit branchy check, or if
ORIG_INSN was speculative already. */
else
{
recovery_block = NULL;
- label = NULL_RTX;
+ label = NULL;
}
/* Get pattern of the check. */
"This target hook emits assembly directives required to unwind the\n\
given instruction. This is only used when @code{TARGET_EXCEPT_UNWIND_INFO}\n\
returns @code{UI_TARGET}.",
- void, (FILE *stream, rtx insn),
+ void, (FILE *stream, rtx_insn *insn),
NULL)
DEFHOOKPOD
The contents of this vector are what was used to convert the insn\n\
template into assembler code, so you can change the assembler mode\n\
by checking the contents of the vector.",
- void, (FILE *file, rtx insn, rtx *opvec, int noperands),
+ void, (FILE *file, rtx_insn *insn, rtx *opvec, int noperands),
NULL)
/* Emit the trampoline template. This hook may be NULL. */
times of the first and the second insns. If these values are not\n\
acceptable, you could use the hook to modify them too. See also\n\
@pxref{Processor pipeline description}.",
- int, (rtx insn, rtx link, rtx dep_insn, int cost), NULL)
+ int, (rtx_insn *insn, rtx link, rtx_insn *dep_insn, int cost), NULL)
/* Adjust the priority of an insn as you see fit. Returns the new priority. */
DEFHOOK
execute @var{insn} earlier, reduce the priority to execute @var{insn}\n\
later. Do not define this hook if you do not need to adjust the\n\
scheduling priorities of insns.",
- int, (rtx insn, int priority), NULL)
+ int, (rtx_insn *insn, int priority), NULL)
/* Function which returns the maximum number of insns that can be
scheduled in the same machine cycle. This must be constant
debug output to. @var{verbose} is the verbose level provided by\n\
@option{-fsched-verbose-@var{n}}. @var{insn} is the instruction that\n\
was scheduled.",
- int, (FILE *file, int verbose, rtx insn, int more), NULL)
+ int, (FILE *file, int verbose, rtx_insn *insn, int more), NULL)
/* Initialize machine-dependent scheduling code. */
DEFHOOK
target microarchitecture. If this hook returns true for the given insn pair\n\
(@var{condgen} and @var{condjmp}), scheduler will put them into a sched\n\
group, and they will not be scheduled apart.",
- bool, (rtx condgen, rtx condjmp), NULL)
+ bool, (rtx_insn *condgen, rtx_insn *condjmp), NULL)
/* The following member value is a pointer to a function called
after evaluation forward dependencies of insns in chain given
"The hook is analogous to @samp{TARGET_SCHED_DFA_PRE_CYCLE_INSN} but used\n\
to change the state as if the insn were scheduled when the new\n\
simulated processor cycle finishes.",
- rtx, (void), NULL)
+ rtx_insn *, (void), NULL)
/* The values of the following two members are pointers to
functions used to simplify the automaton descriptions.
to allow backends make correct judgements.\n\
\n\
The default is that any ready insns can be chosen to be issued.",
- int, (rtx insn, int ready_index), NULL)
+ int, (rtx_insn *insn, int ready_index), NULL)
/* This hook prepares the target for a new round of multipass
scheduling.
DEFHOOK
(first_cycle_multipass_issue,
"This hook is called when multipass scheduling evaluates instruction INSN.",
- void, (void *data, signed char *ready_try, int n_ready, rtx insn,
+ void, (void *data, signed char *ready_try, int n_ready, rtx_insn *insn,
const void *prev_data), NULL)
/* This hook is called when multipass scheduling backtracks from evaluation of
@var{last_clock} and @var{clock} are, respectively, the\n\
processor cycle on which the previous insn has been issued,\n\
and the current processor cycle.",
- int, (FILE *dump, int verbose, rtx insn, int last_clock,
+ int, (FILE *dump, int verbose, rtx_insn *insn, int last_clock,
int clock, int *sort_p),
NULL)
"Given the current cost, @var{cost}, of an insn, @var{insn}, calculate and\
return a new cost based on its relationship to @var{dep_insn} through the\
dependence of weakness @var{dw}. The default is to make no adjustment.",
- int, (rtx insn, int dep_type1, rtx dep_insn, int cost, unsigned int dw), NULL)
+ int, (rtx_insn *insn, int dep_type1, rtx_insn *dep_insn, int cost,
+ unsigned int dw),
+ NULL)
/* The following member value is a pointer to a function called
by the insn scheduler. This hook is called to notify the backend
or @minus{}1, if it doesn't. @var{request} describes the type of requested\n\
speculation. If the return value equals 1 then @var{new_pat} is assigned\n\
the generated speculative pattern.",
- int, (rtx insn, unsigned int dep_status, rtx *new_pat), NULL)
+ int, (rtx_insn *insn, unsigned int dep_status, rtx *new_pat), NULL)
/* The following member value is a pointer to a function called
by the insn scheduler. It should return true if the check instruction
recovery code (a simple check). If @var{mutate_p} is nonzero, then\n\
a pattern for a branchy check corresponding to a simple check denoted by\n\
@var{insn} should be generated. In this case @var{label} can't be null.",
- rtx, (rtx insn, rtx label, unsigned int ds), NULL)
+ rtx, (rtx_insn *insn, rtx_insn *label, unsigned int ds), NULL)
/* The following member value is a pointer to a function that provides
information about the speculation capabilities of the target.
DEFHOOK_UNDOC
(get_insn_spec_ds,
"Return speculation types of instruction @var{insn}.",
- unsigned int, (rtx insn), NULL)
+ unsigned int, (rtx_insn *insn), NULL)
DEFHOOK_UNDOC
(get_insn_checked_ds,
"Return speculation types that are checked for instruction @var{insn}",
- unsigned int, (rtx insn), NULL)
+ unsigned int, (rtx_insn *insn), NULL)
DEFHOOK_UNDOC
(skip_rtx_p,
(dispatch_do,
"This hook is called by Haifa Scheduler. It performs the operation specified\n\
in its second parameter.",
-void, (rtx insn, int x),
-hook_void_rtx_int)
+void, (rtx_insn *insn, int x),
+hook_void_rtx_insn_int)
/* The following member value is a function that returns true if
   dispatch scheduling is supported in hardware and condition passed
(dispatch,
"This hook is called by Haifa Scheduler. It returns true if dispatch scheduling\n\
is supported in hardware and the condition specified in the parameter is true.",
-bool, (rtx insn, int x),
-hook_bool_rtx_int_false)
+bool, (rtx_insn *insn, int x),
+hook_bool_rtx_insn_int_false)
DEFHOOKPOD
(exposed_pipeline,
DEFHOOK_UNDOC
(cannot_copy_insn_p,
"True if the insn @var{x} cannot be duplicated.",
- bool, (rtx), NULL)
+ bool, (rtx_insn *), NULL)
/* True if X is considered to be commutative. */
DEFHOOK
the reason why the doloop could not be applied.\n\
By default, the RTL loop optimizer does not use a present doloop pattern for\n\
loops containing function calls or branch on table instructions.",
- const char *, (const_rtx insn),
+ const char *, (const rtx_insn *insn),
default_invalid_within_doloop)
/* Returns true for a legitimate combined insn. */
"Take an instruction in @var{insn} and return @code{false} if the instruction\
is not appropriate as a combination of two or more instructions. The\
default is to accept all instructions.",
- bool, (rtx insn),
- hook_bool_rtx_true)
+ bool, (rtx_insn *insn),
+ hook_bool_rtx_insn_true)
DEFHOOK
(valid_dllimport_attribute_p,
DEFHOOK
(needed,
"@var{entity} is an integer specifying a mode-switched entity. If @code{OPTIMIZE_MODE_SWITCHING} is defined, you must define this macro to return an integer value not larger than the corresponding element in @code{NUM_MODES_FOR_MODE_SWITCHING}, to denote the mode that @var{entity} must be switched into prior to the execution of @var{insn}.",
- int, (int entity, rtx insn), NULL)
+ int, (int entity, rtx_insn *insn), NULL)
DEFHOOK
(after,
"@var{entity} is an integer specifying a mode-switched entity. If this macro is defined, it is evaluated for every @var{insn} during mode switching. It determines the mode that an insn results in (if different from the incoming mode).",
- int, (int entity, int mode, rtx insn), NULL)
+ int, (int entity, int mode, rtx_insn *insn), NULL)
DEFHOOK
(entry,
these cases. */
const char *
-default_invalid_within_doloop (const_rtx insn)
+default_invalid_within_doloop (const rtx_insn *insn)
{
if (CALL_P (insn))
return "Function call in loop.";
extern bool default_has_ifunc_p (void);
-extern const char * default_invalid_within_doloop (const_rtx);
+extern const char * default_invalid_within_doloop (const rtx_insn *);
extern tree default_builtin_vectorized_function (tree, tree, tree);