+2014-09-09 David Malcolm <dmalcolm@redhat.com>
+
+ * rtl.h (single_set_2): Strengthen first param from const_rtx to
+ const rtx_insn *, and move prototype to above...
+ (single_set): ...this. Convert this from a macro to an inline
+ function, enforcing the requirement that the param is a const
+ rtx_insn *.
+ (find_args_size_adjust): Strengthen param from rtx to rtx_insn *.
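
The practical effect of the macro-to-inline conversion is that the
argument type is now checked by the compiler. A minimal sketch of a
hypothetical caller (names here are illustrative, not from the patch):

    /* Hypothetical caller, for illustration only.  */
    static rtx
    example_single_set (rtx x, rtx_insn *insn)
    {
      rtx a = single_set (insn);                  /* OK: already an rtx_insn *.  */
      rtx b = single_set (as_a <rtx_insn *> (x)); /* plain rtx needs a checked cast.  */
      return a ? a : b;
    }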
+
+ * config/arm/aarch-common-protos.h (aarch_crypto_can_dual_issue):
+ Strengthen both params from rtx to rtx_insn *.
+ * config/arm/aarch-common.c (aarch_crypto_can_dual_issue):
+ Likewise; introduce locals "producer_set", "consumer_set", using
+ them in place of "producer" and "consumer" when dealing with SET
+ rather than insn.
+ * config/avr/avr.c (avr_out_plus): Add checked cast to rtx_insn *
+ when invoking single_set in region guarded by INSN_P.
+ (avr_out_bitop): Likewise.
+ (_reg_unused_after): Introduce local rtx_sequence * "seq" in
+ region guarded by GET_CODE check, using its methods for clarity
+ and to strengthen local "this_insn" from rtx to rtx_insn *.
+ * config/avr/avr.md (define_insn_and_split "xload8<mode>_A"):
+ Strengthen local "insn" from rtx to rtx_insn *.
+ (define_insn_and_split "xload<mode>_A"): Likewise.
+ * config/bfin/bfin.c (trapping_loads_p): Likewise for param
+ "insn".
+ (find_load): Likewise for return type.
+ (workaround_speculation): Likewise for both locals named
+ "load_insn".
+ * config/cris/cris.c (cris_cc0_user_requires_cmp): Likewise for
+ local "cc0_user".
+ * config/cris/cris.md (define_peephole2 ; moversideqi): Likewise
+ for local "prev".
+ * config/h8300/h8300-protos.h (notice_update_cc): Likewise for
+ param 2.
+ * config/h8300/h8300.c (notice_update_cc): Likewise.
+ * config/i386/i386.c (ix86_flags_dependent): Likewise for params
+ "insn" and "dep_insn".
+ (exact_store_load_dependency): Likewise for both params.
+ (ix86_macro_fusion_pair_p): Eliminate local named "single_set"
+ since this now clashes with the inline function. Instead, delay
+ calling single_set until the point where it's needed, and then
+ assign the result to "compare_set" and rework the conditional that
+ follows.
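
The clash is worth spelling out: once single_set is an inline function
rather than a macro, a block-scope variable of the same name hides it,
so in C++ the old initializer would try to call the very local being
declared. Roughly:

    /* Old code: fine while single_set was a macro (textual expansion).  */
    rtx single_set = single_set (condgen);
    /* After the conversion this no longer compiles: within the
       initializer, "single_set" already names the local rtx.  */
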
+ * config/ia64/ia64.md (define_expand "tablejump"): Strengthen
+ local "last" from rtx to rtx_insn *.
+ * config/mips/mips-protos.h (mips_load_store_insns): Likewise for
+ second param.
+ (mips_store_data_bypass_p): Likewise for both params.
+ * config/mips/mips.c (mips_load_store_insns): Likewise for second
+ param.
+ (mips_store_data_bypass_p): Likewise for both params.
+ (mips_orphaned_high_part_p): Likewise for param "insn".
+ * config/mn10300/mn10300.c (extract_bundle): Likewise.
+ (mn10300_bundle_liw): Likewise for locals "r", "insn1", "insn2".
+ Introduce local rtx "insn2_pat".
+ * config/rl78/rl78.c (move_elim_pass): Likewise for locals "insn",
+ "ninsn".
+ (rl78_remove_unused_sets): Likewise for locals "insn", "ninsn".
+ Introduce local rtx "set", using it in place of "insn" for the
+ result of single_set. This appears to fix a bug, since the call
+ to find_regno_note on a SET does nothing.
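
The bug deserves a sketch: find_regno_note walks the REG_NOTES of an
insn, so handing it the SET returned by single_set inspects no notes
at all. Condensed shape of the change (REG_UNUSED is illustrative,
not quoted from the patch):

    /* Before (buggy): "insn" was overwritten with its SET, so the
       note lookup ran on a SET, which carries no notes.  */
    if ((insn = single_set (insn)) == NULL_RTX)
      continue;
    dest = SET_DEST (insn);
    ... find_regno_note (insn, REG_UNUSED, REGNO (dest)) ...

    /* After: keep the insn and its SET distinct, so the lookup sees
       the insn's notes.  */
    rtx set = single_set (insn);
    if (set == NULL)
      continue;
    dest = SET_DEST (set);
    ... find_regno_note (insn, REG_UNUSED, REGNO (dest)) ...
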
+ * config/rs6000/rs6000.c (set_to_load_agen): Strengthen both
+ params from rtx to rtx_insn *.
+ (set_to_load_agen): Likewise.
+ * config/s390/s390.c (s390_label_align): Likewise for local
+ "prev_insn". Introduce new rtx locals "set" and "src", using
+ them in place of "prev_insn" for the results of single_set
+ and SET_SRC respectively.
+ (s390_swap_cmp): Strengthen local "jump" from rtx to rtx_insn *.
+ Introduce new rtx local "set", using it in place of "jump" for the
+ result of single_set. Use SET_SRC (set) rather than plain
+ XEXP (set, 1).
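
The switch from XEXP (set, 1) to SET_SRC is not purely cosmetic:
rtl.h defines the accessor as a checked variant of the same field
access, approximately as below, so RTL-checking builds verify that
the operand really belongs to a SET.

    /* Approximate definition from rtl.h: XCEXP is XEXP plus, under
       RTL checking, a verification that RTX has code SET.  */
    #define SET_SRC(RTX) XCEXP (RTX, 1, SET)
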
+ * config/sh/sh.c (noncall_uses_reg): Strengthen param 2 from
+ rtx to rtx_insn *.
+ (noncall_uses_reg): Likewise.
+ (reg_unused_after): Introduce local rtx_sequence * "seq" in region
+ guarded by GET_CODE check, using its methods for clarity, and to
+ enable strengthening local "this_insn" from rtx to rtx_insn *.
+ * config/sh/sh.md (define_expand "mulhisi3"): Strengthen local
+ "insn" from rtx to rtx_insn *.
+ (define_expand "umulhisi3"): Likewise.
+ (define_expand "smulsi3_highpart"): Likewise.
+ (define_expand "umulsi3_highpart"): Likewise.
+ * config/sparc/sparc.c (sparc_do_work_around_errata): Likewise for
+ local "after". Replace GET_CODE check with a dyn_cast,
+ introducing new local rtx_sequence * "seq", using insn method for
+ typesafety.
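
The dyn_cast form folds the old GET_CODE test and the XVECEXP access
into one typesafe step; a minimal sketch of the idiom:

    /* dyn_cast yields NULL when the pattern is not a SEQUENCE, so the
       condition tests and binds in a single step.  */
    if (rtx_sequence *seq = dyn_cast <rtx_sequence *> (PATTERN (after)))
      after = seq->insn (1);  /* insn () returns rtx_insn *, not rtx.  */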
+
+ * dwarf2cfi.c (dwarf2out_frame_debug): Strengthen param "insn"
+ from rtx to rtx_insn *. Introduce local rtx "pat", using it in
+ place of "insn" once we're dealing with patterns rather than the
+ input insn.
+ (scan_insn_after): Strengthen param "insn" from rtx to rtx_insn *.
+ (scan_trace): Likewise for local "elt", updating lookups within
+ sequence to use insn method rather than element method.
+ * expr.c (find_args_size_adjust): Strengthen param "insn" from rtx
+ to rtx_insn *.
+ * gcse.c (gcse_emit_move_after): Likewise for local "new_rtx".
+ * ifcvt.c (noce_try_abs): Likewise for local "insn".
+ * ira.c (fix_reg_equiv_init): Add checked cast to rtx_insn * when
+ invoking single_set.
+ * lra-constraints.c (insn_rhs_dead_pseudo_p): Strengthen param
+ "insn" from rtx to rtx_insn *.
+ (skip_usage_debug_insns): Likewise for return type, adding a
+ checked cast.
+ (check_secondary_memory_needed_p): Likewise for local "insn".
+ (inherit_reload_reg): Likewise.
+ * modulo-sched.c (sms_schedule): Likewise for local "count_init".
+ * recog.c (peep2_attempt): Likewise for local "old_insn", adding
+ checked casts.
+ (store_data_bypass_p): Likewise for both params.
+ (if_test_bypass_p): Likewise.
+ * recog.h (store_data_bypass_p): Likewise for both params.
+ (if_test_bypass_p): Likewise.
+ * reload.c (find_equiv_reg): Likewise for local "where".
+ * reorg.c (delete_jump): Likewise for param "insn".
+ * rtlanal.c (single_set_2): Strengthen param "insn" from const_rtx
+ to const rtx_insn *.
+ * store-motion.c (replace_store_insn): Likewise for param "del".
+ (delete_store): Strengthen local "i" from rtx to rtx_insn_list *,
+ and use its methods for clarity, and to strengthen local "del"
+ from rtx to rtx_insn *.
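
For reference, the rtx_insn_list methods map directly onto the raw
accessors they replace; a hypothetical walk ("list" and "process"
are placeholders):

    for (rtx_insn_list *i = list; i != NULL; i = i->next ()) /* was XEXP (i, 1) */
      process (i->insn ());                                  /* was XEXP (i, 0) */
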
+ (build_store_vectors): Use insn method of "st" when calling
+ replace_store_insn for typesafety and clarity.
+
2014-09-09 Bill Schmidt <wschmidt@linux.vnet.ibm.com>

* config/rs6000/rs6000.c (rtx_is_swappable_p): Add
#ifndef GCC_AARCH_COMMON_PROTOS_H
#define GCC_AARCH_COMMON_PROTOS_H
-extern int aarch_crypto_can_dual_issue (rtx, rtx);
+extern int aarch_crypto_can_dual_issue (rtx_insn *, rtx_insn *);
extern bool aarch_rev16_p (rtx);
extern bool aarch_rev16_shleft_mask_imm_p (rtx, enum machine_mode);
extern bool aarch_rev16_shright_mask_imm_p (rtx, enum machine_mode);
implementations. This function identifies such pairs. */
int
-aarch_crypto_can_dual_issue (rtx producer, rtx consumer)
+aarch_crypto_can_dual_issue (rtx_insn *producer_insn, rtx_insn *consumer_insn)
{
+ rtx producer_set, consumer_set;
rtx producer_src, consumer_src;
- producer = single_set (producer);
- consumer = single_set (consumer);
+ producer_set = single_set (producer_insn);
+ consumer_set = single_set (consumer_insn);
- producer_src = producer ? SET_SRC (producer) : NULL;
- consumer_src = consumer ? SET_SRC (consumer) : NULL;
+ producer_src = producer_set ? SET_SRC (producer_set) : NULL;
+ consumer_src = consumer_set ? SET_SRC (consumer_set) : NULL;
if (producer_src && consumer_src
&& GET_CODE (producer_src) == UNSPEC && GET_CODE (consumer_src) == UNSPEC
&& ((XINT (producer_src, 1) == UNSPEC_AESE
&& XINT (consumer_src, 1) == UNSPEC_AESMC)
|| (XINT (producer_src, 1) == UNSPEC_AESD
&& XINT (consumer_src, 1) == UNSPEC_AESIMC)))
{
- unsigned int regno = REGNO (SET_DEST (producer));
+ unsigned int regno = REGNO (SET_DEST (producer_set));
- return REGNO (SET_DEST (consumer)) == regno
+ return REGNO (SET_DEST (consumer_set)) == regno
&& REGNO (XVECEXP (consumer_src, 0, 0)) == regno;
}
int cc_plus, cc_minus, cc_dummy;
int len_plus, len_minus;
rtx op[4];
- rtx xpattern = INSN_P (insn) ? single_set (insn) : insn;
+ rtx xpattern = INSN_P (insn) ? single_set (as_a <rtx_insn *> (insn)) : insn;
rtx xdest = SET_DEST (xpattern);
enum machine_mode mode = GET_MODE (xdest);
enum machine_mode imode = int_mode_for_mode (mode);
avr_out_bitop (rtx insn, rtx *xop, int *plen)
{
/* CODE and MODE of the operation. */
- rtx xpattern = INSN_P (insn) ? single_set (insn) : insn;
+ rtx xpattern = INSN_P (insn) ? single_set (as_a <rtx_insn *> (insn)) : insn;
enum rtx_code code = GET_CODE (SET_SRC (xpattern));
enum machine_mode mode = GET_MODE (xop[0]);
we must return 0. */
else if (code == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE)
{
+ rtx_sequence *seq = as_a <rtx_sequence *> (PATTERN (insn));
int i;
int retval = 0;
- for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
+ for (i = 0; i < seq->len (); i++)
{
- rtx this_insn = XVECEXP (PATTERN (insn), 0, i);
+ rtx_insn *this_insn = seq->insn (i);
rtx set = single_set (this_insn);
if (CALL_P (this_insn))
; in not able to allocate segment registers and reload the resulting
; expressions. Notice that no address register can hold a PSImode. */
- rtx insn, addr = XEXP (operands[1], 0);
+ rtx_insn *insn;
+ rtx addr = XEXP (operands[1], 0);
rtx hi8 = gen_reg_rtx (QImode);
rtx reg_z = gen_rtx_REG (HImode, REG_Z);
rtx reg_z = gen_rtx_REG (HImode, REG_Z);
rtx addr_hi8 = simplify_gen_subreg (QImode, addr, PSImode, 2);
addr_space_t as = MEM_ADDR_SPACE (operands[1]);
- rtx insn;
+ rtx_insn *insn;
/* Split the address to R21:Z */
emit_move_insn (reg_z, simplify_gen_subreg (HImode, addr, PSImode, 0));
/* Return nonzero if INSN contains any loads that may trap. */
static bool
-trapping_loads_p (rtx insn, int np_reg, bool after_np_branch)
+trapping_loads_p (rtx_insn *insn, int np_reg, bool after_np_branch)
{
rtx mem = SET_SRC (single_set (insn));
/* Return INSN if it is of TYPE_MCLD. Alternatively, if INSN is the start of
a three-insn bundle, see if one of them is a load and return that if so.
- Return NULL_RTX if the insn does not contain loads. */
-static rtx
+ Return NULL if the insn does not contain loads. */
+static rtx_insn *
find_load (rtx_insn *insn)
{
if (!NONDEBUG_INSN_P (insn))
- return NULL_RTX;
+ return NULL;
if (get_attr_type (insn) == TYPE_MCLD)
return insn;
if (GET_MODE (insn) != SImode)
- return NULL_RTX;
+ return NULL;
do {
insn = NEXT_INSN (insn);
if ((GET_MODE (insn) == SImode || GET_MODE (insn) == QImode)
&& get_attr_type (insn) == TYPE_MCLD)
return insn;
} while (GET_MODE (insn) != QImode);
- return NULL_RTX;
+ return NULL;
}
/* Determine whether PAT is an indirect call pattern. */
}
else if (NONDEBUG_INSN_P (insn))
{
- rtx load_insn = find_load (insn);
+ rtx_insn *load_insn = find_load (insn);
enum attr_type type = type_for_anomaly (insn);
if (cycles_since_jump < INT_MAX)
if (NONDEBUG_INSN_P (target))
{
- rtx load_insn = find_load (target);
+ rtx_insn *load_insn = find_load (target);
enum attr_type type = type_for_anomaly (target);
int delay_needed = 0;
if (cycles_since_jump < INT_MAX)
bool
cris_cc0_user_requires_cmp (rtx insn)
{
- rtx cc0_user = NULL;
+ rtx_insn *cc0_user = NULL;
rtx body;
rtx set;
(set (match_dup 0) (plus:SI (match_dup 1) (match_dup 2)))])]
;; Checking the previous insn is a bit too awkward for the condition.
{
- rtx prev = prev_nonnote_insn (curr_insn);
+ rtx_insn *prev = prev_nonnote_insn (curr_insn);
if (prev != NULL_RTX)
{
rtx set = single_set (prev);
extern const char *output_simode_bld (int, rtx[]);
extern void final_prescan_insn (rtx_insn *, rtx *, int);
extern int h8300_expand_movsi (rtx[]);
-extern void notice_update_cc (rtx, rtx);
+extern void notice_update_cc (rtx, rtx_insn *);
extern const char *output_logical_op (enum machine_mode, rtx *);
extern unsigned int compute_logical_op_length (enum machine_mode,
rtx *);
/* Update the condition code from the insn. */
void
-notice_update_cc (rtx body, rtx insn)
+notice_update_cc (rtx body, rtx_insn *insn)
{
rtx set;
by DEP_INSN and nothing set by DEP_INSN. */
static bool
-ix86_flags_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
+ix86_flags_dependent (rtx_insn *insn, rtx_insn *dep_insn,
+ enum attr_type insn_type)
{
rtx set, set2;
/* Return true if there exists exact dependency for store & load, i.e.
the same memory address is used in them. */
static bool
-exact_store_load_dependency (rtx store, rtx load)
+exact_store_load_dependency (rtx_insn *store, rtx_insn *load)
{
rtx set1, set2;
ix86_macro_fusion_pair_p (rtx_insn *condgen, rtx_insn *condjmp)
{
rtx src, dest;
- rtx single_set = single_set (condgen);
enum rtx_code ccode;
rtx compare_set = NULL_RTX, test_if, cond;
rtx alu_set = NULL_RTX, addr = NULL_RTX;
&& get_attr_type (condgen) != TYPE_ALU)
return false;
- if (single_set == NULL_RTX
+ compare_set = single_set (condgen);
+ if (compare_set == NULL_RTX
&& !TARGET_FUSE_ALU_AND_BRANCH)
return false;
- if (single_set != NULL_RTX)
- compare_set = single_set;
- else
+ if (compare_set == NULL_RTX)
{
int i;
rtx pat = PATTERN (condgen);
stream was the memory load. Grab the address from that.
Note we have to momentarily pop out of the sequence started
by the insn-emit wrapper in order to grab the last insn. */
- rtx last, set;
+ rtx_insn *last;
+ rtx set;
end_sequence ();
last = get_last_insn ();
extern int mips_address_insns (rtx, enum machine_mode, bool);
extern int mips_const_insns (rtx);
extern int mips_split_const_insns (rtx);
-extern int mips_load_store_insns (rtx, rtx);
+extern int mips_load_store_insns (rtx, rtx_insn *);
extern int mips_idiv_insns (void);
extern rtx_insn *mips_emit_move (rtx, rtx);
#ifdef RTX_CODE
extern const char *mips_output_probe_stack_range (rtx, rtx);
extern unsigned int mips_hard_regno_nregs (int, enum machine_mode);
extern bool mips_linked_madd_p (rtx_insn *, rtx_insn *);
-extern bool mips_store_data_bypass_p (rtx, rtx);
+extern bool mips_store_data_bypass_p (rtx_insn *, rtx_insn *);
extern int mips_dspalu_bypass_p (rtx, rtx);
extern rtx mips_prefetch_cookie (rtx, rtx);
BASE_INSN_LENGTH is the length of one instruction. */
int
-mips_load_store_insns (rtx mem, rtx insn)
+mips_load_store_insns (rtx mem, rtx_insn *insn)
{
enum machine_mode mode;
bool might_split_p;
for that case. */
bool
-mips_store_data_bypass_p (rtx out_insn, rtx in_insn)
+mips_store_data_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
{
if (GET_CODE (PATTERN (in_insn)) == UNSPEC_VOLATILE)
return false;
LO_SUMs in the current function. */
static bool
-mips_orphaned_high_part_p (mips_offset_table *htab, rtx insn)
+mips_orphaned_high_part_p (mips_offset_table *htab, rtx_insn *insn)
{
enum mips_symbol_type type;
rtx x, set;
cannot be bundled. */
static bool
-extract_bundle (rtx insn, struct liw_data * pdata)
+extract_bundle (rtx_insn *insn, struct liw_data * pdata)
{
bool allow_consts = true;
rtx p;
gcc_assert (pdata != NULL);
- if (insn == NULL_RTX)
+ if (insn == NULL)
return false;
/* Make sure that we are dealing with a simple SET insn. */
p = single_set (insn);
static void
mn10300_bundle_liw (void)
{
- rtx r;
+ rtx_insn *r;
- for (r = get_insns (); r != NULL_RTX; r = next_nonnote_nondebug_insn (r))
+ for (r = get_insns (); r != NULL; r = next_nonnote_nondebug_insn (r))
{
- rtx insn1, insn2;
+ rtx_insn *insn1, *insn2;
struct liw_data liw1, liw2;
insn1 = r;
delete_insn (insn2);
+ rtx insn2_pat;
if (liw1.op == LIW_OP_CMP)
- insn2 = gen_cmp_liw (liw2.dest, liw2.src, liw1.dest, liw1.src,
- GEN_INT (liw2.op));
+ insn2_pat = gen_cmp_liw (liw2.dest, liw2.src, liw1.dest, liw1.src,
+ GEN_INT (liw2.op));
else if (liw2.op == LIW_OP_CMP)
- insn2 = gen_liw_cmp (liw1.dest, liw1.src, liw2.dest, liw2.src,
- GEN_INT (liw1.op));
+ insn2_pat = gen_liw_cmp (liw1.dest, liw1.src, liw2.dest, liw2.src,
+ GEN_INT (liw1.op));
else
- insn2 = gen_liw (liw1.dest, liw2.dest, liw1.src, liw2.src,
- GEN_INT (liw1.op), GEN_INT (liw2.op));
+ insn2_pat = gen_liw (liw1.dest, liw2.dest, liw1.src, liw2.src,
+ GEN_INT (liw1.op), GEN_INT (liw2.op));
- insn2 = emit_insn_after (insn2, insn1);
+ insn2 = emit_insn_after (insn2_pat, insn1);
delete_insn (insn1);
r = insn2;
}
static unsigned int
move_elim_pass (void)
{
- rtx insn, ninsn, prev = NULL_RTX;
+ rtx_insn *insn, *ninsn;
+ rtx prev = NULL_RTX;
for (insn = get_insns (); insn; insn = ninsn)
{
static void
rl78_remove_unused_sets (void)
{
- rtx insn, ninsn = NULL_RTX;
+ rtx_insn *insn, *ninsn = NULL;
rtx dest;
for (insn = get_insns (); insn; insn = ninsn)
{
ninsn = next_nonnote_nondebug_insn (insn);
- if ((insn = single_set (insn)) == NULL_RTX)
+ rtx set = single_set (insn);
+ if (set == NULL)
continue;
- dest = SET_DEST (insn);
+ dest = SET_DEST (set);
if (GET_CODE (dest) != REG || REGNO (dest) > 23)
continue;
static bool is_cracked_insn (rtx);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
-static bool set_to_load_agen (rtx,rtx);
+static bool set_to_load_agen (rtx_insn *,rtx_insn *);
static bool insn_terminates_group_p (rtx , enum group_termination);
static bool insn_must_be_first_in_group (rtx);
static bool insn_must_be_last_in_group (rtx);
/* The function returns true if out_inst sets a value that is
used in the address generation computation of in_insn */
static bool
-set_to_load_agen (rtx out_insn, rtx in_insn)
+set_to_load_agen (rtx_insn *out_insn, rtx_insn *in_insn)
{
rtx out_set, in_set;
int
s390_label_align (rtx label)
{
- rtx prev_insn = prev_active_insn (label);
+ rtx_insn *prev_insn = prev_active_insn (label);
+ rtx set, src;
if (prev_insn == NULL_RTX)
goto old;
- prev_insn = single_set (prev_insn);
+ set = single_set (prev_insn);
- if (prev_insn == NULL_RTX)
+ if (set == NULL_RTX)
goto old;
- prev_insn = SET_SRC (prev_insn);
+ src = SET_SRC (set);
/* Don't align literal pool base labels. */
- if (GET_CODE (prev_insn) == UNSPEC
- && XINT (prev_insn, 1) == UNSPEC_MAIN_BASE)
+ if (GET_CODE (src) == UNSPEC
+ && XINT (src, 1) == UNSPEC_MAIN_BASE)
return 0;
old:
if (cond == NULL_RTX)
{
- rtx jump = find_cond_jump (NEXT_INSN (insn));
- jump = jump ? single_set (jump) : NULL_RTX;
+ rtx_insn *jump = find_cond_jump (NEXT_INSN (insn));
+ rtx set = jump ? single_set (jump) : NULL_RTX;
- if (jump == NULL_RTX)
+ if (set == NULL_RTX)
return;
- cond = XEXP (XEXP (jump, 1), 0);
+ cond = XEXP (SET_SRC (set), 0);
}
*op0 = *op1;
static bool broken_move (rtx_insn *);
static bool mova_p (rtx_insn *);
static rtx_insn *find_barrier (int, rtx_insn *, rtx_insn *);
-static bool noncall_uses_reg (rtx, rtx, rtx *);
+static bool noncall_uses_reg (rtx, rtx_insn *, rtx *);
static rtx_insn *gen_block_redirect (rtx_insn *, int, int);
static void sh_reorg (void);
static void sh_option_override (void);
setting it while calling it. Set *SET to a SET rtx if the register
is set by INSN. */
static bool
-noncall_uses_reg (rtx reg, rtx insn, rtx *set)
+noncall_uses_reg (rtx reg, rtx_insn *insn, rtx *set)
{
rtx pattern, reg2;
we must return 0. */
else if (code == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE)
{
+ rtx_sequence *seq = as_a <rtx_sequence *> (PATTERN (insn));
int i;
int retval = 0;
- for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
+ for (i = 0; i < seq->len (); i++)
{
- rtx this_insn = XVECEXP (PATTERN (insn), 0, i);
+ rtx_insn *this_insn = seq->insn (i);
rtx set = single_set (this_insn);
if (CALL_P (this_insn))
(reg:SI MACL_REG))]
"TARGET_SH1"
{
- rtx insn, macl;
+ rtx_insn *insn;
+ rtx macl;
macl = gen_rtx_REG (SImode, MACL_REG);
start_sequence ();
(reg:SI MACL_REG))]
"TARGET_SH1"
{
- rtx insn, macl;
+ rtx_insn *insn;
+ rtx macl;
macl = gen_rtx_REG (SImode, MACL_REG);
start_sequence ();
(reg:SI MACH_REG))]
"TARGET_SH2"
{
- rtx insn, mach;
+ rtx_insn *insn;
+ rtx mach;
mach = gen_rtx_REG (SImode, MACH_REG);
start_sequence ();
(reg:SI MACH_REG))]
"TARGET_SH2"
{
- rtx insn, mach;
+ rtx_insn *insn;
+ rtx mach;
mach = gen_rtx_REG (SImode, MACH_REG);
start_sequence ();
/* The problematic combination is with the sibling FP register. */
const unsigned int x = REGNO (SET_DEST (set));
const unsigned int y = x ^ 1;
- rtx after;
+ rtx_insn *after;
int i;
next = next_active_insn (insn);
if (++i == n_insns)
break;
branch_p = true;
- after = NULL_RTX;
+ after = NULL;
}
/* This is a branch with a filled delay slot. */
- else if (GET_CODE (PATTERN (after)) == SEQUENCE)
+ else if (rtx_sequence *seq =
+ dyn_cast <rtx_sequence *> (PATTERN (after)))
{
if (++i == n_insns)
break;
branch_p = true;
- after = XVECEXP (PATTERN (after), 0, 1);
+ after = seq->insn (1);
}
/* This is a regular instruction. */
else
register to the stack. */
static void
-dwarf2out_frame_debug (rtx insn)
+dwarf2out_frame_debug (rtx_insn *insn)
{
- rtx note, n;
+ rtx note, n, pat;
bool handled_one = false;
for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
switch (REG_NOTE_KIND (note))
{
case REG_FRAME_RELATED_EXPR:
- insn = XEXP (note, 0);
+ pat = XEXP (note, 0);
goto do_frame_expr;
case REG_CFA_DEF_CFA:
if (!handled_one)
{
- insn = PATTERN (insn);
+ pat = PATTERN (insn);
do_frame_expr:
- dwarf2out_frame_debug_expr (insn);
+ dwarf2out_frame_debug_expr (pat);
/* Check again. A parallel can save and update the same register.
We could probably check just once, here, but this is safer than
removing the check at the start of the function. */
- if (clobbers_queued_reg_save (insn))
+ if (clobbers_queued_reg_save (pat))
dwarf2out_flush_queued_reg_saves ();
}
}
/* A subroutine of scan_trace. Do what needs to be done "after" INSN. */
static void
-scan_insn_after (rtx insn)
+scan_insn_after (rtx_insn *insn)
{
if (RTX_FRAME_RELATED_P (insn))
dwarf2out_frame_debug (insn);
handling for the positioning of the notes. */
if (rtx_sequence *pat = dyn_cast <rtx_sequence *> (PATTERN (insn)))
{
- rtx elt;
+ rtx_insn *elt;
int i, n = pat->len ();
control = pat->insn (0);
gcc_assert (!RTX_FRAME_RELATED_P (control));
gcc_assert (!find_reg_note (control, REG_ARGS_SIZE, NULL));
- elt = pat->element (1);
+ elt = pat->insn (1);
if (INSN_FROM_TARGET_P (elt))
{
for (i = 1; i < n; ++i)
{
- elt = pat->element (i);
+ elt = pat->insn (i);
scan_insn_after (elt);
}
cannot be trivially extracted, the return value is INT_MIN. */
HOST_WIDE_INT
-find_args_size_adjust (rtx insn)
+find_args_size_adjust (rtx_insn *insn)
{
rtx dest, set, pat;
int i;
static rtx
gcse_emit_move_after (rtx dest, rtx src, rtx_insn *insn)
{
- rtx new_rtx;
+ rtx_insn *new_rtx;
const_rtx set = single_set_gcse (insn);
rtx set2;
rtx note;
REG_EQUAL note or a simple source if necessary. */
if (REG_P (c))
{
- rtx set, insn = prev_nonnote_insn (earliest);
+ rtx set;
+ rtx_insn *insn = prev_nonnote_insn (earliest);
if (insn
&& BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (earliest)
&& (set = single_set (insn))
{
next = XEXP (x, 1);
insn = XEXP (x, 0);
- set = single_set (insn);
+ set = single_set (as_a <rtx_insn *> (insn));
ira_assert (set != NULL_RTX
&& (REG_P (SET_DEST (set)) || REG_P (SET_SRC (set))));
if (REG_P (SET_DEST (set))
/* Return true if INSN contains a dying pseudo in INSN right hand
side. */
static bool
-insn_rhs_dead_pseudo_p (rtx insn)
+insn_rhs_dead_pseudo_p (rtx_insn *insn)
{
rtx set = single_set (insn);
}
/* Return first non-debug insn in list USAGE_INSNS. */
-static rtx
+static rtx_insn *
skip_usage_debug_insns (rtx usage_insns)
{
rtx insn;
insn != NULL_RTX && GET_CODE (insn) == INSN_LIST;
insn = XEXP (insn, 1))
;
- return insn;
+ return safe_as_a <rtx_insn *> (insn);
}
/* Return true if we need secondary memory moves for insn in
#ifndef SECONDARY_MEMORY_NEEDED
return false;
#else
- rtx insn, set, dest;
+ rtx_insn *insn;
+ rtx set, dest;
enum reg_class cl;
if (inher_cl == ALL_REGS
transformation will be unprofitable. */
if (lra_dump_file != NULL)
{
- rtx insn = skip_usage_debug_insns (next_usage_insns);
+ rtx_insn *insn = skip_usage_debug_insns (next_usage_insns);
rtx set = single_set (insn);
lra_assert (set != NULL_RTX);
FOR_EACH_LOOP (loop, 0)
{
rtx_insn *head, *tail;
- rtx count_reg, count_init;
+ rtx count_reg;
+ rtx_insn *count_init;
int mii, rec_mii, stage_count, min_cycle;
int64_t loop_count = 0;
bool opt_sc_p;
/* In case the loop has a doloop register it gets special
handling. */
- count_init = NULL_RTX;
+ count_init = NULL;
if ((count_reg = doloop_register_get (head, tail)))
{
basic_block pre_header;
int i;
rtx_insn *last, *before_try, *x;
rtx eh_note, as_note;
- rtx old_insn;
+ rtx_insn *old_insn;
rtx_insn *new_insn;
bool was_call = false;
/* If we are splitting an RTX_FRAME_RELATED_P insn, do not allow it to
match more than one insn, or to be split into more than one insn. */
- old_insn = peep2_insn_data[peep2_current].insn;
+ old_insn = as_a <rtx_insn *> (peep2_insn_data[peep2_current].insn);
if (RTX_FRAME_RELATED_P (old_insn))
{
bool any_note = false;
rtx note;
j = peep2_buf_position (peep2_current + i);
- old_insn = peep2_insn_data[j].insn;
+ old_insn = as_a <rtx_insn *> (peep2_insn_data[j].insn);
if (!CALL_P (old_insn))
continue;
was_call = true;
while (++i <= match_len)
{
j = peep2_buf_position (peep2_current + i);
- old_insn = peep2_insn_data[j].insn;
+ old_insn = as_a <rtx_insn *> (peep2_insn_data[j].insn);
gcc_assert (!CALL_P (old_insn));
}
break;
for (i = match_len; i >= 0; --i)
{
int j = peep2_buf_position (peep2_current + i);
- old_insn = peep2_insn_data[j].insn;
+ old_insn = as_a <rtx_insn *> (peep2_insn_data[j].insn);
as_note = find_reg_note (old_insn, REG_ARGS_SIZE, NULL);
if (as_note)
must be either a single_set or a PARALLEL with SETs inside. */
int
-store_data_bypass_p (rtx out_insn, rtx in_insn)
+store_data_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
{
rtx out_set, in_set;
rtx out_pat, in_pat;
of insn categorization may be any JUMP or CALL insn. */
int
-if_test_bypass_p (rtx out_insn, rtx in_insn)
+if_test_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
{
rtx out_set, in_set;
#endif
extern rtx peephole2_insns (rtx, rtx, int *);
-extern int store_data_bypass_p (rtx, rtx);
-extern int if_test_bypass_p (rtx, rtx);
+extern int store_data_bypass_p (rtx_insn *, rtx_insn *);
+extern int if_test_bypass_p (rtx_insn *, rtx_insn *);
#ifndef GENERATOR_FILE
/* Try recognizing the instruction INSN,
short *reload_reg_p, int goalreg, enum machine_mode mode)
{
rtx_insn *p = insn;
- rtx goaltry, valtry, value, where;
+ rtx goaltry, valtry, value;
+ rtx_insn *where;
rtx pat;
int regno = -1;
int valueno;
if that's what the previous thing was. */
static void
-delete_jump (rtx insn)
+delete_jump (rtx_insn *insn)
{
rtx set = single_set (insn);
/* Functions in rtlanal.c */
-/* Single set is implemented as macro for performance reasons. */
-#define single_set(I) (INSN_P (I) \
- ? (GET_CODE (PATTERN (I)) == SET \
- ? PATTERN (I) : single_set_1 (I)) \
- : NULL_RTX)
-#define single_set_1(I) single_set_2 (I, PATTERN (I))
+extern rtx single_set_2 (const rtx_insn *, const_rtx);
+
+/* Handle the cheap and common cases inline for performance. */
+
+inline rtx
+single_set (const rtx_insn *insn)
+{
+ if (!INSN_P (insn))
+ return NULL_RTX;
+
+ if (GET_CODE (PATTERN (insn)) == SET)
+ return PATTERN (insn);
+
+ /* Defer to the more expensive case. */
+ return single_set_2 (insn, PATTERN (insn));
+}
extern enum machine_mode get_address_mode (rtx mem);
extern int rtx_addr_can_trap_p (const_rtx);
extern int no_labels_between_p (const rtx_insn *, const rtx_insn *);
extern int modified_in_p (const_rtx, const_rtx);
extern int reg_set_p (const_rtx, const_rtx);
-extern rtx single_set_2 (const_rtx, const_rtx);
extern int multiple_sets (const_rtx);
extern int set_noop_p (const_rtx);
extern int noop_move_p (const_rtx);
/* In expr.c */
extern rtx move_by_pieces (rtx, rtx, unsigned HOST_WIDE_INT,
unsigned int, int);
-extern HOST_WIDE_INT find_args_size_adjust (rtx);
+extern HOST_WIDE_INT find_args_size_adjust (rtx_insn *);
extern int fixup_args_size_notes (rtx_insn *, rtx_insn *, int);
/* In cfgrtl.c */
will not be used, which we ignore. */
rtx
-single_set_2 (const_rtx insn, const_rtx pat)
+single_set_2 (const rtx_insn *insn, const_rtx pat)
{
rtx set = NULL;
int set_verified = 1;
/* This routine will replace a store with a SET to a specified register. */
static void
-replace_store_insn (rtx reg, rtx del, basic_block bb, struct st_expr *smexpr)
+replace_store_insn (rtx reg, rtx_insn *del, basic_block bb,
+ struct st_expr *smexpr)
{
rtx_insn *insn;
rtx mem, note, set, ptr;
static void
delete_store (struct st_expr * expr, basic_block bb)
{
- rtx reg, i, del;
+ rtx reg;
if (expr->reaching_reg == NULL_RTX)
expr->reaching_reg = gen_reg_rtx_and_attrs (expr->pattern);
reg = expr->reaching_reg;
- for (i = expr->avail_stores; i; i = XEXP (i, 1))
+ for (rtx_insn_list *i = expr->avail_stores; i; i = i->next ())
{
- del = XEXP (i, 0);
+ rtx_insn *del = i->insn ();
if (BLOCK_FOR_INSN (del) == bb)
{
/* We know there is only one since we deleted redundant
rtx r = gen_reg_rtx_and_attrs (ptr->pattern);
if (dump_file)
fprintf (dump_file, "Removing redundant store:\n");
- replace_store_insn (r, XEXP (st, 0), bb, ptr);
+ replace_store_insn (r, st->insn (), bb, ptr);
continue;
}
bitmap_set_bit (st_avloc[bb->index], ptr->index);