#include "regs.h"
#include "addresses.h"
#include "expr.h"
+#include "hashtab.h"
+#include "hash-set.h"
+#include "vec.h"
+#include "machmode.h"
+#include "input.h"
#include "function.h"
#include "flags.h"
#include "basic-block.h"
static void validate_replace_rtx_1 (rtx *, rtx, rtx, rtx, bool);
static void validate_replace_src_1 (rtx *, void *);
-static rtx split_insn (rtx);
+static rtx split_insn (rtx_insn *);
+
+struct target_recog default_target_recog;
+#if SWITCHABLE_TARGET
+struct target_recog *this_target_recog = &default_target_recog;
+#endif
/* Nonzero means allow operands to be volatile.
This should be 0 if you are generating rtl, such as if you are calling
struct recog_data_d recog_data;
-/* Contains a vector of operand_alternative structures for every operand.
+/* Contains a vector of operand_alternative structures, such that
+ operand OP of alternative A is at index A * n_operands + OP.
Set up by preprocess_constraints. */
-struct operand_alternative recog_op_alt[MAX_RECOG_OPERANDS][MAX_RECOG_ALTERNATIVES];
+const operand_alternative *recog_op_alt;
+
+/* Used to provide recog_op_alt for asms. */
+static operand_alternative asm_op_alt[MAX_RECOG_OPERANDS
+ * MAX_RECOG_ALTERNATIVES];
/* On return from `constrain_operands', indicate which alternative
was satisfied. */
if (reload_completed)
{
/* ??? Doh! We've not got the wrapping insn. Cook one up. */
- extract_insn (make_insn_raw (x));
- constrain_operands (1);
+ rtx_insn *insn = make_insn_raw (x);
+ extract_insn (insn);
+ constrain_operands (1, get_enabled_alternatives (insn));
return which_alternative >= 0;
}
Otherwise the changes will take effect immediately. */
int
-insn_invalid_p (rtx insn, bool in_group)
+insn_invalid_p (rtx_insn *insn, bool in_group)
{
rtx pat = PATTERN (insn);
int num_clobbers = 0;
{
extract_insn (insn);
- if (! constrain_operands (1))
+ if (! constrain_operands (1, get_preferred_alternatives (insn)))
return 1;
}
}
else if (DEBUG_INSN_P (object))
continue;
- else if (insn_invalid_p (object, true))
+ else if (insn_invalid_p (as_a <rtx_insn *> (object), true))
{
rtx pat = PATTERN (object);
if (object)
{
if (object != last_object && last_object && INSN_P (last_object))
- df_insn_rescan (last_object);
+ df_insn_rescan (as_a <rtx_insn *> (last_object));
last_object = object;
}
}
if (last_object && INSN_P (last_object))
- df_insn_rescan (last_object);
+ df_insn_rescan (as_a <rtx_insn *> (last_object));
num_changes = 0;
}
{
rtx x = *loc;
enum rtx_code code = GET_CODE (x);
- rtx new_rtx;
+ rtx new_rtx = NULL_RTX;
if (SWAPPABLE_OPERANDS_P (x)
&& swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
code = GET_CODE (x);
}
+ /* Canonicalize arithmetics with all constant operands. */
+ switch (GET_RTX_CLASS (code))
+ {
+ case RTX_UNARY:
+ if (CONSTANT_P (XEXP (x, 0)))
+ new_rtx = simplify_unary_operation (code, GET_MODE (x), XEXP (x, 0),
+ op0_mode);
+ break;
+ case RTX_COMM_ARITH:
+ case RTX_BIN_ARITH:
+ if (CONSTANT_P (XEXP (x, 0)) && CONSTANT_P (XEXP (x, 1)))
+ new_rtx = simplify_binary_operation (code, GET_MODE (x), XEXP (x, 0),
+ XEXP (x, 1));
+ break;
+ case RTX_COMPARE:
+ case RTX_COMM_COMPARE:
+ if (CONSTANT_P (XEXP (x, 0)) && CONSTANT_P (XEXP (x, 1)))
+ new_rtx = simplify_relational_operation (code, GET_MODE (x), op0_mode,
+ XEXP (x, 0), XEXP (x, 1));
+ break;
+ default:
+ break;
+ }
+ if (new_rtx)
+ {
+ validate_change (object, loc, new_rtx, 1);
+ return;
+ }
+
switch (code)
{
case PLUS:
&& MEM_P (sub))
return 0;
+#ifdef CANNOT_CHANGE_MODE_CLASS
+ if (REG_P (sub)
+ && REGNO (sub) < FIRST_PSEUDO_REGISTER
+ && REG_CANNOT_CHANGE_MODE_P (REGNO (sub), GET_MODE (sub), mode)
+ && GET_MODE_CLASS (GET_MODE (sub)) != MODE_COMPLEX_INT
+ && GET_MODE_CLASS (GET_MODE (sub)) != MODE_COMPLEX_FLOAT
+ /* LRA can generate some invalid SUBREGS just for matched
+ operand reload presentation. LRA needs to treat them as
+ valid. */
+ && ! LRA_SUBREG_P (op))
+ return 0;
+#endif
+
/* FLOAT_MODE subregs can't be paradoxical. Combine will occasionally
create such rtl, and we must reject it. */
if (SCALAR_FLOAT_MODE_P (GET_MODE (op))
int
register_operand (rtx op, enum machine_mode mode)
{
- if (GET_MODE (op) != mode && mode != VOIDmode)
- return 0;
-
if (GET_CODE (op) == SUBREG)
{
rtx sub = SUBREG_REG (op);
(Ideally, (SUBREG (MEM)...) should not exist after reload,
but currently it does result from (SUBREG (REG)...) where the
reg went on the stack.) */
- if (! reload_completed && MEM_P (sub))
- return general_operand (op, mode);
-
-#ifdef CANNOT_CHANGE_MODE_CLASS
- if (REG_P (sub)
- && REGNO (sub) < FIRST_PSEUDO_REGISTER
- && REG_CANNOT_CHANGE_MODE_P (REGNO (sub), GET_MODE (sub), mode)
- && GET_MODE_CLASS (GET_MODE (sub)) != MODE_COMPLEX_INT
- && GET_MODE_CLASS (GET_MODE (sub)) != MODE_COMPLEX_FLOAT
- /* LRA can generate some invalid SUBREGS just for matched
- operand reload presentation. LRA needs to treat them as
- valid. */
- && ! LRA_SUBREG_P (op))
- return 0;
-#endif
-
- /* FLOAT_MODE subregs can't be paradoxical. Combine will occasionally
- create such rtl, and we must reject it. */
- if (SCALAR_FLOAT_MODE_P (GET_MODE (op))
- /* LRA can use subreg to store a floating point value in an
- integer mode. Although the floating point and the
- integer modes need the same number of hard registers, the
- size of floating point mode can be less than the integer
- mode. */
- && ! lra_in_progress
- && GET_MODE_SIZE (GET_MODE (op)) > GET_MODE_SIZE (GET_MODE (sub)))
+ if (!REG_P (sub) && (reload_completed || !MEM_P (sub)))
return 0;
-
- op = sub;
}
-
- return (REG_P (op)
- && (REGNO (op) >= FIRST_PSEUDO_REGISTER
- || in_hard_reg_set_p (operand_reg_set,
- GET_MODE (op), REGNO (op))));
+ else if (!REG_P (op))
+ return 0;
+ return general_operand (op, mode);
}
/* Return 1 for a register in Pmode; ignore the tested mode. */
return (GET_CODE (op) == SCRATCH
|| (REG_P (op)
- && (lra_in_progress || REGNO (op) < FIRST_PSEUDO_REGISTER)));
+ && (lra_in_progress
+ || (REGNO (op) < FIRST_PSEUDO_REGISTER
+ && REGNO_REG_CLASS (REGNO (op)) != NO_REGS))));
}
/* Return 1 if OP is a valid immediate operand for mode MODE.
: mode, op));
}
-/* Returns 1 if OP is an operand that is a CONST_INT. */
+/* Returns 1 if OP is an operand that is a CONST_INT of mode MODE. */
int
const_int_operand (rtx op, enum machine_mode mode)
return 1;
}
+#if TARGET_SUPPORTS_WIDE_INT
+/* Returns 1 if OP is an operand that is a CONST_INT or CONST_WIDE_INT
+ of mode MODE. */
+int
+const_scalar_int_operand (rtx op, enum machine_mode mode)
+{
+ if (!CONST_SCALAR_INT_P (op))
+ return 0;
+
+ /* Single-HWI constants are delegated to the ordinary CONST_INT check. */
+ if (CONST_INT_P (op))
+ return const_int_operand (op, mode);
+
+ if (mode != VOIDmode)
+ {
+ int prec = GET_MODE_PRECISION (mode);
+ int bitsize = GET_MODE_BITSIZE (mode);
+
+ /* Reject wide constants that occupy more HOST_WIDE_INTs than
+ MODE has bits for. */
+ if (CONST_WIDE_INT_NUNITS (op) * HOST_BITS_PER_WIDE_INT > bitsize)
+ return 0;
+
+ if (prec == bitsize)
+ return 1;
+ else
+ {
+ /* Multiword partial int: the topmost element must be properly
+ sign-extended from PREC's remainder bits for the constant to
+ be canonical in MODE. */
+ HOST_WIDE_INT x
+ = CONST_WIDE_INT_ELT (op, CONST_WIDE_INT_NUNITS (op) - 1);
+ return (sext_hwi (x, prec & (HOST_BITS_PER_WIDE_INT - 1)) == x);
+ }
+ }
+ /* With no mode requested, any scalar integer constant is acceptable. */
+ return 1;
+}
+
+/* Returns 1 if OP is an operand that is a constant integer or constant
+ floating-point number of MODE. */
+
+int
+const_double_operand (rtx op, enum machine_mode mode)
+{
+ /* When TARGET_SUPPORTS_WIDE_INT is nonzero (this #if branch),
+ CONST_DOUBLE is used only for floating-point values, so no
+ CONST_INT check is needed here. */
+ return (GET_CODE (op) == CONST_DOUBLE)
+ && (GET_MODE (op) == mode || mode == VOIDmode);
+}
+#else
/* Returns 1 if OP is an operand that is a constant integer or constant
- floating-point number. */
+ floating-point number of MODE. */
int
const_double_operand (rtx op, enum machine_mode mode)
&& (mode == VOIDmode || GET_MODE (op) == mode
|| GET_MODE (op) == VOIDmode));
}
-
-/* Return 1 if OP is a general operand that is not an immediate operand. */
+#endif
+/* Return 1 if OP is a general operand that is not an immediate
+ operand of mode MODE. */
int
nonimmediate_operand (rtx op, enum machine_mode mode)
{
if (CONSTANT_P (op))
return immediate_operand (op, mode);
-
- if (GET_MODE (op) != mode && mode != VOIDmode)
- return 0;
-
- if (GET_CODE (op) == SUBREG)
- {
- /* Before reload, we can allow (SUBREG (MEM...)) as a register operand
- because it is guaranteed to be reloaded into one.
- Just make sure the MEM is valid in itself.
- (Ideally, (SUBREG (MEM)...) should not exist after reload,
- but currently it does result from (SUBREG (REG)...) where the
- reg went on the stack.) */
- if (! reload_completed && MEM_P (SUBREG_REG (op)))
- return general_operand (op, mode);
- op = SUBREG_REG (op);
- }
-
- return (REG_P (op)
- && (REGNO (op) >= FIRST_PSEUDO_REGISTER
- || in_hard_reg_set_p (operand_reg_set,
- GET_MODE (op), REGNO (op))));
+ return register_operand (op, mode);
}
/* Return 1 if OP is a valid operand that stands for pushing a
while (*constraint)
{
+ enum constraint_num cn;
char c = *constraint;
int len;
switch (c)
case ',':
constraint++;
continue;
- case '=':
- case '+':
- case '*':
- case '%':
- case '!':
- case '#':
- case '&':
- case '?':
- break;
case '0': case '1': case '2': case '3': case '4':
case '5': case '6': case '7': case '8': case '9':
}
continue;
- case 'p':
- if (address_operand (op, VOIDmode))
- result = 1;
- break;
-
- case TARGET_MEM_CONSTRAINT:
- case 'V': /* non-offsettable */
- if (memory_operand (op, VOIDmode))
- result = 1;
- break;
+ /* The rest of the compiler assumes that reloading the address
+ of a MEM into a register will make it fit an 'o' constraint.
+ That is, if it sees a MEM operand for an 'o' constraint,
+ it assumes that (mem (base-reg)) will fit.
+ That assumption fails on targets that don't have offsettable
+ addresses at all. We therefore need to treat 'o' asm
+ constraints as a special case and only accept operands that
+ are already offsettable, thus proving that at least one
+ offsettable address exists. */
case 'o': /* offsettable */
if (offsettable_nonstrict_memref_p (op))
result = 1;
break;
- case '<':
- /* ??? Before auto-inc-dec, auto inc/dec insns are not supposed to exist,
- excepting those that expand_call created. Further, on some
- machines which do not have generalized auto inc/dec, an inc/dec
- is not a memory_operand.
-
- Match any memory and hope things are resolved after reload. */
-
- if (MEM_P (op)
- && (1
- || GET_CODE (XEXP (op, 0)) == PRE_DEC
- || GET_CODE (XEXP (op, 0)) == POST_DEC))
+ case 'g':
+ if (general_operand (op, VOIDmode))
result = 1;
-#ifdef AUTO_INC_DEC
- incdec_ok = true;
-#endif
break;
- case '>':
- if (MEM_P (op)
- && (1
- || GET_CODE (XEXP (op, 0)) == PRE_INC
- || GET_CODE (XEXP (op, 0)) == POST_INC))
- result = 1;
#ifdef AUTO_INC_DEC
+ case '<':
+ case '>':
+ /* ??? Before auto-inc-dec, auto inc/dec insns are not supposed
+ to exist, excepting those that expand_call created. Further,
+ on some machines which do not have generalized auto inc/dec,
+ an inc/dec is not a memory_operand.
+
+ Match any memory and hope things are resolved after reload. */
incdec_ok = true;
#endif
- break;
-
- case 'E':
- case 'F':
- if (CONST_DOUBLE_AS_FLOAT_P (op)
- || (GET_CODE (op) == CONST_VECTOR
- && GET_MODE_CLASS (GET_MODE (op)) == MODE_VECTOR_FLOAT))
- result = 1;
- break;
-
- case 'G':
- if (CONST_DOUBLE_AS_FLOAT_P (op)
- && CONST_DOUBLE_OK_FOR_CONSTRAINT_P (op, 'G', constraint))
- result = 1;
- break;
- case 'H':
- if (CONST_DOUBLE_AS_FLOAT_P (op)
- && CONST_DOUBLE_OK_FOR_CONSTRAINT_P (op, 'H', constraint))
- result = 1;
- break;
-
- case 's':
- if (CONST_SCALAR_INT_P (op))
- break;
- /* Fall through. */
-
- case 'i':
- if (CONSTANT_P (op) && (! flag_pic || LEGITIMATE_PIC_OPERAND_P (op)))
- result = 1;
- break;
-
- case 'n':
- if (CONST_SCALAR_INT_P (op))
- result = 1;
- break;
+ default:
+ cn = lookup_constraint (constraint);
+ switch (get_constraint_type (cn))
+ {
+ case CT_REGISTER:
+ if (!result
+ && reg_class_for_constraint (cn) != NO_REGS
+ && GET_MODE (op) != BLKmode
+ && register_operand (op, VOIDmode))
+ result = 1;
+ break;
- case 'I':
- if (CONST_INT_P (op)
- && CONST_OK_FOR_CONSTRAINT_P (INTVAL (op), 'I', constraint))
- result = 1;
- break;
- case 'J':
- if (CONST_INT_P (op)
- && CONST_OK_FOR_CONSTRAINT_P (INTVAL (op), 'J', constraint))
- result = 1;
- break;
- case 'K':
- if (CONST_INT_P (op)
- && CONST_OK_FOR_CONSTRAINT_P (INTVAL (op), 'K', constraint))
- result = 1;
- break;
- case 'L':
- if (CONST_INT_P (op)
- && CONST_OK_FOR_CONSTRAINT_P (INTVAL (op), 'L', constraint))
- result = 1;
- break;
- case 'M':
- if (CONST_INT_P (op)
- && CONST_OK_FOR_CONSTRAINT_P (INTVAL (op), 'M', constraint))
- result = 1;
- break;
- case 'N':
- if (CONST_INT_P (op)
- && CONST_OK_FOR_CONSTRAINT_P (INTVAL (op), 'N', constraint))
- result = 1;
- break;
- case 'O':
- if (CONST_INT_P (op)
- && CONST_OK_FOR_CONSTRAINT_P (INTVAL (op), 'O', constraint))
- result = 1;
- break;
- case 'P':
- if (CONST_INT_P (op)
- && CONST_OK_FOR_CONSTRAINT_P (INTVAL (op), 'P', constraint))
- result = 1;
- break;
+ case CT_CONST_INT:
+ if (!result
+ && CONST_INT_P (op)
+ && insn_const_int_ok_for_constraint (INTVAL (op), cn))
+ result = 1;
+ break;
- case 'X':
- result = 1;
- break;
+ case CT_MEMORY:
+ /* Every memory operand can be reloaded to fit. */
+ result = result || memory_operand (op, VOIDmode);
+ break;
- case 'g':
- if (general_operand (op, VOIDmode))
- result = 1;
- break;
+ case CT_ADDRESS:
+ /* Every address operand can be reloaded to fit. */
+ result = result || address_operand (op, VOIDmode);
+ break;
- default:
- /* For all other letters, we first check for a register class,
- otherwise it is an EXTRA_CONSTRAINT. */
- if (REG_CLASS_FROM_CONSTRAINT (c, constraint) != NO_REGS)
- {
- case 'r':
- if (GET_MODE (op) == BLKmode)
- break;
- if (register_operand (op, VOIDmode))
- result = 1;
+ case CT_FIXED_FORM:
+ result = result || constraint_satisfied_p (op, cn);
+ break;
}
-#ifdef EXTRA_CONSTRAINT_STR
- else if (EXTRA_MEMORY_CONSTRAINT (c, constraint))
- /* Every memory operand can be reloaded to fit. */
- result = result || memory_operand (op, VOIDmode);
- else if (EXTRA_ADDRESS_CONSTRAINT (c, constraint))
- /* Every address operand can be reloaded to fit. */
- result = result || address_operand (op, VOIDmode);
- else if (EXTRA_CONSTRAINT_STR (op, c, constraint))
- result = 1;
-#endif
break;
}
len = CONSTRAINT_LEN (c, constraint);
return targetm.mode_dependent_address_p (addr, addrspace);
}
\f
+/* Return true if boolean attribute ATTR is supported. */
+
+static bool
+have_bool_attr (bool_attr attr)
+{
+ switch (attr)
+ {
+ case BA_ENABLED:
+ return HAVE_ATTR_enabled;
+ /* The preferred_for_* queries are meaningful if either the specific
+ attribute or the "enabled" attribute is defined, since a disabled
+ alternative is never preferred. */
+ case BA_PREFERRED_FOR_SIZE:
+ return HAVE_ATTR_enabled || HAVE_ATTR_preferred_for_size;
+ case BA_PREFERRED_FOR_SPEED:
+ return HAVE_ATTR_enabled || HAVE_ATTR_preferred_for_speed;
+ }
+ gcc_unreachable ();
+}
+
+/* Return the value of ATTR for instruction INSN. Callers are expected
+ to have set up recog_data.insn and which_alternative beforehand (see
+ get_bool_attr_mask_uncached). */
+
+static bool
+get_bool_attr (rtx_insn *insn, bool_attr attr)
+{
+ switch (attr)
+ {
+ case BA_ENABLED:
+ return get_attr_enabled (insn);
+ /* An alternative is only "preferred" if it is also enabled. */
+ case BA_PREFERRED_FOR_SIZE:
+ return get_attr_enabled (insn) && get_attr_preferred_for_size (insn);
+ case BA_PREFERRED_FOR_SPEED:
+ return get_attr_enabled (insn) && get_attr_preferred_for_speed (insn);
+ }
+ gcc_unreachable ();
+}
+
+/* Like get_bool_attr_mask, but don't use the cache. */
+
+static alternative_mask
+get_bool_attr_mask_uncached (rtx_insn *insn, bool_attr attr)
+{
+ /* Temporarily install enough information for get_attr_<foo> to assume
+ that the insn operands are already cached. As above, the attribute
+ mustn't depend on the values of operands, so we don't provide their
+ real values here. */
+ rtx old_insn = recog_data.insn;
+ int old_alternative = which_alternative;
+
+ recog_data.insn = insn;
+ alternative_mask mask = ALL_ALTERNATIVES;
+ int n_alternatives = insn_data[INSN_CODE (insn)].n_alternatives;
+ /* Query the attribute once per alternative, clearing the bit for
+ every alternative for which it is false. */
+ for (int i = 0; i < n_alternatives; i++)
+ {
+ which_alternative = i;
+ if (!get_bool_attr (insn, attr))
+ mask &= ~ALTERNATIVE_BIT (i);
+ }
+
+ /* Restore the globals clobbered above. */
+ recog_data.insn = old_insn;
+ which_alternative = old_alternative;
+ return mask;
+}
+
+/* Return the mask of operand alternatives that are allowed for INSN
+ by boolean attribute ATTR. This mask depends only on INSN and on
+ the current target; it does not depend on things like the values of
+ operands. */
+
+static alternative_mask
+get_bool_attr_mask (rtx_insn *insn, bool_attr attr)
+{
+ /* Quick exit for asms and for targets that don't use these attributes. */
+ int code = INSN_CODE (insn);
+ if (code < 0 || !have_bool_attr (attr))
+ return ALL_ALTERNATIVES;
+
+ /* Calling get_attr_<foo> can be expensive, so cache the mask
+ for speed. A zero cache entry means "not yet computed";
+ NOTE(review): a genuinely all-zero mask would therefore be
+ recomputed on every call, which is correct but uncached. */
+ if (!this_target_recog->x_bool_attr_masks[code][attr])
+ this_target_recog->x_bool_attr_masks[code][attr]
+ = get_bool_attr_mask_uncached (insn, attr);
+ return this_target_recog->x_bool_attr_masks[code][attr];
+}
+
+/* Return the set of alternatives of INSN that are allowed by the current
+ target (i.e. those whose "enabled" attribute is true). */
+
+alternative_mask
+get_enabled_alternatives (rtx_insn *insn)
+{
+ return get_bool_attr_mask (insn, BA_ENABLED);
+}
+
+/* Return the set of alternatives of INSN that are allowed by the current
+ target and are preferred for the current size/speed optimization
+ choice. */
+
+alternative_mask
+get_preferred_alternatives (rtx_insn *insn)
+{
+ /* Use the block containing INSN to decide between optimizing for
+ speed or for size; INSN must already be part of a basic block. */
+ if (optimize_bb_for_speed_p (BLOCK_FOR_INSN (insn)))
+ return get_bool_attr_mask (insn, BA_PREFERRED_FOR_SPEED);
+ else
+ return get_bool_attr_mask (insn, BA_PREFERRED_FOR_SIZE);
+}
+
+/* Return the set of alternatives of INSN that are allowed by the current
+ target and are preferred for the size/speed optimization choice
+ associated with BB. Passing a separate BB is useful if INSN has not
+ been emitted yet or if we are considering moving it to a different
+ block. */
+
+alternative_mask
+get_preferred_alternatives (rtx_insn *insn, basic_block bb)
+{
+ /* Same as the single-argument overload, but BB rather than INSN's own
+ block determines the size/speed trade-off. */
+ if (optimize_bb_for_speed_p (bb))
+ return get_bool_attr_mask (insn, BA_PREFERRED_FOR_SPEED);
+ else
+ return get_bool_attr_mask (insn, BA_PREFERRED_FOR_SIZE);
+}
+
+/* Assert that the cached boolean attributes for INSN are still accurate.
+ The backend is required to define these attributes in a way that only
+ depends on the current target (rather than operands, compiler phase,
+ etc.). Always returns true, so it can be used inside assertions. */
+
+bool
+check_bool_attrs (rtx_insn *insn)
+{
+ int code = INSN_CODE (insn);
+ if (code >= 0)
+ for (int i = 0; i <= BA_LAST; ++i)
+ {
+ enum bool_attr attr = (enum bool_attr) i;
+ /* Only validate attributes whose mask has actually been cached;
+ a zero entry means "never computed". */
+ if (this_target_recog->x_bool_attr_masks[code][attr])
+ gcc_assert (this_target_recog->x_bool_attr_masks[code][attr]
+ == get_bool_attr_mask_uncached (insn, attr));
+ }
+ return true;
+}
+
/* Like extract_insn, but save insn extracted and don't extract again, when
called again for the same insn expecting that recog_data still contain the
valid information. This is used primary by gen_attr infrastructure that
often does extract insn again and again. */
void
-extract_insn_cached (rtx insn)
+extract_insn_cached (rtx_insn *insn)
{
if (recog_data.insn == insn && INSN_CODE (insn) >= 0)
return;
recog_data.insn = insn;
}
+/* Do uncached extract_insn, constrain_operands and complain about failures.
+ This should be used when extracting a pre-existing constrained instruction
+ if the caller wants to know which alternative was chosen. */
+void
+extract_constrain_insn (rtx_insn *insn)
+{
+ extract_insn (insn);
+ /* Constrain strictly only after reload; failure here means the insn
+ no longer matches any enabled alternative, which is fatal. */
+ if (!constrain_operands (reload_completed, get_enabled_alternatives (insn)))
+ fatal_insn_not_found (insn);
+}
+
/* Do cached extract_insn, constrain_operands and complain about failures.
Used by insn_attrtab. */
void
-extract_constrain_insn_cached (rtx insn)
+extract_constrain_insn_cached (rtx_insn *insn)
{
extract_insn_cached (insn);
if (which_alternative == -1
- && !constrain_operands (reload_completed))
+ && !constrain_operands (reload_completed,
+ get_enabled_alternatives (insn)))
fatal_insn_not_found (insn);
}
-/* Do cached constrain_operands and complain about failures. */
+/* Do cached constrain_operands on INSN and complain about failures. */
int
-constrain_operands_cached (int strict)
+constrain_operands_cached (rtx_insn *insn, int strict)
{
if (which_alternative == -1)
- return constrain_operands (strict);
+ return constrain_operands (strict, get_enabled_alternatives (insn));
else
return 1;
}
/* Analyze INSN and fill in recog_data. */
void
-extract_insn (rtx insn)
+extract_insn (rtx_insn *insn)
{
int i;
int icode;
gcc_assert (recog_data.n_alternatives <= MAX_RECOG_ALTERNATIVES);
- if (INSN_CODE (insn) < 0)
- for (i = 0; i < recog_data.n_alternatives; i++)
- recog_data.alternative_enabled_p[i] = true;
- else
- {
- recog_data.insn = insn;
- for (i = 0; i < recog_data.n_alternatives; i++)
- {
- which_alternative = i;
- recog_data.alternative_enabled_p[i]
- = HAVE_ATTR_enabled ? get_attr_enabled (insn) : 1;
- }
- }
+ recog_data.enabled_alternatives = get_enabled_alternatives (insn);
recog_data.insn = NULL;
which_alternative = -1;
}
-/* After calling extract_insn, you can use this function to extract some
- information from the constraint strings into a more usable form.
- The collected data is stored in recog_op_alt. */
+/* Fill in OP_ALT_BASE for an instruction that has N_OPERANDS operands,
+ N_ALTERNATIVES alternatives and constraint strings CONSTRAINTS.
+ OP_ALT_BASE has N_ALTERNATIVES * N_OPERANDS entries and CONSTRAINTS
+ has N_OPERANDS entries. */
+
void
-preprocess_constraints (void)
+preprocess_constraints (int n_operands, int n_alternatives,
+ const char **constraints,
+ operand_alternative *op_alt_base)
{
- int i;
-
- for (i = 0; i < recog_data.n_operands; i++)
- memset (recog_op_alt[i], 0, (recog_data.n_alternatives
- * sizeof (struct operand_alternative)));
-
- for (i = 0; i < recog_data.n_operands; i++)
+ for (int i = 0; i < n_operands; i++)
{
int j;
struct operand_alternative *op_alt;
- const char *p = recog_data.constraints[i];
+ const char *p = constraints[i];
- op_alt = recog_op_alt[i];
+ op_alt = op_alt_base;
- for (j = 0; j < recog_data.n_alternatives; j++)
+ for (j = 0; j < n_alternatives; j++, op_alt += n_operands)
{
- op_alt[j].cl = NO_REGS;
- op_alt[j].constraint = p;
- op_alt[j].matches = -1;
- op_alt[j].matched = -1;
-
- if (!recog_data.alternative_enabled_p[j])
- {
- p = skip_alternative (p);
- continue;
- }
+ op_alt[i].cl = NO_REGS;
+ op_alt[i].constraint = p;
+ op_alt[i].matches = -1;
+ op_alt[i].matched = -1;
if (*p == '\0' || *p == ',')
{
- op_alt[j].anything_ok = 1;
+ op_alt[i].anything_ok = 1;
continue;
}
switch (c)
{
- case '=': case '+': case '*': case '%':
- case 'E': case 'F': case 'G': case 'H':
- case 's': case 'i': case 'n':
- case 'I': case 'J': case 'K': case 'L':
- case 'M': case 'N': case 'O': case 'P':
- /* These don't say anything we care about. */
- break;
-
case '?':
- op_alt[j].reject += 6;
+ op_alt[i].reject += 6;
break;
case '!':
- op_alt[j].reject += 600;
+ op_alt[i].reject += 600;
break;
case '&':
- op_alt[j].earlyclobber = 1;
+ op_alt[i].earlyclobber = 1;
break;
case '0': case '1': case '2': case '3': case '4':
case '5': case '6': case '7': case '8': case '9':
{
char *end;
- op_alt[j].matches = strtoul (p, &end, 10);
- recog_op_alt[op_alt[j].matches][j].matched = i;
+ op_alt[i].matches = strtoul (p, &end, 10);
+ op_alt[op_alt[i].matches].matched = i;
p = end;
}
continue;
- case TARGET_MEM_CONSTRAINT:
- op_alt[j].memory_ok = 1;
- break;
- case '<':
- op_alt[j].decmem_ok = 1;
- break;
- case '>':
- op_alt[j].incmem_ok = 1;
- break;
- case 'V':
- op_alt[j].nonoffmem_ok = 1;
- break;
- case 'o':
- op_alt[j].offmem_ok = 1;
- break;
case 'X':
- op_alt[j].anything_ok = 1;
- break;
-
- case 'p':
- op_alt[j].is_address = 1;
- op_alt[j].cl = reg_class_subunion[(int) op_alt[j].cl]
- [(int) base_reg_class (VOIDmode, ADDR_SPACE_GENERIC,
- ADDRESS, SCRATCH)];
+ op_alt[i].anything_ok = 1;
break;
case 'g':
- case 'r':
- op_alt[j].cl =
- reg_class_subunion[(int) op_alt[j].cl][(int) GENERAL_REGS];
+ op_alt[i].cl =
+ reg_class_subunion[(int) op_alt[i].cl][(int) GENERAL_REGS];
break;
default:
- if (EXTRA_MEMORY_CONSTRAINT (c, p))
+ enum constraint_num cn = lookup_constraint (p);
+ enum reg_class cl;
+ switch (get_constraint_type (cn))
{
- op_alt[j].memory_ok = 1;
+ case CT_REGISTER:
+ cl = reg_class_for_constraint (cn);
+ if (cl != NO_REGS)
+ op_alt[i].cl = reg_class_subunion[op_alt[i].cl][cl];
break;
- }
- if (EXTRA_ADDRESS_CONSTRAINT (c, p))
- {
- op_alt[j].is_address = 1;
- op_alt[j].cl
+
+ case CT_CONST_INT:
+ break;
+
+ case CT_MEMORY:
+ op_alt[i].memory_ok = 1;
+ break;
+
+ case CT_ADDRESS:
+ op_alt[i].is_address = 1;
+ op_alt[i].cl
= (reg_class_subunion
- [(int) op_alt[j].cl]
+ [(int) op_alt[i].cl]
[(int) base_reg_class (VOIDmode, ADDR_SPACE_GENERIC,
ADDRESS, SCRATCH)]);
break;
- }
- op_alt[j].cl
- = (reg_class_subunion
- [(int) op_alt[j].cl]
- [(int) REG_CLASS_FROM_CONSTRAINT ((unsigned char) c, p)]);
+ case CT_FIXED_FORM:
+ break;
+ }
break;
}
p += CONSTRAINT_LEN (c, p);
}
}
+/* Return an array of operand_alternative structures for
+ instruction ICODE, computing and caching it on first use. */
+
+const operand_alternative *
+preprocess_insn_constraints (int icode)
+{
+ gcc_checking_assert (IN_RANGE (icode, 0, LAST_INSN_CODE));
+ /* Return the cached array if we have already preprocessed ICODE. */
+ if (this_target_recog->x_op_alt[icode])
+ return this_target_recog->x_op_alt[icode];
+
+ int n_operands = insn_data[icode].n_operands;
+ /* No operands means there is nothing to preprocess; hand back a
+ null array. */
+ if (n_operands == 0)
+ return 0;
+ /* Always provide at least one alternative so that which_op_alt ()
+ works correctly. If the instruction has 0 alternatives (i.e. all
+ constraint strings are empty) then each operand in this alternative
+ will have anything_ok set. */
+ int n_alternatives = MAX (insn_data[icode].n_alternatives, 1);
+ int n_entries = n_operands * n_alternatives;
+
+ /* XCNEWVEC zero-initializes the array, as preprocess_constraints
+ expects. */
+ operand_alternative *op_alt = XCNEWVEC (operand_alternative, n_entries);
+ const char **constraints = XALLOCAVEC (const char *, n_operands);
+
+ for (int i = 0; i < n_operands; ++i)
+ constraints[i] = insn_data[icode].operand[i].constraint;
+ preprocess_constraints (n_operands, n_alternatives, constraints, op_alt);
+
+ this_target_recog->x_op_alt[icode] = op_alt;
+ return op_alt;
+}
+
+/* After calling extract_insn, you can use this function to extract some
+ information from the constraint strings into a more usable form.
+ The collected data is stored in recog_op_alt. */
+
+void
+preprocess_constraints (rtx insn)
+{
+ int icode = INSN_CODE (insn);
+ if (icode >= 0)
+ /* Recognized instructions use the per-icode cached array. */
+ recog_op_alt = preprocess_insn_constraints (icode);
+ else
+ {
+ /* Asms (icode < 0) have no cache; rebuild into the static
+ asm_op_alt buffer using the constraints recorded by
+ extract_insn in recog_data. */
+ int n_operands = recog_data.n_operands;
+ int n_alternatives = recog_data.n_alternatives;
+ int n_entries = n_operands * n_alternatives;
+ memset (asm_op_alt, 0, n_entries * sizeof (operand_alternative));
+ preprocess_constraints (n_operands, n_alternatives,
+ recog_data.constraints, asm_op_alt);
+ recog_op_alt = asm_op_alt;
+ }
+}
+
/* Check the operands of an insn against the insn's operand constraints
- and return 1 if they are valid.
+ and return 1 if they match any of the alternatives in ALTERNATIVES.
+
The information about the insn's operands, constraints, operand modes
etc. is obtained from the global variables set up by extract_insn.
};
int
-constrain_operands (int strict)
+constrain_operands (int strict, alternative_mask alternatives)
{
const char *constraints[MAX_RECOG_OPERANDS];
int matching_operands[MAX_RECOG_OPERANDS];
int lose = 0;
funny_match_index = 0;
- if (!recog_data.alternative_enabled_p[which_alternative])
+ if (!TEST_BIT (alternatives, which_alternative))
{
int i;
c = '\0';
break;
- case '?': case '!': case '*': case '%':
- case '=': case '+':
- break;
-
case '#':
/* Ignore rest of this alternative as far as
constraint checking is concerned. */
win = 1;
break;
- case 'X':
- /* This is used for a MATCH_SCRATCH in the cases when
- we don't actually need anything. So anything goes
- any time. */
- win = 1;
- break;
-
- case TARGET_MEM_CONSTRAINT:
- /* Memory operands must be valid, to the extent
- required by STRICT. */
- if (MEM_P (op))
- {
- if (strict > 0
- && !strict_memory_address_addr_space_p
- (GET_MODE (op), XEXP (op, 0),
- MEM_ADDR_SPACE (op)))
- break;
- if (strict == 0
- && !memory_address_addr_space_p
- (GET_MODE (op), XEXP (op, 0),
- MEM_ADDR_SPACE (op)))
- break;
- win = 1;
- }
- /* Before reload, accept what reload can turn into mem. */
- else if (strict < 0 && CONSTANT_P (op))
- win = 1;
- /* During reload, accept a pseudo */
- else if (reload_in_progress && REG_P (op)
- && REGNO (op) >= FIRST_PSEUDO_REGISTER)
- win = 1;
- break;
-
- case '<':
- if (MEM_P (op)
- && (GET_CODE (XEXP (op, 0)) == PRE_DEC
- || GET_CODE (XEXP (op, 0)) == POST_DEC))
- win = 1;
- break;
-
- case '>':
- if (MEM_P (op)
- && (GET_CODE (XEXP (op, 0)) == PRE_INC
- || GET_CODE (XEXP (op, 0)) == POST_INC))
- win = 1;
- break;
-
- case 'E':
- case 'F':
- if (CONST_DOUBLE_AS_FLOAT_P (op)
- || (GET_CODE (op) == CONST_VECTOR
- && GET_MODE_CLASS (GET_MODE (op)) == MODE_VECTOR_FLOAT))
- win = 1;
- break;
-
- case 'G':
- case 'H':
- if (CONST_DOUBLE_AS_FLOAT_P (op)
- && CONST_DOUBLE_OK_FOR_CONSTRAINT_P (op, c, p))
- win = 1;
- break;
-
- case 's':
- if (CONST_SCALAR_INT_P (op))
- break;
- case 'i':
- if (CONSTANT_P (op))
- win = 1;
- break;
-
- case 'n':
- if (CONST_SCALAR_INT_P (op))
- win = 1;
- break;
-
- case 'I':
- case 'J':
- case 'K':
- case 'L':
- case 'M':
- case 'N':
- case 'O':
- case 'P':
- if (CONST_INT_P (op)
- && CONST_OK_FOR_CONSTRAINT_P (INTVAL (op), c, p))
- win = 1;
- break;
-
- case 'V':
- if (MEM_P (op)
- && ((strict > 0 && ! offsettable_memref_p (op))
- || (strict < 0
- && !(CONSTANT_P (op) || MEM_P (op)))
- || (reload_in_progress
- && !(REG_P (op)
- && REGNO (op) >= FIRST_PSEUDO_REGISTER))))
- win = 1;
- break;
-
- case 'o':
- if ((strict > 0 && offsettable_memref_p (op))
- || (strict == 0 && offsettable_nonstrict_memref_p (op))
- /* Before reload, accept what reload can handle. */
- || (strict < 0
- && (CONSTANT_P (op) || MEM_P (op)))
- /* During reload, accept a pseudo */
- || (reload_in_progress && REG_P (op)
- && REGNO (op) >= FIRST_PSEUDO_REGISTER))
- win = 1;
- break;
-
default:
{
- enum reg_class cl;
-
- cl = (c == 'r'
- ? GENERAL_REGS : REG_CLASS_FROM_CONSTRAINT (c, p));
+ enum constraint_num cn = lookup_constraint (p);
+ enum reg_class cl = reg_class_for_constraint (cn);
if (cl != NO_REGS)
{
if (strict < 0
&& reg_fits_class_p (op, cl, offset, mode)))
win = 1;
}
-#ifdef EXTRA_CONSTRAINT_STR
- else if (EXTRA_CONSTRAINT_STR (op, c, p))
+
+ else if (constraint_satisfied_p (op, cn))
win = 1;
- else if (EXTRA_MEMORY_CONSTRAINT (c, p)
+ else if (insn_extra_memory_constraint (cn)
/* Every memory operand can be reloaded to fit. */
&& ((strict < 0 && MEM_P (op))
/* Before reload, accept what reload can turn
|| (reload_in_progress && REG_P (op)
&& REGNO (op) >= FIRST_PSEUDO_REGISTER)))
win = 1;
- else if (EXTRA_ADDRESS_CONSTRAINT (c, p)
+ else if (insn_extra_address_constraint (cn)
/* Every address operand can be reloaded to fit. */
&& strict < 0)
win = 1;
&& REGNO (op) >= FIRST_PSEUDO_REGISTER
&& reg_renumber[REGNO (op)] < 0
&& reg_equiv_mem (REGNO (op)) != 0
- && EXTRA_CONSTRAINT_STR
- (reg_equiv_mem (REGNO (op)), c, p))
+ && constraint_satisfied_p
+ (reg_equiv_mem (REGNO (op)), cn))
win = 1;
-#endif
break;
}
}
/* If we are about to reject this, but we are not to test strictly,
try a very loose test. Only return failure if it fails also. */
if (strict == 0)
- return constrain_operands (-1);
+ return constrain_operands (-1, alternatives);
else
return 0;
}
or NULL if unsuccessful. */
static rtx
-split_insn (rtx insn)
+split_insn (rtx_insn *insn)
{
/* Split insns here to get max fine-grain parallelism. */
- rtx first = PREV_INSN (insn);
- rtx last = try_split (PATTERN (insn), insn, 1);
+ rtx_insn *first = PREV_INSN (insn);
+ rtx_insn *last = try_split (PATTERN (insn), insn, 1);
rtx insn_set, last_set, note;
if (last == insn)
FOR_EACH_BB_REVERSE_FN (bb, cfun)
{
- rtx insn, next;
+ rtx_insn *insn, *next;
bool finish = false;
rtl_profile_for_bb (bb);
unsigned int
split_all_insns_noflow (void)
{
- rtx next, insn;
+ rtx_insn *next, *insn;
for (insn = get_insns (); insn; insn = next)
{
{
enum reg_class cl;
HARD_REG_SET live;
- df_ref *def_rec;
+ df_ref def;
int i;
gcc_assert (from < MAX_INSNS_PER_PEEP2 + 1);
gcc_assert (peep2_insn_data[from].insn != NULL_RTX);
/* Don't use registers set or clobbered by the insn. */
- for (def_rec = DF_INSN_DEFS (peep2_insn_data[from].insn);
- *def_rec; def_rec++)
- SET_HARD_REG_BIT (live, DF_REF_REGNO (*def_rec));
+ FOR_EACH_INSN_DEF (def, peep2_insn_data[from].insn)
+ SET_HARD_REG_BIT (live, DF_REF_REGNO (def));
from = peep2_buf_position (from + 1);
}
- cl = (class_str[0] == 'r' ? GENERAL_REGS
- : REG_CLASS_FROM_CONSTRAINT (class_str[0], class_str));
+ cl = reg_class_for_constraint (lookup_constraint (class_str));
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
{
replacing them with ATTEMPT. Returns the last insn emitted, or NULL
if the replacement is rejected. */
-static rtx
-peep2_attempt (basic_block bb, rtx insn, int match_len, rtx attempt)
+static rtx_insn *
+peep2_attempt (basic_block bb, rtx uncast_insn, int match_len, rtx_insn *attempt)
{
+ rtx_insn *insn = safe_as_a <rtx_insn *> (uncast_insn);
int i;
- rtx last, eh_note, as_note, before_try, x;
- rtx old_insn, new_insn;
+ rtx_insn *last, *before_try, *x;
+ rtx eh_note, as_note;
+ rtx_insn *old_insn;
+ rtx_insn *new_insn;
bool was_call = false;
/* If we are splitting an RTX_FRAME_RELATED_P insn, do not allow it to
match more than one insn, or to be split into more than one insn. */
- old_insn = peep2_insn_data[peep2_current].insn;
+ old_insn = as_a <rtx_insn *> (peep2_insn_data[peep2_current].insn);
if (RTX_FRAME_RELATED_P (old_insn))
{
bool any_note = false;
rtx note;
j = peep2_buf_position (peep2_current + i);
- old_insn = peep2_insn_data[j].insn;
+ old_insn = as_a <rtx_insn *> (peep2_insn_data[j].insn);
if (!CALL_P (old_insn))
continue;
was_call = true;
CALL_INSN_FUNCTION_USAGE (new_insn)
= CALL_INSN_FUNCTION_USAGE (old_insn);
+ SIBLING_CALL_P (new_insn) = SIBLING_CALL_P (old_insn);
for (note = REG_NOTES (old_insn);
note;
while (++i <= match_len)
{
j = peep2_buf_position (peep2_current + i);
- old_insn = peep2_insn_data[j].insn;
+ old_insn = as_a <rtx_insn *> (peep2_insn_data[j].insn);
gcc_assert (!CALL_P (old_insn));
}
break;
for (i = match_len; i >= 0; --i)
{
int j = peep2_buf_position (peep2_current + i);
- old_insn = peep2_insn_data[j].insn;
+ old_insn = as_a <rtx_insn *> (peep2_insn_data[j].insn);
as_note = find_reg_note (old_insn, REG_ARGS_SIZE, NULL);
if (as_note)
eh_note = find_reg_note (peep2_insn_data[i].insn, REG_EH_REGION, NULL_RTX);
/* Replace the old sequence with the new. */
+ rtx_insn *peepinsn = as_a <rtx_insn *> (peep2_insn_data[i].insn);
last = emit_insn_after_setloc (attempt,
peep2_insn_data[i].insn,
- INSN_LOCATION (peep2_insn_data[i].insn));
+ INSN_LOCATION (peepinsn));
before_try = PREV_INSN (insn);
delete_insn_chain (insn, peep2_insn_data[i].insn, false);
matched, and which now need to be replaced in the buffer. */
static void
-peep2_update_life (basic_block bb, int match_len, rtx last, rtx prev)
+peep2_update_life (basic_block bb, int match_len, rtx_insn *last,
+ rtx_insn *prev)
{
int i = peep2_buf_position (peep2_current + match_len + 1);
- rtx x;
+ rtx_insn *x;
regset_head live;
INIT_REG_SET (&live);
COPY_REG_SET (peep2_insn_data[pos].live_before, live);
peep2_current_count++;
- df_simulate_one_insn_forwards (bb, insn, live);
+ df_simulate_one_insn_forwards (bb, as_a <rtx_insn *> (insn), live);
return true;
}
static void
peephole2_optimize (void)
{
- rtx insn;
+ rtx_insn *insn;
bitmap live;
int i;
basic_block bb;
insn = BB_HEAD (bb);
for (;;)
{
- rtx attempt, head;
+ rtx_insn *attempt;
+ rtx head;
int match_len;
if (!past_end && !NONDEBUG_INSN_P (insn))
/* Match the peephole. */
head = peep2_insn_data[peep2_current].insn;
- attempt = peephole2_insns (PATTERN (head), head, &match_len);
+ attempt = safe_as_a <rtx_insn *> (
+ peephole2_insns (PATTERN (head), head, &match_len));
if (attempt != NULL)
{
- rtx last = peep2_attempt (bb, head, match_len, attempt);
+ rtx_insn *last = peep2_attempt (bb, head, match_len, attempt);
if (last)
{
peep2_update_life (bb, match_len, last, PREV_INSN (attempt));
BITMAP_FREE (live);
if (peep2_do_rebuild_jump_labels)
rebuild_jump_labels (get_insns ());
+ if (peep2_do_cleanup_cfg)
+ cleanup_cfg (CLEANUP_CFG_CHANGED);
}
#endif /* HAVE_peephole2 */
must be either a single_set or a PARALLEL with SETs inside. */
int
-store_data_bypass_p (rtx out_insn, rtx in_insn)
+store_data_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
{
rtx out_set, in_set;
rtx out_pat, in_pat;
of insn categorization may be any JUMP or CALL insn. */
int
-if_test_bypass_p (rtx out_insn, rtx in_insn)
+if_test_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
{
rtx out_set, in_set;
return true;
}
\f
-static bool
-gate_handle_peephole2 (void)
-{
- return (optimize > 0 && flag_peephole2);
-}
-
static unsigned int
rest_of_handle_peephole2 (void)
{
RTL_PASS, /* type */
"peephole2", /* name */
OPTGROUP_NONE, /* optinfo_flags */
- true, /* has_gate */
- true, /* has_execute */
TV_PEEPHOLE2, /* tv_id */
0, /* properties_required */
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
- ( TODO_df_finish | TODO_verify_rtl_sharing | 0 ), /* todo_flags_finish */
+ TODO_df_finish, /* todo_flags_finish */
};
class pass_peephole2 : public rtl_opt_pass
/* The epiphany backend creates a second instance of this pass, so we need
a clone method. */
opt_pass * clone () { return new pass_peephole2 (m_ctxt); }
- bool gate () { return gate_handle_peephole2 (); }
- unsigned int execute () { return rest_of_handle_peephole2 (); }
+ virtual bool gate (function *) { return (optimize > 0 && flag_peephole2); }
+ virtual unsigned int execute (function *)
+ {
+ return rest_of_handle_peephole2 ();
+ }
}; // class pass_peephole2
return new pass_peephole2 (ctxt);
}
-static unsigned int
-rest_of_handle_split_all_insns (void)
-{
- split_all_insns ();
- return 0;
-}
-
namespace {
const pass_data pass_data_split_all_insns =
RTL_PASS, /* type */
"split1", /* name */
OPTGROUP_NONE, /* optinfo_flags */
- false, /* has_gate */
- true, /* has_execute */
TV_NONE, /* tv_id */
0, /* properties_required */
0, /* properties_provided */
/* The epiphany backend creates a second instance of this pass, so
we need a clone method. */
opt_pass * clone () { return new pass_split_all_insns (m_ctxt); }
- unsigned int execute () { return rest_of_handle_split_all_insns (); }
+ virtual unsigned int execute (function *)
+ {
+ split_all_insns ();
+ return 0;
+ }
}; // class pass_split_all_insns
RTL_PASS, /* type */
"split2", /* name */
OPTGROUP_NONE, /* optinfo_flags */
- false, /* has_gate */
- true, /* has_execute */
TV_NONE, /* tv_id */
0, /* properties_required */
0, /* properties_provided */
{}
/* opt_pass methods: */
- unsigned int execute () { return rest_of_handle_split_after_reload (); }
+ virtual unsigned int execute (function *)
+ {
+ return rest_of_handle_split_after_reload ();
+ }
}; // class pass_split_after_reload
return new pass_split_after_reload (ctxt);
}
-static bool
-gate_handle_split_before_regstack (void)
-{
-#if HAVE_ATTR_length && defined (STACK_REGS)
- /* If flow2 creates new instructions which need splitting
- and scheduling after reload is not done, they might not be
- split until final which doesn't allow splitting
- if HAVE_ATTR_length. */
-# ifdef INSN_SCHEDULING
- return (optimize && !flag_schedule_insns_after_reload);
-# else
- return (optimize);
-# endif
-#else
- return 0;
-#endif
-}
-
-static unsigned int
-rest_of_handle_split_before_regstack (void)
-{
- split_all_insns ();
- return 0;
-}
-
namespace {
const pass_data pass_data_split_before_regstack =
RTL_PASS, /* type */
"split3", /* name */
OPTGROUP_NONE, /* optinfo_flags */
- true, /* has_gate */
- true, /* has_execute */
TV_NONE, /* tv_id */
0, /* properties_required */
0, /* properties_provided */
{}
/* opt_pass methods: */
- bool gate () { return gate_handle_split_before_regstack (); }
- unsigned int execute () {
- return rest_of_handle_split_before_regstack ();
- }
+ virtual bool gate (function *);
+ virtual unsigned int execute (function *)
+ {
+ split_all_insns ();
+ return 0;
+ }
}; // class pass_split_before_regstack
+bool
+pass_split_before_regstack::gate (function *)
+{
+#if HAVE_ATTR_length && defined (STACK_REGS)
+  /* Split before reg-stack conversion: if flow2 creates new insns
+     that need splitting and the post-reload scheduler will not run,
+     they would otherwise survive unsplit until final, which cannot
+     split when HAVE_ATTR_length is defined.  */
+# ifdef INSN_SCHEDULING
+  return (optimize && !flag_schedule_insns_after_reload);
+# else
+  return (optimize);
+# endif
+#else
+  return 0;
+#endif
+}
+
} // anon namespace
rtl_opt_pass *
return new pass_split_before_regstack (ctxt);
}
-static bool
-gate_handle_split_before_sched2 (void)
-{
-#ifdef INSN_SCHEDULING
- return optimize > 0 && flag_schedule_insns_after_reload;
-#else
- return 0;
-#endif
-}
-
static unsigned int
rest_of_handle_split_before_sched2 (void)
{
RTL_PASS, /* type */
"split4", /* name */
OPTGROUP_NONE, /* optinfo_flags */
- true, /* has_gate */
- true, /* has_execute */
TV_NONE, /* tv_id */
0, /* properties_required */
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
- TODO_verify_flow, /* todo_flags_finish */
+ 0, /* todo_flags_finish */
};
class pass_split_before_sched2 : public rtl_opt_pass
{}
/* opt_pass methods: */
- bool gate () { return gate_handle_split_before_sched2 (); }
- unsigned int execute () { return rest_of_handle_split_before_sched2 (); }
+ virtual bool gate (function *)
+ {
+#ifdef INSN_SCHEDULING
+ return optimize > 0 && flag_schedule_insns_after_reload;
+#else
+ return false;
+#endif
+ }
+
+ virtual unsigned int execute (function *)
+ {
+ return rest_of_handle_split_before_sched2 ();
+ }
}; // class pass_split_before_sched2
return new pass_split_before_sched2 (ctxt);
}
-/* The placement of the splitting that we do for shorten_branches
- depends on whether regstack is used by the target or not. */
-static bool
-gate_do_final_split (void)
-{
-#if HAVE_ATTR_length && !defined (STACK_REGS)
- return 1;
-#else
- return 0;
-#endif
-}
-
namespace {
const pass_data pass_data_split_for_shorten_branches =
RTL_PASS, /* type */
"split5", /* name */
OPTGROUP_NONE, /* optinfo_flags */
- true, /* has_gate */
- true, /* has_execute */
TV_NONE, /* tv_id */
0, /* properties_required */
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
- TODO_verify_rtl_sharing, /* todo_flags_finish */
+ 0, /* todo_flags_finish */
};
class pass_split_for_shorten_branches : public rtl_opt_pass
{}
/* opt_pass methods: */
- bool gate () { return gate_do_final_split (); }
- unsigned int execute () { return split_all_insns_noflow (); }
+ virtual bool gate (function *)
+ {
+ /* The placement of the splitting that we do for shorten_branches
+ depends on whether regstack is used by the target or not. */
+#if HAVE_ATTR_length && !defined (STACK_REGS)
+ return true;
+#else
+ return false;
+#endif
+ }
+
+ virtual unsigned int execute (function *)
+ {
+ return split_all_insns_noflow ();
+ }
}; // class pass_split_for_shorten_branches
{
return new pass_split_for_shorten_branches (ctxt);
}
+
+/* (Re)initialize the target information after a change in target. */
+
+void
+recog_init ()
+{
+  /* All fields start out zero-initialized, so on the very first call
+     we only need to mark the structure as initialized.  */
+  if (!this_target_recog->x_initialized)
+    {
+      this_target_recog->x_initialized = true;
+      return;
+    }
+  memset (this_target_recog->x_bool_attr_masks, 0,
+	  sizeof (this_target_recog->x_bool_attr_masks));
+  for (int i = 0; i < LAST_INSN_CODE; ++i)
+    if (this_target_recog->x_op_alt[i])
+      {
+	free (this_target_recog->x_op_alt[i]);
+	this_target_recog->x_op_alt[i] = 0;
+      }
+}