#include "stringpool.h"
#include "stor-layout.h"
#include "calls.h"
+#include "hashtab.h"
+#include "hash-set.h"
+#include "vec.h"
+#include "machmode.h"
+#include "input.h"
#include "function.h"
#include "expr.h"
+#include "insn-codes.h"
#include "optabs.h"
#include "libfuncs.h"
#include "flags.h"
#include "target-def.h"
#include "common/common-target.h"
#include "langhooks.h"
-#include "sched-int.h"
-#include "vec.h"
+#include "dominance.h"
+#include "cfg.h"
+#include "cfgrtl.h"
+#include "cfganal.h"
+#include "lcm.h"
+#include "cfgbuild.h"
+#include "cfgcleanup.h"
+#include "predict.h"
#include "basic-block.h"
+#include "sched-int.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "opts.h"
#include "tree-pass.h"
#include "context.h"
+#include "hash-map.h"
+#include "plugin-api.h"
+#include "ipa-ref.h"
#include "cgraph.h"
#include "builtins.h"
+#include "rtl-iter.h"
/* True if X is an UNSPEC wrapper around a SYMBOL_REF or LABEL_REF. */
#define UNSPEC_ADDRESS_P(X) \
COSTS_N_INSNS (35), /* int_div_di */
4, /* branch_cost */
4 /* memory_latency */
+ },
+ /* Octeon III */
+ {
+ COSTS_N_INSNS (6), /* fp_add */
+ COSTS_N_INSNS (6), /* fp_mult_sf */
+ COSTS_N_INSNS (7), /* fp_mult_df */
+ COSTS_N_INSNS (25), /* fp_div_sf */
+ COSTS_N_INSNS (48), /* fp_div_df */
+ COSTS_N_INSNS (6), /* int_mult_si */
+ COSTS_N_INSNS (6), /* int_mult_di */
+ COSTS_N_INSNS (18), /* int_div_si */
+ COSTS_N_INSNS (35), /* int_div_di */
+ 4, /* branch_cost */
+ 4 /* memory_latency */
},
{ /* R3900 */
COSTS_N_INSNS (2), /* fp_add */
};
\f
static rtx mips_find_pic_call_symbol (rtx_insn *, rtx, bool);
-static int mips_register_move_cost (enum machine_mode, reg_class_t,
+static int mips_register_move_cost (machine_mode, reg_class_t,
reg_class_t);
-static unsigned int mips_function_arg_boundary (enum machine_mode, const_tree);
+static unsigned int mips_function_arg_boundary (machine_mode, const_tree);
+static machine_mode mips_get_reg_raw_mode (int regno);
\f
-/* This hash table keeps track of implicit "mips16" and "nomips16" attributes
- for -mflip_mips16. It maps decl names onto a boolean mode setting. */
-struct GTY (()) mflip_mips16_entry {
- const char *name;
- bool mips16_p;
-};
-static GTY ((param_is (struct mflip_mips16_entry))) htab_t mflip_mips16_htab;
-
-/* Hash table callbacks for mflip_mips16_htab. */
-
-static hashval_t
-mflip_mips16_htab_hash (const void *entry)
+struct mips16_flip_traits : default_hashmap_traits
{
- return htab_hash_string (((const struct mflip_mips16_entry *) entry)->name);
-}
+ static hashval_t hash (const char *s) { return htab_hash_string (s); }
+ static bool
+ equal_keys (const char *a, const char *b)
+ {
+ return !strcmp (a, b);
+ }
+};
-static int
-mflip_mips16_htab_eq (const void *entry, const void *name)
-{
- return strcmp (((const struct mflip_mips16_entry *) entry)->name,
- (const char *) name) == 0;
-}
+/* This hash table keeps track of implicit "mips16" and "nomips16" attributes
+ for -mflip_mips16. It maps decl names onto a boolean mode setting. */
+static GTY (()) hash_map<const char *, bool, mips16_flip_traits> *
+ mflip_mips16_htab;
/* True if -mflip-mips16 should next add an attribute for the default MIPS16
mode, false if it should next add an attribute for the opposite mode. */
static bool
mflip_mips16_use_mips16_p (tree decl)
{
- struct mflip_mips16_entry *entry;
const char *name;
- hashval_t hash;
- void **slot;
bool base_is_mips16 = (mips_base_compression_flags & MASK_MIPS16) != 0;
/* Use the opposite of the command-line setting for anonymous decls. */
return !base_is_mips16;
if (!mflip_mips16_htab)
- mflip_mips16_htab = htab_create_ggc (37, mflip_mips16_htab_hash,
- mflip_mips16_htab_eq, NULL);
+ mflip_mips16_htab
+ = hash_map<const char *, bool, mips16_flip_traits>::create_ggc (37);
name = IDENTIFIER_POINTER (DECL_NAME (decl));
- hash = htab_hash_string (name);
- slot = htab_find_slot_with_hash (mflip_mips16_htab, name, hash, INSERT);
- entry = (struct mflip_mips16_entry *) *slot;
- if (!entry)
+
+ bool existed;
+ bool *slot = &mflip_mips16_htab->get_or_insert (name, &existed);
+ if (!existed)
{
mips16_flipper = !mips16_flipper;
- entry = ggc_alloc<mflip_mips16_entry> ();
- entry->name = name;
- entry->mips16_p = mips16_flipper ? !base_is_mips16 : base_is_mips16;
- *slot = entry;
+ *slot = mips16_flipper ? !base_is_mips16 : base_is_mips16;
}
- return entry->mips16_p;
+ return *slot;
}
\f
/* Predicates to test for presence of "near" and "far"/"long_call"
/* Implement TARGET_LEGITIMATE_CONSTANT_P. */
static bool
-mips_legitimate_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
+mips_legitimate_constant_p (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
return mips_const_insns (x) > 0;
}
data section. */
static bool
-mips_rtx_constant_in_small_data_p (enum machine_mode mode)
+mips_rtx_constant_in_small_data_p (machine_mode mode)
{
return (!TARGET_EMBEDDED_DATA
&& TARGET_LOCAL_SDATA
extended ones. */
static int
-mips_symbol_insns_1 (enum mips_symbol_type type, enum machine_mode mode)
+mips_symbol_insns_1 (enum mips_symbol_type type, machine_mode mode)
{
if (mips_use_pcrel_pool_p[(int) type])
{
In both cases, instruction counts are based off BASE_INSN_LENGTH. */
static int
-mips_symbol_insns (enum mips_symbol_type type, enum machine_mode mode)
+mips_symbol_insns (enum mips_symbol_type type, machine_mode mode)
{
return mips_symbol_insns_1 (type, mode) * (TARGET_MIPS16 ? 2 : 1);
}
/* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
static bool
-mips_cannot_force_const_mem (enum machine_mode mode, rtx x)
+mips_cannot_force_const_mem (machine_mode mode, rtx x)
{
enum mips_symbol_type type;
rtx base, offset;
constants when we're using a per-function constant pool. */
static bool
-mips_use_blocks_for_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED,
+mips_use_blocks_for_constant_p (machine_mode mode ATTRIBUTE_UNUSED,
const_rtx x ATTRIBUTE_UNUSED)
{
return !TARGET_MIPS16_PCREL_LOADS;
STRICT_P is true if REG_OK_STRICT is in effect. */
int
-mips_regno_mode_ok_for_base_p (int regno, enum machine_mode mode,
+mips_regno_mode_ok_for_base_p (int regno, machine_mode mode,
bool strict_p)
{
if (!HARD_REGISTER_NUM_P (regno))
STRICT_P is true if REG_OK_STRICT is in effect. */
static bool
-mips_valid_base_register_p (rtx x, enum machine_mode mode, bool strict_p)
+mips_valid_base_register_p (rtx x, machine_mode mode, bool strict_p)
{
if (!strict_p && GET_CODE (x) == SUBREG)
x = SUBREG_REG (x);
can address a value of mode MODE. */
static bool
-mips_valid_offset_p (rtx x, enum machine_mode mode)
+mips_valid_offset_p (rtx x, machine_mode mode)
{
/* Check that X is a signed 16-bit number. */
if (!const_arith_operand (x, Pmode))
LO_SUM symbol has type SYMBOL_TYPE. */
static bool
-mips_valid_lo_sum_p (enum mips_symbol_type symbol_type, enum machine_mode mode)
+mips_valid_lo_sum_p (enum mips_symbol_type symbol_type, machine_mode mode)
{
/* Check that symbols of type SYMBOL_TYPE can be used to access values
of mode MODE. */
static bool
mips_classify_address (struct mips_address_info *info, rtx x,
- enum machine_mode mode, bool strict_p)
+ machine_mode mode, bool strict_p)
{
switch (GET_CODE (x))
{
/* Implement TARGET_LEGITIMATE_ADDRESS_P. */
static bool
-mips_legitimate_address_p (enum machine_mode mode, rtx x, bool strict_p)
+mips_legitimate_address_p (machine_mode mode, rtx x, bool strict_p)
{
struct mips_address_info addr;
/* Return true if X is a legitimate $sp-based address for mode MDOE. */
bool
-mips_stack_address_p (rtx x, enum machine_mode mode)
+mips_stack_address_p (rtx x, machine_mode mode)
{
struct mips_address_info addr;
sense, because their use is so restricted. */
static bool
-mips_lx_address_p (rtx addr, enum machine_mode mode)
+mips_lx_address_p (rtx addr, machine_mode mode)
{
if (GET_CODE (addr) != PLUS
|| !REG_P (XEXP (addr, 0))
an 8-bit immediate field that's shifted left twice. */
static bool
-mips16_unextended_reference_p (enum machine_mode mode, rtx base,
+mips16_unextended_reference_p (machine_mode mode, rtx base,
unsigned HOST_WIDE_INT offset)
{
if (mode != BLKmode && offset % GET_MODE_SIZE (mode) == 0)
enough. */
int
-mips_address_insns (rtx x, enum machine_mode mode, bool might_split_p)
+mips_address_insns (rtx x, machine_mode mode, bool might_split_p)
{
struct mips_address_info addr;
int factor;
OFFSET_PREDICATE. */
bool
-m16_based_address_p (rtx x, enum machine_mode mode,
+m16_based_address_p (rtx x, machine_mode mode,
insn_operand_predicate_fn offset_predicate)
{
struct mips_address_info addr;
for a microMIPS LWSP or SWSP insn. */
bool
-lwsp_swsp_address_p (rtx x, enum machine_mode mode)
+lwsp_swsp_address_p (rtx x, machine_mode mode)
{
struct mips_address_info addr;
MODE is the mode of the value being accessed. */
bool
-umips_12bit_offset_address_p (rtx x, enum machine_mode mode)
+umips_12bit_offset_address_p (rtx x, machine_mode mode)
{
struct mips_address_info addr;
int
mips_load_store_insns (rtx mem, rtx_insn *insn)
{
- enum machine_mode mode;
+ machine_mode mode;
bool might_split_p;
rtx set;
Return that new register. */
static rtx
-mips_force_unary (enum machine_mode mode, enum rtx_code code, rtx op0)
+mips_force_unary (machine_mode mode, enum rtx_code code, rtx op0)
{
rtx reg;
of mode MODE. Return that new register. */
static rtx
-mips_force_binary (enum machine_mode mode, enum rtx_code code, rtx op0, rtx op1)
+mips_force_binary (machine_mode mode, enum rtx_code code, rtx op0, rtx op1)
{
rtx reg;
is guaranteed to be a legitimate address for mode MODE. */
bool
-mips_split_symbol (rtx temp, rtx addr, enum machine_mode mode, rtx *low_out)
+mips_split_symbol (rtx temp, rtx addr, machine_mode mode, rtx *low_out)
{
enum mips_symbol_context context;
enum mips_symbol_type symbol_type;
/* If X is not a valid address for mode MODE, force it into a register. */
static rtx
-mips_force_address (rtx x, enum machine_mode mode)
+mips_force_address (rtx x, machine_mode mode)
{
if (!mips_legitimate_address_p (mode, x, false))
x = force_reg (Pmode, x);
static rtx
mips_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
- enum machine_mode mode)
+ machine_mode mode)
{
rtx base, addr;
HOST_WIDE_INT offset;
mips_move_integer (rtx temp, rtx dest, unsigned HOST_WIDE_INT value)
{
struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
- enum machine_mode mode;
+ machine_mode mode;
unsigned int i, num_ops;
rtx x;
move_operand. */
static void
-mips_legitimize_const_move (enum machine_mode mode, rtx dest, rtx src)
+mips_legitimize_const_move (machine_mode mode, rtx dest, rtx src)
{
rtx base, offset;
sequence that is valid. */
bool
-mips_legitimize_move (enum machine_mode mode, rtx dest, rtx src)
+mips_legitimize_move (machine_mode mode, rtx dest, rtx src)
{
if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode))
{
&& symbol_type == SYMBOL_GP_RELATIVE);
}
-/* A for_each_rtx callback for mips_small_data_pattern_p. DATA is the
- containing MEM, or null if none. */
+/* Return true if OP refers to small data symbols directly, not through
+ a LO_SUM. CONTEXT is the context in which X appears. */
static int
-mips_small_data_pattern_1 (rtx *loc, void *data)
+mips_small_data_pattern_1 (rtx x, enum mips_symbol_context context)
{
- enum mips_symbol_context context;
-
- /* Ignore things like "g" constraints in asms. We make no particular
- guarantee about which symbolic constants are acceptable as asm operands
- versus which must be forced into a GPR. */
- if (GET_CODE (*loc) == LO_SUM || GET_CODE (*loc) == ASM_OPERANDS)
- return -1;
-
- if (MEM_P (*loc))
+ subrtx_var_iterator::array_type array;
+ FOR_EACH_SUBRTX_VAR (iter, array, x, ALL)
{
- if (for_each_rtx (&XEXP (*loc, 0), mips_small_data_pattern_1, *loc))
- return 1;
- return -1;
- }
+ rtx x = *iter;
- context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
- return mips_rewrite_small_data_p (*loc, context);
+ /* Ignore things like "g" constraints in asms. We make no particular
+ guarantee about which symbolic constants are acceptable as asm operands
+ versus which must be forced into a GPR. */
+ if (GET_CODE (x) == LO_SUM || GET_CODE (x) == ASM_OPERANDS)
+ iter.skip_subrtxes ();
+ else if (MEM_P (x))
+ {
+ if (mips_small_data_pattern_1 (XEXP (x, 0), SYMBOL_CONTEXT_MEM))
+ return true;
+ iter.skip_subrtxes ();
+ }
+ else if (mips_rewrite_small_data_p (x, context))
+ return true;
+ }
+ return false;
}
/* Return true if OP refers to small data symbols directly, not through
bool
mips_small_data_pattern_p (rtx op)
{
- return for_each_rtx (&op, mips_small_data_pattern_1, NULL);
+ return mips_small_data_pattern_1 (op, SYMBOL_CONTEXT_LEA);
}
-/* A for_each_rtx callback, used by mips_rewrite_small_data.
- DATA is the containing MEM, or null if none. */
+/* Rewrite *LOC so that it refers to small data using explicit
+ relocations. CONTEXT is the context in which *LOC appears. */
-static int
-mips_rewrite_small_data_1 (rtx *loc, void *data)
+static void
+mips_rewrite_small_data_1 (rtx *loc, enum mips_symbol_context context)
{
- enum mips_symbol_context context;
-
- if (MEM_P (*loc))
+ subrtx_ptr_iterator::array_type array;
+ FOR_EACH_SUBRTX_PTR (iter, array, loc, ALL)
{
- for_each_rtx (&XEXP (*loc, 0), mips_rewrite_small_data_1, *loc);
- return -1;
+ rtx *loc = *iter;
+ if (MEM_P (*loc))
+ {
+ mips_rewrite_small_data_1 (&XEXP (*loc, 0), SYMBOL_CONTEXT_MEM);
+ iter.skip_subrtxes ();
+ }
+ else if (mips_rewrite_small_data_p (*loc, context))
+ {
+ *loc = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, *loc);
+ iter.skip_subrtxes ();
+ }
+ else if (GET_CODE (*loc) == LO_SUM)
+ iter.skip_subrtxes ();
}
-
- context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
- if (mips_rewrite_small_data_p (*loc, context))
- *loc = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, *loc);
-
- if (GET_CODE (*loc) == LO_SUM)
- return -1;
-
- return 0;
}
/* Rewrite instruction pattern PATTERN so that it refers to small data
mips_rewrite_small_data (rtx pattern)
{
pattern = copy_insn (pattern);
- for_each_rtx (&pattern, mips_rewrite_small_data_1, NULL);
+ mips_rewrite_small_data_1 (&pattern, SYMBOL_CONTEXT_LEA);
return pattern;
}
\f
/* Return the cost of floating-point multiplications of mode MODE. */
static int
-mips_fp_mult_cost (enum machine_mode mode)
+mips_fp_mult_cost (machine_mode mode)
{
return mode == DFmode ? mips_cost->fp_mult_df : mips_cost->fp_mult_sf;
}
/* Return the cost of floating-point divisions of mode MODE. */
static int
-mips_fp_div_cost (enum machine_mode mode)
+mips_fp_div_cost (machine_mode mode)
{
return mode == DFmode ? mips_cost->fp_div_df : mips_cost->fp_div_sf;
}
cost of OP itself. */
static int
-mips_sign_extend_cost (enum machine_mode mode, rtx op)
+mips_sign_extend_cost (machine_mode mode, rtx op)
{
if (MEM_P (op))
/* Extended loads are as cheap as unextended ones. */
cost of OP itself. */
static int
-mips_zero_extend_cost (enum machine_mode mode, rtx op)
+mips_zero_extend_cost (machine_mode mode, rtx op)
{
if (MEM_P (op))
/* Extended loads are as cheap as unextended ones. */
assuming that the move will be in pieces of at most UNITS bytes. */
static int
-mips_set_reg_reg_piece_cost (enum machine_mode mode, unsigned int units)
+mips_set_reg_reg_piece_cost (machine_mode mode, unsigned int units)
{
return COSTS_N_INSNS ((GET_MODE_SIZE (mode) + units - 1) / units);
}
/* Return the cost of moving between two registers of mode MODE. */
static int
-mips_set_reg_reg_cost (enum machine_mode mode)
+mips_set_reg_reg_cost (machine_mode mode)
{
switch (GET_MODE_CLASS (mode))
{
mips_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
int *total, bool speed)
{
- enum machine_mode mode = GET_MODE (x);
+ machine_mode mode = GET_MODE (x);
bool float_mode_p = FLOAT_MODE_P (mode);
int cost;
rtx addr;
/* Implement TARGET_ADDRESS_COST. */
static int
-mips_address_cost (rtx addr, enum machine_mode mode,
+mips_address_cost (rtx addr, machine_mode mode,
addr_space_t as ATTRIBUTE_UNUSED,
bool speed ATTRIBUTE_UNUSED)
{
mips_subword (rtx op, bool high_p)
{
unsigned int byte, offset;
- enum machine_mode mode;
+ machine_mode mode;
mode = GET_MODE (op);
if (mode == VOIDmode)
mips_output_move (rtx dest, rtx src)
{
enum rtx_code dest_code, src_code;
- enum machine_mode mode;
+ machine_mode mode;
enum mips_symbol_type symbol_type;
bool dbl_p;
static bool
mips_canonicalize_int_order_test (enum rtx_code *code, rtx *cmp1,
- enum machine_mode mode)
+ machine_mode mode)
{
HOST_WIDE_INT plus_one;
mips_emit_int_order_test (enum rtx_code code, bool *invert_ptr,
rtx target, rtx cmp0, rtx cmp1)
{
- enum machine_mode mode;
+ machine_mode mode;
/* First see if there is a MIPS instruction that can do this operation.
If not, try doing the same for the inverse operation. If that also
a simple round-robin allocation scheme. */
static rtx
-mips_allocate_fcc (enum machine_mode mode)
+mips_allocate_fcc (machine_mode mode)
{
unsigned int regno, count;
mips_expand_conditional_trap (rtx comparison)
{
rtx op0, op1;
- enum machine_mode mode;
+ machine_mode mode;
enum rtx_code code;
/* MIPS conditional trap instructions don't have GT or LE flavors,
static void
mips_get_arg_info (struct mips_arg_info *info, const CUMULATIVE_ARGS *cum,
- enum machine_mode mode, const_tree type, bool named)
+ machine_mode mode, const_tree type, bool named)
{
bool doubleword_aligned_p;
unsigned int num_bytes, num_words, max_regs;
/* Only leading floating-point scalars are passed in
floating-point registers. We also handle vector floats the same
say, which is OK because they are not covered by the standard ABI. */
+ gcc_assert (TARGET_PAIRED_SINGLE_FLOAT || mode != V2SFmode);
info->fpr_p = (!cum->gp_reg_found
&& cum->arg_number < 2
&& (type == 0
/* Scalar, complex and vector floating-point types are passed in
floating-point registers, as long as this is a named rather
than a variable argument. */
+ gcc_assert (TARGET_PAIRED_SINGLE_FLOAT || mode != V2SFmode);
info->fpr_p = (named
&& (type == 0 || FLOAT_TYPE_P (type))
&& (GET_MODE_CLASS (mode) == MODE_FLOAT
/* Implement TARGET_FUNCTION_ARG. */
static rtx
-mips_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
+mips_function_arg (cumulative_args_t cum_v, machine_mode mode,
const_tree type, bool named)
{
CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
if (mode == VOIDmode)
{
if (TARGET_MIPS16 && cum->fp_code != 0)
- return gen_rtx_REG ((enum machine_mode) cum->fp_code, 0);
+ return gen_rtx_REG ((machine_mode) cum->fp_code, 0);
else
return NULL;
}
&& GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
{
rtx real, imag;
- enum machine_mode inner;
+ machine_mode inner;
unsigned int regno;
inner = GET_MODE_INNER (mode);
/* Implement TARGET_FUNCTION_ARG_ADVANCE. */
static void
-mips_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
+mips_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
const_tree type, bool named)
{
CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
static int
mips_arg_partial_bytes (cumulative_args_t cum,
- enum machine_mode mode, tree type, bool named)
+ machine_mode mode, tree type, bool named)
{
struct mips_arg_info info;
to STACK_BOUNDARY bits if the type requires it. */
static unsigned int
-mips_function_arg_boundary (enum machine_mode mode, const_tree type)
+mips_function_arg_boundary (machine_mode mode, const_tree type)
{
unsigned int alignment;
return alignment;
}
+/* Implement TARGET_GET_RAW_RESULT_MODE and TARGET_GET_RAW_ARG_MODE.
+
+   For o32 FPXX, floating-point argument and return registers are
+   described as 64-bit (DFmode) even though the FPRs are primarily
+   described as 32-bit internally; see the related comment in
+   mips_function_value_regno_p.  All other registers use the default.  */
+
+static machine_mode
+mips_get_reg_raw_mode (int regno)
+{
+  if (TARGET_FLOATXX && FP_REG_P (regno))
+    return DFmode;
+  return default_get_reg_raw_mode (regno);
+}
+
/* Return true if FUNCTION_ARG_PADDING (MODE, TYPE) should return
upward rather than downward. In other words, return true if the
first byte of the stack slot has useful data, false if the last
byte does. */
bool
-mips_pad_arg_upward (enum machine_mode mode, const_tree type)
+mips_pad_arg_upward (machine_mode mode, const_tree type)
{
/* On little-endian targets, the first byte of every stack argument
is passed in the first byte of the stack slot. */
the opposite if the most significant byte does. */
bool
-mips_pad_reg_upward (enum machine_mode mode, tree type)
+mips_pad_reg_upward (machine_mode mode, tree type)
{
/* No shifting is required for floating-point arguments. */
if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
static bool
mips_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
- enum machine_mode mode, const_tree type,
+ machine_mode mode, const_tree type,
bool named ATTRIBUTE_UNUSED)
{
if (mips_abi == ABI_EABI)
static bool
mips_callee_copies (cumulative_args_t cum ATTRIBUTE_UNUSED,
- enum machine_mode mode ATTRIBUTE_UNUSED,
+ machine_mode mode ATTRIBUTE_UNUSED,
const_tree type ATTRIBUTE_UNUSED, bool named)
{
return mips_abi == ABI_EABI && named;
floating-point register. */
static bool
-mips_return_mode_in_fpr_p (enum machine_mode mode)
+mips_return_mode_in_fpr_p (machine_mode mode)
{
+ gcc_assert (TARGET_PAIRED_SINGLE_FLOAT || mode != V2SFmode);
return ((GET_MODE_CLASS (mode) == MODE_FLOAT
|| mode == V2SFmode
|| GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
the structure itself has mode BLKmode. */
static rtx
-mips_return_fpr_single (enum machine_mode type_mode,
- enum machine_mode value_mode)
+mips_return_fpr_single (machine_mode type_mode,
+ machine_mode value_mode)
{
rtx x;
Otherwise the values are packed together as closely as possible. */
static rtx
-mips_return_fpr_pair (enum machine_mode mode,
- enum machine_mode mode1, HOST_WIDE_INT offset1,
- enum machine_mode mode2, HOST_WIDE_INT offset2)
+mips_return_fpr_pair (machine_mode mode,
+ machine_mode mode1, HOST_WIDE_INT offset1,
+ machine_mode mode2, HOST_WIDE_INT offset2)
{
int inc;
- inc = (TARGET_NEWABI ? 2 : MAX_FPRS_PER_FMT);
+ inc = (TARGET_NEWABI || mips_abi == ABI_32 ? 2 : MAX_FPRS_PER_FMT);
return gen_rtx_PARALLEL
(mode,
gen_rtvec (2,
static rtx
mips_function_value_1 (const_tree valtype, const_tree fn_decl_or_type,
- enum machine_mode mode)
+ machine_mode mode)
{
if (valtype)
{
/* Implement TARGET_LIBCALL_VALUE. */
static rtx
-mips_libcall_value (enum machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
+mips_libcall_value (machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
{
return mips_function_value_1 (NULL_TREE, NULL_TREE, mode);
}
/* Implement TARGET_FUNCTION_VALUE_REGNO_P.
- On the MIPS, R2 R3 and F0 F2 are the only register thus used.
- Currently, R2 and F0 are only implemented here (C has no complex type). */
+ On the MIPS, R2 R3 and F0 F2 are the only register thus used. */
static bool
mips_function_value_regno_p (const unsigned int regno)
{
+ /* Most types only require one GPR or one FPR for return values but for
+ hard-float two FPRs can be used for _Complex types (for all ABIs)
+ and long doubles (for n64). */
if (regno == GP_RETURN
|| regno == FP_RETURN
- || (LONG_DOUBLE_TYPE_SIZE == 128
- && FP_RETURN != GP_RETURN
+ || (FP_RETURN != GP_RETURN
&& regno == FP_RETURN + 2))
return true;
+ /* For o32 FP32, _Complex double will be returned in four 32-bit registers.
+ This does not apply to o32 FPXX as floating-point function argument and
+ return registers are described as 64-bit even though floating-point
+ registers are primarily described as 32-bit internally.
+ See: mips_get_reg_raw_mode. */
+ if ((mips_abi == ABI_32 && TARGET_FLOAT32)
+ && FP_RETURN != GP_RETURN
+ && (regno == FP_RETURN + 1
+ || regno == FP_RETURN + 3))
+ return true;
+
return false;
}
/* Implement TARGET_SETUP_INCOMING_VARARGS. */
static void
-mips_setup_incoming_varargs (cumulative_args_t cum, enum machine_mode mode,
+mips_setup_incoming_varargs (cumulative_args_t cum, machine_mode mode,
tree type, int *pretend_size ATTRIBUTE_UNUSED,
int no_rtl)
{
{
/* We can't use move_block_from_reg, because it will use
the wrong mode. */
- enum machine_mode mode;
+ machine_mode mode;
int off, i;
/* Set OFF to the offset from virtual_incoming_args_rtx of
}
}
\f
+/* Hash-map traits for the FUNC -> LOCAL symbol map below: keys are
+   SYMBOL_REF rtxes, hashed by their name string and compared with
+   rtx_equal_p (see the out-of-line definitions of these members).  */
+struct local_alias_traits : default_hashmap_traits
+{
+  static hashval_t hash (rtx);
+  static bool equal_keys (rtx, rtx);
+};
+
/* Each locally-defined hard-float MIPS16 function has a local symbol
associated with it. This hash table maps the function symbol (FUNC)
to the local symbol (LOCAL). */
-struct GTY(()) mips16_local_alias {
- rtx func;
- rtx local;
-};
-static GTY ((param_is (struct mips16_local_alias))) htab_t mips16_local_aliases;
+static GTY (()) hash_map<rtx, rtx, local_alias_traits> *mips16_local_aliases;
/* Hash table callbacks for mips16_local_aliases. */
-static hashval_t
-mips16_local_aliases_hash (const void *entry)
+hashval_t
+local_alias_traits::hash (rtx func)
{
- const struct mips16_local_alias *alias;
-
- alias = (const struct mips16_local_alias *) entry;
- return htab_hash_string (XSTR (alias->func, 0));
+ return htab_hash_string (XSTR (func, 0));
}
-static int
-mips16_local_aliases_eq (const void *entry1, const void *entry2)
+bool
+local_alias_traits::equal_keys (rtx func1, rtx func2)
{
- const struct mips16_local_alias *alias1, *alias2;
-
- alias1 = (const struct mips16_local_alias *) entry1;
- alias2 = (const struct mips16_local_alias *) entry2;
- return rtx_equal_p (alias1->func, alias2->func);
+ return rtx_equal_p (func1, func2);
}
/* FUNC is the symbol for a locally-defined hard-float MIPS16 function.
static rtx
mips16_local_alias (rtx func)
{
- struct mips16_local_alias *alias, tmp_alias;
- void **slot;
-
/* Create the hash table if this is the first call. */
if (mips16_local_aliases == NULL)
- mips16_local_aliases = htab_create_ggc (37, mips16_local_aliases_hash,
- mips16_local_aliases_eq, NULL);
+ mips16_local_aliases
+ = hash_map<rtx, rtx, local_alias_traits>::create_ggc (37);
/* Look up the function symbol, creating a new entry if need be. */
- tmp_alias.func = func;
- slot = htab_find_slot (mips16_local_aliases, &tmp_alias, INSERT);
+ bool existed;
+ rtx *slot = &mips16_local_aliases->get_or_insert (func, &existed);
gcc_assert (slot != NULL);
- alias = (struct mips16_local_alias *) *slot;
- if (alias == NULL)
+ if (!existed)
{
const char *func_name, *local_name;
rtx local;
SYMBOL_REF_FLAGS (local) = SYMBOL_REF_FLAGS (func) | SYMBOL_FLAG_LOCAL;
/* Create a new structure to represent the mapping. */
- alias = ggc_alloc<struct mips16_local_alias> ();
- alias->func = func;
- alias->local = local;
- *slot = alias;
+ *slot = local;
}
- return alias->local;
+ return *slot;
}
\f
/* A chained list of functions for which mips16_build_call_stub has already
return mode MODE in the name of a MIPS16 function stub. */
static const char *
-mips16_call_stub_mode_suffix (enum machine_mode mode)
+mips16_call_stub_mode_suffix (machine_mode mode)
{
if (mode == SFmode)
return "sf";
else if (mode == DCmode)
return "dc";
else if (mode == V2SFmode)
- return "df";
+ {
+ gcc_assert (TARGET_PAIRED_SINGLE_FLOAT);
+ return "df";
+ }
else
gcc_unreachable ();
}
if (TARGET_64BIT)
fprintf (asm_out_file, "\tdm%cc1\t%s,%s\n", direction,
reg_names[gpreg], reg_names[fpreg]);
- else if (TARGET_FLOAT64)
+ else if (ISA_HAS_MXHC1)
{
fprintf (asm_out_file, "\tm%cc1\t%s,%s\n", direction,
reg_names[gpreg + TARGET_BIG_ENDIAN], reg_names[fpreg]);
fprintf (asm_out_file, "\tm%chc1\t%s,%s\n", direction,
reg_names[gpreg + TARGET_LITTLE_ENDIAN], reg_names[fpreg]);
}
+ else if (TARGET_FLOATXX && direction == 't')
+ {
+ /* Use the argument save area to move via memory. */
+ fprintf (asm_out_file, "\tsw\t%s,0($sp)\n", reg_names[gpreg]);
+ fprintf (asm_out_file, "\tsw\t%s,4($sp)\n", reg_names[gpreg + 1]);
+ fprintf (asm_out_file, "\tldc1\t%s,0($sp)\n", reg_names[fpreg]);
+ }
+ else if (TARGET_FLOATXX && direction == 'f')
+ {
+ /* Use the argument save area to move via memory. */
+ fprintf (asm_out_file, "\tsdc1\t%s,0($sp)\n", reg_names[fpreg]);
+ fprintf (asm_out_file, "\tlw\t%s,0($sp)\n", reg_names[gpreg]);
+ fprintf (asm_out_file, "\tlw\t%s,4($sp)\n", reg_names[gpreg + 1]);
+ }
else
{
/* Move the least-significant word. */
for (f = (unsigned int) fp_code; f != 0; f >>= 2)
{
- enum machine_mode mode;
+ machine_mode mode;
struct mips_arg_info info;
if ((f & 3) == 1)
{
rtx fn, insn, retval;
tree return_type;
- enum machine_mode return_mode;
+ machine_mode return_mode;
const char *name;
return_type = DECL_RESULT (current_function_decl);
case SCmode:
mips_output_32bit_xfer ('f', GP_RETURN + TARGET_BIG_ENDIAN,
TARGET_BIG_ENDIAN
- ? FP_REG_FIRST + MAX_FPRS_PER_FMT
+ ? FP_REG_FIRST + 2
: FP_REG_FIRST);
mips_output_32bit_xfer ('f', GP_RETURN + TARGET_LITTLE_ENDIAN,
TARGET_LITTLE_ENDIAN
- ? FP_REG_FIRST + MAX_FPRS_PER_FMT
+ ? FP_REG_FIRST + 2
: FP_REG_FIRST);
if (GET_MODE (retval) == SCmode && TARGET_64BIT)
{
case DCmode:
mips_output_64bit_xfer ('f', GP_RETURN + (8 / UNITS_PER_WORD),
- FP_REG_FIRST + MAX_FPRS_PER_FMT);
+ FP_REG_FIRST + 2);
/* Fall though. */
case DFmode:
case V2SFmode:
+ gcc_assert (TARGET_PAIRED_SINGLE_FLOAT
+ || GET_MODE (retval) != V2SFmode);
mips_output_64bit_xfer ('f', GP_RETURN, FP_REG_FIRST);
break;
return true;
}
\f
-/* Implement MOVE_BY_PIECES_P. */
+/* Implement TARGET_USE_MOVE_BY_PIECES_INFRASTRUCTURE_P. */
bool
-mips_move_by_pieces_p (unsigned HOST_WIDE_INT size, unsigned int align)
+mips_use_by_pieces_infrastructure_p (unsigned HOST_WIDE_INT size,
+ unsigned int align,
+ enum by_pieces_operation op,
+ bool speed_p)
{
- if (HAVE_movmemsi)
+ if (op == STORE_BY_PIECES)
+ return mips_store_by_pieces_p (size, align);
+ if (op == MOVE_BY_PIECES && HAVE_movmemsi)
{
/* movmemsi is meant to generate code that is at least as good as
move_by_pieces. However, movmemsi effectively uses a by-pieces
return size < UNITS_PER_WORD;
return size <= MIPS_MAX_MOVE_BYTES_STRAIGHT;
}
- /* The default value. If this becomes a target hook, we should
- call the default definition instead. */
- return (move_by_pieces_ninsns (size, align, MOVE_MAX_PIECES + 1)
- < (unsigned int) MOVE_RATIO (optimize_insn_for_speed_p ()));
+
+ return default_use_by_pieces_infrastructure_p (size, align, op, speed_p);
}
-/* Implement STORE_BY_PIECES_P. */
+/* Implement a handler for STORE_BY_PIECES operations
+ for TARGET_USE_MOVE_BY_PIECES_INFRASTRUCTURE_P. */
bool
mips_store_by_pieces_p (unsigned HOST_WIDE_INT size, unsigned int align)
HOST_WIDE_INT offset, delta;
unsigned HOST_WIDE_INT bits;
int i;
- enum machine_mode mode;
+ machine_mode mode;
rtx *regs;
/* Work out how many bits to move at a time. If both operands have
rtx orig_addr, memsi_addr, memsi, shift, shiftsi, unshifted_mask;
rtx unshifted_mask_reg, mask, inverted_mask, si_op;
rtx res = NULL;
- enum machine_mode mode;
+ machine_mode mode;
mode = GET_MODE (mem);
HOST_WIDE_INT bitpos)
{
rtx left, right;
- enum machine_mode mode;
+ machine_mode mode;
if (!mips_get_unaligned_mem (dest, width, bitpos, &left, &right))
return false;
/* Return true if X is a MEM with the same size as MODE. */
bool
-mips_mem_fits_mode_p (enum machine_mode mode, rtx x)
+mips_mem_fits_mode_p (machine_mode mode, rtx x)
{
return (MEM_P (x)
&& MEM_SIZE_KNOWN_P (x)
mask_low_and_shift_len for the actual definition. */
bool
-mask_low_and_shift_p (enum machine_mode mode, rtx mask, rtx shift, int maxlen)
+mask_low_and_shift_p (machine_mode mode, rtx mask, rtx shift, int maxlen)
{
return IN_RANGE (mask_low_and_shift_len (mode, mask, shift), 1, maxlen);
}
see the table in the comment before the pattern. */
bool
-and_operands_ok (enum machine_mode mode, rtx op1, rtx op2)
+and_operands_ok (machine_mode mode, rtx op1, rtx op2)
{
return (memory_operand (op1, mode)
? and_load_operand (op2, mode)
return the length of the mask, otherwise return -1. */
int
-mask_low_and_shift_len (enum machine_mode mode, rtx mask, rtx shift)
+mask_low_and_shift_len (machine_mode mode, rtx mask, rtx shift)
{
HOST_WIDE_INT shval;
/* Implement TARGET_SELECT_RTX_SECTION. */
static section *
-mips_select_rtx_section (enum machine_mode mode, rtx x,
+mips_select_rtx_section (machine_mode mode, rtx x,
unsigned HOST_WIDE_INT align)
{
/* ??? Consider using mergeable small data sections. */
mips_dwarf_register_span (rtx reg)
{
rtx high, low;
- enum machine_mode mode;
-
- /* By default, GCC maps increasing register numbers to increasing
- memory locations, but paired FPRs are always little-endian,
- regardless of the prevailing endianness. */
+ machine_mode mode;
+
+ /* TARGET_FLOATXX is implemented as 32-bit floating-point registers but
+ ensures that double-precision registers are treated as if they were
+ 64-bit physical registers. The code will run correctly with 32-bit or
+ 64-bit registers which means that dwarf information cannot be precise
+ for all scenarios. We choose to state that the 64-bit values are stored
+ in a single 64-bit 'piece'. This slightly unusual construct can then be
+ interpreted as either a pair of registers if the registers are 32-bit or
+ a single 64-bit register depending on hardware. */
mode = GET_MODE (reg);
if (FP_REG_P (REGNO (reg))
- && TARGET_BIG_ENDIAN
- && MAX_FPRS_PER_FMT > 1
+ && TARGET_FLOATXX
&& GET_MODE_SIZE (mode) > UNITS_PER_FPREG)
+ {
+ return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, reg));
+ }
+ /* By default, GCC maps increasing register numbers to increasing
+ memory locations, but paired FPRs are always little-endian,
+ regardless of the prevailing endianness. */
+ else if (FP_REG_P (REGNO (reg))
+ && TARGET_BIG_ENDIAN
+ && MAX_FPRS_PER_FMT > 1
+ && GET_MODE_SIZE (mode) > UNITS_PER_FPREG)
{
gcc_assert (GET_MODE_SIZE (mode) == UNITS_PER_HWFPVALUE);
high = mips_subword (reg, true);
return NULL_RTX;
}
+/* Implement TARGET_DWARF_FRAME_REG_MODE. */
+
+static machine_mode
+mips_dwarf_frame_reg_mode (int regno)
+{
+ machine_mode mode = default_dwarf_frame_reg_mode (regno);
+
+ if (FP_REG_P (regno) && mips_abi == ABI_32 && TARGET_FLOAT64)
+ mode = SImode;
+
+ return mode;
+}
+
/* DSP ALU can bypass data with no delays for the following pairs. */
enum insn_code dspalu_bypass_table[][2] =
{
fprintf (asm_out_file, "\t.nan\t%s\n",
mips_nan == MIPS_IEEE_754_2008 ? "2008" : "legacy");
+#ifdef HAVE_AS_DOT_MODULE
+ /* Record the FP ABI. See below for comments. */
+ if (TARGET_NO_FLOAT)
+#ifdef HAVE_AS_GNU_ATTRIBUTE
+ fputs ("\t.gnu_attribute 4, 0\n", asm_out_file);
+#else
+ ;
+#endif
+ else if (!TARGET_HARD_FLOAT_ABI)
+ fputs ("\t.module\tsoftfloat\n", asm_out_file);
+ else if (!TARGET_DOUBLE_FLOAT)
+ fputs ("\t.module\tsinglefloat\n", asm_out_file);
+ else if (TARGET_FLOATXX)
+ fputs ("\t.module\tfp=xx\n", asm_out_file);
+ else if (TARGET_FLOAT64)
+ fputs ("\t.module\tfp=64\n", asm_out_file);
+ else
+ fputs ("\t.module\tfp=32\n", asm_out_file);
+
+ if (TARGET_ODD_SPREG)
+ fputs ("\t.module\toddspreg\n", asm_out_file);
+ else
+ fputs ("\t.module\tnooddspreg\n", asm_out_file);
+
+#else
#ifdef HAVE_AS_GNU_ATTRIBUTE
{
int attr;
/* Single-float code, -msingle-float. */
else if (!TARGET_DOUBLE_FLOAT)
attr = 2;
- /* 64-bit FP registers on a 32-bit target, -mips32r2 -mfp64. */
- else if (!TARGET_64BIT && TARGET_FLOAT64)
- attr = 4;
+ /* 64-bit FP registers on a 32-bit target, -mips32r2 -mfp64.
+ Reserved attr=4.
+ This case used 12 callee-saved double-precision registers
+ and is deprecated. */
+ /* 64-bit or 32-bit FP registers on a 32-bit target, -mfpxx. */
+ else if (TARGET_FLOATXX)
+ attr = 5;
+ /* 64-bit FP registers on a 32-bit target, -mfp64 -modd-spreg. */
+ else if (mips_abi == ABI_32 && TARGET_FLOAT64 && TARGET_ODD_SPREG)
+ attr = 6;
+ /* 64-bit FP registers on a 32-bit target, -mfp64 -mno-odd-spreg. */
+ else if (mips_abi == ABI_32 && TARGET_FLOAT64)
+ attr = 7;
/* Regular FP code, FP regs same size as GP regs, -mdouble-float. */
else
attr = 1;
fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n", attr);
}
+#endif
#endif
/* If TARGET_ABICALLS, tell GAS to generate -KPIC code. */
stack pointer. */
static void
-mips_save_restore_reg (enum machine_mode mode, int regno,
+mips_save_restore_reg (machine_mode mode, int regno,
HOST_WIDE_INT offset, mips_save_restore_fn fn)
{
rtx mem;
static void
mips_save_reg (rtx reg, rtx mem)
{
- if (GET_MODE (reg) == DFmode && !TARGET_FLOAT64)
+ if (GET_MODE (reg) == DFmode
+ && (!TARGET_FLOAT64
+ || mips_abi == ABI_32))
{
rtx x1, x2;
mips_for_each_saved_gpr_and_fpr (HOST_WIDE_INT sp_offset,
mips_save_restore_fn fn)
{
- enum machine_mode fpr_mode;
+ machine_mode fpr_mode;
int regno;
const struct mips_frame_info *frame = &cfun->machine->frame;
HOST_WIDE_INT offset;
regno -= MAX_FPRS_PER_FMT)
if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
{
- mips_save_restore_reg (fpr_mode, regno, offset, fn);
+ if (!TARGET_FLOAT64 && TARGET_DOUBLE_FLOAT
+ && (fixed_regs[regno] || fixed_regs[regno + 1]))
+ {
+ if (fixed_regs[regno])
+ mips_save_restore_reg (SFmode, regno + 1, offset, fn);
+ else
+ mips_save_restore_reg (SFmode, regno, offset, fn);
+ }
+ else
+ mips_save_restore_reg (fpr_mode, regno, offset, fn);
offset -= GET_MODE_SIZE (fpr_mode);
}
}
return "";
}
-/* A for_each_rtx callback. Stop the search if *X is a kernel register. */
+/* Return true if X contains a kernel register. */
-static int
-mips_kernel_reg_p (rtx *x, void *data ATTRIBUTE_UNUSED)
+static bool
+mips_refers_to_kernel_reg_p (const_rtx x)
{
- return REG_P (*x) && KERNEL_REG_P (REGNO (*x));
+ subrtx_iterator::array_type array;
+ FOR_EACH_SUBRTX (iter, array, x, NONCONST)
+ if (REG_P (*iter) && KERNEL_REG_P (REGNO (*iter)))
+ return true;
+ return false;
}
/* Expand the "prologue" pattern. */
rtx_insn *insn;
for (insn = get_last_insn (); insn != NULL_RTX; insn = PREV_INSN (insn))
if (INSN_P (insn)
- && for_each_rtx (&PATTERN (insn), mips_kernel_reg_p, NULL))
+ && mips_refers_to_kernel_reg_p (PATTERN (insn)))
break;
/* Emit a move from K1 to COP0 Status after insn. */
gcc_assert (insn != NULL_RTX);
$7 instead and adjust the return insn appropriately. */
if (TARGET_MIPS16 && REGNO (reg) == RETURN_ADDR_REGNUM)
reg = gen_rtx_REG (GET_MODE (reg), GP_REG_FIRST + 7);
- else if (GET_MODE (reg) == DFmode && !TARGET_FLOAT64)
+ else if (GET_MODE (reg) == DFmode
+ && (!TARGET_FLOAT64
+ || mips_abi == ABI_32))
{
mips_add_cfa_restore (mips_subword (reg, true));
mips_add_cfa_restore (mips_subword (reg, false));
{
for (insn = get_insns (); insn != NULL_RTX; insn = NEXT_INSN (insn))
if (INSN_P (insn)
- && for_each_rtx (&PATTERN(insn), mips_kernel_reg_p, NULL))
+ && mips_refers_to_kernel_reg_p (PATTERN (insn)))
break;
gcc_assert (insn != NULL_RTX);
/* Insert disable interrupts before the first use of K0 or K1. */
The result of this function is cached in mips_hard_regno_mode_ok. */
static bool
-mips_hard_regno_mode_ok_p (unsigned int regno, enum machine_mode mode)
+mips_hard_regno_mode_ok_p (unsigned int regno, machine_mode mode)
{
unsigned int size;
enum mode_class mclass;
&& (((regno - FP_REG_FIRST) % MAX_FPRS_PER_FMT) == 0
|| (MIN_FPRS_PER_FMT == 1 && size <= UNITS_PER_FPREG)))
{
+ /* Deny use of odd-numbered registers for 32-bit data for
+ the o32 FP64A ABI. */
+ if (TARGET_O32_FP64A_ABI && size <= 4 && (regno & 1) != 0)
+ return false;
+
/* Allow 64-bit vector modes for Loongson-2E/2F. */
if (TARGET_LOONGSON_VECTORS
&& (mode == V2SImode
/* Implement HARD_REGNO_NREGS. */
unsigned int
-mips_hard_regno_nregs (int regno, enum machine_mode mode)
+mips_hard_regno_nregs (int regno, machine_mode mode)
{
if (ST_REG_P (regno))
/* The size of FP status registers is always 4, because they only hold
in mips_hard_regno_nregs. */
int
-mips_class_max_nregs (enum reg_class rclass, enum machine_mode mode)
+mips_class_max_nregs (enum reg_class rclass, machine_mode mode)
{
int size;
HARD_REG_SET left;
/* Implement CANNOT_CHANGE_MODE_CLASS. */
bool
-mips_cannot_change_mode_class (enum machine_mode from,
- enum machine_mode to,
+mips_cannot_change_mode_class (machine_mode from,
+ machine_mode to,
enum reg_class rclass)
{
/* Allow conversions between different Loongson integer vectors,
/* Implement target hook small_register_classes_for_mode_p. */
static bool
-mips_small_register_classes_for_mode_p (enum machine_mode mode
+mips_small_register_classes_for_mode_p (machine_mode mode
ATTRIBUTE_UNUSED)
{
return TARGET_MIPS16;
/* Return true if moves in mode MODE can use the FPU's mov.fmt instruction. */
static bool
-mips_mode_ok_for_mov_fmt_p (enum machine_mode mode)
+mips_mode_ok_for_mov_fmt_p (machine_mode mode)
{
switch (mode)
{
/* Implement MODES_TIEABLE_P. */
bool
-mips_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
+mips_modes_tieable_p (machine_mode mode1, machine_mode mode2)
{
/* FPRs allow no mode punning, so it's not worth tying modes if we'd
prefer to put one of them in FPRs. */
the maximum for us. */
static int
-mips_register_move_cost (enum machine_mode mode,
+mips_register_move_cost (machine_mode mode,
reg_class_t from, reg_class_t to)
{
reg_class_t dregs;
/* Implement TARGET_MEMORY_MOVE_COST. */
static int
-mips_memory_move_cost (enum machine_mode mode, reg_class_t rclass, bool in)
+mips_memory_move_cost (machine_mode mode, reg_class_t rclass, bool in)
{
return (mips_cost->memory_latency
+ memory_move_secondary_cost (mode, rclass, in));
}
+/* Implement SECONDARY_MEMORY_NEEDED. */
+
+bool
+mips_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
+ machine_mode mode)
+{
+ /* Ignore spilled pseudos. */
+ if (lra_in_progress && (class1 == NO_REGS || class2 == NO_REGS))
+ return false;
+
+ if (((class1 == FP_REGS) != (class2 == FP_REGS))
+ && ((TARGET_FLOATXX && !ISA_HAS_MXHC1)
+ || TARGET_O32_FP64A_ABI)
+ && GET_MODE_SIZE (mode) >= 8)
+ return true;
+
+ return false;
+}
+
/* Return the register class required for a secondary register when
copying between one of the registers in RCLASS and value X, which
has mode MODE. X is the source of the move if IN_P, otherwise it
enum reg_class
mips_secondary_reload_class (enum reg_class rclass,
- enum machine_mode mode, rtx x, bool)
+ machine_mode mode, rtx x, bool)
{
int regno;
/* Implement TARGET_MODE_REP_EXTENDED. */
static int
-mips_mode_rep_extended (enum machine_mode mode, enum machine_mode mode_rep)
+mips_mode_rep_extended (machine_mode mode, machine_mode mode_rep)
{
/* On 64-bit targets, SImode register values are sign-extended to DImode. */
if (TARGET_64BIT && mode == SImode && mode_rep == DImode)
/* Implement TARGET_VALID_POINTER_MODE. */
static bool
-mips_valid_pointer_mode (enum machine_mode mode)
+mips_valid_pointer_mode (machine_mode mode)
{
return mode == SImode || (TARGET_64BIT && mode == DImode);
}
/* Implement TARGET_VECTOR_MODE_SUPPORTED_P. */
static bool
-mips_vector_mode_supported_p (enum machine_mode mode)
+mips_vector_mode_supported_p (machine_mode mode)
{
switch (mode)
{
/* Implement TARGET_SCALAR_MODE_SUPPORTED_P. */
static bool
-mips_scalar_mode_supported_p (enum machine_mode mode)
+mips_scalar_mode_supported_p (machine_mode mode)
{
if (ALL_FIXED_POINT_MODE_P (mode)
&& GET_MODE_PRECISION (mode) <= 2 * BITS_PER_WORD)
\f
/* Implement TARGET_VECTORIZE_PREFERRED_SIMD_MODE. */
-static enum machine_mode
-mips_preferred_simd_mode (enum machine_mode mode ATTRIBUTE_UNUSED)
+static machine_mode
+mips_preferred_simd_mode (machine_mode mode ATTRIBUTE_UNUSED)
{
if (TARGET_PAIRED_SINGLE_FLOAT
&& mode == SFmode)
case PROCESSOR_R9000:
case PROCESSOR_OCTEON:
case PROCESSOR_OCTEON2:
+ case PROCESSOR_OCTEON3:
return 2;
case PROCESSOR_SB1:
of the vector itself. */
static tree
-mips_builtin_vector_type (tree type, enum machine_mode mode)
+mips_builtin_vector_type (tree type, machine_mode mode)
{
static tree types[2 * (int) MAX_MACHINE_MODE];
int mode_index;
static rtx
mips_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
- enum machine_mode mode, int ignore)
+ machine_mode mode, int ignore)
{
tree fndecl;
unsigned int fcode, avail;
struct mips16_constant *next;
rtx value;
rtx_code_label *label;
- enum machine_mode mode;
+ machine_mode mode;
};
/* Information about an incomplete MIPS16 constant pool. FIRST is the
static rtx_code_label *
mips16_add_constant (struct mips16_constant_pool *pool,
- rtx value, enum machine_mode mode)
+ rtx value, machine_mode mode)
{
struct mips16_constant **p, *c;
bool first_of_size_p;
instruction emitted. MODE is the mode of the constant. */
static rtx_insn *
-mips16_emit_constants_1 (enum machine_mode mode, rtx value, rtx_insn *insn)
+mips16_emit_constants_1 (machine_mode mode, rtx value, rtx_insn *insn)
{
if (SCALAR_INT_MODE_P (mode) || ALL_SCALAR_FIXED_POINT_MODE_P (mode))
{
}
}
-/* This structure is used to communicate with mips16_rewrite_pool_refs.
- INSN is the instruction we're rewriting and POOL points to the current
- constant pool. */
-struct mips16_rewrite_pool_refs_info {
- rtx_insn *insn;
- struct mips16_constant_pool *pool;
-};
-
-/* Rewrite *X so that constant pool references refer to the constant's
- label instead. DATA points to a mips16_rewrite_pool_refs_info
- structure. */
+/* Rewrite INSN so that constant pool references refer to the constant's
+ label instead. */
-static int
-mips16_rewrite_pool_refs (rtx *x, void *data)
+static void
+mips16_rewrite_pool_refs (rtx_insn *insn, struct mips16_constant_pool *pool)
{
- struct mips16_rewrite_pool_refs_info *info =
- (struct mips16_rewrite_pool_refs_info *) data;
-
- if (force_to_mem_operand (*x, Pmode))
+ subrtx_ptr_iterator::array_type array;
+ FOR_EACH_SUBRTX_PTR (iter, array, &PATTERN (insn), ALL)
{
- rtx mem = force_const_mem (GET_MODE (*x), *x);
- validate_change (info->insn, x, mem, false);
- }
+ rtx *loc = *iter;
- if (MEM_P (*x))
- {
- mips16_rewrite_pool_constant (info->pool, &XEXP (*x, 0));
- return -1;
- }
-
- /* Don't rewrite the __mips16_rdwr symbol. */
- if (GET_CODE (*x) == UNSPEC && XINT (*x, 1) == UNSPEC_TLS_GET_TP)
- return -1;
-
- if (TARGET_MIPS16_TEXT_LOADS)
- mips16_rewrite_pool_constant (info->pool, x);
+ if (force_to_mem_operand (*loc, Pmode))
+ {
+ rtx mem = force_const_mem (GET_MODE (*loc), *loc);
+ validate_change (insn, loc, mem, false);
+ }
- return GET_CODE (*x) == CONST ? -1 : 0;
+ if (MEM_P (*loc))
+ {
+ mips16_rewrite_pool_constant (pool, &XEXP (*loc, 0));
+ iter.skip_subrtxes ();
+ }
+ else
+ {
+ if (TARGET_MIPS16_TEXT_LOADS)
+ mips16_rewrite_pool_constant (pool, loc);
+ if (GET_CODE (*loc) == CONST
+ /* Don't rewrite the __mips16_rdwr symbol. */
+ || (GET_CODE (*loc) == UNSPEC
+ && XINT (*loc, 1) == UNSPEC_TLS_GET_TP))
+ iter.skip_subrtxes ();
+ }
+ }
}
/* Return whether CFG is used in mips_reorg. */
mips16_lay_out_constants (bool split_p)
{
struct mips16_constant_pool pool;
- struct mips16_rewrite_pool_refs_info info;
rtx_insn *insn, *barrier;
if (!TARGET_MIPS16_PCREL_LOADS)
{
/* Rewrite constant pool references in INSN. */
if (USEFUL_INSN_P (insn))
- {
- info.insn = insn;
- info.pool = &pool;
- for_each_rtx (&PATTERN (insn), mips16_rewrite_pool_refs, &info);
- }
+ mips16_rewrite_pool_refs (insn, &pool);
pool.insn_address += mips16_insn_length (insn);
{
HOST_WIDE_INT bitoffset, bitsize;
tree inner, var_offset;
- enum machine_mode mode;
+ machine_mode mode;
int unsigned_p, volatile_p;
inner = get_inner_reference (expr, &bitsize, &bitoffset, &var_offset, &mode,
return offset < tree_to_uhwi (DECL_SIZE_UNIT (inner));
}
-/* A for_each_rtx callback for which DATA points to the instruction
- containing *X. Stop the search if we find a MEM that is not safe
- from R10K speculation. */
+/* Return true if X contains a MEM that is not safe from R10K speculation.
+ INSN is the instruction that contains X. */
-static int
-r10k_needs_protection_p_1 (rtx *loc, void *data)
+static bool
+r10k_needs_protection_p_1 (rtx x, rtx_insn *insn)
{
- rtx mem;
-
- mem = *loc;
- if (!MEM_P (mem))
- return 0;
-
- if (MEM_EXPR (mem)
- && MEM_OFFSET_KNOWN_P (mem)
- && r10k_safe_mem_expr_p (MEM_EXPR (mem), MEM_OFFSET (mem)))
- return -1;
-
- if (r10k_safe_address_p (XEXP (mem, 0), (rtx_insn *) data))
- return -1;
-
- return 1;
+ subrtx_var_iterator::array_type array;
+ FOR_EACH_SUBRTX_VAR (iter, array, x, NONCONST)
+ {
+ rtx mem = *iter;
+ if (MEM_P (mem))
+ {
+ if ((MEM_EXPR (mem)
+ && MEM_OFFSET_KNOWN_P (mem)
+ && r10k_safe_mem_expr_p (MEM_EXPR (mem), MEM_OFFSET (mem)))
+ || r10k_safe_address_p (XEXP (mem, 0), insn))
+ iter.skip_subrtxes ();
+ else
+ return true;
+ }
+ }
+ return false;
}
/* A note_stores callback for which DATA points to an instruction pointer.
rtx_insn **insn_ptr;
insn_ptr = (rtx_insn **) data;
- if (*insn_ptr && for_each_rtx (&x, r10k_needs_protection_p_1, *insn_ptr))
+ if (*insn_ptr && r10k_needs_protection_p_1 (x, *insn_ptr))
*insn_ptr = NULL;
}
-/* A for_each_rtx callback that iterates over the pattern of a CALL_INSN.
- Return nonzero if the call is not to a declared function. */
+/* X is the pattern of a call instruction. Return true if the call is
+ not to a declared function. */
-static int
-r10k_needs_protection_p_call (rtx *loc, void *data ATTRIBUTE_UNUSED)
+static bool
+r10k_needs_protection_p_call (const_rtx x)
{
- rtx x;
-
- x = *loc;
- if (!MEM_P (x))
- return 0;
-
- x = XEXP (x, 0);
- if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_DECL (x))
- return -1;
-
- return 1;
+ subrtx_iterator::array_type array;
+ FOR_EACH_SUBRTX (iter, array, x, NONCONST)
+ {
+ const_rtx mem = *iter;
+ if (MEM_P (mem))
+ {
+ const_rtx addr = XEXP (mem, 0);
+ if (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_DECL (addr))
+ iter.skip_subrtxes ();
+ else
+ return true;
+ }
+ }
+ return false;
}
/* Return true if instruction INSN needs to be protected by an R10K
r10k_needs_protection_p (rtx_insn *insn)
{
if (CALL_P (insn))
- return for_each_rtx (&PATTERN (insn), r10k_needs_protection_p_call, NULL);
+ return r10k_needs_protection_p_call (PATTERN (insn));
if (mips_r10k_cache_barrier == R10K_CACHE_BARRIER_STORE)
{
return insn == NULL_RTX;
}
- return for_each_rtx (&PATTERN (insn), r10k_needs_protection_p_1, insn);
+ return r10k_needs_protection_p_1 (PATTERN (insn), insn);
}
/* Return true if BB is only reached by blocks in PROTECTED_BBS and if every
}
}
\f
-/* A temporary variable used by for_each_rtx callbacks, etc. */
+/* A temporary variable used by note_uses callbacks, etc. */
static rtx_insn *mips_sim_insn;
/* A structure representing the state of the processor pipeline.
}
}
-/* A for_each_rtx callback. If *X is a register, advance simulation state
- DATA until mips_sim_insn can read the register's value. */
-
-static int
-mips_sim_wait_regs_2 (rtx *x, void *data)
-{
- if (REG_P (*x))
- mips_sim_wait_reg ((struct mips_sim *) data, mips_sim_insn, *x);
- return 0;
-}
-
-/* Call mips_sim_wait_regs_2 (R, DATA) for each register R mentioned in *X. */
+/* A note_uses callback. For each register in *X, advance simulation
+ state DATA until mips_sim_insn can read the register's value. */
static void
mips_sim_wait_regs_1 (rtx *x, void *data)
{
- for_each_rtx (x, mips_sim_wait_regs_2, data);
+ subrtx_var_iterator::array_type array;
+ FOR_EACH_SUBRTX_VAR (iter, array, *x, NONCONST)
+ if (REG_P (*iter))
+ mips_sim_wait_reg ((struct mips_sim *) data, mips_sim_insn, *iter);
}
/* Advance simulation state STATE until all of INSN's register
mips_tuning_info.fast_mult_zero_zero_p = setting;
start_sequence ();
- enum machine_mode dword_mode = TARGET_64BIT ? TImode : DImode;
+ machine_mode dword_mode = TARGET_64BIT ? TImode : DImode;
rtx hilo = gen_rtx_REG (dword_mode, MD_REG_FIRST);
mips_emit_move_or_split (hilo, const0_rtx, SPLIT_FOR_SPEED);
return INTVAL (offset) <= entry->offset;
}
-/* A for_each_rtx callback for which DATA is a mips_lo_sum_offset hash table.
- Record every LO_SUM in *LOC. */
+/* Search X for LO_SUMs and record them in HTAB. */
-static int
-mips_record_lo_sum (rtx *loc, void *data)
+static void
+mips_record_lo_sums (const_rtx x, mips_offset_table *htab)
{
- if (GET_CODE (*loc) == LO_SUM)
- mips_lo_sum_offset_lookup ((mips_offset_table*) data,
- XEXP (*loc, 1), INSERT);
- return 0;
+ subrtx_iterator::array_type array;
+ FOR_EACH_SUBRTX (iter, array, x, NONCONST)
+ if (GET_CODE (*iter) == LO_SUM)
+ mips_lo_sum_offset_lookup (htab, XEXP (*iter, 1), INSERT);
}
/* Return true if INSN is a SET of an orphaned high-part relocation.
get_referenced_operands (string, used, noperands);
for (int i = 0; i < noperands; ++i)
if (used[i])
- for_each_rtx (&ops[i], mips_record_lo_sum, &htab);
+ mips_record_lo_sums (ops[i], &htab);
}
else
- for_each_rtx (&PATTERN (subinsn), mips_record_lo_sum, &htab);
+ mips_record_lo_sums (PATTERN (subinsn), &htab);
}
last_insn = 0;
target_flags &= ~MASK_FLOAT64;
}
+ if (mips_abi != ABI_32 && TARGET_FLOATXX)
+ error ("%<-mfpxx%> can only be used with the o32 ABI");
+ else if (ISA_MIPS1 && !TARGET_FLOAT32)
+ error ("%<-march=%s%> requires %<-mfp32%>", mips_arch_info->name);
+ else if (TARGET_FLOATXX && !mips_lra_flag)
+ error ("%<-mfpxx%> requires %<-mlra%>");
+
/* End of code shared with GAS. */
/* The R5900 FPU only supports single precision. */
warning (0, "the %qs architecture does not support madd or msub"
" instructions", mips_arch_info->name);
+ /* If neither -modd-spreg nor -mno-odd-spreg was given on the command
+ line, set MASK_ODD_SPREG based on the ISA and ABI. */
+ if ((target_flags_explicit & MASK_ODD_SPREG) == 0)
+ {
+ /* Disable TARGET_ODD_SPREG when using the o32 FPXX ABI. */
+ if (!ISA_HAS_ODD_SPREG || TARGET_FLOATXX)
+ target_flags &= ~MASK_ODD_SPREG;
+ else
+ target_flags |= MASK_ODD_SPREG;
+ }
+ else if (TARGET_ODD_SPREG && !ISA_HAS_ODD_SPREG)
+ warning (0, "the %qs architecture does not support odd single-precision"
+ " registers", mips_arch_info->name);
+
+ if (!TARGET_ODD_SPREG && TARGET_64BIT)
+ {
+ error ("unsupported combination: %s", "-mgp64 -mno-odd-spreg");
+ /* Allow compilation to continue further even though invalid output
+ will be produced. */
+ target_flags |= MASK_ODD_SPREG;
+ }
+
/* The effect of -mabicalls isn't defined for the EABI. */
if (mips_abi == ABI_EABI && TARGET_ABICALLS)
{
for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
mips_hard_regno_mode_ok[mode][regno]
- = mips_hard_regno_mode_ok_p (regno, (enum machine_mode) mode);
+ = mips_hard_regno_mode_ok_p (regno, (machine_mode) mode);
/* Function to allocate machine-dependent function status. */
init_machine_status = &mips_init_machine_status;
call_really_used_regs[regno] = call_used_regs[regno] = 1;
}
/* Odd registers in the range $f21-$f31 (inclusive) are call-clobbered
- for n32. */
- if (mips_abi == ABI_N32)
+ for n32 and o32 FP64. */
+ if (mips_abi == ABI_N32
+ || (mips_abi == ABI_32
+ && TARGET_FLOAT64))
{
int regno;
for (regno = FP_REG_FIRST + 21; regno <= FP_REG_FIRST + 31; regno+=2)
return false;
}
-/* A for_each_rtx callback. Stop the search if *X is an AT register. */
-
-static int
-mips_at_reg_p (rtx *x, void *data ATTRIBUTE_UNUSED)
-{
- return REG_P (*x) && REGNO (*x) == AT_REGNUM;
-}
-
/* Return true if INSN needs to be wrapped in ".set noat".
INSN has NOPERANDS operands, stored in OPVEC. */
static bool
mips_need_noat_wrapper_p (rtx_insn *insn, rtx *opvec, int noperands)
{
- int i;
-
if (recog_memoized (insn) >= 0)
- for (i = 0; i < noperands; i++)
- if (for_each_rtx (&opvec[i], mips_at_reg_p, NULL))
- return true;
+ {
+ subrtx_iterator::array_type array;
+ for (int i = 0; i < noperands; i++)
+ FOR_EACH_SUBRTX (iter, array, opvec[i], NONCONST)
+ if (REG_P (*iter) && REGNO (*iter) == AT_REGNUM)
+ return true;
+ }
return false;
}
when TARGET_LOONGSON_VECTORS is true. */
static unsigned HOST_WIDE_INT
-mips_shift_truncation_mask (enum machine_mode mode)
+mips_shift_truncation_mask (machine_mode mode)
{
if (TARGET_LOONGSON_VECTORS && VECTOR_MODE_P (mode))
return 0;
{
rtx target, op0, op1;
unsigned char perm[MAX_VECT_LEN];
- enum machine_mode vmode;
+ machine_mode vmode;
unsigned char nelt;
bool one_vector_p;
bool testing_p;
mips_expand_vselect_vconcat (rtx target, rtx op0, rtx op1,
const unsigned char *perm, unsigned nelt)
{
- enum machine_mode v2mode;
+ machine_mode v2mode;
rtx x;
v2mode = GET_MODE_2XWIDER_MODE (GET_MODE (op0));
/* Implement TARGET_VECTORIZE_VEC_PERM_CONST_OK. */
static bool
-mips_vectorize_vec_perm_const_ok (enum machine_mode vmode,
+mips_vectorize_vec_perm_const_ok (machine_mode vmode,
const unsigned char *sel)
{
struct expand_vec_perm_d d;
void
mips_expand_vec_unpack (rtx operands[2], bool unsigned_p, bool high_p)
{
- enum machine_mode imode = GET_MODE (operands[1]);
+ machine_mode imode = GET_MODE (operands[1]);
rtx (*unpack) (rtx, rtx, rtx);
rtx (*cmpgt) (rtx, rtx, rtx);
rtx tmp, dest, zero;
/* A subroutine of mips_expand_vec_init, expand via broadcast. */
static void
-mips_expand_vi_broadcast (enum machine_mode vmode, rtx target, rtx elt)
+mips_expand_vi_broadcast (machine_mode vmode, rtx target, rtx elt)
{
struct expand_vec_perm_d d;
rtx t1;
elements of VALS with zeros, copy the constant vector to TARGET. */
static void
-mips_expand_vi_constant (enum machine_mode vmode, unsigned nelt,
+mips_expand_vi_constant (machine_mode vmode, unsigned nelt,
rtx target, rtx vals)
{
rtvec vec = shallow_copy_rtvec (XVEC (vals, 0));
/* A subroutine of mips_expand_vec_init, expand anything via memory. */
static void
-mips_expand_vi_general (enum machine_mode vmode, enum machine_mode imode,
+mips_expand_vi_general (machine_mode vmode, machine_mode imode,
unsigned nelt, unsigned nvar, rtx target, rtx vals)
{
rtx mem = assign_stack_temp (vmode, GET_MODE_SIZE (vmode));
void
mips_expand_vector_init (rtx target, rtx vals)
{
- enum machine_mode vmode = GET_MODE (target);
- enum machine_mode imode = GET_MODE_INNER (vmode);
+ machine_mode vmode = GET_MODE (target);
+ machine_mode imode = GET_MODE_INNER (vmode);
unsigned i, nelt = GET_MODE_NUNITS (vmode);
unsigned nvar = 0, one_var = -1u;
bool all_same = true;
void
mips_expand_vec_reduc (rtx target, rtx in, rtx (*gen)(rtx, rtx, rtx))
{
- enum machine_mode vmode = GET_MODE (in);
+ machine_mode vmode = GET_MODE (in);
unsigned char perm2[2];
rtx last, next, fold, x;
bool ok;
mips_expand_vec_minmax (rtx target, rtx op0, rtx op1,
rtx (*cmp) (rtx, rtx, rtx), bool min_p)
{
- enum machine_mode vmode = GET_MODE (target);
+ machine_mode vmode = GET_MODE (target);
rtx tc, t0, t1, x;
tc = gen_reg_rtx (vmode);
emit_insn (gen_rtx_SET (VOIDmode, target, x));
}
+/* Implement HARD_REGNO_CALLER_SAVE_MODE. */
+
+machine_mode
+mips_hard_regno_caller_save_mode (unsigned int regno,
+ unsigned int nregs,
+ machine_mode mode)
+{
+ /* For performance, avoid saving/restoring upper parts of a register
+ by returning MODE as save mode when the mode is known. */
+ if (mode == VOIDmode)
+ return choose_hard_reg_mode (regno, nregs, false);
+ else
+ return mode;
+}
+
/* Implement TARGET_CASE_VALUES_THRESHOLD. */
unsigned int
static reg_class_t
mips_spill_class (reg_class_t rclass ATTRIBUTE_UNUSED,
- enum machine_mode mode ATTRIBUTE_UNUSED)
+ machine_mode mode ATTRIBUTE_UNUSED)
{
if (TARGET_MIPS16)
return SPILL_REGS;
#define TARGET_FUNCTION_ARG_ADVANCE mips_function_arg_advance
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY mips_function_arg_boundary
+#undef TARGET_GET_RAW_RESULT_MODE
+#define TARGET_GET_RAW_RESULT_MODE mips_get_reg_raw_mode
+#undef TARGET_GET_RAW_ARG_MODE
+#define TARGET_GET_RAW_ARG_MODE mips_get_reg_raw_mode
#undef TARGET_MODE_REP_EXTENDED
#define TARGET_MODE_REP_EXTENDED mips_mode_rep_extended
#endif
#undef TARGET_DWARF_REGISTER_SPAN
#define TARGET_DWARF_REGISTER_SPAN mips_dwarf_register_span
+#undef TARGET_DWARF_FRAME_REG_MODE
+#define TARGET_DWARF_FRAME_REG_MODE mips_dwarf_frame_reg_mode
#undef TARGET_ASM_FINAL_POSTSCAN_INSN
#define TARGET_ASM_FINAL_POSTSCAN_INSN mips_final_postscan_insn
#undef TARGET_CALL_FUSAGE_CONTAINS_NON_CALLEE_CLOBBERS
#define TARGET_CALL_FUSAGE_CONTAINS_NON_CALLEE_CLOBBERS true
+#undef TARGET_USE_BY_PIECES_INFRASTRUCTURE_P
+#define TARGET_USE_BY_PIECES_INFRASTRUCTURE_P \
+ mips_use_by_pieces_infrastructure_p
+
#undef TARGET_SPILL_CLASS
#define TARGET_SPILL_CLASS mips_spill_class
#undef TARGET_LRA_P