/* Default target hook functions.
- Copyright (C) 2003-2015 Free Software Foundation, Inc.
+ Copyright (C) 2003-2019 Free Software Foundation, Inc.
This file is part of GCC.
#include "tree.h"
#include "tree-ssa-alias.h"
#include "gimple-expr.h"
+#include "memmodel.h"
#include "tm_p.h"
#include "stringpool.h"
+#include "tree-vrp.h"
#include "tree-ssanames.h"
+#include "profile-count.h"
#include "optabs.h"
#include "regs.h"
#include "recog.h"
#include "calls.h"
#include "expr.h"
#include "output.h"
+#include "common/common-target.h"
#include "reload.h"
#include "intl.h"
#include "opts.h"
#include "gimplify.h"
-
+#include "predict.h"
+#include "params.h"
+#include "real.h"
+#include "langhooks.h"
+#include "sbitmap.h"
bool
default_legitimate_address_p (machine_mode mode ATTRIBUTE_UNUSED,
}
bool
-default_legitimize_address_displacement (rtx *disp ATTRIBUTE_UNUSED,
- rtx *offset ATTRIBUTE_UNUSED,
- machine_mode mode ATTRIBUTE_UNUSED)
+default_legitimize_address_displacement (rtx *, rtx *, poly_int64,
+ machine_mode)
+{
+ return false;
+}
+
+bool
+default_const_not_ok_for_debug_p (rtx x)
{
+ if (GET_CODE (x) == UNSPEC)
+ return true;
return false;
}
!= default_setup_incoming_varargs);
}
-machine_mode
+scalar_int_mode
default_eh_return_filter_mode (void)
{
return targetm.unwind_word_mode ();
}
-machine_mode
+scalar_int_mode
default_libgcc_cmp_return_mode (void)
{
return word_mode;
}
-machine_mode
+scalar_int_mode
default_libgcc_shift_count_mode (void)
{
return word_mode;
}
-machine_mode
+scalar_int_mode
default_unwind_word_mode (void)
{
return word_mode;
unsigned HOST_WIDE_INT
default_shift_truncation_mask (machine_mode mode)
{
- return SHIFT_COUNT_TRUNCATED ? GET_MODE_BITSIZE (mode) - 1 : 0;
+ return SHIFT_COUNT_TRUNCATED ? GET_MODE_UNIT_BITSIZE (mode) - 1 : 0;
}
/* The default implementation of TARGET_MIN_DIVISIONS_FOR_RECIP_MUL. */
/* The default implementation of TARGET_MODE_REP_EXTENDED. */
int
-default_mode_rep_extended (machine_mode mode ATTRIBUTE_UNUSED,
- machine_mode mode_rep ATTRIBUTE_UNUSED)
+default_mode_rep_extended (scalar_int_mode, scalar_int_mode)
{
return UNKNOWN;
}
return get_identifier (stripped);
}
+/* The default implementation of TARGET_TRANSLATE_MODE_ATTRIBUTE. */
+
+machine_mode
+default_translate_mode_attribute (machine_mode mode)
+{
+ return mode;
+}
+
/* True if MODE is valid for the target. By "valid", we mean able to
be manipulated in non-trivial ways. In particular, this means all
the arithmetic is supported.
supported by optabs.c. */
bool
-default_scalar_mode_supported_p (machine_mode mode)
+default_scalar_mode_supported_p (scalar_mode mode)
{
int precision = GET_MODE_PRECISION (mode);
be supported as a scalar mode). */
bool
-default_libgcc_floating_mode_supported_p (machine_mode mode)
+default_libgcc_floating_mode_supported_p (scalar_float_mode mode)
{
switch (mode)
{
#ifdef HAVE_SFmode
- case SFmode:
+ case E_SFmode:
#endif
#ifdef HAVE_DFmode
- case DFmode:
+ case E_DFmode:
#endif
#ifdef HAVE_XFmode
- case XFmode:
+ case E_XFmode:
#endif
#ifdef HAVE_TFmode
- case TFmode:
+ case E_TFmode:
#endif
return true;
}
}
+/* Return the machine mode to use for the type _FloatN, if EXTENDED is
+ false, or _FloatNx, if EXTENDED is true, or VOIDmode if not
+ supported. */
+opt_scalar_float_mode
+default_floatn_mode (int n, bool extended)
+{
+ if (extended)
+ {
+ opt_scalar_float_mode cand1, cand2;
+ scalar_float_mode mode;
+ switch (n)
+ {
+ case 32:
+#ifdef HAVE_DFmode
+ cand1 = DFmode;
+#endif
+ break;
+
+ case 64:
+#ifdef HAVE_XFmode
+ cand1 = XFmode;
+#endif
+#ifdef HAVE_TFmode
+ cand2 = TFmode;
+#endif
+ break;
+
+ case 128:
+ break;
+
+ default:
+ /* Those are the only valid _FloatNx types. */
+ gcc_unreachable ();
+ }
+ if (cand1.exists (&mode)
+ && REAL_MODE_FORMAT (mode)->ieee_bits > n
+ && targetm.scalar_mode_supported_p (mode)
+ && targetm.libgcc_floating_mode_supported_p (mode))
+ return cand1;
+ if (cand2.exists (&mode)
+ && REAL_MODE_FORMAT (mode)->ieee_bits > n
+ && targetm.scalar_mode_supported_p (mode)
+ && targetm.libgcc_floating_mode_supported_p (mode))
+ return cand2;
+ }
+ else
+ {
+ opt_scalar_float_mode cand;
+ scalar_float_mode mode;
+ switch (n)
+ {
+ case 16:
+ /* Always enable _Float16 if we have basic support for the mode.
+ Targets can control the range and precision of operations on
+ the _Float16 type using TARGET_C_EXCESS_PRECISION. */
+#ifdef HAVE_HFmode
+ cand = HFmode;
+#endif
+ break;
+
+ case 32:
+#ifdef HAVE_SFmode
+ cand = SFmode;
+#endif
+ break;
+
+ case 64:
+#ifdef HAVE_DFmode
+ cand = DFmode;
+#endif
+ break;
+
+ case 128:
+#ifdef HAVE_TFmode
+ cand = TFmode;
+#endif
+ break;
+
+ default:
+ break;
+ }
+ if (cand.exists (&mode)
+ && REAL_MODE_FORMAT (mode)->ieee_bits == n
+ && targetm.scalar_mode_supported_p (mode)
+ && targetm.libgcc_floating_mode_supported_p (mode))
+ return cand;
+ }
+ return opt_scalar_float_mode ();
+}
+
+/* Define this to return true if the _Floatn and _Floatnx built-in functions
+ should implicitly enable the built-in function without the __builtin_ prefix
+ in addition to the normal built-in function with the __builtin_ prefix. The
+ default is to only enable built-in functions without the __builtin_ prefix
+ for the GNU C language. The argument FUNC is the enum built_in_function
+ id of the function to be enabled. */
+
+bool
+default_floatn_builtin_p (int func ATTRIBUTE_UNUSED)
+{
+ static bool first_time_p = true;
+ static bool c_or_objective_c;
+
+ if (first_time_p)
+ {
+ first_time_p = false;
+ c_or_objective_c = lang_GNU_C () || lang_GNU_OBJC ();
+ }
+
+ return c_or_objective_c;
+}
+
/* Make some target macros useable by target-independent code. */
bool
targhook_words_big_endian (void)
tree vectype,
int misalign ATTRIBUTE_UNUSED)
{
- unsigned elements;
-
switch (type_of_cost)
{
case scalar_stmt:
return 3;
case vec_construct:
- elements = TYPE_VECTOR_SUBPARTS (vectype);
- return elements / 2 + 1;
+ return estimated_poly_value (TYPE_VECTOR_SUBPARTS (vectype)) - 1;
default:
gcc_unreachable ();
/* Reciprocal. */
tree
-default_builtin_reciprocal (unsigned int fn ATTRIBUTE_UNUSED,
- bool md_fn ATTRIBUTE_UNUSED,
- bool sqrt ATTRIBUTE_UNUSED)
+default_builtin_reciprocal (tree)
{
return NULL_TREE;
}
return 0;
}
+void
+hook_void_CUMULATIVE_ARGS_tree (cumulative_args_t ca ATTRIBUTE_UNUSED,
+ tree ATTRIBUTE_UNUSED)
+{
+}
+
void
default_function_arg_advance (cumulative_args_t ca ATTRIBUTE_UNUSED,
machine_mode mode ATTRIBUTE_UNUSED,
gcc_unreachable ();
}
+/* Default implementation of TARGET_FUNCTION_ARG_OFFSET. */
+
+HOST_WIDE_INT
+default_function_arg_offset (machine_mode, const_tree)
+{
+ return 0;
+}
+
+/* Default implementation of TARGET_FUNCTION_ARG_PADDING: usually pad
+ upward, but pad short args downward on big-endian machines. */
+
+pad_direction
+default_function_arg_padding (machine_mode mode, const_tree type)
+{
+ if (!BYTES_BIG_ENDIAN)
+ return PAD_UPWARD;
+
+ unsigned HOST_WIDE_INT size;
+ if (mode == BLKmode)
+ {
+ if (!type || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
+ return PAD_UPWARD;
+ size = int_size_in_bytes (type);
+ }
+ else
+ /* Targets with variable-sized modes must override this hook
+ and handle variable-sized modes explicitly. */
+ size = GET_MODE_SIZE (mode).to_constant ();
+
+ if (size < (PARM_BOUNDARY / BITS_PER_UNIT))
+ return PAD_DOWNWARD;
+
+ return PAD_UPWARD;
+}
+
rtx
default_function_arg (cumulative_args_t ca ATTRIBUTE_UNUSED,
machine_mode mode ATTRIBUTE_UNUSED,
const_rtx fun ATTRIBUTE_UNUSED)
{
#ifdef LIBCALL_VALUE
- return LIBCALL_VALUE (mode);
+ return LIBCALL_VALUE (MACRO_MODE (mode));
#else
gcc_unreachable ();
#endif
sorry ("nested function trampolines not supported on this target");
}
-int
-default_return_pops_args (tree fundecl ATTRIBUTE_UNUSED,
- tree funtype ATTRIBUTE_UNUSED,
- int size ATTRIBUTE_UNUSED)
+poly_int64
+default_return_pops_args (tree, tree, poly_int64)
{
return 0;
}
reg_class_t
default_ira_change_pseudo_allocno_class (int regno ATTRIBUTE_UNUSED,
- reg_class_t cl)
+ reg_class_t cl,
+ reg_class_t best_cl ATTRIBUTE_UNUSED)
{
return cl;
}
extern bool
default_lra_p (void)
{
- return false;
+ return true;
}
int
}
#ifdef SECONDARY_INPUT_RELOAD_CLASS
if (in_p)
- rclass = SECONDARY_INPUT_RELOAD_CLASS (reload_class, reload_mode, x);
+ rclass = SECONDARY_INPUT_RELOAD_CLASS (reload_class,
+ MACRO_MODE (reload_mode), x);
#endif
#ifdef SECONDARY_OUTPUT_RELOAD_CLASS
if (! in_p)
- rclass = SECONDARY_OUTPUT_RELOAD_CLASS (reload_class, reload_mode, x);
+ rclass = SECONDARY_OUTPUT_RELOAD_CLASS (reload_class,
+ MACRO_MODE (reload_mode), x);
#endif
if (rclass != NO_REGS)
{
return rclass;
}
+/* The default implementation of TARGET_SECONDARY_MEMORY_NEEDED_MODE. */
+
+machine_mode
+default_secondary_memory_needed_mode (machine_mode mode)
+{
+ if (!targetm.lra_p ()
+ && known_lt (GET_MODE_BITSIZE (mode), BITS_PER_WORD)
+ && INTEGRAL_MODE_P (mode))
+ return mode_for_size (BITS_PER_WORD, GET_MODE_CLASS (mode), 0).require ();
+ return mode;
+}
+
/* By default, if flag_pic is true, then neither local nor global relocs
should be placed in readonly memory. */
return flag_pic ? 3 : 0;
}
+/* By default, address diff vectors are generated
+   for jump tables when flag_pic is true.  */
+
+bool
+default_generate_pic_addr_diff_vec (void)
+{
+ return flag_pic;
+}
+
/* By default, do no modification. */
tree default_mangle_decl_assembler_name (tree decl ATTRIBUTE_UNUSED,
tree id)
return id;
}
+/* The default implementation of TARGET_STATIC_RTX_ALIGNMENT. */
+
+HOST_WIDE_INT
+default_static_rtx_alignment (machine_mode mode)
+{
+ return GET_MODE_ALIGNMENT (mode);
+}
+
+/* The default implementation of TARGET_CONSTANT_ALIGNMENT. */
+
+HOST_WIDE_INT
+default_constant_alignment (const_tree, HOST_WIDE_INT align)
+{
+ return align;
+}
+
+/* An implementation of TARGET_CONSTANT_ALIGNMENT that aligns strings
+ to at least BITS_PER_WORD but otherwise makes no changes. */
+
+HOST_WIDE_INT
+constant_alignment_word_strings (const_tree exp, HOST_WIDE_INT align)
+{
+ if (TREE_CODE (exp) == STRING_CST)
+ return MAX (align, BITS_PER_WORD);
+ return align;
+}
+
/* Default to natural alignment for vector types. */
HOST_WIDE_INT
default_vector_alignment (const_tree type)
{
- return tree_to_shwi (TYPE_SIZE (type));
+ HOST_WIDE_INT align = tree_to_shwi (TYPE_SIZE (type));
+ if (align > MAX_OFILE_ALIGNMENT)
+ align = MAX_OFILE_ALIGNMENT;
+ return align;
}
-bool
-default_builtin_vector_alignment_reachable (const_tree type, bool is_packed)
-{
- if (is_packed)
- return false;
+/* The default implementation of
+ TARGET_VECTORIZE_PREFERRED_VECTOR_ALIGNMENT. */
- /* Assuming that types whose size is > pointer-size are not guaranteed to be
- naturally aligned. */
- if (tree_int_cst_compare (TYPE_SIZE (type), bitsize_int (POINTER_SIZE)) > 0)
- return false;
+poly_uint64
+default_preferred_vector_alignment (const_tree type)
+{
+ return TYPE_ALIGN (type);
+}
- /* Assuming that types whose size is <= pointer-size
- are naturally aligned. */
- return true;
+/* By default assume vectors of element TYPE require a multiple of the natural
+ alignment of TYPE. TYPE is naturally aligned if IS_PACKED is false. */
+bool
+default_builtin_vector_alignment_reachable (const_tree /*type*/, bool is_packed)
+{
+ return ! is_packed;
}
/* By default, assume that a target supports any factor of misalignment
possibly adds/subtracts using bit-twiddling. */
machine_mode
-default_preferred_simd_mode (machine_mode mode ATTRIBUTE_UNUSED)
+default_preferred_simd_mode (scalar_mode)
{
return word_mode;
}
+/* By default do not split reductions further. */
+
+machine_mode
+default_split_reduction (machine_mode mode)
+{
+ return mode;
+}
+
/* By default only the size derived from the preferred vector mode
is tried. */
-unsigned int
-default_autovectorize_vector_sizes (void)
+void
+default_autovectorize_vector_sizes (vector_sizes *)
{
- return 0;
}
-/* By defaults a vector of integers is used as a mask. */
+/* By default a vector of integers is used as a mask. */
-machine_mode
-default_get_mask_mode (unsigned nunits, unsigned vector_size)
+opt_machine_mode
+default_get_mask_mode (poly_uint64 nunits, poly_uint64 vector_size)
{
- unsigned elem_size = vector_size / nunits;
- machine_mode elem_mode
- = smallest_mode_for_size (elem_size * BITS_PER_UNIT, MODE_INT);
+ unsigned int elem_size = vector_element_size (vector_size, nunits);
+ scalar_int_mode elem_mode
+ = smallest_int_mode_for_size (elem_size * BITS_PER_UNIT);
machine_mode vector_mode;
- gcc_assert (elem_size * nunits == vector_size);
+ gcc_assert (known_eq (elem_size * nunits, vector_size));
- vector_mode = mode_for_vector (elem_mode, nunits);
- if (!VECTOR_MODE_P (vector_mode)
- || !targetm.vector_mode_supported_p (vector_mode))
- vector_mode = BLKmode;
+ if (mode_for_vector (elem_mode, nunits).exists (&vector_mode)
+ && VECTOR_MODE_P (vector_mode)
+ && targetm.vector_mode_supported_p (vector_mode))
+ return vector_mode;
- return vector_mode;
+ return opt_machine_mode ();
+}
+
+/* By default consider masked stores to be expensive. */
+
+bool
+default_empty_mask_is_expensive (unsigned ifn)
+{
+ return ifn == IFN_MASK_STORE;
}
/* By default, the cost model accumulates three separate costs (prologue,
/* Determine whether or not a pointer mode is valid. Assume defaults
of ptr_mode or Pmode - can be overridden. */
bool
-default_valid_pointer_mode (machine_mode mode)
+default_valid_pointer_mode (scalar_int_mode mode)
{
return (mode == ptr_mode || mode == Pmode);
}
/* Return the mode for a pointer to a given ADDRSPACE,
defaulting to ptr_mode for all address spaces. */
-machine_mode
+scalar_int_mode
default_addr_space_pointer_mode (addr_space_t addrspace ATTRIBUTE_UNUSED)
{
return ptr_mode;
/* Return the mode for an address in a given ADDRSPACE,
defaulting to Pmode for all address spaces. */
-machine_mode
+scalar_int_mode
default_addr_space_address_mode (addr_space_t addrspace ATTRIBUTE_UNUSED)
{
return Pmode;
To match the above, the same modes apply to all address spaces. */
bool
-default_addr_space_valid_pointer_mode (machine_mode mode,
+default_addr_space_valid_pointer_mode (scalar_int_mode mode,
addr_space_t as ATTRIBUTE_UNUSED)
{
return targetm.valid_pointer_mode (mode);
return as;
}
+/* The default hook implementation for TARGET_ADDR_SPACE_DIAGNOSE_USAGE.
+ Don't complain about any address space. */
+
+void
+default_addr_space_diagnose_usage (addr_space_t, location_t)
+{
+}
+
+
/* The default hook for TARGET_ADDR_SPACE_CONVERT. This hook should never be
called for targets with only a generic address space. */
gcc_unreachable ();
}
+/* The default implementation of TARGET_HARD_REGNO_NREGS.  */
+
+unsigned int
+default_hard_regno_nregs (unsigned int, machine_mode mode)
+{
+ /* Targets with variable-sized modes must provide their own definition
+ of this hook. */
+ return CEIL (GET_MODE_SIZE (mode).to_constant (), UNITS_PER_WORD);
+}
+
bool
default_hard_regno_scratch_ok (unsigned int regno ATTRIBUTE_UNUSED)
{
bool
default_target_can_inline_p (tree caller, tree callee)
{
- bool ret = false;
tree callee_opts = DECL_FUNCTION_SPECIFIC_TARGET (callee);
tree caller_opts = DECL_FUNCTION_SPECIFIC_TARGET (caller);
-
- /* If callee has no option attributes, then it is ok to inline */
- if (!callee_opts)
- ret = true;
-
- /* If caller has no option attributes, but callee does then it is not ok to
- inline */
- else if (!caller_opts)
- ret = false;
+ if (! callee_opts)
+ callee_opts = target_option_default_node;
+ if (! caller_opts)
+ caller_opts = target_option_default_node;
/* If both caller and callee have attributes, assume that if the
pointer is different, the two functions have different target
options since build_target_option_node uses a hash table for the
options. */
- else
- ret = (callee_opts == caller_opts);
-
- return ret;
+ return callee_opts == caller_opts;
}
/* If the machine does not have a case insn that compares the bounds,
#ifndef MEMORY_MOVE_COST
return (4 + memory_move_secondary_cost (mode, (enum reg_class) rclass, in));
#else
- return MEMORY_MOVE_COST (mode, (enum reg_class) rclass, in);
+ return MEMORY_MOVE_COST (MACRO_MODE (mode), (enum reg_class) rclass, in);
#endif
}
#ifndef REGISTER_MOVE_COST
return 2;
#else
- return REGISTER_MOVE_COST (mode, (enum reg_class) from, (enum reg_class) to);
+ return REGISTER_MOVE_COST (MACRO_MODE (mode),
+ (enum reg_class) from, (enum reg_class) to);
#endif
}
+/* The default implementation of TARGET_SLOW_UNALIGNED_ACCESS. */
+
+bool
+default_slow_unaligned_access (machine_mode, unsigned int)
+{
+ return STRICT_ALIGNMENT;
+}
+
+/* The default implementation of TARGET_ESTIMATED_POLY_VALUE. */
+
+HOST_WIDE_INT
+default_estimated_poly_value (poly_int64 x)
+{
+ return x.coeffs[0];
+}
+
/* For hooks which use the MOVE_RATIO macro, this gives the legacy default
- behaviour. SPEED_P is true if we are compiling for speed. */
+ behavior. SPEED_P is true if we are compiling for speed. */
unsigned int
get_move_ratio (bool speed_p ATTRIBUTE_UNUSED)
switch (op)
{
- case CLEAR_BY_PIECES:
- max_size = STORE_MAX_PIECES;
- ratio = CLEAR_RATIO (speed_p);
- break;
- case MOVE_BY_PIECES:
- max_size = MOVE_MAX_PIECES;
- ratio = get_move_ratio (speed_p);
- break;
- case SET_BY_PIECES:
- max_size = STORE_MAX_PIECES;
- ratio = SET_RATIO (speed_p);
- break;
- case STORE_BY_PIECES:
- max_size = STORE_MAX_PIECES;
- ratio = get_move_ratio (speed_p);
- break;
+ case CLEAR_BY_PIECES:
+ max_size = STORE_MAX_PIECES;
+ ratio = CLEAR_RATIO (speed_p);
+ break;
+ case MOVE_BY_PIECES:
+ max_size = MOVE_MAX_PIECES;
+ ratio = get_move_ratio (speed_p);
+ break;
+ case SET_BY_PIECES:
+ max_size = STORE_MAX_PIECES;
+ ratio = SET_RATIO (speed_p);
+ break;
+ case STORE_BY_PIECES:
+ max_size = STORE_MAX_PIECES;
+ ratio = get_move_ratio (speed_p);
+ break;
+ case COMPARE_BY_PIECES:
+ max_size = COMPARE_MAX_PIECES;
+ /* Pick a likely default, just as in get_move_ratio. */
+ ratio = speed_p ? 15 : 3;
+ break;
+ }
+
+ return by_pieces_ninsns (size, alignment, max_size + 1, op) < ratio;
+}
+
+/* This hook controls code generation for expanding a memcmp operation by
+ pieces. Return 1 for the normal pattern of compare/jump after each pair
+ of loads, or a higher number to reduce the number of branches. */
+
+int
+default_compare_by_pieces_branch_ratio (machine_mode)
+{
+ return 1;
+}
+
+/* Write PATCH_AREA_SIZE NOPs into the asm outfile FILE around a function
+ entry. If RECORD_P is true and the target supports named sections,
+ the location of the NOPs will be recorded in a special object section
+ called "__patchable_function_entries". This routine may be called
+ twice per function to put NOPs before and after the function
+ entry. */
+
+void
+default_print_patchable_function_entry (FILE *file,
+ unsigned HOST_WIDE_INT patch_area_size,
+ bool record_p)
+{
+ const char *nop_templ = 0;
+ int code_num;
+ rtx_insn *my_nop = make_insn_raw (gen_nop ());
+
+ /* We use the template alone, relying on the (currently sane) assumption
+ that the NOP template does not have variable operands. */
+ code_num = recog_memoized (my_nop);
+ nop_templ = get_insn_template (code_num, my_nop);
+
+ if (record_p && targetm_common.have_named_sections)
+ {
+ char buf[256];
+ static int patch_area_number;
+ section *previous_section = in_section;
+ const char *asm_op = integer_asm_op (POINTER_SIZE_UNITS, false);
+
+ gcc_assert (asm_op != NULL);
+ patch_area_number++;
+ ASM_GENERATE_INTERNAL_LABEL (buf, "LPFE", patch_area_number);
+
+ switch_to_section (get_section ("__patchable_function_entries",
+ 0, NULL));
+ fputs (asm_op, file);
+ assemble_name_raw (file, buf);
+ fputc ('\n', file);
+
+ switch_to_section (previous_section);
+ ASM_OUTPUT_LABEL (file, buf);
}
- return move_by_pieces_ninsns (size, alignment, max_size + 1) < ratio;
+ unsigned i;
+ for (i = 0; i < patch_area_size; ++i)
+ fprintf (file, "\t%s\n", nop_templ);
}
bool
machine_mode mode ATTRIBUTE_UNUSED)
{
#ifdef CLASS_MAX_NREGS
- return (unsigned char) CLASS_MAX_NREGS ((enum reg_class) rclass, mode);
+ return (unsigned char) CLASS_MAX_NREGS ((enum reg_class) rclass,
+ MACRO_MODE (mode));
#else
- return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
+ /* Targets with variable-sized modes must provide their own definition
+ of this hook. */
+ unsigned int size = GET_MODE_SIZE (mode).to_constant ();
+ return (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
#endif
}
return UI_NONE;
}
+/* Targets that set NUM_POLY_INT_COEFFS to something greater than 1
+ must define this hook. */
+
+unsigned int
+default_dwarf_poly_indeterminate_value (unsigned int, unsigned int *, int *)
+{
+ gcc_unreachable ();
+}
+
/* Determine the correct mode for a Dwarf frame register that represents
register REGNO. */
{
machine_mode save_mode = reg_raw_mode[regno];
- if (HARD_REGNO_CALL_PART_CLOBBERED (regno, save_mode))
+ if (targetm.hard_regno_call_part_clobbered (NULL, regno, save_mode))
save_mode = choose_hard_reg_mode (regno, 1, true);
return save_mode;
}
/* To be used by targets where reg_raw_mode doesn't return the right
mode for registers used in apply_builtin_return and apply_builtin_arg. */
-machine_mode
+fixed_size_mode
default_get_reg_raw_mode (int regno)
{
- return reg_raw_mode[regno];
+ /* Targets must override this hook if the underlying register is
+ variable-sized. */
+ return as_a <fixed_size_mode> (reg_raw_mode[regno]);
}
/* Return true if a leaf function should stay leaf even with profiling
/* Default version of cstore_mode. */
-machine_mode
+scalar_int_mode
default_cstore_mode (enum insn_code icode)
{
- return insn_data[(int) icode].operand[0].mode;
+ return as_a <scalar_int_mode> (insn_data[(int) icode].operand[0].mode);
}
/* Default version of member_type_forces_blk. */
if (indirect)
type = build_pointer_type (type);
+ if (targetm.calls.split_complex_arg
+ && TREE_CODE (type) == COMPLEX_TYPE
+ && targetm.calls.split_complex_arg (type))
+ {
+ tree real_part, imag_part;
+
+ real_part = std_gimplify_va_arg_expr (valist,
+ TREE_TYPE (type), pre_p, NULL);
+ real_part = get_initialized_tmp_var (real_part, pre_p, NULL);
+
+ imag_part = std_gimplify_va_arg_expr (unshare_expr (valist),
+ TREE_TYPE (type), pre_p, NULL);
+ imag_part = get_initialized_tmp_var (imag_part, pre_p, NULL);
+
+ return build2 (COMPLEX_EXPR, type, real_part, imag_part);
+ }
+
align = PARM_BOUNDARY / BITS_PER_UNIT;
boundary = targetm.calls.function_arg_boundary (TYPE_MODE (type), type);
/* va_list pointer is aligned to PARM_BOUNDARY. If argument actually
requires greater alignment, we must perform dynamic alignment. */
if (boundary > align
+ && !TYPE_EMPTY_P (type)
&& !integer_zerop (TYPE_SIZE (type)))
{
t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
if (boundary < TYPE_ALIGN (type))
{
type = build_variant_type_copy (type);
- TYPE_ALIGN (type) = boundary;
+ SET_TYPE_ALIGN (type, boundary);
}
/* Compute the rounded size of the type. */
- type_size = size_in_bytes (type);
+ type_size = arg_size_in_bytes (type);
rounded_size = round_up (type_size, align);
/* Reduce rounded_size so it's sharable with the postqueue. */
return build_va_arg_indirect_ref (addr);
}
-tree
-default_chkp_bound_type (void)
+void
+default_setup_incoming_vararg_bounds (cumulative_args_t ca ATTRIBUTE_UNUSED,
+ machine_mode mode ATTRIBUTE_UNUSED,
+ tree type ATTRIBUTE_UNUSED,
+ int *pretend_arg_size ATTRIBUTE_UNUSED,
+ int second_time ATTRIBUTE_UNUSED)
{
- tree res = make_node (POINTER_BOUNDS_TYPE);
- TYPE_PRECISION (res) = TYPE_PRECISION (size_type_node) * 2;
- TYPE_NAME (res) = get_identifier ("__bounds_type");
- SET_TYPE_MODE (res, targetm.chkp_bound_mode ());
- layout_type (res);
- return res;
}
-enum machine_mode
-default_chkp_bound_mode (void)
+/* An implementation of TARGET_CAN_USE_DOLOOP_P for targets that do
+ not support nested low-overhead loops. */
+
+bool
+can_use_doloop_if_innermost (const widest_int &, const widest_int &,
+ unsigned int loop_depth, bool)
{
- return VOIDmode;
+ return loop_depth == 1;
}
-tree
-default_builtin_chkp_function (unsigned int fcode ATTRIBUTE_UNUSED)
+/* Default implementation of TARGET_OPTAB_SUPPORTED_P. */
+
+bool
+default_optab_supported_p (int, machine_mode, machine_mode, optimization_type)
{
- return NULL_TREE;
+ return true;
}
-rtx
-default_chkp_function_value_bounds (const_tree ret_type ATTRIBUTE_UNUSED,
- const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
- bool outgoing ATTRIBUTE_UNUSED)
+/* Default implementation of TARGET_MAX_NOCE_IFCVT_SEQ_COST. */
+
+unsigned int
+default_max_noce_ifcvt_seq_cost (edge e)
{
- gcc_unreachable ();
+ bool predictable_p = predictable_edge_p (e);
+
+ enum compiler_param param
+ = (predictable_p
+ ? PARAM_MAX_RTL_IF_CONVERSION_PREDICTABLE_COST
+ : PARAM_MAX_RTL_IF_CONVERSION_UNPREDICTABLE_COST);
+
+ /* If we have a parameter set, use that, otherwise take a guess using
+ BRANCH_COST. */
+ if (global_options_set.x_param_values[param])
+ return PARAM_VALUE (param);
+ else
+ return BRANCH_COST (true, predictable_p) * COSTS_N_INSNS (3);
}
-tree
-default_chkp_make_bounds_constant (HOST_WIDE_INT lb ATTRIBUTE_UNUSED,
- HOST_WIDE_INT ub ATTRIBUTE_UNUSED)
+/* Default implementation of TARGET_MIN_ARITHMETIC_PRECISION. */
+
+unsigned int
+default_min_arithmetic_precision (void)
{
- return NULL_TREE;
+ return WORD_REGISTER_OPERATIONS ? BITS_PER_WORD : BITS_PER_UNIT;
}
-int
-default_chkp_initialize_bounds (tree var ATTRIBUTE_UNUSED,
- tree lb ATTRIBUTE_UNUSED,
- tree ub ATTRIBUTE_UNUSED,
- tree *stmts ATTRIBUTE_UNUSED)
+/* Default implementation of TARGET_C_EXCESS_PRECISION. */
+
+enum flt_eval_method
+default_excess_precision (enum excess_precision_type ATTRIBUTE_UNUSED)
+{
+ return FLT_EVAL_METHOD_PROMOTE_TO_FLOAT;
+}
+
+/* Default implementation for
+ TARGET_STACK_CLASH_PROTECTION_ALLOCA_PROBE_RANGE. */
+HOST_WIDE_INT
+default_stack_clash_protection_alloca_probe_range (void)
{
return 0;
}
+/* The default implementation of TARGET_EARLY_REMAT_MODES. */
+
void
-default_setup_incoming_vararg_bounds (cumulative_args_t ca ATTRIBUTE_UNUSED,
- enum machine_mode mode ATTRIBUTE_UNUSED,
- tree type ATTRIBUTE_UNUSED,
- int *pretend_arg_size ATTRIBUTE_UNUSED,
- int second_time ATTRIBUTE_UNUSED)
+default_select_early_remat_modes (sbitmap)
{
}
-/* An implementation of TARGET_CAN_USE_DOLOOP_P for targets that do
- not support nested low-overhead loops. */
+/* The default implementation of TARGET_PREFERRED_ELSE_VALUE. */
+tree
+default_preferred_else_value (unsigned, tree type, unsigned, tree *)
+{
+ return build_zero_cst (type);
+}
+
+/* Default implementation of TARGET_HAVE_SPECULATION_SAFE_VALUE. */
bool
-can_use_doloop_if_innermost (const widest_int &, const widest_int &,
- unsigned int loop_depth, bool)
+default_have_speculation_safe_value (bool active ATTRIBUTE_UNUSED)
+{
+#ifdef HAVE_speculation_barrier
+ return active ? HAVE_speculation_barrier : true;
+#else
+ return false;
+#endif
+}
+/* Alternative implementation of TARGET_HAVE_SPECULATION_SAFE_VALUE
+ that can be used on targets that never have speculative execution. */
+bool
+speculation_safe_value_not_needed (bool active)
+{
+ return !active;
+}
+
+/* Default implementation of the speculation-safe-load builtin. This
+ implementation simply copies val to result and generates a
+ speculation_barrier insn, if such a pattern is defined. */
+rtx
+default_speculation_safe_value (machine_mode mode ATTRIBUTE_UNUSED,
+ rtx result, rtx val,
+ rtx failval ATTRIBUTE_UNUSED)
+{
+ emit_move_insn (result, val);
+
+#ifdef HAVE_speculation_barrier
+ /* Assume the target knows what it is doing: if it defines a
+ speculation barrier, but it is not enabled, then assume that one
+ isn't needed. */
+ if (HAVE_speculation_barrier)
+ emit_insn (gen_speculation_barrier ());
+#endif
+
+ return result;
+}
+
+void
+default_remove_extra_call_preserved_regs (rtx_insn *, HARD_REG_SET *)
{
- return loop_depth == 1;
}
#include "gt-targhooks.h"