/* Expand builtin functions.
Copyright (C) 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
- 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
- Free Software Foundation, Inc.
+ 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011,
+ 2012 Free Software Foundation, Inc.
This file is part of GCC.
/* Setup an array of _DECL trees, make sure each element is
initialized to NULL_TREE. */
-tree built_in_decls[(int) END_BUILTINS];
-/* Declarations used when constructing the builtin implicitly in the compiler.
- It may be NULL_TREE when this is invalid (for instance runtime is not
- required to implement the function call in all cases). */
-tree implicit_built_in_decls[(int) END_BUILTINS];
+builtin_info_type builtin_info;
static const char *c_getstr (tree);
static rtx c_readstr (const char *, enum machine_mode);
const REAL_VALUE_TYPE *, bool);
static tree do_mpfr_remquo (tree, tree, tree);
static tree do_mpfr_lgamma_r (tree, tree, tree);
+static void expand_builtin_sync_synchronize (void);
/* Return true if NAME starts with __builtin_ or __sync_. */
-bool
+static bool
is_builtin_name (const char *name)
{
if (strncmp (name, "__builtin_", 10) == 0)
return true;
if (strncmp (name, "__sync_", 7) == 0)
return true;
+ if (strncmp (name, "__atomic_", 9) == 0)
+ return true;
return false;
}
return is_builtin_name (name);
}
-/* Compute values M and N such that M divides (address of EXP - N) and
- such that N < M. Store N in *BITPOSP and return M.
+/* Compute values M and N such that M divides (address of EXP - N) and such
+ that N < M. If these numbers can be determined, store M in *ALIGNP and N in
+ *BITPOSP and return true. Otherwise return false and store BITS_PER_UNIT in
+ *ALIGNP and any bit-offset to *BITPOSP.
Note that the address (and thus the alignment) computed here is based
on the address to which a symbol resolves, whereas DECL_ALIGN is based
on the address at which an object is actually located. These two
addresses are not always the same. For example, on ARM targets,
the address &foo of a Thumb function foo() has the lowest bit set,
- whereas foo() itself starts on an even address. */
+ whereas foo() itself starts on an even address.
-unsigned int
-get_object_alignment_1 (tree exp, unsigned HOST_WIDE_INT *bitposp)
+ If ADDR_P is true we are taking the address of the memory reference EXP
+ and thus cannot rely on the access taking place. */
+
+static bool
+get_object_alignment_2 (tree exp, unsigned int *alignp,
+ unsigned HOST_WIDE_INT *bitposp, bool addr_p)
{
HOST_WIDE_INT bitsize, bitpos;
tree offset;
enum machine_mode mode;
int unsignedp, volatilep;
- unsigned int align, inner;
+ unsigned int inner, align = BITS_PER_UNIT;
+ bool known_alignment = false;
/* Get the innermost object and the constant (bitpos) and possibly
variable (offset) offset of the access. */
/* Extract alignment information from the innermost object and
possibly adjust bitpos and offset. */
- if (TREE_CODE (exp) == CONST_DECL)
- exp = DECL_INITIAL (exp);
- if (DECL_P (exp)
- && TREE_CODE (exp) != LABEL_DECL)
- {
- if (TREE_CODE (exp) == FUNCTION_DECL)
- {
- /* Function addresses can encode extra information besides their
- alignment. However, if TARGET_PTRMEMFUNC_VBIT_LOCATION
- allows the low bit to be used as a virtual bit, we know
- that the address itself must be 2-byte aligned. */
- if (TARGET_PTRMEMFUNC_VBIT_LOCATION == ptrmemfunc_vbit_in_pfn)
- align = 2 * BITS_PER_UNIT;
- else
- align = BITS_PER_UNIT;
- }
- else
- align = DECL_ALIGN (exp);
+ if (TREE_CODE (exp) == FUNCTION_DECL)
+ {
+ /* Function addresses can encode extra information besides their
+ alignment. However, if TARGET_PTRMEMFUNC_VBIT_LOCATION
+ allows the low bit to be used as a virtual bit, we know
+ that the address itself must be at least 2-byte aligned. */
+ if (TARGET_PTRMEMFUNC_VBIT_LOCATION == ptrmemfunc_vbit_in_pfn)
+ align = 2 * BITS_PER_UNIT;
}
- else if (CONSTANT_CLASS_P (exp))
+ else if (TREE_CODE (exp) == LABEL_DECL)
+ ;
+ else if (TREE_CODE (exp) == CONST_DECL)
{
+ /* The alignment of a CONST_DECL is determined by its initializer. */
+ exp = DECL_INITIAL (exp);
align = TYPE_ALIGN (TREE_TYPE (exp));
#ifdef CONSTANT_ALIGNMENT
- align = (unsigned)CONSTANT_ALIGNMENT (exp, align);
+ if (CONSTANT_CLASS_P (exp))
+ align = (unsigned) CONSTANT_ALIGNMENT (exp, align);
#endif
+ known_alignment = true;
+ }
+ else if (DECL_P (exp))
+ {
+ align = DECL_ALIGN (exp);
+ known_alignment = true;
}
else if (TREE_CODE (exp) == VIEW_CONVERT_EXPR)
- align = TYPE_ALIGN (TREE_TYPE (exp));
- else if (TREE_CODE (exp) == INDIRECT_REF)
- align = TYPE_ALIGN (TREE_TYPE (exp));
- else if (TREE_CODE (exp) == MEM_REF)
+ {
+ align = TYPE_ALIGN (TREE_TYPE (exp));
+ }
+ else if (TREE_CODE (exp) == INDIRECT_REF
+ || TREE_CODE (exp) == MEM_REF
+ || TREE_CODE (exp) == TARGET_MEM_REF)
{
tree addr = TREE_OPERAND (exp, 0);
- struct ptr_info_def *pi;
+ unsigned ptr_align;
+ unsigned HOST_WIDE_INT ptr_bitpos;
+
if (TREE_CODE (addr) == BIT_AND_EXPR
&& TREE_CODE (TREE_OPERAND (addr, 1)) == INTEGER_CST)
{
align *= BITS_PER_UNIT;
addr = TREE_OPERAND (addr, 0);
}
- else
- align = BITS_PER_UNIT;
- if (TREE_CODE (addr) == SSA_NAME
- && (pi = SSA_NAME_PTR_INFO (addr)))
- {
- bitpos += (pi->misalign * BITS_PER_UNIT) & ~(align - 1);
- align = MAX (pi->align * BITS_PER_UNIT, align);
- }
- else if (TREE_CODE (addr) == ADDR_EXPR)
- align = MAX (align, get_object_alignment (TREE_OPERAND (addr, 0)));
- bitpos += mem_ref_offset (exp).low * BITS_PER_UNIT;
- }
- else if (TREE_CODE (exp) == TARGET_MEM_REF)
- {
- struct ptr_info_def *pi;
- tree addr = TMR_BASE (exp);
- if (TREE_CODE (addr) == BIT_AND_EXPR
- && TREE_CODE (TREE_OPERAND (addr, 1)) == INTEGER_CST)
+
+ known_alignment
+ = get_pointer_alignment_1 (addr, &ptr_align, &ptr_bitpos);
+ align = MAX (ptr_align, align);
+
+ /* The alignment of the pointer operand in a TARGET_MEM_REF
+ has to take the variable offset parts into account. */
+ if (TREE_CODE (exp) == TARGET_MEM_REF)
{
- align = (TREE_INT_CST_LOW (TREE_OPERAND (addr, 1))
- & -TREE_INT_CST_LOW (TREE_OPERAND (addr, 1)));
- align *= BITS_PER_UNIT;
- addr = TREE_OPERAND (addr, 0);
+ if (TMR_INDEX (exp))
+ {
+ unsigned HOST_WIDE_INT step = 1;
+ if (TMR_STEP (exp))
+ step = TREE_INT_CST_LOW (TMR_STEP (exp));
+ align = MIN (align, (step & -step) * BITS_PER_UNIT);
+ }
+ if (TMR_INDEX2 (exp))
+ align = BITS_PER_UNIT;
+ known_alignment = false;
}
+
+ /* When EXP is an actual memory reference then we can use
+ TYPE_ALIGN of a pointer indirection to derive alignment.
+ Do so only if get_pointer_alignment_1 did not reveal absolute
+ alignment knowledge and if using that alignment would
+ improve the situation. */
+ if (!addr_p && !known_alignment
+ && TYPE_ALIGN (TREE_TYPE (exp)) > align)
+ align = TYPE_ALIGN (TREE_TYPE (exp));
else
- align = BITS_PER_UNIT;
- if (TREE_CODE (addr) == SSA_NAME
- && (pi = SSA_NAME_PTR_INFO (addr)))
- {
- bitpos += (pi->misalign * BITS_PER_UNIT) & ~(align - 1);
- align = MAX (pi->align * BITS_PER_UNIT, align);
- }
- else if (TREE_CODE (addr) == ADDR_EXPR)
- align = MAX (align, get_object_alignment (TREE_OPERAND (addr, 0)));
- if (TMR_OFFSET (exp))
- bitpos += TREE_INT_CST_LOW (TMR_OFFSET (exp)) * BITS_PER_UNIT;
- if (TMR_INDEX (exp) && TMR_STEP (exp))
{
- unsigned HOST_WIDE_INT step = TREE_INT_CST_LOW (TMR_STEP (exp));
- align = MIN (align, (step & -step) * BITS_PER_UNIT);
+ /* Else adjust bitpos accordingly. */
+ bitpos += ptr_bitpos;
+ if (TREE_CODE (exp) == MEM_REF
+ || TREE_CODE (exp) == TARGET_MEM_REF)
+ bitpos += mem_ref_offset (exp).low * BITS_PER_UNIT;
}
- else if (TMR_INDEX (exp))
- align = BITS_PER_UNIT;
- if (TMR_INDEX2 (exp))
- align = BITS_PER_UNIT;
}
- else
- align = BITS_PER_UNIT;
+ else if (TREE_CODE (exp) == STRING_CST)
+ {
+ /* STRING_CST are the only constant objects we allow to be not
+ wrapped inside a CONST_DECL. */
+ align = TYPE_ALIGN (TREE_TYPE (exp));
+#ifdef CONSTANT_ALIGNMENT
+ if (CONSTANT_CLASS_P (exp))
+ align = (unsigned) CONSTANT_ALIGNMENT (exp, align);
+#endif
+ known_alignment = true;
+ }
/* If there is a non-constant offset part extract the maximum
alignment that can prevail. */
}
offset = next_offset;
}
-
/* Alignment is innermost object alignment adjusted by the constant
and non-constant offset parts. */
align = MIN (align, inner);
- bitpos = bitpos & (align - 1);
- *bitposp = bitpos;
- return align;
+ *alignp = align;
+ *bitposp = bitpos & (*alignp - 1);
+ return known_alignment;
+}
+
+/* For a memory reference expression EXP compute values M and N such that M
+ divides (&EXP - N) and such that N < M. If these numbers can be determined,
+ store M in *ALIGNP and N in *BITPOSP and return true. Otherwise return false
+ and store BITS_PER_UNIT in *ALIGNP and any bit-offset to *BITPOSP. */
+
+bool
+get_object_alignment_1 (tree exp, unsigned int *alignp,
+ unsigned HOST_WIDE_INT *bitposp)
+{
+ return get_object_alignment_2 (exp, alignp, bitposp, false);
}
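/* A minimal sketch of the M/N contract above, assuming a hypothetical
   8-byte-aligned struct with a field at byte offset 2:

     struct S { short a; short b; } s __attribute__ ((aligned (8)));
     unsigned int align;
     unsigned HOST_WIDE_INT bitpos;
     bool known = get_object_alignment_1 (ref_to_s_b, &align, &bitpos);

   Here REF_TO_S_B stands for the tree for the access s.b; the call would
   yield known == true, align == 64 and bitpos == 16, i.e. &s.b minus 2
   bytes is 8-byte aligned.  */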
/* Return the alignment in bits of EXP, an object. */
unsigned HOST_WIDE_INT bitpos = 0;
unsigned int align;
- align = get_object_alignment_1 (exp, &bitpos);
+ get_object_alignment_1 (exp, &align, &bitpos);
/* align and bitpos now specify known low bits of the pointer.
ptr & (align - 1) == bitpos. */
if (bitpos != 0)
align = (bitpos & -bitpos);
-
return align;
}
-/* Return the alignment in bits of EXP, a pointer valued expression.
- The alignment returned is, by default, the alignment of the thing that
- EXP points to. If it is not a POINTER_TYPE, 0 is returned.
+/* For a pointer valued expression EXP compute values M and N such that M
+ divides (EXP - N) and such that N < M. If these numbers can be determined,
+ store M in *ALIGNP and N in *BITPOSP and return true. Return false if
+ the results are just a conservative approximation.
- Otherwise, look at the expression to see if we can do better, i.e., if the
- expression is actually pointing at an object whose alignment is tighter. */
+ If EXP is not a pointer, false is returned too. */
-unsigned int
-get_pointer_alignment (tree exp)
+bool
+get_pointer_alignment_1 (tree exp, unsigned int *alignp,
+ unsigned HOST_WIDE_INT *bitposp)
{
STRIP_NOPS (exp);
if (TREE_CODE (exp) == ADDR_EXPR)
- return get_object_alignment (TREE_OPERAND (exp, 0));
+ return get_object_alignment_2 (TREE_OPERAND (exp, 0),
+ alignp, bitposp, true);
else if (TREE_CODE (exp) == SSA_NAME
&& POINTER_TYPE_P (TREE_TYPE (exp)))
{
+ unsigned int ptr_align, ptr_misalign;
struct ptr_info_def *pi = SSA_NAME_PTR_INFO (exp);
- unsigned align;
- if (!pi)
- return BITS_PER_UNIT;
- if (pi->misalign != 0)
- align = (pi->misalign & -pi->misalign);
+
+ if (pi && get_ptr_info_alignment (pi, &ptr_align, &ptr_misalign))
+ {
+ *bitposp = ptr_misalign * BITS_PER_UNIT;
+ *alignp = ptr_align * BITS_PER_UNIT;
+ /* We cannot really tell whether this result is an approximation. */
+ return true;
+ }
else
- align = pi->align;
- return align * BITS_PER_UNIT;
+ {
+ *bitposp = 0;
+ *alignp = BITS_PER_UNIT;
+ return false;
+ }
+ }
+ else if (TREE_CODE (exp) == INTEGER_CST)
+ {
+ *alignp = BIGGEST_ALIGNMENT;
+ *bitposp = ((TREE_INT_CST_LOW (exp) * BITS_PER_UNIT)
+ & (BIGGEST_ALIGNMENT - 1));
+ return true;
}
- return POINTER_TYPE_P (TREE_TYPE (exp)) ? BITS_PER_UNIT : 0;
+ *bitposp = 0;
+ *alignp = BITS_PER_UNIT;
+ return false;
+}
+
+/* Return the alignment in bits of EXP, a pointer valued expression.
+ The alignment returned is, by default, the alignment of the thing that
+ EXP points to. If it is not a POINTER_TYPE, 0 is returned.
+
+ Otherwise, look at the expression to see if we can do better, i.e., if the
+ expression is actually pointing at an object whose alignment is tighter. */
+
+unsigned int
+get_pointer_alignment (tree exp)
+{
+ unsigned HOST_WIDE_INT bitpos = 0;
+ unsigned int align;
+
+ get_pointer_alignment_1 (exp, &align, &bitpos);
+
+ /* align and bitpos now specify known low bits of the pointer.
+ ptr & (align - 1) == bitpos. */
+
+ if (bitpos != 0)
+ align = (bitpos & -bitpos);
+
+ return align;
}
/* Compute the length of a C string. TREE_STRING_LENGTH is not the right
&& GET_MODE_SIZE (mode) >= UNITS_PER_WORD)
j = j + UNITS_PER_WORD - 2 * (j % UNITS_PER_WORD) - 1;
j *= BITS_PER_UNIT;
- gcc_assert (j < 2 * HOST_BITS_PER_WIDE_INT);
+ gcc_assert (j < HOST_BITS_PER_DOUBLE_INT);
if (ch)
ch = (unsigned char) str[i];
tem = RETURN_ADDR_RTX (count, tem);
#else
tem = memory_address (Pmode,
- plus_constant (tem, GET_MODE_SIZE (Pmode)));
+ plus_constant (Pmode, tem, GET_MODE_SIZE (Pmode)));
tem = gen_frame_mem (Pmode, tem);
#endif
return tem;
set_mem_alias_set (mem, setjmp_alias_set);
emit_move_insn (mem, targetm.builtin_setjmp_frame_value ());
- mem = gen_rtx_MEM (Pmode, plus_constant (buf_addr, GET_MODE_SIZE (Pmode))),
+ mem = gen_rtx_MEM (Pmode, plus_constant (Pmode, buf_addr,
+ GET_MODE_SIZE (Pmode))),
set_mem_alias_set (mem, setjmp_alias_set);
emit_move_insn (validize_mem (mem),
force_reg (Pmode, gen_rtx_LABEL_REF (Pmode, receiver_label)));
stack_save = gen_rtx_MEM (sa_mode,
- plus_constant (buf_addr,
+ plus_constant (Pmode, buf_addr,
2 * GET_MODE_SIZE (Pmode)));
set_mem_alias_set (stack_save, setjmp_alias_set);
emit_stack_save (SAVE_NONLOCAL, &stack_save);
#endif
{
fp = gen_rtx_MEM (Pmode, buf_addr);
- lab = gen_rtx_MEM (Pmode, plus_constant (buf_addr,
+ lab = gen_rtx_MEM (Pmode, plus_constant (Pmode, buf_addr,
GET_MODE_SIZE (Pmode)));
- stack = gen_rtx_MEM (sa_mode, plus_constant (buf_addr,
+ stack = gen_rtx_MEM (sa_mode, plus_constant (Pmode, buf_addr,
2 * GET_MODE_SIZE (Pmode)));
set_mem_alias_set (fp, setjmp_alias_set);
set_mem_alias_set (lab, setjmp_alias_set);
r_save_area = copy_to_reg (r_save_area);
r_fp = gen_rtx_MEM (Pmode, r_save_area);
r_sp = gen_rtx_MEM (STACK_SAVEAREA_MODE (SAVE_NONLOCAL),
- plus_constant (r_save_area, GET_MODE_SIZE (Pmode)));
+ plus_constant (Pmode, r_save_area,
+ GET_MODE_SIZE (Pmode)));
crtl->has_nonlocal_goto = 1;
= gen_rtx_MEM (sa_mode,
memory_address
(sa_mode,
- plus_constant (buf_addr, 2 * GET_MODE_SIZE (Pmode))));
+ plus_constant (Pmode, buf_addr,
+ 2 * GET_MODE_SIZE (Pmode))));
emit_stack_save (SAVE_NONLOCAL, &stack_save);
}
{
tree orig_exp = exp;
rtx addr, mem;
- HOST_WIDE_INT off;
/* When EXP is not resolved SAVE_EXPR, MEM_ATTRS can be still derived
from its expression, for expr->a.b only <variable>.a.b is recorded. */
mem = gen_rtx_MEM (BLKmode, memory_address (BLKmode, addr));
/* Get an expression we can use to find the attributes to assign to MEM.
- If it is an ADDR_EXPR, use the operand. Otherwise, dereference it if
- we can. First remove any nops. */
+ First remove any nops. */
while (CONVERT_EXPR_P (exp)
&& POINTER_TYPE_P (TREE_TYPE (TREE_OPERAND (exp, 0))))
exp = TREE_OPERAND (exp, 0);
- off = 0;
- if (TREE_CODE (exp) == POINTER_PLUS_EXPR
- && TREE_CODE (TREE_OPERAND (exp, 0)) == ADDR_EXPR
- && host_integerp (TREE_OPERAND (exp, 1), 0)
- && (off = tree_low_cst (TREE_OPERAND (exp, 1), 0)) > 0)
- exp = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
- else if (TREE_CODE (exp) == ADDR_EXPR)
- exp = TREE_OPERAND (exp, 0);
- else if (POINTER_TYPE_P (TREE_TYPE (exp)))
- exp = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (exp)), exp);
- else
- exp = NULL;
-
- /* Honor attributes derived from exp, except for the alias set
- (as builtin stringops may alias with anything) and the size
- (as stringops may access multiple array elements). */
- if (exp)
- {
+ /* Build a MEM_REF representing the whole accessed area as a byte blob,
+ (as builtin stringops may alias with anything). */
+ exp = fold_build2 (MEM_REF,
+ build_array_type (char_type_node,
+ build_range_type (sizetype,
+ size_one_node, len)),
+ exp, build_int_cst (ptr_type_node, 0));
+
+ /* If the MEM_REF has no acceptable address, try to get the base object
+ from the original address we got, and build an all-aliasing
+ unknown-sized access to that one. */
+ if (is_gimple_mem_ref_addr (TREE_OPERAND (exp, 0)))
+ set_mem_attributes (mem, exp, 0);
+ else if (TREE_CODE (TREE_OPERAND (exp, 0)) == ADDR_EXPR
+ && (exp = get_base_address (TREE_OPERAND (TREE_OPERAND (exp, 0),
+ 0))))
+ {
+ exp = build_fold_addr_expr (exp);
+ exp = fold_build2 (MEM_REF,
+ build_array_type (char_type_node,
+ build_range_type (sizetype,
+ size_zero_node,
+ NULL)),
+ exp, build_int_cst (ptr_type_node, 0));
set_mem_attributes (mem, exp, 0);
-
- if (off)
- mem = adjust_automodify_address_nv (mem, BLKmode, NULL, off);
-
- /* Allow the string and memory builtins to overflow from one
- field into another, see http://gcc.gnu.org/PR23561.
- Thus avoid COMPONENT_REFs in MEM_EXPR unless we know the whole
- memory accessed by the string or memory builtin will fit
- within the field. */
- if (MEM_EXPR (mem) && TREE_CODE (MEM_EXPR (mem)) == COMPONENT_REF)
- {
- tree mem_expr = MEM_EXPR (mem);
- HOST_WIDE_INT offset = -1, length = -1;
- tree inner = exp;
-
- while (TREE_CODE (inner) == ARRAY_REF
- || CONVERT_EXPR_P (inner)
- || TREE_CODE (inner) == VIEW_CONVERT_EXPR
- || TREE_CODE (inner) == SAVE_EXPR)
- inner = TREE_OPERAND (inner, 0);
-
- gcc_assert (TREE_CODE (inner) == COMPONENT_REF);
-
- if (MEM_OFFSET_KNOWN_P (mem))
- offset = MEM_OFFSET (mem);
-
- if (offset >= 0 && len && host_integerp (len, 0))
- length = tree_low_cst (len, 0);
-
- while (TREE_CODE (inner) == COMPONENT_REF)
- {
- tree field = TREE_OPERAND (inner, 1);
- gcc_assert (TREE_CODE (mem_expr) == COMPONENT_REF);
- gcc_assert (field == TREE_OPERAND (mem_expr, 1));
-
- /* Bitfields are generally not byte-addressable. */
- gcc_assert (!DECL_BIT_FIELD (field)
- || ((tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1)
- % BITS_PER_UNIT) == 0
- && host_integerp (DECL_SIZE (field), 0)
- && (TREE_INT_CST_LOW (DECL_SIZE (field))
- % BITS_PER_UNIT) == 0));
-
- /* If we can prove that the memory starting at XEXP (mem, 0) and
- ending at XEXP (mem, 0) + LENGTH will fit into this field, we
- can keep the COMPONENT_REF in MEM_EXPR. But be careful with
- fields without DECL_SIZE_UNIT like flexible array members. */
- if (length >= 0
- && DECL_SIZE_UNIT (field)
- && host_integerp (DECL_SIZE_UNIT (field), 0))
- {
- HOST_WIDE_INT size
- = TREE_INT_CST_LOW (DECL_SIZE_UNIT (field));
- if (offset <= size
- && length <= size
- && offset + length <= size)
- break;
- }
-
- if (offset >= 0
- && host_integerp (DECL_FIELD_OFFSET (field), 0))
- offset += TREE_INT_CST_LOW (DECL_FIELD_OFFSET (field))
- + tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1)
- / BITS_PER_UNIT;
- else
- {
- offset = -1;
- length = -1;
- }
-
- mem_expr = TREE_OPERAND (mem_expr, 0);
- inner = TREE_OPERAND (inner, 0);
- }
-
- if (mem_expr == NULL)
- offset = -1;
- if (mem_expr != MEM_EXPR (mem))
- {
- set_mem_expr (mem, mem_expr);
- if (offset >= 0)
- set_mem_offset (mem, offset);
- else
- clear_mem_offset (mem);
- }
- }
- set_mem_alias_set (mem, 0);
- clear_mem_size (mem);
}
-
+ set_mem_alias_set (mem, 0);
return mem;
}
\f
as we might have pretended they were passed. Make sure it's a valid
operand, as emit_move_insn isn't expected to handle a PLUS. */
tem
- = force_operand (plus_constant (tem, crtl->args.pretend_args_size),
+ = force_operand (plus_constant (Pmode, tem, crtl->args.pretend_args_size),
NULL_RTX);
#endif
emit_move_insn (adjust_address (registers, Pmode, 0), tem);
dest = virtual_outgoing_args_rtx;
#ifndef STACK_GROWS_DOWNWARD
if (CONST_INT_P (argsize))
- dest = plus_constant (dest, -INTVAL (argsize));
+ dest = plus_constant (Pmode, dest, -INTVAL (argsize));
else
dest = gen_rtx_PLUS (Pmode, dest, negate_rtx (Pmode, argsize));
#endif
fcode = BUILT_IN_MATHFN##_R; fcodef = BUILT_IN_MATHFN##F_R ; \
fcodel = BUILT_IN_MATHFN##L_R ; break;
-/* Return mathematic function equivalent to FN but operating directly
- on TYPE, if available. If IMPLICIT is true find the function in
- implicit_built_in_decls[], otherwise use built_in_decls[]. If we
- can't do the conversion, return zero. */
+/* Return the mathematical function equivalent to FN but operating directly
+ on TYPE, if available. If IMPLICIT_P is true use the implicit builtin
+ declaration, otherwise use the explicit declaration. If we can't do the
+ conversion, return zero. */
static tree
-mathfn_built_in_1 (tree type, enum built_in_function fn, bool implicit)
+mathfn_built_in_1 (tree type, enum built_in_function fn, bool implicit_p)
{
- tree const *const fn_arr
- = implicit ? implicit_built_in_decls : built_in_decls;
- enum built_in_function fcode, fcodef, fcodel;
+ enum built_in_function fcode, fcodef, fcodel, fcode2;
switch (fn)
{
}
if (TYPE_MAIN_VARIANT (type) == double_type_node)
- return fn_arr[fcode];
+ fcode2 = fcode;
else if (TYPE_MAIN_VARIANT (type) == float_type_node)
- return fn_arr[fcodef];
+ fcode2 = fcodef;
else if (TYPE_MAIN_VARIANT (type) == long_double_type_node)
- return fn_arr[fcodel];
+ fcode2 = fcodel;
else
return NULL_TREE;
+
+ if (implicit_p && !builtin_decl_implicit_p (fcode2))
+ return NULL_TREE;
+
+ return builtin_decl_explicit (fcode2);
}
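/* A minimal usage sketch (hypothetical caller), assuming the standard math
   builtins: any member of a group selects the variant matching TYPE, so

     mathfn_built_in_1 (double_type_node, BUILT_IN_SINF, true)

   maps BUILT_IN_SINF to the double variant and returns the decl for
   BUILT_IN_SIN, or NULL_TREE if no implicit declaration is available.  */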
/* Like mathfn_built_in_1(), but always use the implicit builtin declarations. */
interclass_mathfn_icode (tree arg, tree fndecl)
{
bool errno_set = false;
- optab builtin_optab = 0;
+ optab builtin_optab = unknown_optab;
enum machine_mode mode;
switch (DECL_FUNCTION_CODE (fndecl))
rtx op1a, op2a;
if (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CEXPIF)
- fn = built_in_decls[BUILT_IN_SINCOSF];
+ fn = builtin_decl_explicit (BUILT_IN_SINCOSF);
else if (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CEXPI)
- fn = built_in_decls[BUILT_IN_SINCOS];
+ fn = builtin_decl_explicit (BUILT_IN_SINCOS);
else if (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CEXPIL)
- fn = built_in_decls[BUILT_IN_SINCOSL];
+ fn = builtin_decl_explicit (BUILT_IN_SINCOSL);
else
gcc_unreachable ();
- op1 = assign_temp (TREE_TYPE (arg), 0, 1, 1);
- op2 = assign_temp (TREE_TYPE (arg), 0, 1, 1);
- op1a = copy_to_mode_reg (Pmode, XEXP (op1, 0));
- op2a = copy_to_mode_reg (Pmode, XEXP (op2, 0));
+ op1 = assign_temp (TREE_TYPE (arg), 1, 1);
+ op2 = assign_temp (TREE_TYPE (arg), 1, 1);
+ op1a = copy_addr_to_reg (XEXP (op1, 0));
+ op2a = copy_addr_to_reg (XEXP (op2, 0));
top1 = make_tree (build_pointer_type (TREE_TYPE (arg)), op1a);
top2 = make_tree (build_pointer_type (TREE_TYPE (arg)), op2a);
tree ctype = build_complex_type (type);
if (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CEXPIF)
- fn = built_in_decls[BUILT_IN_CEXPF];
+ fn = builtin_decl_explicit (BUILT_IN_CEXPF);
else if (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CEXPI)
- fn = built_in_decls[BUILT_IN_CEXP];
+ fn = builtin_decl_explicit (BUILT_IN_CEXP);
else if (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CEXPIL)
- fn = built_in_decls[BUILT_IN_CEXPL];
+ fn = builtin_decl_explicit (BUILT_IN_CEXPL);
else
gcc_unreachable ();
tree fndecl = get_callee_fndecl (exp);
tree arg;
enum machine_mode mode;
-
- /* There's no easy way to detect the case we need to set EDOM. */
- if (flag_errno_math)
- return NULL_RTX;
+ enum built_in_function fallback_fn = BUILT_IN_NONE;
if (!validate_arglist (exp, REAL_TYPE, VOID_TYPE))
gcc_unreachable ();
switch (DECL_FUNCTION_CODE (fndecl))
{
CASE_FLT_FN (BUILT_IN_IRINT):
+ fallback_fn = BUILT_IN_LRINT;
+ /* FALLTHRU */
CASE_FLT_FN (BUILT_IN_LRINT):
CASE_FLT_FN (BUILT_IN_LLRINT):
- builtin_optab = lrint_optab; break;
+ builtin_optab = lrint_optab;
+ break;
CASE_FLT_FN (BUILT_IN_IROUND):
+ fallback_fn = BUILT_IN_LROUND;
+ /* FALLTHRU */
CASE_FLT_FN (BUILT_IN_LROUND):
CASE_FLT_FN (BUILT_IN_LLROUND):
- builtin_optab = lround_optab; break;
+ builtin_optab = lround_optab;
+ break;
default:
gcc_unreachable ();
}
+ /* There's no easy way to detect the case we need to set EDOM. */
+ if (flag_errno_math && fallback_fn == BUILT_IN_NONE)
+ return NULL_RTX;
+
/* Make a suitable register to place result in. */
mode = TYPE_MODE (TREE_TYPE (exp));
- target = gen_reg_rtx (mode);
+ /* There's no easy way to detect the case we need to set EDOM. */
+ if (!flag_errno_math)
+ {
+ target = gen_reg_rtx (mode);
- /* Wrap the computation of the argument in a SAVE_EXPR, as we may
- need to expand the argument again. This way, we will not perform
- side-effects more the once. */
- CALL_EXPR_ARG (exp, 0) = arg = builtin_save_expr (arg);
+ /* Wrap the computation of the argument in a SAVE_EXPR, as we may
+ need to expand the argument again. This way, we will not perform
+ side-effects more than once. */
+ CALL_EXPR_ARG (exp, 0) = arg = builtin_save_expr (arg);
- op0 = expand_expr (arg, NULL, VOIDmode, EXPAND_NORMAL);
+ op0 = expand_expr (arg, NULL, VOIDmode, EXPAND_NORMAL);
- start_sequence ();
+ start_sequence ();
- if (expand_sfix_optab (target, op0, builtin_optab))
- {
- /* Output the entire sequence. */
- insns = get_insns ();
+ if (expand_sfix_optab (target, op0, builtin_optab))
+ {
+ /* Output the entire sequence. */
+ insns = get_insns ();
+ end_sequence ();
+ emit_insn (insns);
+ return target;
+ }
+
+ /* If we were unable to expand via the builtin, stop the sequence
+ (without outputting the insns) and call to the library function
+ with the stabilized argument list. */
end_sequence ();
- emit_insn (insns);
- return target;
}
- /* If we were unable to expand via the builtin, stop the sequence
- (without outputting the insns) and call to the library function
- with the stabilized argument list. */
- end_sequence ();
+ if (fallback_fn != BUILT_IN_NONE)
+ {
+ /* Fall back to rounding to long int. Use implicit_p 0 so that for
+ non-C99 targets (int) round (x) is never transformed into
+ BUILT_IN_IROUND, and if __builtin_iround is called directly, emit
+ a call to lround in the hope that the target provides at least some
+ C99 functions. This should result in the best user experience for
+ targets that do not fully implement C99. */
+ tree fallback_fndecl = mathfn_built_in_1 (TREE_TYPE (arg),
+ fallback_fn, 0);
+
+ exp = build_call_nofold_loc (EXPR_LOCATION (exp),
+ fallback_fndecl, 1, arg);
+
+ target = expand_call (exp, NULL_RTX, target == const0_rtx);
+ return convert_to_mode (mode, target, 0);
+ }
target = expand_call (exp, target, target == const0_rtx);
rtx target, enum machine_mode mode, int endp)
{
/* If return value is ignored, transform mempcpy into memcpy. */
- if (target == const0_rtx && implicit_built_in_decls[BUILT_IN_MEMCPY])
+ if (target == const0_rtx && builtin_decl_implicit_p (BUILT_IN_MEMCPY))
{
- tree fn = implicit_built_in_decls[BUILT_IN_MEMCPY];
+ tree fn = builtin_decl_implicit (BUILT_IN_MEMCPY);
tree result = build_call_nofold_loc (UNKNOWN_LOCATION, fn, 3,
dest, src, len);
return expand_expr (result, target, mode, EXPAND_NORMAL);
adjust it. */
if (endp == 1)
{
- rtx tem = plus_constant (gen_lowpart (GET_MODE (target), target), 1);
+ rtx tem = plus_constant (GET_MODE (target),
+ gen_lowpart (GET_MODE (target), target), 1);
emit_move_insn (target, force_operand (tem, NULL_RTX));
}
}
src = CALL_EXPR_ARG (exp, 1);
/* If return value is ignored, transform stpcpy into strcpy. */
- if (target == const0_rtx && implicit_built_in_decls[BUILT_IN_STRCPY])
+ if (target == const0_rtx && builtin_decl_implicit (BUILT_IN_STRCPY))
{
- tree fn = implicit_built_in_decls[BUILT_IN_STRCPY];
+ tree fn = builtin_decl_implicit (BUILT_IN_STRCPY);
tree result = build_call_nofold_loc (loc, fn, 2, dst, src);
return expand_expr (result, target, mode, EXPAND_NORMAL);
}
if (GET_MODE (target) != GET_MODE (ret))
ret = gen_lowpart (GET_MODE (target), ret);
- ret = plus_constant (ret, INTVAL (len_rtx));
+ ret = plus_constant (GET_MODE (ret), ret, INTVAL (len_rtx));
ret = emit_move_insn (target, force_operand (ret, NULL_RTX));
gcc_assert (ret);
expression to exit or longjmp. */
gimplify_and_add (valist, pre_p);
t = build_call_expr_loc (loc,
- implicit_built_in_decls[BUILT_IN_TRAP], 0);
+ builtin_decl_implicit (BUILT_IN_TRAP), 0);
gimplify_and_add (t, pre_p);
/* This is dead code, but go ahead and finish so that the
if (!REG_P (tem)
&& ! CONSTANT_P (tem))
- tem = copy_to_mode_reg (Pmode, tem);
+ tem = copy_addr_to_reg (tem);
return tem;
}
}
{
rtx op0;
rtx result;
+ bool valid_arglist;
+ unsigned int align;
+ bool alloca_with_align = (DECL_FUNCTION_CODE (get_callee_fndecl (exp))
+ == BUILT_IN_ALLOCA_WITH_ALIGN);
- /* Emit normal call if marked not-inlineable. */
- if (CALL_CANNOT_INLINE_P (exp))
+ /* Emit normal call if we use mudflap. */
+ if (flag_mudflap)
return NULL_RTX;
- if (!validate_arglist (exp, INTEGER_TYPE, VOID_TYPE))
+ valid_arglist
+ = (alloca_with_align
+ ? validate_arglist (exp, INTEGER_TYPE, INTEGER_TYPE, VOID_TYPE)
+ : validate_arglist (exp, INTEGER_TYPE, VOID_TYPE));
+
+ if (!valid_arglist)
return NULL_RTX;
/* Compute the argument. */
op0 = expand_normal (CALL_EXPR_ARG (exp, 0));
+ /* Compute the alignment. */
+ align = (alloca_with_align
+ ? TREE_INT_CST_LOW (CALL_EXPR_ARG (exp, 1))
+ : BIGGEST_ALIGNMENT);
+
/* Allocate the desired space. */
- result = allocate_dynamic_stack_space (op0, 0, BIGGEST_ALIGNMENT,
- cannot_accumulate);
+ result = allocate_dynamic_stack_space (op0, 0, align, cannot_accumulate);
result = convert_memory_address (ptr_mode, result);
return result;
}
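/* Illustrative sketch of the two forms dispatched above (hypothetical
   size N; the BUILT_IN_ALLOCA_WITH_ALIGN form may be compiler-generated
   rather than user-written, and its constant second argument is an
   alignment in bits, read above with TREE_INT_CST_LOW):

     p = __builtin_alloca (n);                  // aligned to BIGGEST_ALIGNMENT
     q = __builtin_alloca_with_align (n, 64);   // aligned to at least 64 bits
*/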
-/* Expand a call to a bswap builtin with argument ARG0. MODE
- is the mode to expand with. */
+/* Expand a call to a bswap builtin in EXP.
+ Return NULL_RTX if a normal call should be emitted rather than expanding the
+ function in-line. If convenient, the result should be placed in TARGET.
+ SUBTARGET may be used as the target for computing one of EXP's operands. */
static rtx
-expand_builtin_bswap (tree exp, rtx target, rtx subtarget)
+expand_builtin_bswap (enum machine_mode target_mode, tree exp, rtx target,
+ rtx subtarget)
{
- enum machine_mode mode;
tree arg;
rtx op0;
return NULL_RTX;
arg = CALL_EXPR_ARG (exp, 0);
- mode = TYPE_MODE (TREE_TYPE (arg));
- op0 = expand_expr (arg, subtarget, VOIDmode, EXPAND_NORMAL);
+ op0 = expand_expr (arg,
+ subtarget && GET_MODE (subtarget) == target_mode
+ ? subtarget : NULL_RTX,
+ target_mode, EXPAND_NORMAL);
+ if (GET_MODE (op0) != target_mode)
+ op0 = convert_to_mode (target_mode, op0, 1);
- target = expand_unop (mode, bswap_optab, op0, target, 1);
+ target = expand_unop (target_mode, bswap_optab, op0, target, 1);
gcc_assert (target);
- return convert_to_mode (mode, target, 0);
+ return convert_to_mode (target_mode, target, 1);
}
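/* Illustrative semantics of the byte-swap builtins expanded above
   (constant examples):

     __builtin_bswap16 (0x1234)             == 0x3412
     __builtin_bswap32 (0x12345678)         == 0x78563412
     __builtin_bswap64 (0x0102030405060708) == 0x0807060504030201

   The argument is converted to TARGET_MODE first so the new
   BUILT_IN_BSWAP16 case can share this path.  */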
/* Expand a call to a unary builtin in EXP.
}
static rtx
-expand_builtin_init_trampoline (tree exp)
+expand_builtin_init_trampoline (tree exp, bool onstack)
{
tree t_tramp, t_func, t_chain;
rtx m_tramp, r_tramp, r_chain, tmp;
m_tramp = gen_rtx_MEM (BLKmode, r_tramp);
MEM_NOTRAP_P (m_tramp) = 1;
- /* The TRAMP argument should be the address of a field within the
- local function's FRAME decl. Let's see if we can fill in the
- to fill in the MEM_ATTRs for this memory. */
+ /* If ONSTACK, the TRAMP argument should be the address of a field
+ within the local function's FRAME decl. Either way, let's see if
+ we can fill in the MEM_ATTRs for this memory. */
if (TREE_CODE (t_tramp) == ADDR_EXPR)
set_mem_attributes_minus_bitpos (m_tramp, TREE_OPERAND (t_tramp, 0),
true, 0);
+ /* Creator of a heap trampoline is responsible for making sure the
+ address is aligned to at least STACK_BOUNDARY. Normally malloc
+ will ensure this anyhow. */
tmp = round_trampoline_addr (r_tramp);
if (tmp != r_tramp)
{
/* Generate insns to initialize the trampoline. */
targetm.calls.trampoline_init (m_tramp, t_func, r_chain);
- trampolines_created = 1;
+ if (onstack)
+ {
+ trampolines_created = 1;
- warning_at (DECL_SOURCE_LOCATION (t_func), OPT_Wtrampolines,
- "trampoline generated for nested function %qD", t_func);
+ warning_at (DECL_SOURCE_LOCATION (t_func), OPT_Wtrampolines,
+ "trampoline generated for nested function %qD", t_func);
+ }
return const0_rtx;
}
if (bitpos < GET_MODE_BITSIZE (rmode))
{
- double_int mask = double_int_setbit (double_int_zero, bitpos);
+ double_int mask = double_int_zero.set_bit (bitpos);
if (GET_MODE_SIZE (imode) > GET_MODE_SIZE (rmode))
temp = gen_lowpart (rmode, temp);
return mode_for_size (BITS_PER_UNIT << fcode_diff, MODE_INT, 0);
}
-/* Expand the memory expression LOC and return the appropriate memory operand
- for the builtin_sync operations. */
+/* Expand the memory expression LOC and return the appropriate memory operand
+ for the builtin_sync operations. */
+
+static rtx
+get_builtin_sync_mem (tree loc, enum machine_mode mode)
+{
+ rtx addr, mem;
+
+ addr = expand_expr (loc, NULL_RTX, ptr_mode, EXPAND_SUM);
+ addr = convert_memory_address (Pmode, addr);
+
+ /* Note that we explicitly do not want any alias information for this
+ memory, so that we kill all other live memories. Otherwise we don't
+ satisfy the full barrier semantics of the intrinsic. */
+ mem = validize_mem (gen_rtx_MEM (mode, addr));
+
+ /* The alignment needs to be at least according to that of the mode. */
+ set_mem_align (mem, MAX (GET_MODE_ALIGNMENT (mode),
+ get_pointer_alignment (loc)));
+ set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
+ MEM_VOLATILE_P (mem) = 1;
+
+ return mem;
+}
+
+/* Make sure an argument is in the right mode.
+ EXP is the tree argument.
+ MODE is the mode it should be in. */
+
+static rtx
+expand_expr_force_mode (tree exp, enum machine_mode mode)
+{
+ rtx val;
+ enum machine_mode old_mode;
+
+ val = expand_expr (exp, NULL_RTX, mode, EXPAND_NORMAL);
+ /* If VAL is promoted to a wider mode, convert it back to MODE. Take care
+ of CONST_INTs, where we know the old_mode only from the call argument. */
+
+ old_mode = GET_MODE (val);
+ if (old_mode == VOIDmode)
+ old_mode = TYPE_MODE (TREE_TYPE (exp));
+ val = convert_modes (mode, old_mode, val, 1);
+ return val;
+}
+
+
+/* Expand the __sync_xxx_and_fetch and __sync_fetch_and_xxx intrinsics.
+ EXP is the CALL_EXPR. CODE is the rtx code
+ that corresponds to the arithmetic or logical operation from the name;
+ an exception here is that NOT actually means NAND. TARGET is an optional
+ place for us to store the results; AFTER is true if this is the
+ xxx_and_fetch form, i.e. the result after the operation is returned. */
+
+static rtx
+expand_builtin_sync_operation (enum machine_mode mode, tree exp,
+ enum rtx_code code, bool after,
+ rtx target)
+{
+ rtx val, mem;
+ location_t loc = EXPR_LOCATION (exp);
+
+ if (code == NOT && warn_sync_nand)
+ {
+ tree fndecl = get_callee_fndecl (exp);
+ enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
+
+ static bool warned_f_a_n, warned_n_a_f;
+
+ switch (fcode)
+ {
+ case BUILT_IN_SYNC_FETCH_AND_NAND_1:
+ case BUILT_IN_SYNC_FETCH_AND_NAND_2:
+ case BUILT_IN_SYNC_FETCH_AND_NAND_4:
+ case BUILT_IN_SYNC_FETCH_AND_NAND_8:
+ case BUILT_IN_SYNC_FETCH_AND_NAND_16:
+ if (warned_f_a_n)
+ break;
+
+ fndecl = builtin_decl_implicit (BUILT_IN_SYNC_FETCH_AND_NAND_N);
+ inform (loc, "%qD changed semantics in GCC 4.4", fndecl);
+ warned_f_a_n = true;
+ break;
+
+ case BUILT_IN_SYNC_NAND_AND_FETCH_1:
+ case BUILT_IN_SYNC_NAND_AND_FETCH_2:
+ case BUILT_IN_SYNC_NAND_AND_FETCH_4:
+ case BUILT_IN_SYNC_NAND_AND_FETCH_8:
+ case BUILT_IN_SYNC_NAND_AND_FETCH_16:
+ if (warned_n_a_f)
+ break;
+
+ fndecl = builtin_decl_implicit (BUILT_IN_SYNC_NAND_AND_FETCH_N);
+ inform (loc, "%qD changed semantics in GCC 4.4", fndecl);
+ warned_n_a_f = true;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ }
+
+ /* Expand the operands. */
+ mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode);
+ val = expand_expr_force_mode (CALL_EXPR_ARG (exp, 1), mode);
+
+ return expand_atomic_fetch_op (target, mem, val, code, MEMMODEL_SEQ_CST,
+ after);
+}
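/* A minimal user-level sketch of the AFTER flag (hypothetical variable V),
   assuming only the documented __sync interfaces:

     int old_val = __sync_fetch_and_add (&v, 1);  // AFTER == false: value before
     int new_val = __sync_add_and_fetch (&v, 1);  // AFTER == true:  value after

   Both reach this function with CODE == PLUS and are expanded with
   MEMMODEL_SEQ_CST semantics via expand_atomic_fetch_op.  */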
+
+/* Expand the __sync_val_compare_and_swap and __sync_bool_compare_and_swap
+ intrinsics. EXP is the CALL_EXPR. IS_BOOL is
+ true if this is the boolean form. TARGET is a place for us to store the
+ results; this is NOT optional if IS_BOOL is true. */
+
+static rtx
+expand_builtin_compare_and_swap (enum machine_mode mode, tree exp,
+ bool is_bool, rtx target)
+{
+ rtx old_val, new_val, mem;
+ rtx *pbool, *poval;
+
+ /* Expand the operands. */
+ mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode);
+ old_val = expand_expr_force_mode (CALL_EXPR_ARG (exp, 1), mode);
+ new_val = expand_expr_force_mode (CALL_EXPR_ARG (exp, 2), mode);
+
+ pbool = poval = NULL;
+ if (target != const0_rtx)
+ {
+ if (is_bool)
+ pbool = ⌖
+ else
+ poval = ⌖
+ }
+ if (!expand_atomic_compare_and_swap (pbool, poval, mem, old_val, new_val,
+ false, MEMMODEL_SEQ_CST,
+ MEMMODEL_SEQ_CST))
+ return NULL_RTX;
+
+ return target;
+}
+
+/* Expand the __sync_lock_test_and_set intrinsic. Note that the most
+ general form is actually an atomic exchange, and some targets only
+ support a reduced form with the second argument being a constant 1.
+ EXP is the CALL_EXPR; TARGET is an optional place for us to store
+ the results. */
+
+static rtx
+expand_builtin_sync_lock_test_and_set (enum machine_mode mode, tree exp,
+ rtx target)
+{
+ rtx val, mem;
+
+ /* Expand the operands. */
+ mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode);
+ val = expand_expr_force_mode (CALL_EXPR_ARG (exp, 1), mode);
+
+ return expand_sync_lock_test_and_set (target, mem, val);
+}
+
+/* Expand the __sync_lock_release intrinsic. EXP is the CALL_EXPR. */
+
+static void
+expand_builtin_sync_lock_release (enum machine_mode mode, tree exp)
+{
+ rtx mem;
+
+ /* Expand the operands. */
+ mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode);
+
+ expand_atomic_store (mem, const0_rtx, MEMMODEL_RELEASE, true);
+}
+
+/* Given an integer representing an ``enum memmodel'', verify its
+ correctness and return the memory model enum. */
+
+static enum memmodel
+get_memmodel (tree exp)
+{
+ rtx op;
+ unsigned HOST_WIDE_INT val;
+
+ /* If the parameter is not a constant, it's a run time value so we'll just
+ convert it to MEMMODEL_SEQ_CST to avoid annoying runtime checking. */
+ if (TREE_CODE (exp) != INTEGER_CST)
+ return MEMMODEL_SEQ_CST;
+
+ op = expand_normal (exp);
+
+ val = INTVAL (op);
+ if (targetm.memmodel_check)
+ val = targetm.memmodel_check (val);
+ else if (val & ~MEMMODEL_MASK)
+ {
+ warning (OPT_Winvalid_memory_model,
+ "Unknown architecture specifier in memory model to builtin.");
+ return MEMMODEL_SEQ_CST;
+ }
+
+ if ((INTVAL (op) & MEMMODEL_MASK) >= MEMMODEL_LAST)
+ {
+ warning (OPT_Winvalid_memory_model,
+ "invalid memory model argument to builtin");
+ return MEMMODEL_SEQ_CST;
+ }
+
+ return (enum memmodel) val;
+}
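/* A minimal usage sketch (hypothetical variable X), assuming the documented
   __ATOMIC_* constants: a call such as

     __atomic_load_n (&x, __ATOMIC_ACQUIRE)

   arrives here with an INTEGER_CST model argument that is range-checked
   against MEMMODEL_LAST, while a run-time (non-constant) model argument is
   conservatively treated as MEMMODEL_SEQ_CST.  */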
+
+/* Expand the __atomic_exchange intrinsic:
+ TYPE __atomic_exchange (TYPE *object, TYPE desired, enum memmodel)
+ EXP is the CALL_EXPR.
+ TARGET is an optional place for us to store the results. */
+
+static rtx
+expand_builtin_atomic_exchange (enum machine_mode mode, tree exp, rtx target)
+{
+ rtx val, mem;
+ enum memmodel model;
+
+ model = get_memmodel (CALL_EXPR_ARG (exp, 2));
+ if ((model & MEMMODEL_MASK) == MEMMODEL_CONSUME)
+ {
+ error ("invalid memory model for %<__atomic_exchange%>");
+ return NULL_RTX;
+ }
+
+ if (!flag_inline_atomics)
+ return NULL_RTX;
+
+ /* Expand the operands. */
+ mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode);
+ val = expand_expr_force_mode (CALL_EXPR_ARG (exp, 1), mode);
+
+ return expand_atomic_exchange (target, mem, val, model);
+}
+
+/* Expand the __atomic_compare_exchange intrinsic:
+ bool __atomic_compare_exchange (TYPE *object, TYPE *expect,
+ TYPE desired, BOOL weak,
+ enum memmodel success,
+ enum memmodel failure)
+ EXP is the CALL_EXPR.
+ TARGET is an optional place for us to store the results. */
+
+static rtx
+expand_builtin_atomic_compare_exchange (enum machine_mode mode, tree exp,
+ rtx target)
+{
+ rtx expect, desired, mem, oldval;
+ enum memmodel success, failure;
+ tree weak;
+ bool is_weak;
+
+ success = get_memmodel (CALL_EXPR_ARG (exp, 4));
+ failure = get_memmodel (CALL_EXPR_ARG (exp, 5));
+
+ if ((failure & MEMMODEL_MASK) == MEMMODEL_RELEASE
+ || (failure & MEMMODEL_MASK) == MEMMODEL_ACQ_REL)
+ {
+ error ("invalid failure memory model for %<__atomic_compare_exchange%>");
+ return NULL_RTX;
+ }
+
+ if (failure > success)
+ {
+ error ("failure memory model cannot be stronger than success "
+ "memory model for %<__atomic_compare_exchange%>");
+ return NULL_RTX;
+ }
+
+ if (!flag_inline_atomics)
+ return NULL_RTX;
+
+ /* Expand the operands. */
+ mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode);
+
+ expect = expand_normal (CALL_EXPR_ARG (exp, 1));
+ expect = convert_memory_address (Pmode, expect);
+ expect = gen_rtx_MEM (mode, expect);
+ desired = expand_expr_force_mode (CALL_EXPR_ARG (exp, 2), mode);
+
+ weak = CALL_EXPR_ARG (exp, 3);
+ is_weak = false;
+ if (host_integerp (weak, 0) && tree_low_cst (weak, 0) != 0)
+ is_weak = true;
+
+ oldval = expect;
+ if (!expand_atomic_compare_and_swap ((target == const0_rtx ? NULL : &target),
+ &oldval, mem, oldval, desired,
+ is_weak, success, failure))
+ return NULL_RTX;
+
+ if (oldval != expect)
+ emit_move_insn (expect, oldval);
+
+ return target;
+}
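/* A minimal user-level sketch of the builtin expanded above (hypothetical
   variables, using the documented __atomic_compare_exchange_n form):

     long expected = old_value;
     bool ok = __atomic_compare_exchange_n (&v, &expected, desired,
                                            false,             // strong CAS
                                            __ATOMIC_SEQ_CST,  // success
                                            __ATOMIC_SEQ_CST); // failure

   On failure EXPECTED is updated with the value read from memory, which is
   what the emit_move_insn (expect, oldval) above implements.  */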
+
+/* Expand the __atomic_load intrinsic:
+ TYPE __atomic_load (TYPE *object, enum memmodel)
+ EXP is the CALL_EXPR.
+ TARGET is an optional place for us to store the results. */
+
+static rtx
+expand_builtin_atomic_load (enum machine_mode mode, tree exp, rtx target)
+{
+ rtx mem;
+ enum memmodel model;
+
+ model = get_memmodel (CALL_EXPR_ARG (exp, 1));
+ if ((model & MEMMODEL_MASK) == MEMMODEL_RELEASE
+ || (model & MEMMODEL_MASK) == MEMMODEL_ACQ_REL)
+ {
+ error ("invalid memory model for %<__atomic_load%>");
+ return NULL_RTX;
+ }
+
+ if (!flag_inline_atomics)
+ return NULL_RTX;
+
+ /* Expand the operand. */
+ mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode);
+
+ return expand_atomic_load (target, mem, model);
+}
+
+
+/* Expand the __atomic_store intrinsic:
+ void __atomic_store (TYPE *object, TYPE desired, enum memmodel)
+ EXP is the CALL_EXPR.
+ TARGET is an optional place for us to store the results. */
+
+static rtx
+expand_builtin_atomic_store (enum machine_mode mode, tree exp)
+{
+ rtx mem, val;
+ enum memmodel model;
+
+ model = get_memmodel (CALL_EXPR_ARG (exp, 2));
+ if ((model & MEMMODEL_MASK) != MEMMODEL_RELAXED
+ && (model & MEMMODEL_MASK) != MEMMODEL_SEQ_CST
+ && (model & MEMMODEL_MASK) != MEMMODEL_RELEASE)
+ {
+ error ("invalid memory model for %<__atomic_store%>");
+ return NULL_RTX;
+ }
+
+ if (!flag_inline_atomics)
+ return NULL_RTX;
+
+ /* Expand the operands. */
+ mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode);
+ val = expand_expr_force_mode (CALL_EXPR_ARG (exp, 1), mode);
+
+ return expand_atomic_store (mem, val, model, false);
+}
+
+/* Expand the __atomic_fetch_XXX intrinsic:
+ TYPE __atomic_fetch_XXX (TYPE *object, TYPE val, enum memmodel)
+ EXP is the CALL_EXPR.
+ TARGET is an optional place for us to store the results.
+ CODE is the operation, PLUS, MINUS, AND, XOR, or IOR.
+ FETCH_AFTER is true if returning the result of the operation.
+ FETCH_AFTER is false if returning the value before the operation.
+ IGNORE is true if the result is not used.
+ EXT_CALL is the correct builtin for an external call if this cannot be
+ resolved to an instruction sequence. */
+
+static rtx
+expand_builtin_atomic_fetch_op (enum machine_mode mode, tree exp, rtx target,
+ enum rtx_code code, bool fetch_after,
+ bool ignore, enum built_in_function ext_call)
+{
+ rtx val, mem, ret;
+ enum memmodel model;
+ tree fndecl;
+ tree addr;
+
+ model = get_memmodel (CALL_EXPR_ARG (exp, 2));
+
+ /* Expand the operands. */
+ mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode);
+ val = expand_expr_force_mode (CALL_EXPR_ARG (exp, 1), mode);
+
+ /* Only try generating instructions if inlining is turned on. */
+ if (flag_inline_atomics)
+ {
+ ret = expand_atomic_fetch_op (target, mem, val, code, model, fetch_after);
+ if (ret)
+ return ret;
+ }
+
+ /* Return if a different routine isn't needed for the library call. */
+ if (ext_call == BUILT_IN_NONE)
+ return NULL_RTX;
+
+ /* Change the call to the specified function. */
+ fndecl = get_callee_fndecl (exp);
+ addr = CALL_EXPR_FN (exp);
+ STRIP_NOPS (addr);
+
+ gcc_assert (TREE_OPERAND (addr, 0) == fndecl);
+ TREE_OPERAND (addr, 0) = builtin_decl_explicit (ext_call);
+
+ /* Expand the call here so we can emit trailing code. */
+ ret = expand_call (exp, target, ignore);
+
+ /* Replace the original function just in case it matters. */
+ TREE_OPERAND (addr, 0) = fndecl;
+
+ /* Then issue the arithmetic correction to return the right result. */
+ if (!ignore)
+ {
+ if (code == NOT)
+ {
+ ret = expand_simple_binop (mode, AND, ret, val, NULL_RTX, true,
+ OPTAB_LIB_WIDEN);
+ ret = expand_simple_unop (mode, NOT, ret, target, true);
+ }
+ else
+ ret = expand_simple_binop (mode, code, ret, val, target, true,
+ OPTAB_LIB_WIDEN);
+ }
+ return ret;
+}
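/* A small worked example of the trailing correction above (illustrative
   values): if only the fetch_OP library routine is available but the
   OP_fetch result is wanted, the old value it returns is re-combined with
   VAL.  For NAND with ret == 0xC and val == 0xA the corrected result is
   ~(0xC & 0xA) == ~0x8, matching the AND followed by NOT emitted above.  */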
+
+
+#ifndef HAVE_atomic_clear
+# define HAVE_atomic_clear 0
+# define gen_atomic_clear(x,y) (gcc_unreachable (), NULL_RTX)
+#endif
+
+/* Expand an atomic clear operation.
+ void __atomic_clear (BOOL *obj, enum memmodel)
+ EXP is the call expression. */
static rtx
-get_builtin_sync_mem (tree loc, enum machine_mode mode)
+expand_builtin_atomic_clear (tree exp)
{
- rtx addr, mem;
+ enum machine_mode mode;
+ rtx mem, ret;
+ enum memmodel model;
- addr = expand_expr (loc, NULL_RTX, ptr_mode, EXPAND_SUM);
- addr = convert_memory_address (Pmode, addr);
+ mode = mode_for_size (BOOL_TYPE_SIZE, MODE_INT, 0);
+ mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode);
+ model = get_memmodel (CALL_EXPR_ARG (exp, 1));
- /* Note that we explicitly do not want any alias information for this
- memory, so that we kill all other live memories. Otherwise we don't
- satisfy the full barrier semantics of the intrinsic. */
- mem = validize_mem (gen_rtx_MEM (mode, addr));
+ if ((model & MEMMODEL_MASK) == MEMMODEL_ACQUIRE
+ || (model & MEMMODEL_MASK) == MEMMODEL_ACQ_REL)
+ {
+ error ("invalid memory model for %<__atomic_store%>");
+ return const0_rtx;
+ }
- /* The alignment needs to be at least according to that of the mode. */
- set_mem_align (mem, MAX (GET_MODE_ALIGNMENT (mode),
- get_pointer_alignment (loc)));
- set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
- MEM_VOLATILE_P (mem) = 1;
+ if (HAVE_atomic_clear)
+ {
+ emit_insn (gen_atomic_clear (mem, model));
+ return const0_rtx;
+ }
- return mem;
+ /* Try issuing an __atomic_store, and allow fallback to __sync_lock_release.
+ Failing that, a store is issued by __atomic_store. The only way this can
+ fail is if the bool type is larger than a word size. Unlikely, but
+ handle it anyway for completeness. Assume a single threaded model since
+ there is no atomic support in this case, and no barriers are required. */
+ ret = expand_atomic_store (mem, const0_rtx, model, true);
+ if (!ret)
+ emit_move_insn (mem, const0_rtx);
+ return const0_rtx;
}
-/* Expand the __sync_xxx_and_fetch and __sync_fetch_and_xxx intrinsics.
- EXP is the CALL_EXPR. CODE is the rtx code
- that corresponds to the arithmetic or logical operation from the name;
- an exception here is that NOT actually means NAND. TARGET is an optional
- place for us to store the results; AFTER is true if this is the
- fetch_and_xxx form. IGNORE is true if we don't actually care about
- the result of the operation at all. */
+/* Expand an atomic test_and_set operation.
+ bool __atomic_test_and_set (BOOL *obj, enum memmodel)
+ EXP is the call expression. */
static rtx
-expand_builtin_sync_operation (enum machine_mode mode, tree exp,
- enum rtx_code code, bool after,
- rtx target, bool ignore)
+expand_builtin_atomic_test_and_set (tree exp, rtx target)
{
- rtx val, mem;
- enum machine_mode old_mode;
- location_t loc = EXPR_LOCATION (exp);
+ rtx mem;
+ enum memmodel model;
+ enum machine_mode mode;
- if (code == NOT && warn_sync_nand)
- {
- tree fndecl = get_callee_fndecl (exp);
- enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
+ mode = mode_for_size (BOOL_TYPE_SIZE, MODE_INT, 0);
+ mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode);
+ model = get_memmodel (CALL_EXPR_ARG (exp, 1));
- static bool warned_f_a_n, warned_n_a_f;
+ return expand_atomic_test_and_set (target, mem, model);
+}
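/* A minimal user-level sketch (hypothetical lock flag), assuming the
   documented __atomic interfaces:

     static unsigned char lock_flag;

     while (__atomic_test_and_set (&lock_flag, __ATOMIC_ACQUIRE))
       ;                                    // spin until we observe it clear
     // ... critical section ...
     __atomic_clear (&lock_flag, __ATOMIC_RELEASE);
*/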
- switch (fcode)
- {
- case BUILT_IN_SYNC_FETCH_AND_NAND_1:
- case BUILT_IN_SYNC_FETCH_AND_NAND_2:
- case BUILT_IN_SYNC_FETCH_AND_NAND_4:
- case BUILT_IN_SYNC_FETCH_AND_NAND_8:
- case BUILT_IN_SYNC_FETCH_AND_NAND_16:
- if (warned_f_a_n)
- break;
+/* Return true if (optional) argument ARG1 of size ARG0 is always lock free on
+ this architecture. If ARG1 is NULL, use typical alignment for size ARG0. */
- fndecl = implicit_built_in_decls[BUILT_IN_SYNC_FETCH_AND_NAND_N];
- inform (loc, "%qD changed semantics in GCC 4.4", fndecl);
- warned_f_a_n = true;
- break;
+static tree
+fold_builtin_atomic_always_lock_free (tree arg0, tree arg1)
+{
+ int size;
+ enum machine_mode mode;
+ unsigned int mode_align, type_align;
- case BUILT_IN_SYNC_NAND_AND_FETCH_1:
- case BUILT_IN_SYNC_NAND_AND_FETCH_2:
- case BUILT_IN_SYNC_NAND_AND_FETCH_4:
- case BUILT_IN_SYNC_NAND_AND_FETCH_8:
- case BUILT_IN_SYNC_NAND_AND_FETCH_16:
+ if (TREE_CODE (arg0) != INTEGER_CST)
+ return NULL_TREE;
- if (warned_n_a_f)
- break;
+ size = INTVAL (expand_normal (arg0)) * BITS_PER_UNIT;
+ mode = mode_for_size (size, MODE_INT, 0);
+ mode_align = GET_MODE_ALIGNMENT (mode);
- fndecl = implicit_built_in_decls[BUILT_IN_SYNC_NAND_AND_FETCH_N];
- inform (loc, "%qD changed semantics in GCC 4.4", fndecl);
- warned_n_a_f = true;
- break;
+ if (TREE_CODE (arg1) == INTEGER_CST && INTVAL (expand_normal (arg1)) == 0)
+ type_align = mode_align;
+ else
+ {
+ tree ttype = TREE_TYPE (arg1);
- default:
- gcc_unreachable ();
- }
- }
+ /* This function is usually invoked and folded immediately by the front
+ end before anything else has a chance to look at it. The pointer
+ parameter at this point is usually cast to a void *, so check for that
+ and look past the cast. */
+ if (TREE_CODE (arg1) == NOP_EXPR && POINTER_TYPE_P (ttype)
+ && VOID_TYPE_P (TREE_TYPE (ttype)))
+ arg1 = TREE_OPERAND (arg1, 0);
- /* Expand the operands. */
- mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode);
+ ttype = TREE_TYPE (arg1);
+ gcc_assert (POINTER_TYPE_P (ttype));
- val = expand_expr (CALL_EXPR_ARG (exp, 1), NULL_RTX, mode, EXPAND_NORMAL);
- /* If VAL is promoted to a wider mode, convert it back to MODE. Take care
- of CONST_INTs, where we know the old_mode only from the call argument. */
- old_mode = GET_MODE (val);
- if (old_mode == VOIDmode)
- old_mode = TYPE_MODE (TREE_TYPE (CALL_EXPR_ARG (exp, 1)));
- val = convert_modes (mode, old_mode, val, 1);
+ /* Get the underlying type of the object. */
+ ttype = TREE_TYPE (ttype);
+ type_align = TYPE_ALIGN (ttype);
+ }
- if (ignore)
- return expand_sync_operation (mem, val, code);
+ /* If the object has smaller alignment, the lock free routines cannot
+ be used. */
+ if (type_align < mode_align)
+ return boolean_false_node;
+
+ /* Check if a compare_and_swap pattern exists for the mode which represents
+ the required size. The pattern is not allowed to fail, so the existence
+ of the pattern indicates support is present. */
+ if (can_compare_and_swap_p (mode, true))
+ return boolean_true_node;
else
- return expand_sync_fetch_operation (mem, val, code, after, target);
+ return boolean_false_node;
}
-/* Expand the __sync_val_compare_and_swap and __sync_bool_compare_and_swap
- intrinsics. EXP is the CALL_EXPR. IS_BOOL is
- true if this is the boolean form. TARGET is a place for us to store the
- results; this is NOT optional if IS_BOOL is true. */
+/* Return true if the parameters to call EXP represent an object which will
+ always generate lock free instructions. The first argument represents the
+ size of the object, and the second parameter is a pointer to the object
+ itself. If NULL is passed for the object, then the result is based on
+ typical alignment for an object of the specified size. Otherwise return
+ false. */
static rtx
-expand_builtin_compare_and_swap (enum machine_mode mode, tree exp,
- bool is_bool, rtx target)
+expand_builtin_atomic_always_lock_free (tree exp)
{
- rtx old_val, new_val, mem;
- enum machine_mode old_mode;
-
- /* Expand the operands. */
- mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode);
-
-
- old_val = expand_expr (CALL_EXPR_ARG (exp, 1), NULL_RTX,
- mode, EXPAND_NORMAL);
- /* If VAL is promoted to a wider mode, convert it back to MODE. Take care
- of CONST_INTs, where we know the old_mode only from the call argument. */
- old_mode = GET_MODE (old_val);
- if (old_mode == VOIDmode)
- old_mode = TYPE_MODE (TREE_TYPE (CALL_EXPR_ARG (exp, 1)));
- old_val = convert_modes (mode, old_mode, old_val, 1);
+ tree size;
+ tree arg0 = CALL_EXPR_ARG (exp, 0);
+ tree arg1 = CALL_EXPR_ARG (exp, 1);
- new_val = expand_expr (CALL_EXPR_ARG (exp, 2), NULL_RTX,
- mode, EXPAND_NORMAL);
- /* If VAL is promoted to a wider mode, convert it back to MODE. Take care
- of CONST_INTs, where we know the old_mode only from the call argument. */
- old_mode = GET_MODE (new_val);
- if (old_mode == VOIDmode)
- old_mode = TYPE_MODE (TREE_TYPE (CALL_EXPR_ARG (exp, 2)));
- new_val = convert_modes (mode, old_mode, new_val, 1);
+ if (TREE_CODE (arg0) != INTEGER_CST)
+ {
+ error ("non-constant argument 1 to __atomic_always_lock_free");
+ return const0_rtx;
+ }
- if (is_bool)
- return expand_bool_compare_and_swap (mem, old_val, new_val, target);
- else
- return expand_val_compare_and_swap (mem, old_val, new_val, target);
+ size = fold_builtin_atomic_always_lock_free (arg0, arg1);
+ if (size == boolean_true_node)
+ return const1_rtx;
+ return const0_rtx;
}
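/* A minimal usage sketch (hypothetical types), assuming the documented
   builtin form bool __atomic_always_lock_free (size_t size, void *ptr):

     __atomic_always_lock_free (sizeof (int), 0)     // folds to 1 or 0
     __atomic_always_lock_free (sizeof (int), &s.x)  // alignment of s.x used

   The size must be a compile-time constant, which is why a non-constant
   first argument is diagnosed above; a null pointer selects the typical
   alignment for the given size.  */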
-/* Expand the __sync_lock_test_and_set intrinsic. Note that the most
- general form is actually an atomic exchange, and some targets only
- support a reduced form with the second argument being a constant 1.
- EXP is the CALL_EXPR; TARGET is an optional place for us to store
- the results. */
+/* Return boolean_true_node if it can be determined that object ARG1 of size
+ ARG0 is lock free on this architecture; otherwise return NULL_TREE. */
-static rtx
-expand_builtin_sync_lock_test_and_set (enum machine_mode mode, tree exp,
- rtx target)
+static tree
+fold_builtin_atomic_is_lock_free (tree arg0, tree arg1)
{
- rtx val, mem;
- enum machine_mode old_mode;
-
- /* Expand the operands. */
- mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode);
- val = expand_expr (CALL_EXPR_ARG (exp, 1), NULL_RTX, mode, EXPAND_NORMAL);
- /* If VAL is promoted to a wider mode, convert it back to MODE. Take care
- of CONST_INTs, where we know the old_mode only from the call argument. */
- old_mode = GET_MODE (val);
- if (old_mode == VOIDmode)
- old_mode = TYPE_MODE (TREE_TYPE (CALL_EXPR_ARG (exp, 1)));
- val = convert_modes (mode, old_mode, val, 1);
+ if (!flag_inline_atomics)
+ return NULL_TREE;
+
+ /* If it isn't always lock free, don't generate a result. */
+ if (fold_builtin_atomic_always_lock_free (arg0, arg1) == boolean_true_node)
+ return boolean_true_node;
- return expand_sync_lock_test_and_set (mem, val, target);
+ return NULL_TREE;
}
-/* Expand the __sync_synchronize intrinsic. */
+/* Return true if it can be determined at compile time that the object
+ referenced by call EXP is lock free on this architecture. The first argument
+ represents the size of the object, and the second parameter is a pointer to
+ the object itself. If NULL is passed for the object, then the result is based
+ on typical alignment for an object of the specified size. Otherwise return
+ NULL. */
-static void
-expand_builtin_sync_synchronize (void)
+static rtx
+expand_builtin_atomic_is_lock_free (tree exp)
{
- gimple x;
- VEC (tree, gc) *v_clobbers;
+ tree size;
+ tree arg0 = CALL_EXPR_ARG (exp, 0);
+ tree arg1 = CALL_EXPR_ARG (exp, 1);
-#ifdef HAVE_memory_barrier
- if (HAVE_memory_barrier)
+ if (!INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
{
- emit_insn (gen_memory_barrier ());
- return;
+ error ("non-integer argument 1 to __atomic_is_lock_free");
+ return NULL_RTX;
}
-#endif
- if (synchronize_libfunc != NULL_RTX)
- {
- emit_library_call (synchronize_libfunc, LCT_NORMAL, VOIDmode, 0);
- return;
- }
+ if (!flag_inline_atomics)
+ return NULL_RTX;
+
+ /* If the value is known at compile time, return the RTX for it. */
+ size = fold_builtin_atomic_is_lock_free (arg0, arg1);
+ if (size == boolean_true_node)
+ return const1_rtx;
- /* If no explicit memory barrier instruction is available, create an
- empty asm stmt with a memory clobber. */
- v_clobbers = VEC_alloc (tree, gc, 1);
- VEC_quick_push (tree, v_clobbers,
- tree_cons (NULL, build_string (6, "memory"), NULL));
- x = gimple_build_asm_vec ("", NULL, NULL, v_clobbers, NULL);
- gimple_asm_set_volatile (x, true);
- expand_asm_stmt (x);
+ return NULL_RTX;
}
-/* Expand the __sync_lock_release intrinsic. EXP is the CALL_EXPR. */
+/* Expand the __atomic_thread_fence intrinsic:
+ void __atomic_thread_fence (enum memmodel)
+ EXP is the CALL_EXPR. */
static void
-expand_builtin_sync_lock_release (enum machine_mode mode, tree exp)
+expand_builtin_atomic_thread_fence (tree exp)
{
- struct expand_operand ops[2];
- enum insn_code icode;
- rtx mem;
+ enum memmodel model = get_memmodel (CALL_EXPR_ARG (exp, 0));
+ expand_mem_thread_fence (model);
+}
- /* Expand the operands. */
- mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode);
+/* Expand the __atomic_signal_fence intrinsic:
+ void __atomic_signal_fence (enum memmodel)
+ EXP is the CALL_EXPR. */
- /* If there is an explicit operation in the md file, use it. */
- icode = direct_optab_handler (sync_lock_release_optab, mode);
- if (icode != CODE_FOR_nothing)
- {
- create_fixed_operand (&ops[0], mem);
- create_input_operand (&ops[1], const0_rtx, mode);
- if (maybe_expand_insn (icode, 2, ops))
- return;
- }
+static void
+expand_builtin_atomic_signal_fence (tree exp)
+{
+ enum memmodel model = get_memmodel (CALL_EXPR_ARG (exp, 0));
+ expand_mem_signal_fence (model);
+}
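
/* Illustrative note: unlike the thread fence above, __atomic_signal_fence
   only has to order the current thread against its own signal handlers, so
   expand_mem_signal_fence typically acts as a compiler-level barrier and
   need not emit a hardware fence instruction.  */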
+
+/* Expand the __sync_synchronize intrinsic. */
- /* Otherwise we can implement this operation by emitting a barrier
- followed by a store of zero. */
- expand_builtin_sync_synchronize ();
- emit_move_insn (mem, const0_rtx);
+static void
+expand_builtin_sync_synchronize (void)
+{
+ expand_mem_thread_fence (MEMMODEL_SEQ_CST);
}
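
/* Illustrative note: with the body above, the legacy barrier

     __sync_synchronize ();

   is now just a sequentially consistent thread fence, i.e. the same
   expansion path used for __atomic_thread_fence (__ATOMIC_SEQ_CST).  */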
+
\f
/* Expand an expression EXP that calls a built-in function,
with result going to TARGET if that's convenient
set of builtins. */
if (!optimize
&& !called_as_built_in (fndecl)
- && DECL_ASSEMBLER_NAME_SET_P (fndecl)
&& fcode != BUILT_IN_ALLOCA
+ && fcode != BUILT_IN_ALLOCA_WITH_ALIGN
&& fcode != BUILT_IN_FREE)
return expand_call (exp, target, ignore);
return XEXP (DECL_RTL (DECL_RESULT (current_function_decl)), 0);
case BUILT_IN_ALLOCA:
+ case BUILT_IN_ALLOCA_WITH_ALIGN:
/* If the allocation stems from the declaration of a variable-sized
object, it cannot accumulate. */
target = expand_builtin_alloca (exp, CALL_ALLOCA_FOR_VAR_P (exp));
expand_stack_restore (CALL_EXPR_ARG (exp, 0));
return const0_rtx;
+ case BUILT_IN_BSWAP16:
case BUILT_IN_BSWAP32:
case BUILT_IN_BSWAP64:
- target = expand_builtin_bswap (exp, target, subtarget);
-
+ target = expand_builtin_bswap (target_mode, exp, target, subtarget);
if (target)
return target;
break;
return const0_rtx;
case BUILT_IN_INIT_TRAMPOLINE:
- return expand_builtin_init_trampoline (exp);
+ return expand_builtin_init_trampoline (exp, true);
+ case BUILT_IN_INIT_HEAP_TRAMPOLINE:
+ return expand_builtin_init_trampoline (exp, false);
case BUILT_IN_ADJUST_TRAMPOLINE:
return expand_builtin_adjust_trampoline (exp);
case BUILT_IN_SYNC_FETCH_AND_ADD_8:
case BUILT_IN_SYNC_FETCH_AND_ADD_16:
mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_FETCH_AND_ADD_1);
- target = expand_builtin_sync_operation (mode, exp, PLUS,
- false, target, ignore);
+ target = expand_builtin_sync_operation (mode, exp, PLUS, false, target);
if (target)
return target;
break;
case BUILT_IN_SYNC_FETCH_AND_SUB_8:
case BUILT_IN_SYNC_FETCH_AND_SUB_16:
mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_FETCH_AND_SUB_1);
- target = expand_builtin_sync_operation (mode, exp, MINUS,
- false, target, ignore);
+ target = expand_builtin_sync_operation (mode, exp, MINUS, false, target);
if (target)
return target;
break;
case BUILT_IN_SYNC_FETCH_AND_OR_8:
case BUILT_IN_SYNC_FETCH_AND_OR_16:
mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_FETCH_AND_OR_1);
- target = expand_builtin_sync_operation (mode, exp, IOR,
- false, target, ignore);
+ target = expand_builtin_sync_operation (mode, exp, IOR, false, target);
if (target)
return target;
break;
case BUILT_IN_SYNC_FETCH_AND_AND_8:
case BUILT_IN_SYNC_FETCH_AND_AND_16:
mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_FETCH_AND_AND_1);
- target = expand_builtin_sync_operation (mode, exp, AND,
- false, target, ignore);
+ target = expand_builtin_sync_operation (mode, exp, AND, false, target);
if (target)
return target;
break;
case BUILT_IN_SYNC_FETCH_AND_XOR_8:
case BUILT_IN_SYNC_FETCH_AND_XOR_16:
mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_FETCH_AND_XOR_1);
- target = expand_builtin_sync_operation (mode, exp, XOR,
- false, target, ignore);
+ target = expand_builtin_sync_operation (mode, exp, XOR, false, target);
if (target)
return target;
break;
case BUILT_IN_SYNC_FETCH_AND_NAND_8:
case BUILT_IN_SYNC_FETCH_AND_NAND_16:
mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_FETCH_AND_NAND_1);
- target = expand_builtin_sync_operation (mode, exp, NOT,
- false, target, ignore);
+ target = expand_builtin_sync_operation (mode, exp, NOT, false, target);
if (target)
return target;
break;
case BUILT_IN_SYNC_ADD_AND_FETCH_8:
case BUILT_IN_SYNC_ADD_AND_FETCH_16:
mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_ADD_AND_FETCH_1);
- target = expand_builtin_sync_operation (mode, exp, PLUS,
- true, target, ignore);
+ target = expand_builtin_sync_operation (mode, exp, PLUS, true, target);
if (target)
return target;
break;
case BUILT_IN_SYNC_SUB_AND_FETCH_8:
case BUILT_IN_SYNC_SUB_AND_FETCH_16:
mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_SUB_AND_FETCH_1);
- target = expand_builtin_sync_operation (mode, exp, MINUS,
- true, target, ignore);
+ target = expand_builtin_sync_operation (mode, exp, MINUS, true, target);
if (target)
return target;
break;
case BUILT_IN_SYNC_OR_AND_FETCH_8:
case BUILT_IN_SYNC_OR_AND_FETCH_16:
mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_OR_AND_FETCH_1);
- target = expand_builtin_sync_operation (mode, exp, IOR,
- true, target, ignore);
+ target = expand_builtin_sync_operation (mode, exp, IOR, true, target);
if (target)
return target;
break;
case BUILT_IN_SYNC_AND_AND_FETCH_8:
case BUILT_IN_SYNC_AND_AND_FETCH_16:
mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_AND_AND_FETCH_1);
- target = expand_builtin_sync_operation (mode, exp, AND,
- true, target, ignore);
+ target = expand_builtin_sync_operation (mode, exp, AND, true, target);
if (target)
return target;
break;
case BUILT_IN_SYNC_XOR_AND_FETCH_8:
case BUILT_IN_SYNC_XOR_AND_FETCH_16:
mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_XOR_AND_FETCH_1);
- target = expand_builtin_sync_operation (mode, exp, XOR,
- true, target, ignore);
+ target = expand_builtin_sync_operation (mode, exp, XOR, true, target);
if (target)
return target;
break;
case BUILT_IN_SYNC_NAND_AND_FETCH_8:
case BUILT_IN_SYNC_NAND_AND_FETCH_16:
mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_NAND_AND_FETCH_1);
- target = expand_builtin_sync_operation (mode, exp, NOT,
- true, target, ignore);
+ target = expand_builtin_sync_operation (mode, exp, NOT, true, target);
if (target)
return target;
break;
expand_builtin_sync_synchronize ();
return const0_rtx;
+ case BUILT_IN_ATOMIC_EXCHANGE_1:
+ case BUILT_IN_ATOMIC_EXCHANGE_2:
+ case BUILT_IN_ATOMIC_EXCHANGE_4:
+ case BUILT_IN_ATOMIC_EXCHANGE_8:
+ case BUILT_IN_ATOMIC_EXCHANGE_16:
+ mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_EXCHANGE_1);
+ target = expand_builtin_atomic_exchange (mode, exp, target);
+ if (target)
+ return target;
+ break;
+
+ case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_1:
+ case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_2:
+ case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_4:
+ case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_8:
+ case BUILT_IN_ATOMIC_COMPARE_EXCHANGE_16:
+ {
+ unsigned int nargs, z;
+ VEC(tree,gc) *vec;
+
+ mode =
+ get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_COMPARE_EXCHANGE_1);
+ target = expand_builtin_atomic_compare_exchange (mode, exp, target);
+ if (target)
+ return target;
+
+ /* If this is turned into an external library call, the weak parameter
+ must be dropped to match the expected parameter list. */
+ nargs = call_expr_nargs (exp);
+ vec = VEC_alloc (tree, gc, nargs - 1);
+ for (z = 0; z < 3; z++)
+ VEC_quick_push (tree, vec, CALL_EXPR_ARG (exp, z));
+ /* Skip the boolean weak parameter. */
+ for (z = 4; z < 6; z++)
+ VEC_quick_push (tree, vec, CALL_EXPR_ARG (exp, z));
+ exp = build_call_vec (TREE_TYPE (exp), CALL_EXPR_FN (exp), vec);
+ break;
+ }
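
    /* Illustrative mapping (argument names are placeholders): the loop above
       turns

	 __atomic_compare_exchange_4 (ptr, expected, desired, weak, smodel, fmodel)

       into

	 __atomic_compare_exchange_4 (ptr, expected, desired, smodel, fmodel)

       which matches the parameter list the external library version expects.  */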
+
+ case BUILT_IN_ATOMIC_LOAD_1:
+ case BUILT_IN_ATOMIC_LOAD_2:
+ case BUILT_IN_ATOMIC_LOAD_4:
+ case BUILT_IN_ATOMIC_LOAD_8:
+ case BUILT_IN_ATOMIC_LOAD_16:
+ mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_LOAD_1);
+ target = expand_builtin_atomic_load (mode, exp, target);
+ if (target)
+ return target;
+ break;
+
+ case BUILT_IN_ATOMIC_STORE_1:
+ case BUILT_IN_ATOMIC_STORE_2:
+ case BUILT_IN_ATOMIC_STORE_4:
+ case BUILT_IN_ATOMIC_STORE_8:
+ case BUILT_IN_ATOMIC_STORE_16:
+ mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_STORE_1);
+ target = expand_builtin_atomic_store (mode, exp);
+ if (target)
+ return const0_rtx;
+ break;
+
+ case BUILT_IN_ATOMIC_ADD_FETCH_1:
+ case BUILT_IN_ATOMIC_ADD_FETCH_2:
+ case BUILT_IN_ATOMIC_ADD_FETCH_4:
+ case BUILT_IN_ATOMIC_ADD_FETCH_8:
+ case BUILT_IN_ATOMIC_ADD_FETCH_16:
+ {
+ enum built_in_function lib;
+ mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_ADD_FETCH_1);
+ lib = (enum built_in_function)((int)BUILT_IN_ATOMIC_FETCH_ADD_1 +
+ (fcode - BUILT_IN_ATOMIC_ADD_FETCH_1));
+ target = expand_builtin_atomic_fetch_op (mode, exp, target, PLUS, true,
+ ignore, lib);
+ if (target)
+ return target;
+ break;
+ }
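    /* Illustrative note on the case above (the SUB/AND/NAND/XOR/OR cases
       below follow the same pattern): an op-then-fetch builtin such as
       __atomic_add_fetch_4 records its fetch-then-op counterpart
       __atomic_fetch_add_4 in LIB, so that if no inline sequence exists the
       expander can fall back to that library entry point and derive the
       op-then-fetch result from its return value.  */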
+ case BUILT_IN_ATOMIC_SUB_FETCH_1:
+ case BUILT_IN_ATOMIC_SUB_FETCH_2:
+ case BUILT_IN_ATOMIC_SUB_FETCH_4:
+ case BUILT_IN_ATOMIC_SUB_FETCH_8:
+ case BUILT_IN_ATOMIC_SUB_FETCH_16:
+ {
+ enum built_in_function lib;
+ mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_SUB_FETCH_1);
+ lib = (enum built_in_function)((int)BUILT_IN_ATOMIC_FETCH_SUB_1 +
+ (fcode - BUILT_IN_ATOMIC_SUB_FETCH_1));
+ target = expand_builtin_atomic_fetch_op (mode, exp, target, MINUS, true,
+ ignore, lib);
+ if (target)
+ return target;
+ break;
+ }
+ case BUILT_IN_ATOMIC_AND_FETCH_1:
+ case BUILT_IN_ATOMIC_AND_FETCH_2:
+ case BUILT_IN_ATOMIC_AND_FETCH_4:
+ case BUILT_IN_ATOMIC_AND_FETCH_8:
+ case BUILT_IN_ATOMIC_AND_FETCH_16:
+ {
+ enum built_in_function lib;
+ mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_AND_FETCH_1);
+ lib = (enum built_in_function)((int)BUILT_IN_ATOMIC_FETCH_AND_1 +
+ (fcode - BUILT_IN_ATOMIC_AND_FETCH_1));
+ target = expand_builtin_atomic_fetch_op (mode, exp, target, AND, true,
+ ignore, lib);
+ if (target)
+ return target;
+ break;
+ }
+ case BUILT_IN_ATOMIC_NAND_FETCH_1:
+ case BUILT_IN_ATOMIC_NAND_FETCH_2:
+ case BUILT_IN_ATOMIC_NAND_FETCH_4:
+ case BUILT_IN_ATOMIC_NAND_FETCH_8:
+ case BUILT_IN_ATOMIC_NAND_FETCH_16:
+ {
+ enum built_in_function lib;
+ mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_NAND_FETCH_1);
+ lib = (enum built_in_function)((int)BUILT_IN_ATOMIC_FETCH_NAND_1 +
+ (fcode - BUILT_IN_ATOMIC_NAND_FETCH_1));
+ target = expand_builtin_atomic_fetch_op (mode, exp, target, NOT, true,
+ ignore, lib);
+ if (target)
+ return target;
+ break;
+ }
+ case BUILT_IN_ATOMIC_XOR_FETCH_1:
+ case BUILT_IN_ATOMIC_XOR_FETCH_2:
+ case BUILT_IN_ATOMIC_XOR_FETCH_4:
+ case BUILT_IN_ATOMIC_XOR_FETCH_8:
+ case BUILT_IN_ATOMIC_XOR_FETCH_16:
+ {
+ enum built_in_function lib;
+ mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_XOR_FETCH_1);
+ lib = (enum built_in_function)((int)BUILT_IN_ATOMIC_FETCH_XOR_1 +
+ (fcode - BUILT_IN_ATOMIC_XOR_FETCH_1));
+ target = expand_builtin_atomic_fetch_op (mode, exp, target, XOR, true,
+ ignore, lib);
+ if (target)
+ return target;
+ break;
+ }
+ case BUILT_IN_ATOMIC_OR_FETCH_1:
+ case BUILT_IN_ATOMIC_OR_FETCH_2:
+ case BUILT_IN_ATOMIC_OR_FETCH_4:
+ case BUILT_IN_ATOMIC_OR_FETCH_8:
+ case BUILT_IN_ATOMIC_OR_FETCH_16:
+ {
+ enum built_in_function lib;
+ mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_OR_FETCH_1);
+ lib = (enum built_in_function)((int)BUILT_IN_ATOMIC_FETCH_OR_1 +
+ (fcode - BUILT_IN_ATOMIC_OR_FETCH_1));
+ target = expand_builtin_atomic_fetch_op (mode, exp, target, IOR, true,
+ ignore, lib);
+ if (target)
+ return target;
+ break;
+ }
+ case BUILT_IN_ATOMIC_FETCH_ADD_1:
+ case BUILT_IN_ATOMIC_FETCH_ADD_2:
+ case BUILT_IN_ATOMIC_FETCH_ADD_4:
+ case BUILT_IN_ATOMIC_FETCH_ADD_8:
+ case BUILT_IN_ATOMIC_FETCH_ADD_16:
+ mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_FETCH_ADD_1);
+ target = expand_builtin_atomic_fetch_op (mode, exp, target, PLUS, false,
+ ignore, BUILT_IN_NONE);
+ if (target)
+ return target;
+ break;
+
+ case BUILT_IN_ATOMIC_FETCH_SUB_1:
+ case BUILT_IN_ATOMIC_FETCH_SUB_2:
+ case BUILT_IN_ATOMIC_FETCH_SUB_4:
+ case BUILT_IN_ATOMIC_FETCH_SUB_8:
+ case BUILT_IN_ATOMIC_FETCH_SUB_16:
+ mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_FETCH_SUB_1);
+ target = expand_builtin_atomic_fetch_op (mode, exp, target, MINUS, false,
+ ignore, BUILT_IN_NONE);
+ if (target)
+ return target;
+ break;
+
+ case BUILT_IN_ATOMIC_FETCH_AND_1:
+ case BUILT_IN_ATOMIC_FETCH_AND_2:
+ case BUILT_IN_ATOMIC_FETCH_AND_4:
+ case BUILT_IN_ATOMIC_FETCH_AND_8:
+ case BUILT_IN_ATOMIC_FETCH_AND_16:
+ mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_FETCH_AND_1);
+ target = expand_builtin_atomic_fetch_op (mode, exp, target, AND, false,
+ ignore, BUILT_IN_NONE);
+ if (target)
+ return target;
+ break;
+
+ case BUILT_IN_ATOMIC_FETCH_NAND_1:
+ case BUILT_IN_ATOMIC_FETCH_NAND_2:
+ case BUILT_IN_ATOMIC_FETCH_NAND_4:
+ case BUILT_IN_ATOMIC_FETCH_NAND_8:
+ case BUILT_IN_ATOMIC_FETCH_NAND_16:
+ mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_FETCH_NAND_1);
+ target = expand_builtin_atomic_fetch_op (mode, exp, target, NOT, false,
+ ignore, BUILT_IN_NONE);
+ if (target)
+ return target;
+ break;
+
+ case BUILT_IN_ATOMIC_FETCH_XOR_1:
+ case BUILT_IN_ATOMIC_FETCH_XOR_2:
+ case BUILT_IN_ATOMIC_FETCH_XOR_4:
+ case BUILT_IN_ATOMIC_FETCH_XOR_8:
+ case BUILT_IN_ATOMIC_FETCH_XOR_16:
+ mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_FETCH_XOR_1);
+ target = expand_builtin_atomic_fetch_op (mode, exp, target, XOR, false,
+ ignore, BUILT_IN_NONE);
+ if (target)
+ return target;
+ break;
+
+ case BUILT_IN_ATOMIC_FETCH_OR_1:
+ case BUILT_IN_ATOMIC_FETCH_OR_2:
+ case BUILT_IN_ATOMIC_FETCH_OR_4:
+ case BUILT_IN_ATOMIC_FETCH_OR_8:
+ case BUILT_IN_ATOMIC_FETCH_OR_16:
+ mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_FETCH_OR_1);
+ target = expand_builtin_atomic_fetch_op (mode, exp, target, IOR, false,
+ ignore, BUILT_IN_NONE);
+ if (target)
+ return target;
+ break;
+
+ case BUILT_IN_ATOMIC_TEST_AND_SET:
+ return expand_builtin_atomic_test_and_set (exp, target);
+
+ case BUILT_IN_ATOMIC_CLEAR:
+ return expand_builtin_atomic_clear (exp);
+
+ case BUILT_IN_ATOMIC_ALWAYS_LOCK_FREE:
+ return expand_builtin_atomic_always_lock_free (exp);
+
+ case BUILT_IN_ATOMIC_IS_LOCK_FREE:
+ target = expand_builtin_atomic_is_lock_free (exp);
+ if (target)
+ return target;
+ break;
+
+ case BUILT_IN_ATOMIC_THREAD_FENCE:
+ expand_builtin_atomic_thread_fence (exp);
+ return const0_rtx;
+
+ case BUILT_IN_ATOMIC_SIGNAL_FENCE:
+ expand_builtin_atomic_signal_fence (exp);
+ return const0_rtx;
+
case BUILT_IN_OBJECT_SIZE:
return expand_builtin_object_size (exp);
case BUILT_IN_STRCPY_CHK:
case BUILT_IN_STPCPY_CHK:
case BUILT_IN_STRNCPY_CHK:
+ case BUILT_IN_STPNCPY_CHK:
case BUILT_IN_STRCAT_CHK:
case BUILT_IN_STRNCAT_CHK:
case BUILT_IN_SNPRINTF_CHK:
{
tree fn, arg_types, pred_type, expected_type, call_expr, ret_type;
- fn = built_in_decls[BUILT_IN_EXPECT];
+ fn = builtin_decl_explicit (BUILT_IN_EXPECT);
arg_types = TYPE_ARG_TYPES (TREE_TYPE (fn));
ret_type = TREE_TYPE (TREE_TYPE (fn));
pred_type = TREE_VALUE (arg_types);
if (width > HOST_BITS_PER_WIDE_INT)
{
hi = TREE_INT_CST_HIGH (arg);
- if (width < 2 * HOST_BITS_PER_WIDE_INT)
+ if (width < HOST_BITS_PER_DOUBLE_INT)
hi &= ~((unsigned HOST_WIDE_INT) (-1)
<< (width - HOST_BITS_PER_WIDE_INT));
}
return NULL_TREE;
}
-/* Fold function call to builtin_bswap and the long and long long
+/* Fold a function call to builtin_bswap and the short, long and long long
variants. Return NULL_TREE if no simplification can be made. */
static tree
fold_builtin_bswap (tree fndecl, tree arg)
{
HOST_WIDE_INT hi, width, r_hi = 0;
unsigned HOST_WIDE_INT lo, r_lo = 0;
- tree type;
+ tree type = TREE_TYPE (TREE_TYPE (fndecl));
- type = TREE_TYPE (arg);
width = TYPE_PRECISION (type);
lo = TREE_INT_CST_LOW (arg);
hi = TREE_INT_CST_HIGH (arg);
switch (DECL_FUNCTION_CODE (fndecl))
{
+ case BUILT_IN_BSWAP16:
case BUILT_IN_BSWAP32:
case BUILT_IN_BSWAP64:
{
}
if (width < HOST_BITS_PER_WIDE_INT)
- return build_int_cst (TREE_TYPE (TREE_TYPE (fndecl)), r_lo);
+ return build_int_cst (type, r_lo);
else
- return build_int_cst_wide (TREE_TYPE (TREE_TYPE (fndecl)), r_lo, r_hi);
+ return build_int_cst_wide (type, r_lo, r_hi);
}
return NULL_TREE;
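
/* Worked example (hedged) for the constant fold above:

     __builtin_bswap16 (0x1234)      -> 0x3412
     __builtin_bswap32 (0x12345678)  -> 0x78563412

   and the folded constant is built in the return type of the builtin, which
   is why TYPE is now taken from TREE_TYPE (TREE_TYPE (fndecl)).  */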
&& (MIN (src_align, dest_align) / BITS_PER_UNIT
>= (unsigned HOST_WIDE_INT) tree_low_cst (len, 1))))
{
- tree fn = implicit_built_in_decls[BUILT_IN_MEMCPY];
+ tree fn = builtin_decl_implicit (BUILT_IN_MEMCPY);
if (!fn)
return NULL_TREE;
return build_call_expr_loc (loc, fn, 3, dest, src, len);
if (! operand_equal_p (TREE_OPERAND (src_base, 0),
TREE_OPERAND (dest_base, 0), 0))
return NULL_TREE;
- off = double_int_add (mem_ref_offset (src_base),
- shwi_to_double_int (src_offset));
- if (!double_int_fits_in_shwi_p (off))
+ off = mem_ref_offset (src_base) + double_int::from_shwi (src_offset);
+ if (!off.fits_shwi ())
return NULL_TREE;
src_offset = off.low;
- off = double_int_add (mem_ref_offset (dest_base),
- shwi_to_double_int (dest_offset));
- if (!double_int_fits_in_shwi_p (off))
+ off = mem_ref_offset (dest_base) + double_int::from_shwi (dest_offset);
+ if (!off.fits_shwi ())
return NULL_TREE;
dest_offset = off.low;
if (ranges_overlap_p (src_offset, maxsize,
else
return NULL_TREE;
- fn = implicit_built_in_decls[BUILT_IN_MEMCPY];
+ fn = builtin_decl_implicit (BUILT_IN_MEMCPY);
if (!fn)
return NULL_TREE;
return build_call_expr_loc (loc, fn, 3, dest, src, len);
if (!refs_may_alias_p_1 (&destr, &srcr, false))
{
tree fn;
- fn = implicit_built_in_decls[BUILT_IN_MEMCPY];
+ fn = builtin_decl_implicit (BUILT_IN_MEMCPY);
if (!fn)
return NULL_TREE;
return build_call_expr_loc (loc, fn, 3, dest, src, len);
if (optimize_function_for_size_p (cfun))
return NULL_TREE;
- fn = implicit_built_in_decls[BUILT_IN_MEMCPY];
+ fn = builtin_decl_implicit (BUILT_IN_MEMCPY);
if (!fn)
return NULL_TREE;
return NULL_TREE;
}
- len = size_binop_loc (loc, PLUS_EXPR, len, ssize_int (1));
+ len = fold_convert_loc (loc, size_type_node, len);
+ len = size_binop_loc (loc, PLUS_EXPR, len, build_int_cst (size_type_node, 1));
return fold_convert_loc (loc, TREE_TYPE (TREE_TYPE (fndecl)),
build_call_expr_loc (loc, fn, 3, dest, src, len));
}
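
/* Illustrative effect of the fold above (d and s are placeholders): a copy
   whose source length LEN is known becomes

     memcpy (d, s, LEN + 1)

   with LEN + 1 computed in size_type_node so it matches the size_t parameter
   of memcpy.  */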
&& !integer_zerop (len))
return NULL_TREE;
- fn = implicit_built_in_decls[BUILT_IN_MEMCPY];
+ fn = builtin_decl_implicit (BUILT_IN_MEMCPY);
if (!fn)
return NULL_TREE;
- lenp1 = size_binop_loc (loc, PLUS_EXPR, len, ssize_int (1));
+ lenp1 = size_binop_loc (loc, PLUS_EXPR,
+ fold_convert_loc (loc, size_type_node, len),
+ build_int_cst (size_type_node, 1));
/* We use dest twice in building our expression. Save it from
multiple expansions. */
dest = builtin_save_expr (dest);
return NULL_TREE;
/* OK transform into builtin memcpy. */
- fn = implicit_built_in_decls[BUILT_IN_MEMCPY];
+ fn = builtin_decl_implicit (BUILT_IN_MEMCPY);
if (!fn)
return NULL_TREE;
+
+ len = fold_convert_loc (loc, size_type_node, len);
return fold_convert_loc (loc, TREE_TYPE (TREE_TYPE (fndecl)),
build_call_expr_loc (loc, fn, 3, dest, src, len));
}
if (target_char_cast (arg2, &c))
return NULL_TREE;
- r = (char *) memchr (p1, c, tree_low_cst (len, 1));
+ r = (const char *) memchr (p1, c, tree_low_cst (len, 1));
if (r == NULL)
return build_int_cst (TREE_TYPE (arg1), 0);
CASE_FLT_FN (BUILT_IN_ISINF):
{
/* isinf(x) -> isgreater(fabs(x),DBL_MAX). */
- tree const isgr_fn = built_in_decls[BUILT_IN_ISGREATER];
+ tree const isgr_fn = builtin_decl_explicit (BUILT_IN_ISGREATER);
tree const type = TREE_TYPE (arg);
REAL_VALUE_TYPE r;
char buf[128];
case BUILT_IN_ISFINITE:
{
/* isfinite(x) -> islessequal(fabs(x),DBL_MAX). */
- tree const isle_fn = built_in_decls[BUILT_IN_ISLESSEQUAL];
+ tree const isle_fn = builtin_decl_explicit (BUILT_IN_ISLESSEQUAL);
tree const type = TREE_TYPE (arg);
REAL_VALUE_TYPE r;
char buf[128];
{
/* isnormal(x) -> isgreaterequal(fabs(x),DBL_MIN) &
islessequal(fabs(x),DBL_MAX). */
- tree const isle_fn = built_in_decls[BUILT_IN_ISLESSEQUAL];
- tree const isge_fn = built_in_decls[BUILT_IN_ISGREATEREQUAL];
+ tree const isle_fn = builtin_decl_explicit (BUILT_IN_ISLESSEQUAL);
+ tree const isge_fn = builtin_decl_explicit (BUILT_IN_ISGREATEREQUAL);
tree const type = TREE_TYPE (arg);
REAL_VALUE_TYPE rmax, rmin;
char buf[128];
1. So e.g. "if (isinf_sign(x))" would be folded to just
"if (isinf(x) ? 1 : 0)" which becomes "if (isinf(x))". */
tree signbit_fn = mathfn_built_in_1 (TREE_TYPE (arg), BUILT_IN_SIGNBIT, 0);
- tree isinf_fn = built_in_decls[BUILT_IN_ISINF];
+ tree isinf_fn = builtin_decl_explicit (BUILT_IN_ISINF);
tree tmp = NULL_TREE;
arg = builtin_save_expr (arg);
CASE_FLT_FN (BUILT_IN_LLRINT):
return fold_fixed_mathfn (loc, fndecl, arg0);
+ case BUILT_IN_BSWAP16:
case BUILT_IN_BSWAP32:
case BUILT_IN_BSWAP64:
return fold_builtin_bswap (fndecl, arg0);
case BUILT_IN_STPCPY:
if (ignore)
{
- tree fn = implicit_built_in_decls[BUILT_IN_STRCPY];
+ tree fn = builtin_decl_implicit (BUILT_IN_STRCPY);
if (!fn)
break;
return fold_builtin_fprintf (loc, fndecl, arg0, arg1, NULL_TREE,
ignore, fcode);
+ case BUILT_IN_ATOMIC_ALWAYS_LOCK_FREE:
+ return fold_builtin_atomic_always_lock_free (arg0, arg1);
+
+ case BUILT_IN_ATOMIC_IS_LOCK_FREE:
+ return fold_builtin_atomic_is_lock_free (arg0, arg1);
+
default:
break;
}
DECL_FUNCTION_CODE (fndecl));
case BUILT_IN_STRNCPY_CHK:
- return fold_builtin_strncpy_chk (loc, arg0, arg1, arg2, arg3, NULL_TREE);
+ case BUILT_IN_STPNCPY_CHK:
+ return fold_builtin_stxncpy_chk (loc, arg0, arg1, arg2, arg3, NULL_TREE,
+ ignore, fcode);
case BUILT_IN_STRNCAT_CHK:
return fold_builtin_strncat_chk (loc, fndecl, arg0, arg1, arg2, arg3);
been inlined, otherwise e.g. -D_FORTIFY_SOURCE checking
might not be performed. */
-static bool
+bool
avoid_folding_inline_builtin (tree fndecl)
{
return (DECL_DECLARED_INLINE_P (fndecl)
if (p2[1] != '\0')
return NULL_TREE;
- fn = implicit_built_in_decls[BUILT_IN_STRCHR];
+ fn = builtin_decl_implicit (BUILT_IN_STRCHR);
if (!fn)
return NULL_TREE;
if (! integer_zerop (s2))
return NULL_TREE;
- fn = implicit_built_in_decls[BUILT_IN_STRCHR];
+ fn = builtin_decl_implicit (BUILT_IN_STRCHR);
if (!fn)
return NULL_TREE;
if (p2[1] != '\0')
return NULL_TREE; /* Really call strpbrk. */
- fn = implicit_built_in_decls[BUILT_IN_STRCHR];
+ fn = builtin_decl_implicit (BUILT_IN_STRCHR);
if (!fn)
return NULL_TREE;
{
/* See if we can store by pieces into (dst + strlen(dst)). */
tree newdst, call;
- tree strlen_fn = implicit_built_in_decls[BUILT_IN_STRLEN];
- tree strcpy_fn = implicit_built_in_decls[BUILT_IN_STRCPY];
+ tree strlen_fn = builtin_decl_implicit (BUILT_IN_STRLEN);
+ tree strcpy_fn = builtin_decl_implicit (BUILT_IN_STRCPY);
if (!strlen_fn || !strcpy_fn)
return NULL_TREE;
if (TREE_CODE (len) == INTEGER_CST && p
&& compare_tree_int (len, strlen (p)) >= 0)
{
- tree fn = implicit_built_in_decls[BUILT_IN_STRCAT];
+ tree fn = builtin_decl_implicit (BUILT_IN_STRCAT);
/* If the replacement _DECL isn't initialized, don't do the
transformation. */
if (p1 && p2)
{
const size_t r = strspn (p1, p2);
- return size_int (r);
+ return build_int_cst (size_type_node, r);
}
/* If either argument is "", return NULL_TREE. */
if (p1 && p2)
{
const size_t r = strcspn (p1, p2);
- return size_int (r);
+ return build_int_cst (size_type_node, r);
}
/* If the first argument is "", return NULL_TREE. */
/* If the second argument is "", return __builtin_strlen(s1). */
if (p2 && *p2 == '\0')
{
- tree fn = implicit_built_in_decls[BUILT_IN_STRLEN];
+ tree fn = builtin_decl_implicit (BUILT_IN_STRLEN);
/* If the replacement _DECL isn't initialized, don't do the
transformation. */
{
/* If we're using an unlocked function, assume the other unlocked
functions exist explicitly. */
- tree const fn_fputc = unlocked ? built_in_decls[BUILT_IN_FPUTC_UNLOCKED]
- : implicit_built_in_decls[BUILT_IN_FPUTC];
- tree const fn_fwrite = unlocked ? built_in_decls[BUILT_IN_FWRITE_UNLOCKED]
- : implicit_built_in_decls[BUILT_IN_FWRITE];
+ tree const fn_fputc = (unlocked
+ ? builtin_decl_explicit (BUILT_IN_FPUTC_UNLOCKED)
+ : builtin_decl_implicit (BUILT_IN_FPUTC));
+ tree const fn_fwrite = (unlocked
+ ? builtin_decl_explicit (BUILT_IN_FWRITE_UNLOCKED)
+ : builtin_decl_implicit (BUILT_IN_FWRITE));
/* If the return value is used, don't do the transformation. */
if (!ignore)
tree fntype = TREE_TYPE (current_function_decl);
int nargs = call_expr_nargs (exp);
tree arg;
+ /* There is a good chance the current input_location points inside the
+ definition of the va_start macro (perhaps on the token for
+ builtin) in a system header, so warnings will not be emitted.
+ Use the location in real source code. */
+ source_location current_location =
+ linemap_unwind_to_first_non_reserved_loc (line_table, input_location,
+ NULL);
if (!stdarg_p (fntype))
{
{
/* Evidently an out of date version of <stdarg.h>; can't validate
va_start's second argument, but can still work as intended. */
- warning (0, "%<__builtin_next_arg%> called without an argument");
+ warning_at (current_location,
+ OPT_Wvarargs,
+ "%<__builtin_next_arg%> called without an argument");
return true;
}
else if (nargs > 1)
argument. We just warn and set the arg to be the last
argument so that we will get wrong-code because of
it. */
- warning (0, "second parameter of %<va_start%> not last named argument");
+ warning_at (current_location,
+ OPT_Wvarargs,
+ "second parameter of %<va_start%> not last named argument");
}
/* Undefined by C99 7.15.1.4p4 (va_start):
the default argument promotions, the behavior is undefined."
*/
else if (DECL_REGISTER (arg))
- warning (0, "undefined behaviour when second parameter of "
- "%<va_start%> is declared with %<register%> storage");
+ {
+ warning_at (current_location,
+ OPT_Wvarargs,
+ "undefined behaviour when second parameter of "
+ "%<va_start%> is declared with %<register%> storage");
+ }
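
  /* Illustrative summary (f is a placeholder): misuses of va_start in a
     function such as

       void f (int last, ...) { va_list ap; va_start (ap, last); }

     are now reported through OPT_Wvarargs at the location of the va_start
     use in real source code, rather than inside the system-header macro
     expansion.  */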
/* We want to verify the second parameter just once before the tree
optimizers are run and then avoid keeping it in the tree,
/* If the format doesn't contain % args or %%, use strcpy. */
if (strchr (fmt_str, target_percent) == NULL)
{
- tree fn = implicit_built_in_decls[BUILT_IN_STRCPY];
+ tree fn = builtin_decl_implicit (BUILT_IN_STRCPY);
if (!fn)
return NULL_TREE;
else if (fmt_str && strcmp (fmt_str, target_percent_s) == 0)
{
tree fn;
- fn = implicit_built_in_decls[BUILT_IN_STRCPY];
+ fn = builtin_decl_implicit (BUILT_IN_STRCPY);
if (!fn)
return NULL_TREE;
if (call && retval)
{
retval = fold_convert_loc
- (loc, TREE_TYPE (TREE_TYPE (implicit_built_in_decls[BUILT_IN_SPRINTF])),
+ (loc, TREE_TYPE (TREE_TYPE (builtin_decl_implicit (BUILT_IN_SPRINTF))),
retval);
return build2 (COMPOUND_EXPR, TREE_TYPE (retval), call, retval);
}
/* If the format doesn't contain % args or %%, use strcpy. */
if (strchr (fmt_str, target_percent) == NULL)
{
- tree fn = implicit_built_in_decls[BUILT_IN_STRCPY];
+ tree fn = builtin_decl_implicit (BUILT_IN_STRCPY);
size_t len = strlen (fmt_str);
/* Don't optimize snprintf (buf, 4, "abc", ptr++). */
/* If the format is "%s", use strcpy if the result isn't used. */
else if (fmt_str && strcmp (fmt_str, target_percent_s) == 0)
{
- tree fn = implicit_built_in_decls[BUILT_IN_STRCPY];
+ tree fn = builtin_decl_implicit (BUILT_IN_STRCPY);
unsigned HOST_WIDE_INT origlen;
/* Don't crash on snprintf (str1, cst, "%s"). */
if (call && retval)
{
- tree fn = built_in_decls[BUILT_IN_SNPRINTF];
+ tree fn = builtin_decl_explicit (BUILT_IN_SNPRINTF);
retval = fold_convert_loc (loc, TREE_TYPE (TREE_TYPE (fn)), retval);
return build2 (COMPOUND_EXPR, TREE_TYPE (retval), call, retval);
}
switch (fcode)
{
case BUILT_IN_MEMCPY_CHK:
- fn = built_in_decls[BUILT_IN_MEMCPY];
+ fn = builtin_decl_explicit (BUILT_IN_MEMCPY);
break;
case BUILT_IN_MEMPCPY_CHK:
- fn = built_in_decls[BUILT_IN_MEMPCPY];
+ fn = builtin_decl_explicit (BUILT_IN_MEMPCPY);
break;
case BUILT_IN_MEMMOVE_CHK:
- fn = built_in_decls[BUILT_IN_MEMMOVE];
+ fn = builtin_decl_explicit (BUILT_IN_MEMMOVE);
break;
case BUILT_IN_MEMSET_CHK:
- fn = built_in_decls[BUILT_IN_MEMSET];
+ fn = builtin_decl_explicit (BUILT_IN_MEMSET);
break;
default:
break;
normal __memcpy_chk. */
if (readonly_data_expr (src))
{
- tree fn = built_in_decls[BUILT_IN_MEMCPY_CHK];
+ tree fn = builtin_decl_explicit (BUILT_IN_MEMCPY_CHK);
if (!fn)
return NULL_RTX;
fn = build_call_nofold_loc (EXPR_LOCATION (exp), fn, 4,
break;
case BUILT_IN_STRNCAT_CHK:
case BUILT_IN_STRNCPY_CHK:
+ case BUILT_IN_STPNCPY_CHK:
len = CALL_EXPR_ARG (exp, 2);
size = CALL_EXPR_ARG (exp, 3);
break;
{
bytes = compute_builtin_object_size (ptr, object_size_type);
if (double_int_fits_to_tree_p (size_type_node,
- uhwi_to_double_int (bytes)))
+ double_int::from_uhwi (bytes)))
return build_int_cstu (size_type_node, bytes);
}
else if (TREE_CODE (ptr) == SSA_NAME)
bytes = compute_builtin_object_size (ptr, object_size_type);
if (bytes != (unsigned HOST_WIDE_INT) (object_size_type < 2 ? -1 : 0)
&& double_int_fits_to_tree_p (size_type_node,
- uhwi_to_double_int (bytes)))
+ double_int::from_uhwi (bytes)))
return build_int_cstu (size_type_node, bytes);
}
{
/* (void) __mempcpy_chk () can be optimized into
(void) __memcpy_chk (). */
- fn = built_in_decls[BUILT_IN_MEMCPY_CHK];
+ fn = builtin_decl_explicit (BUILT_IN_MEMCPY_CHK);
if (!fn)
return NULL_TREE;
switch (fcode)
{
case BUILT_IN_MEMCPY_CHK:
- fn = built_in_decls[BUILT_IN_MEMCPY];
+ fn = builtin_decl_explicit (BUILT_IN_MEMCPY);
break;
case BUILT_IN_MEMPCPY_CHK:
- fn = built_in_decls[BUILT_IN_MEMPCPY];
+ fn = builtin_decl_explicit (BUILT_IN_MEMPCPY);
break;
case BUILT_IN_MEMMOVE_CHK:
- fn = built_in_decls[BUILT_IN_MEMMOVE];
+ fn = builtin_decl_explicit (BUILT_IN_MEMMOVE);
break;
case BUILT_IN_MEMSET_CHK:
- fn = built_in_decls[BUILT_IN_MEMSET];
+ fn = builtin_decl_explicit (BUILT_IN_MEMSET);
break;
default:
break;
/* If return value of __stpcpy_chk is ignored,
optimize into __strcpy_chk. */
- fn = built_in_decls[BUILT_IN_STRCPY_CHK];
+ fn = builtin_decl_explicit (BUILT_IN_STRCPY_CHK);
if (!fn)
return NULL_TREE;
/* If c_strlen returned something, but not a constant,
transform __strcpy_chk into __memcpy_chk. */
- fn = built_in_decls[BUILT_IN_MEMCPY_CHK];
+ fn = builtin_decl_explicit (BUILT_IN_MEMCPY_CHK);
if (!fn)
return NULL_TREE;
- len = size_binop_loc (loc, PLUS_EXPR, len, ssize_int (1));
+ len = fold_convert_loc (loc, size_type_node, len);
+ len = size_binop_loc (loc, PLUS_EXPR, len,
+ build_int_cst (size_type_node, 1));
return fold_convert_loc (loc, TREE_TYPE (TREE_TYPE (fndecl)),
build_call_expr_loc (loc, fn, 4,
dest, src, len, size));
}
/* If __builtin_st{r,p}cpy_chk is used, assume st{r,p}cpy is available. */
- fn = built_in_decls[fcode == BUILT_IN_STPCPY_CHK
- ? BUILT_IN_STPCPY : BUILT_IN_STRCPY];
+ fn = builtin_decl_explicit (fcode == BUILT_IN_STPCPY_CHK
+ ? BUILT_IN_STPCPY : BUILT_IN_STRCPY);
if (!fn)
return NULL_TREE;
return build_call_expr_loc (loc, fn, 2, dest, src);
}
-/* Fold a call to the __strncpy_chk builtin. DEST, SRC, LEN, and SIZE
+/* Fold a call to the __st{r,p}ncpy_chk builtin. DEST, SRC, LEN, and SIZE
   are the arguments to the call. If MAXLEN is not NULL, it is the maximum
- length passed as third argument. */
+ length passed as the third argument. IGNORE is true if the return value can
+ be ignored. FCODE is the BUILT_IN_* code of the builtin. */
tree
-fold_builtin_strncpy_chk (location_t loc, tree dest, tree src,
- tree len, tree size, tree maxlen)
+fold_builtin_stxncpy_chk (location_t loc, tree dest, tree src,
+ tree len, tree size, tree maxlen, bool ignore,
+ enum built_in_function fcode)
{
tree fn;
|| !validate_arg (size, INTEGER_TYPE))
return NULL_TREE;
+ if (fcode == BUILT_IN_STPNCPY_CHK && ignore)
+ {
+ /* If return value of __stpncpy_chk is ignored,
+ optimize into __strncpy_chk. */
+ fn = builtin_decl_explicit (BUILT_IN_STRNCPY_CHK);
+ if (fn)
+ return build_call_expr_loc (loc, fn, 4, dest, src, len, size);
+ }
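
  /* Illustrative effect of the block above (operands are placeholders): when
     its result is unused,

       __stpncpy_chk (d, s, n, bos)

     is folded to __strncpy_chk (d, s, n, bos), since the two differ only in
     the value they return.  */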
+
if (! host_integerp (size, 1))
return NULL_TREE;
return NULL_TREE;
}
- /* If __builtin_strncpy_chk is used, assume strncpy is available. */
- fn = built_in_decls[BUILT_IN_STRNCPY];
+ /* If __builtin_st{r,p}ncpy_chk is used, assume st{r,p}ncpy is available. */
+ fn = builtin_decl_explicit (fcode == BUILT_IN_STPNCPY_CHK
+ ? BUILT_IN_STPNCPY : BUILT_IN_STRNCPY);
if (!fn)
return NULL_TREE;
return NULL_TREE;
/* If __builtin_strcat_chk is used, assume strcat is available. */
- fn = built_in_decls[BUILT_IN_STRCAT];
+ fn = builtin_decl_explicit (BUILT_IN_STRCAT);
if (!fn)
return NULL_TREE;
&& ! tree_int_cst_lt (len, src_len))
{
/* If LEN >= strlen (SRC), optimize into __strcat_chk. */
- fn = built_in_decls[BUILT_IN_STRCAT_CHK];
+ fn = builtin_decl_explicit (BUILT_IN_STRCAT_CHK);
if (!fn)
return NULL_TREE;
}
/* If __builtin_strncat_chk is used, assume strncat is available. */
- fn = built_in_decls[BUILT_IN_STRNCAT];
+ fn = builtin_decl_explicit (BUILT_IN_STRNCAT);
if (!fn)
return NULL_TREE;
}
/* If __builtin_{,v}sprintf_chk is used, assume {,v}sprintf is available. */
- fn = built_in_decls[fcode == BUILT_IN_VSPRINTF_CHK
- ? BUILT_IN_VSPRINTF : BUILT_IN_SPRINTF];
+ fn = builtin_decl_explicit (fcode == BUILT_IN_VSPRINTF_CHK
+ ? BUILT_IN_VSPRINTF : BUILT_IN_SPRINTF);
if (!fn)
return NULL_TREE;
/* If __builtin_{,v}snprintf_chk is used, assume {,v}snprintf is
available. */
- fn = built_in_decls[fcode == BUILT_IN_VSNPRINTF_CHK
- ? BUILT_IN_VSNPRINTF : BUILT_IN_SNPRINTF];
+ fn = builtin_decl_explicit (fcode == BUILT_IN_VSNPRINTF_CHK
+ ? BUILT_IN_VSNPRINTF : BUILT_IN_SNPRINTF);
if (!fn)
return NULL_TREE;
{
/* If we're using an unlocked function, assume the other
unlocked functions exist explicitly. */
- fn_putchar = built_in_decls[BUILT_IN_PUTCHAR_UNLOCKED];
- fn_puts = built_in_decls[BUILT_IN_PUTS_UNLOCKED];
+ fn_putchar = builtin_decl_explicit (BUILT_IN_PUTCHAR_UNLOCKED);
+ fn_puts = builtin_decl_explicit (BUILT_IN_PUTS_UNLOCKED);
}
else
{
- fn_putchar = implicit_built_in_decls[BUILT_IN_PUTCHAR];
- fn_puts = implicit_built_in_decls[BUILT_IN_PUTS];
+ fn_putchar = builtin_decl_implicit (BUILT_IN_PUTCHAR);
+ fn_puts = builtin_decl_implicit (BUILT_IN_PUTS);
}
if (!init_target_chars ())
{
/* If we're using an unlocked function, assume the other
unlocked functions exist explicitly. */
- fn_fputc = built_in_decls[BUILT_IN_FPUTC_UNLOCKED];
- fn_fputs = built_in_decls[BUILT_IN_FPUTS_UNLOCKED];
+ fn_fputc = builtin_decl_explicit (BUILT_IN_FPUTC_UNLOCKED);
+ fn_fputs = builtin_decl_explicit (BUILT_IN_FPUTS_UNLOCKED);
}
else
{
- fn_fputc = implicit_built_in_decls[BUILT_IN_FPUTC];
- fn_fputs = implicit_built_in_decls[BUILT_IN_FPUTS];
+ fn_fputc = builtin_decl_implicit (BUILT_IN_FPUTC);
+ fn_fputs = builtin_decl_implicit (BUILT_IN_FPUTS);
}
if (!init_target_chars ())
return NULL_TREE;
}
-/* Look up the function in built_in_decls that corresponds to DECL
+/* Look up via builtin_decl_explicit the function that corresponds to DECL
and set ASMSPEC as its user assembler name. DECL must be a
function decl that declares a builtin. */
&& DECL_BUILT_IN_CLASS (decl) == BUILT_IN_NORMAL
&& asmspec != 0);
- builtin = built_in_decls [DECL_FUNCTION_CODE (decl)];
+ builtin = builtin_decl_explicit (DECL_FUNCTION_CODE (decl));
set_user_assembler_name (builtin, asmspec);
switch (DECL_FUNCTION_CODE (decl))
{
{
case BUILT_IN_ABS:
case BUILT_IN_ALLOCA:
+ case BUILT_IN_ALLOCA_WITH_ALIGN:
+ case BUILT_IN_BSWAP16:
case BUILT_IN_BSWAP32:
case BUILT_IN_BSWAP64:
case BUILT_IN_CLZ: