+2017-08-23 Daniel Santos <daniel.santos@pobox.com>
+
+ * config/i386/i386.h (ix86_frame::stack_realign_allocate_offset):
+ Remove field.
+ (ix86_frame::stack_realign_allocate): New field.
+ (struct machine_frame_state): Modify comments.
+ (machine_frame_state::sp_realigned_fp_end): New field.
+ * config/i386/i386.c (ix86_compute_frame_layout): Rework stack frame
+ layout calculation.
+ (sp_valid_at): Add assertion to assure no attempt to access invalid
+ offset of a realigned stack.
+ (fp_valid_at): Likewise.
+ (choose_baseaddr): Modify comments.
+ (ix86_emit_outlined_ms2sysv_save): Adjust to changes in
+ ix86_expand_prologue.
+ (ix86_expand_prologue): Modify stack realignment and allocation.
+ (ix86_expand_epilogue): Modify comments.
+ * doc/sourcebuild.texi: Add documentation for target selectors avx2,
+ avx2_runtime, avx512f, and avx512f_runtime.
+
2017-08-23 Uros Bizjak <ubizjak@gmail.com>
* config/i386/i386.opt: Remove unneeded Init(0) initializations.
gcc_assert (preferred_alignment >= STACK_BOUNDARY / BITS_PER_UNIT);
gcc_assert (preferred_alignment <= stack_alignment_needed);
+ /* The only ABI saving SSE regs should be 64-bit ms_abi. */
+ gcc_assert (TARGET_64BIT || !frame->nsseregs);
+ if (TARGET_64BIT && m->call_ms2sysv)
+ {
+ gcc_assert (stack_alignment_needed >= 16);
+ gcc_assert (!frame->nsseregs);
+ }
+
/* For SEH we have to limit the amount of code movement into the prologue.
At present we do this via a BLOCKAGE, at which point there's very little
scheduling that can be done, which means that there's very little point
if (TARGET_SEH)
frame->hard_frame_pointer_offset = offset;
- /* When re-aligning the stack frame, but not saving SSE registers, this
- is the offset we want adjust the stack pointer to. */
- frame->stack_realign_allocate_offset = offset;
+ /* Calculate the size of the va-arg area (not including padding, if any). */
+ frame->va_arg_size = ix86_varargs_gpr_size + ix86_varargs_fpr_size;
- /* The re-aligned stack starts here. Values before this point are not
- directly comparable with values below this point. Use sp_valid_at
- to determine if the stack pointer is valid for a given offset and
- fp_valid_at for the frame pointer. */
if (stack_realign_fp)
- offset = ROUND_UP (offset, stack_alignment_needed);
- frame->stack_realign_offset = offset;
-
- if (TARGET_64BIT && m->call_ms2sysv)
{
- gcc_assert (stack_alignment_needed >= 16);
- gcc_assert (!frame->nsseregs);
+ /* We may need a 16-byte aligned stack for the remainder of the
+ register save area, but the stack frame for the local function
+ may require a greater alignment if using AVX/2/512. In order
+ to avoid wasting space, we first calculate the space needed for
+ the rest of the register saves, add that to the stack pointer,
+ and then realign the stack to the boundary of the start of the
+ frame for the local function. */
+ HOST_WIDE_INT space_needed = 0;
+ HOST_WIDE_INT sse_reg_space_needed = 0;
- m->call_ms2sysv_pad_in = !!(offset & UNITS_PER_WORD);
- offset += xlogue_layout::get_instance ().get_stack_space_used ();
- }
+ if (TARGET_64BIT)
+ {
+ if (m->call_ms2sysv)
+ {
+ m->call_ms2sysv_pad_in = 0;
+ space_needed = xlogue_layout::get_instance ().get_stack_space_used ();
+ }
- /* Align and set SSE register save area. */
- else if (frame->nsseregs)
- {
- /* The only ABI that has saved SSE registers (Win64) also has a
- 16-byte aligned default stack. However, many programs violate
- the ABI, and Wine64 forces stack realignment to compensate.
+ else if (frame->nsseregs)
+ /* The only ABI that has saved SSE registers (Win64) also has a
+ 16-byte aligned default stack. However, many programs violate
+ the ABI, and Wine64 forces stack realignment to compensate. */
+ space_needed = frame->nsseregs * 16;
- If the incoming stack boundary is at least 16 bytes, or DRAP is
- required and the DRAP re-alignment boundary is at least 16 bytes,
- then we want the SSE register save area properly aligned. */
- if (ix86_incoming_stack_boundary >= 128
- || (stack_realign_drap && stack_alignment_needed >= 16))
- offset = ROUND_UP (offset, 16);
- offset += frame->nsseregs * 16;
- frame->stack_realign_allocate_offset = offset;
+ sse_reg_space_needed = space_needed = ROUND_UP (space_needed, 16);
+
+ /* 64-bit frame->va_arg_size should always be a multiple of 16, but
+ we round up to be pedantic. */
+ space_needed = ROUND_UP (space_needed + frame->va_arg_size, 16);
+ }
+ else
+ space_needed = frame->va_arg_size;
+
+ /* Record the allocation size required prior to the realignment. */
+ frame->stack_realign_allocate = space_needed;
+
+ /* The re-aligned stack starts at frame->stack_realign_offset. Values
+ before this point are not directly comparable with values below
+ this point. Use sp_valid_at to determine if the stack pointer is
+ valid for a given offset, fp_valid_at for the frame pointer, or
+ choose_baseaddr to have a base register chosen for you.
+
+ Note that the result of (frame->stack_realign_offset
+ & (stack_alignment_needed - 1)) may not equal zero. */
+ offset = ROUND_UP (offset + space_needed, stack_alignment_needed);
+ frame->stack_realign_offset = offset - space_needed;
+ frame->sse_reg_save_offset = frame->stack_realign_offset
+ + sse_reg_space_needed;
}
+ else
+ {
+ frame->stack_realign_offset = offset;
- frame->sse_reg_save_offset = offset;
+ if (TARGET_64BIT && m->call_ms2sysv)
+ {
+ m->call_ms2sysv_pad_in = !!(offset & UNITS_PER_WORD);
+ offset += xlogue_layout::get_instance ().get_stack_space_used ();
+ }
- /* Va-arg area */
- frame->va_arg_size = ix86_varargs_gpr_size + ix86_varargs_fpr_size;
- offset += frame->va_arg_size;
+ /* Align and set SSE register save area. */
+ else if (frame->nsseregs)
+ {
+ /* If the incoming stack boundary is at least 16 bytes, or DRAP is
+ required and the DRAP re-alignment boundary is at least 16 bytes,
+ then we want the SSE register save area properly aligned. */
+ if (ix86_incoming_stack_boundary >= 128
+ || (stack_realign_drap && stack_alignment_needed >= 16))
+ offset = ROUND_UP (offset, 16);
+ offset += frame->nsseregs * 16;
+ }
+ frame->sse_reg_save_offset = offset;
+ offset += frame->va_arg_size;
+ }
/* Align start of frame for local function. */
- if (stack_realign_fp
- || m->call_ms2sysv
- || offset != frame->sse_reg_save_offset
+ if (m->call_ms2sysv
+ || frame->va_arg_size != 0
|| size != 0
|| !crtl->is_leaf
|| cfun->calls_alloca
return len;
}
-/* Determine if the stack pointer is valid for accessing the cfa_offset.
- The register is saved at CFA - CFA_OFFSET. */
+/* Determine if the stack pointer is valid for accessing the CFA_OFFSET in
+ the frame save area. The register is saved at CFA - CFA_OFFSET. */
-static inline bool
+static bool
sp_valid_at (HOST_WIDE_INT cfa_offset)
{
const struct machine_frame_state &fs = cfun->machine->fs;
- return fs.sp_valid && !(fs.sp_realigned
- && cfa_offset <= fs.sp_realigned_offset);
+ if (fs.sp_realigned && cfa_offset <= fs.sp_realigned_offset)
+ {
+ /* Validate that the cfa_offset isn't in a "no-man's land". */
+ gcc_assert (cfa_offset <= fs.sp_realigned_fp_last);
+ return false;
+ }
+ return fs.sp_valid;
}
-/* Determine if the frame pointer is valid for accessing the cfa_offset.
- The register is saved at CFA - CFA_OFFSET. */
+/* Determine if the frame pointer is valid for accessing the CFA_OFFSET in
+ the frame save area. The register is saved at CFA - CFA_OFFSET. */
static inline bool
fp_valid_at (HOST_WIDE_INT cfa_offset)
{
const struct machine_frame_state &fs = cfun->machine->fs;
- return fs.fp_valid && !(fs.sp_valid && fs.sp_realigned
- && cfa_offset > fs.sp_realigned_offset);
+ if (fs.sp_realigned && cfa_offset > fs.sp_realigned_fp_last)
+ {
+ /* Validate that the cfa_offset isn't in a "no-man's land". */
+ gcc_assert (cfa_offset >= fs.sp_realigned_offset);
+ return false;
+ }
+ return fs.fp_valid;
}
/* Choose a base register based upon alignment requested, speed and/or
}
/* Return an RTX that points to CFA_OFFSET within the stack frame and
- the alignment of address. If align is non-null, it should point to
+ the alignment of address. If ALIGN is non-null, it should point to
an alignment value (in bits) that is preferred or zero and will
- recieve the alignment of the base register that was selected. The
- valid base registers are taken from CFUN->MACHINE->FS. */
+ receive the alignment of the base register that was selected,
+ irrespective of whether or not CFA_OFFSET is a multiple of that
+ alignment value.
+
+ The valid base registers are taken from CFUN->MACHINE->FS. */
static rtx
choose_baseaddr (HOST_WIDE_INT cfa_offset, unsigned int *align)
rtx sym, addr;
rtx rax = gen_rtx_REG (word_mode, AX_REG);
const struct xlogue_layout &xlogue = xlogue_layout::get_instance ();
- HOST_WIDE_INT rax_offset = xlogue.get_stub_ptr_offset () + m->fs.sp_offset;
- HOST_WIDE_INT stack_alloc_size = frame.stack_pointer_offset - m->fs.sp_offset;
- HOST_WIDE_INT stack_align_off_in = xlogue.get_stack_align_off_in ();
+ HOST_WIDE_INT allocate = frame.stack_pointer_offset - m->fs.sp_offset;
+
+ /* AL should only be live with sysv_abi. */
+ gcc_assert (!ix86_eax_live_at_start_p ());
- /* Verify that the incoming stack 16-byte alignment offset matches the
- layout we're using. */
- gcc_assert (stack_align_off_in == (m->fs.sp_offset & UNITS_PER_WORD));
+ /* Setup RAX as the stub's base pointer. We use stack_realign_offset
+ regardless of whether we've actually realigned the stack or not. */
+ align = GET_MODE_ALIGNMENT (V4SFmode);
+ addr = choose_baseaddr (frame.stack_realign_offset
+ + xlogue.get_stub_ptr_offset (), &align);
+ gcc_assert (align >= GET_MODE_ALIGNMENT (V4SFmode));
+ emit_insn (gen_rtx_SET (rax, addr));
+
+ /* Allocate stack if not already done. */
+ if (allocate > 0)
+ pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (-allocate), -1, false);
/* Get the stub symbol. */
sym = xlogue.get_stub_rtx (frame_pointer_needed ? XLOGUE_STUB_SAVE_HFP
: XLOGUE_STUB_SAVE);
RTVEC_ELT (v, vi++) = gen_rtx_USE (VOIDmode, sym);
- /* Setup RAX as the stub's base pointer. */
- align = GET_MODE_ALIGNMENT (V4SFmode);
- addr = choose_baseaddr (rax_offset, &align);
- gcc_assert (align >= GET_MODE_ALIGNMENT (V4SFmode));
- insn = emit_insn (gen_rtx_SET (rax, addr));
-
- gcc_assert (stack_alloc_size >= xlogue.get_stack_space_used ());
- pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
- GEN_INT (-stack_alloc_size), -1,
- m->fs.cfa_reg == stack_pointer_rtx);
for (i = 0; i < ncregs; ++i)
{
const xlogue_layout::reginfo &r = xlogue.get_reginfo (i);
rtx reg = gen_rtx_REG ((SSE_REGNO_P (r.regno) ? V4SFmode : word_mode),
r.regno);
- RTVEC_ELT (v, vi++) = gen_frame_store (reg, rax, -r.offset);;
+ RTVEC_ELT (v, vi++) = gen_frame_store (reg, rax, -r.offset);
}
gcc_assert (vi == (unsigned)GET_NUM_ELEM (v));
int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;
gcc_assert (align_bytes > MIN_STACK_BOUNDARY / BITS_PER_UNIT);
+ /* Record last valid frame pointer offset. */
+ m->fs.sp_realigned_fp_last = frame.reg_save_offset;
+
/* The computation of the size of the re-aligned stack frame means
that we must allocate the size of the register save area before
performing the actual alignment. Otherwise we cannot guarantee
that there's enough storage above the realignment point. */
- allocate = frame.stack_realign_allocate_offset - m->fs.sp_offset;
- if (allocate && !m->call_ms2sysv)
+ allocate = frame.reg_save_offset - m->fs.sp_offset
+ + frame.stack_realign_allocate;
+ if (allocate)
pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
GEN_INT (-allocate), -1, false);
insn = emit_insn (ix86_gen_andsp (stack_pointer_rtx,
stack_pointer_rtx,
GEN_INT (-align_bytes)));
- /* For the purposes of register save area addressing, the stack
- pointer can no longer be used to access anything in the frame
- below m->fs.sp_realigned_offset and the frame pointer cannot be
- used for anything at or above. */
m->fs.sp_offset = ROUND_UP (m->fs.sp_offset, align_bytes);
- m->fs.sp_realigned = true;
- m->fs.sp_realigned_offset = m->fs.sp_offset - frame.nsseregs * 16;
+ m->fs.sp_realigned_offset = m->fs.sp_offset
+ - frame.stack_realign_allocate;
+ /* The stack pointer may no longer be equal to CFA - m->fs.sp_offset.
+ Beyond this point, stack access should be done via choose_baseaddr or
+ by using sp_valid_at and fp_valid_at to determine the correct base
+ register. Henceforth, any CFA offset should be thought of as logical
+ and not physical. */
+ gcc_assert (m->fs.sp_realigned_offset >= m->fs.sp_realigned_fp_last);
gcc_assert (m->fs.sp_realigned_offset == frame.stack_realign_offset);
+ m->fs.sp_realigned = true;
+
/* SEH unwind emit doesn't currently support REG_CFA_EXPRESSION, which
is needed to describe where a register is saved using a realigned
stack pointer, so we need to invalidate the stack pointer for that
so probe if the size is non-negative to preserve the protection area. */
if (allocate >= 0 && flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
{
- /* We expect the registers to be saved when probes are used. */
+ /* We expect the GP registers to be saved when probes are used. */
gcc_assert (int_registers_saved);
if (STACK_CHECK_MOVING_SP)
if (restore_regs_via_mov || frame.nsseregs)
{
/* Ensure that the entire register save area is addressable via
- the stack pointer, if we will restore via sp. */
+ the stack pointer, if we will restore SSE regs via sp. */
if (TARGET_64BIT
&& m->fs.sp_offset > 0x7fffffff
- && !(fp_valid_at (frame.stack_realign_offset) || m->fs.drap_valid)
+ && sp_valid_at (frame.stack_realign_offset)
&& (frame.nsseregs + frame.nregs) != 0)
{
pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
HOST_WIDE_INT stack_pointer_offset;
HOST_WIDE_INT hfp_save_offset;
HOST_WIDE_INT reg_save_offset;
- HOST_WIDE_INT stack_realign_allocate_offset;
+ HOST_WIDE_INT stack_realign_allocate;
HOST_WIDE_INT stack_realign_offset;
HOST_WIDE_INT sse_reg_save_offset;
bool save_regs_using_mov;
};
-/* Machine specific frame tracking during prologue/epilogue generation. */
+/* Machine specific frame tracking during prologue/epilogue generation. All
+ values are positive, but since the x86 stack grows downward, are subtracted
+ from the CFA to produce a valid address. */
struct GTY(()) machine_frame_state
{
/* Indicates whether the stack pointer has been re-aligned. When set,
SP/FP continue to be relative to the CFA, but the stack pointer
- should only be used for offsets >= sp_realigned_offset, while
- the frame pointer should be used for offsets < sp_realigned_offset.
+ should only be used for offsets > sp_realigned_offset, while
+ the frame pointer should be used for offsets <= sp_realigned_fp_last.
The flags realigned and sp_realigned are mutually exclusive. */
BOOL_BITFIELD sp_realigned : 1;
- /* If sp_realigned is set, this is the offset from the CFA that the
- stack pointer was realigned to. */
+ /* If sp_realigned is set, this is the last valid offset from the CFA
+ that can be used for access with the frame pointer. */
+ HOST_WIDE_INT sp_realigned_fp_last;
+
+ /* If sp_realigned is set, this is the offset from the CFA that the stack
+ pointer was realigned to, and may or may not be equal to sp_realigned_fp_last.
+ Access via the stack pointer is only valid for offsets that are greater than
+ this value. */
HOST_WIDE_INT sp_realigned_offset;
};
@item avx_runtime
Target supports the execution of @code{avx} instructions.
+@item avx2
+Target supports compiling @code{avx2} instructions.
+
+@item avx2_runtime
+Target supports the execution of @code{avx2} instructions.
+
+@item avx512f
+Target supports compiling @code{avx512f} instructions.
+
+@item avx512f_runtime
+Target supports the execution of @code{avx512f} instructions.
+
@item cell_hw
Test system can execute AltiVec and Cell PPU instructions.
+2017-08-23 Daniel Santos <daniel.santos@pobox.com>
+
+ * gcc.target/i386/pr80969-1.c: New testcase.
+ * gcc.target/i386/pr80969-2a.c: Likewise.
+ * gcc.target/i386/pr80969-2.c: Likewise.
+ * gcc.target/i386/pr80969-3.c: Likewise.
+ * gcc.target/i386/pr80969-4a.c: Likewise.
+ * gcc.target/i386/pr80969-4b.c: Likewise.
+ * gcc.target/i386/pr80969-4.c: Likewise.
+ * gcc.target/i386/pr80969-4.h: New header common to pr80969-4*.c
+ * lib/target-supports.exp (check_avx512_os_support_available,
+ check_avx512f_hw_available, check_effective_target_avx512f_runtime):
+ New procedures for target avx512f and avx512f_runtime selectors.
+ (check_avx2_hw_available): Fix breakage due to NULL being undefined.
+
2017-08-23 David Malcolm <dmalcolm@redhat.com>
* g++.dg/diagnostic/param-type-mismatch.C: New test acse.
--- /dev/null
+/* { dg-do run { target { ! x32 } } } */
+/* { dg-options "-Ofast -mabi=ms -mavx512f" } */
+/* { dg-require-effective-target avx512f } */
+
+int a[56];
+int b;
+int main (int argc, char *argv[]) {
+ int c;
+ for (; b; b++) {
+ c = b;
+ if (b & 1)
+ c = 2;
+ a[b] = c;
+ }
+ return 0;
+}
--- /dev/null
+/* { dg-do run { target { { ! x32 } && avx512f_runtime } } } */
+/* { dg-do compile { target { { ! x32 } && { ! avx512f_runtime } } } } */
+/* { dg-options "-Ofast -mabi=ms -mavx512f" } */
+/* { dg-require-effective-target avx512f } */
+
+/* Test when calling a sysv func. */
+
+int a[56];
+int b;
+
+static void __attribute__((sysv_abi)) sysv ()
+{
+}
+
+void __attribute__((sysv_abi)) (*volatile const sysv_noinfo)() = sysv;
+
+int main (int argc, char *argv[]) {
+ int c;
+ sysv_noinfo ();
+ for (; b; b++) {
+ c = b;
+ if (b & 1)
+ c = 2;
+ a[b] = c;
+ }
+ return 0;
+}
--- /dev/null
+/* { dg-do run { target { lp64 && avx512f_runtime } } } */
+/* { dg-do compile { target { lp64 && { ! avx512f_runtime } } } } */
+/* { dg-options "-Ofast -mabi=ms -mavx512f -mcall-ms2sysv-xlogues" } */
+/* { dg-require-effective-target avx512f } */
+
+/* Test when calling a sysv func using save/restore stubs. */
+
+#include "pr80969-2.c"
--- /dev/null
+/* { dg-do run { target { { ! x32 } && avx512f_runtime } } } */
+/* { dg-do compile { target { { ! x32 } && { ! avx512f_runtime } } } } */
+/* { dg-options "-Ofast -mabi=ms -mavx512f" } */
+/* { dg-require-effective-target avx512f } */
+
+/* Test with alloca (and DRAP). */
+
+#include <alloca.h>
+
+int a[56];
+volatile int b = -12345;
+volatile const int d = 42;
+
+void foo (int *x, int y, int z)
+{
+}
+
+void (*volatile const foo_noinfo)(int *, int, int) = foo;
+
+int main (int argc, char *argv[]) {
+ int c;
+ int *e = alloca (d);
+ foo_noinfo (e, d, 0);
+ for (; b; b++) {
+ c = b;
+ if (b & 1)
+ c = 2;
+ foo_noinfo (e, d, c);
+ a[-(b % 56)] = c;
+ }
+ return 0;
+}
--- /dev/null
+/* { dg-do run { target { { ! x32 } && avx512f_runtime } } } */
+/* { dg-do compile { target { { ! x32 } && { ! avx512f_runtime } } } } */
+/* { dg-options "-Ofast -mabi=ms -mavx512f" } */
+/* { dg-require-effective-target avx512f } */
+
+/* Test with avx512 and va_args. */
+
+#define CALLEE_ABI ms_abi
+#include "pr80969-4.h"
--- /dev/null
+
+#include <stdarg.h>
+#include <assert.h>
+
+#include "avx-check.h"
+
+int a[56];
+int b;
+
+__m128 n1 = { -283.3, -23.3, 213.4, 1119.03 };
+__m512d n2 = { -93.83, 893.318, 3994.3, -39484.0, 830.32, -328.32, 3.14159, 2.99792 };
+__m128i n3 = { 893, -3180 } ;
+int n4 = 324;
+double n5 = 103.3;
+__m128i n6 = { -123, 2 };
+__m128d n7 = { -91.387, -8193.518 };
+__m256d n8 = { -123.3, 2.3, 3.4, -10.03 };
+__m128 n9 = { -123.3, 2.3, 3.4, -10.03 };
+__m128i n10 = { 1233, -100 };
+int n11 = 407;
+double n12 = 304.9;
+__m128i n13 = { 233, -110 };
+__m256i n14 = { -1233, 23, 34, -1003 };
+__m512i n15 = { -393, -180, 213.4, 1119.03, -8193.518, -100, 304.9, 2.99792 };
+__m128d n16 = { 73.0, 63.18 };
+__m256 n17 = { -183.3, -22.3, 13.9, -119.3, 483.1, 122.3, -33.4, -9.37 };
+__m128 n18 = { -183.3, 22.3, 13.4, -19.03 };
+
+__m128 e1;
+__m512d e2;
+__m128i e3;
+int e4;
+double e5;
+__m128i e6;
+__m128d e7;
+__m256d e8;
+__m128 e9;
+__m128i e10;
+int e11;
+double e12;
+__m128i e13;
+__m256i e14;
+__m512i e15;
+__m128d e16;
+__m256 e17;
+__m128 e18;
+
+static void
+__attribute__((noinline, CALLEE_ABI))
+bar (__m128 a1, __m512d a2, __m128i a3, va_list va_arglist)
+{
+ e1 = a1;
+ e2 = a2;
+ e3 = a3;
+ e4 = va_arg (va_arglist, int);
+ e5 = va_arg (va_arglist, double);
+ e6 = va_arg (va_arglist, __m128i);
+ e7 = va_arg (va_arglist, __m128d);
+ e8 = va_arg (va_arglist, __m256d);
+ e9 = va_arg (va_arglist, __m128);
+ e10 = va_arg (va_arglist, __m128i);
+ e11 = va_arg (va_arglist, int);
+ e12 = va_arg (va_arglist, double);
+ e13 = va_arg (va_arglist, __m128i);
+ e14 = va_arg (va_arglist, __m256i);
+ e15 = va_arg (va_arglist, __m512i);
+ e16 = va_arg (va_arglist, __m128d);
+ e17 = va_arg (va_arglist, __m256);
+ e18 = va_arg (va_arglist, __m128);
+}
+
+void __attribute__((CALLEE_ABI))
+(*volatile const bar_noinfo) (__m128, __m512d, __m128i, va_list) = bar;
+
+static void
+__attribute__((noinline))
+foo (__m128 a1, __m512d a2, __m128i a3, ...)
+{
+ va_list va_arglist;
+ int c;
+
+ va_start (va_arglist, a3);
+ bar_noinfo (a1, a2, a3, va_arglist);
+ va_end (va_arglist);
+
+ for (; b; b++) {
+ c = b;
+ if (b & 1)
+ c = 2;
+ a[b] = c;
+ }
+}
+void (*volatile const foo_noinfo) (__m128, __m512d, __m128i, ...) = foo;
+
+static void
+avx_test (void)
+{
+ foo (n1, n2, n3, n4, n5, n6, n7, n8, n9, n10, n11, n12,
+ n13, n14, n15, n16, n17, n18);
+ assert (__builtin_memcmp (&e1, &n1, sizeof (e1)) == 0);
+ assert (__builtin_memcmp (&e2, &n2, sizeof (e2)) == 0);
+ assert (__builtin_memcmp (&e3, &n3, sizeof (e3)) == 0);
+ assert (n4 == e4);
+ assert (n5 == e5);
+ assert (__builtin_memcmp (&e6, &n6, sizeof (e6)) == 0);
+ assert (__builtin_memcmp (&e7, &n7, sizeof (e7)) == 0);
+ assert (__builtin_memcmp (&e8, &n8, sizeof (e8)) == 0);
+ assert (__builtin_memcmp (&e9, &n9, sizeof (e9)) == 0);
+ assert (__builtin_memcmp (&e10, &n10, sizeof (e10)) == 0);
+ assert (n11 == e11);
+ assert (n12 == e12);
+ assert (__builtin_memcmp (&e13, &n13, sizeof (e13)) == 0);
+ assert (__builtin_memcmp (&e14, &n14, sizeof (e14)) == 0);
+ assert (__builtin_memcmp (&e15, &n15, sizeof (e15)) == 0);
+ assert (__builtin_memcmp (&e16, &n16, sizeof (e16)) == 0);
+ assert (__builtin_memcmp (&e17, &n17, sizeof (e17)) == 0);
+ assert (__builtin_memcmp (&e18, &n18, sizeof (e18)) == 0);
+}
+
--- /dev/null
+/* { dg-do run { target { { ! x32 } && avx512f_runtime } } } */
+/* { dg-do compile { target { { ! x32 } && { ! avx512f_runtime } } } } */
+/* { dg-options "-Ofast -mabi=ms -mavx512f" } */
+/* { dg-require-effective-target avx512f } */
+
+/* Test with avx512, va_args, and ms to sysv call. */
+
+#define CALLEE_ABI sysv_abi
+#include "pr80969-4.h"
--- /dev/null
+/* { dg-do run { target { lp64 && avx512f_runtime } } } */
+/* { dg-do compile { target { lp64 && { ! avx512f_runtime } } } } */
+/* { dg-options "-Ofast -mabi=ms -mavx512f -mcall-ms2sysv-xlogues" } */
+/* { dg-require-effective-target avx512f } */
+
+/* Test with avx512, va_args, and ms to sysv call using save/restore stubs. */
+
+#define CALLEE_ABI sysv_abi
+#include "pr80969-4.h"
}]
}
+# Return 1 if the target OS supports running AVX512 executables, 0
+# otherwise. Cache the result.
+
+proc check_avx512_os_support_available { } {
+ return [check_cached_effective_target avx512_os_support_available {
+ # If this is not the right target then we can skip the test.
+ if { !([istarget i?86-*-*] || [istarget x86_64-*-*]) } {
+ expr 0
+ } else {
+ # Check that OS has AVX512, AVX and SSE saving enabled.
+ check_runtime_nocache avx512_os_support_available {
+ int main ()
+ {
+ unsigned int eax, edx;
+
+ asm ("xgetbv" : "=a" (eax), "=d" (edx) : "c" (0));
+ return (eax & 0xe6) != 0xe6;
+ }
+ } ""
+ }
+ }]
+}
+
# Return 1 if the target supports executing SSE instructions, 0
# otherwise. Cache the result.
expr 0
} else {
check_runtime_nocache avx2_hw_available {
+ #include <stddef.h>
#include "cpuid.h"
int main ()
{
}]
}
+# Return 1 if the target supports executing AVX512 foundation instructions, 0
+# otherwise. Cache the result.
+
+proc check_avx512f_hw_available { } {
+ return [check_cached_effective_target avx512f_hw_available {
+ # If this is not the right target then we can skip the test.
+ if { !([istarget x86_64-*-*] || [istarget i?86-*-*]) } {
+ expr 0
+ } else {
+ check_runtime_nocache avx512f_hw_available {
+ #include <stddef.h>
+ #include "cpuid.h"
+ int main ()
+ {
+ unsigned int eax, ebx, ecx, edx;
+ if (!__get_cpuid (1, &eax, &ebx, &ecx, &edx)
+ || !(ecx & bit_OSXSAVE))
+ return 1;
+
+ if (__get_cpuid_max (0, NULL) < 7)
+ return 1;
+
+ __cpuid_count (7, 0, eax, ebx, ecx, edx);
+
+ return !(ebx & bit_AVX512F);
+ }
+ } ""
+ }
+ }]
+}
+
# Return 1 if the target supports running SSE executables, 0 otherwise.
proc check_effective_target_sse_runtime { } {
return 0
}
+# Return 1 if the target supports running AVX512f executables, 0 otherwise.
+
+proc check_effective_target_avx512f_runtime { } {
+ if { [check_effective_target_avx512f]
+ && [check_avx512f_hw_available]
+ && [check_avx512_os_support_available] } {
+ return 1
+ }
+ return 0
+}
+
# Return 1 if we are compiling for 64-bit PowerPC but we do not use direct
# move instructions for moves from GPR to FPR.