-# Copyright (C) 1999-2018 Free Software Foundation, Inc.
+# Copyright (C) 1999-2019 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# Assume by default that CONTENTS is C code.
# Otherwise, code should contain:
# "// C++" for c++,
+# "// D" for D,
# "! Fortran" for Fortran code,
# "/* ObjC", for ObjC
# "// ObjC++" for ObjC++
switch -glob -- $contents {
"*! Fortran*" { set src ${basename}[pid].f90 }
"*// C++*" { set src ${basename}[pid].cc }
+ "*// D*" { set src ${basename}[pid].d }
"*// ObjC++*" { set src ${basename}[pid].mm }
"*/* ObjC*" { set src ${basename}[pid].m }
"*// Go*" { set src ${basename}[pid].go }
return 1
}
+ # pdp11 doesn't support it
+
+ if { [istarget pdp11*-*-*] } {
+ return 0
+ }
+
# ELF and ECOFF support it. a.out does with gas/gld but may also with
# other linkers, so we should try it
# return 1 if weak undefined symbols are supported.
proc check_effective_target_weak_undefined { } {
+ if { [istarget hppa*-*-hpux*] } {
+ return 0
+ }
return [check_runtime weak_undefined {
extern void foo () __attribute__((weak));
int main (void) { if (foo) return 1; return 0; }
# missing other needed machinery.
if {[istarget aarch64*-*-elf]
|| [istarget am3*-*-linux*]
+ || [istarget amdgcn-*-*]
|| [istarget arm*-*-eabi*]
|| [istarget arm*-*-elf]
|| [istarget arm*-*-symbianelf*]
if { [istarget nvptx-*-*] } {
return 0
}
+ if { [istarget amdgcn-*-*] } {
+ return 0
+ }
return 1
}
if { [istarget nvptx-*-*] } {
return 0
}
+ # It could be supported on amdgcn, but isn't yet.
+ if { [istarget amdgcn*-*-*] } {
+ return 0
+ }
return 1
}
# code, 0 otherwise.
proc check_effective_target_fopenacc {} {
- # nvptx can be built with the device-side bits of openacc, but it
+ # nvptx/amdgcn can be built with the device-side bits of openacc, but it
# does not make sense to test it as an openacc host.
if [istarget nvptx-*-*] { return 0 }
+ if [istarget amdgcn-*-*] { return 0 }
return [check_no_compiler_messages fopenacc object {
void foo (void) { }
# code, 0 otherwise.
proc check_effective_target_fopenmp {} {
- # nvptx can be built with the device-side bits of libgomp, but it
+ # nvptx/amdgcn can be built with the device-side bits of libgomp, but it
# does not make sense to test it as an openmp host.
if [istarget nvptx-*-*] { return 0 }
+ if [istarget amdgcn-*-*] { return 0 }
return [check_no_compiler_messages fopenmp object {
void foo (void) { }
} "-freorder-blocks-and-partition"]
&& [check_no_compiler_messages fprofile_use_freorder object {
void foo (void) { }
- } "-fprofile-use -freorder-blocks-and-partition"] } {
+ } "-fprofile-use -freorder-blocks-and-partition -Wno-missing-profile"] } {
return 1
}
return 0
|| [istarget *-*-dragonfly*]
|| [istarget *-*-freebsd*]
|| [istarget *-*-linux*]
- || [istarget *-*-gnu*] } {
+ || [istarget *-*-gnu*]
+ || [istarget *-*-amdhsa]} {
return 1;
}
if { [istarget *-*-solaris2.1\[1-9\]*] } {
# Return 1 if the target supports executing Loongson vector instructions,
# 0 otherwise. Cache the result.
-proc check_mips_loongson_hw_available { } {
- return [check_cached_effective_target mips_loongson_hw_available {
+proc check_mips_loongson_mmi_hw_available { } {
+ return [check_cached_effective_target mips_loongson_mmi_hw_available {
# If this is not the right target then we can skip the test.
if { !([istarget mips*-*-*]) } {
expr 0
} else {
- check_runtime_nocache mips_loongson_hw_available {
- #include <loongson.h>
+ check_runtime_nocache mips_loongson_mmi_hw_available {
+ #include <loongson-mmiintrin.h>
int main()
{
asm volatile ("paddw $f2,$f4,$f6");
return 0;
}
- } ""
+ } "-mloongson-mmi"
}
}]
}
# Return 1 if the target supports running Loongson executables, 0 otherwise.
-proc check_effective_target_mips_loongson_runtime { } {
- if { [check_effective_target_mips_loongson]
- && [check_mips_loongson_hw_available] } {
+proc check_effective_target_mips_loongson_mmi_runtime { } {
+ if { [check_effective_target_mips_loongson_mmi]
+ && [check_mips_loongson_mmi_hw_available] } {
return 1
}
return 0
}]
}
-# Return 1 if we support 32-bit or larger array and structure sizes
-# using default options, 0 otherwise. Avoid false positive on
-# targets with 20 or 24 bit address spaces.
+# Return 1 if we support 16-bit or larger array and structure sizes
+# using default options, 0 otherwise.
+# This implies at least a 20-bit address space, as no targets have an address
+# space between 16 and 20 bits.
+
+proc check_effective_target_size20plus { } {
+    # Compile-only probe: an object larger than 64KiB must be representable.
+    return [check_no_compiler_messages size20plus object {
+	char dummy[65537L];
+    }]
+}
+
+# Return 1 if we support 24-bit or larger array and structure sizes
+# using default options, 0 otherwise.
+# This implies at least a 32-bit address space, as no targets have an address
+# space between 24 and 32 bits.
proc check_effective_target_size32plus { } {
return [check_no_compiler_messages size32plus object {
}]
}
+# Return 1 if int size is at least float size (the probe below accepts
+# sizeof (int) >= sizeof (float)), 0 otherwise.
+
+proc check_effective_target_int_eq_float { } {
+    # NOTE(review): despite the keyword name, the probe accepts
+    # sizeof (int) >= sizeof (float), not strict equality -- confirm intent.
+    return [check_no_compiler_messages int_eq_float object {
+	int dummy[sizeof (int) >= sizeof (float) ? 1 : -1];
+    }]
+}
+
+# Return 1 if pointer size is equal to long size,
+# 0 otherwise.
+
+proc check_effective_target_ptr_eq_long { } {
+    # sizeof (void *) == 4 for msp430-elf -mlarge which is equal to
+    # sizeof (long). Avoid false positive.
+    if { [istarget msp430-*-*] } {
+	return 0
+    }
+    # Compile-only probe: the array size is negative (ill-formed) when the
+    # two sizes differ.
+    return [check_no_compiler_messages ptr_eq_long object {
+	int dummy[sizeof (void *) == sizeof (long) ? 1 : -1];
+    }]
+}
+
# Return 1 if the target supports long double larger than double,
# 0 otherwise.
[istarget i?86-*-*] || [istarget x86_64-*-*]
|| ([istarget powerpc*-*-*]
&& ![istarget powerpc-*-linux*paired*])
+ || [istarget amdgcn-*-*]
|| [istarget spu-*-*]
|| [istarget sparc*-*-*]
|| [istarget alpha*-*-*]
|| [istarget aarch64*-*-*]
|| [is-effective-target arm_neon]
|| ([istarget mips*-*-*]
- && ([et-is-effective-target mips_loongson]
+ && ([et-is-effective-target mips_loongson_mmi]
|| [et-is-effective-target mips_msa]))
|| ([istarget s390*-*-*]
&& [check_effective_target_s390_vx])
&& ![istarget powerpc-*-linux*paired*])
|| [is-effective-target arm_neon]
|| ([istarget mips*-*-*]
- && [et-is-effective-target mips_msa]) }}]
+ && [et-is-effective-target mips_msa])
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if the target supports signed double->int conversion
|| [istarget aarch64*-*-*]
|| [is-effective-target arm_neon]
|| ([istarget mips*-*-*]
- && [et-is-effective-target mips_msa]) }}]
+ && [et-is-effective-target mips_msa])
+ || [istarget amdgcn-*-*] }}]
}
&& ![istarget powerpc-*-linux*paired*])
|| [is-effective-target arm_neon]
|| ([istarget mips*-*-*]
- && [et-is-effective-target mips_msa]) }}]
+ && [et-is-effective-target mips_msa])
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if the target supports unsigned float->int conversion
&& ![istarget powerpc-*-linux*paired*])
|| [is-effective-target arm_neon]
|| ([istarget mips*-*-*]
- && [et-is-effective-target mips_msa]) }}]
+ && [et-is-effective-target mips_msa])
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if peeling for alignment might be profitable on the target
# be able to assemble avx512f.
return [check_cached_effective_target_indexed vect_simd_clones {
expr { (([istarget i?86-*-*] || [istarget x86_64-*-*])
- && [check_effective_target_avx512f]) }}]
+ && [check_effective_target_avx512f])
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if this is a AArch64 target supporting big endian
check_effective_target_arm_neon_fp16_ok_nocache]
}
+# Return 1 if this is an ARM target supporting -mfpu=neon-fp16
+# and -mfloat-abi=softfp together. Some multilibs may be
+# incompatible with these options. Also set et_arm_neon_softfp_fp16_flags to
+# the best options to add.
+
+proc check_effective_target_arm_neon_softfp_fp16_ok_nocache { } {
+    global et_arm_neon_softfp_fp16_flags
+    global et_arm_neon_flags
+    # Cleared first so a failed probe leaves no stale flags behind.
+    set et_arm_neon_softfp_fp16_flags ""
+    if { [check_effective_target_arm32]
+	 && [check_effective_target_arm_neon_ok] } {
+	# Try without, then with, an explicit -mfp16-format=ieee; the first
+	# combination that compiles the vcvt_f16_f32 probe wins.
+	foreach flags {"-mfpu=neon-fp16 -mfloat-abi=softfp"
+		       "-mfpu=neon-fp16 -mfloat-abi=softfp -mfp16-format=ieee"} {
+	    if { [check_no_compiler_messages_nocache arm_neon_softfp_fp16_ok object {
+		#include "arm_neon.h"
+		float16x4_t
+		foo (float32x4_t arg)
+		{
+		  return vcvt_f16_f32 (arg);
+		}
+	    } "$et_arm_neon_flags $flags"] } {
+		set et_arm_neon_softfp_fp16_flags [concat $et_arm_neon_flags $flags]
+		return 1
+	    }
+	}
+    }
+
+    return 0
+}
+
+# Cached wrapper: returns 1 if -mfpu=neon-fp16 with -mfloat-abi=softfp
+# is usable on this target.
+proc check_effective_target_arm_neon_softfp_fp16_ok { } {
+    return [check_cached_effective_target arm_neon_softfp_fp16_ok \
+		check_effective_target_arm_neon_softfp_fp16_ok_nocache]
+}
+
+
+
proc check_effective_target_arm_neon_fp16_hw { } {
if {! [check_effective_target_arm_neon_fp16_ok] } {
return 0
return "$flags $et_arm_neon_fp16_flags"
}
+# Append the flags found by the arm_neon_softfp_fp16 probe to FLAGS,
+# or return FLAGS unchanged when the feature is unsupported.
+proc add_options_for_arm_neon_softfp_fp16 { flags } {
+    if { ! [check_effective_target_arm_neon_softfp_fp16_ok] } {
+	return "$flags"
+    }
+    global et_arm_neon_softfp_fp16_flags
+    return "$flags $et_arm_neon_softfp_fp16_flags"
+}
+
# Return 1 if this is an ARM target supporting the FP16 alternative
# format. Some multilibs may be incompatible with the options needed. Also
# set et_arm_neon_fp16_flags to the best options to add.
} [add_options_for_arm_neonv2 ""]]
}
+# ID_AA64PFR1_EL1.BT using bits[3:0] == 1 implies BTI is implemented.
+proc check_effective_target_aarch64_bti_hw { } {
+    if { ![istarget aarch64*-*-*] } {
+	return 0
+    }
+    # Runtime probe: read ID_AA64PFR1_EL1 and test the BT field (bits[3:0]).
+    # main returns 0 (success) only when the field equals 1.
+    # NOTE(review): reading this system register from EL0 relies on kernel
+    # MRS emulation -- confirm the probe is safe on all test systems.
+    return [check_runtime aarch64_bti_hw_available {
+	int
+	main (void)
+	{
+	  int a;
+	  asm volatile ("mrs %0, id_aa64pfr1_el1" : "=r" (a));
+	  return !((a & 0xf) == 1);
+	}
+    } "-O2" ]
+}
+
+# Return 1 if GCC was configured with --enable-standard-branch-protection
+proc check_effective_target_default_branch_protection { } {
+    # Matches the configure option name via check_configured_with.
+    return [check_configured_with "enable-standard-branch-protection"]
+}
+
# Return 1 if the target supports the ARMv8.1 Adv.SIMD extension, 0
# otherwise. The test is valid for AArch64 and ARM. Record the command
# line options needed.
foreach flags {"" "-mfloat-abi=softfp -mfpu=neon-fp-armv8" "-mfloat-abi=hard -mfpu=neon-fp-armv8"} {
if { [check_no_compiler_messages_nocache \
arm_v8_2a_dotprod_neon_ok object {
+ #include <stdint.h>
#if !defined (__ARM_FEATURE_DOTPROD)
#error "__ARM_FEATURE_DOTPROD not defined"
#endif
float32x2_t
foo (float32x2_t r, float16x4_t a, float16x4_t b)
{
- return vfmlal_high_u32 (r, a, b);
+ return vfmlal_high_f16 (r, a, b);
}
} "$flags -march=armv8.2-a+fp16fml"] } {
set et_arm_fp16fml_neon_flags "$flags -march=armv8.2-a+fp16fml"
return "$flags -mmsa"
}
+# Add the options needed for MIPS Loongson MMI Architecture.
+
+proc add_options_for_mips_loongson_mmi { flags } {
+    # Return FLAGS unchanged when the target lacks Loongson MMI support.
+    if { ! [check_effective_target_mips_loongson_mmi] } {
+	return "$flags"
+    }
+    return "$flags -mloongson-mmi"
+}
+
+
# Return 1 if this a Loongson-2E or -2F target using an ABI that supports
# the Loongson vector modes.
-proc check_effective_target_mips_loongson { } {
+proc check_effective_target_mips_loongson_mmi { } {
return [check_no_compiler_messages loongson assembly {
+ #if !defined(__mips_loongson_mmi)
+ #error !__mips_loongson_mmi
+ #endif
#if !defined(__mips_loongson_vector_rev)
#error !__mips_loongson_vector_rev
#endif
} "-O2 -mthumb" ]
}
+# Return true if LDRD/STRD instructions are available on this target.
+proc check_effective_target_arm_ldrd_strd_ok { } {
+    if { ![check_effective_target_arm32] } {
+      return 0;
+    }
+
+    # Compile-only probe ("object"): the asm is assembled, never executed.
+    return [check_no_compiler_messages arm_ldrd_strd_ok object {
+      int main(void)
+      {
+        __UINT64_TYPE__ a = 1, b = 10;
+        __UINT64_TYPE__ *c = &b;
+        // `a` will be in a valid register since it's a DImode quantity.
+        asm ("ldrd %0, %1"
+             : "=r" (a)
+             : "m" (c));
+        return a == 10;
+      }
+    }]
+}
+
# Return 1 if this is a PowerPC target supporting -meabi.
proc check_effective_target_powerpc_eabi_ok { } {
|| [is-effective-target arm_neon]
|| ([istarget mips*-*-*]
&& ([et-is-effective-target mips_msa]
- || [et-is-effective-target mips_loongson]))
+ || [et-is-effective-target mips_loongson_mmi]))
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) }}]
+ && [check_effective_target_s390_vx])
+ || [istarget amdgcn-*-*] }}]
+}
+
+# Return 1 if the target supports hardware vector shift by register operation.
+
+proc check_effective_target_vect_var_shift { } {
+    # Currently only x86 when AVX2 is enabled by the compile flags.
+    return [check_cached_effective_target_indexed vect_var_shift {
+	expr {(([istarget i?86-*-*] || [istarget x86_64-*-*])
+	       && [check_avx2_available])
+    }}]
+}
proc check_effective_target_whole_vector_shift { } {
|| ([is-effective-target arm_neon]
&& [check_effective_target_arm_little_endian])
|| ([istarget mips*-*-*]
- && [et-is-effective-target mips_loongson])
+ && [et-is-effective-target mips_loongson_mmi])
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) } {
+ && [check_effective_target_s390_vx])
+ || [istarget amdgcn-*-*] } {
set answer 1
} else {
set answer 0
proc check_effective_target_vect_bswap { } {
return [check_cached_effective_target_indexed vect_bswap {
- expr { [istarget aarch64*-*-*] || [is-effective-target arm_neon] }}]
+ expr { [istarget aarch64*-*-*]
+ || [is-effective-target arm_neon]
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if the target supports hardware vector shift operation for char.
|| ([istarget mips*-*-*]
&& [et-is-effective-target mips_msa])
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) }}]
+ && [check_effective_target_s390_vx])
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if the target supports hardware vectors of long, 0 otherwise.
|| ([istarget mips*-*-*]
&& [et-is-effective-target mips_msa])
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) } {
+ && [check_effective_target_s390_vx])
+ || [istarget amdgcn-*-*] } {
set answer 1
} else {
set answer 0
&& [et-is-effective-target mips_msa])
|| [is-effective-target arm_neon]
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vxe]) }}]
+ && [check_effective_target_s390_vxe])
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if the target supports hardware vectors of float without
|| ([istarget mips*-*-*]
&& [et-is-effective-target mips_msa])
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx])} }]
+ && [check_effective_target_s390_vx])
+ || [istarget amdgcn-*-*]} }]
}
# Return 1 if the target supports conditional addition, subtraction,
|| [istarget spu-*-*]
|| [istarget alpha*-*-*]
|| ([istarget mips*-*-*]
- && [et-is-effective-target mips_loongson]) }}]
+ && [et-is-effective-target mips_loongson_mmi]) }}]
}
# Return 1 if the target plus current options does not support a vector
&& ([et-is-effective-target mpaired_single]
|| [et-is-effective-target mips_msa]))
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) }}]
+ && [check_effective_target_s390_vx])
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if, for some VF:
|| ([istarget mips-*.*]
&& [et-is-effective-target mips_msa])
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) }}]
+ && [check_effective_target_s390_vx])
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if the target supports SLP permutation of 3 vectors when each
|| ([istarget mips*-*-*]
&& [et-is-effective-target mips_msa])
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) }}]
+ && [check_effective_target_s390_vx])
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if the target supports SLP permutation of 3 vectors when each
proc check_effective_target_xorsign { } {
return [check_cached_effective_target_indexed xorsign {
- expr { [istarget aarch64*-*-*] || [istarget arm*-*-*] }}]
+ expr { [istarget i?86-*-*] || [istarget x86_64-*-*]
+ || [istarget aarch64*-*-*] || [istarget arm*-*-*] }}]
}
# Return 1 if the target plus current options supports a vector
&& ![check_effective_target_aarch64_sve])
|| [is-effective-target arm_neon]
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx])) }}]
+ && [check_effective_target_s390_vx]))
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if the target plus current options supports a vector
|| [istarget i?86-*-*] || [istarget x86_64-*-*]
|| [is-effective-target arm_neon]
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx])) }}]
+ && [check_effective_target_s390_vx]))
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if the target plus current options supports a vector
|| ([is-effective-target arm_neon]
&& [check_effective_target_arm_little_endian])
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) }}]
+ && [check_effective_target_s390_vx])
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if the target plus current options supports a vector
|| ([is-effective-target arm_neon]
&& [check_effective_target_arm_little_endian])
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) }}]
+ && [check_effective_target_s390_vx])
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if the target plus current options supports a vector
proc check_effective_target_vect_usad_char { } {
return [check_cached_effective_target_indexed vect_usad_char {
- expr { [istarget i?86-*-*] || [istarget x86_64-*-*] }}]
+ expr { [istarget i?86-*-*]
+ || [istarget x86_64-*-*]
+ || ([istarget aarch64*-*-*]
+ && ![check_effective_target_aarch64_sve])
+ || ([istarget powerpc*-*-*]
+ && [check_p9vector_hw_available])}}]
}
# Return 1 if the target plus current options supports both signed
|| [check_effective_target_arm_vect_no_misalign]
|| ([istarget powerpc*-*-*] && [check_p8vector_hw_available])
|| ([istarget mips*-*-*]
- && [et-is-effective-target mips_loongson]) }}]
+ && [et-is-effective-target mips_loongson_mmi]) }}]
}
# Return 1 if the target supports a vector misalign access, 0 otherwise.
set et_vect_natural_alignment 1
if { [check_effective_target_arm_eabi]
|| [istarget nvptx-*-*]
- || [istarget s390*-*-*] } {
+ || [istarget s390*-*-*]
+ || [istarget amdgcn-*-*] } {
set et_vect_natural_alignment 0
}
verbose "check_effective_target_vect_natural_alignment:\
# Return true if fully-masked loops are supported.
proc check_effective_target_vect_fully_masked { } {
- return [check_effective_target_aarch64_sve]
+ return [expr { [check_effective_target_aarch64_sve]
+ || [istarget amdgcn*-*-*] }]
}
# Return 1 if the target doesn't prefer any alignment beyond element
return [check_cached_effective_target_indexed vect_element_align {
expr { ([istarget arm*-*-*]
&& ![check_effective_target_arm_vect_no_misalign])
- || [check_effective_target_vect_hw_misalign] }}]
+ || [check_effective_target_vect_hw_misalign]
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if we expect to see unaligned accesses in at least some
# Return 1 if the target supports vector masked stores.
proc check_effective_target_vect_masked_store { } {
- return [check_effective_target_aarch64_sve]
+ return [expr { [check_effective_target_aarch64_sve]
+ || [istarget amdgcn*-*-*] }]
}
# Return 1 if the target supports vector scatter stores.
proc check_effective_target_vect_scatter_store { } {
- return [check_effective_target_aarch64_sve]
+ return [expr { [check_effective_target_aarch64_sve]
+ || [istarget amdgcn*-*-*] }]
}
# Return 1 if the target supports vector conditional operations, 0 otherwise.
|| ([istarget arm*-*-*]
&& [check_effective_target_arm_neon_ok])
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) }}]
+ && [check_effective_target_s390_vx])
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if the target supports vector conditional operations where
|| ([istarget mips*-*-*]
&& [et-is-effective-target mips_msa])
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) }}]
+ && [check_effective_target_s390_vx])
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if the target supports vector char multiplication, 0 otherwise.
|| ([istarget mips*-*-*]
&& [et-is-effective-target mips_msa])
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) }}]
+ && [check_effective_target_s390_vx])
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if the target supports vector short multiplication, 0 otherwise.
|| [check_effective_target_arm32]
|| ([istarget mips*-*-*]
&& ([et-is-effective-target mips_msa]
- || [et-is-effective-target mips_loongson]))
+ || [et-is-effective-target mips_loongson_mmi]))
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) }}]
+ && [check_effective_target_s390_vx])
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if the target supports vector int multiplication, 0 otherwise.
&& [et-is-effective-target mips_msa])
|| [check_effective_target_arm32]
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) }}]
+ && [check_effective_target_s390_vx])
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if the target supports 64 bit hardware vector
|| [istarget aarch64*-*-*]) && N >= 2 && N <= 4 } {
return 1
}
+ if [check_effective_target_vect_fully_masked] {
+ return 1
+ }
return 0
}]
}
|| [istarget aarch64*-*-*]
|| ([istarget arm*-*-*] && [check_effective_target_arm_vfp_ok])
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) }}]
+ && [check_effective_target_s390_vx])
+ || [istarget amdgcn-*-*] }}]
+}
+
+# Return any additional options to enable square root instructions.
+
+proc add_options_for_sqrt_insn { flags } {
+    # amdgcn: sqrt instructions are gated behind -ffast-math here.
+    if { [istarget amdgcn*-*-*] } {
+	return "$flags -ffast-math"
+    }
+    return $flags
+}
# Return 1 if the target supports vector sqrtf calls.
proc check_effective_target_vect_call_lrint { } {
set et_vect_call_lrint 0
if { (([istarget i?86-*-*] || [istarget x86_64-*-*])
- && [check_effective_target_ilp32]) } {
+ && [check_effective_target_ilp32])
+ || [istarget amdgcn-*-*] } {
set et_vect_call_lrint 1
}
proc check_effective_target_vect_call_btrunc { } {
return [check_cached_effective_target_indexed vect_call_btrunc {
- expr { [istarget aarch64*-*-*] }}]
+ expr { [istarget aarch64*-*-*]
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if the target supports vector btruncf calls.
proc check_effective_target_vect_call_btruncf { } {
return [check_cached_effective_target_indexed vect_call_btruncf {
- expr { [istarget aarch64*-*-*] }}]
+ expr { [istarget aarch64*-*-*]
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if the target supports vector ceil calls.
proc check_effective_target_vect_call_ceil { } {
return [check_cached_effective_target_indexed vect_call_ceil {
- expr { [istarget aarch64*-*-*] }}]
+ expr { [istarget aarch64*-*-*]
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if the target supports vector ceilf calls.
proc check_effective_target_vect_call_floorf { } {
return [check_cached_effective_target_indexed vect_call_floorf {
- expr { [istarget aarch64*-*-*] }}]
+ expr { [istarget aarch64*-*-*]
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if the target supports vector lceil calls.
}
}
+# Return 1 if the target supports popcount on long.
+
+proc check_effective_target_popcountl { } {
+    # Passes when the rtl-expand dump contains no "(call" -- i.e. when
+    # __builtin_popcountl expands inline instead of calling libgcc.
+    return [check_no_messages_and_pattern popcountl "!\\(call" rtl-expand {
+	int foo (long b)
+	{
+	    return __builtin_popcountl (b);
+	}
+    } "" ]
+}
+
# Return 1 if the target supports atomic operations on "long long"
# and can execute them.
#
}]
}
+# Return true if GCC was configured with --enable-newlib-nano-formatted-io
+proc check_effective_target_newlib_nano_io { } {
+    # Matches the configure option name via check_configured_with.
+    return [check_configured_with "--enable-newlib-nano-formatted-io"]
+}
+
# Some newlib versions don't provide a frexpl and instead depend
# on frexp to implement long double conversions in their printf-like
# functions. This leads to broken results. Detect such versions here.
}]
}
+# Return 1 if the target provides the D runtime.
+
+proc check_effective_target_d_runtime { } {
+    # Links a minimal D program; the "// D" marker is what selects the D
+    # front end (see the content-type switch earlier in this file).
+    return [check_no_compiler_messages d_runtime executable {
+	// D
+	module mod;
+
+	extern(C) int main() {
+	    return 0;
+	}
+    }]
+}
+
# Return 1 if target wchar_t is at least 4 bytes.
proc check_effective_target_4byte_wchar_t { } {
return 0;
}
+# Return true if we are compiling for AVX2 target.
+
+proc check_avx2_available { } {
+  # __AVX2__ is predefined only when the current flags/multilib enable AVX2.
+  if { [check_no_compiler_messages avx2_available assembly {
+    #ifndef __AVX2__
+    #error unsupported
+    #endif
+  } ""] } {
+    return 1;
+  }
+  return 0;
+}
+
# Return true if we are compiling for SSSE3 target.
proc check_ssse3_available { } {
} "-O2 -mf16c" ]
}
+# Return 1 if the __ms_hook_prologue__ attribute can be compiled.
+proc check_effective_target_ms_hook_prologue { } {
+    if { [check_no_compiler_messages ms_hook_prologue object {
+	void __attribute__ ((__ms_hook_prologue__)) foo ();
+    } ""] } {
+	return 1
+    } else {
+	return 0
+    }
+}
+
+# Return 1 if 3dnow instructions can be compiled.
+proc check_effective_target_3dnow { } {
+    return [check_no_compiler_messages 3dnow object {
+	typedef int __m64 __attribute__ ((__vector_size__ (8)));
+	typedef float __v2sf __attribute__ ((__vector_size__ (8)));
+
+	__m64 _m_pfadd (__m64 __A, __m64 __B)
+	{
+	    return (__m64) __builtin_ia32_pfadd ((__v2sf)__A, (__v2sf)__B);
+	}
+    } "-O2 -m3dnow" ]
+}
+
+# Return 1 if sse3 instructions can be compiled.
+proc check_effective_target_sse3 { } {
+    return [check_no_compiler_messages sse3 object {
+	typedef double __m128d __attribute__ ((__vector_size__ (16)));
+	typedef double __v2df __attribute__ ((__vector_size__ (16)));
+
+	__m128d _mm_addsub_pd (__m128d __X, __m128d __Y)
+	{
+	    return (__m128d) __builtin_ia32_addsubpd ((__v2df)__X, (__v2df)__Y);
+	}
+    } "-O2 -msse3" ]
+}
+
+# Return 1 if ssse3 instructions can be compiled.
+proc check_effective_target_ssse3 { } {
+    return [check_no_compiler_messages ssse3 object {
+	typedef long long __m128i __attribute__ ((__vector_size__ (16)));
+	typedef int __v4si __attribute__ ((__vector_size__ (16)));
+
+	__m128i _mm_abs_epi32 (__m128i __X)
+	{
+	    return (__m128i) __builtin_ia32_pabsd128 ((__v4si)__X);
+	}
+    } "-O2 -mssse3" ]
+}
+
+# Return 1 if aes instructions can be compiled.
+proc check_effective_target_aes { } {
+    return [check_no_compiler_messages aes object {
+	typedef long long __m128i __attribute__ ((__vector_size__ (16)));
+	typedef long long __v2di __attribute__ ((__vector_size__ (16)));
+
+	__m128i _mm_aesimc_si128 (__m128i __X)
+	{
+	    return (__m128i) __builtin_ia32_aesimc128 ((__v2di)__X);
+	}
+    } "-O2 -maes" ]
+}
+
+# Return 1 if vaes instructions can be compiled.
+# NOTE(review): this probe compiles an AES builtin under "-maes -mavx"
+# rather than using -mvaes/VAES builtins -- confirm that is intentional.
+proc check_effective_target_vaes { } {
+    return [check_no_compiler_messages vaes object {
+	typedef long long __m128i __attribute__ ((__vector_size__ (16)));
+	typedef long long __v2di __attribute__ ((__vector_size__ (16)));
+
+	__m128i _mm_aesimc_si128 (__m128i __X)
+	{
+	    return (__m128i) __builtin_ia32_aesimc128 ((__v2di)__X);
+	}
+    } "-O2 -maes -mavx" ]
+}
+
+# Return 1 if pclmul instructions can be compiled.
+proc check_effective_target_pclmul { } {
+    return [check_no_compiler_messages pclmul object {
+	typedef long long __m128i __attribute__ ((__vector_size__ (16)));
+	typedef long long __v2di __attribute__ ((__vector_size__ (16)));
+
+	__m128i pclmulqdq_test (__m128i __X, __m128i __Y)
+	{
+	    return (__m128i) __builtin_ia32_pclmulqdq128 ((__v2di)__X,
+							  (__v2di)__Y,
+							  1);
+	}
+    } "-O2 -mpclmul" ]
+}
+
+# Return 1 if vpclmul instructions can be compiled.
+# NOTE(review): this probe compiles a PCLMUL builtin under "-mpclmul -mavx"
+# rather than using -mvpclmulqdq -- confirm that is intentional.
+proc check_effective_target_vpclmul { } {
+    return [check_no_compiler_messages vpclmul object {
+	typedef long long __m128i __attribute__ ((__vector_size__ (16)));
+	typedef long long __v2di __attribute__ ((__vector_size__ (16)));
+
+	__m128i pclmulqdq_test (__m128i __X, __m128i __Y)
+	{
+	    return (__m128i) __builtin_ia32_pclmulqdq128 ((__v2di)__X,
+							  (__v2di)__Y,
+							  1);
+	}
+    } "-O2 -mpclmul -mavx" ]
+}
+
+# Return 1 if sse4a instructions can be compiled.
+proc check_effective_target_sse4a { } {
+    return [check_no_compiler_messages sse4a object {
+	typedef long long __m128i __attribute__ ((__vector_size__ (16)));
+	typedef long long __v2di __attribute__ ((__vector_size__ (16)));
+
+	__m128i _mm_insert_si64 (__m128i __X,__m128i __Y)
+	{
+	    return (__m128i) __builtin_ia32_insertq ((__v2di)__X, (__v2di)__Y);
+	}
+    } "-O2 -msse4a" ]
+}
+
+# Return 1 if fma4 instructions can be compiled.
+proc check_effective_target_fma4 { } {
+    return [check_no_compiler_messages fma4 object {
+	typedef float __m128 __attribute__ ((__vector_size__ (16)));
+	typedef float __v4sf __attribute__ ((__vector_size__ (16)));
+	__m128 _mm_macc_ps(__m128 __A, __m128 __B, __m128 __C)
+	{
+	    return (__m128) __builtin_ia32_vfmaddps ((__v4sf)__A,
+						     (__v4sf)__B,
+						     (__v4sf)__C);
+	}
+    } "-O2 -mfma4" ]
+}
+
+# Return 1 if fma instructions can be compiled.
+proc check_effective_target_fma { } {
+    return [check_no_compiler_messages fma object {
+	typedef float __m128 __attribute__ ((__vector_size__ (16)));
+	typedef float __v4sf __attribute__ ((__vector_size__ (16)));
+	__m128 _mm_macc_ps(__m128 __A, __m128 __B, __m128 __C)
+	{
+	    return (__m128) __builtin_ia32_vfmaddps ((__v4sf)__A,
+						     (__v4sf)__B,
+						     (__v4sf)__C);
+	}
+    } "-O2 -mfma" ]
+}
+
+# Return 1 if xop instructions can be compiled.
+proc check_effective_target_xop { } {
+    return [check_no_compiler_messages xop object {
+	typedef long long __m128i __attribute__ ((__vector_size__ (16)));
+	typedef short __v8hi __attribute__ ((__vector_size__ (16)));
+	__m128i _mm_maccs_epi16(__m128i __A, __m128i __B, __m128i __C)
+	{
+	    return (__m128i) __builtin_ia32_vpmacssww ((__v8hi)__A,
+						       (__v8hi)__B,
+						       (__v8hi)__C);
+	}
+    } "-O2 -mxop" ]
+}
+
+# Return 1 if lzcnt instruction can be compiled.
+proc check_effective_target_lzcnt { } {
+    return [check_no_compiler_messages lzcnt object {
+	unsigned short _lzcnt (unsigned short __X)
+	{
+	    return __builtin_clzs (__X);
+	}
+    } "-mlzcnt" ]
+}
+
+# Return 1 if bmi instructions can be compiled.
+proc check_effective_target_bmi { } {
+    return [check_no_compiler_messages bmi object {
+	unsigned int __bextr_u32 (unsigned int __X, unsigned int __Y)
+	{
+	    return __builtin_ia32_bextr_u32 (__X, __Y);
+	}
+    } "-mbmi" ]
+}
+
+# Return 1 if ADX instructions can be compiled.
+proc check_effective_target_adx { } {
+    return [check_no_compiler_messages adx object {
+	unsigned char
+	_adxcarry_u32 (unsigned char __CF, unsigned int __X,
+		       unsigned int __Y, unsigned int *__P)
+	{
+	    return __builtin_ia32_addcarryx_u32 (__CF, __X, __Y, __P);
+	}
+    } "-madx" ]
+}
+
+# Return 1 if rtm instructions can be compiled.
+proc check_effective_target_rtm { } {
+    return [check_no_compiler_messages rtm object {
+	void
+	_rtm_xend (void)
+	{
+	    return __builtin_ia32_xend ();
+	}
+    } "-mrtm" ]
+}
+
+# Return 1 if avx512vl instructions can be compiled.
+proc check_effective_target_avx512vl { } {
+    return [check_no_compiler_messages avx512vl object {
+	typedef long long __v4di __attribute__ ((__vector_size__ (32)));
+	__v4di
+	mm256_and_epi64 (__v4di __X, __v4di __Y)
+	{
+	    /* __W left uninitialized on purpose: compile-only probe.  */
+	    __v4di __W;
+	    return __builtin_ia32_pandq256_mask (__X, __Y, __W, -1);
+	}
+    } "-mavx512vl" ]
+}
+
+# Return 1 if avx512cd instructions can be compiled.
+# (The cache key "avx512cd_trans" is distinct from the plain avx512cd name.)
+proc check_effective_target_avx512cd { } {
+    return [check_no_compiler_messages avx512cd_trans object {
+	typedef long long __v8di __attribute__ ((__vector_size__ (64)));
+	__v8di
+	_mm512_conflict_epi64 (__v8di __W, __v8di __A)
+	{
+	    return (__v8di) __builtin_ia32_vpconflictdi_512_mask ((__v8di) __A,
+								  (__v8di) __W,
+								  -1);
+	}
+    } "-Wno-psabi -mavx512cd" ]
+}
+
+# Return 1 if avx512er instructions can be compiled.
+# (The cache key "avx512er_trans" is distinct from the plain avx512er name.)
+proc check_effective_target_avx512er { } {
+    return [check_no_compiler_messages avx512er_trans object {
+	typedef float __v16sf __attribute__ ((__vector_size__ (64)));
+	__v16sf
+	mm512_exp2a23_ps (__v16sf __X)
+	{
+	    return __builtin_ia32_exp2ps_mask (__X, __X, -1, 4);
+	}
+    } "-Wno-psabi -mavx512er" ]
+}
+
+# Return 1 if sha instructions can be compiled.
+proc check_effective_target_sha { } {
+    return [check_no_compiler_messages sha object {
+	typedef long long __m128i __attribute__ ((__vector_size__ (16)));
+	typedef int __v4si __attribute__ ((__vector_size__ (16)));
+
+	__m128i _mm_sha1msg1_epu32 (__m128i __X, __m128i __Y)
+	{
+	    return (__m128i) __builtin_ia32_sha1msg1 ((__v4si)__X,
+						      (__v4si)__Y);
+	}
+    } "-O2 -msha" ]
+}
+
+# Return 1 if avx512dq instructions can be compiled.
+proc check_effective_target_avx512dq { } {
+    return [check_no_compiler_messages avx512dq object {
+	typedef long long __v8di __attribute__ ((__vector_size__ (64)));
+	__v8di
+	_mm512_mask_mullo_epi64 (__v8di __W, __v8di __A, __v8di __B)
+	{
+	    return (__v8di) __builtin_ia32_pmullq512_mask ((__v8di) __A,
+							   (__v8di) __B,
+							   (__v8di) __W,
+							   -1);
+	}
+    } "-mavx512dq" ]
+}
+
+# Return 1 if avx512bw instructions can be compiled.
+# Probes the 512-bit word-element pmulhrsw mask builtin.
+proc check_effective_target_avx512bw { } {
+ return [check_no_compiler_messages avx512bw object {
+ typedef short __v32hi __attribute__ ((__vector_size__ (64)));
+ __v32hi
+ _mm512_mask_mulhrs_epi16 (__v32hi __W, __v32hi __A, __v32hi __B)
+ {
+ return (__v32hi) __builtin_ia32_pmulhrsw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) __W,
+ -1);
+ }
+ } "-mavx512bw" ]
+}
+
+# Return 1 if avx512ifma instructions can be compiled.
+# Probes the 52-bit integer fused multiply-add mask builtin.
+proc check_effective_target_avx512ifma { } {
+ return [check_no_compiler_messages avx512ifma object {
+ typedef long long __v8di __attribute__ ((__vector_size__ (64)));
+ __v8di
+ _mm512_madd52lo_epu64 (__v8di __X, __v8di __Y, __v8di __Z)
+ {
+ return (__v8di) __builtin_ia32_vpmadd52luq512_mask ((__v8di) __X,
+ (__v8di) __Y,
+ (__v8di) __Z,
+ -1);
+ }
+ } "-mavx512ifma" ]
+}
+
+# Return 1 if avx512vbmi instructions can be compiled.
+# Probes the byte-multishift mask builtin (the VBMI-only instruction).
+proc check_effective_target_avx512vbmi { } {
+ return [check_no_compiler_messages avx512vbmi object {
+ typedef char __v64qi __attribute__ ((__vector_size__ (64)));
+ __v64qi
+ _mm512_multishift_epi64_epi8 (__v64qi __X, __v64qi __Y)
+ {
+ return (__v64qi) __builtin_ia32_vpmultishiftqb512_mask ((__v64qi) __X,
+ (__v64qi) __Y,
+ (__v64qi) __Y,
+ -1);
+ }
+ } "-mavx512vbmi" ]
+}
+
+# Return 1 if avx512_4fmaps instructions can be compiled.
+# The 0xffff mask selects all sixteen float lanes (no masking).
+proc check_effective_target_avx5124fmaps { } {
+ return [check_no_compiler_messages avx5124fmaps object {
+ typedef float __v16sf __attribute__ ((__vector_size__ (64)));
+ typedef float __v4sf __attribute__ ((__vector_size__ (16)));
+
+ __v16sf
+ _mm512_mask_4fmadd_ps (__v16sf __DEST, __v16sf __A, __v16sf __B, __v16sf __C,
+ __v16sf __D, __v16sf __E, __v4sf *__F)
+ {
+ return (__v16sf) __builtin_ia32_4fmaddps_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__v16sf) __D,
+ (__v16sf) __E,
+ (const __v4sf *) __F,
+ (__v16sf) __DEST,
+ 0xffff);
+ }
+ } "-mavx5124fmaps" ]
+}
+
+# Return 1 if avx512_4vnniw instructions can be compiled.
+# Probes the unmasked 4-iteration vp4dpwssd builtin.
+proc check_effective_target_avx5124vnniw { } {
+ return [check_no_compiler_messages avx5124vnniw object {
+ typedef int __v16si __attribute__ ((__vector_size__ (64)));
+ typedef int __v4si __attribute__ ((__vector_size__ (16)));
+
+ __v16si
+ _mm512_4dpwssd_epi32 (__v16si __A, __v16si __B, __v16si __C,
+ __v16si __D, __v16si __E, __v4si *__F)
+ {
+ return (__v16si) __builtin_ia32_vp4dpwssd ((__v16si) __B,
+ (__v16si) __C,
+ (__v16si) __D,
+ (__v16si) __E,
+ (__v16si) __A,
+ (const __v4si *) __F);
+ }
+ } "-mavx5124vnniw" ]
+}
+
+# Return 1 if avx512_vpopcntdq instructions can be compiled.
+# Probes the 512-bit dword popcount builtin.
+proc check_effective_target_avx512vpopcntdq { } {
+ return [check_no_compiler_messages avx512vpopcntdq object {
+ typedef int __v16si __attribute__ ((__vector_size__ (64)));
+
+ __v16si
+ _mm512_popcnt_epi32 (__v16si __A)
+ {
+ return (__v16si) __builtin_ia32_vpopcountd_v16si ((__v16si) __A);
+ }
+ } "-mavx512vpopcntdq" ]
+}
+
+# Return 1 if 128 or 256-bit avx512_vpopcntdq instructions can be compiled.
+# The VL forms additionally require -mavx512vl.
+proc check_effective_target_avx512vpopcntdqvl { } {
+ return [check_no_compiler_messages avx512vpopcntdqvl object {
+ typedef int __v8si __attribute__ ((__vector_size__ (32)));
+
+ __v8si
+ _mm256_popcnt_epi32 (__v8si __A)
+ {
+ return (__v8si) __builtin_ia32_vpopcountd_v8si ((__v8si) __A);
+ }
+ } "-mavx512vpopcntdq -mavx512vl" ]
+}
+
+# Return 1 if gfni instructions can be compiled.
+# The immediate operand is passed as a literal 0 and __C is unused,
+# presumably because the builtin requires a compile-time constant
+# immediate -- TODO confirm against i386 builtin definitions.
+proc check_effective_target_gfni { } {
+ return [check_no_compiler_messages gfni object {
+ typedef char __v16qi __attribute__ ((__vector_size__ (16)));
+
+ __v16qi
+ _mm_gf2p8affineinv_epi64_epi8 (__v16qi __A, __v16qi __B, const int __C)
+ {
+ return (__v16qi) __builtin_ia32_vgf2p8affineinvqb_v16qi ((__v16qi) __A,
+ (__v16qi) __B,
+ 0);
+ }
+ } "-mgfni" ]
+}
+
+# Return 1 if avx512vbmi2 instructions can be compiled.
+# NOTE(review): __mmask16 is declared unsigned long long here, while the
+# real intrinsics headers use unsigned short; harmless for a pure
+# compile probe, but confirm the builtin accepts the wider mask type.
+proc check_effective_target_avx512vbmi2 { } {
+ return [check_no_compiler_messages avx512vbmi2 object {
+ typedef char __v16qi __attribute__ ((__vector_size__ (16)));
+ typedef unsigned long long __mmask16;
+
+ __v16qi
+ _mm_mask_compress_epi8 (__v16qi __A, __mmask16 __B, __v16qi __C)
+ {
+ return (__v16qi) __builtin_ia32_compressqi128_mask((__v16qi)__C,
+ (__v16qi)__A,
+ (__mmask16)__B);
+ }
+ } "-mavx512vbmi2 -mavx512vl" ]
+}
+
+# Return 1 if avx512vnni instructions can be compiled.
+# Probes the VNNI vpdpbusd builtin (dot product of unsigned bytes with
+# signed bytes accumulated into dwords).
+proc check_effective_target_avx512vnni { } {
+ return [check_no_compiler_messages avx512vnni object {
+ typedef int __v16si __attribute__ ((__vector_size__ (64)));
+
+ __v16si
+ _mm512_dpbusd_epi32 (__v16si __A, __v16si __B, __v16si __C)
+ {
+ return (__v16si) __builtin_ia32_vpdpbusd_v16si ((__v16si)__A,
+ (__v16si)__B,
+ (__v16si)__C);
+ }
+ } "-mavx512vnni -mavx512f" ]
+}
+
+# Return 1 if vaes instructions can be compiled.
+# The probe must declare __v32qi itself: VAES operates on 256-bit byte
+# vectors, and without the typedef the snippet fails to compile for the
+# wrong reason and the check would always return 0.
+proc check_effective_target_avx512vaes { } {
+ return [check_no_compiler_messages avx512vaes object {
+ typedef char __v32qi __attribute__ ((__vector_size__ (32)));
+
+ __v32qi
+ _mm256_aesdec_epi128 (__v32qi __A, __v32qi __B)
+ {
+ return (__v32qi)__builtin_ia32_vaesdec_v32qi ((__v32qi) __A, (__v32qi) __B);
+ }
+ } "-mvaes" ]
+}
+
+# Return 1 if vpclmulqdq instructions can be compiled.
+# The 256-bit carry-less multiply form also requires -mavx512vl.
+proc check_effective_target_vpclmulqdq { } {
+ return [check_no_compiler_messages vpclmulqdq object {
+ typedef long long __v4di __attribute__ ((__vector_size__ (32)));
+
+ __v4di
+ _mm256_clmulepi64_epi128 (__v4di __A, __v4di __B)
+ {
+ return (__v4di) __builtin_ia32_vpclmulqdq_v4di (__A, __B, 0);
+ }
+ } "-mvpclmulqdq -mavx512vl" ]
+}
+
+# Return 1 if avx512_bitalg instructions can be compiled.
+# Probes the 512-bit word-element popcount builtin.
+proc check_effective_target_avx512bitalg { } {
+ return [check_no_compiler_messages avx512bitalg object {
+ typedef short int __v32hi __attribute__ ((__vector_size__ (64)));
+
+ __v32hi
+ _mm512_popcnt_epi16 (__v32hi __A)
+ {
+ return (__v32hi) __builtin_ia32_vpopcountw_v32hi ((__v32hi) __A);
+ }
+ } "-mavx512bitalg" ]
+}
+
# Return 1 if C wchar_t type is compatible with char16_t.
proc check_effective_target_wchar_t_char16_t_compatible { } {
# (LTO) support.
proc check_effective_target_lto { } {
- if { [istarget nvptx-*-*] } {
+ if { [istarget nvptx-*-*]
+ || [istarget amdgcn-*-*] } {
return 0;
}
return [check_no_compiler_messages lto object {
} "-flto"]
}
+# Return 1 if the compiler and linker support incremental link-time
+# optimization.
+# Requires plain LTO support first, then probes an incremental
+# relocatable link (-flto -r -nostdlib) producing an executable object.
+
+proc check_effective_target_lto_incremental { } {
+ if ![check_effective_target_lto] {
+ return 0
+ }
+ return [check_no_compiler_messages lto_incremental executable {
+ int main () { return 0; }
+ } "-flto -r -nostdlib"]
+}
+
# Return 1 if -mx32 -maddress-mode=short can compile, 0 otherwise.
proc check_effective_target_maybe_x32 { } {
if { [check_effective_target_mpaired_single] } {
lappend EFFECTIVE_TARGETS mpaired_single
}
- if { [check_effective_target_mips_loongson] } {
- lappend EFFECTIVE_TARGETS mips_loongson
+ if { [check_effective_target_mips_loongson_mmi] } {
+ lappend EFFECTIVE_TARGETS mips_loongson_mmi
}
if { [check_effective_target_mips_msa] } {
lappend EFFECTIVE_TARGETS mips_msa
lappend DEFAULT_VECTCFLAGS "-march=z14" "-mzarch"
set dg-do-what-default compile
}
+ } elseif [istarget amdgcn-*-*] {
+ set dg-do-what-default run
} else {
return 0
}
}
}
+# Return 1 if <fenv.h> is available.
+# Only checks that the header compiles under the target's IEEE options;
+# it does not require working exception flags.
+
+proc check_effective_target_fenv {} {
+ return [check_no_compiler_messages fenv object {
+ #include <fenv.h>
+ } [add_options_for_ieee "-std=gnu99"]]
+}
+
# Return 1 if <fenv.h> is available with all the standard IEEE
# exceptions and floating-point exceptions are raised by arithmetic
# operations. (If the target requires special options for "inexact"
} [add_options_for_ieee "-std=gnu99"]]
}
+# Return 1 if -fexceptions is supported.
+# amdgcn is excluded explicitly; all other targets are assumed to
+# support it.
+
+proc check_effective_target_exceptions {} {
+ if { [istarget amdgcn*-*-*] } {
+ return 0
+ }
+ return 1
+}
+
+
proc check_effective_target_tiny {} {
return [check_cached_effective_target tiny {
if { [istarget aarch64*-*-*]
}]
}
-# Return 1 if LOGICAL_OP_NON_SHORT_CIRCUIT is set to 0 for the current target.
-
-proc check_effective_target_logical_op_short_circuit {} {
- if { [istarget mips*-*-*]
- || [istarget arc*-*-*]
- || [istarget avr*-*-*]
- || [istarget crisv32-*-*] || [istarget cris-*-*]
- || [istarget csky*-*-*]
- || [istarget mmix-*-*]
- || [istarget s390*-*-*]
- || [istarget powerpc*-*-*]
- || [istarget nios2*-*-*]
- || [istarget riscv*-*-*]
- || [istarget v850*-*-*]
- || [istarget visium-*-*]
- || [check_effective_target_arm_cortex_m] } {
- return 1
- }
- return 0
-}
-
# Return 1 if the target supports -mbranch-cost=N option.
proc check_effective_target_branch_cost {} {
return 0
}
-# Return 1 if there is an nvptx offload compiler.
+# Return 1 if the compiler has been configured with hsa offloading.
-proc check_effective_target_offload_nvptx { } {
- return [check_no_compiler_messages offload_nvptx object {
+proc check_effective_target_offload_hsa { } {
+ return [check_no_compiler_messages offload_hsa assembly {
int main () {return 0;}
- } "-foffload=nvptx-none" ]
+ } "-foffload=hsa" ]
}
# Return 1 if the compiler has been configured with hsa offloading.
-proc check_effective_target_offload_hsa { } {
- return [check_no_compiler_messages offload_hsa assembly {
+proc check_effective_target_offload_gcn { } {
+ return [check_no_compiler_messages offload_gcn assembly {
int main () {return 0;}
- } "-foffload=hsa" ]
+ } "-foffload=amdgcn-unknown-amdhsa" ]
}
# Return 1 if the target support -fprofile-update=atomic
}
} "-O2" ]
}
+
+# Return 1 if the target supports a floating point "infinity" value,
+# i.e. __builtin_inf () compiles without a diagnostic.
+proc check_effective_target_inf { } {
+ return [check_no_compiler_messages supports_inf assembly {
+ const double pinf = __builtin_inf ();
+ }]
+}
+
+# Return 1 if the target supports ARMv8.3 Adv.SIMD Complex
+# instructions, 0 otherwise.  The test is valid for ARM and for AArch64.
+# Record the command line options needed (in the global
+# et_arm_v8_3a_complex_neon_flags, consumed by the add_options proc).
+
+proc check_effective_target_arm_v8_3a_complex_neon_ok_nocache { } {
+ global et_arm_v8_3a_complex_neon_flags
+ set et_arm_v8_3a_complex_neon_flags ""
+
+ if { ![istarget arm*-*-*] && ![istarget aarch64*-*-*] } {
+ return 0;
+ }
+
+ # Iterate through sets of options to find the compiler flags that
+ # need to be added to the -march option.  The first combination under
+ # which __ARM_FEATURE_COMPLEX is defined wins.
+ foreach flags {"" "-mfloat-abi=softfp -mfpu=auto" "-mfloat-abi=hard -mfpu=auto"} {
+ if { [check_no_compiler_messages_nocache \
+ arm_v8_3a_complex_neon_ok object {
+ #if !defined (__ARM_FEATURE_COMPLEX)
+ #error "__ARM_FEATURE_COMPLEX not defined"
+ #endif
+ } "$flags -march=armv8.3-a"] } {
+ set et_arm_v8_3a_complex_neon_flags "$flags -march=armv8.3-a"
+ return 1
+ }
+ }
+
+ return 0;
+}
+
+# Cached wrapper for the arm_v8_3a_complex_neon support check.
+proc check_effective_target_arm_v8_3a_complex_neon_ok { } {
+ return [check_cached_effective_target arm_v8_3a_complex_neon_ok \
+ check_effective_target_arm_v8_3a_complex_neon_ok_nocache]
+}
+
+# Append the flags recorded by the arm_v8_3a_complex_neon check to
+# FLAGS; returns FLAGS unchanged when the target lacks support.
+proc add_options_for_arm_v8_3a_complex_neon { flags } {
+ if { ! [check_effective_target_arm_v8_3a_complex_neon_ok] } {
+ return "$flags"
+ }
+ global et_arm_v8_3a_complex_neon_flags
+ return "$flags $et_arm_v8_3a_complex_neon_flags"
+}
+
+# Return 1 if the target supports executing AdvSIMD instructions from ARMv8.3
+# with the complex instruction extension, 0 otherwise. The test is valid for
+# ARM and for AArch64.
+
+proc check_effective_target_arm_v8_3a_complex_neon_hw { } {
+ if { ![check_effective_target_arm_v8_3a_complex_neon_ok] } {
+ return 0;
+ }
+ # NOTE(review): check_runtime succeeds when the program exits 0.
+ # fcadd/vcadd #90 on a={1,3}, b={2,5} yields {-4,5}, so the {8,24}
+ # compare is expected to be false and main returns 0 on working
+ # hardware; an illegal-instruction trap makes the run fail.  Confirm
+ # this inverted-looking convention is intended.
+ return [check_runtime arm_v8_3a_complex_neon_hw_available {
+ #include "arm_neon.h"
+ int
+ main (void)
+ {
+
+ float32x2_t results = {-4.0,5.0};
+ float32x2_t a = {1.0,3.0};
+ float32x2_t b = {2.0,5.0};
+
+ #ifdef __ARM_ARCH_ISA_A64
+ asm ("fcadd %0.2s, %1.2s, %2.2s, #90"
+ : "=w"(results)
+ : "w"(a), "w"(b)
+ : /* No clobbers. */);
+
+ #else
+ asm ("vcadd.f32 %P0, %P1, %P2, #90"
+ : "=w"(results)
+ : "w"(a), "w"(b)
+ : /* No clobbers. */);
+ #endif
+
+ return (results[0] == 8 && results[1] == 24) ? 1 : 0;
+ }
+ } [add_options_for_arm_v8_3a_complex_neon ""]]
+}
+
+# Return 1 if the target plus current options supports a vector
+# complex addition with rotate of half and single float modes, 0 otherwise.
+#
+# This won't change for different subtargets so cache the result.
+
+# The string map stamps out one proc per mode suffix (N -> hf, sf).
+foreach N {hf sf} {
+ eval [string map [list N $N] {
+ proc check_effective_target_vect_complex_rot_N { } {
+ return [check_cached_effective_target_indexed vect_complex_rot_N {
+ expr { [istarget aarch64*-*-*]
+ || [istarget arm*-*-*] }}]
+ }
+ }]
+}
+
+# Return 1 if the target plus current options supports a vector
+# complex addition with rotate of double float modes, 0 otherwise.
+# Unlike the hf/sf variants, double float is AArch64-only.
+#
+# This won't change for different subtargets so cache the result.
+
+foreach N {df} {
+ eval [string map [list N $N] {
+ proc check_effective_target_vect_complex_rot_N { } {
+ return [check_cached_effective_target_indexed vect_complex_rot_N {
+ expr { [istarget aarch64*-*-*] }}]
+ }
+ }]
+}
+
+# Return 1 if this target uses an LLVM assembler and/or linker.
+# True for native amdgcn and when a GCN offload compiler is configured.
+proc check_effective_target_llvm_binutils { } {
+ return [check_cached_effective_target llvm_binutils {
+ expr { [istarget amdgcn*-*-*]
+ || [check_effective_target_offload_gcn] }}]
+}