-# Copyright (C) 1999-2017 Free Software Foundation, Inc.
+# Copyright (C) 1999-2019 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# Assume by default that CONTENTS is C code.
# Otherwise, code should contain:
# "// C++" for c++,
+# "// D" for D,
# "! Fortran" for Fortran code,
# "/* ObjC", for ObjC
# "// ObjC++" for ObjC++
# and "// Go" for Go
# If the tool is ObjC/ObjC++ then we overide the extension to .m/.mm to
# allow for ObjC/ObjC++ specific flags.
+
proc check_compile {basename type contents args} {
global tool
verbose "check_compile tool: $tool for $basename"
switch -glob -- $contents {
"*! Fortran*" { set src ${basename}[pid].f90 }
"*// C++*" { set src ${basename}[pid].cc }
+ "*// D*" { set src ${basename}[pid].d }
"*// ObjC++*" { set src ${basename}[pid].mm }
"*/* ObjC*" { set src ${basename}[pid].m }
"*// Go*" { set src ${basename}[pid].go }
proc check_cached_effective_target { prop args } {
global et_cache
- global et_prop_list
set target [current_target_name]
- if {![info exists et_cache($prop,target)]
- || $et_cache($prop,target) != $target} {
+ if {![info exists et_cache($prop,$target)]} {
verbose "check_cached_effective_target $prop: checking $target" 2
- set et_cache($prop,target) $target
- set et_cache($prop,value) [uplevel eval $args]
- if {![info exists et_prop_list]
- || [lsearch $et_prop_list $prop] < 0} {
- lappend et_prop_list $prop
+ if {[string is true -strict $args] || [string is false -strict $args]} {
+ error {check_cached_effective_target condition already evaluated; did you pass [...] instead of the expected {...}?}
+ } else {
+ set code [catch {uplevel eval $args} result]
+ if {$code != 0 && $code != 2} {
+ return -code $code $result
+ }
+ set et_cache($prop,$target) $result
}
- verbose "check_cached_effective_target cached list is now: $et_prop_list" 2
}
- set value $et_cache($prop,value)
+ set value $et_cache($prop,$target)
verbose "check_cached_effective_target $prop: returning $value for $target" 2
return $value
}
+# Implements a version of check_cached_effective_target that also takes et_index
+# into account when creating the key for the cache.
+proc check_cached_effective_target_indexed { prop args } {
+ global et_index
+ set key "$et_index $prop"
+ verbose "check_cached_effective_target_index $prop: returning $key" 2
+
+ return [check_cached_effective_target $key [list uplevel eval $args]]
+}
+
# Clear effective-target cache. This is useful after testing
# effective-target features and overriding TEST_ALWAYS_FLAGS and/or
# ALWAYS_CXXFLAGS.
proc clear_effective_target_cache { } {
global et_cache
- global et_prop_list
-
- if {[info exists et_prop_list]} {
- verbose "clear_effective_target_cache: $et_prop_list" 2
- foreach prop $et_prop_list {
- unset et_cache($prop,value)
- unset et_cache($prop,target)
- }
- unset et_prop_list
- }
+ array unset et_cache
}
# Like check_compile, but delete the output file and return true if the
return 1
}
+ # pdp11 doesn't support it
+
+ if { [istarget pdp11*-*-*] } {
+ return 0
+ }
+
# ELF and ECOFF support it. a.out does with gas/gld but may also with
# other linkers, so we should try it
}
}
+# return 1 if weak undefined symbols are supported.
+
+proc check_effective_target_weak_undefined { } {
+ if { [istarget hppa*-*-hpux*] } {
+ return 0
+ }
+ return [check_runtime weak_undefined {
+ extern void foo () __attribute__((weak));
+ int main (void) { if (foo) return 1; return 0; }
+ } ""]
+}
+
###############################
# proc check_weak_override_available { }
###############################
# be determined.
proc check_alias_available { } {
- global alias_available_saved
global tool
- if [info exists alias_available_saved] {
- verbose "check_alias_available returning saved $alias_available_saved" 2
- } else {
+ return [check_cached_effective_target alias_available {
set src alias[pid].c
set obj alias[pid].o
verbose "check_alias_available compiling testfile $src" 2
if [string match "" $lines] then {
# No error messages, everything is OK.
- set alias_available_saved 2
+ return 2
} else {
if [regexp "alias definitions not supported" $lines] {
verbose "check_alias_available target does not support aliases" 2
if { $objformat == "elf" } {
verbose "check_alias_available but target uses ELF format, so it ought to" 2
- set alias_available_saved -1
+ return -1
} else {
- set alias_available_saved 0
+ return 0
}
} else {
if [regexp "only weak aliases are supported" $lines] {
verbose "check_alias_available target supports only weak aliases" 2
- set alias_available_saved 1
+ return 1
} else {
- set alias_available_saved -1
+ return -1
}
}
}
-
- verbose "check_alias_available returning $alias_available_saved" 2
- }
-
- return $alias_available_saved
+ }]
}
# Returns 1 if the target toolchain supports strong aliases, 0 otherwise.
#ifdef __cplusplus
extern "C" {
#endif
- typedef void F (void);
- F* g (void) {}
+ extern void f_ ();
+ typedef void F (void);
+ F* g (void) { return &f_; }
void f () __attribute__ ((ifunc ("g")));
#ifdef __cplusplus
}
# Returns true if --gc-sections is supported on the target.
proc check_gc_sections_available { } {
- global gc_sections_available_saved
global tool
- if {![info exists gc_sections_available_saved]} {
+ return [check_cached_effective_target gc_sections_available {
# Some targets don't support gc-sections despite whatever's
# advertised by ld's options.
if { [istarget alpha*-*-*]
|| [istarget ia64-*-*] } {
- set gc_sections_available_saved 0
return 0
}
# --gc-sections.
if { [board_info target exists ldflags]
&& [regexp " -elf2flt\[ =\]" " [board_info target ldflags] "] } {
- set gc_sections_available_saved 0
return 0
}
# while RTP executables are linked with -q (--emit-relocs).
# Both of these options are incompatible with --gc-sections.
if { [istarget *-*-vxworks*] } {
- set gc_sections_available_saved 0
return 0
}
set gcc_ld [lindex [${tool}_target_compile "-print-prog-name=ld" "" "none" ""] 0]
set ld_output [remote_exec host "$gcc_ld" "--help"]
if { [ string first "--gc-sections" $ld_output ] >= 0 } {
- set gc_sections_available_saved 1
+ return 1
} else {
- set gc_sections_available_saved 0
+ return 0
}
- }
- return $gc_sections_available_saved
+ }]
}
# Return 1 if according to target_info struct and explicit target list
if [target_info exists keeps_null_pointer_checks] {
return 1
}
- if { [istarget avr-*-*]
- || [istarget msp430-*-*] } {
+ if { [istarget msp430-*-*] } {
return 1;
}
return 0
# Return true if profiling is supported on the target.
proc check_profiling_available { test_what } {
- global profiling_available_saved
-
verbose "Profiling argument is <$test_what>" 1
# These conditions depend on the argument so examine them before
}
# Now examine the cache variable.
- if {![info exists profiling_available_saved]} {
+ set profiling_working \
+ [check_cached_effective_target profiling_available {
# Some targets don't have any implementation of __bb_init_func or are
# missing other needed machinery.
if {[istarget aarch64*-*-elf]
|| [istarget am3*-*-linux*]
+ || [istarget amdgcn-*-*]
|| [istarget arm*-*-eabi*]
|| [istarget arm*-*-elf]
|| [istarget arm*-*-symbianelf*]
|| [istarget bfin-*-*]
|| [istarget cris-*-*]
|| [istarget crisv32-*-*]
+ || [istarget csky-*-elf]
|| [istarget fido-*-elf]
|| [istarget h8300-*-*]
|| [istarget lm32-*-*]
|| [istarget xtensa*-*-elf]
|| [istarget *-*-rtems*]
|| [istarget *-*-vxworks*] } {
- set profiling_available_saved 0
+ return 0
} else {
- set profiling_available_saved 1
+ return 1
}
- }
+ }]
# -pg link test result can't be cached since it may change between
# runs.
- set profiling_working $profiling_available_saved
- if { $profiling_available_saved == 1
+ if { $profiling_working == 1
&& ![check_no_compiler_messages_nocache profiling executable {
int main() { return 0; } } "-pg"] } {
set profiling_working 0
if { [istarget nvptx-*-*] } {
return 0
}
+ if { [istarget amdgcn-*-*] } {
+ return 0
+ }
return 1
}
if { [istarget nvptx-*-*] } {
return 0
}
+ # It could be supported on amdgcn, but isn't yet.
+ if { [istarget amdgcn*-*-*] } {
+ return 0
+ }
return 1
}
# Return 1 if TLS executables can run correctly, 0 otherwise.
proc check_effective_target_tls_runtime {} {
- # The runtime does not have TLS support, but just
- # running the test below is insufficient to show this.
- if { [istarget msp430-*-*] || [istarget visium-*-*] } {
- return 0
- }
return [check_runtime tls_runtime {
- __thread int thr = 0;
+ __thread int thr __attribute__((tls_model("global-dynamic"))) = 0;
int main (void) { return thr; }
} [add_options_for_tls ""]]
}
# code, 0 otherwise.
proc check_effective_target_fopenacc {} {
- # nvptx can be built with the device-side bits of openacc, but it
+ # nvptx/amdgcn can be built with the device-side bits of openacc, but it
# does not make sense to test it as an openacc host.
if [istarget nvptx-*-*] { return 0 }
+ if [istarget amdgcn-*-*] { return 0 }
return [check_no_compiler_messages fopenacc object {
void foo (void) { }
# code, 0 otherwise.
proc check_effective_target_fopenmp {} {
- # nvptx can be built with the device-side bits of libgomp, but it
+ # nvptx/amdgcn can be built with the device-side bits of libgomp, but it
# does not make sense to test it as an openmp host.
if [istarget nvptx-*-*] { return 0 }
+ if [istarget amdgcn-*-*] { return 0 }
return [check_no_compiler_messages fopenmp object {
void foo (void) { }
# Return 1 if the target supports -fstack-protector
proc check_effective_target_fstack_protector {} {
return [check_runtime fstack_protector {
- int main (void) { return 0; }
+ #include <string.h>
+ int main (int argc, char *argv[]) {
+ char buf[64];
+ return !strcpy (buf, strrchr (argv[0], '/'));
+ }
} "-fstack-protector"]
}
} "-freorder-blocks-and-partition"]
&& [check_no_compiler_messages fprofile_use_freorder object {
void foo (void) { }
- } "-fprofile-use -freorder-blocks-and-partition"] } {
+ } "-fprofile-use -freorder-blocks-and-partition -Wno-missing-profile"] } {
return 1
}
return 0
|| [istarget *-*-dragonfly*]
|| [istarget *-*-freebsd*]
|| [istarget *-*-linux*]
- || [istarget *-*-gnu*] } {
+ || [istarget *-*-gnu*]
+ || [istarget *-*-amdhsa]} {
return 1;
}
if { [istarget *-*-solaris2.1\[1-9\]*] } {
- # Full PIE support was added in Solaris 11.x and Solaris 12, but gcc
- # errors out if missing, so check for that.
+ # Full PIE support was added in Solaris 11.3, but gcc errors out
+ # if missing, so check for that.
return [check_no_compiler_messages pie executable {
int main (void) { return 0; }
} "-pie -fpie"]
# }]
}
+ # The generic test doesn't work for C-SKY because some cores have
+ # hard float for single precision only.
+ if { [istarget csky*-*-*] } {
+ return [check_no_compiler_messages hard_float assembly {
+ #if defined __csky_soft_float__
+ #error __csky_soft_float__
+ #endif
+ }]
+ }
+
# The generic test equates hard_float with "no call for adding doubles".
return [check_no_messages_and_pattern hard_float "!\\(call" rtl-expand {
double a (double b, double c) { return b + c; }
}] $libiconv]
}
-# Return true if Cilk Library is supported on the target.
-proc check_effective_target_cilkplus_runtime { } {
- return [ check_no_compiler_messages_nocache cilkplus_runtime executable {
- #ifdef __cplusplus
- extern "C"
- #endif
- int __cilkrts_set_param (const char *, const char *);
- int main (void) {
- int x = __cilkrts_set_param ("nworkers", "0");
- return x;
- }
- } "-fcilkplus -lcilkrts" ]
-}
-
# Return true if the atomic library is supported on the target.
proc check_effective_target_libatomic_available { } {
return [check_no_compiler_messages libatomic_available executable {
} "-rdynamic"]
}
-# Return 1 if cilk-plus is supported by the target, 0 otherwise.
-
-proc check_effective_target_cilkplus { } {
- # Skip cilk-plus tests on int16 and size16 targets for now.
- # The cilk-plus tests are not generic enough to cover these
- # cases and would throw hundreds of FAILs.
- if { [check_effective_target_int16]
- || ![check_effective_target_size32plus] } {
- return 0;
- }
-
- # Skip AVR, its RAM is too small and too many tests would fail.
- if { [istarget avr-*-*] } {
- return 0;
- }
-
- if { ! [check_effective_target_pthread] } {
- return 0;
- }
-
- return 1
-}
-
proc check_linker_plugin_available { } {
return [check_no_compiler_messages_nocache linker_plugin executable {
int main() { return 0; }
return 0
}
+# Return 1 if bmi2 instructions can be compiled.
+proc check_effective_target_bmi2 { } {
+ if { !([istarget i?86-*-*] || [istarget x86_64-*-*]) } {
+ return 0
+ }
+ return [check_no_compiler_messages bmi2 object {
+ unsigned int
+ _bzhi_u32 (unsigned int __X, unsigned int __Y)
+ {
+ return __builtin_ia32_bzhi_si (__X, __Y);
+ }
+ } "-mbmi2" ]
+}
+
# Return 1 if the target supports executing MIPS Paired-Single instructions,
# 0 otherwise. Cache the result.
# Return 1 if the target supports executing Loongson vector instructions,
# 0 otherwise. Cache the result.
-proc check_mips_loongson_hw_available { } {
- return [check_cached_effective_target mips_loongson_hw_available {
+proc check_mips_loongson_mmi_hw_available { } {
+ return [check_cached_effective_target mips_loongson_mmi_hw_available {
# If this is not the right target then we can skip the test.
if { !([istarget mips*-*-*]) } {
expr 0
} else {
- check_runtime_nocache mips_loongson_hw_available {
- #include <loongson.h>
+ check_runtime_nocache mips_loongson_mmi_hw_available {
+ #include <loongson-mmiintrin.h>
int main()
{
asm volatile ("paddw $f2,$f4,$f6");
return 0;
}
- } ""
+ } "-mloongson-mmi"
}
}]
}
# Return 1 if the target supports running Loongson executables, 0 otherwise.
-proc check_effective_target_mips_loongson_runtime { } {
- if { [check_effective_target_mips_loongson]
- && [check_mips_loongson_hw_available] } {
+proc check_effective_target_mips_loongson_mmi_runtime { } {
+ if { [check_effective_target_mips_loongson_mmi]
+ && [check_mips_loongson_mmi_hw_available] } {
return 1
}
return 0
__float128 w = -1.0q;
__asm__ ("xsaddqp %0,%1,%2" : "+v" (w) : "v" (x), "v" (y));
- return ((z != 3.0q) || (z != w);
+ return ((z != 3.0q) || (z != w));
}
} $options
}
}]
}
+# Return 1 if we're generating code for big-endian memory order.
+
+proc check_effective_target_be { } {
+ return [check_no_compiler_messages be object {
+ int dummy[__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ ? 1 : -1];
+ }]
+}
+
+# Return 1 if we're generating code for little-endian memory order.
+
+proc check_effective_target_le { } {
+ return [check_no_compiler_messages le object {
+ int dummy[__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ ? 1 : -1];
+ }]
+}
+
# Return 1 if we're generating 32-bit code using default options, 0
# otherwise.
}]
}
-# Return 1 if we support 32-bit or larger array and structure sizes
-# using default options, 0 otherwise. Avoid false positive on
-# targets with 20 or 24 bit address spaces.
+# Return 1 if we support 16-bit or larger array and structure sizes
+# using default options, 0 otherwise.
+# This implies at least a 20-bit address space, as no targets have an address
+# space between 16 and 20 bits.
+
+proc check_effective_target_size20plus { } {
+ return [check_no_compiler_messages size20plus object {
+ char dummy[65537L];
+ }]
+}
+
+# Return 1 if we support 24-bit or larger array and structure sizes
+# using default options, 0 otherwise.
+# This implies at least a 32-bit address space, as no targets have an address
+# space between 24 and 32 bits.
proc check_effective_target_size32plus { } {
return [check_no_compiler_messages size32plus object {
}]
}
+# Return 1 if int size is equal to float size,
+# 0 otherwise.
+
+proc check_effective_target_int_eq_float { } {
+ return [check_no_compiler_messages int_eq_float object {
+ int dummy[sizeof (int) >= sizeof (float) ? 1 : -1];
+ }]
+}
+
+# Return 1 if pointer size is equal to long size,
+# 0 otherwise.
+
+proc check_effective_target_ptr_eq_long { } {
+ # sizeof (void *) == 4 for msp430-elf -mlarge which is equal to
+ # sizeof (long). Avoid false positive.
+ if { [istarget msp430-*-*] } {
+ return 0
+ }
+ return [check_no_compiler_messages ptr_eq_long object {
+ int dummy[sizeof (void *) == sizeof (long) ? 1 : -1];
+ }]
+}
+
# Return 1 if the target supports long double larger than double,
# 0 otherwise.
}]
}
+# Return 1 if the target supports long double of 64 bits,
+# 0 otherwise.
+
+proc check_effective_target_longdouble64 { } {
+ return [check_no_compiler_messages longdouble64 object {
+ int dummy[sizeof(long double) == 8 ? 1 : -1];
+ }]
+}
+
# Return 1 if the target supports double of 64 bits,
# 0 otherwise.
return 1
}
+# Return 1 if the target supports all four forms of fused multiply-add
+# (fma, fms, fnma, and fnms) for both float and double.
+
+proc check_effective_target_scalar_all_fma { } {
+ return [istarget aarch64*-*-*]
+}
+
# Return 1 if the target supports compiling fixed-point,
# 0 otherwise.
# instruction set.
proc check_effective_target_vect_cmdline_needed { } {
- global et_vect_cmdline_needed_saved
global et_vect_cmdline_needed_target_name
if { ![info exists et_vect_cmdline_needed_target_name] } {
}
}
- if [info exists et_vect_cmdline_needed_saved] {
- verbose "check_effective_target_vect_cmdline_needed: using cached result" 2
- } else {
- set et_vect_cmdline_needed_saved 1
+ return [check_cached_effective_target vect_cmdline_needed {
if { [istarget alpha*-*-*]
|| [istarget ia64-*-*]
|| (([istarget i?86-*-*] || [istarget x86_64-*-*])
|| [istarget spu-*-*]
|| ([istarget arm*-*-*] && [check_effective_target_arm_neon])
|| [istarget aarch64*-*-*] } {
- set et_vect_cmdline_needed_saved 0
- }
- }
-
- verbose "check_effective_target_vect_cmdline_needed: returning $et_vect_cmdline_needed_saved" 2
- return $et_vect_cmdline_needed_saved
+ return 0
+ } else {
+ return 1
+ }}]
}
# Return 1 if the target supports hardware vectors of int, 0 otherwise.
# This won't change for different subtargets so cache the result.
proc check_effective_target_vect_int { } {
- global et_vect_int_saved
- global et_index
-
- if [info exists et_vect_int_saved($et_index)] {
- verbose "check_effective_target_vect_int: using cached result" 2
- } else {
- set et_vect_int_saved($et_index) 0
- if { [istarget i?86-*-*] || [istarget x86_64-*-*]
- || ([istarget powerpc*-*-*]
- && ![istarget powerpc-*-linux*paired*])
- || [istarget spu-*-*]
- || [istarget sparc*-*-*]
- || [istarget alpha*-*-*]
- || [istarget ia64-*-*]
- || [istarget aarch64*-*-*]
- || [is-effective-target arm_neon]
- || ([istarget mips*-*-*]
- && ([et-is-effective-target mips_loongson]
- || [et-is-effective-target mips_msa]))
- || ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) } {
- set et_vect_int_saved($et_index) 1
- }
- }
-
- verbose "check_effective_target_vect_int:\
- returning $et_vect_int_saved($et_index)" 2
- return $et_vect_int_saved($et_index)
+ return [check_cached_effective_target_indexed vect_int {
+ expr {
+ [istarget i?86-*-*] || [istarget x86_64-*-*]
+ || ([istarget powerpc*-*-*]
+ && ![istarget powerpc-*-linux*paired*])
+ || [istarget amdgcn-*-*]
+ || [istarget spu-*-*]
+ || [istarget sparc*-*-*]
+ || [istarget alpha*-*-*]
+ || [istarget ia64-*-*]
+ || [istarget aarch64*-*-*]
+ || [is-effective-target arm_neon]
+ || ([istarget mips*-*-*]
+ && ([et-is-effective-target mips_loongson_mmi]
+ || [et-is-effective-target mips_msa]))
+ || ([istarget s390*-*-*]
+ && [check_effective_target_s390_vx])
+ }}]
}
# Return 1 if the target supports signed int->float conversion
#
proc check_effective_target_vect_intfloat_cvt { } {
- global et_vect_intfloat_cvt_saved
- global et_index
-
- if [info exists et_vect_intfloat_cvt_saved($et_index)] {
- verbose "check_effective_target_vect_intfloat_cvt:\
- using cached result" 2
- } else {
- set et_vect_intfloat_cvt_saved($et_index) 0
- if { [istarget i?86-*-*] || [istarget x86_64-*-*]
+ return [check_cached_effective_target_indexed vect_intfloat_cvt {
+ expr { [istarget i?86-*-*] || [istarget x86_64-*-*]
|| ([istarget powerpc*-*-*]
&& ![istarget powerpc-*-linux*paired*])
|| [is-effective-target arm_neon]
|| ([istarget mips*-*-*]
- && [et-is-effective-target mips_msa]) } {
- set et_vect_intfloat_cvt_saved($et_index) 1
- }
- }
-
- verbose "check_effective_target_vect_intfloat_cvt:\
- returning $et_vect_intfloat_cvt_saved($et_index)" 2
- return $et_vect_intfloat_cvt_saved($et_index)
+ && [et-is-effective-target mips_msa])
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if the target supports signed double->int conversion
#
proc check_effective_target_vect_doubleint_cvt { } {
- global et_vect_doubleint_cvt_saved
- global et_index
-
- if [info exists et_vect_doubleint_cvt_saved($et_index)] {
- verbose "check_effective_target_vect_doubleint_cvt: using cached result" 2
- } else {
- set et_vect_doubleint_cvt_saved($et_index) 0
- if { (([istarget i?86-*-*] || [istarget x86_64-*-*])
- && [check_no_compiler_messages vect_doubleint_cvt assembly {
- #ifdef __tune_atom__
- # error No double vectorizer support.
- #endif
+ return [check_cached_effective_target_indexed vect_doubleint_cvt {
+ expr { (([istarget i?86-*-*] || [istarget x86_64-*-*])
+ && [check_no_compiler_messages vect_doubleint_cvt assembly {
+ #ifdef __tune_atom__
+ # error No double vectorizer support.
+ #endif
}])
- || [istarget aarch64*-*-*]
- || [istarget spu-*-*]
- || ([istarget powerpc*-*-*] && [check_vsx_hw_available])
- || ([istarget mips*-*-*]
- && [et-is-effective-target mips_msa]) } {
- set et_vect_doubleint_cvt_saved($et_index) 1
- }
- }
-
- verbose "check_effective_target_vect_doubleint_cvt:\
- returning $et_vect_doubleint_cvt_saved($et_index)" 2
- return $et_vect_doubleint_cvt_saved($et_index)
+ || [istarget aarch64*-*-*]
+ || [istarget spu-*-*]
+ || ([istarget powerpc*-*-*] && [check_vsx_hw_available])
+ || ([istarget mips*-*-*]
+ && [et-is-effective-target mips_msa]) }}]
}
# Return 1 if the target supports signed int->double conversion
#
proc check_effective_target_vect_intdouble_cvt { } {
- global et_vect_intdouble_cvt_saved
- global et_index
-
- if [info exists et_vect_intdouble_cvt_saved($et_index)] {
- verbose "check_effective_target_vect_intdouble_cvt: using cached result" 2
- } else {
- set et_vect_intdouble_cvt_saved($et_index) 0
- if { (([istarget i?86-*-*] || [istarget x86_64-*-*])
+ return [check_cached_effective_target_indexed vect_intdouble_cvt {
+ expr { (([istarget i?86-*-*] || [istarget x86_64-*-*])
&& [check_no_compiler_messages vect_intdouble_cvt assembly {
#ifdef __tune_atom__
# error No double vectorizer support.
|| [istarget spu-*-*]
|| ([istarget powerpc*-*-*] && [check_vsx_hw_available])
|| ([istarget mips*-*-*]
- && [et-is-effective-target mips_msa]) } {
- set et_vect_intdouble_cvt_saved($et_index) 1
- }
- }
-
- verbose "check_effective_target_vect_intdouble_cvt:\
- returning $et_vect_intdouble_cvt_saved($et_index)" 2
- return $et_vect_intdouble_cvt_saved($et_index)
+ && [et-is-effective-target mips_msa]) }}]
}
#Return 1 if we're supporting __int128 for target, 0 otherwise.
#
proc check_effective_target_vect_uintfloat_cvt { } {
- global et_vect_uintfloat_cvt_saved
- global et_index
-
- if [info exists et_vect_uintfloat_cvt_saved($et_index)] {
- verbose "check_effective_target_vect_uintfloat_cvt:\
- using cached result" 2
- } else {
- set et_vect_uintfloat_cvt_saved($et_index) 0
- if { [istarget i?86-*-*] || [istarget x86_64-*-*]
+ return [check_cached_effective_target_indexed vect_uintfloat_cvt {
+ expr { [istarget i?86-*-*] || [istarget x86_64-*-*]
|| ([istarget powerpc*-*-*]
&& ![istarget powerpc-*-linux*paired*])
|| [istarget aarch64*-*-*]
|| [is-effective-target arm_neon]
|| ([istarget mips*-*-*]
- && [et-is-effective-target mips_msa]) } {
- set et_vect_uintfloat_cvt_saved($et_index) 1
- }
- }
-
- verbose "check_effective_target_vect_uintfloat_cvt:\
- returning $et_vect_uintfloat_cvt_saved($et_index)" 2
- return $et_vect_uintfloat_cvt_saved($et_index)
+ && [et-is-effective-target mips_msa])
+ || [istarget amdgcn-*-*] }}]
}
#
proc check_effective_target_vect_floatint_cvt { } {
- global et_vect_floatint_cvt_saved
- global et_index
-
- if [info exists et_vect_floatint_cvt_saved($et_index)] {
- verbose "check_effective_target_vect_floatint_cvt:\
- using cached result" 2
- } else {
- set et_vect_floatint_cvt_saved($et_index) 0
- if { [istarget i?86-*-*] || [istarget x86_64-*-*]
+ return [check_cached_effective_target_indexed vect_floatint_cvt {
+ expr { [istarget i?86-*-*] || [istarget x86_64-*-*]
|| ([istarget powerpc*-*-*]
&& ![istarget powerpc-*-linux*paired*])
|| [is-effective-target arm_neon]
|| ([istarget mips*-*-*]
- && [et-is-effective-target mips_msa]) } {
- set et_vect_floatint_cvt_saved($et_index) 1
- }
- }
-
- verbose "check_effective_target_vect_floatint_cvt:\
- returning $et_vect_floatint_cvt_saved($et_index)" 2
- return $et_vect_floatint_cvt_saved($et_index)
+ && [et-is-effective-target mips_msa])
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if the target supports unsigned float->int conversion
#
proc check_effective_target_vect_floatuint_cvt { } {
- global et_vect_floatuint_cvt_saved
- global et_index
-
- if [info exists et_vect_floatuint_cvt_saved($et_index)] {
- verbose "check_effective_target_vect_floatuint_cvt:\
- using cached result" 2
- } else {
- set et_vect_floatuint_cvt_saved($et_index) 0
- if { ([istarget powerpc*-*-*]
+ return [check_cached_effective_target_indexed vect_floatuint_cvt {
+ expr { ([istarget powerpc*-*-*]
&& ![istarget powerpc-*-linux*paired*])
|| [is-effective-target arm_neon]
|| ([istarget mips*-*-*]
- && [et-is-effective-target mips_msa]) } {
- set et_vect_floatuint_cvt_saved($et_index) 1
- }
- }
-
- verbose "check_effective_target_vect_floatuint_cvt:\
- returning $et_vect_floatuint_cvt_saved($et_index)" 2
- return $et_vect_floatuint_cvt_saved($et_index)
+ && [et-is-effective-target mips_msa])
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if peeling for alignment might be profitable on the target
#
proc check_effective_target_vect_peeling_profitable { } {
- global et_vect_peeling_profitable_saved
- global et_index
-
- if [info exists et_vect_peeling_profitable_saved($et_index)] {
- verbose "check_effective_target_vect_peeling_profitable: using cached result" 2
- } else {
- set et_vect_peeling_profitable_saved($et_index) 1
- if { ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) } {
- set et_vect_peeling_profitable_saved($et_index) 0
- }
- }
-
- verbose "check_effective_target_vect_peeling_profitable:\
- returning $et_vect_peeling_profitable_saved($et_index)" 2
- return $et_vect_peeling_profitable_saved($et_index)
+ return [check_cached_effective_target_indexed vect_peeling_profitable {
+ expr { ([istarget s390*-*-*]
+ && [check_effective_target_s390_vx])
+ || [check_effective_target_vect_element_align_preferred] }}]
}
# Return 1 if the target supports #pragma omp declare simd, 0 otherwise.
# This won't change for different subtargets so cache the result.
proc check_effective_target_vect_simd_clones { } {
- global et_vect_simd_clones_saved
- global et_index
-
- if [info exists et_vect_simd_clones_saved($et_index)] {
- verbose "check_effective_target_vect_simd_clones: using cached result" 2
- } else {
- set et_vect_simd_clones_saved($et_index) 0
- # On i?86/x86_64 #pragma omp declare simd builds a sse2, avx,
- # avx2 and avx512f clone. Only the right clone for the
- # specified arch will be chosen, but still we need to at least
- # be able to assemble avx512f.
- if { (([istarget i?86-*-*] || [istarget x86_64-*-*])
- && [check_effective_target_avx512f]) } {
- set et_vect_simd_clones_saved($et_index) 1
- }
- }
-
- verbose "check_effective_target_vect_simd_clones:\
- returning $et_vect_simd_clones_saved($et_index)" 2
- return $et_vect_simd_clones_saved($et_index)
+ # On i?86/x86_64 #pragma omp declare simd builds a sse2, avx,
+ # avx2 and avx512f clone. Only the right clone for the
+ # specified arch will be chosen, but still we need to at least
+ # be able to assemble avx512f.
+ return [check_cached_effective_target_indexed vect_simd_clones {
+ expr { (([istarget i?86-*-*] || [istarget x86_64-*-*])
+ && [check_effective_target_avx512f])
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if this is a AArch64 target supporting big endian
}]
}
+# Return 1 if this is an AArch64 target supporting SVE.
+proc check_effective_target_aarch64_sve { } {
+ if { ![istarget aarch64*-*-*] } {
+ return 0
+ }
+ return [check_no_compiler_messages aarch64_sve assembly {
+ #if !defined (__ARM_FEATURE_SVE)
+ #error FOO
+ #endif
+ }]
+}
+
+# Return the size in bits of an SVE vector, or 0 if the size is variable.
+proc aarch64_sve_bits { } {
+ return [check_cached_effective_target aarch64_sve_bits {
+ global tool
+
+ set src dummy[pid].c
+ set f [open $src "w"]
+ puts $f "int bits = __ARM_FEATURE_SVE_BITS;"
+ close $f
+ set output [${tool}_target_compile $src "" preprocess ""]
+ file delete $src
+
+ regsub {.*bits = ([^;]*);.*} $output {\1} bits
+ expr { $bits }
+ }]
+}
+
# Return 1 if this is a compiler supporting ARC atomic operations
proc check_effective_target_arc_atomic { } {
return [check_no_compiler_messages arc_atomic assembly {
}
+# Return 1 if this is an ARM target supporting -mfloat-abi=soft. Some
+# multilibs may be incompatible with this option.
+
+proc check_effective_target_arm_soft_ok { } {
+ if { [check_effective_target_arm32] } {
+ return [check_no_compiler_messages arm_soft_ok executable {
+ int main() { return 0;}
+ } "-mfloat-abi=soft"]
+ } else {
+ return 0
+ }
+}
+
# Return 1 if this is an ARM target supporting -mfpu=vfp
# -mfloat-abi=softfp. Some multilibs may be incompatible with these
# options.
check_effective_target_arm_neon_fp16_ok_nocache]
}
+# Return 1 if this is an ARM target supporting -mfpu=neon-fp16
+# and -mfloat-abi=softfp together. Some multilibs may be
+# incompatible with these options. Also set et_arm_neon_softfp_fp16_flags to
+# the best options to add.
+
+proc check_effective_target_arm_neon_softfp_fp16_ok_nocache { } {
+ global et_arm_neon_softfp_fp16_flags
+ global et_arm_neon_flags
+ set et_arm_neon_softfp_fp16_flags ""
+ if { [check_effective_target_arm32]
+ && [check_effective_target_arm_neon_ok] } {
+ foreach flags {"-mfpu=neon-fp16 -mfloat-abi=softfp"
+ "-mfpu=neon-fp16 -mfloat-abi=softfp -mfp16-format=ieee"} {
+ if { [check_no_compiler_messages_nocache arm_neon_softfp_fp16_ok object {
+ #include "arm_neon.h"
+ float16x4_t
+ foo (float32x4_t arg)
+ {
+ return vcvt_f16_f32 (arg);
+ }
+ } "$et_arm_neon_flags $flags"] } {
+ set et_arm_neon_softfp_fp16_flags [concat $et_arm_neon_flags $flags]
+ return 1
+ }
+ }
+ }
+
+ return 0
+}
+
+proc check_effective_target_arm_neon_softfp_fp16_ok { } {
+ return [check_cached_effective_target arm_neon_softfp_fp16_ok \
+ check_effective_target_arm_neon_softfp_fp16_ok_nocache]
+}
+
+
+
proc check_effective_target_arm_neon_fp16_hw { } {
if {! [check_effective_target_arm_neon_fp16_ok] } {
return 0
return "$flags $et_arm_neon_fp16_flags"
}
+proc add_options_for_arm_neon_softfp_fp16 { flags } {
+ if { ! [check_effective_target_arm_neon_softfp_fp16_ok] } {
+ return "$flags"
+ }
+ global et_arm_neon_softfp_fp16_flags
+ return "$flags $et_arm_neon_softfp_fp16_flags"
+}
+
# Return 1 if this is an ARM target supporting the FP16 alternative
# format. Some multilibs may be incompatible with the options needed. Also
# set et_arm_neon_fp16_flags to the best options to add.
# extension (eg. ARMv8.1-A) since there is no macro defined for them. See
# how only __ARM_ARCH_8A__ is checked for ARMv8.1-A.
# Usage: /* { dg-require-effective-target arm_arch_v5_ok } */
-# /* { dg-add-options arm_arch_v5 } */
-# /* { dg-require-effective-target arm_arch_v5_multilib } */
+# /* { dg-add-options arm_arch_v5t } */
+# /* { dg-require-effective-target arm_arch_v5t_multilib } */
foreach { armfunc armflag armdefs } {
v4 "-march=armv4 -marm" __ARM_ARCH_4__
v4t "-march=armv4t" __ARM_ARCH_4T__
- v5 "-march=armv5 -marm" __ARM_ARCH_5__
v5t "-march=armv5t" __ARM_ARCH_5T__
v5te "-march=armv5te" __ARM_ARCH_5TE__
v6 "-march=armv6" __ARM_ARCH_6__
v7ve "-march=armv7ve -marm"
"__ARM_ARCH_7A__ && __ARM_FEATURE_IDIV"
v8a "-march=armv8-a" __ARM_ARCH_8A__
- v8_1a "-march=armv8.1a" __ARM_ARCH_8A__
- v8_2a "-march=armv8.2a" __ARM_ARCH_8A__
+ v8_1a "-march=armv8.1-a" __ARM_ARCH_8A__
+ v8_2a "-march=armv8.2-a" __ARM_ARCH_8A__
v8m_base "-march=armv8-m.base -mthumb -mfloat-abi=soft"
__ARM_ARCH_8M_BASE__
v8m_main "-march=armv8-m.main -mthumb" __ARM_ARCH_8M_MAIN__
#if !(DEFS)
#error !(DEFS)
#endif
+ int
+ main (void)
+ {
+ return 0;
+ }
} "FLAG" ]
}
} [add_options_for_arm_neon ""]]
}
+# Return true if this is an AArch64 target that can run SVE code.
+
+proc check_effective_target_aarch64_sve_hw { } {
+ if { ![istarget aarch64*-*-*] } {
+ return 0
+ }
+ return [check_runtime aarch64_sve_hw_available {
+ int
+ main (void)
+ {
+ asm volatile ("ptrue p0.b");
+ return 0;
+ }
+ }]
+}
+
+# Return true if this is an AArch64 target that can run SVE code and
+# if its SVE vectors have exactly BITS bits.
+
+proc aarch64_sve_hw_bits { bits } {
+ if { ![check_effective_target_aarch64_sve_hw] } {
+ return 0
+ }
+ return [check_runtime aarch64_sve${bits}_hw [subst {
+ int
+ main (void)
+ {
+ int res;
+ asm volatile ("cntd %0" : "=r" (res));
+ if (res * 64 != $bits)
+ __builtin_abort ();
+ return 0;
+ }
+ }]]
+}
+
+# Return true if this is an AArch64 target that can run SVE code and
+# if its SVE vectors have exactly 256 bits.
+
+proc check_effective_target_aarch64_sve256_hw { } {
+ return [aarch64_sve_hw_bits 256]
+}
+
proc check_effective_target_arm_neonv2_hw { } {
return [check_runtime arm_neon_hwv2_available {
#include "arm_neon.h"
} [add_options_for_arm_neonv2 ""]]
}
+# ID_AA64PFR1_EL1.BT using bits[3:0] == 1 implies BTI implimented.
+proc check_effective_target_aarch64_bti_hw { } {
+ if { ![istarget aarch64*-*-*] } {
+ return 0
+ }
+ return [check_runtime aarch64_bti_hw_available {
+ int
+ main (void)
+ {
+ int a;
+ asm volatile ("mrs %0, id_aa64pfr1_el1" : "=r" (a));
+ return !((a & 0xf) == 1);
+ }
+ } "-O2" ]
+}
+
+# Return 1 if GCC was configured with --enable-standard-branch-protection
+proc check_effective_target_default_branch_protection { } {
+ return [check_configured_with "enable-standard-branch-protection"]
+}
+
# Return 1 if the target supports the ARMv8.1 Adv.SIMD extension, 0
# otherwise. The test is valid for AArch64 and ARM. Record the command
# line options needed.
check_effective_target_arm_v8_2a_fp16_neon_ok_nocache]
}
+# Return 1 if the target supports ARMv8.2 Adv.SIMD Dot Product
+# instructions, 0 otherwise. The test is valid for ARM and for AArch64.
+# Record the command line options needed.
+
+proc check_effective_target_arm_v8_2a_dotprod_neon_ok_nocache { } {
+ global et_arm_v8_2a_dotprod_neon_flags
+ set et_arm_v8_2a_dotprod_neon_flags ""
+
+ if { ![istarget arm*-*-*] && ![istarget aarch64*-*-*] } {
+ return 0;
+ }
+
+ # Iterate through sets of options to find the compiler flags that
+ # need to be added to the -march option.
+ foreach flags {"" "-mfloat-abi=softfp -mfpu=neon-fp-armv8" "-mfloat-abi=hard -mfpu=neon-fp-armv8"} {
+ if { [check_no_compiler_messages_nocache \
+ arm_v8_2a_dotprod_neon_ok object {
+ #include <stdint.h>
+ #if !defined (__ARM_FEATURE_DOTPROD)
+ #error "__ARM_FEATURE_DOTPROD not defined"
+ #endif
+ } "$flags -march=armv8.2-a+dotprod"] } {
+ set et_arm_v8_2a_dotprod_neon_flags "$flags -march=armv8.2-a+dotprod"
+ return 1
+ }
+ }
+
+ return 0;
+}
+
+proc check_effective_target_arm_v8_2a_dotprod_neon_ok { } {
+ return [check_cached_effective_target arm_v8_2a_dotprod_neon_ok \
+ check_effective_target_arm_v8_2a_dotprod_neon_ok_nocache]
+}
+
+proc add_options_for_arm_v8_2a_dotprod_neon { flags } {
+ if { ! [check_effective_target_arm_v8_2a_dotprod_neon_ok] } {
+ return "$flags"
+ }
+ global et_arm_v8_2a_dotprod_neon_flags
+ return "$flags $et_arm_v8_2a_dotprod_neon_flags"
+}
+
+# Return 1 if the target supports FP16 VFMAL and VFMSL
+# instructions, 0 otherwise.
+# Record the command line options needed.
+
+proc check_effective_target_arm_fp16fml_neon_ok_nocache { } {
+ global et_arm_fp16fml_neon_flags
+ set et_arm_fp16fml_neon_flags ""
+
+ if { ![istarget arm*-*-*] } {
+ return 0;
+ }
+
+ # Iterate through sets of options to find the compiler flags that
+ # need to be added to the -march option.
+ foreach flags {"" "-mfloat-abi=softfp -mfpu=neon-fp-armv8" "-mfloat-abi=hard -mfpu=neon-fp-armv8"} {
+ if { [check_no_compiler_messages_nocache \
+ arm_fp16fml_neon_ok assembly {
+ #include <arm_neon.h>
+ float32x2_t
+ foo (float32x2_t r, float16x4_t a, float16x4_t b)
+ {
+ return vfmlal_high_f16 (r, a, b);
+ }
+ } "$flags -march=armv8.2-a+fp16fml"] } {
+ set et_arm_fp16fml_neon_flags "$flags -march=armv8.2-a+fp16fml"
+ return 1
+ }
+ }
+
+ return 0;
+}
+
+proc check_effective_target_arm_fp16fml_neon_ok { } {
+ return [check_cached_effective_target arm_fp16fml_neon_ok \
+ check_effective_target_arm_fp16fml_neon_ok_nocache]
+}
+
+proc add_options_for_arm_fp16fml_neon { flags } {
+ if { ! [check_effective_target_arm_fp16fml_neon_ok] } {
+ return "$flags"
+ }
+ global et_arm_fp16fml_neon_flags
+ return "$flags $et_arm_fp16fml_neon_flags"
+}
+
# Return 1 if the target supports executing ARMv8 NEON instructions, 0
# otherwise.
} [add_options_for_arm_v8_2a_fp16_neon ""]]
}
+# Return 1 if the target supports executing AdvSIMD instructions from ARMv8.2
+# with the Dot Product extension, 0 otherwise. The test is valid for ARM and for
+# AArch64.
+
+proc check_effective_target_arm_v8_2a_dotprod_neon_hw { } {
+ if { ![check_effective_target_arm_v8_2a_dotprod_neon_ok] } {
+ return 0;
+ }
+ return [check_runtime arm_v8_2a_dotprod_neon_hw_available {
+ #include "arm_neon.h"
+ int
+ main (void)
+ {
+
+ uint32x2_t results = {0,0};
+ uint8x8_t a = {1,1,1,1,2,2,2,2};
+ uint8x8_t b = {2,2,2,2,3,3,3,3};
+
+ #ifdef __ARM_ARCH_ISA_A64
+ asm ("udot %0.2s, %1.8b, %2.8b"
+ : "=w"(results)
+ : "w"(a), "w"(b)
+ : /* No clobbers. */);
+
+ #else
+ asm ("vudot.u8 %P0, %P1, %P2"
+ : "=w"(results)
+ : "w"(a), "w"(b)
+ : /* No clobbers. */);
+ #endif
+
+ return (results[0] == 8 && results[1] == 24) ? 1 : 0;
+ }
+ } [add_options_for_arm_v8_2a_dotprod_neon ""]]
+}
+
# Return 1 if this is a ARM target with NEON enabled.
proc check_effective_target_arm_neon { } {
return "$flags -mmsa"
}
+# Add the options needed for MIPS Loongson MMI Architecture.
+
+proc add_options_for_mips_loongson_mmi { flags } {
+ if { ! [check_effective_target_mips_loongson_mmi] } {
+ return "$flags"
+ }
+ return "$flags -mloongson-mmi"
+}
+
+
# Return 1 if this a Loongson-2E or -2F target using an ABI that supports
# the Loongson vector modes.
-proc check_effective_target_mips_loongson { } {
+proc check_effective_target_mips_loongson_mmi { } {
return [check_no_compiler_messages loongson assembly {
+ #if !defined(__mips_loongson_mmi)
+ #error !__mips_loongson_mmi
+ #endif
#if !defined(__mips_loongson_vector_rev)
#error !__mips_loongson_vector_rev
#endif
}]
}
+# Return 1 if this is an ARM target that uses the soft float ABI
+# with no floating-point instructions at all (e.g. -mfloat-abi=soft).
+
+proc check_effective_target_arm_softfloat { } {
+ return [check_no_compiler_messages arm_softfloat object {
+ #if !defined(__SOFTFP__)
+ #error not soft-float EABI
+ #else
+ int dummy;
+ #endif
+ }]
+}
+
# Return 1 if this is an ARM target supporting -mcpu=iwmmxt.
# Some multilibs may be incompatible with this option.
} "-O2 -mthumb" ]
}
+# Return true if LDRD/STRD instructions are available on this target.
+proc check_effective_target_arm_ldrd_strd_ok { } {
+ if { ![check_effective_target_arm32] } {
+ return 0;
+ }
+
+ return [check_no_compiler_messages arm_ldrd_strd_ok object {
+ int main(void)
+ {
+ __UINT64_TYPE__ a = 1, b = 10;
+ __UINT64_TYPE__ *c = &b;
+ // `a` will be in a valid register since it's a DImode quantity.
+ asm ("ldrd %0, %1"
+ : "=r" (a)
+ : "m" (c));
+ return a == 10;
+ }
+ }]
+}
+
# Return 1 if this is a PowerPC target supporting -meabi.
proc check_effective_target_powerpc_eabi_ok { } {
}
}
+# Return 1 if current options define float128, 0 otherwise.
+
+proc check_effective_target_ppc_float128 { } {
+ return [check_no_compiler_messages_nocache ppc_float128 object {
+ #ifndef __FLOAT128__
+ nope no good
+ #endif
+ }]
+}
+
+# Return 1 if current options generate float128 insns, 0 otherwise.
+
+proc check_effective_target_ppc_float128_insns { } {
+ return [check_no_compiler_messages_nocache ppc_float128 object {
+ #ifndef __FLOAT128_HARDWARE__
+ nope no good
+ #endif
+ }]
+}
+
+# Return 1 if current options generate VSX instructions, 0 otherwise.
+
+proc check_effective_target_powerpc_vsx { } {
+ return [check_no_compiler_messages_nocache powerpc_vsx object {
+ #ifndef __VSX__
+ nope no vsx
+ #endif
+ }]
+}
+
# Return 1 if this is a PowerPC target supporting -mvsx
proc check_effective_target_powerpc_vsx_ok { } {
# Return 1 if the target supports hardware vector shift operation.
proc check_effective_target_vect_shift { } {
- global et_vect_shift_saved
- global et_index
-
- if [info exists et_vect_shift_saved($et_index)] {
- verbose "check_effective_target_vect_shift: using cached result" 2
- } else {
- set et_vect_shift_saved($et_index) 0
- if { ([istarget powerpc*-*-*]
+ return [check_cached_effective_target_indexed vect_shift {
+ expr {([istarget powerpc*-*-*]
&& ![istarget powerpc-*-linux*paired*])
|| [istarget ia64-*-*]
|| [istarget i?86-*-*] || [istarget x86_64-*-*]
|| [is-effective-target arm_neon]
|| ([istarget mips*-*-*]
&& ([et-is-effective-target mips_msa]
- || [et-is-effective-target mips_loongson]))
+ || [et-is-effective-target mips_loongson_mmi]))
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) } {
- set et_vect_shift_saved($et_index) 1
- }
- }
+ && [check_effective_target_s390_vx])
+ || [istarget amdgcn-*-*] }}]
+}
- verbose "check_effective_target_vect_shift:\
- returning $et_vect_shift_saved($et_index)" 2
- return $et_vect_shift_saved($et_index)
+# Return 1 if the target supports hardware vector shift by register operation.
+
+proc check_effective_target_vect_var_shift { } {
+ return [check_cached_effective_target_indexed vect_var_shift {
+ expr {(([istarget i?86-*-*] || [istarget x86_64-*-*])
+ && [check_avx2_available])
+ }}]
}
proc check_effective_target_whole_vector_shift { } {
|| ([is-effective-target arm_neon]
&& [check_effective_target_arm_little_endian])
|| ([istarget mips*-*-*]
- && [et-is-effective-target mips_loongson])
+ && [et-is-effective-target mips_loongson_mmi])
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) } {
+ && [check_effective_target_s390_vx])
+ || [istarget amdgcn-*-*] } {
set answer 1
} else {
set answer 0
# Return 1 if the target supports vector bswap operations.
proc check_effective_target_vect_bswap { } {
- global et_vect_bswap_saved
- global et_index
-
- if [info exists et_vect_bswap_saved($et_index)] {
- verbose "check_effective_target_vect_bswap: using cached result" 2
- } else {
- set et_vect_bswap_saved($et_index) 0
- if { [istarget aarch64*-*-*]
- || [is-effective-target arm_neon]
- } {
- set et_vect_bswap_saved($et_index) 1
- }
- }
-
- verbose "check_effective_target_vect_bswap:\
- returning $et_vect_bswap_saved($et_index)" 2
- return $et_vect_bswap_saved($et_index)
+ return [check_cached_effective_target_indexed vect_bswap {
+ expr { [istarget aarch64*-*-*]
+ || [is-effective-target arm_neon]
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if the target supports hardware vector shift operation for char.
proc check_effective_target_vect_shift_char { } {
- global et_vect_shift_char_saved
- global et_index
-
- if [info exists et_vect_shift_char_saved($et_index)] {
- verbose "check_effective_target_vect_shift_char: using cached result" 2
- } else {
- set et_vect_shift_char_saved($et_index) 0
- if { ([istarget powerpc*-*-*]
+ return [check_cached_effective_target_indexed vect_shift_char {
+ expr { ([istarget powerpc*-*-*]
&& ![istarget powerpc-*-linux*paired*])
|| [is-effective-target arm_neon]
|| ([istarget mips*-*-*]
&& [et-is-effective-target mips_msa])
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) } {
- set et_vect_shift_char_saved($et_index) 1
- }
- }
-
- verbose "check_effective_target_vect_shift_char:\
- returning $et_vect_shift_char_saved($et_index)" 2
- return $et_vect_shift_char_saved($et_index)
+ && [check_effective_target_s390_vx])
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if the target supports hardware vectors of long, 0 otherwise.
|| ([istarget mips*-*-*]
&& [et-is-effective-target mips_msa])
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) } {
+ && [check_effective_target_s390_vx])
+ || [istarget amdgcn-*-*] } {
set answer 1
} else {
set answer 0
return $answer
}
-# Return 1 if the target supports hardware vectors of float, 0 otherwise.
+# Return 1 if the target supports hardware vectors of float when
+# -funsafe-math-optimizations is enabled, 0 otherwise.
#
# This won't change for different subtargets so cache the result.
proc check_effective_target_vect_float { } {
- global et_vect_float_saved
- global et_index
-
- if [info exists et_vect_float_saved($et_index)] {
- verbose "check_effective_target_vect_float: using cached result" 2
- } else {
- set et_vect_float_saved($et_index) 0
- if { [istarget i?86-*-*] || [istarget x86_64-*-*]
+ return [check_cached_effective_target_indexed vect_float {
+ expr { [istarget i?86-*-*] || [istarget x86_64-*-*]
|| [istarget powerpc*-*-*]
|| [istarget spu-*-*]
|| [istarget mips-sde-elf]
&& [et-is-effective-target mips_msa])
|| [is-effective-target arm_neon]
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vxe]) } {
- set et_vect_float_saved($et_index) 1
- }
- }
+ && [check_effective_target_s390_vxe])
+ || [istarget amdgcn-*-*] }}]
+}
- verbose "check_effective_target_vect_float:\
- returning $et_vect_float_saved($et_index)" 2
- return $et_vect_float_saved($et_index)
+# Return 1 if the target supports hardware vectors of float without
+# -funsafe-math-optimizations being enabled, 0 otherwise.
+
+proc check_effective_target_vect_float_strict { } {
+ return [expr { [check_effective_target_vect_float]
+ && ![istarget arm*-*-*] }]
}
# Return 1 if the target supports hardware vectors of double, 0 otherwise.
# This won't change for different subtargets so cache the result.
proc check_effective_target_vect_double { } {
- global et_vect_double_saved
- global et_index
-
- if [info exists et_vect_double_saved($et_index)] {
- verbose "check_effective_target_vect_double: using cached result" 2
- } else {
- set et_vect_double_saved($et_index) 0
- if { (([istarget i?86-*-*] || [istarget x86_64-*-*])
- && [check_no_compiler_messages vect_double assembly {
+ return [check_cached_effective_target_indexed vect_double {
+ expr { (([istarget i?86-*-*] || [istarget x86_64-*-*])
+ && [check_no_compiler_messages vect_double assembly {
#ifdef __tune_atom__
# error No double vectorizer support.
#endif
- }])
+ }])
|| [istarget aarch64*-*-*]
|| [istarget spu-*-*]
|| ([istarget powerpc*-*-*] && [check_vsx_hw_available])
|| ([istarget mips*-*-*]
&& [et-is-effective-target mips_msa])
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) } {
- set et_vect_double_saved($et_index) 1
- }
- }
+ && [check_effective_target_s390_vx])
+ || [istarget amdgcn-*-*]} }]
+}
+
+# Return 1 if the target supports conditional addition, subtraction,
+# multiplication, division, minimum and maximum on vectors of double,
+# via the cond_ optabs. Return 0 otherwise.
- verbose "check_effective_target_vect_double:\
- returning $et_vect_double_saved($et_index)" 2
- return $et_vect_double_saved($et_index)
+proc check_effective_target_vect_double_cond_arith { } {
+ return [check_effective_target_aarch64_sve]
}
# Return 1 if the target supports hardware vectors of long long, 0 otherwise.
# This won't change for different subtargets so cache the result.
proc check_effective_target_vect_long_long { } {
- global et_vect_long_long_saved
- global et_index
-
- if [info exists et_vect_long_long_saved($et_index)] {
- verbose "check_effective_target_vect_long_long: using cached result" 2
- } else {
- set et_vect_long_long_saved($et_index) 0
- if { [istarget i?86-*-*] || [istarget x86_64-*-*]
+ return [check_cached_effective_target_indexed vect_long_long {
+ expr { [istarget i?86-*-*] || [istarget x86_64-*-*]
|| ([istarget mips*-*-*]
&& [et-is-effective-target mips_msa])
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) } {
- set et_vect_long_long_saved($et_index) 1
- }
- }
-
- verbose "check_effective_target_vect_long_long:\
- returning $et_vect_long_long_saved($et_index)" 2
- return $et_vect_long_long_saved($et_index)
+ && [check_effective_target_s390_vx]) }}]
}
# This won't change for different subtargets so cache the result.
proc check_effective_target_vect_no_int_min_max { } {
- global et_vect_no_int_min_max_saved
- global et_index
-
- if [info exists et_vect_no_int_min_max_saved($et_index)] {
- verbose "check_effective_target_vect_no_int_min_max:\
- using cached result" 2
- } else {
- set et_vect_no_int_min_max_saved($et_index) 0
- if { [istarget sparc*-*-*]
+ return [check_cached_effective_target_indexed vect_no_int_min_max {
+ expr { [istarget sparc*-*-*]
|| [istarget spu-*-*]
|| [istarget alpha*-*-*]
|| ([istarget mips*-*-*]
- && [et-is-effective-target mips_loongson]) } {
- set et_vect_no_int_min_max_saved($et_index) 1
- }
- }
- verbose "check_effective_target_vect_no_int_min_max:\
- returning $et_vect_no_int_min_max_saved($et_index)" 2
- return $et_vect_no_int_min_max_saved($et_index)
+ && [et-is-effective-target mips_loongson_mmi]) }}]
}
# Return 1 if the target plus current options does not support a vector
# This won't change for different subtargets so cache the result.
proc check_effective_target_vect_no_int_add { } {
- global et_vect_no_int_add_saved
- global et_index
-
- if [info exists et_vect_no_int_add_saved($et_index)] {
- verbose "check_effective_target_vect_no_int_add: using cached result" 2
- } else {
- set et_vect_no_int_add_saved($et_index) 0
- # Alpha only supports vector add on V8QI and V4HI.
- if { [istarget alpha*-*-*] } {
- set et_vect_no_int_add_saved($et_index) 1
- }
- }
- verbose "check_effective_target_vect_no_int_add:\
- returning $et_vect_no_int_add_saved($et_index)" 2
- return $et_vect_no_int_add_saved($et_index)
+ # Alpha only supports vector add on V8QI and V4HI.
+ return [check_cached_effective_target_indexed vect_no_int_add {
+ expr { [istarget alpha*-*-*] }}]
}
# Return 1 if the target plus current options does not support vector
# This won't change for different subtargets so cache the result.
proc check_effective_target_vect_no_bitwise { } {
- global et_vect_no_bitwise_saved
- global et_index
-
- if [info exists et_vect_no_bitwise_saved($et_index)] {
- verbose "check_effective_target_vect_no_bitwise: using cached result" 2
- } else {
- set et_vect_no_bitwise_saved($et_index) 0
- }
- verbose "check_effective_target_vect_no_bitwise:\
- returning $et_vect_no_bitwise_saved($et_index)" 2
- return $et_vect_no_bitwise_saved($et_index)
+ return [check_cached_effective_target_indexed vect_no_bitwise { return 0 }]
}
# Return 1 if the target plus current options supports vector permutation,
# This won't change for different subtargets so cache the result.
proc check_effective_target_vect_perm { } {
- global et_vect_perm_saved
- global et_index
-
- if [info exists et_vect_perm_saved($et_index)] {
- verbose "check_effective_target_vect_perm: using cached result" 2
- } else {
- set et_vect_perm_saved($et_index) 0
- if { [is-effective-target arm_neon]
+ return [check_cached_effective_target_indexed vect_perm {
+ expr { [is-effective-target arm_neon]
|| [istarget aarch64*-*-*]
|| [istarget powerpc*-*-*]
|| [istarget spu-*-*]
&& ([et-is-effective-target mpaired_single]
|| [et-is-effective-target mips_msa]))
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) } {
- set et_vect_perm_saved($et_index) 1
- }
+ && [check_effective_target_s390_vx])
+ || [istarget amdgcn-*-*] }}]
+}
+
+# Return 1 if, for some VF:
+#
+# - the target's default vector size is VF * ELEMENT_BITS bits
+#
+# - it is possible to implement the equivalent of:
+#
+# int<ELEMENT_BITS>_t s1[COUNT][COUNT * VF], s2[COUNT * VF];
+# for (int i = 0; i < COUNT; ++i)
+# for (int j = 0; j < COUNT * VF; ++j)
+# s1[i][j] = s2[j - j % COUNT + i]
+#
+# using only a single 2-vector permute for each vector in s1.
+#
+# E.g. for COUNT == 3 and vector length 4, the two arrays would be:
+#
+# s2 | a0 a1 a2 a3 | b0 b1 b2 b3 | c0 c1 c2 c3
+# ------+-------------+-------------+------------
+# s1[0] | a0 a0 a0 a3 | a3 a3 b2 b2 | b2 c1 c1 c1
+# s1[1] | a1 a1 a1 b0 | b0 b0 b3 b3 | b3 c2 c2 c2
+# s1[2] | a2 a2 a2 b1 | b1 b1 c0 c0 | c0 c3 c3 c3
+#
+# Each s1 permute requires only two of a, b and c.
+#
+# The distance between the start of vector n in s1[0] and the start
+# of vector n in s2 is:
+#
+# A = (n * VF) % COUNT
+#
+# The corresponding value for the end of vector n is:
+#
+# B = (n * VF + VF - 1) % COUNT
+#
+# Subtracting i from each value gives the corresponding difference
+# for s1[i]. The condition being tested by this function is false
+# iff A - i > 0 and B - i < 0 for some i and n, such that the first
+# element for s1[i] comes from vector n - 1 of s2 and the last element
+# comes from vector n + 1 of s2. The condition is therefore true iff
+# A <= B for all n. This is turn means the condition is true iff:
+#
+# (n * VF) % COUNT + (VF - 1) % COUNT < COUNT
+#
+# for all n. COUNT - (n * VF) % COUNT is bounded by gcd (VF, COUNT),
+# and will be that value for at least one n in [0, COUNT), so we want:
+#
+# (VF - 1) % COUNT < gcd (VF, COUNT)
+
+proc vect_perm_supported { count element_bits } {
+ set vector_bits [lindex [available_vector_sizes] 0]
+ # The number of vectors has to be a power of 2 when permuting
+ # variable-length vectors.
+ if { $vector_bits <= 0 && ($count & -$count) != $count } {
+ return 0
}
- verbose "check_effective_target_vect_perm:\
- returning $et_vect_perm_saved($et_index)" 2
- return $et_vect_perm_saved($et_index)
+ set vf [expr { $vector_bits / $element_bits }]
+
+ # Compute gcd (VF, COUNT).
+ set gcd $vf
+ set temp1 $count
+ while { $temp1 > 0 } {
+ set temp2 [expr { $gcd % $temp1 }]
+ set gcd $temp1
+ set temp1 $temp2
+ }
+ return [expr { ($vf - 1) % $count < $gcd }]
+}
+
+# Return 1 if the target supports SLP permutation of 3 vectors when each
+# element has 32 bits.
+
+proc check_effective_target_vect_perm3_int { } {
+ return [expr { [check_effective_target_vect_perm]
+ && [vect_perm_supported 3 32] }]
}
# Return 1 if the target plus current options supports vector permutation
# This won't change for different subtargets so cache the result.
proc check_effective_target_vect_perm_byte { } {
- global et_vect_perm_byte_saved
- global et_index
-
- if [info exists et_vect_perm_byte_saved($et_index)] {
- verbose "check_effective_target_vect_perm_byte: using cached result" 2
- } else {
- set et_vect_perm_byte_saved($et_index) 0
- if { ([is-effective-target arm_neon]
+ return [check_cached_effective_target_indexed vect_perm_byte {
+ expr { ([is-effective-target arm_neon]
&& [is-effective-target arm_little_endian])
|| ([istarget aarch64*-*-*]
&& [is-effective-target aarch64_little_endian])
|| ([istarget mips-*.*]
&& [et-is-effective-target mips_msa])
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) } {
- set et_vect_perm_byte_saved($et_index) 1
- }
- }
- verbose "check_effective_target_vect_perm_byte:\
- returning $et_vect_perm_byte_saved($et_index)" 2
- return $et_vect_perm_byte_saved($et_index)
+ && [check_effective_target_s390_vx])
+ || [istarget amdgcn-*-*] }}]
+}
+
+# Return 1 if the target supports SLP permutation of 3 vectors when each
+# element has 8 bits.
+
+proc check_effective_target_vect_perm3_byte { } {
+ return [expr { [check_effective_target_vect_perm_byte]
+ && [vect_perm_supported 3 8] }]
}
# Return 1 if the target plus current options supports vector permutation
# This won't change for different subtargets so cache the result.
proc check_effective_target_vect_perm_short { } {
- global et_vect_perm_short_saved
- global et_index
-
- if [info exists et_vect_perm_short_saved($et_index)] {
- verbose "check_effective_target_vect_perm_short: using cached result" 2
- } else {
- set et_vect_perm_short_saved($et_index) 0
- if { ([is-effective-target arm_neon]
+ return [check_cached_effective_target_indexed vect_perm_short {
+ expr { ([is-effective-target arm_neon]
&& [is-effective-target arm_little_endian])
|| ([istarget aarch64*-*-*]
&& [is-effective-target aarch64_little_endian])
|| [istarget powerpc*-*-*]
|| [istarget spu-*-*]
+ || (([istarget i?86-*-*] || [istarget x86_64-*-*])
+ && [check_ssse3_available])
|| ([istarget mips*-*-*]
&& [et-is-effective-target mips_msa])
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) } {
- set et_vect_perm_short_saved($et_index) 1
- }
- }
- verbose "check_effective_target_vect_perm_short:\
- returning $et_vect_perm_short_saved($et_index)" 2
- return $et_vect_perm_short_saved($et_index)
+ && [check_effective_target_s390_vx])
+ || [istarget amdgcn-*-*] }}]
+}
+
+# Return 1 if the target supports SLP permutation of 3 vectors when each
+# element has 16 bits.
+
+proc check_effective_target_vect_perm3_short { } {
+ return [expr { [check_effective_target_vect_perm_short]
+ && [vect_perm_supported 3 16] }]
}
# Return 1 if the target plus current options supports folding of
# This won't change for different subtargets so cache the result.
proc check_effective_target_xorsign { } {
- global et_xorsign_saved
- global et_index
-
- if [info exists et_xorsign_saved($et_index)] {
- verbose "check_effective_target_xorsign: using cached result" 2
- } else {
- set et_xorsign_saved($et_index) 0
- if { [istarget aarch64*-*-*] || [istarget arm*-*-*] } {
- set et_xorsign_saved($et_index) 1
- }
- }
- verbose "check_effective_target_xorsign:\
- returning $et_xorsign_saved($et_index)" 2
- return $et_xorsign_saved($et_index)
+ return [check_cached_effective_target_indexed xorsign {
+ expr { [istarget i?86-*-*] || [istarget x86_64-*-*]
+ || [istarget aarch64*-*-*] || [istarget arm*-*-*] }}]
}
# Return 1 if the target plus current options supports a vector
# This won't change for different subtargets so cache the result.
proc check_effective_target_vect_widen_sum_hi_to_si_pattern { } {
- global et_vect_widen_sum_hi_to_si_pattern_saved
- global et_index
-
- if [info exists et_vect_widen_sum_hi_to_si_pattern_saved($et_index)] {
- verbose "check_effective_target_vect_widen_sum_hi_to_si_pattern:\
- using cached result" 2
- } else {
- set et_vect_widen_sum_hi_to_si_pattern_saved($et_index) 0
- if { [istarget powerpc*-*-*]
- || [istarget aarch64*-*-*]
+ return [check_cached_effective_target_indexed vect_widen_sum_hi_to_si_pattern {
+ expr { [istarget powerpc*-*-*]
+ || ([istarget aarch64*-*-*]
+ && ![check_effective_target_aarch64_sve])
|| [is-effective-target arm_neon]
- || [istarget ia64-*-*] } {
- set et_vect_widen_sum_hi_to_si_pattern_saved($et_index) 1
- }
- }
- verbose "check_effective_target_vect_widen_sum_hi_to_si_pattern:\
- returning $et_vect_widen_sum_hi_to_si_pattern_saved($et_index)" 2
- return $et_vect_widen_sum_hi_to_si_pattern_saved($et_index)
+ || [istarget ia64-*-*] }}]
}
# Return 1 if the target plus current options supports a vector
# promotion (unpacking) from shorts to ints.
#
# This won't change for different subtargets so cache the result.
-
+
proc check_effective_target_vect_widen_sum_hi_to_si { } {
- global et_vect_widen_sum_hi_to_si_saved
- global et_index
-
- if [info exists et_vect_widen_sum_hi_to_si_saved($et_index)] {
- verbose "check_effective_target_vect_widen_sum_hi_to_si:\
- using cached result" 2
- } else {
- set et_vect_widen_sum_hi_to_si_saved($et_index) \
- [check_effective_target_vect_unpack]
- if { [istarget powerpc*-*-*]
- || [istarget ia64-*-*] } {
- set et_vect_widen_sum_hi_to_si_saved($et_index) 1
- }
- }
- verbose "check_effective_target_vect_widen_sum_hi_to_si:\
- returning $et_vect_widen_sum_hi_to_si_saved($et_index)" 2
- return $et_vect_widen_sum_hi_to_si_saved($et_index)
-}
+ return [check_cached_effective_target_indexed vect_widen_sum_hi_to_si {
+ expr { [check_effective_target_vect_unpack]
+ || [istarget powerpc*-*-*]
+ || [istarget ia64-*-*] }}]
+}
# Return 1 if the target plus current options supports a vector
# widening summation of *char* args into *short* result, 0 otherwise.
# This won't change for different subtargets so cache the result.
proc check_effective_target_vect_widen_sum_qi_to_hi { } {
- global et_vect_widen_sum_qi_to_hi_saved
- global et_index
-
- if [info exists et_vect_widen_sum_qi_to_hi_saved($et_index)] {
- verbose "check_effective_target_vect_widen_sum_qi_to_hi:\
- using cached result" 2
- } else {
- set et_vect_widen_sum_qi_to_hi_saved($et_index) 0
- if { [check_effective_target_vect_unpack]
+ return [check_cached_effective_target_indexed vect_widen_sum_qi_to_hi {
+ expr { [check_effective_target_vect_unpack]
|| [is-effective-target arm_neon]
- || [istarget ia64-*-*] } {
- set et_vect_widen_sum_qi_to_hi_saved($et_index) 1
- }
- }
- verbose "check_effective_target_vect_widen_sum_qi_to_hi:\
- returning $et_vect_widen_sum_qi_to_hi_saved($et_index)" 2
- return $et_vect_widen_sum_qi_to_hi_saved($et_index)
+ || [istarget ia64-*-*] }}]
}
# Return 1 if the target plus current options supports a vector
# This won't change for different subtargets so cache the result.
proc check_effective_target_vect_widen_sum_qi_to_si { } {
- global et_vect_widen_sum_qi_to_si_saved
- global et_index
-
- if [info exists et_vect_widen_sum_qi_to_si_saved($et_index)] {
- verbose "check_effective_target_vect_widen_sum_qi_to_si:\
- using cached result" 2
- } else {
- set et_vect_widen_sum_qi_to_si_saved($et_index) 0
- if { [istarget powerpc*-*-*] } {
- set et_vect_widen_sum_qi_to_si_saved($et_index) 1
- }
- }
- verbose "check_effective_target_vect_widen_sum_qi_to_si:\
- returning $et_vect_widen_sum_qi_to_si_saved($et_index)" 2
- return $et_vect_widen_sum_qi_to_si_saved($et_index)
+ return [check_cached_effective_target_indexed vect_widen_sum_qi_to_si {
+ expr { [istarget powerpc*-*-*] }}]
}
# Return 1 if the target plus current options supports a vector
proc check_effective_target_vect_widen_mult_qi_to_hi { } {
- global et_vect_widen_mult_qi_to_hi_saved
- global et_index
-
- if [info exists et_vect_widen_mult_qi_to_hi_saved($et_index)] {
- verbose "check_effective_target_vect_widen_mult_qi_to_hi:\
- using cached result" 2
- } else {
- if { [check_effective_target_vect_unpack]
- && [check_effective_target_vect_short_mult] } {
- set et_vect_widen_mult_qi_to_hi_saved($et_index) 1
- } else {
- set et_vect_widen_mult_qi_to_hi_saved($et_index) 0
- }
- if { [istarget powerpc*-*-*]
- || [istarget aarch64*-*-*]
- || [is-effective-target arm_neon]
- || ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) } {
- set et_vect_widen_mult_qi_to_hi_saved($et_index) 1
- }
- }
- verbose "check_effective_target_vect_widen_mult_qi_to_hi:\
- returning $et_vect_widen_mult_qi_to_hi_saved($et_index)" 2
- return $et_vect_widen_mult_qi_to_hi_saved($et_index)
+ return [check_cached_effective_target_indexed vect_widen_mult_qi_to_hi {
+ expr { ([check_effective_target_vect_unpack]
+ && [check_effective_target_vect_short_mult])
+ || ([istarget powerpc*-*-*]
+ || ([istarget aarch64*-*-*]
+ && ![check_effective_target_aarch64_sve])
+ || [is-effective-target arm_neon]
+ || ([istarget s390*-*-*]
+ && [check_effective_target_s390_vx]))
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if the target plus current options supports a vector
proc check_effective_target_vect_widen_mult_hi_to_si { } {
- global et_vect_widen_mult_hi_to_si_saved
- global et_index
-
- if [info exists et_vect_widen_mult_hi_to_si_saved($et_index)] {
- verbose "check_effective_target_vect_widen_mult_hi_to_si:\
- using cached result" 2
- } else {
- if { [check_effective_target_vect_unpack]
- && [check_effective_target_vect_int_mult] } {
- set et_vect_widen_mult_hi_to_si_saved($et_index) 1
- } else {
- set et_vect_widen_mult_hi_to_si_saved($et_index) 0
- }
- if { [istarget powerpc*-*-*]
- || [istarget spu-*-*]
- || [istarget ia64-*-*]
- || [istarget aarch64*-*-*]
- || [istarget i?86-*-*] || [istarget x86_64-*-*]
- || [is-effective-target arm_neon]
- || ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) } {
- set et_vect_widen_mult_hi_to_si_saved($et_index) 1
- }
- }
- verbose "check_effective_target_vect_widen_mult_hi_to_si:\
- returning $et_vect_widen_mult_hi_to_si_saved($et_index)" 2
- return $et_vect_widen_mult_hi_to_si_saved($et_index)
+ return [check_cached_effective_target_indexed vect_widen_mult_hi_to_si {
+ expr { ([check_effective_target_vect_unpack]
+ && [check_effective_target_vect_int_mult])
+ || ([istarget powerpc*-*-*]
+ || [istarget spu-*-*]
+ || [istarget ia64-*-*]
+ || ([istarget aarch64*-*-*]
+ && ![check_effective_target_aarch64_sve])
+ || [istarget i?86-*-*] || [istarget x86_64-*-*]
+ || [is-effective-target arm_neon]
+ || ([istarget s390*-*-*]
+ && [check_effective_target_s390_vx]))
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if the target plus current options supports a vector
# This won't change for different subtargets so cache the result.
proc check_effective_target_vect_widen_mult_qi_to_hi_pattern { } {
- global et_vect_widen_mult_qi_to_hi_pattern_saved
- global et_index
-
- if [info exists et_vect_widen_mult_qi_to_hi_pattern_saved($et_index)] {
- verbose "check_effective_target_vect_widen_mult_qi_to_hi_pattern:\
- using cached result" 2
- } else {
- set et_vect_widen_mult_qi_to_hi_pattern_saved($et_index) 0
- if { [istarget powerpc*-*-*]
- || ([is-effective-target arm_neon]
- && [check_effective_target_arm_little_endian])
- || ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) } {
- set et_vect_widen_mult_qi_to_hi_pattern_saved($et_index) 1
- }
- }
- verbose "check_effective_target_vect_widen_mult_qi_to_hi_pattern:\
- returning $et_vect_widen_mult_qi_to_hi_pattern_saved($et_index)" 2
- return $et_vect_widen_mult_qi_to_hi_pattern_saved($et_index)
+ return [check_cached_effective_target_indexed vect_widen_mult_qi_to_hi_pattern {
+ expr { [istarget powerpc*-*-*]
+ || ([is-effective-target arm_neon]
+ && [check_effective_target_arm_little_endian])
+ || ([istarget s390*-*-*]
+ && [check_effective_target_s390_vx])
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if the target plus current options supports a vector
# This won't change for different subtargets so cache the result.
proc check_effective_target_vect_widen_mult_hi_to_si_pattern { } {
- global et_vect_widen_mult_hi_to_si_pattern_saved
- global et_index
-
- if [info exists et_vect_widen_mult_hi_to_si_pattern_saved($et_index)] {
- verbose "check_effective_target_vect_widen_mult_hi_to_si_pattern:\
- using cached result" 2
- } else {
- set et_vect_widen_mult_hi_to_si_pattern_saved($et_index) 0
- if { [istarget powerpc*-*-*]
+ return [check_cached_effective_target_indexed vect_widen_mult_hi_to_si_pattern {
+ expr { [istarget powerpc*-*-*]
|| [istarget spu-*-*]
|| [istarget ia64-*-*]
|| [istarget i?86-*-*] || [istarget x86_64-*-*]
|| ([is-effective-target arm_neon]
&& [check_effective_target_arm_little_endian])
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) } {
- set et_vect_widen_mult_hi_to_si_pattern_saved($et_index) 1
- }
- }
- verbose "check_effective_target_vect_widen_mult_hi_to_si_pattern:\
- returning $et_vect_widen_mult_hi_to_si_pattern_saved($et_index)" 2
- return $et_vect_widen_mult_hi_to_si_pattern_saved($et_index)
+ && [check_effective_target_s390_vx])
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if the target plus current options supports a vector
# This won't change for different subtargets so cache the result.
proc check_effective_target_vect_widen_mult_si_to_di_pattern { } {
- global et_vect_widen_mult_si_to_di_pattern_saved
- global et_index
-
- if [info exists et_vect_widen_mult_si_to_di_pattern_saved($et_index)] {
- verbose "check_effective_target_vect_widen_mult_si_to_di_pattern:\
- using cached result" 2
- } else {
- set et_vect_widen_mult_si_to_di_pattern_saved($et_index) 0
- if {[istarget ia64-*-*]
- || [istarget i?86-*-*] || [istarget x86_64-*-*]
- || ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) } {
- set et_vect_widen_mult_si_to_di_pattern_saved($et_index) 1
- }
- }
- verbose "check_effective_target_vect_widen_mult_si_to_di_pattern:\
- returning $et_vect_widen_mult_si_to_di_pattern_saved($et_index)" 2
- return $et_vect_widen_mult_si_to_di_pattern_saved($et_index)
+ return [check_cached_effective_target_indexed vect_widen_mult_si_to_di_pattern {
+ expr { [istarget ia64-*-*]
+ || [istarget i?86-*-*] || [istarget x86_64-*-*]
+ || ([istarget s390*-*-*]
+ && [check_effective_target_s390_vx]) }}]
}
# Return 1 if the target plus current options supports a vector
# This won't change for different subtargets so cache the result.
proc check_effective_target_vect_widen_shift { } {
- global et_vect_widen_shift_saved
- global et_index
-
- if [info exists et_vect_shift_saved($et_index)] {
- verbose "check_effective_target_vect_widen_shift: using cached result" 2
- } else {
- set et_vect_widen_shift_saved($et_index) 0
- if { [is-effective-target arm_neon] } {
- set et_vect_widen_shift_saved($et_index) 1
- }
- }
- verbose "check_effective_target_vect_widen_shift:\
- returning $et_vect_widen_shift_saved($et_index)" 2
- return $et_vect_widen_shift_saved($et_index)
+ return [check_cached_effective_target_indexed vect_widen_shift {
+ expr { [is-effective-target arm_neon] }}]
}
# Return 1 if the target plus current options supports a vector
# This won't change for different subtargets so cache the result.
proc check_effective_target_vect_sdot_qi { } {
- global et_vect_sdot_qi_saved
- global et_index
-
- if [info exists et_vect_sdot_qi_saved($et_index)] {
- verbose "check_effective_target_vect_sdot_qi: using cached result" 2
- } else {
- set et_vect_sdot_qi_saved($et_index) 0
- if { [istarget ia64-*-*]
+ return [check_cached_effective_target_indexed vect_sdot_qi {
+ expr { [istarget ia64-*-*]
+ || [istarget aarch64*-*-*]
+ || [istarget arm*-*-*]
|| ([istarget mips*-*-*]
- && [et-is-effective-target mips_msa]) } {
- set et_vect_udot_qi_saved 1
- }
- }
- verbose "check_effective_target_vect_sdot_qi:\
- returning $et_vect_sdot_qi_saved($et_index)" 2
- return $et_vect_sdot_qi_saved($et_index)
+ && [et-is-effective-target mips_msa]) }}]
}
# Return 1 if the target plus current options supports a vector
# This won't change for different subtargets so cache the result.
proc check_effective_target_vect_udot_qi { } {
- global et_vect_udot_qi_saved
- global et_index
-
- if [info exists et_vect_udot_qi_saved($et_index)] {
- verbose "check_effective_target_vect_udot_qi: using cached result" 2
- } else {
- set et_vect_udot_qi_saved($et_index) 0
- if { [istarget powerpc*-*-*]
+ return [check_cached_effective_target_indexed vect_udot_qi {
+ expr { [istarget powerpc*-*-*]
+ || [istarget aarch64*-*-*]
+ || [istarget arm*-*-*]
|| [istarget ia64-*-*]
|| ([istarget mips*-*-*]
- && [et-is-effective-target mips_msa]) } {
- set et_vect_udot_qi_saved($et_index) 1
- }
- }
- verbose "check_effective_target_vect_udot_qi:\
- returning $et_vect_udot_qi_saved($et_index)" 2
- return $et_vect_udot_qi_saved($et_index)
+ && [et-is-effective-target mips_msa]) }}]
}
# Return 1 if the target plus current options supports a vector
# This won't change for different subtargets so cache the result.
proc check_effective_target_vect_sdot_hi { } {
- global et_vect_sdot_hi_saved
- global et_index
-
- if [info exists et_vect_sdot_hi_saved($et_index)] {
- verbose "check_effective_target_vect_sdot_hi: using cached result" 2
- } else {
- set et_vect_sdot_hi_saved($et_index) 0
- if { ([istarget powerpc*-*-*] && ![istarget powerpc-*-linux*paired*])
+ return [check_cached_effective_target_indexed vect_sdot_hi {
+ expr { ([istarget powerpc*-*-*] && ![istarget powerpc-*-linux*paired*])
|| [istarget ia64-*-*]
|| [istarget i?86-*-*] || [istarget x86_64-*-*]
|| ([istarget mips*-*-*]
- && [et-is-effective-target mips_msa]) } {
- set et_vect_sdot_hi_saved($et_index) 1
- }
- }
- verbose "check_effective_target_vect_sdot_hi:\
- returning $et_vect_sdot_hi_saved($et_index)" 2
- return $et_vect_sdot_hi_saved($et_index)
+ && [et-is-effective-target mips_msa]) }}]
}
# Return 1 if the target plus current options supports a vector
# This won't change for different subtargets so cache the result.
proc check_effective_target_vect_udot_hi { } {
- global et_vect_udot_hi_saved
- global et_index
-
- if [info exists et_vect_udot_hi_saved($et_index)] {
- verbose "check_effective_target_vect_udot_hi: using cached result" 2
- } else {
- set et_vect_udot_hi_saved($et_index) 0
- if { ([istarget powerpc*-*-*] && ![istarget powerpc-*-linux*paired*])
+ return [check_cached_effective_target_indexed vect_udot_hi {
+ expr { ([istarget powerpc*-*-*] && ![istarget powerpc-*-linux*paired*])
|| ([istarget mips*-*-*]
- && [et-is-effective-target mips_msa]) } {
- set et_vect_udot_hi_saved($et_index) 1
- }
- }
- verbose "check_effective_target_vect_udot_hi:\
- returning $et_vect_udot_hi_saved($et_index)" 2
- return $et_vect_udot_hi_saved($et_index)
+ && [et-is-effective-target mips_msa]) }}]
}
# Return 1 if the target plus current options supports a vector
# This won't change for different subtargets so cache the result.
proc check_effective_target_vect_usad_char { } {
- global et_vect_usad_char_saved
- global et_index
+ return [check_cached_effective_target_indexed vect_usad_char {
+ expr { [istarget i?86-*-*]
+ || [istarget x86_64-*-*]
+ || ([istarget aarch64*-*-*]
+ && ![check_effective_target_aarch64_sve])
+ || ([istarget powerpc*-*-*]
+ && [check_p9vector_hw_available])}}]
+}
- if [info exists et_vect_usad_char_saved($et_index)] {
- verbose "check_effective_target_vect_usad_char: using cached result" 2
- } else {
- set et_vect_usad_char_saved($et_index) 0
- if { [istarget i?86-*-*] || [istarget x86_64-*-*] } {
- set et_vect_usad_char_saved($et_index) 1
- }
- }
- verbose "check_effective_target_vect_usad_char:\
- returning $et_vect_usad_char_saved($et_index)" 2
- return $et_vect_usad_char_saved($et_index)
+# Return 1 if the target plus current options supports both signed
+# and unsigned average operations on vectors of bytes.
+
+proc check_effective_target_vect_avg_qi {} {
+ return [expr { [istarget aarch64*-*-*]
+ && ![check_effective_target_aarch64_sve] }]
}
# Return 1 if the target plus current options supports a vector
# This won't change for different subtargets so cache the result.
proc check_effective_target_vect_pack_trunc { } {
- global et_vect_pack_trunc_saved
- global et_index
-
- if [info exists et_vect_pack_trunc_saved($et_index)] {
- verbose "check_effective_target_vect_pack_trunc: using cached result" 2
- } else {
- set et_vect_pack_trunc_saved($et_index) 0
- if { ([istarget powerpc*-*-*] && ![istarget powerpc-*-linux*paired*])
+ return [check_cached_effective_target_indexed vect_pack_trunc {
+ expr { ([istarget powerpc*-*-*] && ![istarget powerpc-*-linux*paired*])
|| [istarget i?86-*-*] || [istarget x86_64-*-*]
|| [istarget aarch64*-*-*]
|| [istarget spu-*-*]
|| ([istarget mips*-*-*]
&& [et-is-effective-target mips_msa])
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) } {
- set et_vect_pack_trunc_saved($et_index) 1
- }
- }
- verbose "check_effective_target_vect_pack_trunc:\
- returning $et_vect_pack_trunc_saved($et_index)" 2
- return $et_vect_pack_trunc_saved($et_index)
+ && [check_effective_target_s390_vx]) }}]
}
# Return 1 if the target plus current options supports a vector
# This won't change for different subtargets so cache the result.
proc check_effective_target_vect_unpack { } {
- global et_vect_unpack_saved
- global et_index
-
- if [info exists et_vect_unpack_saved($et_index)] {
- verbose "check_effective_target_vect_unpack: using cached result" 2
- } else {
- set et_vect_unpack_saved($et_index) 0
- if { ([istarget powerpc*-*-*] && ![istarget powerpc-*paired*])
- || [istarget i?86-*-*] || [istarget x86_64-*-*]
+ return [check_cached_effective_target_indexed vect_unpack {
+ expr { ([istarget powerpc*-*-*] && ![istarget powerpc-*paired*])
+ || [istarget i?86-*-*] || [istarget x86_64-*-*]
|| [istarget spu-*-*]
|| [istarget ia64-*-*]
|| [istarget aarch64*-*-*]
|| ([istarget arm*-*-*] && [check_effective_target_arm_neon_ok]
&& [check_effective_target_arm_little_endian])
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) } {
- set et_vect_unpack_saved($et_index) 1
- }
- }
- verbose "check_effective_target_vect_unpack:\
- returning $et_vect_unpack_saved($et_index)" 2
- return $et_vect_unpack_saved($et_index)
+ && [check_effective_target_s390_vx]) }}]
}
# Return 1 if the target plus current options does not guarantee
# This won't change for different subtargets so cache the result.
proc check_effective_target_unaligned_stack { } {
- global et_unaligned_stack_saved
-
- if [info exists et_unaligned_stack_saved] {
- verbose "check_effective_target_unaligned_stack: using cached result" 2
- } else {
- set et_unaligned_stack_saved 0
- }
- verbose "check_effective_target_unaligned_stack: returning $et_unaligned_stack_saved" 2
- return $et_unaligned_stack_saved
+ return [check_cached_effective_target_indexed unaligned_stack { expr 0 }]
}
# Return 1 if the target plus current options does not support a vector
# This won't change for different subtargets so cache the result.
proc check_effective_target_vect_no_align { } {
- global et_vect_no_align_saved
- global et_index
-
- if [info exists et_vect_no_align_saved($et_index)] {
- verbose "check_effective_target_vect_no_align: using cached result" 2
- } else {
- set et_vect_no_align_saved($et_index) 0
- if { [istarget mipsisa64*-*-*]
+ return [check_cached_effective_target_indexed vect_no_align {
+ expr { [istarget mipsisa64*-*-*]
|| [istarget mips-sde-elf]
|| [istarget sparc*-*-*]
|| [istarget ia64-*-*]
|| [check_effective_target_arm_vect_no_misalign]
|| ([istarget powerpc*-*-*] && [check_p8vector_hw_available])
|| ([istarget mips*-*-*]
- && [et-is-effective-target mips_loongson]) } {
- set et_vect_no_align_saved($et_index) 1
- }
- }
- verbose "check_effective_target_vect_no_align:\
- returning $et_vect_no_align_saved($et_index)" 2
- return $et_vect_no_align_saved($et_index)
+ && [et-is-effective-target mips_loongson_mmi]) }}]
}
# Return 1 if the target supports a vector misalign access, 0 otherwise.
# This won't change for different subtargets so cache the result.
proc check_effective_target_vect_hw_misalign { } {
- global et_vect_hw_misalign_saved
- global et_index
-
- if [info exists et_vect_hw_misalign_saved($et_index)] {
- verbose "check_effective_target_vect_hw_misalign: using cached result" 2
- } else {
- set et_vect_hw_misalign_saved($et_index) 0
+ return [check_cached_effective_target_indexed vect_hw_misalign {
if { [istarget i?86-*-*] || [istarget x86_64-*-*]
|| ([istarget powerpc*-*-*] && [check_p8vector_hw_available])
|| [istarget aarch64*-*-*]
|| ([istarget mips*-*-*] && [et-is-effective-target mips_msa])
|| ([istarget s390*-*-*]
&& [check_effective_target_s390_vx]) } {
- set et_vect_hw_misalign_saved($et_index) 1
+ return 1
}
- if { [istarget arm*-*-*] } {
- set et_vect_hw_misalign_saved($et_index) [expr ![check_effective_target_arm_vect_no_misalign]]
+ if { [istarget arm*-*-*]
+ && ![check_effective_target_arm_vect_no_misalign] } {
+ return 1
}
- }
- verbose "check_effective_target_vect_hw_misalign:\
- returning $et_vect_hw_misalign_saved($et_index)" 2
- return $et_vect_hw_misalign_saved($et_index)
+ return 0
+ }]
}
# This won't change for different subtargets so cache the result.
proc check_effective_target_natural_alignment_32 { } {
- global et_natural_alignment_32
-
- if [info exists et_natural_alignment_32_saved] {
- verbose "check_effective_target_natural_alignment_32: using cached result" 2
- } else {
- # FIXME: 32bit powerpc: guaranteed only if MASK_ALIGN_NATURAL/POWER.
- set et_natural_alignment_32_saved 1
- if { ([istarget *-*-darwin*] && [is-effective-target lp64])
- || [istarget avr-*-*] } {
- set et_natural_alignment_32_saved 0
- }
- }
- verbose "check_effective_target_natural_alignment_32: returning $et_natural_alignment_32_saved" 2
- return $et_natural_alignment_32_saved
+ # FIXME: 32bit powerpc: guaranteed only if MASK_ALIGN_NATURAL/POWER.
+ return [check_cached_effective_target_indexed natural_alignment_32 {
+ if { ([istarget *-*-darwin*] && [is-effective-target lp64])
+ || [istarget avr-*-*] } {
+ return 0
+ } else {
+ return 1
+ }
+ }]
}
# Return 1 if types of size 64 bit or less are naturally aligned (aligned to their
# This won't change for different subtargets so cache the result.
proc check_effective_target_natural_alignment_64 { } {
- global et_natural_alignment_64
-
- if [info exists et_natural_alignment_64_saved] {
- verbose "check_effective_target_natural_alignment_64: using cached result" 2
- } else {
- set et_natural_alignment_64_saved 0
- if { ([is-effective-target lp64] && ![istarget *-*-darwin*])
- || [istarget spu-*-*] } {
- set et_natural_alignment_64_saved 1
- }
- }
- verbose "check_effective_target_natural_alignment_64: returning $et_natural_alignment_64_saved" 2
- return $et_natural_alignment_64_saved
+ return [check_cached_effective_target_indexed natural_alignment_64 {
+ expr { ([is-effective-target lp64] && ![istarget *-*-darwin*])
+ || [istarget spu-*-*] }
+ }]
}
# Return 1 if all vector types are naturally aligned (aligned to their
set et_vect_natural_alignment 1
if { [check_effective_target_arm_eabi]
|| [istarget nvptx-*-*]
- || [istarget s390*-*-*] } {
+ || [istarget s390*-*-*]
+ || [istarget amdgcn-*-*] } {
set et_vect_natural_alignment 0
}
verbose "check_effective_target_vect_natural_alignment:\
return $et_vect_natural_alignment
}
+# Return true if fully-masked loops are supported.
+
+proc check_effective_target_vect_fully_masked { } {
+ return [expr { [check_effective_target_aarch64_sve]
+ || [istarget amdgcn*-*-*] }]
+}
+
+# Return 1 if the target doesn't prefer any alignment beyond element
+# alignment during vectorization.
+
+proc check_effective_target_vect_element_align_preferred { } {
+ return [expr { [check_effective_target_aarch64_sve]
+ && [check_effective_target_vect_variable_length] }]
+}
+
+# Return 1 if we can align stack data to the preferred vector alignment.
+
+proc check_effective_target_vect_align_stack_vars { } {
+ if { [check_effective_target_aarch64_sve] } {
+ return [check_effective_target_vect_variable_length]
+ }
+ return 1
+}
+
# Return 1 if vector alignment (for types of size 32 bit or less) is reachable, 0 otherwise.
proc check_effective_target_vector_alignment_reachable { } {
# Return 1 if the target only requires element alignment for vector accesses
proc check_effective_target_vect_element_align { } {
- global et_vect_element_align
- global et_index
-
- if [info exists et_vect_element_align($et_index)] {
- verbose "check_effective_target_vect_element_align:\
- using cached result" 2
- } else {
- set et_vect_element_align($et_index) 0
- if { ([istarget arm*-*-*]
+ return [check_cached_effective_target_indexed vect_element_align {
+ expr { ([istarget arm*-*-*]
&& ![check_effective_target_arm_vect_no_misalign])
- || [check_effective_target_vect_hw_misalign] } {
- set et_vect_element_align($et_index) 1
- }
- }
+ || [check_effective_target_vect_hw_misalign]
+ || [istarget amdgcn-*-*] }}]
+}
+
+# Return 1 if we expect to see unaligned accesses in at least some
+# vector dumps.
- verbose "check_effective_target_vect_element_align:\
- returning $et_vect_element_align($et_index)" 2
- return $et_vect_element_align($et_index)
+proc check_effective_target_vect_unaligned_possible { } {
+ return [expr { ![check_effective_target_vect_element_align_preferred]
+ && (![check_effective_target_vect_no_align]
+ || [check_effective_target_vect_hw_misalign]) }]
}
# Return 1 if the target supports vector LOAD_LANES operations, 0 otherwise.
proc check_effective_target_vect_load_lanes { } {
- global et_vect_load_lanes
+ # We don't support load_lanes correctly on big-endian arm.
+ return [check_cached_effective_target vect_load_lanes {
+ expr { ([check_effective_target_arm_little_endian]
+ && [check_effective_target_arm_neon_ok])
+ || [istarget aarch64*-*-*] }}]
+}
- if [info exists et_vect_load_lanes] {
- verbose "check_effective_target_vect_load_lanes: using cached result" 2
- } else {
- set et_vect_load_lanes 0
- if { ([istarget arm*-*-*] && [check_effective_target_arm_neon_ok])
- || [istarget aarch64*-*-*] } {
- set et_vect_load_lanes 1
- }
- }
+# Return 1 if the target supports vector masked stores.
+
+proc check_effective_target_vect_masked_store { } {
+ return [expr { [check_effective_target_aarch64_sve]
+ || [istarget amdgcn*-*-*] }]
+}
- verbose "check_effective_target_vect_load_lanes: returning $et_vect_load_lanes" 2
- return $et_vect_load_lanes
+# Return 1 if the target supports vector scatter stores.
+
+proc check_effective_target_vect_scatter_store { } {
+ return [expr { [check_effective_target_aarch64_sve]
+ || [istarget amdgcn*-*-*] }]
}
# Return 1 if the target supports vector conditional operations, 0 otherwise.
proc check_effective_target_vect_condition { } {
- global et_vect_cond_saved
- global et_index
-
- if [info exists et_vect_cond_saved($et_index)] {
- verbose "check_effective_target_vect_cond: using cached result" 2
- } else {
- set et_vect_cond_saved($et_index) 0
- if { [istarget aarch64*-*-*]
+ return [check_cached_effective_target_indexed vect_condition {
+ expr { [istarget aarch64*-*-*]
|| [istarget powerpc*-*-*]
|| [istarget ia64-*-*]
|| [istarget i?86-*-*] || [istarget x86_64-*-*]
|| ([istarget arm*-*-*]
&& [check_effective_target_arm_neon_ok])
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) } {
- set et_vect_cond_saved($et_index) 1
- }
- }
-
- verbose "check_effective_target_vect_cond:\
- returning $et_vect_cond_saved($et_index)" 2
- return $et_vect_cond_saved($et_index)
+ && [check_effective_target_s390_vx])
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if the target supports vector conditional operations where
# the comparison has different type from the lhs, 0 otherwise.
proc check_effective_target_vect_cond_mixed { } {
- global et_vect_cond_mixed_saved
- global et_index
-
- if [info exists et_vect_cond_mixed_saved($et_index)] {
- verbose "check_effective_target_vect_cond_mixed: using cached result" 2
- } else {
- set et_vect_cond_mixed_saved($et_index) 0
- if { [istarget i?86-*-*] || [istarget x86_64-*-*]
+ return [check_cached_effective_target_indexed vect_cond_mixed {
+ expr { [istarget i?86-*-*] || [istarget x86_64-*-*]
|| [istarget aarch64*-*-*]
|| [istarget powerpc*-*-*]
|| ([istarget mips*-*-*]
&& [et-is-effective-target mips_msa])
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) } {
- set et_vect_cond_mixed_saved($et_index) 1
- }
- }
-
- verbose "check_effective_target_vect_cond_mixed:\
- returning $et_vect_cond_mixed_saved($et_index)" 2
- return $et_vect_cond_mixed_saved($et_index)
+ && [check_effective_target_s390_vx])
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if the target supports vector char multiplication, 0 otherwise.
proc check_effective_target_vect_char_mult { } {
- global et_vect_char_mult_saved
- global et_index
-
- if [info exists et_vect_char_mult_saved($et_index)] {
- verbose "check_effective_target_vect_char_mult: using cached result" 2
- } else {
- set et_vect_char_mult_saved($et_index) 0
- if { [istarget aarch64*-*-*]
+ return [check_cached_effective_target_indexed vect_char_mult {
+ expr { [istarget aarch64*-*-*]
|| [istarget ia64-*-*]
|| [istarget i?86-*-*] || [istarget x86_64-*-*]
|| [check_effective_target_arm32]
|| ([istarget mips*-*-*]
&& [et-is-effective-target mips_msa])
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) } {
- set et_vect_char_mult_saved($et_index) 1
- }
- }
-
- verbose "check_effective_target_vect_char_mult:\
- returning $et_vect_char_mult_saved($et_index)" 2
- return $et_vect_char_mult_saved($et_index)
+ && [check_effective_target_s390_vx])
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if the target supports vector short multiplication, 0 otherwise.
proc check_effective_target_vect_short_mult { } {
- global et_vect_short_mult_saved
- global et_index
-
- if [info exists et_vect_short_mult_saved($et_index)] {
- verbose "check_effective_target_vect_short_mult: using cached result" 2
- } else {
- set et_vect_short_mult_saved($et_index) 0
- if { [istarget ia64-*-*]
+ return [check_cached_effective_target_indexed vect_short_mult {
+ expr { [istarget ia64-*-*]
|| [istarget spu-*-*]
|| [istarget i?86-*-*] || [istarget x86_64-*-*]
|| [istarget powerpc*-*-*]
|| [check_effective_target_arm32]
|| ([istarget mips*-*-*]
&& ([et-is-effective-target mips_msa]
- || [et-is-effective-target mips_loongson]))
+ || [et-is-effective-target mips_loongson_mmi]))
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) } {
- set et_vect_short_mult_saved($et_index) 1
- }
- }
-
- verbose "check_effective_target_vect_short_mult:\
- returning $et_vect_short_mult_saved($et_index)" 2
- return $et_vect_short_mult_saved($et_index)
+ && [check_effective_target_s390_vx])
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if the target supports vector int multiplication, 0 otherwise.
proc check_effective_target_vect_int_mult { } {
- global et_vect_int_mult_saved
- global et_index
-
- if [info exists et_vect_int_mult_saved($et_index)] {
- verbose "check_effective_target_vect_int_mult: using cached result" 2
- } else {
- set et_vect_int_mult_saved($et_index) 0
- if { ([istarget powerpc*-*-*] && ![istarget powerpc-*-linux*paired*])
+ return [check_cached_effective_target_indexed vect_int_mult {
+ expr { ([istarget powerpc*-*-*] && ![istarget powerpc-*-linux*paired*])
|| [istarget spu-*-*]
|| [istarget i?86-*-*] || [istarget x86_64-*-*]
|| [istarget ia64-*-*]
&& [et-is-effective-target mips_msa])
|| [check_effective_target_arm32]
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) } {
- set et_vect_int_mult_saved($et_index) 1
- }
- }
-
- verbose "check_effective_target_vect_int_mult:\
- returning $et_vect_int_mult_saved($et_index)" 2
- return $et_vect_int_mult_saved($et_index)
+ && [check_effective_target_s390_vx])
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if the target supports 64 bit hardware vector
# Return 1 if the target supports vector even/odd elements extraction, 0 otherwise.
proc check_effective_target_vect_extract_even_odd { } {
- global et_vect_extract_even_odd_saved
- global et_index
-
- if [info exists et_vect_extract_even_odd_saved($et_index)] {
- verbose "check_effective_target_vect_extract_even_odd:\
- using cached result" 2
- } else {
- set et_vect_extract_even_odd_saved($et_index) 0
- if { [istarget aarch64*-*-*]
+ return [check_cached_effective_target_indexed extract_even_odd {
+ expr { [istarget aarch64*-*-*]
|| [istarget powerpc*-*-*]
|| [is-effective-target arm_neon]
|| [istarget i?86-*-*] || [istarget x86_64-*-*]
&& ([et-is-effective-target mips_msa]
|| [et-is-effective-target mpaired_single]))
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) } {
- set et_vect_extract_even_odd_saved($et_index) 1
- }
- }
-
- verbose "check_effective_target_vect_extract_even_odd:\
- returning $et_vect_extract_even_odd_saved($et_index)" 2
- return $et_vect_extract_even_odd_saved($et_index)
+ && [check_effective_target_s390_vx]) }}]
}
# Return 1 if the target supports vector interleaving, 0 otherwise.
proc check_effective_target_vect_interleave { } {
- global et_vect_interleave_saved
- global et_index
-
- if [info exists et_vect_interleave_saved($et_index)] {
- verbose "check_effective_target_vect_interleave: using cached result" 2
- } else {
- set et_vect_interleave_saved($et_index) 0
- if { [istarget aarch64*-*-*]
+ return [check_cached_effective_target_indexed vect_interleave {
+ expr { [istarget aarch64*-*-*]
|| [istarget powerpc*-*-*]
|| [is-effective-target arm_neon]
|| [istarget i?86-*-*] || [istarget x86_64-*-*]
&& ([et-is-effective-target mpaired_single]
|| [et-is-effective-target mips_msa]))
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) } {
- set et_vect_interleave_saved($et_index) 1
- }
- }
-
- verbose "check_effective_target_vect_interleave:\
- returning $et_vect_interleave_saved($et_index)" 2
- return $et_vect_interleave_saved($et_index)
+ && [check_effective_target_s390_vx]) }}]
}
foreach N {2 3 4 8} {
eval [string map [list N $N] {
# Return 1 if the target supports 2-vector interleaving
proc check_effective_target_vect_stridedN { } {
- global et_vect_stridedN_saved
- global et_index
-
- if [info exists et_vect_stridedN_saved($et_index)] {
- verbose "check_effective_target_vect_stridedN:\
- using cached result" 2
- } else {
- set et_vect_stridedN_saved($et_index) 0
+ return [check_cached_effective_target_indexed vect_stridedN {
if { (N & -N) == N
&& [check_effective_target_vect_interleave]
&& [check_effective_target_vect_extract_even_odd] } {
- set et_vect_stridedN_saved($et_index) 1
+ return 1
}
if { ([istarget arm*-*-*]
|| [istarget aarch64*-*-*]) && N >= 2 && N <= 4 } {
- set et_vect_stridedN_saved($et_index) 1
+ return 1
}
- }
-
- verbose "check_effective_target_vect_stridedN:\
- returning $et_vect_stridedN_saved($et_index)" 2
- return $et_vect_stridedN_saved($et_index)
+ if [check_effective_target_vect_fully_masked] {
+ return 1
+ }
+ return 0
+ }]
}
}]
}
+# Return the list of vector sizes (in bits) that each target supports.
+# A vector length of "0" indicates variable-length vectors.
+
+proc available_vector_sizes { } {
+ set result {}
+ if { [istarget aarch64*-*-*] } {
+ if { [check_effective_target_aarch64_sve] } {
+ lappend result [aarch64_sve_bits]
+ }
+ lappend result 128 64
+ } elseif { [istarget arm*-*-*]
+ && [check_effective_target_arm_neon_ok] } {
+ lappend result 128 64
+ } elseif { (([istarget i?86-*-*] || [istarget x86_64-*-*])
+ && ([check_avx_available] && ![check_prefer_avx128])) } {
+ lappend result 256 128
+ } elseif { [istarget sparc*-*-*] } {
+ lappend result 64
+ } else {
+ # The traditional default asumption.
+ lappend result 128
+ }
+ return $result
+}
+
# Return 1 if the target supports multiple vector sizes
proc check_effective_target_vect_multiple_sizes { } {
- global et_vect_multiple_sizes_saved
- global et_index
+ return [expr { [llength [available_vector_sizes]] > 1 }]
+}
- set et_vect_multiple_sizes_saved($et_index) 0
- if { [istarget aarch64*-*-*]
- || [is-effective-target arm_neon]
- || (([istarget i?86-*-*] || [istarget x86_64-*-*])
- && ([check_avx_available] && ![check_prefer_avx128])) } {
- set et_vect_multiple_sizes_saved($et_index) 1
- }
+# Return true if variable-length vectors are supported.
- verbose "check_effective_target_vect_multiple_sizes:\
- returning $et_vect_multiple_sizes_saved($et_index)" 2
- return $et_vect_multiple_sizes_saved($et_index)
+proc check_effective_target_vect_variable_length { } {
+ return [expr { [lindex [available_vector_sizes] 0] == 0 }]
}
# Return 1 if the target supports vectors of 64 bits.
proc check_effective_target_vect64 { } {
- global et_vect64_saved
- global et_index
-
- if [info exists et_vect64_saved($et_index)] {
- verbose "check_effective_target_vect64: using cached result" 2
- } else {
- set et_vect64_saved($et_index) 0
- if { ([is-effective-target arm_neon]
- && [check_effective_target_arm_little_endian])
- || [istarget aarch64*-*-*]
- || [istarget sparc*-*-*] } {
- set et_vect64_saved($et_index) 1
- }
- }
-
- verbose "check_effective_target_vect64:\
- returning $et_vect64_saved($et_index)" 2
- return $et_vect64_saved($et_index)
+ return [expr { [lsearch -exact [available_vector_sizes] 64] >= 0 }]
}
# Return 1 if the target supports vector copysignf calls.
proc check_effective_target_vect_call_copysignf { } {
- global et_vect_call_copysignf_saved
- global et_index
-
- if [info exists et_vect_call_copysignf_saved($et_index)] {
- verbose "check_effective_target_vect_call_copysignf:\
- using cached result" 2
- } else {
- set et_vect_call_copysignf_saved($et_index) 0
- if { [istarget i?86-*-*] || [istarget x86_64-*-*]
+ return [check_cached_effective_target_indexed vect_call_copysignf {
+ expr { [istarget i?86-*-*] || [istarget x86_64-*-*]
|| [istarget powerpc*-*-*]
- || [istarget aarch64*-*-*] } {
- set et_vect_call_copysignf_saved($et_index) 1
- }
- }
-
- verbose "check_effective_target_vect_call_copysignf:\
- returning $et_vect_call_copysignf_saved($et_index)" 2
- return $et_vect_call_copysignf_saved($et_index)
+ || [istarget aarch64*-*-*] }}]
}
# Return 1 if the target supports hardware square root instructions.
proc check_effective_target_sqrt_insn { } {
- global et_sqrt_insn_saved
-
- if [info exists et_sqrt_insn_saved] {
- verbose "check_effective_target_hw_sqrt: using cached result" 2
- } else {
- set et_sqrt_insn_saved 0
- if { [istarget i?86-*-*] || [istarget x86_64-*-*]
+ return [check_cached_effective_target sqrt_insn {
+ expr { [istarget i?86-*-*] || [istarget x86_64-*-*]
|| [istarget powerpc*-*-*]
|| [istarget aarch64*-*-*]
|| ([istarget arm*-*-*] && [check_effective_target_arm_vfp_ok])
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) } {
- set et_sqrt_insn_saved 1
- }
- }
+ && [check_effective_target_s390_vx])
+ || [istarget amdgcn-*-*] }}]
+}
+
+# Return any additional options to enable square root intructions.
- verbose "check_effective_target_hw_sqrt: returning et_sqrt_insn_saved" 2
- return $et_sqrt_insn_saved
+proc add_options_for_sqrt_insn { flags } {
+ if { [istarget amdgcn*-*-*] } {
+ return "$flags -ffast-math"
+ }
+ return $flags
}
# Return 1 if the target supports vector sqrtf calls.
proc check_effective_target_vect_call_sqrtf { } {
- global et_vect_call_sqrtf_saved
- global et_index
-
- if [info exists et_vect_call_sqrtf_saved($et_index)] {
- verbose "check_effective_target_vect_call_sqrtf: using cached result" 2
- } else {
- set et_vect_call_sqrtf_saved($et_index) 0
- if { [istarget aarch64*-*-*]
+ return [check_cached_effective_target_indexed vect_call_sqrtf {
+ expr { [istarget aarch64*-*-*]
|| [istarget i?86-*-*] || [istarget x86_64-*-*]
|| ([istarget powerpc*-*-*] && [check_vsx_hw_available])
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) } {
- set et_vect_call_sqrtf_saved($et_index) 1
- }
- }
-
- verbose "check_effective_target_vect_call_sqrtf:\
- returning $et_vect_call_sqrtf_saved($et_index)" 2
- return $et_vect_call_sqrtf_saved($et_index)
+ && [check_effective_target_s390_vx]) }}]
}
# Return 1 if the target supports vector lrint calls.
proc check_effective_target_vect_call_lrint { } {
set et_vect_call_lrint 0
if { (([istarget i?86-*-*] || [istarget x86_64-*-*])
- && [check_effective_target_ilp32]) } {
+ && [check_effective_target_ilp32])
+ || [istarget amdgcn-*-*] } {
set et_vect_call_lrint 1
}
# Return 1 if the target supports vector btrunc calls.
proc check_effective_target_vect_call_btrunc { } {
- global et_vect_call_btrunc_saved
- global et_index
-
- if [info exists et_vect_call_btrunc_saved($et_index)] {
- verbose "check_effective_target_vect_call_btrunc:\
- using cached result" 2
- } else {
- set et_vect_call_btrunc_saved($et_index) 0
- if { [istarget aarch64*-*-*] } {
- set et_vect_call_btrunc_saved($et_index) 1
- }
- }
-
- verbose "check_effective_target_vect_call_btrunc:\
- returning $et_vect_call_btrunc_saved($et_index)" 2
- return $et_vect_call_btrunc_saved($et_index)
+ return [check_cached_effective_target_indexed vect_call_btrunc {
+ expr { [istarget aarch64*-*-*]
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if the target supports vector btruncf calls.
proc check_effective_target_vect_call_btruncf { } {
- global et_vect_call_btruncf_saved
- global et_index
-
- if [info exists et_vect_call_btruncf_saved($et_index)] {
- verbose "check_effective_target_vect_call_btruncf:\
- using cached result" 2
- } else {
- set et_vect_call_btruncf_saved($et_index) 0
- if { [istarget aarch64*-*-*] } {
- set et_vect_call_btruncf_saved($et_index) 1
- }
- }
-
- verbose "check_effective_target_vect_call_btruncf:\
- returning $et_vect_call_btruncf_saved($et_index)" 2
- return $et_vect_call_btruncf_saved($et_index)
+ return [check_cached_effective_target_indexed vect_call_btruncf {
+ expr { [istarget aarch64*-*-*]
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if the target supports vector ceil calls.
proc check_effective_target_vect_call_ceil { } {
- global et_vect_call_ceil_saved
- global et_index
-
- if [info exists et_vect_call_ceil_saved($et_index)] {
- verbose "check_effective_target_vect_call_ceil: using cached result" 2
- } else {
- set et_vect_call_ceil_saved($et_index) 0
- if { [istarget aarch64*-*-*] } {
- set et_vect_call_ceil_saved($et_index) 1
- }
- }
-
- verbose "check_effective_target_vect_call_ceil:\
- returning $et_vect_call_ceil_saved($et_index)" 2
- return $et_vect_call_ceil_saved($et_index)
+ return [check_cached_effective_target_indexed vect_call_ceil {
+ expr { [istarget aarch64*-*-*]
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if the target supports vector ceilf calls.
proc check_effective_target_vect_call_ceilf { } {
- global et_vect_call_ceilf_saved
- global et_index
-
- if [info exists et_vect_call_ceilf_saved($et_index)] {
- verbose "check_effective_target_vect_call_ceilf: using cached result" 2
- } else {
- set et_vect_call_ceilf_saved($et_index) 0
- if { [istarget aarch64*-*-*] } {
- set et_vect_call_ceilf_saved($et_index) 1
- }
- }
-
- verbose "check_effective_target_vect_call_ceilf:\
- returning $et_vect_call_ceilf_saved($et_index)" 2
- return $et_vect_call_ceilf_saved($et_index)
+ return [check_cached_effective_target_indexed vect_call_ceilf {
+ expr { [istarget aarch64*-*-*] }}]
}
# Return 1 if the target supports vector floor calls.
proc check_effective_target_vect_call_floor { } {
- global et_vect_call_floor_saved
- global et_index
-
- if [info exists et_vect_call_floor_saved($et_index)] {
- verbose "check_effective_target_vect_call_floor: using cached result" 2
- } else {
- set et_vect_call_floor_saved($et_index) 0
- if { [istarget aarch64*-*-*] } {
- set et_vect_call_floor_saved($et_index) 1
- }
- }
-
- verbose "check_effective_target_vect_call_floor:\
- returning $et_vect_call_floor_saved($et_index)" 2
- return $et_vect_call_floor_saved($et_index)
+ return [check_cached_effective_target_indexed vect_call_floor {
+ expr { [istarget aarch64*-*-*] }}]
}
# Return 1 if the target supports vector floorf calls.
proc check_effective_target_vect_call_floorf { } {
- global et_vect_call_floorf_saved
- global et_index
-
- if [info exists et_vect_call_floorf_saved($et_index)] {
- verbose "check_effective_target_vect_call_floorf: using cached result" 2
- } else {
- set et_vect_call_floorf_saved($et_index) 0
- if { [istarget aarch64*-*-*] } {
- set et_vect_call_floorf_saved($et_index) 1
- }
- }
-
- verbose "check_effective_target_vect_call_floorf:\
- returning $et_vect_call_floorf_saved($et_index)" 2
- return $et_vect_call_floorf_saved($et_index)
+ return [check_cached_effective_target_indexed vect_call_floorf {
+ expr { [istarget aarch64*-*-*]
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if the target supports vector lceil calls.
proc check_effective_target_vect_call_lceil { } {
- global et_vect_call_lceil_saved
- global et_index
-
- if [info exists et_vect_call_lceil_saved($et_index)] {
- verbose "check_effective_target_vect_call_lceil: using cached result" 2
- } else {
- set et_vect_call_lceil_saved($et_index) 0
- if { [istarget aarch64*-*-*] } {
- set et_vect_call_lceil_saved($et_index) 1
- }
- }
-
- verbose "check_effective_target_vect_call_lceil:\
- returning $et_vect_call_lceil_saved($et_index)" 2
- return $et_vect_call_lceil_saved($et_index)
+ return [check_cached_effective_target_indexed vect_call_lceil {
+ expr { [istarget aarch64*-*-*] }}]
}
# Return 1 if the target supports vector lfloor calls.
proc check_effective_target_vect_call_lfloor { } {
- global et_vect_call_lfloor_saved
- global et_index
-
- if [info exists et_vect_call_lfloor_saved($et_index)] {
- verbose "check_effective_target_vect_call_lfloor: using cached result" 2
- } else {
- set et_vect_call_lfloor_saved($et_index) 0
- if { [istarget aarch64*-*-*] } {
- set et_vect_call_lfloor_saved($et_index) 1
- }
- }
-
- verbose "check_effective_target_vect_call_lfloor:\
- returning $et_vect_call_lfloor_saved($et_index)" 2
- return $et_vect_call_lfloor_saved($et_index)
+ return [check_cached_effective_target_indexed vect_call_lfloor {
+ expr { [istarget aarch64*-*-*] }}]
}
# Return 1 if the target supports vector nearbyint calls.
proc check_effective_target_vect_call_nearbyint { } {
- global et_vect_call_nearbyint_saved
- global et_index
-
- if [info exists et_vect_call_nearbyint_saved($et_index)] {
- verbose "check_effective_target_vect_call_nearbyint: using cached result" 2
- } else {
- set et_vect_call_nearbyint_saved($et_index) 0
- if { [istarget aarch64*-*-*] } {
- set et_vect_call_nearbyint_saved($et_index) 1
- }
- }
-
- verbose "check_effective_target_vect_call_nearbyint:\
- returning $et_vect_call_nearbyint_saved($et_index)" 2
- return $et_vect_call_nearbyint_saved($et_index)
+ return [check_cached_effective_target_indexed vect_call_nearbyint {
+ expr { [istarget aarch64*-*-*] }}]
}
# Return 1 if the target supports vector nearbyintf calls.
proc check_effective_target_vect_call_nearbyintf { } {
- global et_vect_call_nearbyintf_saved
- global et_index
-
- if [info exists et_vect_call_nearbyintf_saved($et_index)] {
- verbose "check_effective_target_vect_call_nearbyintf:\
- using cached result" 2
- } else {
- set et_vect_call_nearbyintf_saved($et_index) 0
- if { [istarget aarch64*-*-*] } {
- set et_vect_call_nearbyintf_saved($et_index) 1
- }
- }
-
- verbose "check_effective_target_vect_call_nearbyintf:\
- returning $et_vect_call_nearbyintf_saved($et_index)" 2
- return $et_vect_call_nearbyintf_saved($et_index)
+ return [check_cached_effective_target_indexed vect_call_nearbyintf {
+ expr { [istarget aarch64*-*-*] }}]
}
# Return 1 if the target supports vector round calls.
proc check_effective_target_vect_call_round { } {
- global et_vect_call_round_saved
- global et_index
-
- if [info exists et_vect_call_round_saved($et_index)] {
- verbose "check_effective_target_vect_call_round: using cached result" 2
- } else {
- set et_vect_call_round_saved($et_index) 0
- if { [istarget aarch64*-*-*] } {
- set et_vect_call_round_saved($et_index) 1
- }
- }
-
- verbose "check_effective_target_vect_call_round:\
- returning $et_vect_call_round_saved($et_index)" 2
- return $et_vect_call_round_saved($et_index)
+ return [check_cached_effective_target_indexed vect_call_round {
+ expr { [istarget aarch64*-*-*] }}]
}
# Return 1 if the target supports vector roundf calls.
proc check_effective_target_vect_call_roundf { } {
- global et_vect_call_roundf_saved
- global et_index
+ return [check_cached_effective_target_indexed vect_call_roundf {
+ expr { [istarget aarch64*-*-*] }}]
+}
- if [info exists et_vect_call_roundf_saved($et_index)] {
- verbose "check_effective_target_vect_call_roundf: using cached result" 2
- } else {
- set et_vect_call_roundf_saved($et_index) 0
- if { [istarget aarch64*-*-*] } {
- set et_vect_call_roundf_saved($et_index) 1
- }
- }
+# Return 1 if the target supports AND, OR and XOR reduction.
- verbose "check_effective_target_vect_call_roundf:\
- returning $et_vect_call_roundf_saved($et_index)" 2
- return $et_vect_call_roundf_saved($et_index)
+proc check_effective_target_vect_logical_reduc { } {
+ return [check_effective_target_aarch64_sve]
}
-# Return 1 if the target supports section-anchors
+# Return 1 if the target supports the fold_extract_last optab.
-proc check_effective_target_section_anchors { } {
- global et_section_anchors_saved
+proc check_effective_target_vect_fold_extract_last { } {
+ return [check_effective_target_aarch64_sve]
+}
- if [info exists et_section_anchors_saved] {
- verbose "check_effective_target_section_anchors: using cached result" 2
- } else {
- set et_section_anchors_saved 0
- if { [istarget powerpc*-*-*]
- || [istarget arm*-*-*]
- || [istarget aarch64*-*-*] } {
- set et_section_anchors_saved 1
- }
- }
+# Return 1 if the target supports section-anchors
- verbose "check_effective_target_section_anchors: returning $et_section_anchors_saved" 2
- return $et_section_anchors_saved
+proc check_effective_target_section_anchors { } {
+ return [check_cached_effective_target section_anchors {
+ expr { [istarget powerpc*-*-*]
+ || [istarget arm*-*-*]
+ || [istarget aarch64*-*-*] }}]
}
# Return 1 if the target supports atomic operations on "int_128" values.
}
}
+# Return 1 if the target supports popcount on long.
+
+proc check_effective_target_popcountl { } {
+ return [check_no_messages_and_pattern popcountl "!\\(call" rtl-expand {
+ int foo (long b)
+ {
+ return __builtin_popcountl (b);
+ }
+ } "" ]
+}
+
# Return 1 if the target supports atomic operations on "long long"
# and can execute them.
#
}
}
-# Return 1 if the target supports byte swap instructions.
-
-proc check_effective_target_bswap { } {
- global et_bswap_saved
-
- if [info exists et_bswap_saved] {
- verbose "check_effective_target_bswap: using cached result" 2
- } else {
- set et_bswap_saved 0
- if { [istarget aarch64*-*-*]
- || [istarget alpha*-*-*]
- || [istarget i?86-*-*] || [istarget x86_64-*-*]
- || [istarget m68k-*-*]
- || [istarget powerpc*-*-*]
- || [istarget rs6000-*-*]
- || [istarget s390*-*-*]
- || ([istarget arm*-*-*]
- && [check_no_compiler_messages_nocache arm_v6_or_later object {
- #if __ARM_ARCH < 6
- #error not armv6 or later
- #endif
- int i;
- } ""]) } {
- set et_bswap_saved 1
- }
- }
-
- verbose "check_effective_target_bswap: returning $et_bswap_saved" 2
- return $et_bswap_saved
-}
-
-# Return 1 if the target supports 16-bit byte swap instructions.
-
-proc check_effective_target_bswap16 { } {
- global et_bswap16_saved
-
- if [info exists et_bswap16_saved] {
- verbose "check_effective_target_bswap16: using cached result" 2
- } else {
- set et_bswap16_saved 0
- if { [is-effective-target bswap]
- && ![istarget alpha*-*-*]
- && !([istarget i?86-*-*] || [istarget x86_64-*-*]) } {
- set et_bswap16_saved 1
- }
- }
-
- verbose "check_effective_target_bswap16: returning $et_bswap16_saved" 2
- return $et_bswap16_saved
-}
-
-# Return 1 if the target supports 32-bit byte swap instructions.
-
-proc check_effective_target_bswap32 { } {
- global et_bswap32_saved
-
- if [info exists et_bswap32_saved] {
- verbose "check_effective_target_bswap32: using cached result" 2
- } else {
- set et_bswap32_saved 0
- if { [is-effective-target bswap] } {
- set et_bswap32_saved 1
- }
- }
-
- verbose "check_effective_target_bswap32: returning $et_bswap32_saved" 2
- return $et_bswap32_saved
-}
-
-# Return 1 if the target supports 64-bit byte swap instructions.
-#
-# Note: 32bit s390 targets require -mzarch in dg-options.
-
-proc check_effective_target_bswap64 { } {
- global et_bswap64_saved
+# Return 1 if the target supports byte swap instructions.
- # expand_unop can expand 64-bit byte swap on 32-bit targets
- if { [is-effective-target bswap] && [is-effective-target int32plus] } {
- return 1
- }
- return 0
+proc check_effective_target_bswap { } {
+ return [check_cached_effective_target bswap {
+ expr { [istarget aarch64*-*-*]
+ || [istarget alpha*-*-*]
+ || [istarget i?86-*-*] || [istarget x86_64-*-*]
+ || [istarget m68k-*-*]
+ || [istarget powerpc*-*-*]
+ || [istarget rs6000-*-*]
+ || [istarget s390*-*-*]
+ || ([istarget arm*-*-*]
+ && [check_no_compiler_messages_nocache arm_v6_or_later object {
+ #if __ARM_ARCH < 6
+ #error not armv6 or later
+ #endif
+ int i;
+ } ""]) }}]
}
# Return 1 if the target supports atomic operations on "int" and "long".
proc check_effective_target_sync_int_long { } {
- global et_sync_int_long_saved
-
- if [info exists et_sync_int_long_saved] {
- verbose "check_effective_target_sync_int_long: using cached result" 2
- } else {
- set et_sync_int_long_saved 0
# This is intentionally powerpc but not rs6000, rs6000 doesn't have the
# load-reserved/store-conditional instructions.
- if { [istarget ia64-*-*]
+ return [check_cached_effective_target sync_int_long {
+ expr { [istarget ia64-*-*]
|| [istarget i?86-*-*] || [istarget x86_64-*-*]
|| [istarget aarch64*-*-*]
|| [istarget alpha*-*-*]
|| ([istarget sparc*-*-*] && [check_effective_target_sparc_v9])
|| [istarget spu-*-*]
|| ([istarget arc*-*-*] && [check_effective_target_arc_atomic])
- || [check_effective_target_mips_llsc] } {
- set et_sync_int_long_saved 1
- }
- }
-
- verbose "check_effective_target_sync_int_long: returning $et_sync_int_long_saved" 2
- return $et_sync_int_long_saved
+ || [check_effective_target_mips_llsc] }}]
}
# Return 1 if the target supports atomic operations on "char" and "short".
proc check_effective_target_sync_char_short { } {
- global et_sync_char_short_saved
-
- if [info exists et_sync_char_short_saved] {
- verbose "check_effective_target_sync_char_short: using cached result" 2
- } else {
- set et_sync_char_short_saved 0
# This is intentionally powerpc but not rs6000, rs6000 doesn't have the
# load-reserved/store-conditional instructions.
- if { [istarget aarch64*-*-*]
+ return [check_cached_effective_target sync_char_short {
+ expr { [istarget aarch64*-*-*]
|| [istarget ia64-*-*]
|| [istarget i?86-*-*] || [istarget x86_64-*-*]
|| [istarget alpha*-*-*]
|| ([istarget sparc*-*-*] && [check_effective_target_sparc_v9])
|| [istarget spu-*-*]
|| ([istarget arc*-*-*] && [check_effective_target_arc_atomic])
- || [check_effective_target_mips_llsc] } {
- set et_sync_char_short_saved 1
- }
- }
-
- verbose "check_effective_target_sync_char_short: returning $et_sync_char_short_saved" 2
- return $et_sync_char_short_saved
+ || [check_effective_target_mips_llsc] }}]
}
# Return 1 if the target uses a ColdFire FPU.
}]
}
+# Return true if GCC was configured with --enable-newlib-nano-formatted-io
+proc check_effective_target_newlib_nano_io { } {
+ return [check_configured_with "--enable-newlib-nano-formatted-io"]
+}
+
# Some newlib versions don't provide a frexpl and instead depend
# on frexp to implement long double conversions in their printf-like
# functions. This leads to broken results. Detect such versions here.
default { error "unknown effective target keyword `$arg'" }
}
}
+
verbose "is-effective-target: $arg $selected" 2
return $selected
}
}]
}
+# Return 1 if the target provides the D runtime.
+
+proc check_effective_target_d_runtime { } {
+ return [check_no_compiler_messages d_runtime executable {
+ // D
+ module mod;
+
+ extern(C) int main() {
+ return 0;
+ }
+ }]
+}
+
# Return 1 if target wchar_t is at least 4 bytes.
proc check_effective_target_4byte_wchar_t { } {
return 0;
}
-# Return true if 32- and 16-bytes vectors are available.
+# Return true if we are compiling for AVX2 target.
+
+proc check_avx2_available { } {
+ if { [check_no_compiler_messages avx2_available assembly {
+ #ifndef __AVX2__
+ #error unsupported
+ #endif
+ } ""] } {
+ return 1;
+ }
+ return 0;
+}
+
+# Return true if we are compiling for SSSE3 target.
+
+proc check_ssse3_available { } {
+ if { [check_no_compiler_messages sse3a_available assembly {
+ #ifndef __SSSE3__
+ #error unsupported
+ #endif
+ } ""] } {
+ return 1;
+ }
+ return 0;
+}
+
+# Return true if 32- and 16-bytes vectors are available.
+
+proc check_effective_target_vect_sizes_32B_16B { } {
+ return [expr { [available_vector_sizes] == [list 256 128] }]
+}
+
+# Return true if 16- and 8-bytes vectors are available.
+
+proc check_effective_target_vect_sizes_16B_8B { } {
+ if { [check_avx_available]
+ || [is-effective-target arm_neon]
+ || [istarget aarch64*-*-*] } {
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+
+# Return true if 128-bits vectors are preferred even if 256-bits vectors
+# are available.
+
+proc check_prefer_avx128 { } {
+ if ![check_avx_available] {
+ return 0;
+ }
+ return [check_no_messages_and_pattern avx_explicit "xmm" assembly {
+ float a[1024],b[1024],c[1024];
+ void foo (void) { int i; for (i = 0; i < 1024; i++) a[i]=b[i]+c[i];}
+ } "-O2 -ftree-vectorize"]
+}
+
+
+# Return 1 if avx512f instructions can be compiled.
+
+proc check_effective_target_avx512f { } {
+ return [check_no_compiler_messages avx512f object {
+ typedef double __m512d __attribute__ ((__vector_size__ (64)));
+ typedef double __m128d __attribute__ ((__vector_size__ (16)));
+
+ __m512d _mm512_add (__m512d a)
+ {
+ return __builtin_ia32_addpd512_mask (a, a, a, 1, 4);
+ }
+
+ __m128d _mm128_add (__m128d a)
+ {
+ return __builtin_ia32_addsd_round (a, a, 8);
+ }
+
+ __m128d _mm128_getmant (__m128d a)
+ {
+ return __builtin_ia32_getmantsd_round (a, a, 0, 8);
+ }
+ } "-O2 -mavx512f" ]
+}
+
+# Return 1 if avx instructions can be compiled.
+
+proc check_effective_target_avx { } {
+ if { !([istarget i?86-*-*] || [istarget x86_64-*-*]) } {
+ return 0
+ }
+ return [check_no_compiler_messages avx object {
+ void _mm256_zeroall (void)
+ {
+ __builtin_ia32_vzeroall ();
+ }
+ } "-O2 -mavx" ]
+}
+
+# Return 1 if avx2 instructions can be compiled.
+proc check_effective_target_avx2 { } {
+ return [check_no_compiler_messages avx2 object {
+ typedef long long __v4di __attribute__ ((__vector_size__ (32)));
+ __v4di
+ mm256_is32_andnotsi256 (__v4di __X, __v4di __Y)
+ {
+ return __builtin_ia32_andnotsi256 (__X, __Y);
+ }
+ } "-O0 -mavx2" ]
+}
+
+# Return 1 if sse instructions can be compiled.
+proc check_effective_target_sse { } {
+ return [check_no_compiler_messages sse object {
+ int main ()
+ {
+ __builtin_ia32_stmxcsr ();
+ return 0;
+ }
+ } "-O2 -msse" ]
+}
+
+# Return 1 if sse2 instructions can be compiled.
+proc check_effective_target_sse2 { } {
+ return [check_no_compiler_messages sse2 object {
+ typedef long long __m128i __attribute__ ((__vector_size__ (16)));
+
+ __m128i _mm_srli_si128 (__m128i __A, int __N)
+ {
+ return (__m128i)__builtin_ia32_psrldqi128 (__A, 8);
+ }
+ } "-O2 -msse2" ]
+}
+
+# Return 1 if sse4.1 instructions can be compiled.
+proc check_effective_target_sse4 { } {
+ return [check_no_compiler_messages sse4.1 object {
+ typedef long long __m128i __attribute__ ((__vector_size__ (16)));
+ typedef int __v4si __attribute__ ((__vector_size__ (16)));
+
+ __m128i _mm_mullo_epi32 (__m128i __X, __m128i __Y)
+ {
+ return (__m128i) __builtin_ia32_pmulld128 ((__v4si)__X,
+ (__v4si)__Y);
+ }
+ } "-O2 -msse4.1" ]
+}
+
+# Return 1 if F16C instructions can be compiled.
+
+proc check_effective_target_f16c { } {
+ return [check_no_compiler_messages f16c object {
+ #include "immintrin.h"
+ float
+ foo (unsigned short val)
+ {
+ return _cvtsh_ss (val);
+ }
+ } "-O2 -mf16c" ]
+}
+
+proc check_effective_target_ms_hook_prologue { } {
+ if { [check_no_compiler_messages ms_hook_prologue object {
+ void __attribute__ ((__ms_hook_prologue__)) foo ();
+ } ""] } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+# Return 1 if 3dnow instructions can be compiled.
+proc check_effective_target_3dnow { } {
+ return [check_no_compiler_messages 3dnow object {
+ typedef int __m64 __attribute__ ((__vector_size__ (8)));
+ typedef float __v2sf __attribute__ ((__vector_size__ (8)));
+
+ __m64 _m_pfadd (__m64 __A, __m64 __B)
+ {
+ return (__m64) __builtin_ia32_pfadd ((__v2sf)__A, (__v2sf)__B);
+ }
+ } "-O2 -m3dnow" ]
+}
+
+# Return 1 if sse3 instructions can be compiled.
+proc check_effective_target_sse3 { } {
+ return [check_no_compiler_messages sse3 object {
+ typedef double __m128d __attribute__ ((__vector_size__ (16)));
+ typedef double __v2df __attribute__ ((__vector_size__ (16)));
+
+ __m128d _mm_addsub_pd (__m128d __X, __m128d __Y)
+ {
+ return (__m128d) __builtin_ia32_addsubpd ((__v2df)__X, (__v2df)__Y);
+ }
+ } "-O2 -msse3" ]
+}
+
+# Return 1 if ssse3 instructions can be compiled.
+proc check_effective_target_ssse3 { } {
+ return [check_no_compiler_messages ssse3 object {
+ typedef long long __m128i __attribute__ ((__vector_size__ (16)));
+ typedef int __v4si __attribute__ ((__vector_size__ (16)));
+
+ __m128i _mm_abs_epi32 (__m128i __X)
+ {
+ return (__m128i) __builtin_ia32_pabsd128 ((__v4si)__X);
+ }
+ } "-O2 -mssse3" ]
+}
+
+# Return 1 if aes instructions can be compiled.
+proc check_effective_target_aes { } {
+ return [check_no_compiler_messages aes object {
+ typedef long long __m128i __attribute__ ((__vector_size__ (16)));
+ typedef long long __v2di __attribute__ ((__vector_size__ (16)));
+
+ __m128i _mm_aesimc_si128 (__m128i __X)
+ {
+ return (__m128i) __builtin_ia32_aesimc128 ((__v2di)__X);
+ }
+ } "-O2 -maes" ]
+}
+
+# Return 1 if vaes instructions can be compiled.
+proc check_effective_target_vaes { } {
+ return [check_no_compiler_messages vaes object {
+ typedef long long __m128i __attribute__ ((__vector_size__ (16)));
+ typedef long long __v2di __attribute__ ((__vector_size__ (16)));
+
+ __m128i _mm_aesimc_si128 (__m128i __X)
+ {
+ return (__m128i) __builtin_ia32_aesimc128 ((__v2di)__X);
+ }
+ } "-O2 -maes -mavx" ]
+}
+
+# Return 1 if pclmul instructions can be compiled.
+proc check_effective_target_pclmul { } {
+ return [check_no_compiler_messages pclmul object {
+ typedef long long __m128i __attribute__ ((__vector_size__ (16)));
+ typedef long long __v2di __attribute__ ((__vector_size__ (16)));
+
+ __m128i pclmulqdq_test (__m128i __X, __m128i __Y)
+ {
+ return (__m128i) __builtin_ia32_pclmulqdq128 ((__v2di)__X,
+ (__v2di)__Y,
+ 1);
+ }
+ } "-O2 -mpclmul" ]
+}
+
+# Return 1 if vpclmul instructions can be compiled.
+proc check_effective_target_vpclmul { } {
+ return [check_no_compiler_messages vpclmul object {
+ typedef long long __m128i __attribute__ ((__vector_size__ (16)));
+ typedef long long __v2di __attribute__ ((__vector_size__ (16)));
+
+ __m128i pclmulqdq_test (__m128i __X, __m128i __Y)
+ {
+ return (__m128i) __builtin_ia32_pclmulqdq128 ((__v2di)__X,
+ (__v2di)__Y,
+ 1);
+ }
+ } "-O2 -mpclmul -mavx" ]
+}
+
+# Return 1 if sse4a instructions can be compiled.
+proc check_effective_target_sse4a { } {
+ return [check_no_compiler_messages sse4a object {
+ typedef long long __m128i __attribute__ ((__vector_size__ (16)));
+ typedef long long __v2di __attribute__ ((__vector_size__ (16)));
+
+ __m128i _mm_insert_si64 (__m128i __X,__m128i __Y)
+ {
+ return (__m128i) __builtin_ia32_insertq ((__v2di)__X, (__v2di)__Y);
+ }
+ } "-O2 -msse4a" ]
+}
+
+# Return 1 if fma4 instructions can be compiled.
+proc check_effective_target_fma4 { } {
+ return [check_no_compiler_messages fma4 object {
+ typedef float __m128 __attribute__ ((__vector_size__ (16)));
+ typedef float __v4sf __attribute__ ((__vector_size__ (16)));
+ __m128 _mm_macc_ps(__m128 __A, __m128 __B, __m128 __C)
+ {
+ return (__m128) __builtin_ia32_vfmaddps ((__v4sf)__A,
+ (__v4sf)__B,
+ (__v4sf)__C);
+ }
+ } "-O2 -mfma4" ]
+}
+
+# Return 1 if fma instructions can be compiled.
+proc check_effective_target_fma { } {
+ return [check_no_compiler_messages fma object {
+ typedef float __m128 __attribute__ ((__vector_size__ (16)));
+ typedef float __v4sf __attribute__ ((__vector_size__ (16)));
+ __m128 _mm_macc_ps(__m128 __A, __m128 __B, __m128 __C)
+ {
+ return (__m128) __builtin_ia32_vfmaddps ((__v4sf)__A,
+ (__v4sf)__B,
+ (__v4sf)__C);
+ }
+ } "-O2 -mfma" ]
+}
+
+# Return 1 if xop instructions can be compiled.
+proc check_effective_target_xop { } {
+ return [check_no_compiler_messages xop object {
+ typedef long long __m128i __attribute__ ((__vector_size__ (16)));
+ typedef short __v8hi __attribute__ ((__vector_size__ (16)));
+ __m128i _mm_maccs_epi16(__m128i __A, __m128i __B, __m128i __C)
+ {
+ return (__m128i) __builtin_ia32_vpmacssww ((__v8hi)__A,
+ (__v8hi)__B,
+ (__v8hi)__C);
+ }
+ } "-O2 -mxop" ]
+}
+
+# Return 1 if lzcnt instruction can be compiled.
+proc check_effective_target_lzcnt { } {
+ return [check_no_compiler_messages lzcnt object {
+ unsigned short _lzcnt (unsigned short __X)
+ {
+ return __builtin_clzs (__X);
+ }
+ } "-mlzcnt" ]
+}
+
+# Return 1 if bmi instructions can be compiled.
+proc check_effective_target_bmi { } {
+ return [check_no_compiler_messages bmi object {
+ unsigned int __bextr_u32 (unsigned int __X, unsigned int __Y)
+ {
+ return __builtin_ia32_bextr_u32 (__X, __Y);
+ }
+ } "-mbmi" ]
+}
+
+# Return 1 if ADX instructions can be compiled.
+proc check_effective_target_adx { } {
+ return [check_no_compiler_messages adx object {
+ unsigned char
+ _adxcarry_u32 (unsigned char __CF, unsigned int __X,
+ unsigned int __Y, unsigned int *__P)
+ {
+ return __builtin_ia32_addcarryx_u32 (__CF, __X, __Y, __P);
+ }
+ } "-madx" ]
+}
-proc check_effective_target_vect_sizes_32B_16B { } {
- if { [check_avx_available] && ![check_prefer_avx128] } {
- return 1;
- } else {
- return 0;
- }
+# Return 1 if rtm instructions can be compiled.
+proc check_effective_target_rtm { } {
+ return [check_no_compiler_messages rtm object {
+ void
+ _rtm_xend (void)
+ {
+ return __builtin_ia32_xend ();
+ }
+ } "-mrtm" ]
}
-# Return true if 16- and 8-bytes vectors are available.
+# Return 1 if avx512vl instructions can be compiled.
+proc check_effective_target_avx512vl { } {
+ return [check_no_compiler_messages avx512vl object {
+ typedef long long __v4di __attribute__ ((__vector_size__ (32)));
+ __v4di
+ mm256_and_epi64 (__v4di __X, __v4di __Y)
+ {
+ __v4di __W;
+ return __builtin_ia32_pandq256_mask (__X, __Y, __W, -1);
+ }
+ } "-mavx512vl" ]
+}
-proc check_effective_target_vect_sizes_16B_8B { } {
- if { [check_avx_available]
- || [is-effective-target arm_neon]
- || [istarget aarch64*-*-*] } {
- return 1;
- } else {
- return 0;
- }
+# Return 1 if avx512cd instructions can be compiled.
+proc check_effective_target_avx512cd { } {
+ return [check_no_compiler_messages avx512cd_trans object {
+ typedef long long __v8di __attribute__ ((__vector_size__ (64)));
+ __v8di
+ _mm512_conflict_epi64 (__v8di __W, __v8di __A)
+ {
+ return (__v8di) __builtin_ia32_vpconflictdi_512_mask ((__v8di) __A,
+ (__v8di) __W,
+ -1);
+ }
+ } "-Wno-psabi -mavx512cd" ]
}
+# Return 1 if avx512er instructions can be compiled.
+proc check_effective_target_avx512er { } {
+ return [check_no_compiler_messages avx512er_trans object {
+ typedef float __v16sf __attribute__ ((__vector_size__ (64)));
+ __v16sf
+ mm512_exp2a23_ps (__v16sf __X)
+ {
+ return __builtin_ia32_exp2ps_mask (__X, __X, -1, 4);
+ }
+ } "-Wno-psabi -mavx512er" ]
+}
-# Return true if 128-bits vectors are preferred even if 256-bits vectors
-# are available.
+# Return 1 if sha instructions can be compiled.
+proc check_effective_target_sha { } {
+ return [check_no_compiler_messages sha object {
+ typedef long long __m128i __attribute__ ((__vector_size__ (16)));
+ typedef int __v4si __attribute__ ((__vector_size__ (16)));
-proc check_prefer_avx128 { } {
- if ![check_avx_available] {
- return 0;
- }
- return [check_no_messages_and_pattern avx_explicit "xmm" assembly {
- float a[1024],b[1024],c[1024];
- void foo (void) { int i; for (i = 0; i < 1024; i++) a[i]=b[i]+c[i];}
- } "-O2 -ftree-vectorize"]
+ __m128i _mm_sha1msg1_epu32 (__m128i __X, __m128i __Y)
+ {
+ return (__m128i) __builtin_ia32_sha1msg1 ((__v4si)__X,
+ (__v4si)__Y);
+ }
+ } "-O2 -msha" ]
}
+# Return 1 if avx512dq instructions can be compiled.
+proc check_effective_target_avx512dq { } {
+ return [check_no_compiler_messages avx512dq object {
+ typedef long long __v8di __attribute__ ((__vector_size__ (64)));
+ __v8di
+ _mm512_mask_mullo_epi64 (__v8di __W, __v8di __A, __v8di __B)
+ {
+ return (__v8di) __builtin_ia32_pmullq512_mask ((__v8di) __A,
+ (__v8di) __B,
+ (__v8di) __W,
+ -1);
+ }
+ } "-mavx512dq" ]
+}
-# Return 1 if avx512f instructions can be compiled.
+# Return 1 if avx512bw instructions can be compiled.
+proc check_effective_target_avx512bw { } {
+ return [check_no_compiler_messages avx512bw object {
+ typedef short __v32hi __attribute__ ((__vector_size__ (64)));
+ __v32hi
+ _mm512_mask_mulhrs_epi16 (__v32hi __W, __v32hi __A, __v32hi __B)
+ {
+ return (__v32hi) __builtin_ia32_pmulhrsw512_mask ((__v32hi) __A,
+ (__v32hi) __B,
+ (__v32hi) __W,
+ -1);
+ }
+ } "-mavx512bw" ]
+}
-proc check_effective_target_avx512f { } {
- return [check_no_compiler_messages avx512f object {
- typedef double __m512d __attribute__ ((__vector_size__ (64)));
+# Return 1 if avx512ifma instructions can be compiled.
+proc check_effective_target_avx512ifma { } {
+ return [check_no_compiler_messages avx512ifma object {
+ typedef long long __v8di __attribute__ ((__vector_size__ (64)));
+ __v8di
+ _mm512_madd52lo_epu64 (__v8di __X, __v8di __Y, __v8di __Z)
+ {
+ return (__v8di) __builtin_ia32_vpmadd52luq512_mask ((__v8di) __X,
+ (__v8di) __Y,
+ (__v8di) __Z,
+ -1);
+ }
+ } "-mavx512ifma" ]
+}
- __m512d _mm512_add (__m512d a)
+# Return 1 if avx512vbmi instructions can be compiled.
+proc check_effective_target_avx512vbmi { } {
+ return [check_no_compiler_messages avx512vbmi object {
+ typedef char __v64qi __attribute__ ((__vector_size__ (64)));
+ __v64qi
+ _mm512_multishift_epi64_epi8 (__v64qi __X, __v64qi __Y)
{
- return __builtin_ia32_addpd512_mask (a, a, a, 1, 4);
+ return (__v64qi) __builtin_ia32_vpmultishiftqb512_mask ((__v64qi) __X,
+ (__v64qi) __Y,
+ (__v64qi) __Y,
+ -1);
}
- } "-O2 -mavx512f" ]
+ } "-mavx512vbmi" ]
}
-# Return 1 if avx instructions can be compiled.
+# Return 1 if avx512_4fmaps instructions can be compiled.
+proc check_effective_target_avx5124fmaps { } {
+ return [check_no_compiler_messages avx5124fmaps object {
+ typedef float __v16sf __attribute__ ((__vector_size__ (64)));
+ typedef float __v4sf __attribute__ ((__vector_size__ (16)));
-proc check_effective_target_avx { } {
- if { !([istarget i?86-*-*] || [istarget x86_64-*-*]) } {
- return 0
- }
- return [check_no_compiler_messages avx object {
- void _mm256_zeroall (void)
+ __v16sf
+ _mm512_mask_4fmadd_ps (__v16sf __DEST, __v16sf __A, __v16sf __B, __v16sf __C,
+ __v16sf __D, __v16sf __E, __v4sf *__F)
{
- __builtin_ia32_vzeroall ();
+ return (__v16sf) __builtin_ia32_4fmaddps_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__v16sf) __D,
+ (__v16sf) __E,
+ (const __v4sf *) __F,
+ (__v16sf) __DEST,
+ 0xffff);
+ }
+ } "-mavx5124fmaps" ]
+}
+
+# Return 1 if avx512_4vnniw instructions can be compiled.
+proc check_effective_target_avx5124vnniw { } {
+ return [check_no_compiler_messages avx5124vnniw object {
+ typedef int __v16si __attribute__ ((__vector_size__ (64)));
+ typedef int __v4si __attribute__ ((__vector_size__ (16)));
+
+ __v16si
+ _mm512_4dpwssd_epi32 (__v16si __A, __v16si __B, __v16si __C,
+ __v16si __D, __v16si __E, __v4si *__F)
+ {
+ return (__v16si) __builtin_ia32_vp4dpwssd ((__v16si) __B,
+ (__v16si) __C,
+ (__v16si) __D,
+ (__v16si) __E,
+ (__v16si) __A,
+ (const __v4si *) __F);
}
- } "-O2 -mavx" ]
+ } "-mavx5124vnniw" ]
}
-# Return 1 if avx2 instructions can be compiled.
-proc check_effective_target_avx2 { } {
- return [check_no_compiler_messages avx2 object {
- typedef long long __v4di __attribute__ ((__vector_size__ (32)));
- __v4di
- mm256_is32_andnotsi256 (__v4di __X, __v4di __Y)
+# Return 1 if avx512_vpopcntdq instructions can be compiled.
+proc check_effective_target_avx512vpopcntdq { } {
+ return [check_no_compiler_messages avx512vpopcntdq object {
+ typedef int __v16si __attribute__ ((__vector_size__ (64)));
+
+ __v16si
+ _mm512_popcnt_epi32 (__v16si __A)
{
- return __builtin_ia32_andnotsi256 (__X, __Y);
- }
- } "-O0 -mavx2" ]
+ return (__v16si) __builtin_ia32_vpopcountd_v16si ((__v16si) __A);
+ }
+ } "-mavx512vpopcntdq" ]
}
-# Return 1 if sse instructions can be compiled.
-proc check_effective_target_sse { } {
- return [check_no_compiler_messages sse object {
- int main ()
- {
- __builtin_ia32_stmxcsr ();
- return 0;
- }
- } "-O2 -msse" ]
+# Return 1 if 128 or 256-bit avx512_vpopcntdq instructions can be compiled.
+proc check_effective_target_avx512vpopcntdqvl { } {
+ return [check_no_compiler_messages avx512vpopcntdqvl object {
+ typedef int __v8si __attribute__ ((__vector_size__ (32)));
+
+ __v8si
+ _mm256_popcnt_epi32 (__v8si __A)
+ {
+ return (__v8si) __builtin_ia32_vpopcountd_v8si ((__v8si) __A);
+ }
+ } "-mavx512vpopcntdq -mavx512vl" ]
}
-# Return 1 if sse2 instructions can be compiled.
-proc check_effective_target_sse2 { } {
- return [check_no_compiler_messages sse2 object {
- typedef long long __m128i __attribute__ ((__vector_size__ (16)));
-
- __m128i _mm_srli_si128 (__m128i __A, int __N)
+# Return 1 if gfni instructions can be compiled.
+proc check_effective_target_gfni { } {
+ return [check_no_compiler_messages gfni object {
+ typedef char __v16qi __attribute__ ((__vector_size__ (16)));
+
+ __v16qi
+ _mm_gf2p8affineinv_epi64_epi8 (__v16qi __A, __v16qi __B, const int __C)
+ {
+ return (__v16qi) __builtin_ia32_vgf2p8affineinvqb_v16qi ((__v16qi) __A,
+ (__v16qi) __B,
+ 0);
+ }
+ } "-mgfni" ]
+}
+
+# Return 1 if avx512vbmi2 instructions can be compiled.
+proc check_effective_target_avx512vbmi2 { } {
+ return [check_no_compiler_messages avx512vbmi2 object {
+ typedef char __v16qi __attribute__ ((__vector_size__ (16)));
+ typedef unsigned long long __mmask16;
+
+ __v16qi
+ _mm_mask_compress_epi8 (__v16qi __A, __mmask16 __B, __v16qi __C)
{
- return (__m128i)__builtin_ia32_psrldqi128 (__A, 8);
+ return (__v16qi) __builtin_ia32_compressqi128_mask((__v16qi)__C,
+ (__v16qi)__A,
+ (__mmask16)__B);
}
- } "-O2 -msse2" ]
+ } "-mavx512vbmi2 -mavx512vl" ]
}
-# Return 1 if sse4.1 instructions can be compiled.
-proc check_effective_target_sse4 { } {
- return [check_no_compiler_messages sse4.1 object {
- typedef long long __m128i __attribute__ ((__vector_size__ (16)));
- typedef int __v4si __attribute__ ((__vector_size__ (16)));
+# Return 1 if avx512vbmi2 instructions can be compiled.
+proc check_effective_target_avx512vnni { } {
+ return [check_no_compiler_messages avx512vnni object {
+ typedef int __v16si __attribute__ ((__vector_size__ (64)));
- __m128i _mm_mullo_epi32 (__m128i __X, __m128i __Y)
+ __v16si
+ _mm_mask_compress_epi8 (__v16si __A, __v16si __B, __v16si __C)
{
- return (__m128i) __builtin_ia32_pmulld128 ((__v4si)__X,
- (__v4si)__Y);
+ return (__v16si) __builtin_ia32_vpdpbusd_v16si ((__v16si)__A,
+ (__v16si)__B,
+ (__v16si)__C);
}
- } "-O2 -msse4.1" ]
+ } "-mavx512vnni -mavx512f" ]
}
-# Return 1 if F16C instructions can be compiled.
+# Return 1 if vaes instructions can be compiled.
+proc check_effective_target_avx512vaes { } {
+ return [check_no_compiler_messages avx512vaes object {
-proc check_effective_target_f16c { } {
- return [check_no_compiler_messages f16c object {
- #include "immintrin.h"
- float
- foo (unsigned short val)
+ typedef int __v16si __attribute__ ((__vector_size__ (64)));
+
+ __v32qi
+ _mm256_aesdec_epi128 (__v32qi __A, __v32qi __B)
{
- return _cvtsh_ss (val);
+ return (__v32qi)__builtin_ia32_vaesdec_v32qi ((__v32qi) __A, (__v32qi) __B);
}
- } "-O2 -mf16c" ]
+ } "-mvaes" ]
+}
+
+# Return 1 if vpclmulqdq instructions can be compiled.
+proc check_effective_target_vpclmulqdq { } {
+ return [check_no_compiler_messages vpclmulqdq object {
+ typedef long long __v4di __attribute__ ((__vector_size__ (32)));
+
+ __v4di
+ _mm256_clmulepi64_epi128 (__v4di __A, __v4di __B)
+ {
+ return (__v4di) __builtin_ia32_vpclmulqdq_v4di (__A, __B, 0);
+ }
+ } "-mvpclmulqdq -mavx512vl" ]
+}
+
+# Return 1 if avx512_bitalg instructions can be compiled.
+proc check_effective_target_avx512bitalg { } {
+ return [check_no_compiler_messages avx512bitalg object {
+ typedef short int __v32hi __attribute__ ((__vector_size__ (64)));
+
+ __v32hi
+ _mm512_popcnt_epi16 (__v32hi __A)
+ {
+ return (__v32hi) __builtin_ia32_vpopcountw_v32hi ((__v32hi) __A);
+ }
+ } "-mavx512bitalg" ]
}
# Return 1 if C wchar_t type is compatible with char16_t.
# (LTO) support.
proc check_effective_target_lto { } {
- if { [istarget nvptx-*-*] } {
+ if { [istarget nvptx-*-*]
+ || [istarget amdgcn-*-*] } {
return 0;
}
return [check_no_compiler_messages lto object {
} "-flto"]
}
+# Return 1 if the compiler and linker support incremental link-time
+# optimization.
+
+proc check_effective_target_lto_incremental { } {
+ if ![check_effective_target_lto] {
+ return 0
+ }
+ return [check_no_compiler_messages lto_incremental executable {
+ int main () { return 0; }
+ } "-flto -r -nostdlib"]
+}
+
# Return 1 if -mx32 -maddress-mode=short can compile, 0 otherwise.
proc check_effective_target_maybe_x32 { } {
if { [check_effective_target_mpaired_single] } {
lappend EFFECTIVE_TARGETS mpaired_single
}
- if { [check_effective_target_mips_loongson] } {
- lappend EFFECTIVE_TARGETS mips_loongson
+ if { [check_effective_target_mips_loongson_mmi] } {
+ lappend EFFECTIVE_TARGETS mips_loongson_mmi
}
if { [check_effective_target_mips_msa] } {
lappend EFFECTIVE_TARGETS mips_msa
lappend DEFAULT_VECTCFLAGS "-march=z14" "-mzarch"
set dg-do-what-default compile
}
+ } elseif [istarget amdgcn-*-*] {
+ set dg-do-what-default run
} else {
return 0
}
# Create functions to check that the AArch64 assembler supports the
# various architecture extensions via the .arch_extension pseudo-op.
-foreach { aarch64_ext } { "fp" "simd" "crypto" "crc" "lse"} {
+foreach { aarch64_ext } { "fp" "simd" "crypto" "crc" "lse" "dotprod" "sve"} {
eval [string map [list FUNC $aarch64_ext] {
proc check_effective_target_aarch64_asm_FUNC_ok { } {
if { [istarget aarch64*-*-*] } {
}
}
+# Return 1 if <fenv.h> is available.
+
+proc check_effective_target_fenv {} {
+ return [check_no_compiler_messages fenv object {
+ #include <fenv.h>
+ } [add_options_for_ieee "-std=gnu99"]]
+}
+
# Return 1 if <fenv.h> is available with all the standard IEEE
# exceptions and floating-point exceptions are raised by arithmetic
# operations. (If the target requires special options for "inexact"
} [add_options_for_ieee "-std=gnu99"]]
}
-proc check_effective_target_tiny {} {
- global et_target_tiny_saved
+# Return 1 if -fexceptions is supported.
- if [info exists et_target_tiny_saved] {
- verbose "check_effective_target_tiny: using cached result" 2
- } else {
- set et_target_tiny_saved 0
- if { [istarget aarch64*-*-*]
- && [check_effective_target_aarch64_tiny] } {
- set et_target_tiny_saved 1
- }
- if { [istarget avr-*-*]
- && [check_effective_target_avr_tiny] } {
- set et_target_tiny_saved 1
- }
+proc check_effective_target_exceptions {} {
+ if { [istarget amdgcn*-*-*] } {
+ return 0
}
+ return 1
+}
+
- return $et_target_tiny_saved
+proc check_effective_target_tiny {} {
+ return [check_cached_effective_target tiny {
+ if { [istarget aarch64*-*-*]
+ && [check_effective_target_aarch64_tiny] } {
+ return 1
+ }
+ if { [istarget avr-*-*]
+ && [check_effective_target_avr_tiny] } {
+ return 1
+ }
+ return 0
+ }]
}
-# Return 1 if LOGICAL_OP_NON_SHORT_CIRCUIT is set to 0 for the current target.
+# Return 1 if the target supports -mbranch-cost=N option.
-proc check_effective_target_logical_op_short_circuit {} {
- if { [istarget mips*-*-*]
- || [istarget arc*-*-*]
+proc check_effective_target_branch_cost {} {
+ if { [ istarget arm*-*-*]
|| [istarget avr*-*-*]
- || [istarget crisv32-*-*] || [istarget cris-*-*]
- || [istarget mmix-*-*]
+ || [istarget csky*-*-*]
+ || [istarget epiphany*-*-*]
+ || [istarget frv*-*-*]
+ || [istarget i?86-*-*] || [istarget x86_64-*-*]
+ || [istarget mips*-*-*]
|| [istarget s390*-*-*]
- || [istarget powerpc*-*-*]
- || [istarget nios2*-*-*]
|| [istarget riscv*-*-*]
- || [istarget visium-*-*]
- || [check_effective_target_arm_cortex_m] } {
+ || [istarget sh*-*-*]
+ || [istarget spu*-*-*] } {
return 1
}
return 0
}
proc ${test}_required_options {} {
global gcc_force_conventional_output
+ upvar 1 extra_tool_flags extra_tool_flags
+ if {[regexp -- "^scan-assembler" [info level 0]]
+ && ![string match "*-fident*" $extra_tool_flags]} {
+ # Do not let .ident confuse assembler scan tests
+ return [list $gcc_force_conventional_output "-fno-ident"]
+ }
return $gcc_force_conventional_output
}
}
+# Record that dg-final test scan-ltrans-tree-dump* requires -flto-partition=one
+# in order to force a single partition, allowing scan-ltrans-tree-dump* to scan
+# a dump file *.exe.ltrans0.*.
+
+proc scan-ltrans-tree-dump_required_options {} {
+ return "-flto-partition=one"
+}
+proc scan-ltrans-tree-dump-times_required_options {} {
+ return "-flto-partition=one"
+}
+proc scan-ltrans-tree-dump-not_required_options {} {
+ return "-flto-partition=one"
+}
+proc scan-ltrans-tree-dump-dem_required_options {} {
+ return "-flto-partition=one"
+}
+proc scan-ltrans-tree-dump-dem-not_required_options {} {
+ return "-flto-partition=one"
+}
+
# Return 1 if the x86-64 target supports PIE with copy reloc, 0
# otherwise. Cache the result.
proc check_effective_target_pie_copyreloc { } {
- global pie_copyreloc_available_saved
global tool
global GCC_UNDER_TEST
return 0
}
- if [info exists pie_copyreloc_available_saved] {
- verbose "check_effective_target_pie_copyreloc returning saved $pie_copyreloc_available_saved" 2
- } else {
+ return [check_cached_effective_target pie_copyreloc {
# Set up and compile to see if linker supports PIE with copy
# reloc. Include the current process ID in the file names to
# prevent conflicts with invocations for multiple testsuites.
if [string match "" $lines] then {
verbose "check_effective_target_pie_copyreloc testfile compilation passed" 2
- set pie_copyreloc_available_saved 1
+ return 1
} else {
verbose "check_effective_target_pie_copyreloc testfile compilation failed" 2
- set pie_copyreloc_available_saved 0
+ return 0
}
- }
-
- return $pie_copyreloc_available_saved
+ }]
}
# Return 1 if the x86 target supports R_386_GOT32X relocation, 0
# otherwise. Cache the result.
proc check_effective_target_got32x_reloc { } {
- global got32x_reloc_available_saved
global tool
global GCC_UNDER_TEST
return 0
}
- if [info exists got32x_reloc_available_saved] {
- verbose "check_effective_target_got32x_reloc returning saved $got32x_reloc_available_saved" 2
- } else {
+ return [check_cached_effective_target got32x_reloc {
# Include the current process ID in the file names to prevent
# conflicts with invocations for multiple testsuites.
if [string match "" $lines] then {
verbose "check_effective_target_got32x_reloc testfile compilation passed" 2
- set got32x_reloc_available_saved 1
+ return 1
} else {
verbose "check_effective_target_got32x_reloc testfile compilation failed" 2
- set got32x_reloc_available_saved 0
+ return 0
}
- }
+ }]
return $got32x_reloc_available_saved
}
# 0 otherwise. Cache the result.
proc check_effective_target_tls_get_addr_via_got { } {
- global tls_get_addr_via_got_available_saved
global tool
global GCC_UNDER_TEST
return 0
}
- if [info exists tls_get_addr_via_got_available_saved] {
- verbose "check_effective_target_tls_get_addr_via_got returning saved $tls_get_addr_via_got_available_saved" 2
- } else {
+ return [check_cached_effective_target tls_get_addr_via_got {
# Include the current process ID in the file names to prevent
# conflicts with invocations for multiple testsuites.
if [string match "" $lines] then {
verbose "check_effective_target_tls_get_addr_via_got testfile compilation passed" 2
- set tls_get_addr_via_got_available_saved 1
+ return 1
} else {
verbose "check_effective_target_tls_get_addr_via_got testfile compilation failed" 2
- set tls_get_addr_via_got_available_saved 0
+ return 0
}
- }
-
- return $tls_get_addr_via_got_available_saved
+ }]
}
# Return 1 if the target uses comdat groups.
proc check_effective_target_comdat_group {} {
- return [check_no_messages_and_pattern comdat_group "\.section\[^\n\r]*,comdat" assembly {
+ return [check_no_messages_and_pattern comdat_group "\.section\[^\n\r]*,comdat|\.group\[^\n\r]*,#comdat" assembly {
// C++
inline int foo () { return 1; }
int (*fn) () = foo;
return 0
}
-# Return 1 if there is an nvptx offload compiler.
+# Return 1 if the compiler has been configured with hsa offloading.
-proc check_effective_target_offload_nvptx { } {
- return [check_no_compiler_messages offload_nvptx object {
+proc check_effective_target_offload_hsa { } {
+ return [check_no_compiler_messages offload_hsa assembly {
int main () {return 0;}
- } "-foffload=nvptx-none" ]
+ } "-foffload=hsa" ]
}
# Return 1 if the compiler has been configured with hsa offloading.
-proc check_effective_target_offload_hsa { } {
- return [check_no_compiler_messages offload_hsa assembly {
+proc check_effective_target_offload_gcn { } {
+ return [check_no_compiler_messages offload_gcn assembly {
int main () {return 0;}
- } "-foffload=hsa" ]
+ } "-foffload=amdgcn-unknown-amdhsa" ]
}
# Return 1 if the target support -fprofile-update=atomic
#
proc check_effective_target_supports_stack_clash_protection { } {
- # Temporary until the target bits are fully ACK'd.
-# if { [istarget aarch*-*-*] } {
-# return 1
-# }
-
if { [istarget x86_64-*-*] || [istarget i?86-*-*]
|| [istarget powerpc*-*-*] || [istarget rs6000*-*-*]
- || [istarget s390*-*-*] } {
+ || [istarget aarch64*-**] || [istarget s390*-*-*] } {
return 1
}
return 0
# Return 1 if the target creates a frame pointer for non-leaf functions
# Note we ignore cases where we apply tail call optimization here.
proc check_effective_target_frame_pointer_for_non_leaf { } {
- if { [istarget aarch*-*-*] } {
- return 1
- }
-
# Solaris/x86 defaults to -fno-omit-frame-pointer.
if { [istarget i?86-*-solaris*] || [istarget x86_64-*-solaris*] } {
return 1
}
} "-O2" ]
}
+
+# Return 1 if target supports floating point "infinite"
+proc check_effective_target_inf { } {
+ return [check_no_compiler_messages supports_inf assembly {
+ const double pinf = __builtin_inf ();
+ }]
+}
+
+# Return 1 if the target supports ARMv8.3 Adv.SIMD Complex instructions
+# instructions, 0 otherwise. The test is valid for ARM and for AArch64.
+# Record the command line options needed.
+
+proc check_effective_target_arm_v8_3a_complex_neon_ok_nocache { } {
+ global et_arm_v8_3a_complex_neon_flags
+ set et_arm_v8_3a_complex_neon_flags ""
+
+ if { ![istarget arm*-*-*] && ![istarget aarch64*-*-*] } {
+ return 0;
+ }
+
+ # Iterate through sets of options to find the compiler flags that
+ # need to be added to the -march option.
+ foreach flags {"" "-mfloat-abi=softfp -mfpu=auto" "-mfloat-abi=hard -mfpu=auto"} {
+ if { [check_no_compiler_messages_nocache \
+ arm_v8_3a_complex_neon_ok object {
+ #if !defined (__ARM_FEATURE_COMPLEX)
+ #error "__ARM_FEATURE_COMPLEX not defined"
+ #endif
+ } "$flags -march=armv8.3-a"] } {
+ set et_arm_v8_3a_complex_neon_flags "$flags -march=armv8.3-a"
+ return 1
+ }
+ }
+
+ return 0;
+}
+
+proc check_effective_target_arm_v8_3a_complex_neon_ok { } {
+ return [check_cached_effective_target arm_v8_3a_complex_neon_ok \
+ check_effective_target_arm_v8_3a_complex_neon_ok_nocache]
+}
+
+proc add_options_for_arm_v8_3a_complex_neon { flags } {
+ if { ! [check_effective_target_arm_v8_3a_complex_neon_ok] } {
+ return "$flags"
+ }
+ global et_arm_v8_3a_complex_neon_flags
+ return "$flags $et_arm_v8_3a_complex_neon_flags"
+}
+
+# Return 1 if the target supports executing AdvSIMD instructions from ARMv8.3
+# with the complex instruction extension, 0 otherwise. The test is valid for
+# ARM and for AArch64.
+
+proc check_effective_target_arm_v8_3a_complex_neon_hw { } {
+ if { ![check_effective_target_arm_v8_3a_complex_neon_ok] } {
+ return 0;
+ }
+ return [check_runtime arm_v8_3a_complex_neon_hw_available {
+ #include "arm_neon.h"
+ int
+ main (void)
+ {
+
+ float32x2_t results = {-4.0,5.0};
+ float32x2_t a = {1.0,3.0};
+ float32x2_t b = {2.0,5.0};
+
+ #ifdef __ARM_ARCH_ISA_A64
+ asm ("fcadd %0.2s, %1.2s, %2.2s, #90"
+ : "=w"(results)
+ : "w"(a), "w"(b)
+ : /* No clobbers. */);
+
+ #else
+ asm ("vcadd.f32 %P0, %P1, %P2, #90"
+ : "=w"(results)
+ : "w"(a), "w"(b)
+ : /* No clobbers. */);
+ #endif
+
+ return (results[0] == 8 && results[1] == 24) ? 1 : 0;
+ }
+ } [add_options_for_arm_v8_3a_complex_neon ""]]
+}
+
+# Return 1 if the target plus current options supports a vector
+# complex addition with rotate of half and single float modes, 0 otherwise.
+#
+# This won't change for different subtargets so cache the result.
+
+foreach N {hf sf} {
+ eval [string map [list N $N] {
+ proc check_effective_target_vect_complex_rot_N { } {
+ return [check_cached_effective_target_indexed vect_complex_rot_N {
+ expr { [istarget aarch64*-*-*]
+ || [istarget arm*-*-*] }}]
+ }
+ }]
+}
+
+# Return 1 if the target plus current options supports a vector
+# complex addition with rotate of double float modes, 0 otherwise.
+#
+# This won't change for different subtargets so cache the result.
+
+foreach N {df} {
+ eval [string map [list N $N] {
+ proc check_effective_target_vect_complex_rot_N { } {
+ return [check_cached_effective_target_indexed vect_complex_rot_N {
+ expr { [istarget aarch64*-*-*] }}]
+ }
+ }]
+}
+
+# Return 1 if this target uses an LLVM assembler and/or linker
+proc check_effective_target_llvm_binutils { } {
+ return [check_cached_effective_target llvm_binutils {
+ expr { [istarget amdgcn*-*-*]
+ || [check_effective_target_offload_gcn] }}]
+}