# return 1 if weak undefined symbols are supported.
proc check_effective_target_weak_undefined { } {
+ if { [istarget hppa*-*-hpux*] } {
+ return 0
+ }
return [check_runtime weak_undefined {
extern void foo () __attribute__((weak));
int main (void) { if (foo) return 1; return 0; }
# missing other needed machinery.
if {[istarget aarch64*-*-elf]
|| [istarget am3*-*-linux*]
+ || [istarget amdgcn-*-*]
|| [istarget arm*-*-eabi*]
|| [istarget arm*-*-elf]
|| [istarget arm*-*-symbianelf*]
if { [istarget nvptx-*-*] } {
return 0
}
+ if { [istarget amdgcn-*-*] } {
+ return 0
+ }
return 1
}
if { [istarget nvptx-*-*] } {
return 0
}
+ # It could be supported on amdgcn, but isn't yet.
+ if { [istarget amdgcn*-*-*] } {
+ return 0
+ }
return 1
}
# code, 0 otherwise.
proc check_effective_target_fopenacc {} {
- # nvptx can be built with the device-side bits of openacc, but it
+ # nvptx/amdgcn can be built with the device-side bits of openacc, but it
# does not make sense to test it as an openacc host.
if [istarget nvptx-*-*] { return 0 }
+ if [istarget amdgcn-*-*] { return 0 }
return [check_no_compiler_messages fopenacc object {
void foo (void) { }
# code, 0 otherwise.
proc check_effective_target_fopenmp {} {
- # nvptx can be built with the device-side bits of libgomp, but it
+ # nvptx/amdgcn can be built with the device-side bits of libgomp, but it
# does not make sense to test it as an openmp host.
if [istarget nvptx-*-*] { return 0 }
+ if [istarget amdgcn-*-*] { return 0 }
return [check_no_compiler_messages fopenmp object {
void foo (void) { }
|| [istarget *-*-dragonfly*]
|| [istarget *-*-freebsd*]
|| [istarget *-*-linux*]
- || [istarget *-*-gnu*] } {
+ || [istarget *-*-gnu*]
+ || [istarget *-*-amdhsa]} {
return 1;
}
if { [istarget *-*-solaris2.1\[1-9\]*] } {
[istarget i?86-*-*] || [istarget x86_64-*-*]
|| ([istarget powerpc*-*-*]
&& ![istarget powerpc-*-linux*paired*])
+ || [istarget amdgcn-*-*]
|| [istarget spu-*-*]
|| [istarget sparc*-*-*]
|| [istarget alpha*-*-*]
&& ![istarget powerpc-*-linux*paired*])
|| [is-effective-target arm_neon]
|| ([istarget mips*-*-*]
- && [et-is-effective-target mips_msa]) }}]
+ && [et-is-effective-target mips_msa])
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if the target supports signed double->int conversion
|| [istarget aarch64*-*-*]
|| [is-effective-target arm_neon]
|| ([istarget mips*-*-*]
- && [et-is-effective-target mips_msa]) }}]
+ && [et-is-effective-target mips_msa])
+ || [istarget amdgcn-*-*] }}]
}
&& ![istarget powerpc-*-linux*paired*])
|| [is-effective-target arm_neon]
|| ([istarget mips*-*-*]
- && [et-is-effective-target mips_msa]) }}]
+ && [et-is-effective-target mips_msa])
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if the target supports unsigned float->int conversion
&& ![istarget powerpc-*-linux*paired*])
|| [is-effective-target arm_neon]
|| ([istarget mips*-*-*]
- && [et-is-effective-target mips_msa]) }}]
+ && [et-is-effective-target mips_msa])
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if peeling for alignment might be profitable on the target
# be able to assemble avx512f.
return [check_cached_effective_target_indexed vect_simd_clones {
expr { (([istarget i?86-*-*] || [istarget x86_64-*-*])
- && [check_effective_target_avx512f]) }}]
+ && [check_effective_target_avx512f])
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if this is a AArch64 target supporting big endian
check_effective_target_arm_neon_fp16_ok_nocache]
}
+# Return 1 if this is an ARM target supporting -mfpu=neon-fp16
+# and -mfloat-abi=softfp together. Some multilibs may be
+# incompatible with these options. Also set et_arm_neon_softfp_fp16_flags to
+# the best options to add.
+
+proc check_effective_target_arm_neon_softfp_fp16_ok_nocache { } {
+    global et_arm_neon_softfp_fp16_flags
+    global et_arm_neon_flags
+    # Default to "no extra flags"; overwritten below on success.
+    set et_arm_neon_softfp_fp16_flags ""
+    if { [check_effective_target_arm32]
+	 && [check_effective_target_arm_neon_ok] } {
+	# Some multilibs also need an explicit -mfp16-format=ieee, so try
+	# the bare softfp variant first and then add the format option.
+	foreach flags {"-mfpu=neon-fp16 -mfloat-abi=softfp"
+		       "-mfpu=neon-fp16 -mfloat-abi=softfp -mfp16-format=ieee"} {
+	    if { [check_no_compiler_messages_nocache arm_neon_softfp_fp16_ok object {
+		#include "arm_neon.h"
+		float16x4_t
+		foo (float32x4_t arg)
+		{
+		    return vcvt_f16_f32 (arg);
+		}
+	    } "$et_arm_neon_flags $flags"] } {
+		# Record the first working combination for callers to add.
+		set et_arm_neon_softfp_fp16_flags [concat $et_arm_neon_flags $flags]
+		return 1
+	    }
+	}
+    }
+
+    return 0
+}
+
+# Cached wrapper around the _nocache probe: the answer cannot change
+# within a test run, so compute it at most once.
+proc check_effective_target_arm_neon_softfp_fp16_ok { } {
+    return [check_cached_effective_target arm_neon_softfp_fp16_ok \
+		check_effective_target_arm_neon_softfp_fp16_ok_nocache]
+}
+
+
+
proc check_effective_target_arm_neon_fp16_hw { } {
if {! [check_effective_target_arm_neon_fp16_ok] } {
return 0
return "$flags $et_arm_neon_fp16_flags"
}
+# Add to FLAGS the options needed for -mfpu=neon-fp16 with
+# -mfloat-abi=softfp, as recorded in et_arm_neon_softfp_fp16_flags by
+# the corresponding effective-target check; return FLAGS unchanged if
+# the target does not support that combination.
+proc add_options_for_arm_neon_softfp_fp16 { flags } {
+    if { ! [check_effective_target_arm_neon_softfp_fp16_ok] } {
+	return "$flags"
+    }
+    global et_arm_neon_softfp_fp16_flags
+    return "$flags $et_arm_neon_softfp_fp16_flags"
+}
+
# Return 1 if this is an ARM target supporting the FP16 alternative
# format. Some multilibs may be incompatible with the options needed. Also
# set et_arm_neon_fp16_flags to the best options to add.
float32x2_t
foo (float32x2_t r, float16x4_t a, float16x4_t b)
{
- return vfmlal_high_u32 (r, a, b);
+ return vfmlal_high_f16 (r, a, b);
}
} "$flags -march=armv8.2-a+fp16fml"] } {
set et_arm_fp16fml_neon_flags "$flags -march=armv8.2-a+fp16fml"
} "-O2 -mthumb" ]
}
+# Return true if LDRD/STRD instructions are available on this target.
+proc check_effective_target_arm_ldrd_strd_ok { } {
+    if { ![check_effective_target_arm32] } {
+	return 0;
+    }
+
+    # Compile-only probe (object, not run): if the assembler accepts a
+    # doubleword LDRD here, the target/multilib has LDRD/STRD.
+    return [check_no_compiler_messages arm_ldrd_strd_ok object {
+	int main(void)
+	{
+	  __UINT64_TYPE__ a = 1, b = 10;
+	  __UINT64_TYPE__ *c = &b;
+	  // `a` will be in a valid register since it's a DImode quantity.
+	  asm ("ldrd %0, %1"
+	       : "=r" (a)
+	       : "m" (c));
+	  return a == 10;
+	}
+    }]
+}
+
# Return 1 if this is a PowerPC target supporting -meabi.
proc check_effective_target_powerpc_eabi_ok { } {
&& ([et-is-effective-target mips_msa]
|| [et-is-effective-target mips_loongson_mmi]))
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) }}]
+ && [check_effective_target_s390_vx])
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if the target supports hardware vector shift by register operation.
|| ([istarget mips*-*-*]
&& [et-is-effective-target mips_loongson_mmi])
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) } {
+ && [check_effective_target_s390_vx])
+ || [istarget amdgcn-*-*] } {
set answer 1
} else {
set answer 0
proc check_effective_target_vect_bswap { } {
    return [check_cached_effective_target_indexed vect_bswap {
-	expr { [istarget aarch64*-*-*] || [is-effective-target arm_neon] }}]
+	# amdgcn joins aarch64 and NEON-capable arm for vector bswap.
+	expr { [istarget aarch64*-*-*]
+	       || [is-effective-target arm_neon]
+	       || [istarget amdgcn-*-*] }}]
}
# Return 1 if the target supports hardware vector shift operation for char.
|| ([istarget mips*-*-*]
&& [et-is-effective-target mips_msa])
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) }}]
+ && [check_effective_target_s390_vx])
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if the target supports hardware vectors of long, 0 otherwise.
|| ([istarget mips*-*-*]
&& [et-is-effective-target mips_msa])
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) } {
+ && [check_effective_target_s390_vx])
+ || [istarget amdgcn-*-*] } {
set answer 1
} else {
set answer 0
&& [et-is-effective-target mips_msa])
|| [is-effective-target arm_neon]
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vxe]) }}]
+ && [check_effective_target_s390_vxe])
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if the target supports hardware vectors of float without
|| ([istarget mips*-*-*]
&& [et-is-effective-target mips_msa])
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx])} }]
+ && [check_effective_target_s390_vx])
+ || [istarget amdgcn-*-*]} }]
}
# Return 1 if the target supports conditional addition, subtraction,
&& ([et-is-effective-target mpaired_single]
|| [et-is-effective-target mips_msa]))
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) }}]
+ && [check_effective_target_s390_vx])
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if, for some VF:
|| ([istarget mips-*.*]
&& [et-is-effective-target mips_msa])
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) }}]
+ && [check_effective_target_s390_vx])
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if the target supports SLP permutation of 3 vectors when each
|| ([istarget mips*-*-*]
&& [et-is-effective-target mips_msa])
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) }}]
+ && [check_effective_target_s390_vx])
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if the target supports SLP permutation of 3 vectors when each
proc check_effective_target_xorsign { } {
    return [check_cached_effective_target_indexed xorsign {
-	expr { [istarget aarch64*-*-*] || [istarget arm*-*-*] }}]
+	# x86/x86_64 added alongside the existing aarch64/arm support.
+	expr { [istarget i?86-*-*] || [istarget x86_64-*-*]
+	       || [istarget aarch64*-*-*] || [istarget arm*-*-*] }}]
}
# Return 1 if the target plus current options supports a vector
&& ![check_effective_target_aarch64_sve])
|| [is-effective-target arm_neon]
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx])) }}]
+ && [check_effective_target_s390_vx]))
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if the target plus current options supports a vector
|| [istarget i?86-*-*] || [istarget x86_64-*-*]
|| [is-effective-target arm_neon]
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx])) }}]
+ && [check_effective_target_s390_vx]))
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if the target plus current options supports a vector
|| ([is-effective-target arm_neon]
&& [check_effective_target_arm_little_endian])
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) }}]
+ && [check_effective_target_s390_vx])
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if the target plus current options supports a vector
|| ([is-effective-target arm_neon]
&& [check_effective_target_arm_little_endian])
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) }}]
+ && [check_effective_target_s390_vx])
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if the target plus current options supports a vector
expr { [istarget i?86-*-*]
|| [istarget x86_64-*-*]
|| ([istarget aarch64*-*-*]
- && ![check_effective_target_aarch64_sve])}}]
+ && ![check_effective_target_aarch64_sve])
+ || ([istarget powerpc*-*-*]
+ && [check_p9vector_hw_available])}}]
}
# Return 1 if the target plus current options supports both signed
set et_vect_natural_alignment 1
if { [check_effective_target_arm_eabi]
|| [istarget nvptx-*-*]
- || [istarget s390*-*-*] } {
+ || [istarget s390*-*-*]
+ || [istarget amdgcn-*-*] } {
set et_vect_natural_alignment 0
}
verbose "check_effective_target_vect_natural_alignment:\
# Return true if fully-masked loops are supported.
proc check_effective_target_vect_fully_masked { } {
-    return [check_effective_target_aarch64_sve]
+    # AArch64 SVE, or any amdgcn variant, supports masked vector loops.
+    return [expr { [check_effective_target_aarch64_sve]
+		   || [istarget amdgcn*-*-*] }]
}
# Return 1 if the target doesn't prefer any alignment beyond element
return [check_cached_effective_target_indexed vect_element_align {
expr { ([istarget arm*-*-*]
&& ![check_effective_target_arm_vect_no_misalign])
- || [check_effective_target_vect_hw_misalign] }}]
+ || [check_effective_target_vect_hw_misalign]
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if we expect to see unaligned accesses in at least some
# Return 1 if the target supports vector masked stores.
proc check_effective_target_vect_masked_store { } {
-    return [check_effective_target_aarch64_sve]
+    # AArch64 SVE or amdgcn.
+    return [expr { [check_effective_target_aarch64_sve]
+		   || [istarget amdgcn*-*-*] }]
}
# Return 1 if the target supports vector scatter stores.
proc check_effective_target_vect_scatter_store { } {
-    return [check_effective_target_aarch64_sve]
+    # AArch64 SVE or amdgcn.
+    return [expr { [check_effective_target_aarch64_sve]
+		   || [istarget amdgcn*-*-*] }]
}
# Return 1 if the target supports vector conditional operations, 0 otherwise.
|| ([istarget arm*-*-*]
&& [check_effective_target_arm_neon_ok])
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) }}]
+ && [check_effective_target_s390_vx])
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if the target supports vector conditional operations where
|| ([istarget mips*-*-*]
&& [et-is-effective-target mips_msa])
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) }}]
+ && [check_effective_target_s390_vx])
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if the target supports vector char multiplication, 0 otherwise.
|| ([istarget mips*-*-*]
&& [et-is-effective-target mips_msa])
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) }}]
+ && [check_effective_target_s390_vx])
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if the target supports vector short multiplication, 0 otherwise.
&& ([et-is-effective-target mips_msa]
|| [et-is-effective-target mips_loongson_mmi]))
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) }}]
+ && [check_effective_target_s390_vx])
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if the target supports vector int multiplication, 0 otherwise.
&& [et-is-effective-target mips_msa])
|| [check_effective_target_arm32]
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) }}]
+ && [check_effective_target_s390_vx])
+ || [istarget amdgcn-*-*] }}]
}
# Return 1 if the target supports 64 bit hardware vector
|| [istarget aarch64*-*-*]) && N >= 2 && N <= 4 } {
return 1
}
+ if [check_effective_target_vect_fully_masked] {
+ return 1
+ }
return 0
}]
}
|| [istarget aarch64*-*-*]
|| ([istarget arm*-*-*] && [check_effective_target_arm_vfp_ok])
|| ([istarget s390*-*-*]
- && [check_effective_target_s390_vx]) }}]
+ && [check_effective_target_s390_vx])
+ || [istarget amdgcn-*-*] }}]
+}
+
+# Return any additional options to enable square root instructions.
+
+proc add_options_for_sqrt_insn { flags } {
+    # amdgcn's square-root instructions are only used under -ffast-math.
+    if { [istarget amdgcn*-*-*] } {
+	return "$flags -ffast-math"
+    }
+    return $flags
}
# Return 1 if the target supports vector sqrtf calls.
proc check_effective_target_vect_call_lrint { } {
set et_vect_call_lrint 0
if { (([istarget i?86-*-*] || [istarget x86_64-*-*])
- && [check_effective_target_ilp32]) } {
+ && [check_effective_target_ilp32])
+ || [istarget amdgcn-*-*] } {
set et_vect_call_lrint 1
}
proc check_effective_target_vect_call_btrunc { } {
    return [check_cached_effective_target_indexed vect_call_btrunc {
-	expr { [istarget aarch64*-*-*] }}]
+	# amdgcn joins aarch64 in vectorizing btrunc calls.
+	expr { [istarget aarch64*-*-*]
+	       || [istarget amdgcn-*-*] }}]
}
# Return 1 if the target supports vector btruncf calls.
proc check_effective_target_vect_call_btruncf { } {
    return [check_cached_effective_target_indexed vect_call_btruncf {
-	expr { [istarget aarch64*-*-*] }}]
+	# amdgcn joins aarch64 in vectorizing btruncf calls.
+	expr { [istarget aarch64*-*-*]
+	       || [istarget amdgcn-*-*] }}]
}
# Return 1 if the target supports vector ceil calls.
proc check_effective_target_vect_call_ceil { } {
    return [check_cached_effective_target_indexed vect_call_ceil {
-	expr { [istarget aarch64*-*-*] }}]
+	# amdgcn joins aarch64 in vectorizing ceil calls.
+	expr { [istarget aarch64*-*-*]
+	       || [istarget amdgcn-*-*] }}]
}
# Return 1 if the target supports vector ceilf calls.
proc check_effective_target_vect_call_floorf { } {
    return [check_cached_effective_target_indexed vect_call_floorf {
-	expr { [istarget aarch64*-*-*] }}]
+	# amdgcn joins aarch64 in vectorizing floorf calls.
+	expr { [istarget aarch64*-*-*]
+	       || [istarget amdgcn-*-*] }}]
}
# Return 1 if the target supports vector lceil calls.
# Return true if we are compiling for AVX2 target.
proc check_avx2_available { } {
- if { [check_no_compiler_messages avx_available assembly {
+ if { [check_no_compiler_messages avx2_available assembly {
#ifndef __AVX2__
#error unsupported
#endif
# (LTO) support.
proc check_effective_target_lto { } {
- if { [istarget nvptx-*-*] } {
+ if { [istarget nvptx-*-*]
+ || [istarget amdgcn-*-*] } {
return 0;
}
return [check_no_compiler_messages lto object {
lappend DEFAULT_VECTCFLAGS "-march=z14" "-mzarch"
set dg-do-what-default compile
}
+ } elseif [istarget amdgcn-*-*] {
+ set dg-do-what-default run
} else {
return 0
}
}
}
+# Return 1 if <fenv.h> is available.
+
+proc check_effective_target_fenv {} {
+    # Compile-only probe: just check the header can be included under
+    # the same IEEE options used by the other fenv checks.
+    return [check_no_compiler_messages fenv object {
+	#include <fenv.h>
+    } [add_options_for_ieee "-std=gnu99"]]
+}
+
# Return 1 if <fenv.h> is available with all the standard IEEE
# exceptions and floating-point exceptions are raised by arithmetic
# operations. (If the target requires special options for "inexact"
} [add_options_for_ieee "-std=gnu99"]]
}
+# Return 1 if -fexceptions is supported.
+
+proc check_effective_target_exceptions {} {
+    # amdgcn does not support -fexceptions.
+    if { [istarget amdgcn*-*-*] } {
+	return 0
+    }
+    return 1
+}
+
+
proc check_effective_target_tiny {} {
return [check_cached_effective_target tiny {
if { [istarget aarch64*-*-*]
return 0
}
-# Return 1 if there is an nvptx offload compiler.
+# Return 1 if the compiler has been configured with hsa offloading.
-proc check_effective_target_offload_nvptx { } {
- return [check_no_compiler_messages offload_nvptx object {
+proc check_effective_target_offload_hsa { } {
+ return [check_no_compiler_messages offload_hsa assembly {
int main () {return 0;}
- } "-foffload=nvptx-none" ]
+ } "-foffload=hsa" ]
}
# Return 1 if the compiler has been configured with hsa offloading.
-proc check_effective_target_offload_hsa { } {
- return [check_no_compiler_messages offload_hsa assembly {
+proc check_effective_target_offload_gcn { } {
+ return [check_no_compiler_messages offload_gcn assembly {
int main () {return 0;}
- } "-foffload=hsa" ]
+ } "-foffload=amdgcn-unknown-amdhsa" ]
}
# Return 1 if the target support -fprofile-update=atomic
const double pinf = __builtin_inf ();
}]
}
+
+# Return 1 if the target supports ARMv8.3 Adv.SIMD Complex
+# instructions, 0 otherwise.  The test is valid for ARM and for AArch64.
+# Record the command line options needed.
+
+proc check_effective_target_arm_v8_3a_complex_neon_ok_nocache { } {
+    global et_arm_v8_3a_complex_neon_flags
+    set et_arm_v8_3a_complex_neon_flags ""
+
+    if { ![istarget arm*-*-*] && ![istarget aarch64*-*-*] } {
+	return 0;
+    }
+
+    # Iterate through sets of options to find the compiler flags that
+    # need to be added to the -march option.  The empty set is tried
+    # first, so targets needing no extra flags (e.g. AArch64) use none.
+    foreach flags {"" "-mfloat-abi=softfp -mfpu=auto" "-mfloat-abi=hard -mfpu=auto"} {
+	if { [check_no_compiler_messages_nocache \
+		  arm_v8_3a_complex_neon_ok object {
+	    #if !defined (__ARM_FEATURE_COMPLEX)
+	    #error "__ARM_FEATURE_COMPLEX not defined"
+	    #endif
+	} "$flags -march=armv8.3-a"] } {
+	    # Record the working flag set for add_options_for_*.
+	    set et_arm_v8_3a_complex_neon_flags "$flags -march=armv8.3-a"
+	    return 1
+	}
+    }
+
+    return 0;
+}
+
+# Cached wrapper around the _nocache probe above.
+proc check_effective_target_arm_v8_3a_complex_neon_ok { } {
+    return [check_cached_effective_target arm_v8_3a_complex_neon_ok \
+		check_effective_target_arm_v8_3a_complex_neon_ok_nocache]
+}
+
+# Add to FLAGS the options needed for ARMv8.3-A complex-number Adv.SIMD,
+# as recorded by the effective-target check; return FLAGS unchanged when
+# the target does not support the extension.
+proc add_options_for_arm_v8_3a_complex_neon { flags } {
+    if { ! [check_effective_target_arm_v8_3a_complex_neon_ok] } {
+	return "$flags"
+    }
+    global et_arm_v8_3a_complex_neon_flags
+    return "$flags $et_arm_v8_3a_complex_neon_flags"
+}
+
+# Return 1 if the target supports executing AdvSIMD instructions from ARMv8.3
+# with the complex instruction extension, 0 otherwise.  The test is valid for
+# ARM and for AArch64.
+
+proc check_effective_target_arm_v8_3a_complex_neon_hw { } {
+    if { ![check_effective_target_arm_v8_3a_complex_neon_ok] } {
+	return 0;
+    }
+    return [check_runtime arm_v8_3a_complex_neon_hw_available {
+	#include "arm_neon.h"
+	int
+	main (void)
+	{
+
+	  float32x2_t results = {-4.0,5.0};
+	  float32x2_t a = {1.0,3.0};
+	  float32x2_t b = {2.0,5.0};
+
+	  #ifdef __ARM_ARCH_ISA_A64
+	  /* AArch64 spelling of the 90-degree complex add.  */
+	  asm ("fcadd %0.2s, %1.2s, %2.2s, #90"
+	       : "=w"(results)
+	       : "w"(a), "w"(b)
+	       : /* No clobbers.  */);
+
+	  #else
+	  /* AArch32 spelling of the same operation.  */
+	  asm ("vcadd.f32 %P0, %P1, %P2, #90"
+	       : "=w"(results)
+	       : "w"(a), "w"(b)
+	       : /* No clobbers.  */);
+	  #endif
+
+	  /* NOTE(review): FCADD #90 on a={1,3}, b={2,5} computes {-4,5}
+	     (matching the initializer), not {8,24}; presumably the check
+	     only needs the insn to execute without faulting, with success
+	     signalled by the zero-return branch -- confirm intent.  */
+	  return (results[0] == 8 && results[1] == 24) ? 1 : 0;
+	}
+    } [add_options_for_arm_v8_3a_complex_neon ""]]
+}
+
+# Return 1 if the target plus current options supports a vector
+# complex addition with rotate of half and single float modes, 0 otherwise.
+#
+# This won't change for different subtargets so cache the result.
+
+foreach N {hf sf} {
+    # Stamp out one cached per-mode check for each mode suffix by
+    # substituting every literal occurrence in the template below.
+    eval [string map [list N $N] {
+	proc check_effective_target_vect_complex_rot_N { } {
+	    return [check_cached_effective_target_indexed vect_complex_rot_N {
+		expr { [istarget aarch64*-*-*]
+		       || [istarget arm*-*-*] }}]
+	}
+    }]
+}
+
+# Return 1 if the target plus current options supports a vector
+# complex addition with rotate of double float modes, 0 otherwise.
+#
+# This won't change for different subtargets so cache the result.
+
+foreach N {df} {
+    # Same template trick for the double-float mode; only aarch64 is
+    # listed here.
+    eval [string map [list N $N] {
+	proc check_effective_target_vect_complex_rot_N { } {
+	    return [check_cached_effective_target_indexed vect_complex_rot_N {
+		expr { [istarget aarch64*-*-*] }}]
+	}
+    }]
+}
+
+# Return 1 if this target uses an LLVM assembler and/or linker
+proc check_effective_target_llvm_binutils { } {
+ return [check_cached_effective_target llvm_binutils {
+ expr { [istarget amdgcn*-*-*]
+ || [check_effective_target_offload_gcn] }}]
+}