+2017-01-06 Andre Vieira <andre.simoesdiasvieira@arm.com>
+
+ * config/arm/arm.md (<cdp>): New.
+ * config/arm/arm.c (neon_const_bounds): Rename this ...
+ (arm_const_bounds): ... this.
+ (arm_coproc_builtin_available): New.
+ * config/arm/arm-builtins.c (SIMD_MAX_BUILTIN_ARGS): Increase.
+ (arm_type_qualifiers): Add 'qualifier_unsigned_immediate'.
+ (CDP_QUALIFIERS): Define to...
+ (arm_cdp_qualifiers): ... this. New.
+ (void_UP): Define.
+ (arm_expand_builtin_args): Add case for 6 arguments.
+ * config/arm/arm-protos.h (neon_const_bounds): Rename this ...
+ (arm_const_bounds): ... this.
+ (arm_coproc_builtin_available): New.
+ * config/arm/arm_acle.h (__arm_cdp): New.
+ (__arm_cdp2): New.
+ * config/arm/arm_acle_builtins.def (cdp): New.
+ (cdp2): New.
+ * config/arm/iterators.md (CDPI,CDP,cdp): New.
+ * config/arm/neon.md: Rename all 'neon_const_bounds' to
+ 'arm_const_bounds'.
+ * config/arm/types.md (coproc): New.
+ * config/arm/unspecs.md (VUNSPEC_CDP, VUNSPEC_CDP2): New.
+ * doc/extend.texi (ACLE): Add a mention of Coprocessor intrinsics.
+ * doc/sourcebuild.texi (arm_coproc1_ok, arm_coproc2_ok,
+ arm_coproc3_ok, arm_coproc4_ok): Document new effective targets.
+
2017-01-06 Andre Vieira <andre.simoesdiasvieira@arm.com>
* config/arm/arm-builtins.c (arm_unsigned_binop_qualifiers): New.
#include "case-cfn-macros.h"
#include "sbitmap.h"
-#define SIMD_MAX_BUILTIN_ARGS 5
+#define SIMD_MAX_BUILTIN_ARGS 7
enum arm_type_qualifiers
{
/* Used when expanding arguments if an operand could
be an immediate. */
qualifier_immediate = 0x8, /* 1 << 3 */
+ qualifier_unsigned_immediate = 0x9,
qualifier_maybe_immediate = 0x10, /* 1 << 4 */
/* void foo (...). */
qualifier_void = 0x20, /* 1 << 5 */
qualifier_unsigned };
#define UBINOP_QUALIFIERS (arm_unsigned_binop_qualifiers)
+/* void (unsigned immediate, unsigned immediate, unsigned immediate,
+ unsigned immediate, unsigned immediate, unsigned immediate). */
+static enum arm_type_qualifiers
+arm_cdp_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+ = { qualifier_void, qualifier_unsigned_immediate,
+ qualifier_unsigned_immediate,
+ qualifier_unsigned_immediate,
+ qualifier_unsigned_immediate,
+ qualifier_unsigned_immediate,
+ qualifier_unsigned_immediate };
+#define CDP_QUALIFIERS \
+ (arm_cdp_qualifiers)
/* The first argument (return type) of a store should be void type,
which we represent with qualifier_void. Their first operand will be
a DImode pointer to the location to store to, so we must use
#define oi_UP OImode
#define hf_UP HFmode
#define si_UP SImode
+#define void_UP VOIDmode
#define UP(X) X##_UP
pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3], op[4]);
break;
+ case 6:
+ pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3], op[4], op[5]);
+ break;
+
default:
gcc_unreachable ();
}
pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4]);
break;
+ case 6:
+ pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4], op[5]);
+ break;
+
default:
gcc_unreachable ();
}
extern tree arm_builtin_vectorized_function (unsigned int, tree, tree);
extern void neon_expand_vector_init (rtx, rtx);
extern void neon_lane_bounds (rtx, HOST_WIDE_INT, HOST_WIDE_INT, const_tree);
-extern void neon_const_bounds (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
+extern void arm_const_bounds (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
extern HOST_WIDE_INT neon_element_bits (machine_mode);
extern void neon_emit_pair_result_insn (machine_mode,
rtx (*) (rtx, rtx, rtx, rtx),
extern void arm_split_compare_and_swap (rtx op[]);
extern void arm_split_atomic_op (enum rtx_code, rtx, rtx, rtx, rtx, rtx, rtx);
extern rtx arm_load_tp (rtx);
+extern bool arm_coproc_builtin_available (enum unspecv);
#if defined TREE_CODE
extern void arm_init_cumulative_args (CUMULATIVE_ARGS *, tree, rtx, tree);
/* Bounds-check constants. */
void
-neon_const_bounds (rtx operand, HOST_WIDE_INT low, HOST_WIDE_INT high)
+arm_const_bounds (rtx operand, HOST_WIDE_INT low, HOST_WIDE_INT high)
{
bounds_check (operand, low, high, NULL_TREE, "constant");
}
*rem_p = remainder;
}
+/* This function checks for the availability of the coprocessor builtin passed
+ in BUILTIN for the current target. Returns true if it is available and
+ false otherwise. If a BUILTIN is passed for which this function has not
+ been implemented it will cause an internal compiler error (gcc_unreachable).  */
+
+bool
+arm_coproc_builtin_available (enum unspecv builtin)
+{
+ /* None of these builtins are available in Thumb mode if the target only
+ supports Thumb-1. */
+ if (TARGET_THUMB1)
+ return false;
+
+ switch (builtin)
+ {
+ case VUNSPEC_CDP:
+ if (arm_arch4)
+ return true;
+ break;
+ case VUNSPEC_CDP2:
+ /* Only present in ARMv5*, ARMv6 (but not ARMv6-M), ARMv7* and
+ ARMv8-{A,M}. */
+ if (arm_arch5)
+ return true;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ return false;
+}
#include "gt-arm.h"
DONE;
})
+(define_insn "<cdp>"
+ [(unspec_volatile [(match_operand:SI 0 "immediate_operand" "n")
+ (match_operand:SI 1 "immediate_operand" "n")
+ (match_operand:SI 2 "immediate_operand" "n")
+ (match_operand:SI 3 "immediate_operand" "n")
+ (match_operand:SI 4 "immediate_operand" "n")
+ (match_operand:SI 5 "immediate_operand" "n")] CDPI)]
+ "arm_coproc_builtin_available (VUNSPEC_<CDP>)"
+{
+ arm_const_bounds (operands[0], 0, 16);
+ arm_const_bounds (operands[1], 0, 16);
+ arm_const_bounds (operands[2], 0, (1 << 5));
+ arm_const_bounds (operands[3], 0, (1 << 5));
+ arm_const_bounds (operands[4], 0, (1 << 5));
+ arm_const_bounds (operands[5], 0, 8);
+ return "<cdp>\\tp%c0, %1, CR%c2, CR%c3, CR%c4, %5";
+}
+ [(set_attr "length" "4")
+ (set_attr "type" "coproc")])
+
;; Vector bits common to IWMMXT and Neon
(include "vec-common.md")
;; Load the Intel Wireless Multimedia Extension patterns
extern "C" {
#endif
+#if (!__thumb__ || __thumb2__) && __ARM_ARCH >= 4
+__extension__ static __inline void __attribute__ ((__always_inline__))
+__arm_cdp (const unsigned int __coproc, const unsigned int __opc1,
+ const unsigned int __CRd, const unsigned int __CRn,
+ const unsigned int __CRm, const unsigned int __opc2)
+{
+ return __builtin_arm_cdp (__coproc, __opc1, __CRd, __CRn, __CRm, __opc2);
+}
+
+#if __ARM_ARCH >= 5
+__extension__ static __inline void __attribute__ ((__always_inline__))
+__arm_cdp2 (const unsigned int __coproc, const unsigned int __opc1,
+ const unsigned int __CRd, const unsigned int __CRn,
+ const unsigned int __CRm, const unsigned int __opc2)
+{
+ return __builtin_arm_cdp2 (__coproc, __opc1, __CRd, __CRn, __CRm, __opc2);
+}
+#endif /* __ARM_ARCH >= 5. */
+#endif /* (!__thumb__ || __thumb2__) && __ARM_ARCH >= 4. */
+
#ifdef __ARM_FEATURE_CRC32
__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
__crc32b (uint32_t __a, uint8_t __b)
VAR1 (UBINOP, crc32cb, si)
VAR1 (UBINOP, crc32ch, si)
VAR1 (UBINOP, crc32cw, si)
+VAR1 (CDP, cdp, void)
+VAR1 (CDP, cdp2, void)
;; Attributes for VFMA_LANE/ VFMS_LANE
(define_int_attr neon_vfm_lane_as
[(UNSPEC_VFMA_LANE "a") (UNSPEC_VFMS_LANE "s")])
+
+;; An iterator for the CDP coprocessor instructions
+(define_int_iterator CDPI [VUNSPEC_CDP VUNSPEC_CDP2])
+(define_int_attr cdp [(VUNSPEC_CDP "cdp") (VUNSPEC_CDP2 "cdp2")])
+(define_int_attr CDP [(VUNSPEC_CDP "CDP") (VUNSPEC_CDP2 "CDP2")])
VCVT_US_N))]
"TARGET_NEON"
{
- neon_const_bounds (operands[2], 1, 33);
+ arm_const_bounds (operands[2], 1, 33);
return "vcvt.<sup>%#32.f32\t%<V_reg>0, %<V_reg>1, %2";
}
[(set_attr "type" "neon_fp_to_int_<V_elem_ch><q>")]
VCVT_US_N))]
"TARGET_NEON_FP16INST"
{
- neon_const_bounds (operands[2], 0, 17);
+ arm_const_bounds (operands[2], 0, 17);
return "vcvt.<sup>%#16.f16\t%<V_reg>0, %<V_reg>1, %2";
}
[(set_attr "type" "neon_fp_to_int_<VH_elem_ch><q>")]
VCVT_US_N))]
"TARGET_NEON"
{
- neon_const_bounds (operands[2], 1, 33);
+ arm_const_bounds (operands[2], 1, 33);
return "vcvt.f32.<sup>%#32\t%<V_reg>0, %<V_reg>1, %2";
}
[(set_attr "type" "neon_int_to_fp_<V_elem_ch><q>")]
VCVT_US_N))]
"TARGET_NEON_FP16INST"
{
- neon_const_bounds (operands[2], 0, 17);
+ arm_const_bounds (operands[2], 0, 17);
return "vcvt.f16.<sup>%#16\t%<V_reg>0, %<V_reg>1, %2";
}
[(set_attr "type" "neon_int_to_fp_<VH_elem_ch><q>")]
UNSPEC_VEXT))]
"TARGET_NEON"
{
- neon_const_bounds (operands[3], 0, GET_MODE_NUNITS (<MODE>mode));
+ arm_const_bounds (operands[3], 0, GET_MODE_NUNITS (<MODE>mode));
return "vext.<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2, %3";
}
[(set_attr "type" "neon_ext<q>")]
VSHR_N))]
"TARGET_NEON"
{
- neon_const_bounds (operands[2], 1, neon_element_bits (<MODE>mode) + 1);
+ arm_const_bounds (operands[2], 1, neon_element_bits (<MODE>mode) + 1);
return "v<shift_op>.<sup>%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %2";
}
[(set_attr "type" "neon_shift_imm<q>")]
VSHRN_N))]
"TARGET_NEON"
{
- neon_const_bounds (operands[2], 1, neon_element_bits (<MODE>mode) / 2 + 1);
+ arm_const_bounds (operands[2], 1, neon_element_bits (<MODE>mode) / 2 + 1);
return "v<shift_op>.<V_if_elem>\t%P0, %q1, %2";
}
[(set_attr "type" "neon_shift_imm_narrow_q")]
VQSHRN_N))]
"TARGET_NEON"
{
- neon_const_bounds (operands[2], 1, neon_element_bits (<MODE>mode) / 2 + 1);
+ arm_const_bounds (operands[2], 1, neon_element_bits (<MODE>mode) / 2 + 1);
return "v<shift_op>.<sup>%#<V_sz_elem>\t%P0, %q1, %2";
}
[(set_attr "type" "neon_sat_shift_imm_narrow_q")]
VQSHRUN_N))]
"TARGET_NEON"
{
- neon_const_bounds (operands[2], 1, neon_element_bits (<MODE>mode) / 2 + 1);
+ arm_const_bounds (operands[2], 1, neon_element_bits (<MODE>mode) / 2 + 1);
return "v<shift_op>.<V_s_elem>\t%P0, %q1, %2";
}
[(set_attr "type" "neon_sat_shift_imm_narrow_q")]
UNSPEC_VSHL_N))]
"TARGET_NEON"
{
- neon_const_bounds (operands[2], 0, neon_element_bits (<MODE>mode));
+ arm_const_bounds (operands[2], 0, neon_element_bits (<MODE>mode));
return "vshl.<V_if_elem>\t%<V_reg>0, %<V_reg>1, %2";
}
[(set_attr "type" "neon_shift_imm<q>")]
VQSHL_N))]
"TARGET_NEON"
{
- neon_const_bounds (operands[2], 0, neon_element_bits (<MODE>mode));
+ arm_const_bounds (operands[2], 0, neon_element_bits (<MODE>mode));
return "vqshl.<sup>%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %2";
}
[(set_attr "type" "neon_sat_shift_imm<q>")]
UNSPEC_VQSHLU_N))]
"TARGET_NEON"
{
- neon_const_bounds (operands[2], 0, neon_element_bits (<MODE>mode));
+ arm_const_bounds (operands[2], 0, neon_element_bits (<MODE>mode));
return "vqshlu.<V_s_elem>\t%<V_reg>0, %<V_reg>1, %2";
}
[(set_attr "type" "neon_sat_shift_imm<q>")]
"TARGET_NEON"
{
/* The boundaries are: 0 < imm <= size. */
- neon_const_bounds (operands[2], 0, neon_element_bits (<MODE>mode) + 1);
+ arm_const_bounds (operands[2], 0, neon_element_bits (<MODE>mode) + 1);
return "vshll.<sup>%#<V_sz_elem>\t%q0, %P1, %2";
}
[(set_attr "type" "neon_shift_imm_long")]
VSRA_N))]
"TARGET_NEON"
{
- neon_const_bounds (operands[3], 1, neon_element_bits (<MODE>mode) + 1);
+ arm_const_bounds (operands[3], 1, neon_element_bits (<MODE>mode) + 1);
return "v<shift_op>.<sup>%#<V_sz_elem>\t%<V_reg>0, %<V_reg>2, %3";
}
[(set_attr "type" "neon_shift_acc<q>")]
UNSPEC_VSRI))]
"TARGET_NEON"
{
- neon_const_bounds (operands[3], 1, neon_element_bits (<MODE>mode) + 1);
+ arm_const_bounds (operands[3], 1, neon_element_bits (<MODE>mode) + 1);
return "vsri.<V_sz_elem>\t%<V_reg>0, %<V_reg>2, %3";
}
[(set_attr "type" "neon_shift_reg<q>")]
UNSPEC_VSLI))]
"TARGET_NEON"
{
- neon_const_bounds (operands[3], 0, neon_element_bits (<MODE>mode));
+ arm_const_bounds (operands[3], 0, neon_element_bits (<MODE>mode));
return "vsli.<V_sz_elem>\t%<V_reg>0, %<V_reg>2, %3";
}
[(set_attr "type" "neon_shift_reg<q>")]
; crypto_sha1_slow
; crypto_sha256_fast
; crypto_sha256_slow
+;
+; The classification below is for coprocessor instructions
+;
+; coproc
(define_attr "type"
"adc_imm,\
crypto_sha1_fast,\
crypto_sha1_slow,\
crypto_sha256_fast,\
- crypto_sha256_slow"
+ crypto_sha256_slow,\
+ coproc"
(const_string "untyped"))
; Is this an (integer side) multiply with a 32-bit (or smaller) result?
VUNSPEC_GET_FPSCR ; Represent fetch of FPSCR content.
VUNSPEC_SET_FPSCR ; Represent assign of FPSCR content.
VUNSPEC_PROBE_STACK_RANGE ; Represent stack range probing.
+ VUNSPEC_CDP ; Represent the coprocessor cdp instruction.
+ VUNSPEC_CDP2 ; Represent the coprocessor cdp2 instruction.
])
;; Enumerators for NEON unspecs.
(float_truncate:HF (float:SF (match_dup 0))))]
"TARGET_VFP_FP16INST"
{
- neon_const_bounds (operands[2], 1, 33);
+ arm_const_bounds (operands[2], 1, 33);
return "vcvt.f16.<sup>32\t%0, %0, %2\;vmov.f32\t%3, %0";
}
[(set_attr "conds" "unconditional")
{
rtx op1 = gen_reg_rtx (SImode);
- neon_const_bounds (operands[2], 1, 33);
+ arm_const_bounds (operands[2], 1, 33);
emit_move_insn (op1, operands[1]);
emit_insn (gen_neon_vcvth<sup>_nhf_unspec (op1, op1, operands[2],
VCVT_SI_US_N))]
"TARGET_VFP_FP16INST"
{
- neon_const_bounds (operands[2], 1, 33);
+ arm_const_bounds (operands[2], 1, 33);
return "vmov.f32\t%0, %1\;vcvt.<sup>%#32.f16\t%0, %0, %2";
}
[(set_attr "conds" "unconditional")
{
rtx op1 = gen_reg_rtx (SImode);
- neon_const_bounds (operands[2], 1, 33);
+ arm_const_bounds (operands[2], 1, 33);
emit_insn (gen_neon_vcvth<sup>_nsi_unspec (op1, operands[1], operands[2]));
emit_move_insn (operands[0], op1);
DONE;
NEON is enabled.
Currently, ARM and AArch64 back ends do not support ACLE 2.0 fully. Both
-back ends support CRC32 intrinsics from @file{arm_acle.h}. The ARM back end's
-16-bit floating-point Advanced SIMD intrinsics currently comply to ACLE v1.1.
+back ends support CRC32 intrinsics and the ARM back end supports the
+Coprocessor intrinsics, all from @file{arm_acle.h}. The ARM back end's 16-bit
+floating-point Advanced SIMD intrinsics currently comply to ACLE v1.1.
AArch64's back end does not have support for 16-bit floating point Advanced SIMD
intrinsics yet.
ARM target supports ARMv8-M Security Extensions, enabled by the @code{-mcmse}
option.
+@item arm_coproc1_ok
+@anchor{arm_coproc1_ok}
+ARM target supports the following coprocessor instructions: @code{CDP},
+@code{LDC}, @code{STC}, @code{MCR} and @code{MRC}.
+
+@item arm_coproc2_ok
+@anchor{arm_coproc2_ok}
+ARM target supports all the coprocessor instructions also listed as supported
+in @ref{arm_coproc1_ok} in addition to the following: @code{CDP2}, @code{LDC2},
+@code{LDC2l}, @code{STC2}, @code{STC2l}, @code{MCR2} and @code{MRC2}.
+
+@item arm_coproc3_ok
+@anchor{arm_coproc3_ok}
+ARM target supports all the coprocessor instructions also listed as supported
+in @ref{arm_coproc2_ok} in addition to the following: @code{MCRR} and
+@code{MRRC}.
+
+@item arm_coproc4_ok
+ARM target supports all the coprocessor instructions also listed as supported
+in @ref{arm_coproc3_ok} in addition to the following: @code{MCRR2} and
+@code{MRRC2}.
@end table
@subsubsection AArch64-specific attributes
+2017-01-06 Andre Vieira <andre.simoesdiasvieira@arm.com>
+
+ * gcc.target/arm/acle/acle.exp: Run tests for different options
+ and make sure fat-lto-objects is used such that we can still do
+ assemble scans.
+ * gcc.target/arm/acle/cdp.c: New.
+ * gcc.target/arm/acle/cdp2.c: New.
+ * lib/target-supports.exp (check_effective_target_arm_coproc1_ok): New.
+ (check_effective_target_arm_coproc1_ok_nocache): New.
+ (check_effective_target_arm_coproc2_ok): New.
+ (check_effective_target_arm_coproc2_ok_nocache): New.
+ (check_effective_target_arm_coproc3_ok): New.
+ (check_effective_target_arm_coproc3_ok_nocache): New.
+ (check_effective_target_arm_coproc4_ok): New.
+ (check_effective_target_arm_coproc4_ok_nocache): New.
+
2017-01-06 Martin Sebor <msebor@redhat.com>
PR middle-end/78605
# Initialize `dg'.
dg-init
+set saved-dg-do-what-default ${dg-do-what-default}
+set dg-do-what-default "assemble"
+
+set saved-lto_torture_options ${LTO_TORTURE_OPTIONS}
+
+# Add -ffat-lto-objects option to all LTO options such that we can do assembly
+# scans.
+proc add_fat_objects { list } {
+ set res {}
+ foreach el $list {set res [lappend res [concat $el " -ffat-lto-objects"]]}
+ return $res
+};
+set LTO_TORTURE_OPTIONS [add_fat_objects ${LTO_TORTURE_OPTIONS}]
+
# Main loop.
-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.\[cCS\]]] \
+gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.\[cCS\]]] \
"" ""
+# Restore globals
+set dg-do-what-default ${saved-dg-do-what-default}
+set LTO_TORTURE_OPTIONS ${saved-lto_torture_options}
# All done.
dg-finish
--- /dev/null
+/* Test the cdp ACLE intrinsic. */
+
+/* { dg-do assemble } */
+/* { dg-options "-save-temps" } */
+/* { dg-require-effective-target arm_coproc1_ok } */
+
+#include "arm_acle.h"
+
+void test_cdp (void)
+{
+ __arm_cdp (10, 1, 2, 3, 4, 5);
+}
+
+/* { dg-final { scan-assembler "cdp\tp10, #1, CR2, CR3, CR4, #5\n" } } */
--- /dev/null
+/* Test the cdp2 ACLE intrinsic. */
+
+/* { dg-do assemble } */
+/* { dg-options "-save-temps" } */
+/* { dg-require-effective-target arm_coproc2_ok } */
+
+#include "arm_acle.h"
+
+void test_cdp2 (void)
+{
+ __arm_cdp2 (10, 4, 3, 2, 1, 0);
+}
+
+/* { dg-final { scan-assembler "cdp2\tp10, #4, CR3, CR2, CR1, #0\n" } } */
return 0
}
+
+# Return 1 if the target supports coprocessor instructions: cdp, ldc, stc, mcr and
+# mrc.
+proc check_effective_target_arm_coproc1_ok_nocache { } {
+ if { ![istarget arm*-*-*] } {
+ return 0
+ }
+ return [check_no_compiler_messages_nocache arm_coproc1_ok assembly {
+ #if (__thumb__ && !__thumb2__) || __ARM_ARCH < 4
+ #error FOO
+ #endif
+ }]
+}
+
+proc check_effective_target_arm_coproc1_ok { } {
+ return [check_cached_effective_target arm_coproc1_ok \
+ check_effective_target_arm_coproc1_ok_nocache]
+}
+
+# Return 1 if the target supports all coprocessor instructions checked by
+# check_effective_target_arm_coproc1_ok in addition to the following: cdp2,
+# ldc2, ldc2l, stc2, stc2l, mcr2 and mrc2.
+proc check_effective_target_arm_coproc2_ok_nocache { } {
+ if { ![check_effective_target_arm_coproc1_ok] } {
+ return 0
+ }
+ return [check_no_compiler_messages_nocache arm_coproc2_ok assembly {
+ #if __ARM_ARCH < 5
+ #error FOO
+ #endif
+ }]
+}
+
+proc check_effective_target_arm_coproc2_ok { } {
+ return [check_cached_effective_target arm_coproc2_ok \
+ check_effective_target_arm_coproc2_ok_nocache]
+}
+
+# Return 1 if the target supports all coprocessor instructions checked by
+# check_effective_target_arm_coproc2_ok in addition to the following: mcrr
+# and mrrc.
+proc check_effective_target_arm_coproc3_ok_nocache { } {
+ if { ![check_effective_target_arm_coproc2_ok] } {
+ return 0
+ }
+ return [check_no_compiler_messages_nocache arm_coproc3_ok assembly {
+ #if __ARM_ARCH < 6 && !defined (__ARM_ARCH_5TE__)
+ #error FOO
+ #endif
+ }]
+}
+
+proc check_effective_target_arm_coproc3_ok { } {
+ return [check_cached_effective_target arm_coproc3_ok \
+ check_effective_target_arm_coproc3_ok_nocache]
+}
+
+# Return 1 if the target supports all coprocessor instructions checked by
+# check_effective_target_arm_coproc3_ok in addition to the following: mcrr2
+# and mrrc2.
+proc check_effective_target_arm_coproc4_ok_nocache { } {
+ if { ![check_effective_target_arm_coproc3_ok] } {
+ return 0
+ }
+ return [check_no_compiler_messages_nocache arm_coproc4_ok assembly {
+ #if __ARM_ARCH < 6
+ #error FOO
+ #endif
+ }]
+}
+
+proc check_effective_target_arm_coproc4_ok { } {
+ return [check_cached_effective_target arm_coproc4_ok \
+ check_effective_target_arm_coproc4_ok_nocache]
+}