+2014-11-17 Alan Lawrence <alan.lawrence@arm.com>
+
+ * config/aarch64/aarch64-builtins.c (TYPES_CREATE): Remove.
+ * config/aarch64/aarch64-simd-builtins.def (create): Remove.
+ * config/aarch64/aarch64-simd.md (aarch64_create<mode>): Remove.
+ * config/aarch64/arm_neon.h (vcreate_f64, vreinterpret_f64_s64,
+ vreinterpret_f64_u64): Replace __builtin_aarch64_createv1df with C casts.
+ * config/aarch64/iterators.md (VD1): Remove.
+
2014-11-17 Kyrylo Tkachov <kyrylo.tkachov@arm.com>

 * config/aarch64/aarch64-cores.def (cortex-a53): Remove
aarch64_types_unopu_qualifiers[SIMD_MAX_BUILTIN_ARGS]
= { qualifier_unsigned, qualifier_unsigned };
#define TYPES_UNOPU (aarch64_types_unopu_qualifiers)
-#define TYPES_CREATE (aarch64_types_unop_qualifiers)
static enum aarch64_type_qualifiers
aarch64_types_binop_qualifiers[SIMD_MAX_BUILTIN_ARGS]
= { qualifier_none, qualifier_none, qualifier_maybe_immediate };
1-9 - CODE_FOR_<name><mode><1-9>
10 - CODE_FOR_<name><mode>. */
- BUILTIN_VD1 (CREATE, create, 0)
BUILTIN_VDC (COMBINE, combine, 0)
BUILTIN_VB (BINOP, pmul, 0)
BUILTIN_VDQF (UNOP, sqrt, 2)
;; Patterns for AArch64 SIMD Intrinsics.
-(define_expand "aarch64_create<mode>"
- [(match_operand:VD1 0 "register_operand" "")
- (match_operand:DI 1 "general_operand" "")]
- "TARGET_SIMD"
-{
- rtx src = gen_lowpart (<MODE>mode, operands[1]);
- emit_move_insn (operands[0], src);
- DONE;
-})
-
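The removed expander only re-interpreted its DI operand as the destination vector mode via gen_lowpart. The same reinterpretation can be expressed directly in C, which is what the arm_neon.h hunk below does; a minimal sketch of the equivalence (the helper name create_v1df_equiv is hypothetical, not part of the patch):

#include <arm_neon.h>
#include <stdint.h>

/* C-level equivalent of the removed __builtin_aarch64_createv1df:
   reinterpret a 64-bit integer as a one-lane double vector.  */
static inline float64x1_t
create_v1df_equiv (uint64_t bits)
{
  return (float64x1_t) bits;
}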
;; Lane extraction with sign extension to general purpose register.
(define_insn "*aarch64_get_lane_extend<GPI:mode><VDQQH:mode>"
[(set (match_operand:GPI 0 "register_operand" "=r")
__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
vcreate_f64 (uint64_t __a)
{
- return __builtin_aarch64_createv1df (__a);
+ return (float64x1_t) __a;
}
__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
__extension__ static __inline float64x1_t __attribute__((__always_inline__))
vreinterpret_f64_s64 (int64x1_t __a)
{
- return __builtin_aarch64_createv1df ((uint64_t) vget_lane_s64 (__a, 0));
+ return (float64x1_t) __a;
}
__extension__ static __inline float64x1_t __attribute__((__always_inline__))
__extension__ static __inline float64x1_t __attribute__((__always_inline__))
vreinterpret_f64_u64 (uint64x1_t __a)
{
- return __builtin_aarch64_createv1df (vget_lane_u64 (__a, 0));
+ return (float64x1_t) __a;
}
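The vcreate_f64 and vreinterpret_f64_* casts above are bit-level reinterpretations, not value conversions: GCC accepts a cast from a 64-bit integer, or from another 64-bit vector, to the same-sized float64x1_t. A minimal sketch of the observable behaviour (the main function and the test value are illustrative only, not part of the patch):

#include <arm_neon.h>
#include <stdio.h>

int
main (void)
{
  /* 0x3ff0000000000000 is the IEEE-754 bit pattern of 1.0; vcreate_f64
     reinterprets it as the single lane of a float64x1_t.  */
  float64x1_t v = vcreate_f64 (0x3ff0000000000000ULL);
  printf ("%f\n", vget_lane_f64 (v, 0));  /* prints 1.000000 */
  return 0;
}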
__extension__ static __inline float64x2_t __attribute__((__always_inline__))
;; Double vector modes for combines.
(define_mode_iterator VDIC [V8QI V4HI V2SI])
-;; Double vector modes inc V1DF
-(define_mode_iterator VD1 [V8QI V4HI V2SI V2SF V1DF])
-
;; Vector modes except double int.
(define_mode_iterator VDQIF [V8QI V16QI V4HI V8HI V2SI V4SI V2SF V4SF V2DF])
+2014-11-17 Alan Lawrence <alan.lawrence@arm.com>
+
+ * gcc.target/aarch64/simd/vfma_f64.c: Use asm volatile to prevent
+ constant folding of the arguments.
+ * gcc.target/aarch64/simd/vfms_f64.c: Likewise.
+
2014-11-17 Ilya Enkovich <ilya.enkovich@intel.com>

 * gcc.target/i386/chkp-strlen-1.c: New.
#define EPS 1.0e-15
+#define INHIB_OPT(x) asm volatile ("mov %d0, %1.d[0]" \
+ : "=w"(x) \
+ : "w"(x) \
+ : /* No clobbers. */);
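INHIB_OPT is a volatile identity move of its argument through a SIMD register; because the asm is opaque to the optimizers, the vcreate_f64 results below can no longer be treated as compile-time constants, so the fused multiply-add must actually execute at run time. For reference, a use such as INHIB_OPT (arg1) expands to roughly the following (a sketch of the preprocessed form, not extra test code):

asm volatile ("mov %d0, %1.d[0]"   /* identity move through a D register */
              : "=w" (arg1)        /* output: arg1, in a SIMD register */
              : "w" (arg1)         /* input: the same arg1 */
              : /* No clobbers.  */);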
extern void abort (void);
arg2 = vcreate_f64 (0x3fa88480812d6670ULL);
arg3 = vcreate_f64 (0x3fd5791ae2a92572ULL);
+ INHIB_OPT (arg1);
+ INHIB_OPT (arg2);
+ INHIB_OPT (arg3);
+
expected = 0.6280448184360076;
actual = vget_lane_f64 (vfma_f64 (arg1, arg2, arg3), 0);
#define EPS 1.0e-15
+#define INHIB_OPT(x) asm volatile ("mov %d0, %1.d[0]" \
+ : "=w"(x) \
+ : "w"(x) \
+ : /* No clobbers. */);
extern void abort (void);
arg2 = vcreate_f64 (0x3fe6b78680fa29ceULL);
arg3 = vcreate_f64 (0x3feea3cbf921fbe0ULL);
+ INHIB_OPT (arg1);
+ INHIB_OPT (arg2);
+ INHIB_OPT (arg3);
+
expected = 4.4964705746355915e-2;
actual = vget_lane_f64 (vfms_f64 (arg1, arg2, arg3), 0);