+2015-06-29  Matthew Wahab  <matthew.wahab@arm.com>
+
+ PR target/65697
+ * config/arm/arm.c (arm_split_atomic_op): For ARMv8, replace an
+ initial acquire barrier with final barrier.
+
2015-06-29  Richard Henderson  <rth@redhat.com>

	* config/i386/constraints.md (Bf): New constraint.
rtx_code_label *label;
rtx x;
+ bool is_armv8_sync = arm_arch8 && is_mm_sync (model);
+
bool use_acquire = TARGET_HAVE_LDACQ
		   && !(is_mm_relaxed (model) || is_mm_consume (model)
			|| is_mm_release (model));

bool use_release = TARGET_HAVE_LDACQ
		   && !(is_mm_relaxed (model) || is_mm_consume (model)
			|| is_mm_acquire (model));

+ /* For ARMv8, a load-acquire is too weak for __sync memory orders.  Instead,
+    a full barrier is emitted after the store-release.  */
+ if (is_armv8_sync)
+   use_acquire = false;
+
/* Checks whether a barrier is needed and emits one accordingly. */
if (!(use_acquire || use_release))
arm_pre_atomic_barrier (model);
emit_unlikely_jump (gen_cbranchsi4 (x, cond, const0_rtx, label));
/* Checks whether a barrier is needed and emits one accordingly. */
- if (!(use_acquire || use_release))
+ if (is_armv8_sync
+     || !(use_acquire || use_release))
arm_post_atomic_barrier (model);
}
\f
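
Background note (not part of the patch): the __sync builtins carry full-barrier
semantics, so on ARMv8 a load-acquire on its own is too weak; with this change
the expansion uses a plain exclusive load and relies on the full barrier emitted
after the store-release.  A minimal sketch of the kind of user code this
affects, with a hypothetical counter and function name:

/* Hypothetical example, not from the patch.  GCC expands this legacy
   __sync read-modify-write through arm_split_atomic_op; with the change
   above, the ARMv8 sequence drops the acquire on the exclusive load and
   emits a full barrier after the store-release.  */
#include <stdint.h>

static int32_t counter;	/* hypothetical shared variable */

int32_t
bump_counter (void)
{
  return __sync_fetch_and_add (&counter, 1);
}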