+2015-06-09 Shiva Chen <shiva0217@gmail.com>
+
+ * config/arm/sync.md (atomic_load<mode>): Add conditional code for
+ lda/ldr.
+ (atomic_store<mode>): Likewise.
+
2015-06-09 Richard Biener <rguenther@suse.de>

* cfgloop.c (get_loop_body_in_bfs_order): Fix assert.
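For context: the change makes the ARMv8-A acquire/release load and store
patterns predicable, so GCC's if-conversion can fold a guarding branch into
the memory access itself. A minimal sketch of the intended effect (the
invocation, register allocation, and assembly are illustrative assumptions,
not output taken from this patch):

    /* Assumed build: arm-none-eabi-gcc -O2 -marm -march=armv8-a  */
    void unlock (int *lock, int contended)
    {
      if (contended)
        /* A release store; on ARMv8-A this maps to STL.  */
        __atomic_store_n (lock, 0, __ATOMIC_RELEASE);
    }

    /* Expected shape after this patch (registers illustrative):
         cmp     r1, #0
         movne   r3, #0
         stlne   r3, [r0]    @ predicated store-release, no branch needed
         bx      lr
       Previously the STL pattern was not predicable, so a branch around
       the store was required.  */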
--- a/gcc/config/arm/sync.md
+++ b/gcc/config/arm/sync.md
(define_insn "atomic_load<mode>"
  [(set (match_operand:QHSI 0 "register_operand" "=r")
{
enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model))
- return \"ldr<sync_sfx>\\t%0, %1\";
+ return \"ldr%(<sync_sfx>%)\\t%0, %1\";
else
- return \"lda<sync_sfx>\\t%0, %1\";
+ return \"lda<sync_sfx>%?\\t%0, %1\";
}
-)
+ [(set_attr "predicable" "yes")
+ (set_attr "predicable_short_it" "no")])
(define_insn "atomic_store<mode>"
[(set (match_operand:QHSI 0 "memory_operand" "=Q")
{
enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_acquire (model))
- return \"str<sync_sfx>\t%1, %0\";
+ return \"str%(<sync_sfx>%)\t%1, %0\";
else
- return \"stl<sync_sfx>\t%1, %0\";
+ return \"stl<sync_sfx>%?\t%1, %0\";
}
-)
+ [(set_attr "predicable" "yes")
+ (set_attr "predicable_short_it" "no")])
;; Note that ldrd and vldr are *not* guaranteed to be single-copy atomic,
;; even for a 64-bit aligned address. Instead we use a ldrexd unpaired
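A note on the template escapes used above (semantics as implemented by the
ARM backend's print_operand; the expansions are illustrative, assuming
<sync_sfx> is "b" for QImode and an "ne" condition):

    ldr%(<sync_sfx>%)\t%0, %1  ->  ldrneb r0, [r1]  (divided syntax:
                                                     condition before the
                                                     size suffix)
                                   ldrbne r0, [r1]  (unified syntax:
                                                     condition after the
                                                     size suffix)
    lda<sync_sfx>%?\t%0, %1    ->  ldabne r0, [r1]  (ARMv8 acquire load;
                                                     unified syntax only)

%? prints the current condition code unconditionally, while %( and %) print
it only in the position the selected assembler syntax requires, which is why
the plain ldr/str templates need the bracketed form. (set_attr "predicable"
"yes") is what lets the conditional-execution passes predicate these insns
at all, and (set_attr "predicable_short_it" "no") keeps them out of the
restricted IT blocks that ARMv8 imposes on Thumb-2, so the conditional forms
are only emitted in ARM state (hence -marm in the test below).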
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/stl-cond.c
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arm_ok } */
+/* { dg-require-effective-target arm_arch_v8a_ok } */
+/* { dg-options "-O2 -marm" } */
+/* { dg-add-options arm_arch_v8a } */
+
+struct backtrace_state
+{
+ int threaded;
+ int lock_alloc;
+};
+
+void foo (struct backtrace_state *state)
+{
+ if (state->threaded)
+ __sync_lock_release (&state->lock_alloc);
+}
+
+/* { dg-final { scan-assembler "stlne" } } */
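The test pins ARM state with -marm because, as noted above, stl is not
predicated under the ARMv8 Thumb-2 IT restrictions. __sync_lock_release
performs a release store of 0 to state->lock_alloc, so with the store
pattern now predicable, if-conversion emits the conditional stlne that the
scan-assembler directive checks for. Assuming the test lands as
gcc.target/arm/stl-cond.c (per the header above), it can be run on its own
with the usual DejaGnu invocation:

    make check-gcc RUNTESTFLAGS="arm.exp=stl-cond.c"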