From f70fb3b635f9618c6d2ee3848ba836914f7951c2 Mon Sep 17 00:00:00 2001
From: Matthew Wahab
Date: Mon, 1 Jun 2015 15:18:19 +0000
Subject: [PATCH] re PR target/65697 (__atomic memory barriers not strong
 enough for __sync builtins)

	PR target/65697
	* config/aarch64/aarch64.c (aarch64_emit_post_barrier): New.
	(aarch64_split_atomic_op): Check for __sync memory models, emit
	appropriate initial loads and final barriers.

From-SVN: r223983
---
 gcc/ChangeLog                |  7 +++++++
 gcc/config/aarch64/aarch64.c | 31 ++++++++++++++++++++++++++++++-
 2 files changed, 37 insertions(+), 1 deletion(-)

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index daf647637e4..0c109c33c0a 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,10 @@
+2015-06-01  Matthew Wahab
+
+	PR target/65697
+	* config/aarch64/aarch64.c (aarch64_emit_post_barrier): New.
+	(aarch64_split_atomic_op): Check for __sync memory models, emit
+	appropriate initial loads and final barriers.
+
 2015-06-01  Vidya Praveen
 
 	* Makefile.in: Fix gcov dependencies that should
diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
index 648a548e0e0..93bea074d68 100644
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
@@ -9409,6 +9409,23 @@ aarch64_expand_compare_and_swap (rtx operands[])
   emit_insn (gen_rtx_SET (bval, x));
 }
 
+/* Emit a barrier, that is appropriate for memory model MODEL, at the end of a
+   sequence implementing an atomic operation.  */
+
+static void
+aarch64_emit_post_barrier (enum memmodel model)
+{
+  const enum memmodel base_model = memmodel_base (model);
+
+  if (is_mm_sync (model)
+      && (base_model == MEMMODEL_ACQUIRE
+	  || base_model == MEMMODEL_ACQ_REL
+	  || base_model == MEMMODEL_SEQ_CST))
+    {
+      emit_insn (gen_mem_thread_fence (GEN_INT (MEMMODEL_SEQ_CST)));
+    }
+}
+
 /* Split a compare and swap pattern.  */
 
 void
@@ -9471,6 +9488,8 @@ aarch64_split_atomic_op (enum rtx_code code, rtx old_out, rtx new_out, rtx mem,
 {
   machine_mode mode = GET_MODE (mem);
   machine_mode wmode = (mode == DImode ? DImode : SImode);
+  const enum memmodel model = memmodel_from_int (INTVAL (model_rtx));
+  const bool is_sync = is_mm_sync (model);
   rtx_code_label *label;
   rtx x;
 
@@ -9485,7 +9504,13 @@ aarch64_split_atomic_op (enum rtx_code code, rtx old_out, rtx new_out, rtx mem,
     old_out = new_out;
   value = simplify_gen_subreg (wmode, value, mode, 0);
 
-  aarch64_emit_load_exclusive (mode, old_out, mem, model_rtx);
+  /* The initial load can be relaxed for a __sync operation since a final
+     barrier will be emitted to stop code hoisting.  */
+  if (is_sync)
+    aarch64_emit_load_exclusive (mode, old_out, mem,
+				 GEN_INT (MEMMODEL_RELAXED));
+  else
+    aarch64_emit_load_exclusive (mode, old_out, mem, model_rtx);
 
   switch (code)
     {
@@ -9521,6 +9546,10 @@ aarch64_split_atomic_op (enum rtx_code code, rtx old_out, rtx new_out, rtx mem,
   x = gen_rtx_IF_THEN_ELSE (VOIDmode, x,
			    gen_rtx_LABEL_REF (Pmode, label), pc_rtx);
   aarch64_emit_unlikely_jump (gen_rtx_SET (pc_rtx, x));
+
+  /* Emit any final barrier needed for a __sync operation.  */
+  if (is_sync)
+    aarch64_emit_post_barrier (model);
 }
 
 static void
--
2.30.2
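
For context on the fix: a __sync builtin such as __sync_fetch_and_add must
behave as a full barrier, which is stronger than the acquire-release
sequence previously emitted. A minimal C test case is sketched below; the
function name is invented for illustration, and the assembly in the comment
is only the rough shape of the expected post-patch output (a relaxed ldxr
instead of ldaxr for the initial load, plus a trailing dmb ish), not
captured compiler output.

/* Build for AArch64, e.g. at -O2.  After this patch the expansion is
   expected to look roughly like (register allocation will vary):

	.L2:
	    ldxr   w1, [x0]       // initial load relaxed: ldxr, not ldaxr
	    add    w2, w1, #1
	    stlxr  w3, w2, [x0]   // store-exclusive with release semantics
	    cbnz   w3, .L2
	    dmb    ish            // final full barrier for the __sync model  */

int
sync_fetch_add (int *p)
{
  return __sync_fetch_and_add (p, 1);
}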