From a7b8107f8ee99629c28ab9e0e5b00c589d8577db Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Mon, 30 Apr 2012 09:51:15 -0700
Subject: [PATCH] arm.md (UNSPEC_LL): New.

	* config/arm/arm.md (UNSPEC_LL): New.
	* config/arm/sync.md (atomic_loaddi, atomic_loaddi_1): New.
	(arm_load_exclusivedi): Use %H0.

From-SVN: r186990
---
 gcc/ChangeLog          |  6 ++++++
 gcc/config/arm/arm.md  |  1 +
 gcc/config/arm/sync.md | 36 ++++++++++++++++++++++++++----------
 3 files changed, 33 insertions(+), 10 deletions(-)

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 5a39f21ee60..ea032c11afc 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,9 @@
+2012-04-30  Richard Henderson
+
+	* config/arm/arm.md (UNSPEC_LL): New.
+	* config/arm/sync.md (atomic_loaddi, atomic_loaddi_1): New.
+	(arm_load_exclusivedi): Use %H0.
+
 2012-04-30  Jason Merrill
 
 	* dwarf2out.c (comdat_symbol_id): Add const.
diff --git a/gcc/config/arm/arm.md b/gcc/config/arm/arm.md
index 9506228c5e4..7a49270801c 100644
--- a/gcc/config/arm/arm.md
+++ b/gcc/config/arm/arm.md
@@ -117,6 +117,7 @@
 			; that.
   UNSPEC_UNALIGNED_STORE ; Same for str/strh.
   UNSPEC_PIC_UNIFIED	; Create a common pic addressing form.
+  UNSPEC_LL		; Represent an unpaired load-register-exclusive.
 ])
 
 ;; UNSPEC_VOLATILE Usage:
diff --git a/gcc/config/arm/sync.md b/gcc/config/arm/sync.md
index 03838f5d247..86135bb64c4 100644
--- a/gcc/config/arm/sync.md
+++ b/gcc/config/arm/sync.md
@@ -65,6 +65,31 @@
    (set_attr "conds" "unconditional")
    (set_attr "predicable" "no")])
 
+;; Note that ldrd and vldr are *not* guaranteed to be single-copy atomic,
+;; even for a 64-bit aligned address.  Instead we use a ldrexd unpaired
+;; with a store.
+(define_expand "atomic_loaddi"
+  [(match_operand:DI 0 "s_register_operand")	;; val out
+   (match_operand:DI 1 "mem_noofs_operand")	;; memory
+   (match_operand:SI 2 "const_int_operand")]	;; model
+  "TARGET_HAVE_LDREXD && ARM_DOUBLEWORD_ALIGN"
+{
+  enum memmodel model = (enum memmodel) INTVAL (operands[2]);
+  expand_mem_thread_fence (model);
+  emit_insn (gen_atomic_loaddi_1 (operands[0], operands[1]));
+  if (model == MEMMODEL_SEQ_CST)
+    expand_mem_thread_fence (model);
+  DONE;
+})
+
+(define_insn "atomic_loaddi_1"
+  [(set (match_operand:DI 0 "s_register_operand" "=r")
+	(unspec:DI [(match_operand:DI 1 "mem_noofs_operand" "Ua")]
+		   UNSPEC_LL))]
+  "TARGET_HAVE_LDREXD && ARM_DOUBLEWORD_ALIGN"
+  "ldrexd%?\t%0, %H0, %C1"
+  [(set_attr "predicable" "yes")])
+
 (define_expand "atomic_compare_and_swap<mode>"
   [(match_operand:SI 0 "s_register_operand" "")	;; bool out
    (match_operand:QHSD 1 "s_register_operand" "")	;; val out
@@ -317,16 +342,7 @@
 	  [(match_operand:DI 1 "mem_noofs_operand" "Ua")]
 	  VUNSPEC_LL))]
   "TARGET_HAVE_LDREXD"
-  {
-    rtx target = operands[0];
-    /* The restrictions on target registers in ARM mode are that the two
-       registers are consecutive and the first one is even; Thumb is
-       actually more flexible, but DI should give us this anyway.
-       Note that the 1st register always gets the lowest word in memory.  */
-    gcc_assert ((REGNO (target) & 1) == 0);
-    operands[2] = gen_rtx_REG (SImode, REGNO (target) + 1);
-    return "ldrexd%?\t%0, %2, %C1";
-  }
+  "ldrexd%?\t%0, %H0, %C1"
   [(set_attr "predicable" "yes")])
 
 (define_insn "arm_store_exclusive<mode>"
-- 
2.30.2
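
As a hedged illustration (not part of the commit itself): atomic_loaddi is
the DI instance of the standard atomic_load<mode> pattern name, so this is
the expander that services __atomic_load_n on a 64-bit object when compiling
for an ARMv7-A target with LDREXD.  A minimal C sketch, assuming
-march=armv7-a; the function names here are arbitrary:

    #include <stdint.h>

    /* Relaxed 64-bit atomic load: goes through atomic_loaddi with
       MEMMODEL_RELAXED, so the expander emits the unpaired ldrexd
       with no barriers around it.  */
    uint64_t
    load_relaxed (uint64_t *p)
    {
      return __atomic_load_n (p, __ATOMIC_RELAXED);
    }

    /* Seq-cst load: the expander emits a fence before the ldrexd and,
       since model == MEMMODEL_SEQ_CST, a second fence after it.  */
    uint64_t
    load_seq_cst (uint64_t *p)
    {
      return __atomic_load_n (p, __ATOMIC_SEQ_CST);
    }

Either function should compile to a single ldrexd (plus memory barriers in
the seq-cst case) rather than an ldrd, since ldrd is not single-copy atomic
even at a doubleword-aligned address.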