+2018-09-26  Matthew Malcomson  <matthew.malcomson@arm.com>
+
+ * config/arm/arm.c (arm_split_compare_and_swap, arm_split_atomic_op):
+ Use new helper functions.
+ * config/arm/sync.md (atomic_load<mode>, atomic_store<mode>):
+ Use new helper functions.
+ * config/arm/aarch-common-protos.h (aarch_mm_needs_acquire,
+ aarch_mm_needs_release): New declarations.
+ * config/arm/aarch-common.c (aarch_mm_needs_acquire,
+ aarch_mm_needs_release): New.
+
2018-09-26  Eric Botcazou  <ebotcazou@adacore.com>

	* config/arm/arm.c (arm_reorg): Skip Thumb reorg pass for thunks.
extern bool aarch_rev16_p (rtx);
extern bool aarch_rev16_shleft_mask_imm_p (rtx, machine_mode);
extern bool aarch_rev16_shright_mask_imm_p (rtx, machine_mode);
+extern bool aarch_mm_needs_acquire (rtx);
+extern bool aarch_mm_needs_release (rtx);
extern int arm_early_load_addr_dep (rtx, rtx);
extern int arm_early_load_addr_dep_ptr (rtx, rtx);
extern int arm_early_store_addr_dep (rtx, rtx);
#include "tm.h"
#include "rtl.h"
#include "rtl-iter.h"
+#include "memmodel.h"
/* In ARMv8-A there's a general expectation that AESE/AESMC
and AESD/AESIMC sequences of the form:
return is_rev;
}
+/* Return TRUE if the rtx representing a memory model requires acquire
+   semantics.  */
+bool
+aarch_mm_needs_acquire (rtx const_int)
+{
+ enum memmodel model = memmodel_from_int (INTVAL (const_int));
+ return !(is_mm_relaxed (model)
+ || is_mm_consume (model)
+ || is_mm_release (model));
+}
+
+/* Return TRUE if the rtx representing a memory model requires release
+   semantics.  */
+bool
+aarch_mm_needs_release (rtx const_int)
+{
+ enum memmodel model = memmodel_from_int (INTVAL (const_int));
+ return !(is_mm_relaxed (model)
+ || is_mm_consume (model)
+ || is_mm_acquire (model));
+}
+
/* Return nonzero if the CONSUMER instruction (a load) does need
PRODUCER's value to calculate the address. */
int
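To make the classification the two helpers implement concrete, here is a small
stand-alone sketch (illustrative only, not GCC code: the real predicates take a
CONST_INT rtx and use the memmodel enum and is_mm_* tests from memmodel.h; the
enum and function names below are stand-ins) that prints which of the six C11
orders need acquire or release semantics:

/* Illustrative mock-up of the classification done by
   aarch_mm_needs_acquire and aarch_mm_needs_release.  The enum and the
   two functions are stand-ins, not GCC internals.  */
#include <stdbool.h>
#include <stdio.h>

enum order { RELAXED, CONSUME, ACQUIRE, RELEASE, ACQ_REL, SEQ_CST };

static bool
needs_acquire (enum order m)
{
  /* Only relaxed, consume and release lack acquire semantics.  */
  return !(m == RELAXED || m == CONSUME || m == RELEASE);
}

static bool
needs_release (enum order m)
{
  /* Only relaxed, consume and acquire lack release semantics.  */
  return !(m == RELAXED || m == CONSUME || m == ACQUIRE);
}

int
main (void)
{
  static const char *const names[]
    = { "relaxed", "consume", "acquire", "release", "acq_rel", "seq_cst" };
  for (int m = RELAXED; m <= SEQ_CST; m++)
    printf ("%-8s needs_acquire=%d needs_release=%d\n",
	    names[m], needs_acquire ((enum order) m),
	    needs_release ((enum order) m));
  return 0;
}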
void
arm_split_compare_and_swap (rtx operands[])
{
- rtx rval, mem, oldval, newval, neg_bval;
+ rtx rval, mem, oldval, newval, neg_bval, mod_s_rtx;
machine_mode mode;
enum memmodel mod_s, mod_f;
bool is_weak;
oldval = operands[3];
newval = operands[4];
is_weak = (operands[5] != const0_rtx);
- mod_s = memmodel_from_int (INTVAL (operands[6]));
+ mod_s_rtx = operands[6];
+ mod_s = memmodel_from_int (INTVAL (mod_s_rtx));
mod_f = memmodel_from_int (INTVAL (operands[7]));
neg_bval = TARGET_THUMB1 ? operands[0] : operands[8];
mode = GET_MODE (mem);
bool is_armv8_sync = arm_arch8 && is_mm_sync (mod_s);
- bool use_acquire = TARGET_HAVE_LDACQ
- && !(is_mm_relaxed (mod_s) || is_mm_consume (mod_s)
- || is_mm_release (mod_s));
-
- bool use_release = TARGET_HAVE_LDACQ
- && !(is_mm_relaxed (mod_s) || is_mm_consume (mod_s)
- || is_mm_acquire (mod_s));
+ bool use_acquire = TARGET_HAVE_LDACQ && aarch_mm_needs_acquire (mod_s_rtx);
+ bool use_release = TARGET_HAVE_LDACQ && aarch_mm_needs_release (mod_s_rtx);
/* For ARMv8, the load-acquire is too weak for __sync memory orders. Instead,
a full barrier is emitted after the store-release. */
bool is_armv8_sync = arm_arch8 && is_mm_sync (model);
- bool use_acquire = TARGET_HAVE_LDACQ
- && !(is_mm_relaxed (model) || is_mm_consume (model)
- || is_mm_release (model));
-
- bool use_release = TARGET_HAVE_LDACQ
- && !(is_mm_relaxed (model) || is_mm_consume (model)
- || is_mm_acquire (model));
+ bool use_acquire = TARGET_HAVE_LDACQ && aarch_mm_needs_acquire (model_rtx);
+ bool use_release = TARGET_HAVE_LDACQ && aarch_mm_needs_release (model_rtx);
/* For ARMv8, a load-acquire is too weak for __sync memory orders. Instead,
a full barrier is emitted after the store-release. */
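For context, a rough sketch of the effect on generated code (illustrative
only; exact register allocation and scheduling depend on target options and
the GCC version): for a __ATOMIC_SEQ_CST model both helpers return true, so
the exclusive pair in the compare-and-swap loop gets the acquire/release
forms.

#include <stdatomic.h>

/* Both aarch_mm_needs_acquire and aarch_mm_needs_release are true for
   seq_cst, so on an ARMv8-A target the CAS loop is expected to use the
   LDAEX/STLEX forms rather than plain LDREX/STREX (sketch only):

	loop:	ldaex	r3, [r0]
		cmp	r3, r1
		bne	done
		stlex	ip, r2, [r0]
		cmp	ip, #0
		bne	loop
	done:	...
*/
int
cas_seq_cst (atomic_int *p, int expected, int desired)
{
  return atomic_compare_exchange_strong (p, &expected, desired);
}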
VUNSPEC_LDA))]
"TARGET_HAVE_LDACQ"
{
- enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
- if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model))
+ if (aarch_mm_needs_acquire (operands[2]))
{
if (TARGET_THUMB1)
- return \"ldr<sync_sfx>\\t%0, %1\";
+ return "lda<sync_sfx>\t%0, %1";
else
- return \"ldr<sync_sfx>%?\\t%0, %1\";
+ return "lda<sync_sfx>%?\t%0, %1";
}
else
{
if (TARGET_THUMB1)
- return \"lda<sync_sfx>\\t%0, %1\";
+ return "ldr<sync_sfx>\t%0, %1";
else
- return \"lda<sync_sfx>%?\\t%0, %1\";
+ return "ldr<sync_sfx>%?\t%0, %1";
}
}
[(set_attr "arch" "32,v8mb,any")
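A minimal usage sketch for the rewritten atomic_load<mode> pattern (assuming
an ARMv8-A target where TARGET_HAVE_LDACQ holds; the mnemonics named in the
comments are illustrative):

#include <stdatomic.h>

int
load_acquire (atomic_int *p)
{
  /* aarch_mm_needs_acquire is true for acquire -> LDA.  */
  return atomic_load_explicit (p, memory_order_acquire);
}

int
load_relaxed (atomic_int *p)
{
  /* aarch_mm_needs_acquire is false for relaxed -> plain LDR.  */
  return atomic_load_explicit (p, memory_order_relaxed);
}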
VUNSPEC_STL))]
"TARGET_HAVE_LDACQ"
{
- enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
- if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_acquire (model))
+ if (aarch_mm_needs_release (operands[2]))
{
if (TARGET_THUMB1)
- return \"str<sync_sfx>\t%1, %0\";
+ return "stl<sync_sfx>\t%1, %0";
else
- return \"str<sync_sfx>%?\t%1, %0\";
+ return "stl<sync_sfx>%?\t%1, %0";
}
else
{
if (TARGET_THUMB1)
- return \"stl<sync_sfx>\t%1, %0\";
+ return "str<sync_sfx>\t%1, %0";
else
- return \"stl<sync_sfx>%?\t%1, %0\";
+ return "str<sync_sfx>%?\t%1, %0";
}
}
[(set_attr "arch" "32,v8mb,any")
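And the store-side counterpart for atomic_store<mode>, under the same
assumptions as the load sketch above:

#include <stdatomic.h>

void
store_release (atomic_int *p, int v)
{
  /* aarch_mm_needs_release is true for release -> STL.  */
  atomic_store_explicit (p, v, memory_order_release);
}

void
store_relaxed (atomic_int *p, int v)
{
  /* aarch_mm_needs_release is false for relaxed -> plain STR.  */
  atomic_store_explicit (p, v, memory_order_relaxed);
}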