+2017-10-14  Uros Bizjak  <ubizjak@gmail.com>
+
+ * target-insns.def: Add memory_blockage.
+ * optabs.c (expand_memory_blockage): New function.
+ (expand_asm_memory_barrier): Rename ...
+ (expand_asm_memory_blockage): ... to this.
+ (expand_mem_thread_fence): Call expand_memory_blockage
+ instead of expand_asm_memory_barrier.
+ (expand_mem_signal_fence): Ditto.
+ (expand_atomic_load): Ditto.
+ (expand_atomic_store): Ditto.
+ * doc/md.texi (Standard Pattern Names For Generation):
+ Document memory_blockage instruction pattern.
+
2017-10-13  Sebastian Perta  <sebastian.perta@renesas.com>

 * config/rl78/rl78.c (rl78_emit_libcall): New function.

2017-10-13  Jan Hubicka  <hubicka@ucw.cz>

 * cfghooks.c (verify_flow_info): Check that edge probabilities are
 set.

2017-10-13  Nathan Sidwell  <nathan@acm.org>

 graphite_expression_type_precision): Avoid global constructor
 by moving ...
 (translate_isl_ast_to_gimple::translate_isl_ast_to_gimple): Here.
 (translate_isl_ast_to_gimple::graphite_expr_type): Add type
 member.
 (translate_isl_ast_to_gimple::translate_isl_ast_node_for): Use it.
 (translate_isl_ast_to_gimple::build_iv_mapping): Likewise.
 (translate_isl_ast_to_gimple::graphite_create_new_guard): Likewise.
equivalences across the boundary defined by the blockage insn.
This needs to be an UNSPEC_VOLATILE pattern or a volatile ASM.
+@cindex @code{memory_blockage} instruction pattern
+@item @samp{memory_blockage}
+This pattern, if defined, represents a compiler memory barrier, and will be
+placed at points across which RTL passes may not propagate memory accesses.
+This instruction needs to read and write volatile BLKmode memory. It does
+not need to generate any machine instruction. If this pattern is not defined,
+the compiler falls back to emitting an instruction corresponding
+to @code{asm volatile ("" ::: "memory")}.
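+
+For example, a port might define this pattern along the following lines
+(a sketch modeled on the i386 implementation, where
+@code{UNSPEC_MEMORY_BLOCKAGE} is a port-specific unspec constant):
+
+@smallexample
+(define_expand "memory_blockage"
+  [(set (match_dup 0)
+        (unspec:BLK [(match_dup 0)] UNSPEC_MEMORY_BLOCKAGE))]
+  ""
+@{
+  operands[0] = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
+  MEM_VOLATILE_P (operands[0]) = 1;
+@})
+
+(define_insn "*memory_blockage"
+  [(set (match_operand:BLK 0)
+        (unspec:BLK [(match_dup 0)] UNSPEC_MEMORY_BLOCKAGE))]
+  ""
+  ""
+  [(set_attr "length" "0")])
+@end smallexample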
+
@cindex @code{memory_barrier} instruction pattern
@item @samp{memory_barrier}
If the target memory model is not fully synchronous, then this pattern
return true;
}
-/* Generate asm volatile("" : : : "memory") as the memory barrier. */
+/* Generate asm volatile("" : : : "memory") as the memory blockage. */
static void
-expand_asm_memory_barrier (void)
+expand_asm_memory_blockage (void)
{
  rtx asm_op, clob;

  asm_op = gen_rtx_ASM_OPERANDS (VOIDmode, "", "", 0,
				 rtvec_alloc (0), rtvec_alloc (0),
				 rtvec_alloc (0), UNKNOWN_LOCATION);
  MEM_VOLATILE_P (asm_op) = 1;

  clob = gen_rtx_SCRATCH (VOIDmode);
  clob = gen_rtx_MEM (BLKmode, clob);
  clob = gen_rtx_CLOBBER (VOIDmode, clob);

  emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, asm_op, clob)));
}
+/* Do not propagate memory accesses across this point. */
+
+static void
+expand_memory_blockage (void)
+{
+  if (targetm.have_memory_blockage ())
+    emit_insn (targetm.gen_memory_blockage ());
+ else
+ expand_asm_memory_blockage ();
+}
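+
+/* Note: targetm.have_memory_blockage () and targetm.gen_memory_blockage ()
+   are generated from the memory_blockage entry in target-insns.def; the
+   former reports whether the target defines the pattern, the latter
+   generates an instance of it.  */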
+
/* This routine will either emit the mem_thread_fence pattern or issue a
sync_synchronize to generate a fence for memory model MEMMODEL. */
if (targetm.have_mem_thread_fence ())
{
emit_insn (targetm.gen_mem_thread_fence (GEN_INT (model)));
- expand_asm_memory_barrier ();
+ expand_memory_blockage ();
}
else if (targetm.have_memory_barrier ())
emit_insn (targetm.gen_memory_barrier ());
else if (synchronize_libfunc != NULL_RTX)
emit_library_call (synchronize_libfunc, LCT_NORMAL, VOIDmode);
else
- expand_asm_memory_barrier ();
+ expand_memory_blockage ();
}
/* Emit a signal fence with given memory model. */
/* No machine barrier is required to implement a signal fence, but
a compiler memory barrier must be issued, except for relaxed MM. */
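/* For example, the C11 call atomic_signal_fence (memory_order_seq_cst)
   emits no machine code, but the blockage issued below still prevents
   the compiler from moving memory accesses across the fence point.  */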
if (!is_mm_relaxed (model))
- expand_asm_memory_barrier ();
+ expand_memory_blockage ();
}
/* This function expands the atomic load operation:
struct expand_operand ops[3];
rtx_insn *last = get_last_insn ();
if (is_mm_seq_cst (model))
- expand_asm_memory_barrier ();
+ expand_memory_blockage ();
create_output_operand (&ops[0], target, mode);
create_fixed_operand (&ops[1], mem);
if (maybe_expand_insn (icode, 3, ops))
{
if (!is_mm_relaxed (model))
- expand_asm_memory_barrier ();
+ expand_memory_blockage ();
return ops[0].value;
}
delete_insns_since (last);
{
rtx_insn *last = get_last_insn ();
if (!is_mm_relaxed (model))
- expand_asm_memory_barrier ();
+ expand_memory_blockage ();
create_fixed_operand (&ops[0], mem);
create_input_operand (&ops[1], val, mode);
create_integer_operand (&ops[2], model);
if (maybe_expand_insn (icode, 3, ops))
{
if (is_mm_seq_cst (model))
- expand_asm_memory_barrier ();
+ expand_memory_blockage ();
return const0_rtx;
}
delete_insns_since (last);
DEF_TARGET_INSN (load_multiple, (rtx x0, rtx x1, rtx x2))
DEF_TARGET_INSN (mem_thread_fence, (rtx x0))
DEF_TARGET_INSN (memory_barrier, (void))
+DEF_TARGET_INSN (memory_blockage, (void))
DEF_TARGET_INSN (movstr, (rtx x0, rtx x1, rtx x2))
DEF_TARGET_INSN (nonlocal_goto, (rtx x0, rtx x1, rtx x2, rtx x3))
DEF_TARGET_INSN (nonlocal_goto_receiver, (void))