* gcc-interface/gigi.h (build_atomic_load): Adjust prototype.
(build_atomic_store): Likewise.
(build_load_modify_store): Declare.
(VECTOR_TYPE_P): Delete.
* gcc-interface/decl.c (gnat_to_gnu_entity): Replace Is_Atomic with
Is_Atomic_Or_VFA throughout.
<E_Array_Type>: Build a variant of the XUA type instead of forcing
TYPE_VOLATILE on it.
<E_Array_Subtype>: Use the main variant of the base type.
Do not force TYPE_VOLATILE on the type being built.
<E_Record_Type>: Likewise.
<E_Record_Subtype>: Likewise.
<E_Subprogram_Type>: Rename local variable.
Add Atomic qualifier in conjunction with Volatile on types if needed.
Force BLKmode for by-ref types only at the end of the processing.
Change qualifiers only after changing the mode of the type. Set
TYPE_UNIVERSAL_ALIASING_P on the type directly.
(check_ok_for_atomic_type): Issue specific error message for VFA.
(gnat_to_gnu_component_type): Replace Is_Atomic with
Is_Atomic_Or_VFA throughout.
* gcc-interface/misc.c (gnat_get_alias_set): Test
TYPE_UNIVERSAL_ALIASING_P on the type directly.
* gcc-interface/trans.c (lvalue_required_p): Replace Is_Atomic with
Is_Atomic_Or_VFA throughout. Add missing guard.
(node_is_atomic): New predicate.
(node_has_volatile_full_access): Likewise.
(gnat_strip_type_conversion): New function.
(outer_atomic_access_required_p): New predicate.
(atomic_sync_required_p): Rename into...
(atomic_access_required_p): ...this. Add SYNC parameter, scan the
parent node first and then look for the atomic setting. Add support
for Volatile_Full_Access.
(Call_to_gnu): Add atomic_access and outer_atomic_access parameters
and adjust calls to the above functions. Use load-modify-store sequence
for updates of In/Out and Out parameters if required, as well as for
moving the result to the target if required. Add a couple of missing
guards.
(gnat_to_gnu): Adjust calls to the above functions.
<N_Object_Renaming_Declaration>: If the renamed object has side-effects,
evaluate only its address.
<N_Assignment_Statement>: Adjust call to Call_to_gnu. Use load-modify-store
sequence if required.
<N_Function_Call>: Adjust call to Call_to_gnu.
(extract_values): Adjust comment.
* gcc-interface/utils2.c (build_atomic_load): Add SYNC parameter and
use relaxed memory model if it is not set.
(build_atomic_store): Likewise.
(call_is_atomic_load): New predicate.
(build_load_modify_store): New function.
(build_binary_op) <MODIFY_EXPR>: Accept SAVE_EXPR on the LHS.
(gnat_stabilize_reference) <CALL_EXPR>: Deal with atomic loads.
From-SVN: r223652
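
For readers unfamiliar with the feature this patch implements: pragma
Volatile_Full_Access (VFA) requires every read or update of the object,
even one naming a single component, to be performed on the full object.
Below is a hedged C-level sketch, not part of the patch, of the
load-modify-store sequence that the new build_load_modify_store emits for
a component assignment such as "Buffer1.B := Temp;" in the tests added
here; the struct and names are illustrative stand-ins for the actual
GIMPLE.

#include <stdint.h>

/* Stand-in for VFA1_Pkg.Rec_VFA: a 32-bit record with two 16-bit fields.  */
struct rec { int16_t a, b; };

void
assign_b (struct rec *buffer1, int16_t temp)
{
  /* Read the full 32-bit object atomically; VFA does not imply
     synchronization, so the relaxed memory model is used.  */
  uint32_t image = __atomic_load_n ((uint32_t *) buffer1, __ATOMIC_RELAXED);

  /* Modify only the selected component, in a temporary.  */
  ((struct rec *) &image)->b = temp;

  /* Write the full object back.  Even though the load and the store are
     both atomic, the sequence as a whole is not.  */
  __atomic_store_n ((uint32_t *) buffer1, image, __ATOMIC_RELAXED);
}
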
+2015-05-25 Eric Botcazou <ebotcazou@adacore.com>
+
+ * gcc-interface/gigi.h (build_atomic_load): Adjust prototype.
+ (build_atomic_store): Likewise.
+ (build_load_modify_store): Declare.
+ (VECTOR_TYPE_P): Delete.
+ * gcc-interface/decl.c (gnat_to_gnu_entity): Replace Is_Atomic with
+ Is_Atomic_Or_VFA throughout.
+ <E_Array_Type>: Build a variant of the XUA type instead of forcing
+ TYPE_VOLATILE on it.
+ <E_Array_Subtype>: Use the main variant of the base type.
+ Do not force TYPE_VOLATILE on the type being built.
+ <E_Record_Type>: Likewise.
+ <E_Record_Subtype>: Likewise.
+ <E_Subprogram_Type>: Rename local variable.
+ Add Atomic qualifier in conjunction with Volatile on types if needed.
+ Force BLKmode for by-ref types only at the end of the processing.
+ Change qualifiers only after changing the mode of the type. Set
+ TYPE_UNIVERSAL_ALIASING_P on the type directly.
+ (check_ok_for_atomic_type): Issue specific error message for VFA.
+ (gnat_to_gnu_component_type): Replace Is_Atomic with
+ Is_Atomic_Or_VFA throughout.
+ * gcc-interface/misc.c (gnat_get_alias_set): Test
+ TYPE_UNIVERSAL_ALIASING_P on the type directly.
+ * gcc-interface/trans.c (lvalue_required_p): Replace Is_Atomic with
+ Is_Atomic_Or_VFA throughout. Add missing guard.
+ (node_is_atomic): New predicate.
+ (node_has_volatile_full_access): Likewise.
+ (gnat_strip_type_conversion): New function.
+ (outer_atomic_access_required_p): New predicate.
+ (atomic_sync_required_p): Rename into...
+ (atomic_access_required_p): ...this. Add SYNC parameter, scan the
+ parent node first and then look for the atomic setting. Add support
+ for Volatile_Full_Access.
+ (Call_to_gnu): Add atomic_access and outer_atomic_access parameters
+ and adjust calls to the above functions. Use load-modify-store sequence
+ for updates of In/Out and Out parameters if required, as well as for
+ moving the result to the target if required. Add a couple of missing
+ guards.
+ (gnat_to_gnu): Adjust calls to the above functions.
+ <N_Object_Renaming_Declaration>: If the renamed object has side-effects,
+ evaluate only its address.
+ <N_Assignment_Statement>: Adjust call to Call_to_gnu. Use load-modify-store
+ sequence if required.
+ <N_Function_Call>: Adjust call to Call_to_gnu.
+ (extract_values): Adjust comment.
+ * gcc-interface/utils2.c (build_atomic_load): Add SYNC parameter and
+ use relaxed memory model if it is not set.
+ (build_atomic_store): Likewise.
+ (call_is_atomic_load): New predicate.
+ (build_load_modify_store): New function.
+ (build_binary_op) <MODIFY_EXPR>: Accept SAVE_EXPR on the LHS.
+ (gnat_stabilize_reference) <CALL_EXPR>: Deal with atomic loads.
+
2015-05-25 Eric Botcazou <ebotcazou@adacore.com>
* gcc-interface/ada-tree.h (DECL_RENAMING_GLOBAL_P): Rename into...
constant, set the alignment to the smallest one which is not
smaller than the size, with an appropriate cap. */
if (!gnu_size && align == 0
- && (Is_Atomic (gnat_entity)
+ && (Is_Atomic_Or_VFA (gnat_entity)
|| (!Optimize_Alignment_Space (gnat_entity)
&& kind != E_Exception
&& kind != E_Out_Parameter
to support BIGGEST_ALIGNMENT if we don't really have to.
So we cap to the smallest alignment that corresponds to
a known efficient memory access pattern of the target. */
- if (Is_Atomic (gnat_entity))
+ if (Is_Atomic_Or_VFA (gnat_entity))
{
size_cap = UINT_MAX;
align_cap = BIGGEST_ALIGNMENT;
the padded record to assign to the object. We could fix this by
always copying via an intermediate value, but it's not clear it's
worth the effort. */
- if (Is_Atomic (gnat_entity))
+ if (Is_Atomic_Or_VFA (gnat_entity))
check_ok_for_atomic_type (gnu_type, gnat_entity, false);
/* If this is an aliased object with an unconstrained nominal subtype,
|| imported_p
|| Present (Address_Clause (gnat_entity)))))
&& !TYPE_VOLATILE (gnu_type))
- gnu_type = change_qualified_type (gnu_type, TYPE_QUAL_VOLATILE);
+ {
+ const int quals
+ = TYPE_QUAL_VOLATILE
+ | (Is_Atomic_Or_VFA (gnat_entity) ? TYPE_QUAL_ATOMIC : 0);
+ gnu_type = change_qualified_type (gnu_type, quals);
+ }
/* If we are defining an aliased object whose nominal subtype is
unconstrained, the object is a record that contains both the
TYPE_MULTI_ARRAY_P (tem) = (index > 0);
if (array_type_has_nonaliased_component (tem, gnat_entity))
TYPE_NONALIASED_COMPONENT (tem) = 1;
-
- /* If it is passed by reference, force BLKmode to ensure that
- objects of this type will always be put in memory. */
- if (TYPE_MODE (tem) != BLKmode
- && Is_By_Reference_Type (gnat_entity))
- SET_TYPE_MODE (tem, BLKmode);
}
- TYPE_VOLATILE (tem) = Treat_As_Volatile (gnat_entity);
-
/* If an alignment is specified, use it if valid. But ignore it
for the original type of packed array types. If the alignment
was requested with an explicit alignment clause, state so. */
TYPE_CONVENTION_FORTRAN_P (tem) = convention_fortran_p;
+ if (Treat_As_Volatile (gnat_entity))
+ tem = change_qualified_type (tem, TYPE_QUAL_VOLATILE);
+
/* Adjust the type of the pointer-to-array field of the fat pointer
and record the aliasing relationships if necessary. */
TREE_TYPE (TYPE_FIELDS (gnu_fat_type)) = build_pointer_type (tem);
First check to see if this is simply a renaming of the array type.
If so, the result is the array type. */
- gnu_type = gnat_to_gnu_type (Etype (gnat_entity));
+ gnu_type = TYPE_MAIN_VARIANT (gnat_to_gnu_type (Etype (gnat_entity)));
if (!Is_Constrained (gnat_entity))
;
else
TYPE_MULTI_ARRAY_P (gnu_type) = (index > 0);
if (array_type_has_nonaliased_component (gnu_type, gnat_entity))
TYPE_NONALIASED_COMPONENT (gnu_type) = 1;
-
- /* See the E_Array_Type case for the rationale. */
- if (TYPE_MODE (gnu_type) != BLKmode
- && Is_By_Reference_Type (gnat_entity))
- SET_TYPE_MODE (gnu_type, BLKmode);
}
- TYPE_VOLATILE (gnu_type) = Treat_As_Volatile (gnat_entity);
-
/* Attach the TYPE_STUB_DECL in case we have a parallel type. */
TYPE_STUB_DECL (gnu_type)
= create_type_stub_decl (gnu_entity_name, gnu_type);
debugging information for it. */
process_attributes (&gnu_type, &attr_list, false, gnat_entity);
if (Treat_As_Volatile (gnat_entity))
- gnu_type
- = change_qualified_type (gnu_type, TYPE_QUAL_VOLATILE);
+ {
+ const int quals
+ = TYPE_QUAL_VOLATILE
+ | (Is_Atomic_Or_VFA (gnat_entity) ? TYPE_QUAL_ATOMIC : 0);
+ gnu_type = change_qualified_type (gnu_type, quals);
+ }
/* Make it artificial only if the base type was artificial too.
That's sort of "morally" true and will make it possible for
the debugger to look it up by name in DWARF, which is needed
if (Known_Alignment (gnat_entity))
TYPE_ALIGN (gnu_type)
= validate_alignment (Alignment (gnat_entity), gnat_entity, 0);
- else if (Is_Atomic (gnat_entity) && Known_Esize (gnat_entity))
+ else if (Is_Atomic_Or_VFA (gnat_entity) && Known_Esize (gnat_entity))
{
unsigned int size = UI_To_Int (Esize (gnat_entity));
TYPE_ALIGN (gnu_type)
false, OK_To_Reorder_Components (gnat_entity),
all_rep ? NULL_TREE : bitsize_zero_node, NULL);
- /* If it is passed by reference, force BLKmode to ensure that objects
- of this type will always be put in memory. */
- if (TYPE_MODE (gnu_type) != BLKmode
- && Is_By_Reference_Type (gnat_entity))
- SET_TYPE_MODE (gnu_type, BLKmode);
-
- TYPE_VOLATILE (gnu_type) = Treat_As_Volatile (gnat_entity);
-
/* Fill in locations of fields. */
annotate_rep (gnat_entity, gnu_type);
this_deferred = true;
}
- gnu_base_type = gnat_to_gnu_type (gnat_base_type);
+ gnu_base_type
+ = TYPE_MAIN_VARIANT (gnat_to_gnu_type (gnat_base_type));
if (present_gnu_tree (gnat_entity))
{
false);
compute_record_mode (gnu_type);
- /* See the E_Record_Type case for the rationale. */
- if (TYPE_MODE (gnu_type) != BLKmode
- && Is_By_Reference_Type (gnat_entity))
- SET_TYPE_MODE (gnu_type, BLKmode);
-
- TYPE_VOLATILE (gnu_type) = Treat_As_Volatile (gnat_entity);
-
/* Fill in locations of fields. */
annotate_rep (gnat_entity, gnu_type);
bool const_flag
= (Exception_Mechanism == Back_End_Exceptions
&& Is_Pure (gnat_entity));
- bool volatile_flag = No_Return (gnat_entity);
+ bool noreturn_flag = No_Return (gnat_entity);
bool return_by_direct_ref_p = false;
bool return_by_invisi_ref_p = false;
bool return_unconstrained_p = false;
if (TREE_CODE (gnu_return_type) == VOID_TYPE || return_unconstrained_p)
const_flag = false;
- if (const_flag || volatile_flag)
+ if (const_flag || noreturn_flag)
{
const int quals
= (const_flag ? TYPE_QUAL_CONST : 0)
- | (volatile_flag ? TYPE_QUAL_VOLATILE : 0);
-
+ | (noreturn_flag ? TYPE_QUAL_VOLATILE : 0);
gnu_type = change_qualified_type (gnu_type, quals);
}
gnat_entity);
}
}
- else if (Is_Atomic (gnat_entity) && !gnu_size
+ else if (Is_Atomic_Or_VFA (gnat_entity) && !gnu_size
&& tree_fits_uhwi_p (TYPE_SIZE (gnu_type))
&& integer_pow2p (TYPE_SIZE (gnu_type)))
align = MIN (BIGGEST_ALIGNMENT,
tree_to_uhwi (TYPE_SIZE (gnu_type)));
- else if (Is_Atomic (gnat_entity) && gnu_size
+ else if (Is_Atomic_Or_VFA (gnat_entity) && gnu_size
&& tree_fits_uhwi_p (gnu_size)
&& integer_pow2p (gnu_size))
align = MIN (BIGGEST_ALIGNMENT, tree_to_uhwi (gnu_size));
}
}
- if (Is_Atomic (gnat_entity))
+ if (Is_Atomic_Or_VFA (gnat_entity))
check_ok_for_atomic_type (gnu_type, gnat_entity, false);
/* If this is not an unconstrained array type, set some flags. */
if (TREE_CODE (gnu_type) != UNCONSTRAINED_ARRAY_TYPE)
{
- if (Treat_As_Volatile (gnat_entity))
- gnu_type = change_qualified_type (gnu_type, TYPE_QUAL_VOLATILE);
-
if (Present (Alignment_Clause (gnat_entity)))
TYPE_USER_ALIGN (gnu_type) = 1;
if (Universal_Aliasing (gnat_entity))
- TYPE_UNIVERSAL_ALIASING_P (TYPE_MAIN_VARIANT (gnu_type)) = 1;
+ TYPE_UNIVERSAL_ALIASING_P (gnu_type) = 1;
+
+ /* If it is passed by reference, force BLKmode to ensure that
+ objects of this type will always be put in memory. */
+ if (TYPE_MODE (gnu_type) != BLKmode
+ && AGGREGATE_TYPE_P (gnu_type)
+ && TYPE_BY_REFERENCE_P (gnu_type))
+ SET_TYPE_MODE (gnu_type, BLKmode);
+
+ if (Treat_As_Volatile (gnat_entity))
+ {
+ const int quals
+ = TYPE_QUAL_VOLATILE
+ | (Is_Atomic_Or_VFA (gnat_entity) ? TYPE_QUAL_ATOMIC : 0);
+ gnu_type = change_qualified_type (gnu_type, quals);
+ }
}
if (!gnu_decl)
}
if (Has_Volatile_Components (gnat_array))
- gnu_type = change_qualified_type (gnu_type, TYPE_QUAL_VOLATILE);
+ {
+ const int quals
+ = TYPE_QUAL_VOLATILE
+ | (Has_Atomic_Components (gnat_array) ? TYPE_QUAL_ATOMIC : 0);
+ gnu_type = change_qualified_type (gnu_type, quals);
+ }
return gnu_type;
}
const bool is_aliased
= Is_Aliased (gnat_field);
const bool is_atomic
- = (Is_Atomic (gnat_field) || Is_Atomic (gnat_field_type));
+ = (Is_Atomic_Or_VFA (gnat_field) || Is_Atomic_Or_VFA (gnat_field_type));
const bool is_independent
= (Is_Independent (gnat_field) || Is_Independent (gnat_field_type));
const bool is_volatile
}
}
- if (Is_Atomic (gnat_field))
+ if (Is_Atomic_Or_VFA (gnat_field))
check_ok_for_atomic_type (gnu_field_type, gnat_field, false);
if (Present (Component_Clause (gnat_field)))
if (component_p)
post_error_ne ("atomic access to component of & cannot be guaranteed",
gnat_error_point, gnat_entity);
+ else if (Is_Volatile_Full_Access (gnat_entity))
+ post_error_ne ("volatile full access to & cannot be guaranteed",
+ gnat_error_point, gnat_entity);
else
post_error_ne ("atomic access to & cannot be guaranteed",
gnat_error_point, gnat_entity);
of 2. */
extern bool value_factor_p (tree value, HOST_WIDE_INT factor);
-/* Build an atomic load for the underlying atomic object in SRC. */
-extern tree build_atomic_load (tree src);
+/* Build an atomic load for the underlying atomic object in SRC. SYNC is
+ true if the load requires synchronization. */
+extern tree build_atomic_load (tree src, bool sync);
-/* Build an atomic store from SRC to the underlying atomic object in DEST. */
-extern tree build_atomic_store (tree dest, tree src);
+/* Build an atomic store from SRC to the underlying atomic object in DEST.
+ SYNC is true if the store requires synchronization. */
+extern tree build_atomic_store (tree dest, tree src, bool sync);
+
+/* Build a load-modify-store sequence from SRC to DEST. GNAT_NODE is used for
+ the location of the sequence. Note that, even if the load and the store are
+ both atomic, the sequence itself is not atomic. */
+extern tree build_load_modify_store (tree dest, tree src, Node_Id gnat_node);
/* Make a binary operation of kind OP_CODE. RESULT_TYPE is the type
desired for the result. Usually the operation is to be performed
}
#endif
-/* Convenient shortcuts. */
-#define VECTOR_TYPE_P(TYPE) (TREE_CODE (TYPE) == VECTOR_TYPE)
-
/* If EXP's type is a VECTOR_TYPE, return EXP converted to the associated
TYPE_REPRESENTATIVE_ARRAY. */
return exp;
}
+/* Return the smallest power of 2 larger than X. */
+
static inline unsigned HOST_WIDE_INT
ceil_pow2 (unsigned HOST_WIDE_INT x)
{
get_alias_set (TREE_TYPE (TREE_TYPE (TYPE_FIELDS (TREE_TYPE (type)))));
/* If the type can alias any other types, return the alias set 0. */
- else if (TYPE_P (type)
- && TYPE_UNIVERSAL_ALIASING_P (TYPE_MAIN_VARIANT (type)))
+ else if (TYPE_P (type) && TYPE_UNIVERSAL_ALIASING_P (type))
return 0;
return -1;
the actual assignment might end up being done component-wise. */
return (!constant
|| (Is_Composite_Type (Underlying_Type (Etype (gnat_node)))
- && Is_Atomic (Defining_Entity (gnat_parent)))
+ && Is_Atomic_Or_VFA (Defining_Entity (gnat_parent)))
/* We don't use a constructor if this is a class-wide object
because the effective type of the object is the equivalent
type of the class-wide subtype and it smashes most of the
return (!constant
|| Name (gnat_parent) == gnat_node
|| (Is_Composite_Type (Underlying_Type (Etype (gnat_node)))
- && Is_Atomic (Entity (Name (gnat_parent)))));
+ && Is_Entity_Name (Name (gnat_parent))
+ && Is_Atomic_Or_VFA (Entity (Name (gnat_parent)))));
case N_Unchecked_Type_Conversion:
if (!constant)
rest_of_subprog_body_compilation (gnu_subprog_decl);
}
\f
-/* Return true if GNAT_NODE requires atomic synchronization. */
+/* Return true if GNAT_NODE references an Atomic entity. */
static bool
-atomic_sync_required_p (Node_Id gnat_node)
+node_is_atomic (Node_Id gnat_node)
{
- const Node_Id gnat_parent = Parent (gnat_node);
- Node_Kind kind;
- unsigned char attr_id;
+ Entity_Id gnat_entity;
- /* First, scan the node to find the Atomic_Sync_Required flag. */
- kind = Nkind (gnat_node);
- if (kind == N_Type_Conversion || kind == N_Unchecked_Type_Conversion)
+ switch (Nkind (gnat_node))
{
- gnat_node = Expression (gnat_node);
- kind = Nkind (gnat_node);
+ case N_Identifier:
+ case N_Expanded_Name:
+ gnat_entity = Entity (gnat_node);
+ if (Ekind (gnat_entity) != E_Variable)
+ break;
+ return Is_Atomic (gnat_entity) || Is_Atomic (Etype (gnat_entity));
+
+ case N_Selected_Component:
+ gnat_entity = Entity (Selector_Name (gnat_node));
+ return Is_Atomic (gnat_entity) || Is_Atomic (Etype (gnat_entity));
+
+ case N_Indexed_Component:
+ if (Has_Atomic_Components (Etype (Prefix (gnat_node))))
+ return true;
+
+ /* ... fall through ... */
+
+ case N_Explicit_Dereference:
+ return Is_Atomic (Etype (gnat_node));
+
+ default:
+ break;
}
- switch (kind)
+ return false;
+}
+
+/* Return true if GNAT_NODE references a Volatile_Full_Access entity. */
+
+static bool
+node_has_volatile_full_access (Node_Id gnat_node)
+{
+ Entity_Id gnat_entity;
+
+ switch (Nkind (gnat_node))
{
- case N_Expanded_Name:
- case N_Explicit_Dereference:
case N_Identifier:
- case N_Indexed_Component:
+ case N_Expanded_Name:
+ gnat_entity = Entity (gnat_node);
+ if (Ekind (gnat_entity) != E_Variable)
+ break;
+ return Is_Volatile_Full_Access (gnat_entity)
+ || Is_Volatile_Full_Access (Etype (gnat_entity));
+
case N_Selected_Component:
- if (!Atomic_Sync_Required (gnat_node))
- return false;
- break;
+ gnat_entity = Entity (Selector_Name (gnat_node));
+ return Is_Volatile_Full_Access (gnat_entity)
+ || Is_Volatile_Full_Access (Etype (gnat_entity));
+
+ case N_Indexed_Component:
+ case N_Explicit_Dereference:
+ return Is_Volatile_Full_Access (Etype (gnat_node));
default:
- return false;
+ break;
}
- /* Then, scan the parent to find out cases where the flag is irrelevant. */
- kind = Nkind (gnat_parent);
- switch (kind)
+ return false;
+}
+
+/* Strip any type conversion on GNAT_NODE and return the result. */
+
+static Node_Id
+gnat_strip_type_conversion (Node_Id gnat_node)
+{
+ Node_Kind kind = Nkind (gnat_node);
+
+ if (kind == N_Type_Conversion || kind == N_Unchecked_Type_Conversion)
+ gnat_node = Expression (gnat_node);
+
+ return gnat_node;
+}
+
+/* Return true if GNAT_NODE requires outer atomic access, i.e. atomic access
+ of an object of which GNAT_NODE is a component. */
+
+static bool
+outer_atomic_access_required_p (Node_Id gnat_node)
+{
+ gnat_node = gnat_strip_type_conversion (gnat_node);
+
+ while (Nkind (gnat_node) == N_Indexed_Component
+ || Nkind (gnat_node) == N_Selected_Component
+ || Nkind (gnat_node) == N_Slice)
+ {
+ gnat_node = gnat_strip_type_conversion (Prefix (gnat_node));
+ if (node_has_volatile_full_access (gnat_node))
+ return true;
+ }
+
+ return false;
+}
+
+/* Return true if GNAT_NODE requires atomic access and set SYNC according to
+ the associated synchronization setting. */
+
+static bool
+atomic_access_required_p (Node_Id gnat_node, bool *sync)
+{
+ const Node_Id gnat_parent = Parent (gnat_node);
+ unsigned char attr_id;
+ bool as_a_whole = true;
+
+ /* First, scan the parent to find out cases where the flag is irrelevant. */
+ switch (Nkind (gnat_parent))
{
case N_Attribute_Reference:
attr_id = Get_Attribute_Id (Attribute_Name (gnat_parent));
/* Do not mess up machine code insertions. */
if (attr_id == Attr_Asm_Input || attr_id == Attr_Asm_Output)
return false;
+
+ /* Nothing to do if we are the prefix of an attribute, since we do not
+ want an atomic access for things like 'Size. */
+
+ /* ... fall through ... */
+
+ case N_Reference:
+ /* The N_Reference node is like an attribute. */
+ if (Prefix (gnat_parent) == gnat_node)
+ return false;
+ break;
+
+ case N_Indexed_Component:
+ case N_Selected_Component:
+ case N_Slice:
+ /* If we are the prefix, then the access is only partial. */
+ if (Prefix (gnat_parent) == gnat_node)
+ as_a_whole = false;
break;
case N_Object_Renaming_Declaration:
- /* Do not generate a function call as a renamed object. */
+ /* Nothing to do for the identifier in an object renaming declaration;
+ the renaming itself does not need atomic access. */
return false;
default:
break;
}
+ /* Then, scan the node to find the atomic object. */
+ gnat_node = gnat_strip_type_conversion (gnat_node);
+
+ /* For Atomic itself, only reads and updates of the object as a whole require
+ atomic access (RM C.6 (15)). But for Volatile_Full_Access, all reads and
+ updates require atomic access. */
+ if (!(as_a_whole && node_is_atomic (gnat_node))
+ && !node_has_volatile_full_access (gnat_node))
+ return false;
+
+ /* If an outer atomic access will also be required, it cancels this one. */
+ if (outer_atomic_access_required_p (gnat_node))
+ return false;
+
+ *sync = Atomic_Sync_Required (gnat_node);
+
return true;
}
\f
GNU_RESULT_TYPE_P is a pointer to where we should place the result type.
If GNU_TARGET is non-null, this must be a function call on the RHS of a
N_Assignment_Statement and the result is to be placed into that object.
- If, in addition, ATOMIC_SYNC is true, then the assignment to GNU_TARGET
- requires atomic synchronization. */
+ If OUTER_ATOMIC_ACCESS is true, then the assignment to GNU_TARGET must be a
+ load-modify-store sequence. Otherwise, if ATOMIC_ACCESS is true, then the
+ assignment to GNU_TARGET must be atomic. If, in addition, ATOMIC_SYNC is
+ true, then the assignment to GNU_TARGET requires atomic synchronization. */
static tree
Call_to_gnu (Node_Id gnat_node, tree *gnu_result_type_p, tree gnu_target,
- bool atomic_sync)
+ bool outer_atomic_access, bool atomic_access, bool atomic_sync)
{
const bool function_call = (Nkind (gnat_node) == N_Function_Call);
const bool returning_value = (function_call && !gnu_target);
bool pushed_binding_level = false;
Entity_Id gnat_formal;
Node_Id gnat_actual;
+ bool sync;
gcc_assert (TREE_CODE (gnu_subprog_type) == FUNCTION_TYPE);
/* Start from the real object and build the actual. */
gnu_actual = gnu_name;
- /* If this is an atomic access of an In or In Out parameter for which
- synchronization is required, build the atomic load. */
+ /* If atomic access is required for an In or In Out actual parameter,
+ build the atomic load. */
if (is_true_formal_parm
&& !is_by_ref_formal_parm
&& Ekind (gnat_formal) != E_Out_Parameter
- && atomic_sync_required_p (gnat_actual))
- gnu_actual = build_atomic_load (gnu_actual);
+ && atomic_access_required_p (gnat_actual, &sync))
+ gnu_actual = build_atomic_load (gnu_actual, sync);
/* If this was a procedure call, we may not have removed any padding.
So do it here for the part we will use as an input, if any. */
gnu_result = convert (TREE_TYPE (gnu_actual), gnu_result);
}
- if (atomic_sync_required_p (gnat_actual))
- gnu_result = build_atomic_store (gnu_actual, gnu_result);
+ /* If an outer atomic access is required for an actual parameter,
+ build the load-modify-store sequence. */
+ if (outer_atomic_access_required_p (gnat_actual))
+ gnu_result
+ = build_load_modify_store (gnu_actual, gnu_result, gnat_node);
+
+ /* Or else, if simple atomic access is required, build the atomic
+ store. */
+ else if (atomic_access_required_p (gnat_actual, &sync))
+ gnu_result = build_atomic_store (gnu_actual, gnu_result, sync);
+
+ /* Otherwise build a regular assignment. */
else
gnu_result = build_binary_op (MODIFY_EXPR, NULL_TREE,
gnu_actual, gnu_result);
- set_expr_location_from_node (gnu_result, gnat_node);
+
+ if (EXPR_P (gnu_result))
+ set_expr_location_from_node (gnu_result, gnat_node);
append_to_statement_list (gnu_result, &gnu_stmt_list);
gnu_cico_list = TREE_CHAIN (gnu_cico_list);
gnu_name_list = TREE_CHAIN (gnu_name_list);
else
op_code = MODIFY_EXPR;
- if (atomic_sync)
- gnu_call = build_atomic_store (gnu_target, gnu_call);
+ /* Use the required method to move the result to the target. */
+ if (outer_atomic_access)
+ gnu_call
+ = build_load_modify_store (gnu_target, gnu_call, gnat_node);
+ else if (atomic_access)
+ gnu_call = build_atomic_store (gnu_target, gnu_call, atomic_sync);
else
gnu_call
= build_binary_op (op_code, NULL_TREE, gnu_target, gnu_call);
- set_expr_location_from_node (gnu_call, gnat_parent);
+
+ if (EXPR_P (gnu_call))
+ set_expr_location_from_node (gnu_call, gnat_parent);
append_to_statement_list (gnu_call, &gnu_stmt_list);
}
else
tree gnu_result_type = void_type_node;
tree gnu_expr, gnu_lhs, gnu_rhs;
Node_Id gnat_temp;
+ bool sync;
/* Save node number for error message and set location information. */
error_gnat_node = gnat_node;
case N_Defining_Identifier:
gnu_result = Identifier_to_gnu (gnat_node, &gnu_result_type);
- /* If this is an atomic access on the RHS for which synchronization is
- required, build the atomic load. */
- if (atomic_sync_required_p (gnat_node)
+ /* If atomic access is required on the RHS, build the atomic load. */
+ if (atomic_access_required_p (gnat_node, &sync)
&& !present_in_lhs_or_actual_p (gnat_node))
- gnu_result = build_atomic_load (gnu_result);
+ gnu_result = build_atomic_load (gnu_result, sync);
break;
case N_Integer_Literal:
/* Don't do anything if this renaming is handled by the front end or if
we are just annotating types and this object has a composite or task
- type, don't elaborate it. We return the result in case it contains
- any SAVE_EXPRs that need to be evaluated here, but this cannot occur
- at the global level (see Renaming, case 2 in gnat_to_gnu_entity). */
+ type, don't elaborate it. */
if (!Is_Renaming_Of_Object (gnat_temp)
&& ! (type_annotate_only
&& (Is_Array_Type (Etype (gnat_temp))
tree gnu_temp
= gnat_to_gnu_entity (gnat_temp,
gnat_to_gnu (Renamed_Object (gnat_temp)), 1);
- if (!global_bindings_p ())
- gnu_result = gnu_temp;
+ /* We need to make sure that the side-effects of the renamed object
+ are evaluated at this point, so we evaluate its address. */
+ if (TREE_SIDE_EFFECTS (gnu_temp))
+ gnu_result = build_unary_op (ADDR_EXPR, NULL_TREE, gnu_temp);
}
break;
tree gnu_temp
= gnat_to_gnu_entity (gnat_temp,
gnat_to_gnu (Renamed_Entity (gnat_temp)), 1);
- if (!global_bindings_p ())
- gnu_result = gnu_temp;
+ if (TREE_SIDE_EFFECTS (gnu_temp))
+ gnu_result = build_unary_op (ADDR_EXPR, NULL_TREE, gnu_temp);
}
break;
gnu_result_type = get_unpadded_type (Etype (gnat_node));
gnu_result = build_unary_op (INDIRECT_REF, NULL_TREE, gnu_result);
- /* If this is an atomic access on the RHS for which synchronization is
- required, build the atomic load. */
- if (atomic_sync_required_p (gnat_node)
+ /* If atomic access is required on the RHS, build the atomic load. */
+ if (atomic_access_required_p (gnat_node, &sync)
&& !present_in_lhs_or_actual_p (gnat_node))
- gnu_result = build_atomic_load (gnu_result);
+ gnu_result = build_atomic_load (gnu_result, sync);
break;
case N_Indexed_Component:
gnu_result_type = get_unpadded_type (Etype (gnat_node));
- /* If this is an atomic access on the RHS for which synchronization is
- required, build the atomic load. */
- if (atomic_sync_required_p (gnat_node)
+ /* If atomic access is required on the RHS, build the atomic load. */
+ if (atomic_access_required_p (gnat_node, &sync)
&& !present_in_lhs_or_actual_p (gnat_node))
- gnu_result = build_atomic_load (gnu_result);
+ gnu_result = build_atomic_load (gnu_result, sync);
}
break;
gnu_result_type = get_unpadded_type (Etype (gnat_node));
- /* If this is an atomic access on the RHS for which synchronization is
- required, build the atomic load. */
- if (atomic_sync_required_p (gnat_node)
+ /* If atomic access is required on the RHS, build the atomic load. */
+ if (atomic_access_required_p (gnat_node, &sync)
&& !present_in_lhs_or_actual_p (gnat_node))
- gnu_result = build_atomic_load (gnu_result);
+ gnu_result = build_atomic_load (gnu_result, sync);
}
break;
gnu_result = build_call_raise (SE_Object_Too_Large, gnat_node,
N_Raise_Storage_Error);
else if (Nkind (Expression (gnat_node)) == N_Function_Call)
- gnu_result
- = Call_to_gnu (Expression (gnat_node), &gnu_result_type, gnu_lhs,
- atomic_sync_required_p (Name (gnat_node)));
+ {
+ bool outer_atomic_access
+ = outer_atomic_access_required_p (Name (gnat_node));
+ bool atomic_access
+ = !outer_atomic_access
+ && atomic_access_required_p (Name (gnat_node), &sync);
+ gnu_result
+ = Call_to_gnu (Expression (gnat_node), &gnu_result_type, gnu_lhs,
+ outer_atomic_access, atomic_access, sync);
+ }
else
{
const Node_Id gnat_expr = Expression (gnat_node);
gnu_rhs = emit_range_check (gnu_rhs, Etype (Name (gnat_node)),
gnat_node);
- /* If atomic synchronization is required, build an atomic store. */
- if (atomic_sync_required_p (Name (gnat_node)))
- gnu_result = build_atomic_store (gnu_lhs, gnu_rhs);
+ /* If an outer atomic access is required on the LHS, build the load-
+ modify-store sequence. */
+ if (outer_atomic_access_required_p (Name (gnat_node)))
+ gnu_result = build_load_modify_store (gnu_lhs, gnu_rhs, gnat_node);
+
+ /* Or else, if atomic access is required, build the atomic store. */
+ else if (atomic_access_required_p (Name (gnat_node), &sync))
+ gnu_result = build_atomic_store (gnu_lhs, gnu_rhs, sync);
/* Or else, use memset when the conditions are met. */
else if (use_memset_p)
case N_Function_Call:
case N_Procedure_Call_Statement:
- gnu_result = Call_to_gnu (gnat_node, &gnu_result_type, NULL_TREE, false);
+ gnu_result = Call_to_gnu (gnat_node, &gnu_result_type, NULL_TREE,
+ false, false, false);
break;
/************************/
}
\f
/* Subroutine of assoc_to_constructor: VALUES is a list of field associations,
- some of which are from RECORD_TYPE. Return a CONSTRUCTOR consisting
- of the associations that are from RECORD_TYPE. If we see an internal
- record, make a recursive call to fill it in as well. */
+ some of which are from RECORD_TYPE. Return a CONSTRUCTOR consisting of the
+ associations that are from RECORD_TYPE. If we see an internal record, make
+ a recursive call to fill it in as well. */
static tree
extract_values (tree values, tree record_type)
return 0;
}
-/* Build an atomic load for the underlying atomic object in SRC. */
+/* Build an atomic load for the underlying atomic object in SRC. SYNC is
+ true if the load requires synchronization. */
tree
-build_atomic_load (tree src)
+build_atomic_load (tree src, bool sync)
{
tree ptr_type
= build_pointer_type
- (build_qualified_type (void_type_node, TYPE_QUAL_VOLATILE));
- tree mem_model = build_int_cst (integer_type_node, MEMMODEL_SEQ_CST);
+ (build_qualified_type (void_type_node,
+ TYPE_QUAL_ATOMIC | TYPE_QUAL_VOLATILE));
+ tree mem_model
+ = build_int_cst (integer_type_node,
+ sync ? MEMMODEL_SEQ_CST : MEMMODEL_RELAXED);
tree orig_src = src;
tree t, addr, val;
unsigned int size;
return convert (TREE_TYPE (orig_src), t);
}
-/* Build an atomic store from SRC to the underlying atomic object in DEST. */
+/* Build an atomic store from SRC to the underlying atomic object in DEST.
+ SYNC is true if the store requires synchronization. */
tree
-build_atomic_store (tree dest, tree src)
+build_atomic_store (tree dest, tree src, bool sync)
{
tree ptr_type
= build_pointer_type
- (build_qualified_type (void_type_node, TYPE_QUAL_VOLATILE));
- tree mem_model = build_int_cst (integer_type_node, MEMMODEL_SEQ_CST);
+ (build_qualified_type (void_type_node,
+ TYPE_QUAL_ATOMIC | TYPE_QUAL_VOLATILE));
+ tree mem_model
+ = build_int_cst (integer_type_node,
+ sync ? MEMMODEL_SEQ_CST : MEMMODEL_RELAXED);
tree orig_dest = dest;
tree t, int_type, addr;
unsigned int size;
return build_call_expr (t, 3, addr, src, mem_model);
}
+
+/* Return true if EXP, a CALL_EXPR, is an atomic load. */
+
+static bool
+call_is_atomic_load (tree exp)
+{
+ tree fndecl = get_callee_fndecl (exp);
+
+ if (!(fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL))
+ return false;
+
+ enum built_in_function code = DECL_FUNCTION_CODE (fndecl);
+ return BUILT_IN_ATOMIC_LOAD_N <= code && code <= BUILT_IN_ATOMIC_LOAD_16;
+}
+
+/* Build a load-modify-store sequence from SRC to DEST. GNAT_NODE is used for
+ the location of the sequence. Note that, even if the load and the store are
+ both atomic, the sequence itself is not atomic. */
+
+tree
+build_load_modify_store (tree dest, tree src, Node_Id gnat_node)
+{
+ tree ref = dest;
+
+ while (handled_component_p (ref))
+ {
+ /* The load should already have been generated during the translation
+ of the GNAT destination tree; find it in the GNU tree. */
+ if (TREE_CODE (TREE_OPERAND (ref, 0)) == VIEW_CONVERT_EXPR)
+ {
+ tree op = TREE_OPERAND (TREE_OPERAND (ref, 0), 0);
+ if (TREE_CODE (op) == CALL_EXPR && call_is_atomic_load (op))
+ {
+ tree type = TREE_TYPE (TREE_OPERAND (ref, 0));
+ tree t = CALL_EXPR_ARG (op, 0);
+ tree obj, temp, stmt;
+
+ /* Find out the loaded object. */
+ if (TREE_CODE (t) == NOP_EXPR)
+ t = TREE_OPERAND (t, 0);
+ if (TREE_CODE (t) == ADDR_EXPR)
+ obj = TREE_OPERAND (t, 0);
+ else
+ obj = build1 (INDIRECT_REF, type, t);
+
+ /* Drop atomic and volatile qualifiers for the temporary. */
+ type = TYPE_MAIN_VARIANT (type);
+
+ /* And drop BLKmode, if need be, to put it into a register. */
+ if (TYPE_MODE (type) == BLKmode)
+ {
+ unsigned int size = tree_to_uhwi (TYPE_SIZE (type));
+ type = copy_type (type);
+ SET_TYPE_MODE (type, mode_for_size (size, MODE_INT, 0));
+ }
+
+ /* Create the temporary by inserting a SAVE_EXPR. */
+ temp = build1 (SAVE_EXPR, type,
+ build1 (VIEW_CONVERT_EXPR, type, op));
+ TREE_OPERAND (ref, 0) = temp;
+
+ start_stmt_group ();
+
+ /* Build the modify of the temporary. */
+ stmt = build_binary_op (MODIFY_EXPR, NULL_TREE, dest, src);
+ add_stmt_with_node (stmt, gnat_node);
+
+ /* Build the store to the object. */
+ stmt = build_atomic_store (obj, temp, false);
+ add_stmt_with_node (stmt, gnat_node);
+
+ return end_stmt_group ();
+ }
+ }
+
+ ref = TREE_OPERAND (ref, 0);
+ }
+
+ /* Something went wrong earlier if we have not found the atomic load. */
+ gcc_unreachable ();
+}
\f
/* Make a binary operation of kind OP_CODE. RESULT_TYPE is the type
desired for the result. Usually the operation is to be performed
strip anything that get_inner_reference can handle. Then remove any
conversions between types having the same code and mode. And mark
VIEW_CONVERT_EXPRs with TREE_ADDRESSABLE. When done, we must have
- either an INDIRECT_REF, a NULL_EXPR or a DECL node. */
+ either an INDIRECT_REF, a NULL_EXPR, a SAVE_EXPR or a DECL node. */
result = left_operand;
while (true)
{
gcc_assert (TREE_CODE (result) == INDIRECT_REF
|| TREE_CODE (result) == NULL_EXPR
+ || TREE_CODE (result) == SAVE_EXPR
|| DECL_P (result));
/* Convert the right operand to the operation type unless it is
break;
case CALL_EXPR:
- result = gnat_stabilize_reference_1 (ref, force);
+ if (call_is_atomic_load (ref))
+ result
+ = build_call_expr (TREE_OPERAND (CALL_EXPR_FN (ref), 0), 2,
+ gnat_stabilize_reference (CALL_EXPR_ARG (ref, 0),
+ force, success),
+ CALL_EXPR_ARG (ref, 1));
+ else
+ result = gnat_stabilize_reference_1 (ref, force);
break;
case COMPOUND_EXPR:
+2015-05-25 Eric Botcazou <ebotcazou@adacore.com>
+
+ * gnat.dg/vfa1_1.adb: New test.
+ * gnat.dg/vfa1_2.adb: Likewise.
+ * gnat.dg/vfa1_3.adb: Likewise.
+ * gnat.dg/vfa1_4.adb: Likewise.
+ * gnat.dg/vfa1_pkg.ads: New helper.
+
2015-05-25 Alexander Monakov <amonakov@ispras.ru>
* gcc.target/i386/pr66232-1.c: Adjust scan pattern.
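
The four tests below exercise VFA on scalar objects (vfa1_1), arrays
(vfa1_2), records (vfa1_3) and arrays of records (vfa1_4), and count the
atomic built-in calls in the gimple dump.  As a rough, hedged guide to the
scan patterns, a whole-object read and write of Buffer1 are expected to
show up in the dump along these lines (exact temporaries and sizes vary):

  _1 = __atomic_load_4 (&vfa1_pkg__buffer1, 0);    /* relaxed load */
  __atomic_store_4 (&vfa1_pkg__buffer1, _2, 0);    /* relaxed store */
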
--- /dev/null
+-- { dg-do compile }
+-- { dg-options "-fdump-tree-gimple" }
+
+with VFA1_Pkg; use VFA1_Pkg;
+
+procedure VFA1_1 is
+ Temp : Integer;
+
+ function F (I : Integer) return Integer is
+ begin
+ return I;
+ end;
+
+ function F2 return Integer is
+ begin
+ return Integer(Counter1);
+ end;
+
+ procedure P3 (I : out Integer) is
+ begin
+ null;
+ end;
+
+begin
+
+ Counter1 := Int(Counter2);
+ Counter2 := Integer(Counter1);
+
+ Temp := Integer(Counter1);
+ Counter1 := Int(Temp);
+
+ Temp := Counter2;
+ Counter2 := Temp;
+
+ Temp := Integer (Counter1) + Counter2;
+
+ if Counter1 /= Int (Counter2) then
+ raise Program_Error;
+ end if;
+
+ Temp := F(Integer (Counter1));
+ Counter1 := Int(F(Temp));
+
+ Temp := F(Counter2);
+ Counter2 := F(Temp);
+
+ Temp := F2;
+ P3 (Counter2);
+
+end;
+
+-- { dg-final { scan-tree-dump-times "atomic_load\[^\n\r\]*&vfa1_pkg__counter1" 6 "gimple"} }
+-- { dg-final { scan-tree-dump-times "atomic_load\[^\n\r\]*&vfa1_pkg__counter2" 5 "gimple"} }
+-- { dg-final { scan-tree-dump-times "atomic_load\[^\n\r\]*&temp" 0 "gimple"} }
+
+-- { dg-final { scan-tree-dump-times "atomic_store\[^\n\r\]*&vfa1_pkg__counter1" 3 "gimple"} }
+-- { dg-final { scan-tree-dump-times "atomic_store\[^\n\r\]*&vfa1_pkg__counter2" 4 "gimple"} }
+-- { dg-final { scan-tree-dump-times "atomic_store\[^\n\r\]*&temp" 0 "gimple"} }
+
+-- { dg-final { cleanup-tree-dump "gimple" } }
--- /dev/null
+-- { dg-do compile }
+-- { dg-options "-fdump-tree-gimple" }
+
+with VFA1_Pkg; use VFA1_Pkg;
+
+procedure VFA1_2 is
+ Temp : Int8_t;
+
+ function F (I : Int8_t) return Int8_t is
+ begin
+ return I;
+ end;
+
+ function F2 return Int8_t is
+ begin
+ return Int8_t(Timer1(1));
+ end;
+
+ procedure P3 (I : out Int8_t) is
+ begin
+ null;
+ end;
+
+begin
+
+ Temp := Timer1(1);
+ Timer1(2) := Temp;
+
+ Temp := Timer2(1);
+ Timer2(2) := Temp;
+
+ Temp := Timer1(1) + Timer2(2);
+
+ if Timer1(1) /= Timer2(2) then
+ raise Program_Error;
+ end if;
+
+ Temp := F(Timer1(1));
+ Timer2(2) := F(Temp);
+
+ Temp := F(Timer2(2));
+ Timer1(1) := F(Temp);
+
+ Temp := F2;
+ P3 (Timer2(2));
+
+end;
+
+-- { dg-final { scan-tree-dump-times "atomic_load\[^\n\r\]*&vfa1_pkg__timer1" 7 "gimple"} }
+-- { dg-final { scan-tree-dump-times "atomic_load\[^\n\r\]*&vfa1_pkg__timer2" 7 "gimple"} }
+-- { dg-final { scan-tree-dump-times "atomic_load\[^\n\r\]*&temp" 0 "gimple"} }
+
+-- { dg-final { scan-tree-dump-times "atomic_store\[^\n\r\]*&vfa1_pkg__timer1" 2 "gimple"} }
+-- { dg-final { scan-tree-dump-times "atomic_store\[^\n\r\]*&vfa1_pkg__timer2" 3 "gimple"} }
+-- { dg-final { scan-tree-dump-times "atomic_store\[^\n\r\]*&temp" 0 "gimple"} }
+
+-- { dg-final { cleanup-tree-dump "gimple" } }
--- /dev/null
+-- { dg-do compile }
+-- { dg-options "-fdump-tree-gimple" }
+
+with VFA1_Pkg; use VFA1_Pkg;
+
+procedure VFA1_3 is
+
+ Temp : Short_Integer;
+
+ function F (I : Short_Integer) return Short_Integer is
+ begin
+ return I;
+ end;
+
+ function F2 return Short_Integer is
+ begin
+ return Short_Integer(Buffer1.A);
+ end;
+
+ procedure P3 (I : out Short_Integer) is
+ begin
+ null;
+ end;
+
+begin
+
+ Temp := Buffer1.A;
+ Buffer1.B := Temp;
+
+ Temp := Buffer2.A;
+ Buffer2.B := Temp;
+
+ Temp := Buffer1.A + Buffer2.B;
+
+ if Buffer1.A /= Buffer2.B then
+ raise Program_Error;
+ end if;
+
+ Temp := F(Buffer1.A);
+ Buffer2.B := F(Temp);
+
+ Temp := F(Buffer2.B);
+ Buffer1.A := F(Temp);
+
+ Temp := F2;
+ P3 (Buffer2.B);
+
+end;
+
+-- { dg-final { scan-tree-dump-times "atomic_load\[^\n\r\]*&vfa1_pkg__buffer1" 7 "gimple"} }
+-- { dg-final { scan-tree-dump-times "atomic_load\[^\n\r\]*&vfa1_pkg__buffer2" 7 "gimple"} }
+-- { dg-final { scan-tree-dump-times "atomic_load\[^\n\r\]*&temp" 0 "gimple"} }
+
+-- { dg-final { scan-tree-dump-times "atomic_store\[^\n\r\]*&vfa1_pkg__buffer1" 2 "gimple"} }
+-- { dg-final { scan-tree-dump-times "atomic_store\[^\n\r\]*&vfa1_pkg__buffer2" 3 "gimple"} }
+-- { dg-final { scan-tree-dump-times "atomic_store\[^\n\r\]*&temp" 0 "gimple"} }
+
+-- { dg-final { cleanup-tree-dump "gimple" } }
--- /dev/null
+-- { dg-do compile }
+-- { dg-options "-fdump-tree-gimple" }
+
+with VFA1_Pkg; use VFA1_Pkg;
+
+procedure VFA1_4 is
+
+ Temp : Int8_t;
+
+ function F (I : Int8_t) return Int8_t is
+ begin
+ return I;
+ end;
+
+ function F2 return Int8_t is
+ begin
+ return Int8_t(Mixer1(1).R);
+ end;
+
+ procedure P3 (I : out Int8_t) is
+ begin
+ null;
+ end;
+
+begin
+
+ Temp := Mixer1(1).R;
+ Mixer1(2).R := Temp;
+
+ Temp := Mixer2(1).R;
+ Mixer2(2).R := Temp;
+
+ Temp := Mixer1(1).R + Mixer2(2).R;
+
+ if Mixer1(1).R /= Mixer2(2).R then
+ raise Program_Error;
+ end if;
+
+ Temp := F(Mixer1(1).R);
+ Mixer2(2).R := F(Temp);
+
+ Temp := F(Mixer2(2).R);
+ Mixer1(1).R := F(Temp);
+
+ Temp := F2;
+ P3 (Mixer2(2).R);
+
+end;
+
+-- { dg-final { scan-tree-dump-times "atomic_load\[^\n\r\]*&vfa1_pkg__mixer1" 7 "gimple"} }
+-- { dg-final { scan-tree-dump-times "atomic_load\[^\n\r\]*&vfa1_pkg__mixer2" 7 "gimple"} }
+-- { dg-final { scan-tree-dump-times "atomic_load\[^\n\r\]*&temp" 0 "gimple"} }
+
+-- { dg-final { scan-tree-dump-times "atomic_store\[^\n\r\]*&vfa1_pkg__mixer1" 2 "gimple"} }
+-- { dg-final { scan-tree-dump-times "atomic_store\[^\n\r\]*&vfa1_pkg__mixer2" 3 "gimple"} }
+-- { dg-final { scan-tree-dump-times "atomic_store\[^\n\r\]*&temp" 0 "gimple"} }
+
+-- { dg-final { cleanup-tree-dump "gimple" } }
--- /dev/null
+package VFA1_Pkg is
+
+ type Int8_t is mod 2**8;
+
+ type Int is new Integer;
+ pragma Volatile_Full_Access (Int);
+
+ Counter1 : Int;
+
+ Counter2 : Integer;
+ pragma Volatile_Full_Access (Counter2);
+
+ type Arr is array (1 .. 4) of Int8_t;
+ for Arr'Alignment use 4;
+ pragma Volatile_Full_Access (Arr);
+
+ Timer1 : Arr;
+
+ Timer2 : array (1 .. 4) of Int8_t;
+ for Timer2'Alignment use 4;
+ pragma Volatile_Full_Access (Timer2);
+
+ type Rec is record
+ A : Short_Integer;
+ B : Short_Integer;
+ end record;
+
+ type Rec_VFA is new Rec;
+ pragma Volatile_Full_Access (Rec_VFA);
+
+ Buffer1 : Rec_VFA;
+
+ Buffer2 : Rec;
+ pragma Volatile_Full_Access (Buffer2);
+
+ type Code is record
+ R : Int8_t;
+ I : Int8_t;
+ end record;
+ pragma Volatile_Full_Access (Code);
+
+ type CArr is array (1 .. 2) of Code;
+ pragma Volatile_Full_Access (CArr);
+
+ Mixer1 : CArr;
+
+ Mixer2 : array (1 .. 2) of Code;
+ pragma Volatile_Full_Access (Mixer2);
+
+end VFA1_Pkg;
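
A final note on the new SYNC parameter: build_atomic_load and
build_atomic_store now derive the memory model from it, picking seq_cst
only when synchronization is required (Atomic_Sync_Required) and relaxed
otherwise, e.g. for Volatile_Full_Access.  A minimal sketch, with
hypothetical helper names that are not GCC code:

#include <stdint.h>

static uint32_t
load_u32 (const uint32_t *obj, int sync)
{
  /* sync mirrors the SYNC parameter added in utils2.c.  */
  return __atomic_load_n (obj, sync ? __ATOMIC_SEQ_CST : __ATOMIC_RELAXED);
}

static void
store_u32 (uint32_t *obj, uint32_t val, int sync)
{
  __atomic_store_n (obj, val, sync ? __ATOMIC_SEQ_CST : __ATOMIC_RELAXED);
}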