return gnu_result;
}
\f
-/* This page implements a form of Named Return Value optimization modelled
+/* This page implements a form of Named Return Value optimization modeled
on the C++ optimization of the same name. The main difference is that
we disregard any semantical considerations when applying it here, the
counterpart being that we don't try to apply it to semantically loaded
rest_of_subprog_body_compilation (gnu_subprog_decl);
}
\f
-/* Return true if GNAT_NODE references an Atomic entity. */
+/* The type of an atomic access. */
+
+typedef enum { NOT_ATOMIC, SIMPLE_ATOMIC, OUTER_ATOMIC } atomic_acces_t;
+
+/* Return true if GNAT_NODE references an Atomic entity. This is modeled on
+ the Is_Atomic_Object predicate of the front-end, but additionally handles
+ explicit dereferences. */
static bool
node_is_atomic (Node_Id gnat_node)
return Is_Atomic (gnat_entity) || Is_Atomic (Etype (gnat_entity));
case N_Selected_Component:
- gnat_entity = Entity (Selector_Name (gnat_node));
- return Is_Atomic (gnat_entity) || Is_Atomic (Etype (gnat_entity));
+ return Is_Atomic (Etype (gnat_node))
+ || Is_Atomic (Entity (Selector_Name (gnat_node)));
case N_Indexed_Component:
- if (Has_Atomic_Components (Etype (Prefix (gnat_node))))
- return true;
- if (Is_Entity_Name (Prefix (gnat_node))
- && Has_Atomic_Components (Entity (Prefix (gnat_node))))
- return true;
-
- /* ... fall through ... */
+ return Is_Atomic (Etype (gnat_node))
+ || Has_Atomic_Components (Etype (Prefix (gnat_node)))
+ || (Is_Entity_Name (Prefix (gnat_node))
+ && Has_Atomic_Components (Entity (Prefix (gnat_node))));
case N_Explicit_Dereference:
return Is_Atomic (Etype (gnat_node));
return false;
}
-/* Return true if GNAT_NODE references a Volatile_Full_Access entity. */
+/* Return true if GNAT_NODE references a Volatile_Full_Access entity. This is
+ modeled on the Is_VFA_Object predicate of the front-end, but additionally
+ handles explicit dereferences. */
static bool
-node_has_volatile_full_access (Node_Id gnat_node)
+node_is_volatile_full_access (Node_Id gnat_node)
{
Entity_Id gnat_entity;
|| Is_Volatile_Full_Access (Etype (gnat_entity));
case N_Selected_Component:
- gnat_entity = Entity (Selector_Name (gnat_node));
- return Is_Volatile_Full_Access (gnat_entity)
- || Is_Volatile_Full_Access (Etype (gnat_entity));
+ return Is_Volatile_Full_Access (Etype (gnat_node))
+ || Is_Volatile_Full_Access (Entity (Selector_Name (gnat_node)));
case N_Indexed_Component:
case N_Explicit_Dereference:
return false;
}
-/* Strip any type conversion on GNAT_NODE and return the result. */
+/* Return true if GNAT_NODE references a component of a larger object. */
-static Node_Id
-gnat_strip_type_conversion (Node_Id gnat_node)
+static inline bool
+node_is_component (Node_Id gnat_node)
{
- Node_Kind kind = Nkind (gnat_node);
-
- if (kind == N_Type_Conversion || kind == N_Unchecked_Type_Conversion)
- gnat_node = Expression (gnat_node);
-
- return gnat_node;
+ const Node_Kind k = Nkind (gnat_node);
+ return
+ (k == N_Indexed_Component || k == N_Selected_Component || k == N_Slice);
}
-/* Return true if GNAT_NODE requires outer atomic access, i.e. atomic access
- of an object of which GNAT_NODE is a component. */
-
-static bool
-outer_atomic_access_required_p (Node_Id gnat_node)
-{
- gnat_node = gnat_strip_type_conversion (gnat_node);
-
- while (true)
- {
- switch (Nkind (gnat_node))
- {
- case N_Identifier:
- case N_Expanded_Name:
- if (No (Renamed_Object (Entity (gnat_node))))
- return false;
- gnat_node
- = gnat_strip_type_conversion (Renamed_Object (Entity (gnat_node)));
- break;
+/* Compute whether GNAT_NODE requires atomic access and set TYPE to the type
+ of access and SYNC according to the associated synchronization setting.
- case N_Indexed_Component:
- case N_Selected_Component:
- case N_Slice:
- gnat_node = gnat_strip_type_conversion (Prefix (gnat_node));
- if (node_has_volatile_full_access (gnat_node))
- return true;
- break;
+ We implement 3 different semantics of atomicity in this function:
- default:
- return false;
- }
- }
-
- gcc_unreachable ();
-}
+ 1. the Ada 95/2005/2012 semantics of the Atomic aspect/pragma,
+ 2. the Ada 2020 semantics of the Atomic aspect/pragma,
+ 3. the semantics of the Volatile_Full_Access GNAT aspect/pragma.
-/* Return true if GNAT_NODE requires atomic access and set SYNC according to
- the associated synchronization setting. */
+ They are mutually exclusive and the FE should have rejected conflicts. */
-static bool
-atomic_access_required_p (Node_Id gnat_node, bool *sync)
+static void
+get_atomic_access (Node_Id gnat_node, atomic_acces_t *type, bool *sync)
{
- const Node_Id gnat_parent = Parent (gnat_node);
+ Node_Id gnat_parent, gnat_temp;
unsigned char attr_id;
- bool as_a_whole = true;
- /* First, scan the parent to find out cases where the flag is irrelevant. */
+ /* First, scan the parent to filter out irrelevant cases. */
+ gnat_parent = Parent (gnat_node);
switch (Nkind (gnat_parent))
{
case N_Attribute_Reference:
attr_id = Get_Attribute_Id (Attribute_Name (gnat_parent));
/* Do not mess up machine code insertions. */
if (attr_id == Attr_Asm_Input || attr_id == Attr_Asm_Output)
- return false;
+ goto not_atomic;
/* Nothing to do if we are the prefix of an attribute, since we do not
want an atomic access for things like 'Size. */
case N_Reference:
/* The N_Reference node is like an attribute. */
if (Prefix (gnat_parent) == gnat_node)
- return false;
- break;
-
- case N_Indexed_Component:
- case N_Selected_Component:
- case N_Slice:
- /* If we are the prefix, then the access is only partial. */
- if (Prefix (gnat_parent) == gnat_node)
- as_a_whole = false;
+ goto not_atomic;
break;
case N_Object_Renaming_Declaration:
/* Nothing to do for the identifier in an object renaming declaration,
the renaming itself does not need atomic access. */
- return false;
+ goto not_atomic;
default:
break;
}
- /* Then, scan the node to find the atomic object. */
- gnat_node = gnat_strip_type_conversion (gnat_node);
+ /* Now strip any type conversion from GNAT_NODE. */
+ if (Nkind (gnat_node) == N_Type_Conversion
+ || Nkind (gnat_node) == N_Unchecked_Type_Conversion)
+ gnat_node = Expression (gnat_node);
- /* For Atomic itself, only reads and updates of the object as a whole require
- atomic access (RM C.6 (15)). But for Volatile_Full_Access, all reads and
- updates require atomic access. */
- if (!(as_a_whole && node_is_atomic (gnat_node))
- && !node_has_volatile_full_access (gnat_node))
- return false;
+ /* Up to Ada 2012, for Atomic itself, only reads and updates of the object as
+ a whole require atomic access (RM C.6(15)). But, starting with Ada 2020,
+ reads of or writes to a nonatomic subcomponent of the object also require
+ atomic access (RM C.6(19)). */
+ if (node_is_atomic (gnat_node))
+ {
+ bool as_a_whole = true;
- /* If an outer atomic access will also be required, it cancels this one. */
- if (outer_atomic_access_required_p (gnat_node))
- return false;
+ /* If we are the prefix of the parent, then the access is partial. */
+ for (gnat_temp = gnat_node, gnat_parent = Parent (gnat_temp);
+ node_is_component (gnat_parent) && Prefix (gnat_parent) == gnat_temp;
+ gnat_temp = gnat_parent, gnat_parent = Parent (gnat_temp))
+ if (Ada_Version < Ada_2020 || node_is_atomic (gnat_parent))
+ goto not_atomic;
+ else
+ as_a_whole = false;
- *sync = Atomic_Sync_Required (gnat_node);
+ /* We consider that partial accesses are not sequential actions and,
+ therefore, do not require synchronization. */
+ *type = SIMPLE_ATOMIC;
+ *sync = as_a_whole ? Atomic_Sync_Required (gnat_node) : false;
+ return;
+ }
- return true;
+ /* Look for an outer atomic access of a nonatomic subcomponent. Note that,
+ for VFA, we do this before looking at the node itself because we need to
+ access the outermost VFA object atomically, unlike for Atomic where it is
+ the innermost atomic object (RM C.6(19)). */
+ for (gnat_temp = gnat_node;
+ node_is_component (gnat_temp);
+ gnat_temp = Prefix (gnat_temp))
+ if ((Ada_Version >= Ada_2020 && node_is_atomic (Prefix (gnat_temp)))
+ || node_is_volatile_full_access (Prefix (gnat_temp)))
+ {
+ *type = OUTER_ATOMIC;
+ *sync = false;
+ return;
+ }
+
+ /* Unlike Atomic, accessing a VFA object always requires atomic access. */
+ if (node_is_volatile_full_access (gnat_node))
+ {
+ *type = SIMPLE_ATOMIC;
+ *sync = false;
+ return;
+ }
+
+not_atomic:
+ *type = NOT_ATOMIC;
+ *sync = false;
}
\f
+/* Return true if GNAT_NODE requires simple atomic access and, if so, set SYNC
+ according to the associated synchronization setting. */
+
+static inline bool
+simple_atomic_access_required_p (Node_Id gnat_node, bool *sync)
+{
+ atomic_acces_t type;
+ get_atomic_access (gnat_node, &type, sync);
+ return type == SIMPLE_ATOMIC;
+}
+
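
As an aid to reading get_atomic_access above, here is a minimal standalone model of the classification it performs; it is a sketch, not gigi code: the descriptor type and field names are invented for the illustration, the GNAT tree walk is replaced by precomputed booleans, and the handling of partial accesses to the atomic object itself is deliberately omitted.

/* Standalone sketch, not gigi code: a simplified model of the classification
   performed by get_atomic_access.  The descriptor below stands in for the
   GNAT tree walk (Etype, Prefix, parent scan), all names are invented, and
   partial accesses to the atomic object itself are not modeled.  */

#include <stdbool.h>
#include <stdio.h>

typedef enum { NOT_ATOMIC, SIMPLE_ATOMIC, OUTER_ATOMIC } atomic_access_kind;

/* Toy description of an object reference appearing in the source.  */
struct access_desc
{
  bool object_is_atomic;     /* Atomic on the referenced object itself.    */
  bool object_is_vfa;        /* Volatile_Full_Access on the object itself. */
  bool enclosing_is_atomic;  /* Atomic on an enclosing composite object.   */
  bool enclosing_is_vfa;     /* VFA on an enclosing composite object.      */
  bool sync_required;        /* Stand-in for Atomic_Sync_Required.         */
  bool ada_2020;             /* Ada 2020 (or later) semantics in effect.   */
};

static void
classify_access (const struct access_desc *a, atomic_access_kind *kind,
		 bool *sync)
{
  /* Reading or updating an atomic object as a whole requires a simple
     atomic access, with or without synchronization (RM C.6(15)).  */
  if (a->object_is_atomic)
    {
      *kind = SIMPLE_ATOMIC;
      *sync = a->sync_required;
      return;
    }

  /* Accessing a nonatomic subcomponent of an atomic object (Ada 2020,
     RM C.6(19)) or of a VFA object requires atomic access of the enclosing
     object, i.e. a load-modify-store sequence on the whole.  This is checked
     before the node's own VFA property so that the outermost VFA object
     wins.  */
  if ((a->ada_2020 && a->enclosing_is_atomic) || a->enclosing_is_vfa)
    {
      *kind = OUTER_ATOMIC;
      *sync = false;
      return;
    }

  /* A VFA object itself is always accessed atomically, but without
     synchronization.  */
  if (a->object_is_vfa)
    {
      *kind = SIMPLE_ATOMIC;
      *sync = false;
      return;
    }

  *kind = NOT_ATOMIC;
  *sync = false;
}

int
main (void)
{
  /* A nonatomic component of a record subject to Volatile_Full_Access.  */
  const struct access_desc vfa_component
    = { .enclosing_is_vfa = true, .ada_2020 = true };
  atomic_access_kind kind;
  bool sync;

  classify_access (&vfa_component, &kind, &sync);
  printf ("kind=%d sync=%d\n", (int) kind, (int) sync);  /* OUTER_ATOMIC, 0 */
  return 0;
}
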
/* Create a temporary variable with PREFIX and TYPE, and return it. */
static tree
GNU_RESULT_TYPE_P is a pointer to where we should place the result type.
If GNU_TARGET is non-null, this must be a function call on the RHS of a
N_Assignment_Statement and the result is to be placed into that object.
- If OUTER_ATOMIC_ACCESS is true, then the assignment to GNU_TARGET must be a
- load-modify-store sequence. Otherwise, if ATOMIC_ACCESS is true, then the
- assignment to GNU_TARGET must be atomic. If, in addition, ATOMIC_SYNC is
- true, then the assignment to GNU_TARGET requires atomic synchronization. */
+ ATOMIC_ACCESS is the type of atomic access to be used for the assignment
+ to GNU_TARGET. If, in addition, ATOMIC_SYNC is true, then the assignment
+ to GNU_TARGET requires atomic synchronization. */
static tree
Call_to_gnu (Node_Id gnat_node, tree *gnu_result_type_p, tree gnu_target,
- bool outer_atomic_access, bool atomic_access, bool atomic_sync)
+ atomic_acces_t atomic_access, bool atomic_sync)
{
const bool function_call = (Nkind (gnat_node) == N_Function_Call);
const bool returning_value = (function_call && !gnu_target);
bool pushed_binding_level = false;
Entity_Id gnat_formal;
Node_Id gnat_actual;
- bool sync;
+ atomic_acces_t aa_type;
+ bool aa_sync;
gcc_assert (FUNC_OR_METHOD_TYPE_P (gnu_subprog_type));
if (is_true_formal_parm
&& !is_by_ref_formal_parm
&& Ekind (gnat_formal) != E_Out_Parameter
- && atomic_access_required_p (gnat_actual, &sync))
- gnu_actual = build_atomic_load (gnu_actual, sync);
+ && simple_atomic_access_required_p (gnat_actual, &aa_sync))
+ gnu_actual = build_atomic_load (gnu_actual, aa_sync);
/* If this was a procedure call, we may not have removed any padding.
So do it here for the part we will use as an input, if any. */
gnu_result = convert (TREE_TYPE (gnu_actual), gnu_result);
}
+ get_atomic_access (gnat_actual, &aa_type, &aa_sync);
+
/* If an outer atomic access is required for an actual parameter,
build the load-modify-store sequence. */
- if (outer_atomic_access_required_p (gnat_actual))
+ if (aa_type == OUTER_ATOMIC)
gnu_result
= build_load_modify_store (gnu_actual, gnu_result, gnat_node);
- /* Or else, if simple atomic access is required, build the atomic
+ /* Or else, if a simple atomic access is required, build the atomic
store. */
- else if (atomic_access_required_p (gnat_actual, &sync))
- gnu_result = build_atomic_store (gnu_actual, gnu_result, sync);
+ else if (aa_type == SIMPLE_ATOMIC)
+ gnu_result
+ = build_atomic_store (gnu_actual, gnu_result, aa_sync);
/* Otherwise build a regular assignment. */
else
op_code = MODIFY_EXPR;
/* Use the required method to move the result to the target. */
- if (outer_atomic_access)
+ if (atomic_access == OUTER_ATOMIC)
gnu_call
= build_load_modify_store (gnu_target, gnu_call, gnat_node);
- else if (atomic_access)
+ else if (atomic_access == SIMPLE_ATOMIC)
gnu_call = build_atomic_store (gnu_target, gnu_call, atomic_sync);
else
gnu_call
static bool
lhs_or_actual_p (Node_Id gnat_node)
{
- Node_Id gnat_parent = Parent (gnat_node);
- Node_Kind kind = Nkind (gnat_parent);
+ const Node_Id gnat_parent = Parent (gnat_node);
+ const Node_Kind kind = Nkind (gnat_parent);
if (kind == N_Assignment_Statement && Name (gnat_parent) == gnat_node)
return true;
static bool
present_in_lhs_or_actual_p (Node_Id gnat_node)
{
- Node_Kind kind;
-
if (lhs_or_actual_p (gnat_node))
return true;
- kind = Nkind (Parent (gnat_node));
+ const Node_Kind kind = Nkind (Parent (gnat_node));
if ((kind == N_Type_Conversion || kind == N_Unchecked_Type_Conversion)
&& lhs_or_actual_p (Parent (gnat_node)))
tree gnu_result_type = void_type_node;
tree gnu_expr, gnu_lhs, gnu_rhs;
Node_Id gnat_temp;
- bool sync = false;
+ atomic_acces_t aa_type;
+ bool aa_sync;
/* Save node number for error message and set location information. */
Current_Error_Node = gnat_node;
gnu_result = Identifier_to_gnu (gnat_node, &gnu_result_type);
/* If atomic access is required on the RHS, build the atomic load. */
- if (atomic_access_required_p (gnat_node, &sync)
+ if (simple_atomic_access_required_p (gnat_node, &aa_sync)
&& !present_in_lhs_or_actual_p (gnat_node))
- gnu_result = build_atomic_load (gnu_result, sync);
+ gnu_result = build_atomic_load (gnu_result, aa_sync);
break;
case N_Integer_Literal:
gnu_result = build_unary_op (INDIRECT_REF, NULL_TREE, gnu_result);
/* If atomic access is required on the RHS, build the atomic load. */
- if (atomic_access_required_p (gnat_node, &sync)
+ if (simple_atomic_access_required_p (gnat_node, &aa_sync)
&& !present_in_lhs_or_actual_p (gnat_node))
- gnu_result = build_atomic_load (gnu_result, sync);
+ gnu_result = build_atomic_load (gnu_result, aa_sync);
break;
case N_Indexed_Component:
gnu_result_type = get_unpadded_type (Etype (gnat_node));
/* If atomic access is required on the RHS, build the atomic load. */
- if (atomic_access_required_p (gnat_node, &sync)
+ if (simple_atomic_access_required_p (gnat_node, &aa_sync)
&& !present_in_lhs_or_actual_p (gnat_node))
- gnu_result = build_atomic_load (gnu_result, sync);
+ gnu_result = build_atomic_load (gnu_result, aa_sync);
}
break;
gnu_result_type = get_unpadded_type (Etype (gnat_node));
/* If atomic access is required on the RHS, build the atomic load. */
- if (atomic_access_required_p (gnat_node, &sync)
+ if (simple_atomic_access_required_p (gnat_node, &aa_sync)
&& !present_in_lhs_or_actual_p (gnat_node))
- gnu_result = build_atomic_load (gnu_result, sync);
+ gnu_result = build_atomic_load (gnu_result, aa_sync);
}
break;
N_Raise_Storage_Error);
else if (Nkind (Expression (gnat_node)) == N_Function_Call)
{
- bool outer_atomic_access
- = outer_atomic_access_required_p (Name (gnat_node));
- bool atomic_access
- = !outer_atomic_access
- && atomic_access_required_p (Name (gnat_node), &sync);
+ get_atomic_access (Name (gnat_node), &aa_type, &aa_sync);
gnu_result
= Call_to_gnu (Expression (gnat_node), &gnu_result_type, gnu_lhs,
- outer_atomic_access, atomic_access, sync);
+ aa_type, aa_sync);
}
else
{
gigi_checking_assert (!Do_Range_Check (gnat_expr));
+ get_atomic_access (Name (gnat_node), &aa_type, &aa_sync);
+
/* If an outer atomic access is required on the LHS, build the load-
modify-store sequence. */
- if (outer_atomic_access_required_p (Name (gnat_node)))
+ if (aa_type == OUTER_ATOMIC)
gnu_result = build_load_modify_store (gnu_lhs, gnu_rhs, gnat_node);
- /* Or else, if atomic access is required, build the atomic store. */
- else if (atomic_access_required_p (Name (gnat_node), &sync))
- gnu_result = build_atomic_store (gnu_lhs, gnu_rhs, sync);
+ /* Or else, if a simple atomic access is required, build the atomic
+ store. */
+ else if (aa_type == SIMPLE_ATOMIC)
+ gnu_result = build_atomic_store (gnu_lhs, gnu_rhs, aa_sync);
/* Or else, use memset when the conditions are met. This has already
been validated by Aggr_Assignment_OK_For_Backend in the front-end
case N_Function_Call:
case N_Procedure_Call_Statement:
gnu_result = Call_to_gnu (gnat_node, &gnu_result_type, NULL_TREE,
- false, false, false);
+ NOT_ATOMIC, false);
break;
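
Conceptually, the three access kinds dispatched on in Call_to_gnu and in the assignment handling correspond to the code shapes sketched below in C11; this is an illustration only, with invented types and example memory orderings, not what build_atomic_store or build_load_modify_store literally emit.

/* Illustration only, in C11: the shapes of code that the three access kinds
   correspond to.  The types, variables and memory orderings are invented for
   the example; gigi builds the equivalent trees via build_atomic_store and
   build_load_modify_store.  On targets where an 8-byte structure is not
   lock-free, link with -latomic.  */

#include <stdatomic.h>
#include <stdio.h>

struct pair { int lo; int hi; };

int
main (void)
{
  _Atomic struct pair whole;      /* Object requiring atomic access.  */
  struct pair plain = { 0, 0 };   /* Object with no atomic property.  */

  atomic_init (&whole, (struct pair) { 0, 0 });

  /* SIMPLE_ATOMIC: the object is written as a whole by one atomic store.  */
  atomic_store_explicit (&whole, (struct pair) { 1, 2 }, memory_order_seq_cst);

  /* OUTER_ATOMIC: a nonatomic component is updated by loading the whole
     enclosing object, modifying the component and storing the whole back,
     i.e. a load-modify-store sequence.  */
  struct pair tmp = atomic_load_explicit (&whole, memory_order_seq_cst);
  tmp.lo = 42;
  atomic_store_explicit (&whole, tmp, memory_order_seq_cst);

  /* NOT_ATOMIC: a regular assignment.  */
  plain.lo = 42;

  tmp = atomic_load_explicit (&whole, memory_order_seq_cst);
  printf ("%d %d %d\n", tmp.lo, tmp.hi, plain.lo);
  return 0;
}
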
/************************/
/* If the operand is going to end up in memory,
mark it addressable. Note that we don't test
allows_mem like in the input case below; this
- is modelled on the C front-end. */
+ is modeled on the C front-end. */
if (!allows_reg)
{
output = remove_conversions (output, false);
static Entity_Id
get_controlling_type (Entity_Id subprog)
{
- /* This is modelled on Expand_Interface_Thunk. */
+ /* This is modeled on Expand_Interface_Thunk. */
Entity_Id controlling_type = Etype (First_Formal (subprog));
if (Is_Access_Type (controlling_type))
controlling_type = Directly_Designated_Type (controlling_type);