typedef VEC (cgraph_node_p, heap) *cgraph_node_queue;
/* Return the ipa data associated with NODE, allocating zeroed memory
- if necessary. */
+ if necessary. TRAVERSE_ALIASES is true if we must traverse aliases
+ and set *NODE accordingly. */
static struct tm_ipa_cg_data *
-get_cg_data (struct cgraph_node *node)
+get_cg_data (struct cgraph_node **node, bool traverse_aliases)
{
- struct tm_ipa_cg_data *d = (struct tm_ipa_cg_data *) node->aux;
+ struct tm_ipa_cg_data *d;
+
+ if (traverse_aliases && (*node)->alias)
+ *node = cgraph_get_node ((*node)->thunk.alias);
+
+ d = (struct tm_ipa_cg_data *) (*node)->aux;
if (d == NULL)
{
d = (struct tm_ipa_cg_data *)
obstack_alloc (&tm_obstack.obstack, sizeof (*d));
- node->aux = (void *) d;
+ (*node)->aux = (void *) d;
memset (d, 0, sizeof (*d));
}
node = cgraph_get_node (fndecl);
gcc_assert (node != NULL);
- d = get_cg_data (node);
+ d = get_cg_data (&node, true);
pcallers = (for_clone ? &d->tm_callers_clone
: &d->tm_callers_normal);
ipa_tm_note_irrevocable (struct cgraph_node *node,
cgraph_node_queue *worklist_p)
{
- struct tm_ipa_cg_data *d = get_cg_data (node);
+ struct tm_ipa_cg_data *d = get_cg_data (&node, true);
struct cgraph_edge *e;
d->is_irrevocable = true;
for (e = node->callers; e ; e = e->next_caller)
{
basic_block bb;
+ struct cgraph_node *caller;
/* Don't examine recursive calls. */
if (e->caller == node)
continue;
if (is_tm_safe_or_pure (e->caller->decl))
continue;
- d = get_cg_data (e->caller);
+ caller = e->caller;
+ d = get_cg_data (&caller, true);
/* Check if the callee is in a transactional region. If so,
schedule the function for normal re-scan as well. */
bb = gimple_bb (e->call_stmt);
gcc_assert (bb != NULL);
if (d->transaction_blocks_normal
&& bitmap_bit_p (d->transaction_blocks_normal, bb->index))
d->want_irr_scan_normal = true;
- maybe_push_queue (e->caller, worklist_p, &d->in_worklist);
+ maybe_push_queue (caller, worklist_p, &d->in_worklist);
}
}
if (TREE_CODE (fn) == ADDR_EXPR)
{
struct tm_ipa_cg_data *d;
+ struct cgraph_node *node;
fn = TREE_OPERAND (fn, 0);
if (is_tm_ending_fndecl (fn))
break;
if (find_tm_replacement_function (fn))
break;
- d = get_cg_data (cgraph_get_node (fn));
+ node = cgraph_get_node (fn);
+ d = get_cg_data (&node, true);
/* Return true if irrevocable, but above all, believe
the user. */
{
struct tm_ipa_cg_data *d;
unsigned *pcallers;
+ struct cgraph_node *tnode;
if (is_tm_ending_fndecl (fndecl))
continue;
if (find_tm_replacement_function (fndecl))
continue;
- d = get_cg_data (cgraph_get_node (fndecl));
+ tnode = cgraph_get_node (fndecl);
+ d = get_cg_data (&tnode, true);
+
pcallers = (for_clone ? &d->tm_callers_clone
: &d->tm_callers_normal);
push_cfun (DECL_STRUCT_FUNCTION (node->decl));
calculate_dominance_info (CDI_DOMINATORS);
- d = get_cg_data (node);
+ d = get_cg_data (&node, true);
queue = VEC_alloc (basic_block, heap, 10);
new_irr = BITMAP_ALLOC (&tm_obstack);
static bool
ipa_tm_mayenterirr_function (struct cgraph_node *node)
{
- struct tm_ipa_cg_data *d = get_cg_data (node);
- tree decl = node->decl;
- unsigned flags = flags_from_decl_or_type (decl);
+ struct tm_ipa_cg_data *d;
+ tree decl;
+ unsigned flags;
+
+ d = get_cg_data (&node, true);
+ decl = node->decl;
+ flags = flags_from_decl_or_type (decl);
/* Handle some TM builtins. Ordinarily these aren't actually generated
at this point, but handling these functions when written in by the
new_node = cgraph_same_body_alias (NULL, new_decl, info->new_decl);
new_node->tm_clone = true;
- get_cg_data (node)->clone = new_node;
+ /* ?? Do not traverse aliases here. */
+ get_cg_data (&node, false)->clone = new_node;
record_tm_clone_pair (old_decl, new_decl);
new_node = cgraph_copy_node_for_versioning (old_node, new_decl, NULL, NULL);
new_node->lowered = true;
new_node->tm_clone = 1;
- get_cg_data (old_node)->clone = new_node;
+ get_cg_data (&old_node, true)->clone = new_node;
if (cgraph_function_body_availability (old_node) >= AVAIL_OVERWRITABLE)
{
}
else
{
- struct tm_ipa_cg_data *d = get_cg_data (e->callee);
+ struct tm_ipa_cg_data *d;
+ struct cgraph_node *tnode = e->callee;
+
+ d = get_cg_data (&tnode, true);
new_node = d->clone;
/* As we've already skipped pure calls and appropriate builtins,
static void
ipa_tm_transform_transaction (struct cgraph_node *node)
{
- struct tm_ipa_cg_data *d = get_cg_data (node);
+ struct tm_ipa_cg_data *d;
struct tm_region *region;
bool need_ssa_rename = false;
+ d = get_cg_data (&node, true);
+
current_function_decl = node->decl;
push_cfun (DECL_STRUCT_FUNCTION (node->decl));
calculate_dominance_info (CDI_DOMINATORS);
static void
ipa_tm_transform_clone (struct cgraph_node *node)
{
- struct tm_ipa_cg_data *d = get_cg_data (node);
+ struct tm_ipa_cg_data *d;
bool need_ssa_rename;
+ d = get_cg_data (&node, true);
+
/* If this function makes no calls and has no irrevocable blocks,
then there's nothing to do. */
/* ??? Remove non-aborting top-level transactions. */
if (is_tm_callable (node->decl)
&& cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE)
{
- d = get_cg_data (node);
+ d = get_cg_data (&node, true);
maybe_push_queue (node, &tm_callees, &d->in_callee_queue);
}
tm_region_init (NULL);
if (all_tm_regions)
{
- d = get_cg_data (node);
+ d = get_cg_data (&node, true);
/* Scan for calls that are in each transaction. */
ipa_tm_scan_calls_transaction (d, &tm_callees);
{
node = VEC_index (cgraph_node_p, tm_callees, i);
a = cgraph_function_body_availability (node);
- d = get_cg_data (node);
+ d = get_cg_data (&node, true);
/* Put it in the worklist so we can scan the function later
(ipa_tm_scan_irr_function) and mark the irrevocable
if (node->alias)
{
node = cgraph_get_node (node->thunk.alias);
- d = get_cg_data (node);
+ d = get_cg_data (&node, true);
maybe_push_queue (node, &tm_callees, &d->in_callee_queue);
continue;
}
}
node = VEC_index (cgraph_node_p, irr_worklist, i);
- d = get_cg_data (node);
+ d = get_cg_data (&node, true);
d->in_worklist = false;
if (d->want_irr_scan_normal)
node = VEC_index (cgraph_node_p, tm_callees, i);
if (ipa_tm_mayenterirr_function (node))
{
- d = get_cg_data (node);
+ d = get_cg_data (&node, true);
gcc_assert (d->in_worklist == false);
maybe_push_queue (node, &irr_worklist, &d->in_worklist);
}
}
node = VEC_index (cgraph_node_p, irr_worklist, i);
- d = get_cg_data (node);
+ d = get_cg_data (&node, true);
d->in_worklist = false;
node->local.tm_may_enter_irr = true;
if (!is_tm_safe_or_pure (caller->decl)
&& !caller->local.tm_may_enter_irr)
{
- d = get_cg_data (caller);
+ d = get_cg_data (&caller, true);
maybe_push_queue (caller, &irr_worklist, &d->in_worklist);
}
}
if (ref->use == IPA_REF_ALIAS
&& !caller->local.tm_may_enter_irr)
{
- d = get_cg_data (caller);
+ /* ?? Do not traverse aliases here. */
+ d = get_cg_data (&caller, false);
maybe_push_queue (caller, &irr_worklist, &d->in_worklist);
}
}
if (node->reachable && node->lowered
&& cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE)
{
- d = get_cg_data (node);
+ d = get_cg_data (&node, true);
if (is_tm_safe (node->decl))
ipa_tm_diagnose_tm_safe (node);
else if (d->all_tm_regions)
continue;
a = cgraph_function_body_availability (node);
- d = get_cg_data (node);
+ d = get_cg_data (&node, true);
if (a <= AVAIL_NOT_AVAILABLE)
doit = is_tm_callable (node->decl);
node = VEC_index (cgraph_node_p, tm_callees, i);
if (node->analyzed)
{
- d = get_cg_data (node);
+ d = get_cg_data (&node, true);
if (d->clone)
ipa_tm_transform_clone (node);
}
if (node->reachable && node->lowered
&& cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE)
{
- d = get_cg_data (node);
+ d = get_cg_data (&node, true);
if (d->all_tm_regions)
ipa_tm_transform_transaction (node);
}