/* Callgraph handling code.
   Copyright (C) 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
   Contributed by Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */

/* This file contains basic routines manipulating the call graph.

The callgraph:

    The call-graph is a data structure designed for inter-procedural
    optimization.  It is also used in non-unit-at-a-time compilation to
    allow easier code sharing.

    The call-graph consists of nodes and edges represented via linked
    lists.  Each function (external or not) corresponds to a unique node.

    The mapping from declarations to call-graph nodes is done using a
    hash table based on DECL_UID.  The call-graph nodes are created
    lazily by the cgraph_node function when it is called for an unknown
    declaration.

    The callgraph at the moment does not represent indirect calls or
    calls from other compilation units.  The flag NEEDED is set for each
    node that may be accessed in such an invisible way, and such a node
    shall be considered an entry point to the callgraph.

Interprocedural information:

    The callgraph is the place to store data needed for interprocedural
    optimization.  All data structures are divided into three components:
    local_info, which is produced while analyzing the function;
    global_info, which is the result of a global walk of the callgraph at
    the end of compilation; and rtl_info, used by the RTL backend to
    propagate data from already compiled functions to their callers.

Inlining plans:

    The function inlining information is decided in advance and
    maintained in the callgraph as a so-called inline plan.
    For each inlined call, the callee's node is cloned to represent the
    new function copy produced by the inliner.
    Each inlined call gets a unique corresponding clone node of the
    callee, and the data structure is updated while inlining is
    performed, so the clones are eliminated and their callee edges
    redirected to the caller.

    Each edge has an "inline_failed" field.  When the field is set to
    NULL, the call will be inlined.  When it is non-NULL, it contains the
    reason why inlining wasn't performed.  */
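
/* Example (an illustrative sketch, not used by this file): given a
   FUNCTION_DECL FNDECL, a pass can obtain the corresponding node with
   cgraph_node and walk its direct call sites roughly as follows:

     struct cgraph_node *node = cgraph_node (fndecl);
     struct cgraph_edge *e;

     for (e = node->callees; e; e = e->next_callee)
       if (e->inline_failed)
	 fprintf (stderr, "call to %s not inlined: %s\n",
		  cgraph_node_name (e->callee), e->inline_failed);  */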

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "hashtab.h"
#include "toplev.h"
#include "flags.h"
#include "ggc.h"
#include "debug.h"
#include "target.h"
#include "basic-block.h"
#include "cgraph.h"
#include "varray.h"
#include "output.h"
#include "intl.h"
#include "tree-gimple.h"
#include "tree-dump.h"
#include "tree-flow.h"

static void cgraph_node_remove_callers (struct cgraph_node *node);
static inline void cgraph_edge_remove_caller (struct cgraph_edge *e);
static inline void cgraph_edge_remove_callee (struct cgraph_edge *e);

/* Hash table used to convert declarations into nodes.  */
static GTY((param_is (struct cgraph_node))) htab_t cgraph_hash;

/* The linked list of cgraph nodes.  */
struct cgraph_node *cgraph_nodes;

/* Queue of cgraph nodes scheduled to be lowered.  */
struct cgraph_node *cgraph_nodes_queue;

/* Queue of cgraph nodes scheduled to be added into cgraph.  This is a
   secondary queue used during optimization to accommodate passes that
   may generate new functions that need to be optimized and expanded.  */
struct cgraph_node *cgraph_new_nodes;

/* Number of nodes in existence.  */
int cgraph_n_nodes;

/* Maximal uid used in cgraph nodes.  */
int cgraph_max_uid;

/* Maximal pid used for profiling.  */
int cgraph_max_pid;

/* Set when the whole unit has been analyzed so we can access global info.  */
bool cgraph_global_info_ready = false;

/* What state the callgraph is in right now.  */
enum cgraph_state cgraph_state = CGRAPH_STATE_CONSTRUCTION;

/* Set when the cgraph is fully built and the basic flags are computed.  */
bool cgraph_function_flags_ready = false;

/* Linked list of cgraph asm nodes.  */
struct cgraph_asm_node *cgraph_asm_nodes;

/* Last node in cgraph_asm_nodes.  */
static GTY(()) struct cgraph_asm_node *cgraph_asm_last_node;

/* The order index of the next cgraph node to be created.  This is
   used so that we can sort the cgraph nodes in order by when we saw
   them, to support -fno-toplevel-reorder.  */
int cgraph_order;

static hashval_t hash_node (const void *);
static int eq_node (const void *, const void *);

/* Returns a hash code for P.  */

static hashval_t
hash_node (const void *p)
{
  const struct cgraph_node *n = (const struct cgraph_node *) p;
  return (hashval_t) DECL_UID (n->decl);
}

/* Returns nonzero if P1 and P2 are equal.  */

static int
eq_node (const void *p1, const void *p2)
{
  const struct cgraph_node *n1 = (const struct cgraph_node *) p1;
  const struct cgraph_node *n2 = (const struct cgraph_node *) p2;
  return DECL_UID (n1->decl) == DECL_UID (n2->decl);
}

/* Allocate new callgraph node and insert it into basic data structures.  */

static struct cgraph_node *
cgraph_create_node (void)
{
  struct cgraph_node *node;

  node = GGC_CNEW (struct cgraph_node);
  node->next = cgraph_nodes;
  node->uid = cgraph_max_uid++;
  node->pid = -1;
  node->order = cgraph_order++;
  if (cgraph_nodes)
    cgraph_nodes->previous = node;
  node->previous = NULL;
  node->global.estimated_growth = INT_MIN;
  cgraph_nodes = node;
  cgraph_n_nodes++;
  return node;
}

/* Return cgraph node assigned to DECL.  Create new one when needed.  */

struct cgraph_node *
cgraph_node (tree decl)
{
  struct cgraph_node key, *node, **slot;

  gcc_assert (TREE_CODE (decl) == FUNCTION_DECL);

  if (!cgraph_hash)
    cgraph_hash = htab_create_ggc (10, hash_node, eq_node, NULL);

  key.decl = decl;

  slot = (struct cgraph_node **) htab_find_slot (cgraph_hash, &key, INSERT);

  if (*slot)
    {
      node = *slot;
      if (!node->master_clone)
	node->master_clone = node;
      return node;
    }

  node = cgraph_create_node ();
  node->decl = decl;
  *slot = node;
  if (DECL_CONTEXT (decl) && TREE_CODE (DECL_CONTEXT (decl)) == FUNCTION_DECL)
    {
      node->origin = cgraph_node (DECL_CONTEXT (decl));
      node->next_nested = node->origin->nested;
      node->origin->nested = node;
      node->master_clone = node;
    }
  return node;
}

/* Insert already constructed node into hashtable.  */

void
cgraph_insert_node_to_hashtable (struct cgraph_node *node)
{
  struct cgraph_node **slot;

  slot = (struct cgraph_node **) htab_find_slot (cgraph_hash, node, INSERT);

  gcc_assert (!*slot);
  *slot = node;
}


/* Return the cgraph node that has ASMNAME for its DECL_ASSEMBLER_NAME.
   Return NULL if there's no such node.  */

struct cgraph_node *
cgraph_node_for_asm (tree asmname)
{
  struct cgraph_node *node;

  for (node = cgraph_nodes; node; node = node->next)
    if (decl_assembler_name_equal (node->decl, asmname))
      return node;

  return NULL;
}

/* Returns a hash value for X (which really is a cgraph_edge).  */

static hashval_t
edge_hash (const void *x)
{
  return htab_hash_pointer (((struct cgraph_edge *) x)->call_stmt);
}

/* Return nonzero if the call_stmt of cgraph_edge X is the statement Y.  */

static int
edge_eq (const void *x, const void *y)
{
  return ((struct cgraph_edge *) x)->call_stmt == y;
}

/* Return the callgraph edge representing the call statement CALL_STMT
   made by NODE.  */
struct cgraph_edge *
cgraph_edge (struct cgraph_node *node, tree call_stmt)
{
  struct cgraph_edge *e, *e2;
  int n = 0;

  if (node->call_site_hash)
    return htab_find_with_hash (node->call_site_hash, call_stmt,
				htab_hash_pointer (call_stmt));

  /* This loop may turn out to be a performance problem.  In such a case,
     adding hashtables into call nodes with very many edges is probably
     the best solution.  It is not a good idea to add a pointer into the
     CALL_EXPR itself, because we want to make it possible to have
     multiple cgraph nodes representing different clones of the same body
     before the body is actually cloned.  */
  for (e = node->callees; e; e = e->next_callee)
    {
      if (e->call_stmt == call_stmt)
	break;
      n++;
    }
  if (n > 100)
    {
      node->call_site_hash = htab_create_ggc (120, edge_hash, edge_eq, NULL);
      for (e2 = node->callees; e2; e2 = e2->next_callee)
	{
	  void **slot;
	  slot = htab_find_slot_with_hash (node->call_site_hash,
					   e2->call_stmt,
					   htab_hash_pointer (e2->call_stmt),
					   INSERT);
	  gcc_assert (!*slot);
	  *slot = e2;
	}
    }
  return e;
}

/* Change the call_stmt of edge E to NEW_STMT.  */

void
cgraph_set_call_stmt (struct cgraph_edge *e, tree new_stmt)
{
  if (e->caller->call_site_hash)
    {
      htab_remove_elt_with_hash (e->caller->call_site_hash,
				 e->call_stmt,
				 htab_hash_pointer (e->call_stmt));
    }
  e->call_stmt = new_stmt;
  if (e->caller->call_site_hash)
    {
      void **slot;
      slot = htab_find_slot_with_hash (e->caller->call_site_hash,
				       e->call_stmt,
				       htab_hash_pointer (e->call_stmt),
				       INSERT);
      gcc_assert (!*slot);
      *slot = e;
    }
}

/* Create edge from CALLER to CALLEE in the cgraph.  */

struct cgraph_edge *
cgraph_create_edge (struct cgraph_node *caller, struct cgraph_node *callee,
		    tree call_stmt, gcov_type count, int nest)
{
  struct cgraph_edge *edge = GGC_NEW (struct cgraph_edge);
#ifdef ENABLE_CHECKING
  struct cgraph_edge *e;

  for (e = caller->callees; e; e = e->next_callee)
    gcc_assert (e->call_stmt != call_stmt);
#endif

  gcc_assert (get_call_expr_in (call_stmt));

  if (!DECL_SAVED_TREE (callee->decl))
    edge->inline_failed = N_("function body not available");
  else if (callee->local.redefined_extern_inline)
    edge->inline_failed = N_("redefined extern inline functions are not "
			     "considered for inlining");
  else if (callee->local.inlinable)
    edge->inline_failed = N_("function not considered for inlining");
  else
    edge->inline_failed = N_("function not inlinable");

  edge->aux = NULL;

  edge->caller = caller;
  edge->callee = callee;
  edge->call_stmt = call_stmt;
  edge->prev_caller = NULL;
  edge->next_caller = callee->callers;
  if (callee->callers)
    callee->callers->prev_caller = edge;
  edge->prev_callee = NULL;
  edge->next_callee = caller->callees;
  if (caller->callees)
    caller->callees->prev_callee = edge;
  caller->callees = edge;
  callee->callers = edge;
  edge->count = count;
  edge->loop_nest = nest;
  if (caller->call_site_hash)
    {
      void **slot;
      slot = htab_find_slot_with_hash (caller->call_site_hash,
				       edge->call_stmt,
				       htab_hash_pointer (edge->call_stmt),
				       INSERT);
      gcc_assert (!*slot);
      *slot = edge;
    }
  return edge;
}
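
/* Usage sketch (illustrative only; the real graph builder lives outside
   this file): when a function body is finalized, one edge is created per
   call statement, e.g. for a statement STMT in basic block BB of FNDECL
   calling DECL:

     cgraph_create_edge (cgraph_node (fndecl), cgraph_node (decl),
			 stmt, bb->count, bb->loop_depth);

   FNDECL, DECL, STMT and BB stand for values the caller already has.  */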

/* Remove the edge E from the list of the callers of the callee.  */

static inline void
cgraph_edge_remove_callee (struct cgraph_edge *e)
{
  if (e->prev_caller)
    e->prev_caller->next_caller = e->next_caller;
  if (e->next_caller)
    e->next_caller->prev_caller = e->prev_caller;
  if (!e->prev_caller)
    e->callee->callers = e->next_caller;
}

/* Remove the edge E from the list of the callees of the caller.  */

static inline void
cgraph_edge_remove_caller (struct cgraph_edge *e)
{
  if (e->prev_callee)
    e->prev_callee->next_callee = e->next_callee;
  if (e->next_callee)
    e->next_callee->prev_callee = e->prev_callee;
  if (!e->prev_callee)
    e->caller->callees = e->next_callee;
  if (e->caller->call_site_hash)
    htab_remove_elt_with_hash (e->caller->call_site_hash,
			       e->call_stmt,
			       htab_hash_pointer (e->call_stmt));
}

/* Remove the edge E from the cgraph.  */

void
cgraph_remove_edge (struct cgraph_edge *e)
{
  /* Remove from the callers list of the callee.  */
  cgraph_edge_remove_callee (e);

  /* Remove from the callees list of the caller.  */
  cgraph_edge_remove_caller (e);
}

/* Redirect the callee of edge E to node N.  The function does not update
   the underlying call expression.  */

void
cgraph_redirect_edge_callee (struct cgraph_edge *e, struct cgraph_node *n)
{
  /* Remove from the callers list of the current callee.  */
  cgraph_edge_remove_callee (e);

  /* Insert into the callers list of the new callee.  */
  e->prev_caller = NULL;
  if (n->callers)
    n->callers->prev_caller = e;
  e->next_caller = n->callers;
  n->callers = e;
  e->callee = n;
}

/* Remove all callees from the node.  */

void
cgraph_node_remove_callees (struct cgraph_node *node)
{
  struct cgraph_edge *e;

  /* It is sufficient to remove the edges from the lists of callers of
     the callees.  The callee list of the node can be zapped with one
     assignment.  */
  for (e = node->callees; e; e = e->next_callee)
    cgraph_edge_remove_callee (e);
  node->callees = NULL;
  if (node->call_site_hash)
    {
      htab_delete (node->call_site_hash);
      node->call_site_hash = NULL;
    }
}

/* Remove all callers from the node.  */

static void
cgraph_node_remove_callers (struct cgraph_node *node)
{
  struct cgraph_edge *e;

  /* It is sufficient to remove the edges from the lists of callees of
     the callers.  The caller list of the node can be zapped with one
     assignment.  */
  for (e = node->callers; e; e = e->next_caller)
    cgraph_edge_remove_caller (e);
  node->callers = NULL;
}

/* Release memory used to represent body of function NODE.  */

void
cgraph_release_function_body (struct cgraph_node *node)
{
  if (DECL_STRUCT_FUNCTION (node->decl)
      && DECL_STRUCT_FUNCTION (node->decl)->gimple_df)
    {
      tree old_decl = current_function_decl;
      push_cfun (DECL_STRUCT_FUNCTION (node->decl));
      current_function_decl = node->decl;
      delete_tree_ssa ();
      delete_tree_cfg_annotations ();
      cfun->eh = NULL;
      current_function_decl = old_decl;
      pop_cfun ();
    }
  DECL_SAVED_TREE (node->decl) = NULL;
  DECL_STRUCT_FUNCTION (node->decl) = NULL;
  DECL_INITIAL (node->decl) = error_mark_node;
}

/* Remove the node from the cgraph.  */

void
cgraph_remove_node (struct cgraph_node *node)
{
  void **slot;
  bool kill_body = false;

  cgraph_node_remove_callers (node);
  cgraph_node_remove_callees (node);
  /* Incremental inlining accesses removed nodes stored in the postorder
     list.  */
  node->needed = node->reachable = false;
  while (node->nested)
    cgraph_remove_node (node->nested);
  if (node->origin)
    {
      struct cgraph_node **node2 = &node->origin->nested;

      while (*node2 != node)
	node2 = &(*node2)->next_nested;
      *node2 = node->next_nested;
    }
  if (node->previous)
    node->previous->next = node->next;
  else
    cgraph_nodes = node->next;
  if (node->next)
    node->next->previous = node->previous;
  node->next = NULL;
  node->previous = NULL;
  slot = htab_find_slot (cgraph_hash, node, NO_INSERT);
  if (*slot == node)
    {
      if (node->next_clone)
	{
	  struct cgraph_node *new_node = node->next_clone;
	  struct cgraph_node *n;

	  /* Make the next clone be the master clone.  */
	  for (n = new_node; n; n = n->next_clone)
	    n->master_clone = new_node;

	  *slot = new_node;
	  node->next_clone->prev_clone = NULL;
	}
      else
	{
	  htab_clear_slot (cgraph_hash, slot);
	  kill_body = true;
	}
    }
  else
    {
      node->prev_clone->next_clone = node->next_clone;
      if (node->next_clone)
	node->next_clone->prev_clone = node->prev_clone;
    }

  /* While all the clones are removed after being processed, the function
     itself is kept in the cgraph even after it is compiled.  Check whether
     we are done with this body and reclaim it proactively if this is the
     case.  */
  if (!kill_body && *slot)
    {
      struct cgraph_node *n = (struct cgraph_node *) *slot;
      if (!n->next_clone && !n->global.inlined_to
	  && (cgraph_global_info_ready
	      && (TREE_ASM_WRITTEN (n->decl) || DECL_EXTERNAL (n->decl))))
	kill_body = true;
    }

  if (kill_body && flag_unit_at_a_time)
    cgraph_release_function_body (node);
  node->decl = NULL;
  if (node->call_site_hash)
    {
      htab_delete (node->call_site_hash);
      node->call_site_hash = NULL;
    }
  cgraph_n_nodes--;
  /* Do not free the structure itself so the walk over the chain can
     continue.  */
}

/* Notify finalize_compilation_unit that the given node is reachable.  */

void
cgraph_mark_reachable_node (struct cgraph_node *node)
{
  if (!node->reachable && node->local.finalized)
    {
      notice_global_symbol (node->decl);
      node->reachable = 1;
      gcc_assert (!cgraph_global_info_ready);

      node->next_needed = cgraph_nodes_queue;
      cgraph_nodes_queue = node;
    }
}

/* Likewise indicate that a node is needed, i.e. reachable via some
   external means.  */

void
cgraph_mark_needed_node (struct cgraph_node *node)
{
  node->needed = 1;
  cgraph_mark_reachable_node (node);
}

/* Return the local info for the compiled function.  */

struct cgraph_local_info *
cgraph_local_info (tree decl)
{
  struct cgraph_node *node;

  gcc_assert (TREE_CODE (decl) == FUNCTION_DECL);
  node = cgraph_node (decl);
  return &node->local;
}

/* Return the global info for the compiled function.  */

struct cgraph_global_info *
cgraph_global_info (tree decl)
{
  struct cgraph_node *node;

  gcc_assert (TREE_CODE (decl) == FUNCTION_DECL && cgraph_global_info_ready);
  node = cgraph_node (decl);
  return &node->global;
}

/* Return the RTL info for the compiled function.  */

struct cgraph_rtl_info *
cgraph_rtl_info (tree decl)
{
  struct cgraph_node *node;

  gcc_assert (TREE_CODE (decl) == FUNCTION_DECL);
  node = cgraph_node (decl);
  if (decl != current_function_decl
      && !TREE_ASM_WRITTEN (node->decl))
    return NULL;
  return &node->rtl;
}

/* Return the name of the node used in debug output.  */
const char *
cgraph_node_name (struct cgraph_node *node)
{
  return lang_hooks.decl_printable_name (node->decl, 2);
}

/* Names used to print out the availability enum.  */
const char * const cgraph_availability_names[] =
  {"unset", "not_available", "overwritable", "available", "local"};

/* Dump given cgraph node.  */
void
dump_cgraph_node (FILE *f, struct cgraph_node *node)
{
  struct cgraph_edge *edge;
  fprintf (f, "%s/%i(%i):", cgraph_node_name (node), node->uid, node->pid);
  if (node->global.inlined_to)
    fprintf (f, " (inline copy in %s/%i)",
	     cgraph_node_name (node->global.inlined_to),
	     node->global.inlined_to->uid);
  if (cgraph_function_flags_ready)
    fprintf (f, " availability:%s",
	     cgraph_availability_names [cgraph_function_body_availability (node)]);
  if (node->master_clone && node->master_clone->uid != node->uid)
    fprintf (f, "(%i)", node->master_clone->uid);
  if (node->count)
    fprintf (f, " executed "HOST_WIDEST_INT_PRINT_DEC"x",
	     (HOST_WIDEST_INT) node->count);
  if (node->local.self_insns)
    fprintf (f, " %i insns", node->local.self_insns);
  if (node->global.insns && node->global.insns != node->local.self_insns)
    fprintf (f, " (%i after inlining)", node->global.insns);
  if (node->local.estimated_self_stack_size)
    fprintf (f, " %i bytes stack usage",
	     (int) node->local.estimated_self_stack_size);
  if (node->global.estimated_stack_size
      != node->local.estimated_self_stack_size)
    fprintf (f, " %i bytes after inlining",
	     (int) node->global.estimated_stack_size);
  if (node->origin)
    fprintf (f, " nested in: %s", cgraph_node_name (node->origin));
  if (node->needed)
    fprintf (f, " needed");
  else if (node->reachable)
    fprintf (f, " reachable");
  if (DECL_SAVED_TREE (node->decl))
    fprintf (f, " tree");
  if (node->output)
    fprintf (f, " output");
  if (node->local.local)
    fprintf (f, " local");
  if (node->local.externally_visible)
    fprintf (f, " externally_visible");
  if (node->local.finalized)
    fprintf (f, " finalized");
  if (node->local.disregard_inline_limits)
    fprintf (f, " always_inline");
  else if (node->local.inlinable)
    fprintf (f, " inlinable");
  if (node->local.redefined_extern_inline)
    fprintf (f, " redefined_extern_inline");
  if (TREE_ASM_WRITTEN (node->decl))
    fprintf (f, " asm_written");

  fprintf (f, "\n called by: ");
  for (edge = node->callers; edge; edge = edge->next_caller)
    {
      fprintf (f, "%s/%i ", cgraph_node_name (edge->caller),
	       edge->caller->uid);
      if (edge->count)
	fprintf (f, "("HOST_WIDEST_INT_PRINT_DEC"x) ",
		 (HOST_WIDEST_INT) edge->count);
      if (!edge->inline_failed)
	fprintf (f, "(inlined) ");
    }

  fprintf (f, "\n calls: ");
  for (edge = node->callees; edge; edge = edge->next_callee)
    {
      fprintf (f, "%s/%i ", cgraph_node_name (edge->callee),
	       edge->callee->uid);
      if (!edge->inline_failed)
	fprintf (f, "(inlined) ");
      if (edge->count)
	fprintf (f, "("HOST_WIDEST_INT_PRINT_DEC"x) ",
		 (HOST_WIDEST_INT) edge->count);
      if (edge->loop_nest)
	fprintf (f, "(nested in %i loops) ", edge->loop_nest);
    }
  fprintf (f, "\n");
}

/* Dump the callgraph.  */

void
dump_cgraph (FILE *f)
{
  struct cgraph_node *node;

  fprintf (f, "callgraph:\n\n");
  for (node = cgraph_nodes; node; node = node->next)
    dump_cgraph_node (f, node);
}
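
/* Debugging sketch: both dumpers take an open FILE, so the whole graph or
   a single node can be printed from a pass or from within a debugger, e.g.

     dump_cgraph (stderr);
     dump_cgraph_node (stderr, cgraph_node (current_function_decl));  */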

/* Set the DECL_ASSEMBLER_NAME and update cgraph hashtables.  */
void
change_decl_assembler_name (tree decl, tree name)
{
  if (!DECL_ASSEMBLER_NAME_SET_P (decl))
    {
      SET_DECL_ASSEMBLER_NAME (decl, name);
      return;
    }
  if (name == DECL_ASSEMBLER_NAME (decl))
    return;

  if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl))
      && DECL_RTL_SET_P (decl))
    warning (0, "%D renamed after being referenced in assembly", decl);

  SET_DECL_ASSEMBLER_NAME (decl, name);
}

/* Add a top-level asm statement to the list.  */

struct cgraph_asm_node *
cgraph_add_asm_node (tree asm_str)
{
  struct cgraph_asm_node *node;

  node = GGC_CNEW (struct cgraph_asm_node);
  node->asm_str = asm_str;
  node->order = cgraph_order++;
  node->next = NULL;
  if (cgraph_asm_nodes == NULL)
    cgraph_asm_nodes = node;
  else
    cgraph_asm_last_node->next = node;
  cgraph_asm_last_node = node;
  return node;
}

/* Return true when the DECL can possibly be inlined.  */
bool
cgraph_function_possibly_inlined_p (tree decl)
{
  if (!cgraph_global_info_ready)
    return (DECL_INLINE (decl) && !flag_really_no_inline);
  return DECL_POSSIBLY_INLINED (decl);
}

/* Create a clone of edge E in the node N, represented by the call
   statement CALL_STMT.  */
struct cgraph_edge *
cgraph_clone_edge (struct cgraph_edge *e, struct cgraph_node *n,
		   tree call_stmt, gcov_type count_scale, int loop_nest,
		   bool update_original)
{
  struct cgraph_edge *new;

  new = cgraph_create_edge (n, e->callee, call_stmt,
			    e->count * count_scale / REG_BR_PROB_BASE,
			    e->loop_nest + loop_nest);

  new->inline_failed = e->inline_failed;
  if (update_original)
    {
      e->count -= new->count;
      if (e->count < 0)
	e->count = 0;
    }
  return new;
}

/* Create a node representing a clone of N executed COUNT times.  Decrease
   the execution counts from the original node too.

   When UPDATE_ORIGINAL is true, the counts are subtracted from the
   original function's profile to reflect the fact that part of the
   execution is handled by the node.  */
struct cgraph_node *
cgraph_clone_node (struct cgraph_node *n, gcov_type count, int loop_nest,
		   bool update_original)
{
  struct cgraph_node *new = cgraph_create_node ();
  struct cgraph_edge *e;
  gcov_type count_scale;

  new->decl = n->decl;
  new->origin = n->origin;
  if (new->origin)
    {
      new->next_nested = new->origin->nested;
      new->origin->nested = new;
    }
  new->analyzed = n->analyzed;
  new->local = n->local;
  new->global = n->global;
  new->rtl = n->rtl;
  new->master_clone = n->master_clone;
  new->count = count;
  if (n->count)
    count_scale = new->count * REG_BR_PROB_BASE / n->count;
  else
    count_scale = 0;
  if (update_original)
    {
      n->count -= count;
      if (n->count < 0)
	n->count = 0;
    }

  for (e = n->callees; e; e = e->next_callee)
    cgraph_clone_edge (e, new, e->call_stmt, count_scale, loop_nest,
		       update_original);

  new->next_clone = n->next_clone;
  new->prev_clone = n;
  n->next_clone = new;
  if (new->next_clone)
    new->next_clone->prev_clone = new;

  return new;
}
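
/* Worked example of the scaling above (made-up numbers): if the original
   node executed n->count == 1000 times and the clone is created with
   count == 250, then count_scale == 250 * REG_BR_PROB_BASE / 1000, each
   cloned edge gets e->count * count_scale / REG_BR_PROB_BASE, i.e. one
   quarter of the original edge count, and with UPDATE_ORIGINAL the
   original node keeps the remaining 750 executions.  */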

/* Return true if N is a master clone (see cgraph_master_clone).  */

bool
cgraph_is_master_clone (struct cgraph_node *n)
{
  return (n == cgraph_master_clone (n));
}

/* Return the master clone of N if its body is available, NULL otherwise.  */

struct cgraph_node *
cgraph_master_clone (struct cgraph_node *n)
{
  enum availability avail = cgraph_function_body_availability (n);

  if (avail == AVAIL_NOT_AVAILABLE || avail == AVAIL_OVERWRITABLE)
    return NULL;

  if (!n->master_clone)
    n->master_clone = cgraph_node (n->decl);

  return n->master_clone;
}

/* NODE is no longer a nested function; update the cgraph accordingly.  */
void
cgraph_unnest_node (struct cgraph_node *node)
{
  struct cgraph_node **node2;

  gcc_assert (node->origin);
  node2 = &node->origin->nested;
  while (*node2 != node)
    node2 = &(*node2)->next_nested;
  *node2 = node->next_nested;
  node->origin = NULL;
}

/* Return function availability.  See cgraph.h for a description of the
   individual return values.  */
enum availability
cgraph_function_body_availability (struct cgraph_node *node)
{
  enum availability avail;
  gcc_assert (cgraph_function_flags_ready);
  if (!node->analyzed)
    avail = AVAIL_NOT_AVAILABLE;
  else if (node->local.local)
    avail = AVAIL_LOCAL;
  else if (node->local.externally_visible)
    avail = AVAIL_AVAILABLE;

  /* If the function can be overwritten, return OVERWRITABLE.  Take care
     of at least two notable extensions - the COMDAT functions used to
     share template instantiations in C++ (this is symmetric to the code
     in cp_cannot_inline_tree_fn and probably shall be shared; the
     inlinability hooks could then be eliminated completely).

     ??? Does the C++ one definition rule allow us to always return
     AVAIL_AVAILABLE here?  That would be a good reason to preserve this
     hook.  Similarly deal with extern inline functions - this is again
     necessary to get C++ shared functions having keyed templates right,
     and in the C extension documentation we probably should document the
     requirement that both versions of the function (extern inline and
     offline) have the same side effects, as good optimization is what
     this optimization is about.  */

  else if (!(*targetm.binds_local_p) (node->decl)
	   && !DECL_COMDAT (node->decl) && !DECL_EXTERNAL (node->decl))
    avail = AVAIL_OVERWRITABLE;
  else
    avail = AVAIL_AVAILABLE;

  return avail;
}
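
/* Sketch of the intended use of the availability lattice (illustrative
   only): analyses may look at the body of anything better than
   AVAIL_NOT_AVAILABLE, but a transformation that relies on the body
   staying as analyzed needs at least AVAIL_AVAILABLE:

     if (cgraph_function_body_availability (node) >= AVAIL_AVAILABLE)
       optimize_body (node);

   where optimize_body stands for a hypothetical pass-specific helper.  */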

/* Add the function FNDECL to the call graph.
   Unlike cgraph_finalize_function, this function is intended to be used
   by the middle end and allows insertion of a new function at an
   arbitrary point of compilation.  The function can be either in high,
   low or SSA form GIMPLE.

   The function is assumed to be reachable and to have its address taken
   (so no API-breaking optimizations are performed on it).

   The main work done by this function is to enqueue the function for
   later processing, to avoid the need for the passes to be
   re-entrant.  */

void
cgraph_add_new_function (tree fndecl, bool lowered)
{
  struct cgraph_node *node;
  switch (cgraph_state)
    {
      case CGRAPH_STATE_CONSTRUCTION:
	/* Just enqueue the function to be processed at the nearest
	   occurrence.  */
	node = cgraph_node (fndecl);
	node->next_needed = cgraph_new_nodes;
	if (lowered)
	  node->lowered = true;
	cgraph_new_nodes = node;
	break;

      case CGRAPH_STATE_IPA:
      case CGRAPH_STATE_IPA_SSA:
      case CGRAPH_STATE_EXPANSION:
	/* Bring the function into finalized state and enqueue for later
	   analyzing and compilation.  */
	node = cgraph_node (fndecl);
	node->local.local = false;
	node->local.finalized = true;
	node->reachable = node->needed = true;
	if (lowered)
	  node->lowered = true;
	node->next_needed = cgraph_new_nodes;
	cgraph_new_nodes = node;
	break;

      case CGRAPH_STATE_FINISHED:
	/* At the very end of compilation we have to do all the work up
	   to expansion.  */
	push_cfun (DECL_STRUCT_FUNCTION (fndecl));
	current_function_decl = fndecl;
	tree_register_cfg_hooks ();
	if (!lowered)
	  tree_lowering_passes (fndecl);
	bitmap_obstack_initialize (NULL);
	if (!gimple_in_ssa_p (DECL_STRUCT_FUNCTION (fndecl)) && optimize)
	  execute_pass_list (pass_early_local_passes.sub);
	bitmap_obstack_release (NULL);
	tree_rest_of_compilation (fndecl);
	pop_cfun ();
	current_function_decl = NULL;
	break;
    }
}
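
/* Usage sketch (illustrative): a pass that synthesizes a new helper
   function FNDECL in GIMPLE form registers it with

     cgraph_add_new_function (fndecl, false);

   and the queueing above guarantees it is analyzed and compiled no
   matter how late in compilation this happens.  Passing true for
   LOWERED indicates the body already went through the lowering
   passes.  */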

#include "gt-cgraph.h"