Add analyzer plugin support and CPython GIL example
gcc/analyzer/engine.cc
1 /* The analysis "engine".
2 Copyright (C) 2019-2020 Free Software Foundation, Inc.
3 Contributed by David Malcolm <dmalcolm@redhat.com>.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tree.h"
25 #include "fold-const.h"
26 #include "gcc-rich-location.h"
27 #include "alloc-pool.h"
28 #include "fibonacci_heap.h"
29 #include "shortest-paths.h"
30 #include "diagnostic-core.h"
31 #include "diagnostic-event-id.h"
32 #include "diagnostic-path.h"
33 #include "function.h"
34 #include "pretty-print.h"
35 #include "sbitmap.h"
36 #include "bitmap.h"
37 #include "tristate.h"
38 #include "ordered-hash-map.h"
39 #include "selftest.h"
40 #include "json.h"
41 #include "analyzer/analyzer.h"
42 #include "analyzer/analyzer-logging.h"
43 #include "analyzer/call-string.h"
44 #include "analyzer/program-point.h"
45 #include "analyzer/store.h"
46 #include "analyzer/region-model.h"
47 #include "analyzer/constraint-manager.h"
48 #include "analyzer/sm.h"
49 #include "analyzer/pending-diagnostic.h"
50 #include "analyzer/diagnostic-manager.h"
51 #include "cfg.h"
52 #include "basic-block.h"
53 #include "gimple.h"
54 #include "gimple-iterator.h"
55 #include "gimple-pretty-print.h"
56 #include "cgraph.h"
57 #include "digraph.h"
58 #include "analyzer/supergraph.h"
59 #include "analyzer/program-state.h"
60 #include "analyzer/exploded-graph.h"
61 #include "analyzer/analysis-plan.h"
62 #include "analyzer/checker-path.h"
63 #include "analyzer/state-purge.h"
64 #include "analyzer/bar-chart.h"
65 #include <zlib.h>
66 #include "plugin.h"
67
68 /* For an overview, see gcc/doc/analyzer.texi. */
69
70 #if ENABLE_ANALYZER
71
72 namespace ana {
73
74 /* class impl_region_model_context : public region_model_context. */
75
76 impl_region_model_context::
77 impl_region_model_context (exploded_graph &eg,
78 const exploded_node *enode_for_diag,
79 const program_state *old_state,
80 program_state *new_state,
81 const gimple *stmt,
82 stmt_finder *stmt_finder)
83 : m_eg (&eg), m_logger (eg.get_logger ()),
84 m_enode_for_diag (enode_for_diag),
85 m_old_state (old_state),
86 m_new_state (new_state),
87 m_stmt (stmt),
88 m_stmt_finder (stmt_finder),
89 m_ext_state (eg.get_ext_state ())
90 {
91 }
92
93 impl_region_model_context::
94 impl_region_model_context (program_state *state,
95 const extrinsic_state &ext_state,
96 logger *logger)
97 : m_eg (NULL), m_logger (logger), m_enode_for_diag (NULL),
98 m_old_state (NULL),
99 m_new_state (state),
100 m_stmt (NULL),
101 m_stmt_finder (NULL),
102 m_ext_state (ext_state)
103 {
104 }
105
106 void
107 impl_region_model_context::warn (pending_diagnostic *d)
108 {
109 LOG_FUNC (get_logger ());
110 if (m_eg)
111 m_eg->get_diagnostic_manager ().add_diagnostic
112 (m_enode_for_diag, m_enode_for_diag->get_supernode (),
113 m_stmt, m_stmt_finder, d);
114 }
115
116 void
117 impl_region_model_context::on_svalue_leak (const svalue *sval)
118
119 {
120 int sm_idx;
121 sm_state_map *smap;
122 FOR_EACH_VEC_ELT (m_new_state->m_checker_states, sm_idx, smap)
123 smap->on_svalue_leak (sval, this);
124 }
125
126 void
127 impl_region_model_context::
128 on_liveness_change (const svalue_set &live_svalues,
129 const region_model *model)
130 {
131 int sm_idx;
132 sm_state_map *smap;
133 FOR_EACH_VEC_ELT (m_new_state->m_checker_states, sm_idx, smap)
134 smap->on_liveness_change (live_svalues, model, this);
135 }
136
137 void
138 impl_region_model_context::on_unknown_change (const svalue *sval,
139 bool is_mutable)
140 {
141 int sm_idx;
142 sm_state_map *smap;
143 FOR_EACH_VEC_ELT (m_new_state->m_checker_states, sm_idx, smap)
144 smap->on_unknown_change (sval, is_mutable, m_ext_state);
145 }
146
147 void
148 impl_region_model_context::on_escaped_function (tree fndecl)
149 {
150 m_eg->on_escaped_function (fndecl);
151 }
152
153 /* struct setjmp_record. */
154
155 int
156 setjmp_record::cmp (const setjmp_record &rec1, const setjmp_record &rec2)
157 {
158 if (int cmp_enode = rec1.m_enode->m_index - rec2.m_enode->m_index)
159 return cmp_enode;
160 gcc_assert (&rec1 == &rec2);
161 return 0;
162 }
163
164 /* class setjmp_svalue : public svalue. */
165
166 /* Implementation of svalue::accept vfunc for setjmp_svalue. */
167
168 void
169 setjmp_svalue::accept (visitor *v) const
170 {
171 v->visit_setjmp_svalue (this);
172 }
173
174 /* Implementation of svalue::dump_to_pp vfunc for setjmp_svalue. */
175
176 void
177 setjmp_svalue::dump_to_pp (pretty_printer *pp, bool simple) const
178 {
179 if (simple)
180 pp_printf (pp, "SETJMP(EN: %i)", get_enode_index ());
181 else
182 pp_printf (pp, "setjmp_svalue(EN%i)", get_enode_index ());
183 }
184
185 /* Get the index of the stored exploded_node. */
186
187 int
188 setjmp_svalue::get_enode_index () const
189 {
190 return m_setjmp_record.m_enode->m_index;
191 }
192
193 /* Concrete implementation of sm_context, wiring it up to the rest of this
194 file. */
195
196 class impl_sm_context : public sm_context
197 {
198 public:
199 impl_sm_context (exploded_graph &eg,
200 int sm_idx,
201 const state_machine &sm,
202 const exploded_node *enode_for_diag,
203 const program_state *old_state,
204 program_state *new_state,
205 const sm_state_map *old_smap,
206 sm_state_map *new_smap,
207 stmt_finder *stmt_finder = NULL)
208 : sm_context (sm_idx, sm),
209 m_logger (eg.get_logger ()),
210 m_eg (eg), m_enode_for_diag (enode_for_diag),
211 m_old_state (old_state), m_new_state (new_state),
212 m_old_smap (old_smap), m_new_smap (new_smap),
213 m_stmt_finder (stmt_finder)
214 {
215 }
216
217 logger *get_logger () const { return m_logger.get_logger (); }
218
219 tree get_fndecl_for_call (const gcall *call) FINAL OVERRIDE
220 {
221 impl_region_model_context old_ctxt
222 (m_eg, m_enode_for_diag, NULL, NULL/*m_enode->get_state ()*/,
223 call);
224 region_model *model = m_new_state->m_region_model;
225 return model->get_fndecl_for_call (call, &old_ctxt);
226 }
227
228 state_machine::state_t get_state (const gimple *stmt,
229 tree var)
230 {
231 logger * const logger = get_logger ();
232 LOG_FUNC (logger);
233 impl_region_model_context old_ctxt
234 (m_eg, m_enode_for_diag, NULL, NULL/*m_enode->get_state ()*/,
235 stmt);
236 const svalue *var_old_sval
237 = m_old_state->m_region_model->get_rvalue (var, &old_ctxt);
238
239 state_machine::state_t current
240 = m_old_smap->get_state (var_old_sval, m_eg.get_ext_state ());
241 return current;
242 }
243
244 void set_next_state (const gimple *stmt,
245 tree var,
246 state_machine::state_t to,
247 tree origin)
248 {
249 logger * const logger = get_logger ();
250 LOG_FUNC (logger);
251 impl_region_model_context old_ctxt
252 (m_eg, m_enode_for_diag, NULL, NULL/*m_enode->get_state ()*/,
253 stmt);
254 const svalue *var_old_sval
255 = m_old_state->m_region_model->get_rvalue (var, &old_ctxt);
256
257 impl_region_model_context new_ctxt (m_eg, m_enode_for_diag,
258 m_old_state, m_new_state,
259 stmt);
260 const svalue *var_new_sval
261 = m_new_state->m_region_model->get_rvalue (var, &new_ctxt);
262 const svalue *origin_new_sval
263 = m_new_state->m_region_model->get_rvalue (origin, &new_ctxt);
264
265 state_machine::state_t current
266 = m_old_smap->get_state (var_old_sval, m_eg.get_ext_state ());
267 if (logger)
268 logger->log ("%s: state transition of %qE: %s -> %s",
269 m_sm.get_name (),
270 var,
271 current->get_name (),
272 to->get_name ());
273 m_new_smap->set_state (m_new_state->m_region_model, var_new_sval,
274 to, origin_new_sval, m_eg.get_ext_state ());
275 }
276
277 void warn (const supernode *snode, const gimple *stmt,
278 tree var, pending_diagnostic *d) FINAL OVERRIDE
279 {
280 LOG_FUNC (get_logger ());
281 gcc_assert (d); // take ownership
282 impl_region_model_context old_ctxt
283 (m_eg, m_enode_for_diag, m_old_state, m_new_state, NULL);
284
285 const svalue *var_old_sval
286 = m_old_state->m_region_model->get_rvalue (var, &old_ctxt);
287 state_machine::state_t current
288 = (var
289 ? m_old_smap->get_state (var_old_sval, m_eg.get_ext_state ())
290 : m_old_smap->get_global_state ());
291 m_eg.get_diagnostic_manager ().add_diagnostic
292 (&m_sm, m_enode_for_diag, snode, stmt, m_stmt_finder,
293 var, var_old_sval, current, d);
294 }
295
296 /* Hook for picking more readable trees for SSA names of temporaries,
297 so that rather than e.g.
298 "double-free of '<unknown>'"
299 we can print:
300 "double-free of 'inbuf.data'". */
301
302 tree get_diagnostic_tree (tree expr) FINAL OVERRIDE
303 {
304 /* Only for SSA_NAMEs of temporaries; otherwise, return EXPR, as it's
305 likely to be the least surprising tree to report. */
306 if (TREE_CODE (expr) != SSA_NAME)
307 return expr;
308 if (SSA_NAME_VAR (expr) != NULL)
309 return expr;
310
311 gcc_assert (m_new_state);
312 const svalue *sval = m_new_state->m_region_model->get_rvalue (expr, NULL);
313 /* Find trees for all regions storing the value. */
314 if (tree t = m_new_state->m_region_model->get_representative_tree (sval))
315 return t;
316 else
317 return expr;
318 }
319
320 state_machine::state_t get_global_state () const FINAL OVERRIDE
321 {
322 return m_old_state->m_checker_states[m_sm_idx]->get_global_state ();
323 }
324
325 void set_global_state (state_machine::state_t state) FINAL OVERRIDE
326 {
327 m_new_state->m_checker_states[m_sm_idx]->set_global_state (state);
328 }
329
330 void on_custom_transition (custom_transition *transition) FINAL OVERRIDE
331 {
332 transition->impl_transition (&m_eg,
333 const_cast<exploded_node *> (m_enode_for_diag),
334 m_sm_idx);
335 }
336
337 tree is_zero_assignment (const gimple *stmt) FINAL OVERRIDE
338 {
339 const gassign *assign_stmt = dyn_cast <const gassign *> (stmt);
340 if (!assign_stmt)
341 return NULL_TREE;
342 impl_region_model_context old_ctxt
343 (m_eg, m_enode_for_diag, m_old_state, m_new_state, stmt);
344 if (const svalue *sval
345 = m_new_state->m_region_model->get_gassign_result (assign_stmt,
346 &old_ctxt))
347 if (tree cst = sval->maybe_get_constant ())
 348 	  if (::zerop (cst))
349 return gimple_assign_lhs (assign_stmt);
350 return NULL_TREE;
351 }
352
353 log_user m_logger;
354 exploded_graph &m_eg;
355 const exploded_node *m_enode_for_diag;
356 const program_state *m_old_state;
357 program_state *m_new_state;
358 const sm_state_map *m_old_smap;
359 sm_state_map *m_new_smap;
360 stmt_finder *m_stmt_finder;
361 };
362
363 /* Subclass of stmt_finder for finding the best stmt to report the leak at,
364 given the emission path. */
365
366 class leak_stmt_finder : public stmt_finder
367 {
368 public:
369 leak_stmt_finder (const exploded_graph &eg, tree var)
370 : m_eg (eg), m_var (var) {}
371
372 stmt_finder *clone () const FINAL OVERRIDE
373 {
374 return new leak_stmt_finder (m_eg, m_var);
375 }
376
377 const gimple *find_stmt (const exploded_path &epath)
378 FINAL OVERRIDE
379 {
380 logger * const logger = m_eg.get_logger ();
381 LOG_FUNC (logger);
382
383 if (m_var && TREE_CODE (m_var) == SSA_NAME)
384 {
385 /* Locate the final write to this SSA name in the path. */
386 const gimple *def_stmt = SSA_NAME_DEF_STMT (m_var);
387
388 int idx_of_def_stmt;
389 bool found = epath.find_stmt_backwards (def_stmt, &idx_of_def_stmt);
390 if (!found)
391 goto not_found;
392
393 /* What was the next write to the underlying var
394 after the SSA name was set? (if any). */
395
396 for (unsigned idx = idx_of_def_stmt + 1;
397 idx < epath.m_edges.length ();
398 ++idx)
399 {
400 const exploded_edge *eedge = epath.m_edges[idx];
401 if (logger)
402 logger->log ("eedge[%i]: EN %i -> EN %i",
403 idx,
404 eedge->m_src->m_index,
405 eedge->m_dest->m_index);
406 const exploded_node *dst_node = eedge->m_dest;
407 const program_point &dst_point = dst_node->get_point ();
408 const gimple *stmt = dst_point.get_stmt ();
409 if (!stmt)
410 continue;
411 if (const gassign *assign = dyn_cast <const gassign *> (stmt))
412 {
413 tree lhs = gimple_assign_lhs (assign);
414 if (TREE_CODE (lhs) == SSA_NAME
415 && SSA_NAME_VAR (lhs) == SSA_NAME_VAR (m_var))
416 return assign;
417 }
418 }
419 }
420
421 not_found:
422
423 /* Look backwards for the first statement with a location. */
424 int i;
425 const exploded_edge *eedge;
426 FOR_EACH_VEC_ELT_REVERSE (epath.m_edges, i, eedge)
427 {
428 if (logger)
429 logger->log ("eedge[%i]: EN %i -> EN %i",
430 i,
431 eedge->m_src->m_index,
432 eedge->m_dest->m_index);
433 const exploded_node *dst_node = eedge->m_dest;
434 const program_point &dst_point = dst_node->get_point ();
435 const gimple *stmt = dst_point.get_stmt ();
436 if (stmt)
437 if (get_pure_location (stmt->location) != UNKNOWN_LOCATION)
438 return stmt;
439 }
440
441 gcc_unreachable ();
442 return NULL;
443 }
444
445 private:
446 const exploded_graph &m_eg;
447 tree m_var;
448 };
449
450 /* A measurement of how good EXPR is for presenting to the user, so
451 that e.g. we can say prefer printing
452 "leak of 'tmp.m_ptr'"
453 over:
454 "leak of '<unknown>'". */
455
456 static int
457 readability (const_tree expr)
458 {
459 gcc_assert (expr);
460 switch (TREE_CODE (expr))
461 {
462 case COMPONENT_REF:
463 case MEM_REF:
464 /* Impose a slight readability penalty relative to that of
465 operand 0. */
466 return readability (TREE_OPERAND (expr, 0)) - 16;
467
468 case SSA_NAME:
469 {
470 if (tree var = SSA_NAME_VAR (expr))
471 /* Slightly favor the underlying var over the SSA name to
472 avoid having them compare equal. */
473 return readability (var) - 1;
474 /* Avoid printing '<unknown>' for SSA names for temporaries. */
475 return -1;
476 }
477 break;
478
479 case PARM_DECL:
480 case VAR_DECL:
481 if (DECL_NAME (expr))
482 /* Arbitrarily-chosen "high readability" value. */
483 return 65536;
484 else
485 /* We don't want to print temporaries. For example, the C FE
486 prints them as e.g. "<Uxxxx>" where "xxxx" is the low 16 bits
487 of the tree pointer (see pp_c_tree_decl_identifier). */
488 return -1;
489
490 case RESULT_DECL:
491 /* Printing "<return-value>" isn't ideal, but is less awful than
492 trying to print a temporary. */
493 return 32768;
494
495 default:
496 return 0;
497 }
498
499 return 0;
500 }
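
/* A worked example of the scoring above (hypothetical values, reusing the
   "inbuf.data" naming from the comment in impl_sm_context): for a
   COMPONENT_REF "inbuf.data" whose operand 0 is the named VAR_DECL "inbuf",
     readability (inbuf.data) == readability (inbuf) - 16 == 65536 - 16 == 65520,
   whereas an anonymous SSA temporary scores -1, so "inbuf.data" is strongly
   preferred when choosing a tree to present to the user.  */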
501
502 /* A qsort comparator for trees to sort them into most user-readable to
503 least user-readable. */
504
505 int
506 readability_comparator (const void *p1, const void *p2)
507 {
508 path_var pv1 = *(path_var const *)p1;
509 path_var pv2 = *(path_var const *)p2;
510
511 int r1 = readability (pv1.m_tree);
512 int r2 = readability (pv2.m_tree);
513 if (int cmp = r2 - r1)
514 return cmp;
515
516 /* Favor items that are deeper on the stack and hence more recent;
517 this also favors locals over globals. */
518 if (int cmp = pv2.m_stack_depth - pv1.m_stack_depth)
519 return cmp;
520
521 /* Otherwise, if they have the same readability, then impose an
522 arbitrary deterministic ordering on them. */
523
524 if (int cmp = TREE_CODE (pv1.m_tree) - TREE_CODE (pv2.m_tree))
525 return cmp;
526
527 switch (TREE_CODE (pv1.m_tree))
528 {
529 default:
530 break;
531 case SSA_NAME:
532 if (int cmp = (SSA_NAME_VERSION (pv1.m_tree)
533 - SSA_NAME_VERSION (pv2.m_tree)))
534 return cmp;
535 break;
536 case PARM_DECL:
537 case VAR_DECL:
538 case RESULT_DECL:
539 if (int cmp = DECL_UID (pv1.m_tree) - DECL_UID (pv2.m_tree))
540 return cmp;
541 break;
542 }
543
544 /* TODO: We ought to find ways of sorting such cases. */
545 return 0;
546 }
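
/* Usage sketch (assumed; the caller and variable name here are hypothetical):
   candidate path_vars are expected to be sorted via the vec qsort member, e.g.
     candidate_pvs.qsort (readability_comparator);
   so that candidate_pvs[0] ends up being the most user-readable choice.  */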
547
548 /* Find the best tree for SVAL and call SM's on_leak vfunc with it.
549 If on_leak returns a pending_diagnostic, queue it up to be reported,
550 so that we potentially complain about a leak of SVAL in the given STATE. */
551
552 void
553 impl_region_model_context::on_state_leak (const state_machine &sm,
554 const svalue *sval,
555 state_machine::state_t state)
556 {
557 logger * const logger = get_logger ();
558 LOG_SCOPE (logger);
559 if (logger)
560 {
561 logger->start_log_line ();
562 logger->log_partial ("considering leak of ");
563 sval->dump_to_pp (logger->get_printer (), true);
564 logger->end_log_line ();
565 }
566
567 if (!m_eg)
568 return;
569
570 /* m_old_state also needs to be non-NULL so that the sm_ctxt can look
571 up the old state of SVAL. */
572 gcc_assert (m_old_state);
573
574 /* SVAL has leaked within the new state: it is not used by any reachable
575 regions.
576 We need to convert it back to a tree, but since it's likely no regions
577 use it, we have to find the "best" tree for it in the old_state. */
578 svalue_set visited;
579 path_var leaked_pv
580 = m_old_state->m_region_model->get_representative_path_var (sval,
581 &visited);
582
583 /* This might be NULL; the pending_diagnostic subclasses need to cope
584 with this. */
585 tree leaked_tree = leaked_pv.m_tree;
586 if (logger)
587 {
588 if (leaked_tree)
589 logger->log ("best leaked_tree: %qE", leaked_tree);
590 else
591 logger->log ("best leaked_tree: NULL");
592 }
593
594 leak_stmt_finder stmt_finder (*m_eg, leaked_tree);
595 gcc_assert (m_enode_for_diag);
596
597 /* Don't complain about leaks when returning from "main". */
598 if (m_enode_for_diag->get_supernode ()
599 && m_enode_for_diag->get_supernode ()->return_p ())
600 {
601 tree fndecl = m_enode_for_diag->get_function ()->decl;
602 if (id_equal (DECL_NAME (fndecl), "main"))
603 {
604 if (logger)
605 logger->log ("not reporting leak from main");
606 return;
607 }
608 }
609
610 pending_diagnostic *pd = sm.on_leak (leaked_tree);
611 if (pd)
612 m_eg->get_diagnostic_manager ().add_diagnostic
613 (&sm, m_enode_for_diag, m_enode_for_diag->get_supernode (),
614 m_stmt, &stmt_finder,
615 leaked_tree, sval, state, pd);
616 }
617
618 /* Implementation of region_model_context::on_condition vfunc.
619 Notify all state machines about the condition, which could lead to
620 state transitions. */
621
622 void
623 impl_region_model_context::on_condition (tree lhs, enum tree_code op, tree rhs)
624 {
625 int sm_idx;
626 sm_state_map *smap;
627 FOR_EACH_VEC_ELT (m_new_state->m_checker_states, sm_idx, smap)
628 {
629 const state_machine &sm = m_ext_state.get_sm (sm_idx);
630 impl_sm_context sm_ctxt (*m_eg, sm_idx, sm, m_enode_for_diag,
631 m_old_state, m_new_state,
632 m_old_state->m_checker_states[sm_idx],
633 m_new_state->m_checker_states[sm_idx]);
634 sm.on_condition (&sm_ctxt,
635 m_enode_for_diag->get_supernode (), m_stmt,
636 lhs, op, rhs);
637 }
638 }
639
640 /* Implementation of region_model_context::on_phi vfunc.
641 Notify all state machines about the phi, which could lead to
642 state transitions. */
643
644 void
645 impl_region_model_context::on_phi (const gphi *phi, tree rhs)
646 {
647 int sm_idx;
648 sm_state_map *smap;
649 FOR_EACH_VEC_ELT (m_new_state->m_checker_states, sm_idx, smap)
650 {
651 const state_machine &sm = m_ext_state.get_sm (sm_idx);
652 impl_sm_context sm_ctxt (*m_eg, sm_idx, sm, m_enode_for_diag,
653 m_old_state, m_new_state,
654 m_old_state->m_checker_states[sm_idx],
655 m_new_state->m_checker_states[sm_idx]);
656 sm.on_phi (&sm_ctxt, m_enode_for_diag->get_supernode (), phi, rhs);
657 }
658 }
659
660 /* Implementation of region_model_context::on_unexpected_tree_code vfunc.
661 Mark the new state as being invalid for further exploration.
662 TODO(stage1): introduce a warning for when this occurs. */
663
664 void
665 impl_region_model_context::on_unexpected_tree_code (tree t,
666 const dump_location_t &loc)
667 {
668 logger * const logger = get_logger ();
669 if (logger)
670 logger->log ("unhandled tree code: %qs in %qs at %s:%i",
671 get_tree_code_name (TREE_CODE (t)),
672 loc.get_impl_location ().m_function,
673 loc.get_impl_location ().m_file,
674 loc.get_impl_location ().m_line);
675 if (m_new_state)
676 m_new_state->m_valid = false;
677 }
678
679 /* struct point_and_state. */
680
681 /* Assert that this object is sane. */
682
683 void
684 point_and_state::validate (const extrinsic_state &ext_state) const
685 {
686 /* Skip this in a release build. */
687 #if !CHECKING_P
688 return;
689 #endif
690
691 m_point.validate ();
692
693 m_state.validate (ext_state);
694
695 /* Verify that the callstring's model of the stack corresponds to that
696 of the region_model. */
697 /* They should have the same depth. */
698 gcc_assert (m_point.get_stack_depth ()
699 == m_state.m_region_model->get_stack_depth ());
700 /* Check the functions in the callstring vs those in the frames
701 at each depth. */
702 for (const frame_region *iter_frame
703 = m_state.m_region_model->get_current_frame ();
704 iter_frame; iter_frame = iter_frame->get_calling_frame ())
705 {
706 int index = iter_frame->get_index ();
707 gcc_assert (m_point.get_function_at_depth (index)
708 == iter_frame->get_function ());
709 }
710 }
711
712 /* Subroutine of print_enode_indices: print a run of indices from START_IDX
713 to END_IDX to PP, using and updating *FIRST_RUN. */
714
715 static void
716 print_run (pretty_printer *pp, int start_idx, int end_idx,
717 bool *first_run)
718 {
719 if (!(*first_run))
720 pp_string (pp, ", ");
721 *first_run = false;
722 if (start_idx == end_idx)
723 pp_printf (pp, "EN: %i", start_idx);
724 else
725 pp_printf (pp, "EN: %i-%i", start_idx, end_idx);
726 }
727
728 /* Print the indices within ENODES to PP, collecting them as
729 runs/singletons e.g. "EN: 4-7, EN: 20-23, EN: 42". */
730
731 static void
732 print_enode_indices (pretty_printer *pp,
733 const auto_vec<exploded_node *> &enodes)
734 {
735 int cur_start_idx = -1;
736 int cur_finish_idx = -1;
737 bool first_run = true;
738 unsigned i;
739 exploded_node *enode;
740 FOR_EACH_VEC_ELT (enodes, i, enode)
741 {
742 if (cur_start_idx == -1)
743 {
744 gcc_assert (cur_finish_idx == -1);
745 cur_start_idx = cur_finish_idx = enode->m_index;
746 }
747 else
748 {
749 if (enode->m_index == cur_finish_idx + 1)
750 /* Continuation of a run. */
751 cur_finish_idx = enode->m_index;
752 else
753 {
754 /* Finish existing run, start a new one. */
755 gcc_assert (cur_start_idx >= 0);
756 gcc_assert (cur_finish_idx >= 0);
757 print_run (pp, cur_start_idx, cur_finish_idx,
758 &first_run);
759 cur_start_idx = cur_finish_idx = enode->m_index;
760 }
761 }
762 }
763 /* Finish any existing run. */
764 if (cur_start_idx >= 0)
765 {
766 gcc_assert (cur_finish_idx >= 0);
767 print_run (pp, cur_start_idx, cur_finish_idx,
768 &first_run);
769 }
770 }
771
772 /* struct eg_traits::dump_args_t. */
773
774 /* The <FILENAME>.eg.dot output can quickly become unwieldy if we show
775 full details for all enodes (both in terms of CPU time to render it,
776 and in terms of being meaningful to a human viewing it).
777
778 If we show just the IDs then the resulting graph is usually viewable,
779 but then we have to keep switching back and forth between the .dot
780 view and other dumps.
781
782 This function implements a heuristic for showing detail at the enodes
783 that (we hope) matter, and just the ID at other enodes, fixing the CPU
784 usage of the .dot viewer, and drawing the attention of the viewer
785 to these enodes.
786
787 Return true if ENODE should be shown in detail in .dot output.
788 Return false if no detail should be shown for ENODE. */
789
790 bool
791 eg_traits::dump_args_t::show_enode_details_p (const exploded_node &enode) const
792 {
793 /* If the number of exploded nodes isn't too large, we may as well show
794 all enodes in full detail in the .dot output. */
795 if (m_eg.m_nodes.length ()
796 <= (unsigned) param_analyzer_max_enodes_for_full_dump)
797 return true;
798
799 /* Otherwise, assume that what's most interesting are state explosions,
800 and thus the places where this happened.
801 Expand enodes at program points where we hit the per-enode limit, so we
802 can investigate what exploded. */
803 const per_program_point_data *per_point_data
804 = m_eg.get_per_program_point_data (enode.get_point ());
805 return per_point_data->m_excess_enodes > 0;
806 }
807
808 /* class exploded_node : public dnode<eg_traits>. */
809
810 const char *
811 exploded_node::status_to_str (enum status s)
812 {
813 switch (s)
814 {
815 default: gcc_unreachable ();
816 case STATUS_WORKLIST: return "WORKLIST";
817 case STATUS_PROCESSED: return "PROCESSED";
818 case STATUS_MERGER: return "MERGER";
819 case STATUS_BULK_MERGED: return "BULK_MERGED";
820 }
821 }
822
823 /* exploded_node's ctor. */
824
825 exploded_node::exploded_node (const point_and_state &ps,
826 int index)
827 : m_ps (ps), m_status (STATUS_WORKLIST), m_index (index),
828 m_num_processed_stmts (0)
829 {
830 gcc_checking_assert (ps.get_state ().m_region_model->canonicalized_p ());
831 }
832
833 /* Get the stmt that was processed in this enode at index IDX.
834 IDX is an index within the stmts processed at this enode, rather
835 than within those of the supernode. */
836
837 const gimple *
838 exploded_node::get_processed_stmt (unsigned idx) const
839 {
840 gcc_assert (idx < m_num_processed_stmts);
841 const program_point &point = get_point ();
842 gcc_assert (point.get_kind () == PK_BEFORE_STMT);
843 const supernode *snode = get_supernode ();
844 const unsigned int point_stmt_idx = point.get_stmt_idx ();
845 const unsigned int idx_within_snode = point_stmt_idx + idx;
846 const gimple *stmt = snode->m_stmts[idx_within_snode];
847 return stmt;
848 }
849
850 /* For use by dump_dot, get a value for the .dot "fillcolor" attribute.
851 Colorize by sm-state, to make it easier to see how sm-state propagates
852 through the exploded_graph. */
853
854 const char *
855 exploded_node::get_dot_fillcolor () const
856 {
857 const program_state &state = get_state ();
858
859 /* We want to be able to easily distinguish the no-sm-state case,
860 and to be able to distinguish cases where there's a single state
861 from each other.
862
863 Sum the sm_states, and use the result to choose from a table,
864 modulo table-size, special-casing the "no sm-state" case. */
865 int total_sm_state = 0;
866 int i;
867 sm_state_map *smap;
868 FOR_EACH_VEC_ELT (state.m_checker_states, i, smap)
869 {
870 for (sm_state_map::iterator_t iter = smap->begin ();
871 iter != smap->end ();
872 ++iter)
873 total_sm_state += (*iter).second.m_state->get_id ();
874 total_sm_state += smap->get_global_state ()->get_id ();
875 }
876
877 if (total_sm_state > 0)
878 {
879 /* An arbitrarily-picked collection of light colors. */
880 const char * const colors[]
881 = {"azure", "coral", "cornsilk", "lightblue", "yellow",
882 "honeydew", "lightpink", "lightsalmon", "palegreen1",
883 "wheat", "seashell"};
884 const int num_colors = sizeof (colors) / sizeof (colors[0]);
885 return colors[total_sm_state % num_colors];
886 }
887 else
888 /* No sm-state. */
889 return "lightgrey";
890 }
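
/* Purely illustrative example: with the 11-entry table above, an enode whose
   sm-state ids sum to 13 is drawn with colors[13 % 11], i.e. "cornsilk",
   whereas an enode with no sm-state at all is drawn in "lightgrey".  */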
891
892 /* Implementation of dnode::dump_dot vfunc for exploded_node. */
893
894 void
895 exploded_node::dump_dot (graphviz_out *gv, const dump_args_t &args) const
896 {
897 pretty_printer *pp = gv->get_pp ();
898
899 dump_dot_id (pp);
900 pp_printf (pp, " [shape=none,margin=0,style=filled,fillcolor=%s,label=\"",
901 get_dot_fillcolor ());
902 pp_write_text_to_stream (pp);
903
904 pp_printf (pp, "EN: %i", m_index);
905 if (m_status == STATUS_MERGER)
906 pp_string (pp, " (merger)");
907 else if (m_status == STATUS_BULK_MERGED)
908 pp_string (pp, " (bulk merged)");
909 pp_newline (pp);
910
911 if (args.show_enode_details_p (*this))
912 {
913 format f (true);
914 m_ps.get_point ().print (pp, f);
915 pp_newline (pp);
916
917 const extrinsic_state &ext_state = args.m_eg.get_ext_state ();
918 const program_state &state = m_ps.get_state ();
919 state.dump_to_pp (ext_state, false, true, pp);
920 pp_newline (pp);
921
922 /* Show any stmts that were processed within this enode,
923 and their index within the supernode. */
924 if (m_num_processed_stmts > 0)
925 {
926 const program_point &point = get_point ();
927 gcc_assert (point.get_kind () == PK_BEFORE_STMT);
928 const supernode *snode = get_supernode ();
929 const unsigned int point_stmt_idx = point.get_stmt_idx ();
930
931 pp_printf (pp, "stmts: %i", m_num_processed_stmts);
932 pp_newline (pp);
933 for (unsigned i = 0; i < m_num_processed_stmts; i++)
934 {
935 const unsigned int idx_within_snode = point_stmt_idx + i;
936 const gimple *stmt = snode->m_stmts[idx_within_snode];
937 pp_printf (pp, " %i: ", idx_within_snode);
938 pp_gimple_stmt_1 (pp, stmt, 0, (dump_flags_t)0);
939 pp_newline (pp);
940 }
941 }
942 }
943
944 /* Dump any saved_diagnostics at this enode. */
945 {
946 const diagnostic_manager &dm = args.m_eg.get_diagnostic_manager ();
947 for (unsigned i = 0; i < dm.get_num_diagnostics (); i++)
948 {
949 const saved_diagnostic *sd = dm.get_saved_diagnostic (i);
950 if (sd->m_enode == this)
951 {
952 pp_printf (pp, "DIAGNOSTIC: %s", sd->m_d->get_kind ());
953 pp_newline (pp);
954 }
955 }
956 }
957
958 pp_write_text_as_dot_label_to_stream (pp, /*for_record=*/true);
959
960 pp_string (pp, "\"];\n\n");
961 pp_flush (pp);
962 }
963
964 /* Dump this to PP in a form suitable for use as an id in .dot output. */
965
966 void
967 exploded_node::dump_dot_id (pretty_printer *pp) const
968 {
969 pp_printf (pp, "exploded_node_%i", m_index);
970 }
971
972 /* Dump a multiline representation of this node to PP. */
973
974 void
975 exploded_node::dump_to_pp (pretty_printer *pp,
976 const extrinsic_state &ext_state) const
977 {
978 pp_printf (pp, "EN: %i", m_index);
979 pp_newline (pp);
980
981 format f (true);
982 m_ps.get_point ().print (pp, f);
983 pp_newline (pp);
984
985 m_ps.get_state ().dump_to_pp (ext_state, false, true, pp);
986 pp_newline (pp);
987 }
988
989 /* Dump a multiline representation of this node to FILE. */
990
991 void
992 exploded_node::dump (FILE *fp,
993 const extrinsic_state &ext_state) const
994 {
995 pretty_printer pp;
996 pp_format_decoder (&pp) = default_tree_printer;
997 pp_show_color (&pp) = pp_show_color (global_dc->printer);
998 pp.buffer->stream = fp;
999 dump_to_pp (&pp, ext_state);
1000 pp_flush (&pp);
1001 }
1002
1003 /* Dump a multiline representation of this node to stderr. */
1004
1005 DEBUG_FUNCTION void
1006 exploded_node::dump (const extrinsic_state &ext_state) const
1007 {
1008 dump (stderr, ext_state);
1009 }
1010
1011 /* Return a new json::object of the form
1012 {"point" : object for program_point,
1013 "state" : object for program_state,
1014 "status" : str,
1015 "idx" : int,
1016 "processed_stmts" : int}. */
1017
1018 json::object *
1019 exploded_node::to_json (const extrinsic_state &ext_state) const
1020 {
1021 json::object *enode_obj = new json::object ();
1022
1023 enode_obj->set ("point", get_point ().to_json ());
1024 enode_obj->set ("state", get_state ().to_json (ext_state));
1025 enode_obj->set ("status", new json::string (status_to_str (m_status)));
1026 enode_obj->set ("idx", new json::integer_number (m_index));
1027 enode_obj->set ("processed_stmts",
1028 new json::integer_number (m_num_processed_stmts));
1029
1030 return enode_obj;
1031 }
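
/* Illustrative only (the values below are hypothetical): the resulting
   object might print as
     {"point": {...}, "state": {...},
      "status": "WORKLIST", "idx": 42, "processed_stmts": 0}
   where "point" and "state" are the nested objects produced by
   program_point::to_json and program_state::to_json respectively.  */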
1032
1033 } // namespace ana
1034
1035 /* Return true if FNDECL has a gimple body. */
1036 // TODO: is there a pre-canned way to do this?
1037
1038 bool
1039 fndecl_has_gimple_body_p (tree fndecl)
1040 {
1041 if (fndecl == NULL_TREE)
1042 return false;
1043
1044 cgraph_node *n = cgraph_node::get (fndecl);
1045 if (!n)
1046 return false;
1047
1048 return n->has_gimple_body_p ();
1049 }
1050
1051 namespace ana {
1052
1053 /* A pending_diagnostic subclass for implementing "__analyzer_dump_path". */
1054
1055 class dump_path_diagnostic
1056 : public pending_diagnostic_subclass<dump_path_diagnostic>
1057 {
1058 public:
1059 bool emit (rich_location *richloc) FINAL OVERRIDE
1060 {
1061 inform (richloc, "path");
1062 return true;
1063 }
1064
1065 const char *get_kind () const FINAL OVERRIDE { return "dump_path_diagnostic"; }
1066
1067 bool operator== (const dump_path_diagnostic &) const
1068 {
1069 return true;
1070 }
1071 };
1072
1073 /* Modify STATE in place, applying the effects of the stmt at this node's
1074 point. */
1075
1076 exploded_node::on_stmt_flags
1077 exploded_node::on_stmt (exploded_graph &eg,
1078 const supernode *snode,
1079 const gimple *stmt,
1080 program_state *state) const
1081 {
1082 logger *logger = eg.get_logger ();
1083 LOG_SCOPE (logger);
1084 if (logger)
1085 {
1086 logger->start_log_line ();
1087 pp_gimple_stmt_1 (logger->get_printer (), stmt, 0, (dump_flags_t)0);
1088 logger->end_log_line ();
1089 }
1090
1091 /* Update input_location in case of ICE: make it easier to track down which
1092 source construct we're failing to handle. */
1093 input_location = stmt->location;
1094
1095 gcc_assert (state->m_region_model);
1096
1097 /* Preserve the old state. It is used here for looking
1098 up old checker states, for determining state transitions, and
1099 also within impl_region_model_context and impl_sm_context for
1100 going from tree to svalue_id. */
1101 const program_state old_state (*state);
1102
1103 impl_region_model_context ctxt (eg, this,
1104 &old_state, state,
1105 stmt);
1106
1107 bool unknown_side_effects = false;
1108
1109 switch (gimple_code (stmt))
1110 {
1111 default:
1112 /* No-op for now. */
1113 break;
1114
1115 case GIMPLE_ASSIGN:
1116 {
1117 const gassign *assign = as_a <const gassign *> (stmt);
1118 state->m_region_model->on_assignment (assign, &ctxt);
1119 }
1120 break;
1121
1122 case GIMPLE_ASM:
1123 /* No-op for now. */
1124 break;
1125
1126 case GIMPLE_CALL:
1127 {
1128 /* Track whether we have a gcall to a function that's not recognized by
1129 anything, for which we don't have a function body, or for which we
1130 don't know the fndecl. */
1131 const gcall *call = as_a <const gcall *> (stmt);
1132
1133 /* Debugging/test support. */
1134 if (is_special_named_call_p (call, "__analyzer_describe", 2))
1135 state->m_region_model->impl_call_analyzer_describe (call, &ctxt);
1136 else if (is_special_named_call_p (call, "__analyzer_dump", 0))
1137 {
1138 /* Handle the builtin "__analyzer_dump" by dumping state
1139 to stderr. */
1140 state->dump (eg.get_ext_state (), true);
1141 }
1142 else if (is_special_named_call_p (call, "__analyzer_dump_path", 0))
1143 {
1144 /* Handle the builtin "__analyzer_dump_path" by queuing a
1145 diagnostic at this exploded_node. */
1146 ctxt.warn (new dump_path_diagnostic ());
1147 }
1148 else if (is_special_named_call_p (call, "__analyzer_dump_region_model",
1149 0))
1150 {
1151 /* Handle the builtin "__analyzer_dump_region_model" by dumping
1152 the region model's state to stderr. */
1153 state->m_region_model->dump (false);
1154 }
1155 else if (is_special_named_call_p (call, "__analyzer_eval", 1))
1156 state->m_region_model->impl_call_analyzer_eval (call, &ctxt);
1157 else if (is_special_named_call_p (call, "__analyzer_break", 0))
1158 {
1159 /* Handle the builtin "__analyzer_break" by triggering a
1160 breakpoint. */
1161 /* TODO: is there a good cross-platform way to do this? */
1162 raise (SIGINT);
1163 }
1164 else if (is_special_named_call_p (call,
1165 "__analyzer_dump_exploded_nodes",
1166 1))
1167 {
1168 /* This is handled elsewhere. */
1169 }
1170 else if (is_setjmp_call_p (call))
1171 state->m_region_model->on_setjmp (call, this, &ctxt);
1172 else if (is_longjmp_call_p (call))
1173 {
1174 on_longjmp (eg, call, state, &ctxt);
1175 return on_stmt_flags::terminate_path ();
1176 }
1177 else
1178 unknown_side_effects
1179 = state->m_region_model->on_call_pre (call, &ctxt);
1180 }
1181 break;
1182
1183 case GIMPLE_RETURN:
1184 {
1185 const greturn *return_ = as_a <const greturn *> (stmt);
1186 state->m_region_model->on_return (return_, &ctxt);
1187 }
1188 break;
1189 }
1190
1191 bool any_sm_changes = false;
1192 int sm_idx;
1193 sm_state_map *smap;
1194 FOR_EACH_VEC_ELT (old_state.m_checker_states, sm_idx, smap)
1195 {
1196 const state_machine &sm = eg.get_ext_state ().get_sm (sm_idx);
1197 const sm_state_map *old_smap
1198 = old_state.m_checker_states[sm_idx];
1199 sm_state_map *new_smap = state->m_checker_states[sm_idx];
1200 impl_sm_context sm_ctxt (eg, sm_idx, sm, this, &old_state, state,
1201 old_smap, new_smap);
1202 /* Allow the state_machine to handle the stmt. */
1203 if (sm.on_stmt (&sm_ctxt, snode, stmt))
1204 unknown_side_effects = false;
1205 if (*old_smap != *new_smap)
1206 any_sm_changes = true;
1207 }
1208
1209 if (const gcall *call = dyn_cast <const gcall *> (stmt))
1210 state->m_region_model->on_call_post (call, unknown_side_effects, &ctxt);
1211
1212 return on_stmt_flags (any_sm_changes);
1213 }
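
/* For illustration, a hypothetical testcase (in the style of the analyzer's
   DejaGnu tests; not quoted from any particular test) exercising the special
   "__analyzer_*" calls handled above:

     void test (void *p)
     {
       __analyzer_eval (p != NULL);   // report what is known about the cond
       __analyzer_dump_path ();       // queue a "path" diagnostic here
     }

   both calls are recognized purely by name in the GIMPLE_CALL handling.  */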
1214
1215 /* Consider the effect of following superedge SUCC from this node.
1216
1217 Return true if it's feasible to follow the edge, or false
1218 if it's infeasible.
1219
1220 Examples: if it's the "true" branch within
1221 a CFG and we know the conditional is false, we know it's infeasible.
1222    If it's one of multiple interprocedural "return" edges, then only
1223 the edge back to the most recent callsite is feasible.
1224
1225 Update NEXT_STATE accordingly (e.g. to record that a condition was
1226 true or false, or that the NULL-ness of a pointer has been checked,
1227 pushing/popping stack frames, etc).
1228
1229 Update NEXT_POINT accordingly (updating the call string). */
1230
1231 bool
1232 exploded_node::on_edge (exploded_graph &eg,
1233 const superedge *succ,
1234 program_point *next_point,
1235 program_state *next_state) const
1236 {
1237 LOG_FUNC (eg.get_logger ());
1238
1239 if (!next_point->on_edge (eg, succ))
1240 return false;
1241
1242 if (!next_state->on_edge (eg, *this, succ))
1243 return false;
1244
1245 return true;
1246 }
1247
1248 /* Verify that the stack at LONGJMP_POINT is still valid, given a call
1249 to "setjmp" at SETJMP_POINT - the stack frame that "setjmp" was
1250 called in must still be valid.
1251
1252 Caveat: this merely checks the call_strings in the points; it doesn't
1253 detect the case where a frame returns and is then called again. */
1254
1255 static bool
1256 valid_longjmp_stack_p (const program_point &longjmp_point,
1257 const program_point &setjmp_point)
1258 {
1259 const call_string &cs_at_longjmp = longjmp_point.get_call_string ();
1260 const call_string &cs_at_setjmp = setjmp_point.get_call_string ();
1261
1262 if (cs_at_longjmp.length () < cs_at_setjmp.length ())
1263 return false;
1264
1265 /* Check that the call strings match, up to the depth of the
1266 setjmp point. */
1267 for (unsigned depth = 0; depth < cs_at_setjmp.length (); depth++)
1268 if (cs_at_longjmp[depth] != cs_at_setjmp[depth])
1269 return false;
1270
1271 return true;
1272 }
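
/* For example (an assumed call pattern, not specific code in this file):
   if "setjmp" was reached with call string [main -> outer] and the "longjmp"
   occurs with call string [main -> outer, outer -> inner], the prefix up to
   the setjmp's depth still matches, so the stack is considered valid; if the
   longjmp instead occurred with call string [main -> other], the mismatch at
   depth 0 would make valid_longjmp_stack_p return false.  */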
1273
1274 /* A pending_diagnostic subclass for complaining about bad longjmps,
1275 where the enclosing function of the "setjmp" has returned (and thus
1276 the stack frame no longer exists). */
1277
1278 class stale_jmp_buf : public pending_diagnostic_subclass<stale_jmp_buf>
1279 {
1280 public:
1281 stale_jmp_buf (const gcall *setjmp_call, const gcall *longjmp_call,
1282 const program_point &setjmp_point)
1283 : m_setjmp_call (setjmp_call), m_longjmp_call (longjmp_call),
1284 m_setjmp_point (setjmp_point), m_stack_pop_event (NULL)
1285 {}
1286
1287 bool emit (rich_location *richloc) FINAL OVERRIDE
1288 {
1289 return warning_at
1290 (richloc, OPT_Wanalyzer_stale_setjmp_buffer,
1291 "%qs called after enclosing function of %qs has returned",
1292 get_user_facing_name (m_longjmp_call),
1293 get_user_facing_name (m_setjmp_call));
1294 }
1295
1296 const char *get_kind () const FINAL OVERRIDE
1297 { return "stale_jmp_buf"; }
1298
1299 bool operator== (const stale_jmp_buf &other) const
1300 {
1301 return (m_setjmp_call == other.m_setjmp_call
1302 && m_longjmp_call == other.m_longjmp_call);
1303 }
1304
1305 bool
1306 maybe_add_custom_events_for_superedge (const exploded_edge &eedge,
1307 checker_path *emission_path)
1308 FINAL OVERRIDE
1309 {
1310 /* Detect exactly when the stack first becomes invalid,
1311 and issue an event then. */
1312 if (m_stack_pop_event)
1313 return false;
1314 const exploded_node *src_node = eedge.m_src;
1315 const program_point &src_point = src_node->get_point ();
1316 const exploded_node *dst_node = eedge.m_dest;
1317 const program_point &dst_point = dst_node->get_point ();
1318 if (valid_longjmp_stack_p (src_point, m_setjmp_point)
1319 && !valid_longjmp_stack_p (dst_point, m_setjmp_point))
1320 {
1321 /* Compare with diagnostic_manager::add_events_for_superedge. */
1322 const int src_stack_depth = src_point.get_stack_depth ();
1323 m_stack_pop_event = new custom_event
1324 (src_point.get_location (),
1325 src_point.get_fndecl (),
1326 src_stack_depth,
1327 "stack frame is popped here, invalidating saved environment");
1328 emission_path->add_event (m_stack_pop_event);
1329 return false;
1330 }
1331 return false;
1332 }
1333
1334 label_text describe_final_event (const evdesc::final_event &ev)
1335 {
1336 if (m_stack_pop_event)
1337 return ev.formatted_print
1338 ("%qs called after enclosing function of %qs returned at %@",
1339 get_user_facing_name (m_longjmp_call),
1340 get_user_facing_name (m_setjmp_call),
1341 m_stack_pop_event->get_id_ptr ());
1342 else
1343 return ev.formatted_print
1344 ("%qs called after enclosing function of %qs has returned",
1345 get_user_facing_name (m_longjmp_call),
1346 	 get_user_facing_name (m_setjmp_call));
1347 }
1348
1349
1350 private:
1351 const gcall *m_setjmp_call;
1352 const gcall *m_longjmp_call;
1353 program_point m_setjmp_point;
1354 custom_event *m_stack_pop_event;
1355 };
1356
1357 /* Handle LONGJMP_CALL, a call to longjmp or siglongjmp.
1358
1359 Attempt to locate where setjmp/sigsetjmp was called on the jmp_buf and build
1360 an exploded_node and exploded_edge to it representing a rewind to that frame,
1361 handling the various kinds of failure that can occur. */
1362
1363 void
1364 exploded_node::on_longjmp (exploded_graph &eg,
1365 const gcall *longjmp_call,
1366 program_state *new_state,
1367 region_model_context *ctxt) const
1368 {
1369 tree buf_ptr = gimple_call_arg (longjmp_call, 0);
1370 gcc_assert (POINTER_TYPE_P (TREE_TYPE (buf_ptr)));
1371
1372 region_model *new_region_model = new_state->m_region_model;
1373 const svalue *buf_ptr_sval = new_region_model->get_rvalue (buf_ptr, ctxt);
1374 const region *buf = new_region_model->deref_rvalue (buf_ptr_sval, buf_ptr,
1375 ctxt);
1376
1377 const svalue *buf_content_sval = new_region_model->get_store_value (buf);
1378 const setjmp_svalue *setjmp_sval
1379 = buf_content_sval->dyn_cast_setjmp_svalue ();
1380 if (!setjmp_sval)
1381 return;
1382
1383 const setjmp_record tmp_setjmp_record = setjmp_sval->get_setjmp_record ();
1384
1385 /* Build a custom enode and eedge for rewinding from the longjmp/siglongjmp
1386 call back to the setjmp/sigsetjmp. */
1387 rewind_info_t rewind_info (tmp_setjmp_record, longjmp_call);
1388
1389 const gcall *setjmp_call = rewind_info.get_setjmp_call ();
1390 const program_point &setjmp_point = rewind_info.get_setjmp_point ();
1391
1392 const program_point &longjmp_point = get_point ();
1393
1394 /* Verify that the setjmp's call_stack hasn't been popped. */
1395 if (!valid_longjmp_stack_p (longjmp_point, setjmp_point))
1396 {
1397 ctxt->warn (new stale_jmp_buf (setjmp_call, longjmp_call, setjmp_point));
1398 return;
1399 }
1400
1401 gcc_assert (longjmp_point.get_stack_depth ()
1402 >= setjmp_point.get_stack_depth ());
1403
1404 /* Update the state for use by the destination node. */
1405
1406 /* Stash the current number of diagnostics so that we can update
1407 any that this adds to show where the longjmp is rewinding to. */
1408
1409 diagnostic_manager *dm = &eg.get_diagnostic_manager ();
1410 unsigned prev_num_diagnostics = dm->get_num_diagnostics ();
1411
1412 new_region_model->on_longjmp (longjmp_call, setjmp_call,
1413 setjmp_point.get_stack_depth (), ctxt);
1414
1415 /* Detect leaks in the new state relative to the old state. */
1416 program_state::detect_leaks (get_state (), *new_state, NULL,
1417 eg.get_ext_state (), ctxt);
1418
1419 program_point next_point
1420 = program_point::after_supernode (setjmp_point.get_supernode (),
1421 setjmp_point.get_call_string ());
1422
1423 exploded_node *next
1424 = eg.get_or_create_node (next_point, *new_state, this);
1425
1426 /* Create custom exploded_edge for a longjmp. */
1427 if (next)
1428 {
1429 exploded_edge *eedge
1430 = eg.add_edge (const_cast<exploded_node *> (this), next, NULL,
1431 new rewind_info_t (tmp_setjmp_record, longjmp_call));
1432
1433 /* For any diagnostics that were queued here (such as leaks) we want
1434 the checker_path to show the rewinding events after the "final event"
1435 so that the user sees where the longjmp is rewinding to (otherwise the
1436 path is meaningless).
1437
1438 For example, we want to emit something like:
1439 | NN | {
1440 | NN | longjmp (env, 1);
1441 | | ~~~~~~~~~~~~~~~~
1442 | | |
1443 | | (10) 'ptr' leaks here; was allocated at (7)
1444 | | (11) rewinding from 'longjmp' in 'inner'...
1445 |
1446 <-------------+
1447 |
1448 'outer': event 12
1449 |
1450 | NN | i = setjmp(env);
1451 | | ^~~~~~
1452 | | |
1453 | | (12) ...to 'setjmp' in 'outer' (saved at (2))
1454
1455 where the "final" event above is event (10), but we want to append
1456 events (11) and (12) afterwards.
1457
1458 Do this by setting m_trailing_eedge on any diagnostics that were
1459 just saved. */
1460 unsigned num_diagnostics = dm->get_num_diagnostics ();
1461 for (unsigned i = prev_num_diagnostics; i < num_diagnostics; i++)
1462 {
1463 saved_diagnostic *sd = dm->get_saved_diagnostic (i);
1464 sd->m_trailing_eedge = eedge;
1465 }
1466 }
1467 }
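
/* For reference, the rewind modeled above corresponds to ordinary C along
   the lines of (hypothetical code, matching the "inner"/"outer" naming used
   in the example path in the comment above):

     jmp_buf env;

     static void inner (void)
     {
       longjmp (env, 1);   // rewinds to the setjmp in "outer"
     }

     void outer (void)
     {
       if (setjmp (env) == 0)
         inner ();
     }

   (requires <setjmp.h>).  */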
1468
1469 /* Subroutine of exploded_graph::process_node for finding the successors
1470 of the supernode for a function exit basic block.
1471
1472 Ensure that pop_frame is called, potentially queuing diagnostics about
1473 leaks. */
1474
1475 void
1476 exploded_node::detect_leaks (exploded_graph &eg) const
1477 {
1478 LOG_FUNC_1 (eg.get_logger (), "EN: %i", m_index);
1479
1480 gcc_assert (get_point ().get_supernode ()->return_p ());
1481
1482 /* If we're not a "top-level" function, do nothing; pop_frame
1483 will be called when handling the return superedge. */
1484 if (get_point ().get_stack_depth () > 1)
1485 return;
1486
1487 /* We have a "top-level" function. */
1488 gcc_assert (get_point ().get_stack_depth () == 1);
1489
1490 const program_state &old_state = get_state ();
1491
1492 /* Work with a temporary copy of the state: pop the frame, and see
1493 what leaks (via purge_unused_svalues). */
1494 program_state new_state (old_state);
1495
1496 gcc_assert (new_state.m_region_model);
1497
1498 impl_region_model_context ctxt (eg, this,
1499 &old_state, &new_state,
1500 get_stmt ());
1501 const svalue *result = NULL;
1502 new_state.m_region_model->pop_frame (NULL, &result, &ctxt);
1503 program_state::detect_leaks (old_state, new_state, result,
1504 eg.get_ext_state (), &ctxt);
1505 }
1506
1507 /* Dump the successors and predecessors of this enode to OUTF. */
1508
1509 void
1510 exploded_node::dump_succs_and_preds (FILE *outf) const
1511 {
1512 unsigned i;
1513 exploded_edge *e;
1514 {
1515 auto_vec<exploded_node *> preds (m_preds.length ());
1516 FOR_EACH_VEC_ELT (m_preds, i, e)
1517 preds.quick_push (e->m_src);
1518 pretty_printer pp;
1519 print_enode_indices (&pp, preds);
1520 fprintf (outf, "preds: %s\n",
1521 pp_formatted_text (&pp));
1522 }
1523 {
1524 auto_vec<exploded_node *> succs (m_succs.length ());
1525 FOR_EACH_VEC_ELT (m_succs, i, e)
1526 succs.quick_push (e->m_dest);
1527 pretty_printer pp;
1528 print_enode_indices (&pp, succs);
1529 fprintf (outf, "succs: %s\n",
1530 pp_formatted_text (&pp));
1531 }
1532 }
1533
1534 /* class rewind_info_t : public exploded_edge::custom_info_t. */
1535
1536 /* Implementation of exploded_edge::custom_info_t::update_model vfunc
1537 for rewind_info_t.
1538
1539 Update state for the special-case of a rewind of a longjmp
1540 to a setjmp (which doesn't have a superedge, but does affect
1541 state). */
1542
1543 void
1544 rewind_info_t::update_model (region_model *model,
1545 const exploded_edge &eedge)
1546 {
1547 const program_point &longjmp_point = eedge.m_src->get_point ();
1548 const program_point &setjmp_point = eedge.m_dest->get_point ();
1549
1550 gcc_assert (longjmp_point.get_stack_depth ()
1551 >= setjmp_point.get_stack_depth ());
1552
1553 model->on_longjmp (get_longjmp_call (),
1554 get_setjmp_call (),
1555 setjmp_point.get_stack_depth (), NULL);
1556 }
1557
1558 /* Implementation of exploded_edge::custom_info_t::add_events_to_path vfunc
1559 for rewind_info_t. */
1560
1561 void
1562 rewind_info_t::add_events_to_path (checker_path *emission_path,
1563 const exploded_edge &eedge)
1564 {
1565 const exploded_node *src_node = eedge.m_src;
1566 const program_point &src_point = src_node->get_point ();
1567 const int src_stack_depth = src_point.get_stack_depth ();
1568 const exploded_node *dst_node = eedge.m_dest;
1569 const program_point &dst_point = dst_node->get_point ();
1570 const int dst_stack_depth = dst_point.get_stack_depth ();
1571
1572 emission_path->add_event
1573 (new rewind_from_longjmp_event
1574 (&eedge, get_longjmp_call ()->location,
1575 src_point.get_fndecl (),
1576 src_stack_depth, this));
1577 emission_path->add_event
1578 (new rewind_to_setjmp_event
1579 (&eedge, get_setjmp_call ()->location,
1580 dst_point.get_fndecl (),
1581 dst_stack_depth, this));
1582 }
1583
1584 /* class exploded_edge : public dedge<eg_traits>. */
1585
1586 /* exploded_edge's ctor. */
1587
1588 exploded_edge::exploded_edge (exploded_node *src, exploded_node *dest,
1589 const superedge *sedge,
1590 custom_info_t *custom_info)
1591 : dedge<eg_traits> (src, dest), m_sedge (sedge),
1592 m_custom_info (custom_info)
1593 {
1594 }
1595
1596 /* exploded_edge's dtor. */
1597
1598 exploded_edge::~exploded_edge ()
1599 {
1600 delete m_custom_info;
1601 }
1602
1603 /* Implementation of dedge::dump_dot vfunc for exploded_edge.
1604 Use the label of the underlying superedge, if any. */
1605
1606 void
1607 exploded_edge::dump_dot (graphviz_out *gv, const dump_args_t &) const
1608 {
1609 pretty_printer *pp = gv->get_pp ();
1610
1611 const char *style = "\"solid,bold\"";
1612 const char *color = "black";
1613 int weight = 10;
1614 const char *constraint = "true";
1615
1616 if (m_sedge)
1617 switch (m_sedge->m_kind)
1618 {
1619 default:
1620 gcc_unreachable ();
1621 case SUPEREDGE_CFG_EDGE:
1622 break;
1623 case SUPEREDGE_CALL:
1624 color = "red";
1625 //constraint = "false";
1626 break;
1627 case SUPEREDGE_RETURN:
1628 color = "green";
1629 //constraint = "false";
1630 break;
1631 case SUPEREDGE_INTRAPROCEDURAL_CALL:
1632 style = "\"dotted\"";
1633 break;
1634 }
1635 if (m_custom_info)
1636 {
1637 color = "red";
1638 style = "\"dotted\"";
1639 }
1640
1641 m_src->dump_dot_id (pp);
1642 pp_string (pp, " -> ");
1643 m_dest->dump_dot_id (pp);
1644 pp_printf (pp,
1645 (" [style=%s, color=%s, weight=%d, constraint=%s,"
1646 " headlabel=\""),
1647 style, color, weight, constraint);
1648
1649 if (m_sedge)
1650 m_sedge->dump_label_to_pp (pp, false);
1651 else if (m_custom_info)
1652 m_custom_info->print (pp);
1653
1654 //pp_write_text_as_dot_label_to_stream (pp, /*for_record=*/false);
1655
1656 pp_printf (pp, "\"];\n");
1657 }
1658
1659 /* Return a new json::object of the form
1660     {"src_idx": int, the index of the source exploded node,
1661      "dst_idx": int, the index of the destination exploded node,
1662 "sedge": (optional) object for the superedge, if any,
1663 "custom": (optional) str, a description, if this is a custom edge}. */
1664
1665 json::object *
1666 exploded_edge::to_json () const
1667 {
1668 json::object *eedge_obj = new json::object ();
1669 eedge_obj->set ("src_idx", new json::integer_number (m_src->m_index));
1670 eedge_obj->set ("dst_idx", new json::integer_number (m_dest->m_index));
1671 if (m_sedge)
1672 eedge_obj->set ("sedge", m_sedge->to_json ());
1673 if (m_custom_info)
1674 {
1675 pretty_printer pp;
1676 pp_format_decoder (&pp) = default_tree_printer;
1677 m_custom_info->print (&pp);
1678 eedge_obj->set ("custom", new json::string (pp_formatted_text (&pp)));
1679 }
1680 return eedge_obj;
1681 }
1682
1683 /* struct stats. */
1684
1685 /* stats' ctor. */
1686
1687 stats::stats (int num_supernodes)
1688 : m_node_reuse_count (0),
1689 m_node_reuse_after_merge_count (0),
1690 m_num_supernodes (num_supernodes)
1691 {
1692 for (int i = 0; i < NUM_POINT_KINDS; i++)
1693 m_num_nodes[i] = 0;
1694 }
1695
1696 /* Log these stats in multiline form to LOGGER. */
1697
1698 void
1699 stats::log (logger *logger) const
1700 {
1701 gcc_assert (logger);
1702 for (int i = 0; i < NUM_POINT_KINDS; i++)
1703 if (m_num_nodes[i] > 0)
1704 logger->log ("m_num_nodes[%s]: %i",
1705 point_kind_to_string (static_cast <enum point_kind> (i)),
1706 m_num_nodes[i]);
1707 logger->log ("m_node_reuse_count: %i", m_node_reuse_count);
1708 logger->log ("m_node_reuse_after_merge_count: %i",
1709 m_node_reuse_after_merge_count);
1710 }
1711
1712 /* Dump these stats in multiline form to OUT. */
1713
1714 void
1715 stats::dump (FILE *out) const
1716 {
1717 for (int i = 0; i < NUM_POINT_KINDS; i++)
1718 if (m_num_nodes[i] > 0)
1719 fprintf (out, "m_num_nodes[%s]: %i\n",
1720 point_kind_to_string (static_cast <enum point_kind> (i)),
1721 m_num_nodes[i]);
1722 fprintf (out, "m_node_reuse_count: %i\n", m_node_reuse_count);
1723 fprintf (out, "m_node_reuse_after_merge_count: %i\n",
1724 m_node_reuse_after_merge_count);
1725
1726 if (m_num_supernodes > 0)
1727 fprintf (out, "PK_AFTER_SUPERNODE nodes per supernode: %.2f\n",
1728 (float)m_num_nodes[PK_AFTER_SUPERNODE] / (float)m_num_supernodes);
1729 }
1730
1731 /* Return the total number of enodes recorded within this object. */
1732
1733 int
1734 stats::get_total_enodes () const
1735 {
1736 int result = 0;
1737 for (int i = 0; i < NUM_POINT_KINDS; i++)
1738 result += m_num_nodes[i];
1739 return result;
1740 }
1741
1742 /* strongly_connected_components's ctor. Tarjan's SCC algorithm. */
1743
1744 strongly_connected_components::
1745 strongly_connected_components (const supergraph &sg, logger *logger)
1746 : m_sg (sg), m_per_node (m_sg.num_nodes ())
1747 {
1748 LOG_SCOPE (logger);
1749 auto_timevar tv (TV_ANALYZER_SCC);
1750
1751 for (int i = 0; i < m_sg.num_nodes (); i++)
1752 m_per_node.quick_push (per_node_data ());
1753
1754 for (int i = 0; i < m_sg.num_nodes (); i++)
1755 if (m_per_node[i].m_index == -1)
1756 strong_connect (i);
1757
1758 if (0)
1759 dump ();
1760 }
1761
1762 /* Dump this object to stderr. */
1763
1764 DEBUG_FUNCTION void
1765 strongly_connected_components::dump () const
1766 {
1767 for (int i = 0; i < m_sg.num_nodes (); i++)
1768 {
1769 const per_node_data &v = m_per_node[i];
1770 fprintf (stderr, "SN %i: index: %i lowlink: %i on_stack: %i\n",
1771 i, v.m_index, v.m_lowlink, v.m_on_stack);
1772 }
1773 }
1774
1775 /* Subroutine of strongly_connected_components's ctor, part of Tarjan's
1776 SCC algorithm. */
1777
1778 void
1779 strongly_connected_components::strong_connect (unsigned index)
1780 {
1781 supernode *v_snode = m_sg.get_node_by_index (index);
1782
1783 /* Set the depth index for v to the smallest unused index. */
1784 per_node_data *v = &m_per_node[index];
1785 v->m_index = index;
1786 v->m_lowlink = index;
1787 m_stack.safe_push (index);
1788 v->m_on_stack = true;
1789 index++;
1790
1791 /* Consider successors of v. */
1792 unsigned i;
1793 superedge *sedge;
1794 FOR_EACH_VEC_ELT (v_snode->m_succs, i, sedge)
1795 {
1796 if (sedge->get_kind () != SUPEREDGE_CFG_EDGE
1797 && sedge->get_kind () != SUPEREDGE_INTRAPROCEDURAL_CALL)
1798 continue;
1799 supernode *w_snode = sedge->m_dest;
1800 per_node_data *w = &m_per_node[w_snode->m_index];
1801 if (w->m_index == -1)
1802 {
1803 /* Successor w has not yet been visited; recurse on it. */
1804 strong_connect (w_snode->m_index);
1805 v->m_lowlink = MIN (v->m_lowlink, w->m_lowlink);
1806 }
1807 else if (w->m_on_stack)
1808 {
1809 	  /* Successor w is in stack S and hence in the current SCC.
1810 	     If w is not on stack, then (v, w) is a cross-edge in the DFS
1811 	     tree and must be ignored.  */
1812 v->m_lowlink = MIN (v->m_lowlink, w->m_index);
1813 }
1814 }
1815
1816 /* If v is a root node, pop the stack and generate an SCC. */
1817
1818 if (v->m_lowlink == v->m_index)
1819 {
1820 per_node_data *w;
1821 do {
1822 int idx = m_stack.pop ();
1823 w = &m_per_node[idx];
1824 w->m_on_stack = false;
1825 } while (w != v);
1826 }
1827 }
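
/* A small worked example (hypothetical; not tied to any particular CFG):
   for supernodes A, B, C with indices 0, 1, 2 and intraprocedural edges
   A -> B, B -> C, C -> B, the recursion above visits A, then B, then C;
   C's lowlink is lowered to 1 via the edge back to the on-stack node B, so
   B and C are popped together as one SCC, while A forms an SCC by itself.  */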
1828
1829 /* worklist's ctor. */
1830
1831 worklist::worklist (const exploded_graph &eg, const analysis_plan &plan)
1832 : m_scc (eg.get_supergraph (), eg.get_logger ()),
1833 m_plan (plan),
1834 m_queue (key_t (*this, NULL))
1835 {
1836 }
1837
1838 /* Return the number of nodes in the worklist. */
1839
1840 unsigned
1841 worklist::length () const
1842 {
1843 return m_queue.nodes ();
1844 }
1845
1846 /* Return the next node in the worklist, removing it. */
1847
1848 exploded_node *
1849 worklist::take_next ()
1850 {
1851 return m_queue.extract_min ();
1852 }
1853
1854 /* Return the next node in the worklist without removing it. */
1855
1856 exploded_node *
1857 worklist::peek_next ()
1858 {
1859 return m_queue.min ();
1860 }
1861
1862 /* Add ENODE to the worklist. */
1863
1864 void
1865 worklist::add_node (exploded_node *enode)
1866 {
1867 gcc_assert (enode->get_status () == exploded_node::STATUS_WORKLIST);
1868 m_queue.insert (key_t (*this, enode), enode);
1869 }
1870
1871 /* Comparator for implementing worklist::key_t comparison operators.
1872 Return negative if KA is before KB.
1873 Return positive if KA is after KB.
1874 Return 0 if they are equal.
1875
1876 The ordering of the worklist is critical for performance and for
1877 avoiding node explosions. Ideally we want all enodes at a CFG join-point
1878 with the same callstring to be sorted next to each other in the worklist
1879 so that a run of consecutive enodes can be merged and processed "in bulk"
1880 rather than individually or pairwise, minimizing the number of new enodes
1881 created. */
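/* Viewed as a sort key, the comparison below is roughly lexicographic
   over:
     (analysis-plan function order  [only for empty callstrings with
      -fanalyzer-call-summaries],
      SCC id,
      supernode index,
      call string,
      function_point within the supernode,
      sm-state maps,
      enode index)
   with the enode index acting as a final tie-breaker so that the
   ordering is at least stable.  */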
1882
1883 int
1884 worklist::key_t::cmp (const worklist::key_t &ka, const worklist::key_t &kb)
1885 {
1886 const program_point &point_a = ka.m_enode->get_point ();
1887 const program_point &point_b = kb.m_enode->get_point ();
1888 const call_string &call_string_a = point_a.get_call_string ();
1889 const call_string &call_string_b = point_b.get_call_string ();
1890
1891 /* Order empty-callstring points with different functions based on the
1892 analysis_plan, so that we generate summaries before they are used. */
1893 if (flag_analyzer_call_summaries
1894 && call_string_a.empty_p ()
1895 && call_string_b.empty_p ()
1896 && point_a.get_function () != NULL
1897 && point_b.get_function () != NULL
1898 && point_a.get_function () != point_b.get_function ())
1899 {
1900 if (int cmp = ka.m_worklist.m_plan.cmp_function (point_a.get_function (),
1901 point_b.get_function ()))
1902 return cmp;
1903 }
1904
1905 /* First, order by SCC. */
1906 int scc_id_a = ka.get_scc_id (ka.m_enode);
1907 int scc_id_b = kb.get_scc_id (kb.m_enode);
1908 if (scc_id_a != scc_id_b)
1909 return scc_id_a - scc_id_b;
1910
1911 /* If in same SCC, order by supernode index (an arbitrary but stable
1912 ordering). */
1913 const supernode *snode_a = ka.m_enode->get_supernode ();
1914 const supernode *snode_b = kb.m_enode->get_supernode ();
1915 if (snode_a == NULL)
1916 {
1917 if (snode_b != NULL)
1918 /* One is NULL. */
1919 return -1;
1920 else
1921 /* Both are NULL. */
1922 return 0;
1923 }
1924 if (snode_b == NULL)
1925 /* One is NULL. */
1926 return 1;
1927 /* Neither are NULL. */
1928 gcc_assert (snode_a && snode_b);
1929 if (snode_a->m_index != snode_b->m_index)
1930 return snode_a->m_index - snode_b->m_index;
1931
1932 gcc_assert (snode_a == snode_b);
1933
1934 /* The points might vary by callstring; try sorting by callstring. */
1935 int cs_cmp = call_string::cmp (call_string_a, call_string_b);
1936 if (cs_cmp)
1937 return cs_cmp;
1938
1939 /* Order within supernode via program point. */
1940 int within_snode_cmp
1941 = function_point::cmp_within_supernode (point_a.get_function_point (),
1942 point_b.get_function_point ());
1943 if (within_snode_cmp)
1944 return within_snode_cmp;
1945
1946 /* Otherwise, we ought to have the same program_point. */
1947 gcc_assert (point_a == point_b);
1948
1949 const program_state &state_a = ka.m_enode->get_state ();
1950 const program_state &state_b = kb.m_enode->get_state ();
1951
1952 /* Sort by sm-state, so that identical sm-states are grouped
1953 together in the worklist. */
1954 for (unsigned sm_idx = 0; sm_idx < state_a.m_checker_states.length ();
1955 ++sm_idx)
1956 {
1957 sm_state_map *smap_a = state_a.m_checker_states[sm_idx];
1958 sm_state_map *smap_b = state_b.m_checker_states[sm_idx];
1959
1960 if (int smap_cmp = sm_state_map::cmp (*smap_a, *smap_b))
1961 return smap_cmp;
1962 }
1963
1964 /* Otherwise, we have two enodes at the same program point but with
1965 different states. We don't have a good total ordering on states,
1966 so order them by enode index, so that we at least have a
1967 stable sort. */
1968 return ka.m_enode->m_index - kb.m_enode->m_index;
1969 }
1970
1971 /* exploded_graph's ctor. */
1972
1973 exploded_graph::exploded_graph (const supergraph &sg, logger *logger,
1974 const extrinsic_state &ext_state,
1975 const state_purge_map *purge_map,
1976 const analysis_plan &plan,
1977 int verbosity)
1978 : m_sg (sg), m_logger (logger),
1979 m_worklist (*this, plan),
1980 m_ext_state (ext_state),
1981 m_purge_map (purge_map),
1982 m_plan (plan),
1983 m_diagnostic_manager (logger, ext_state.get_engine (), verbosity),
1984 m_global_stats (m_sg.num_nodes ()),
1985 m_functionless_stats (m_sg.num_nodes ()),
1986 m_PK_AFTER_SUPERNODE_per_snode (m_sg.num_nodes ())
1987 {
1988 m_origin = get_or_create_node (program_point::origin (),
1989 program_state (ext_state), NULL);
1990 for (int i = 0; i < m_sg.num_nodes (); i++)
1991 m_PK_AFTER_SUPERNODE_per_snode.quick_push (i);
1992 }
1993
1994 /* exploded_graph's dtor. */
1995
1996 exploded_graph::~exploded_graph ()
1997 {
1998 for (function_stat_map_t::iterator iter = m_per_function_stats.begin ();
1999 iter != m_per_function_stats.end ();
2000 ++iter)
2001 delete (*iter).second;
2002
2003 for (point_map_t::iterator iter = m_per_point_data.begin ();
2004 iter != m_per_point_data.end ();
2005 ++iter)
2006 delete (*iter).second;
2007 }
2008
2009 /* Ensure that there is an exploded_node representing an external call to
2010 FUN, adding it to the worklist if it has to be created.
2011
2012 Add an edge from the origin exploded_node to the function entrypoint
2013 exploded_node.
2014
2015 Return the exploded_node for the entrypoint to the function. */
2016
2017 exploded_node *
2018 exploded_graph::add_function_entry (function *fun)
2019 {
2020 gcc_assert (gimple_has_body_p (fun->decl));
2021
2022 /* Be idempotent. */
2023 if (m_functions_with_enodes.contains (fun))
2024 {
2025 logger * const logger = get_logger ();
2026 if (logger)
2027 logger->log ("entrypoint for %qE already exists", fun->decl);
2028 return NULL;
2029 }
2030
2031 program_point point = program_point::from_function_entry (m_sg, fun);
2032 program_state state (m_ext_state);
2033 state.push_frame (m_ext_state, fun);
2034
2035 if (!state.m_valid)
2036 return NULL;
2037
2038 exploded_node *enode = get_or_create_node (point, state, NULL);
2039 if (!enode)
2040 return NULL;
2041
2042 add_edge (m_origin, enode, NULL);
2043
2044 m_functions_with_enodes.add (fun);
2045
2046 return enode;
2047 }
2048
2049 /* Get or create an exploded_node for (POINT, STATE).
2050 If a new node is created, it is added to the worklist.
2051
2052 Use ENODE_FOR_DIAG, a pre-existing enode, for any diagnostics
2053 that need to be emitted (e.g. when purging state *before* we have
2054 a new enode). */
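/* In outline, the lookup below proceeds as follows (a summary of the
   implementation, not a guarantee):
     - reject invalid states;
     - prune the state for POINT to improve the chance of a cache hit;
     - probe m_point_and_state_to_node and reuse any existing enode;
     - otherwise, with -fanalyzer-state-merge, try merging with each
       existing enode at POINT and re-probe the cache with the merged
       state;
     - give up (via -Wanalyzer-too-complex) if the per-program-point
       enode limit has been reached;
     - else create a new enode and add it to the worklist.  */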
2055
2056 exploded_node *
2057 exploded_graph::get_or_create_node (const program_point &point,
2058 const program_state &state,
2059 const exploded_node *enode_for_diag)
2060 {
2061 logger * const logger = get_logger ();
2062 LOG_FUNC (logger);
2063 if (logger)
2064 {
2065 format f (false);
2066 pretty_printer *pp = logger->get_printer ();
2067 logger->start_log_line ();
2068 pp_string (pp, "point: ");
2069 point.print (pp, f);
2070 logger->end_log_line ();
2071 logger->start_log_line ();
2072 pp_string (pp, "state: ");
2073 state.dump_to_pp (m_ext_state, true, false, pp);
2074 logger->end_log_line ();
2075 }
2076
2077 /* Stop exploring paths for which we don't know how to effectively
2078 model the state. */
2079 if (!state.m_valid)
2080 {
2081 if (logger)
2082 logger->log ("invalid state; not creating node");
2083 return NULL;
2084 }
2085
2086 auto_cfun sentinel (point.get_function ());
2087
2088 state.validate (get_ext_state ());
2089
2090 //state.dump (get_ext_state ());
2091
2092 /* Prune state to try to improve the chances of a cache hit,
2093 avoiding generating redundant nodes. */
2094 program_state pruned_state
2095 = state.prune_for_point (*this, point, enode_for_diag);
2096
2097 pruned_state.validate (get_ext_state ());
2098
2099 //pruned_state.dump (get_ext_state ());
2100
2101 if (logger)
2102 {
2103 pretty_printer *pp = logger->get_printer ();
2104 logger->start_log_line ();
2105 pp_string (pp, "pruned_state: ");
2106 pruned_state.dump_to_pp (m_ext_state, true, false, pp);
2107 logger->end_log_line ();
2108 pruned_state.m_region_model->dump_to_pp (logger->get_printer (), true,
2109 false);
2110 }
2111
2112 stats *per_fn_stats = get_or_create_function_stats (point.get_function ());
2113
2114 stats *per_cs_stats
2115 = &get_or_create_per_call_string_data (point.get_call_string ())->m_stats;
2116
2117 point_and_state ps (point, pruned_state);
2118 ps.validate (m_ext_state);
2119 if (exploded_node **slot = m_point_and_state_to_node.get (&ps))
2120 {
2121 /* An exploded_node for PS already exists. */
2122 if (logger)
2123 logger->log ("reused EN: %i", (*slot)->m_index);
2124 m_global_stats.m_node_reuse_count++;
2125 per_fn_stats->m_node_reuse_count++;
2126 per_cs_stats->m_node_reuse_count++;
2127 return *slot;
2128 }
2129
2130 per_program_point_data *per_point_data
2131 = get_or_create_per_program_point_data (point);
2132
2133 /* Consider merging state with another enode at this program_point. */
2134 if (flag_analyzer_state_merge)
2135 {
2136 exploded_node *existing_enode;
2137 unsigned i;
2138 FOR_EACH_VEC_ELT (per_point_data->m_enodes, i, existing_enode)
2139 {
2140 if (logger)
2141 logger->log ("considering merging with existing EN: %i for point",
2142 existing_enode->m_index);
2143 gcc_assert (existing_enode->get_point () == point);
2144 const program_state &existing_state = existing_enode->get_state ();
2145
2146 /* Try merging the pruned state with this existing enode's state. */
2147
2148 program_state merged_state (m_ext_state);
2149 if (pruned_state.can_merge_with_p (existing_state, point,
2150 &merged_state))
2151 {
2152 if (logger)
2153 logger->log ("merging new state with that of EN: %i",
2154 existing_enode->m_index);
2155
2156 /* Try again for a cache hit, this time using the
2157 merged state. */
2160 ps.set_state (merged_state);
2161
2162 if (exploded_node **slot = m_point_and_state_to_node.get (&ps))
2163 {
2164 /* An exploded_node for PS already exists. */
2165 if (logger)
2166 logger->log ("reused EN: %i", (*slot)->m_index);
2167 m_global_stats.m_node_reuse_after_merge_count++;
2168 per_fn_stats->m_node_reuse_after_merge_count++;
2169 per_cs_stats->m_node_reuse_after_merge_count++;
2170 return *slot;
2171 }
2172 }
2173 else
2174 if (logger)
2175 logger->log ("not merging new state with that of EN: %i",
2176 existing_enode->m_index);
2177 }
2178 }
2179
2180 /* Impose a limit on the number of enodes per program point, and
2181 simply stop if we exceed it. */
2182 if ((int)per_point_data->m_enodes.length ()
2183 >= param_analyzer_max_enodes_per_program_point)
2184 {
2185 pretty_printer pp;
2186 point.print (&pp, format (false));
2187 print_enode_indices (&pp, per_point_data->m_enodes);
2188 if (logger)
2189 logger->log ("not creating enode; too many at program point: %s",
2190 pp_formatted_text (&pp));
2191 warning_at (point.get_location (), OPT_Wanalyzer_too_complex,
2192 "terminating analysis for this program point: %s",
2193 pp_formatted_text (&pp));
2194 per_point_data->m_excess_enodes++;
2195 return NULL;
2196 }
2197
2198 ps.validate (m_ext_state);
2199
2200 /* An exploded_node for "ps" doesn't already exist; create one. */
2201 exploded_node *node = new exploded_node (ps, m_nodes.length ());
2202 add_node (node);
2203 m_point_and_state_to_node.put (node->get_ps_key (), node);
2204
2205 /* Update per-program_point data. */
2206 per_point_data->m_enodes.safe_push (node);
2207
2208 const enum point_kind node_pk = node->get_point ().get_kind ();
2209 m_global_stats.m_num_nodes[node_pk]++;
2210 per_fn_stats->m_num_nodes[node_pk]++;
2211 per_cs_stats->m_num_nodes[node_pk]++;
2212
2213 if (node_pk == PK_AFTER_SUPERNODE)
2214 m_PK_AFTER_SUPERNODE_per_snode[point.get_supernode ()->m_index]++;
2215
2216 if (logger)
2217 {
2218 format f (false);
2219 pretty_printer *pp = logger->get_printer ();
2220 logger->log ("created EN: %i", node->m_index);
2221 logger->start_log_line ();
2222 pp_string (pp, "point: ");
2223 point.print (pp, f);
2224 logger->end_log_line ();
2225 logger->start_log_line ();
2226 pp_string (pp, "pruned_state: ");
2227 pruned_state.dump_to_pp (m_ext_state, true, false, pp);
2228 logger->end_log_line ();
2229 }
2230
2231 /* Add the new node to the worklist. */
2232 m_worklist.add_node (node);
2233 return node;
2234 }
2235
2236 /* Add an exploded_edge from SRC to DEST, recording its association
2237 with SEDGE (which may be NULL), and, if non-NULL, taking ownership
2238 of CUSTOM_INFO.
2239 Return the newly-created eedge. */
2240
2241 exploded_edge *
2242 exploded_graph::add_edge (exploded_node *src, exploded_node *dest,
2243 const superedge *sedge,
2244 exploded_edge::custom_info_t *custom_info)
2245 {
2246 if (get_logger ())
2247 get_logger ()->log ("creating edge EN: %i -> EN: %i",
2248 src->m_index, dest->m_index);
2249 exploded_edge *e = new exploded_edge (src, dest, sedge, custom_info);
2250 digraph<eg_traits>::add_edge (e);
2251 return e;
2252 }
2253
2254 /* Ensure that this graph has per-program_point-data for POINT;
2255 borrow a pointer to it. */
2256
2257 per_program_point_data *
2258 exploded_graph::
2259 get_or_create_per_program_point_data (const program_point &point)
2260 {
2261 if (per_program_point_data **slot = m_per_point_data.get (&point))
2262 return *slot;
2263
2264 per_program_point_data *per_point_data = new per_program_point_data (point);
2265 m_per_point_data.put (&per_point_data->m_key, per_point_data);
2266 return per_point_data;
2267 }
2268
2269 /* Get this graph's per-program-point-data for POINT if there is any,
2270 otherwise NULL. */
2271
2272 per_program_point_data *
2273 exploded_graph::get_per_program_point_data (const program_point &point) const
2274 {
2275 if (per_program_point_data **slot
2276 = const_cast <point_map_t &> (m_per_point_data).get (&point))
2277 return *slot;
2278
2279 return NULL;
2280 }
2281
2282 /* Ensure that this graph has per-call_string-data for CS;
2283 borrow a pointer to it. */
2284
2285 per_call_string_data *
2286 exploded_graph::get_or_create_per_call_string_data (const call_string &cs)
2287 {
2288 if (per_call_string_data **slot = m_per_call_string_data.get (&cs))
2289 return *slot;
2290
2291 per_call_string_data *data = new per_call_string_data (cs, m_sg.num_nodes ());
2292 m_per_call_string_data.put (&data->m_key,
2293 data);
2294 return data;
2295 }
2296
2297 /* Ensure that this graph has per-function-data for FUN;
2298 borrow a pointer to it. */
2299
2300 per_function_data *
2301 exploded_graph::get_or_create_per_function_data (function *fun)
2302 {
2303 if (per_function_data **slot = m_per_function_data.get (fun))
2304 return *slot;
2305
2306 per_function_data *data = new per_function_data ();
2307 m_per_function_data.put (fun, data);
2308 return data;
2309 }
2310
2311 /* Get this graph's per-function-data for FUN if there is any,
2312 otherwise NULL. */
2313
2314 per_function_data *
2315 exploded_graph::get_per_function_data (function *fun) const
2316 {
2317 if (per_function_data **slot
2318 = const_cast <per_function_data_t &> (m_per_function_data).get (fun))
2319 return *slot;
2320
2321 return NULL;
2322 }
2323
2324 /* Return true if NODE and FUN should be traversed directly, rather than
2325 called via other functions. */
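/* For example (an illustrative scenario, not taken from a testcase):
   with -fanalyzer-call-summaries, a helper with three call sites is
   traversed directly, so that a summary is likely to exist by the time
   its callers are analyzed; a "static" helper with a single caller is
   not traversed directly, and is only explored via calls from its
   caller.  */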
2326
2327 static bool
2328 toplevel_function_p (cgraph_node *node, function *fun, logger *logger)
2329 {
2330 /* TODO: better logic here
2331 e.g. only if more than one caller, and significantly complicated.
2332 Perhaps some whole-callgraph analysis to decide if it's worth summarizing
2333 an edge, and if so, we need summaries. */
2334 if (flag_analyzer_call_summaries)
2335 {
2336 int num_call_sites = 0;
2337 for (cgraph_edge *edge = node->callers; edge; edge = edge->next_caller)
2338 ++num_call_sites;
2339
2340 /* For now, if there's more than one in-edge, and we want call
2341 summaries, do it at the top level so that there's a chance
2342 we'll have a summary when we need one. */
2343 if (num_call_sites > 1)
2344 {
2345 if (logger)
2346 logger->log ("traversing %qE (%i call sites)",
2347 fun->decl, num_call_sites);
2348 return true;
2349 }
2350 }
2351
2352 if (!TREE_PUBLIC (fun->decl))
2353 {
2354 if (logger)
2355 logger->log ("not traversing %qE (static)", fun->decl);
2356 return false;
2357 }
2358
2359 if (logger)
2360 logger->log ("traversing %qE (all checks passed)", fun->decl);
2361
2362 return true;
2363 }
2364
2365 /* Callback for walk_tree for finding callbacks within initializers;
2366 ensure they are treated as possible entrypoints to the analysis. */
2367
2368 static tree
2369 add_any_callbacks (tree *tp, int *, void *data)
2370 {
2371 exploded_graph *eg = (exploded_graph *)data;
2372 if (TREE_CODE (*tp) == FUNCTION_DECL)
2373 eg->on_escaped_function (*tp);
2374 return NULL_TREE;
2375 }
2376
2377 /* Add initial nodes to EG, with entrypoints for externally-callable
2378 functions. */
2379
2380 void
2381 exploded_graph::build_initial_worklist ()
2382 {
2383 logger * const logger = get_logger ();
2384 LOG_SCOPE (logger);
2385
2386 cgraph_node *node;
2387 FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (node)
2388 {
2389 function *fun = node->get_fun ();
2390 if (!toplevel_function_p (node, fun, logger))
2391 continue;
2392 exploded_node *enode = add_function_entry (fun);
2393 if (logger)
2394 {
2395 if (enode)
2396 logger->log ("created EN %i for %qE entrypoint",
2397 enode->m_index, fun->decl);
2398 else
2399 logger->log ("did not create enode for %qE entrypoint", fun->decl);
2400 }
2401 }
2402
2403 /* Find callbacks that are reachable from global initializers. */
2404 varpool_node *vpnode;
2405 FOR_EACH_VARIABLE (vpnode)
2406 {
2407 tree decl = vpnode->decl;
2408 if (!TREE_PUBLIC (decl))
2409 continue;
2410 tree init = DECL_INITIAL (decl);
2411 if (!init)
2412 continue;
2413 walk_tree (&init, add_any_callbacks, this, NULL);
2414 }
2415 }
2416
2417 /* The main loop of the analysis.
2418 Take freshly-created exploded_nodes from the worklist, calling
2419 process_node on them to explore the <point, state> graph.
2420 Add edges to their successors, potentially creating new successors
2421 (which are also added to the worklist). */
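/* Roughly (a sketch of the loop below, eliding details):

     while (worklist is non-empty)
       {
         node = take_next ();
         if a run of before-supernode enodes was bulk-merged,
           skip to the limit check;
         else if the next enode is at the same program point,
           try a pairwise state merge;
         process_node (node);
         if there are too many PK_AFTER_SUPERNODE enodes,
           warn via -Wanalyzer-too-complex and stop;
       }  */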
2422
2423 void
2424 exploded_graph::process_worklist ()
2425 {
2426 logger * const logger = get_logger ();
2427 LOG_SCOPE (logger);
2428 auto_timevar tv (TV_ANALYZER_WORKLIST);
2429
2430 while (m_worklist.length () > 0)
2431 {
2432 exploded_node *node = m_worklist.take_next ();
2433 gcc_assert (node->get_status () == exploded_node::STATUS_WORKLIST);
2434 gcc_assert (node->m_succs.length () == 0
2435 || node == m_origin);
2436
2437 if (logger)
2438 logger->log ("next to process: EN: %i", node->m_index);
2439
2440 /* If we have a run of nodes that are before-supernode, try merging and
2441 processing them together, rather than pairwise or individually. */
2442 if (flag_analyzer_state_merge && node != m_origin)
2443 if (maybe_process_run_of_before_supernode_enodes (node))
2444 goto handle_limit;
2445
2446 /* Avoid exponential explosions of nodes by attempting to merge
2447 nodes that are at the same program point and which have
2448 sufficiently similar state. */
2449 if (flag_analyzer_state_merge && node != m_origin)
2450 if (exploded_node *node_2 = m_worklist.peek_next ())
2451 {
2452 gcc_assert (node_2->get_status ()
2453 == exploded_node::STATUS_WORKLIST);
2454 gcc_assert (node->m_succs.length () == 0);
2455 gcc_assert (node_2->m_succs.length () == 0);
2456
2457 gcc_assert (node != node_2);
2458
2459 if (logger)
2460 logger->log ("peek worklist: EN: %i", node_2->m_index);
2461
2462 if (node->get_point () == node_2->get_point ())
2463 {
2464 const program_point &point = node->get_point ();
2465 if (logger)
2466 {
2467 format f (false);
2468 pretty_printer *pp = logger->get_printer ();
2469 logger->start_log_line ();
2470 logger->log_partial
2471 ("got potential merge EN: %i and EN: %i at ",
2472 node->m_index, node_2->m_index);
2473 point.print (pp, f);
2474 logger->end_log_line ();
2475 }
2476 const program_state &state = node->get_state ();
2477 const program_state &state_2 = node_2->get_state ();
2478
2479 /* They shouldn't be equal, or we wouldn't have two
2480 separate nodes. */
2481 gcc_assert (state != state_2);
2482
2483 program_state merged_state (m_ext_state);
2484 if (state.can_merge_with_p (state_2, point, &merged_state))
2485 {
2486 if (logger)
2487 logger->log ("merging EN: %i and EN: %i",
2488 node->m_index, node_2->m_index);
2489
2490 if (merged_state == state)
2491 {
2492 /* Then merge node_2 into node by adding an edge. */
2493 add_edge (node_2, node, NULL);
2494
2495 /* Remove node_2 from the worklist. */
2496 m_worklist.take_next ();
2497 node_2->set_status (exploded_node::STATUS_MERGER);
2498
2499 /* Continue processing "node" below. */
2500 }
2501 else if (merged_state == state_2)
2502 {
2503 /* Then merge node into node_2, and leave node_2
2504 in the worklist, to be processed on the next
2505 iteration. */
2506 add_edge (node, node_2, NULL);
2507 node->set_status (exploded_node::STATUS_MERGER);
2508 continue;
2509 }
2510 else
2511 {
2512 /* We have a merged state that differs from
2513 both state and state_2. */
2514
2515 /* Remove node_2 from the worklist. */
2516 m_worklist.take_next ();
2517
2518 /* Create (or get) an exploded node for the merged
2519 states, adding to the worklist. */
2520 exploded_node *merged_enode
2521 = get_or_create_node (node->get_point (),
2522 merged_state, node);
2523 if (merged_enode == NULL)
2524 continue;
2525
2526 if (logger)
2527 logger->log ("merged EN: %i and EN: %i into EN: %i",
2528 node->m_index, node_2->m_index,
2529 merged_enode->m_index);
2530
2531 /* "node" and "node_2" have both now been removed
2532 from the worklist; we should not process them.
2533
2534 "merged_enode" may be a new node; if so it will be
2535 processed in a subsequent iteration.
2536 Alternatively, "merged_enode" could be an existing
2537 node; one way the latter can
2538 happen is if we end up merging a succession of
2539 similar nodes into one. */
2540
2541 /* If merged_enode is one of the two we were merging,
2542 add it back to the worklist to ensure it gets
2543 processed.
2544
2545 Add edges from the merged nodes to it (but not a
2546 self-edge). */
2547 if (merged_enode == node)
2548 m_worklist.add_node (merged_enode);
2549 else
2550 {
2551 add_edge (node, merged_enode, NULL);
2552 node->set_status (exploded_node::STATUS_MERGER);
2553 }
2554
2555 if (merged_enode == node_2)
2556 m_worklist.add_node (merged_enode);
2557 else
2558 {
2559 add_edge (node_2, merged_enode, NULL);
2560 node_2->set_status (exploded_node::STATUS_MERGER);
2561 }
2562
2563 continue;
2564 }
2565 }
2566
2567 /* TODO: should we attempt more than two nodes,
2568 or just do pairs of nodes? (and hope that we get
2569 a cascade of mergers). */
2570 }
2571 }
2572
2573 process_node (node);
2574
2575 handle_limit:
2576 /* Impose a hard limit on the number of exploded nodes, to ensure
2577 that the analysis terminates in the face of pathological state
2578 explosion (or bugs).
2579
2580 Specifically, the limit is on the number of PK_AFTER_SUPERNODE
2581 exploded nodes, looking at supernode exit events.
2582
2583 We use exit rather than entry since there can be multiple
2584 entry ENs, one per phi; the number of PK_AFTER_SUPERNODE ought
2585 to be equivalent to the number of supernodes multiplied by the
2586 number of states. */
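/* For example (illustrative numbers, not the defaults): with 200
   supernodes and --param analyzer-bb-explosion-factor=5, the limit
   below would be 1000 "after-supernode" enodes.  */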
2587 const int limit = m_sg.num_nodes () * param_analyzer_bb_explosion_factor;
2588 if (m_global_stats.m_num_nodes[PK_AFTER_SUPERNODE] > limit)
2589 {
2590 if (logger)
2591 logger->log ("bailing out; too many nodes");
2592 warning_at (node->get_point ().get_location (),
2593 OPT_Wanalyzer_too_complex,
2594 "analysis bailed out early"
2595 " (%i 'after-snode' enodes; %i enodes)",
2596 m_global_stats.m_num_nodes[PK_AFTER_SUPERNODE],
2597 m_nodes.length ());
2598 return;
2599 }
2600 }
2601 }
2602
2603 /* Attempt to process a consecutive run of sufficiently-similar nodes in
2604 the worklist at a CFG join-point (having already popped ENODE from the
2605 head of the worklist).
2606
2607 If ENODE's point is of the form (before-supernode, SNODE) and the next
2608 nodes in the worklist are a consecutive run of enodes of the same form,
2609 for the same supernode as ENODE (but potentially from different in-edges),
2610 process them all together, setting their status to STATUS_BULK_MERGED,
2611 and return true.
2612 Otherwise, return false, in which case ENODE must be processed in the
2613 normal way.
2614
2615 When processing them all together, generate successor states based
2616 on phi nodes for the appropriate CFG edges, and then attempt to merge
2617 these states into a minimal set of merged successor states, partitioning
2618 the inputs by merged successor state.
2619
2620 Create new exploded nodes for all of the merged states, and add edges
2621 connecting the input enodes to the corresponding merger exploded nodes.
2622
2623 We hope we have a much smaller number of merged successor states
2624 compared to the number of input enodes - ideally just one,
2625 if all successor states can be merged.
2626
2627 Processing and merging many together as one operation rather than as
2628 pairs avoids scaling issues where per-pair mergers could bloat the
2629 graph with merger nodes (especially so after switch statements). */
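/* For example (illustrative): a switch statement whose 20 cases all
   converge on the same join-point supernode can leave 20
   before-supernode enodes adjacent in the worklist.  If all of their
   per-in-edge successor states merge into a single state, we create
   one successor enode with 20 incoming eedges, instead of a chain of
   pairwise-merger enodes.  */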
2630
2631 bool
2632 exploded_graph::
2633 maybe_process_run_of_before_supernode_enodes (exploded_node *enode)
2634 {
2635 /* A struct for tracking per-input state. */
2636 struct item
2637 {
2638 item (exploded_node *input_enode)
2639 : m_input_enode (input_enode),
2640 m_processed_state (input_enode->get_state ()),
2641 m_merger_idx (-1)
2642 {}
2643
2644 exploded_node *m_input_enode;
2645 program_state m_processed_state;
2646 int m_merger_idx;
2647 };
2648
2649 gcc_assert (enode->get_status () == exploded_node::STATUS_WORKLIST);
2650 gcc_assert (enode->m_succs.length () == 0);
2651
2652 const program_point &point = enode->get_point ();
2653
2654 if (point.get_kind () != PK_BEFORE_SUPERNODE)
2655 return false;
2656
2657 const supernode *snode = point.get_supernode ();
2658
2659 logger * const logger = get_logger ();
2660 LOG_SCOPE (logger);
2661
2662 /* Find a run of enodes in the worklist that are before the same supernode,
2663 but potentially from different in-edges. */
2664 auto_vec <exploded_node *> enodes;
2665 enodes.safe_push (enode);
2666 while (exploded_node *enode_2 = m_worklist.peek_next ())
2667 {
2668 gcc_assert (enode_2->get_status ()
2669 == exploded_node::STATUS_WORKLIST);
2670 gcc_assert (enode_2->m_succs.length () == 0);
2671
2672 const program_point &point_2 = enode_2->get_point ();
2673
2674 if (point_2.get_kind () == PK_BEFORE_SUPERNODE
2675 && point_2.get_supernode () == snode
2676 && point_2.get_call_string () == point.get_call_string ())
2677 {
2678 enodes.safe_push (enode_2);
2679 m_worklist.take_next ();
2680 }
2681 else
2682 break;
2683 }
2684
2685 /* If the only node is ENODE, then give up. */
2686 if (enodes.length () == 1)
2687 return false;
2688
2689 if (logger)
2690 logger->log ("got run of %i enodes for SN: %i",
2691 enodes.length (), snode->m_index);
2692
2693 /* All of these enodes have a shared successor point (even if they
2694 were for different in-edges). */
2695 program_point next_point (point.get_next ());
2696
2697 /* Calculate the successor state for each enode in enodes. */
2698 auto_delete_vec<item> items (enodes.length ());
2699 unsigned i;
2700 exploded_node *iter_enode;
2701 FOR_EACH_VEC_ELT (enodes, i, iter_enode)
2702 {
2703 item *it = new item (iter_enode);
2704 items.quick_push (it);
2705 const program_state &state = iter_enode->get_state ();
2706 program_state *next_state = &it->m_processed_state;
2707 const program_point &iter_point = iter_enode->get_point ();
2708 if (const superedge *iter_sedge = iter_point.get_from_edge ())
2709 {
2710 impl_region_model_context ctxt (*this, iter_enode,
2711 &state, next_state, NULL);
2712 const cfg_superedge *last_cfg_superedge
2713 = iter_sedge->dyn_cast_cfg_superedge ();
2714 if (last_cfg_superedge)
2715 next_state->m_region_model->update_for_phis
2716 (snode, last_cfg_superedge, &ctxt);
2717 }
2718 }
2719
2720 /* Attempt to partition the items into a set of merged states.
2721 We hope we have a much smaller number of merged states
2722 compared to the number of input enodes - ideally just one,
2723 if all can be merged. */
2724 auto_delete_vec <program_state> merged_states;
2725 auto_vec<item *> first_item_for_each_merged_state;
2726 item *it;
2727 FOR_EACH_VEC_ELT (items, i, it)
2728 {
2729 const program_state &it_state = it->m_processed_state;
2730 program_state *merged_state;
2731 unsigned iter_merger_idx;
2732 FOR_EACH_VEC_ELT (merged_states, iter_merger_idx, merged_state)
2733 {
2734 program_state merge (m_ext_state);
2735 if (it_state.can_merge_with_p (*merged_state, next_point, &merge))
2736 {
2737 *merged_state = merge;
2738 it->m_merger_idx = iter_merger_idx;
2739 if (logger)
2740 logger->log ("reusing merger state %i for item %i (EN: %i)",
2741 it->m_merger_idx, i, it->m_input_enode->m_index);
2742 goto got_merger;
2743 }
2744 }
2745 /* If it couldn't be merged with any existing merged_states,
2746 create a new one. */
2747 if (it->m_merger_idx == -1)
2748 {
2749 it->m_merger_idx = merged_states.length ();
2750 merged_states.safe_push (new program_state (it_state));
2751 first_item_for_each_merged_state.safe_push (it);
2752 if (logger)
2753 logger->log ("using new merger state %i for item %i (EN: %i)",
2754 it->m_merger_idx, i, it->m_input_enode->m_index);
2755 }
2756 got_merger:
2757 gcc_assert (it->m_merger_idx >= 0);
2758 gcc_assert ((unsigned)it->m_merger_idx < merged_states.length ());
2759 }
2760
2761 /* Create merger nodes. */
2762 auto_vec<exploded_node *> next_enodes (merged_states.length ());
2763 program_state *merged_state;
2764 FOR_EACH_VEC_ELT (merged_states, i, merged_state)
2765 {
2766 exploded_node *src_enode
2767 = first_item_for_each_merged_state[i]->m_input_enode;
2768 exploded_node *next
2769 = get_or_create_node (next_point, *merged_state, src_enode);
2770 /* "next" could be NULL; we handle that when adding the edges below. */
2771 next_enodes.quick_push (next);
2772 if (logger)
2773 {
2774 if (next)
2775 logger->log ("using EN: %i for merger state %i", next->m_index, i);
2776 else
2777 logger->log ("using NULL enode for merger state %i", i);
2778 }
2779 }
2780
2781 /* Create edges from each input enode to the appropriate successor enode.
2782 Update the status of the now-processed input enodes. */
2783 FOR_EACH_VEC_ELT (items, i, it)
2784 {
2785 exploded_node *next = next_enodes[it->m_merger_idx];
2786 if (next)
2787 add_edge (it->m_input_enode, next, NULL);
2788 it->m_input_enode->set_status (exploded_node::STATUS_BULK_MERGED);
2789 }
2790
2791 if (logger)
2792 logger->log ("merged %i in-enodes into %i out-enode(s) at SN: %i",
2793 items.length (), merged_states.length (), snode->m_index);
2794
2795 return true;
2796 }
2797
2798 /* Return true if STMT must appear at the start of its exploded node, and
2799 thus we can't consolidate its effects within a run of other statements,
2800 where PREV_STMT was the previous statement. */
2801
2802 static bool
2803 stmt_requires_new_enode_p (const gimple *stmt,
2804 const gimple *prev_stmt)
2805 {
2806 if (const gcall *call = dyn_cast <const gcall *> (stmt))
2807 {
2808 /* Stop consolidating at calls to
2809 "__analyzer_dump_exploded_nodes", so they always appear at the
2810 start of an exploded_node. */
2811 if (is_special_named_call_p (call, "__analyzer_dump_exploded_nodes",
2812 1))
2813 return true;
2814
2815 /* sm-signal.cc injects an additional custom eedge at "signal" calls
2816 from the registration enode to the handler enode, separate from the
2817 regular next state, which defeats the "detect state change" logic
2818 in process_node. Work around this via special-casing, to ensure
2819 we split the enode immediately before any "signal" call. */
2820 if (is_special_named_call_p (call, "signal", 2))
2821 return true;
2822 }
2823
2824 /* If we had a PREV_STMT with an unknown location, and this stmt
2825 has a known location, then if a state change happens here, it
2826 could be consolidated into PREV_STMT, giving us an event with
2827 no location. Ensure that STMT gets its own exploded_node to
2828 avoid this. */
2829 if (get_pure_location (prev_stmt->location) == UNKNOWN_LOCATION
2830 && get_pure_location (stmt->location) != UNKNOWN_LOCATION)
2831 return true;
2832
2833 return false;
2834 }
2835
2836 /* The core of exploded_graph::process_worklist (the main analysis loop),
2837 handling one node in the worklist.
2838
2839 Get successor <point, state> pairs for NODE, calling get_or_create_node
2840 on them, and adding an exploded_edge to each successor.
2841
2842 Freshly-created nodes will be added to the worklist. */
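/* In outline, the switch below handles:
     PK_ORIGIN: nothing to do;
     PK_BEFORE_SUPERNODE: apply phi nodes for the incoming CFG edge and
       step to the first stmt (or to after-supernode if there are no
       stmts);
     PK_BEFORE_STMT: process a run of stmts, stopping early for
       sm-state changes, -fanalyzer-fine-grained, or stmts that need
       their own enode;
     PK_AFTER_SUPERNODE: detect leaks at exit blocks, then follow each
       feasible outgoing superedge.  */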
2843
2844 void
2845 exploded_graph::process_node (exploded_node *node)
2846 {
2847 logger * const logger = get_logger ();
2848 LOG_FUNC_1 (logger, "EN: %i", node->m_index);
2849
2850 node->set_status (exploded_node::STATUS_PROCESSED);
2851
2852 const program_point &point = node->get_point ();
2853
2854 /* Update cfun and input_location in case of an ICE: make it easier to
2855 track down which source construct we're failing to handle. */
2856 auto_cfun sentinel (node->get_function ());
2857 const gimple *stmt = point.get_stmt ();
2858 if (stmt)
2859 input_location = stmt->location;
2860
2861 const program_state &state = node->get_state ();
2862 if (logger)
2863 {
2864 pretty_printer *pp = logger->get_printer ();
2865 logger->start_log_line ();
2866 pp_string (pp, "point: ");
2867 point.print (pp, format (false));
2868 pp_string (pp, ", state: ");
2869 state.dump_to_pp (m_ext_state, true, false, pp);
2870 logger->end_log_line ();
2871 }
2872
2873 switch (point.get_kind ())
2874 {
2875 default:
2876 gcc_unreachable ();
2877 case PK_ORIGIN:
2878 /* This node exists to simplify finding the shortest path
2879 to an exploded_node. */
2880 break;
2881
2882 case PK_BEFORE_SUPERNODE:
2883 {
2884 program_state next_state (state);
2885
2886 if (point.get_from_edge ())
2887 {
2888 impl_region_model_context ctxt (*this, node,
2889 &state, &next_state, NULL);
2890 const cfg_superedge *last_cfg_superedge
2891 = point.get_from_edge ()->dyn_cast_cfg_superedge ();
2892 if (last_cfg_superedge)
2893 next_state.m_region_model->update_for_phis
2894 (node->get_supernode (),
2895 last_cfg_superedge,
2896 &ctxt);
2897 }
2898
2899 program_point next_point (point.get_next ());
2900 exploded_node *next = get_or_create_node (next_point, next_state, node);
2901 if (next)
2902 add_edge (node, next, NULL);
2903 }
2904 break;
2905 case PK_BEFORE_STMT:
2906 {
2907 /* Determine the effect of a run of one or more statements
2908 within one supernode, generating an edge to the program_point
2909 after the last statement that's processed.
2910
2911 Stop iterating statements and thus consolidating into one enode
2912 when:
2913 - reaching the end of the statements in the supernode
2914 - if an sm-state-change occurs (so that it gets its own
2915 exploded_node)
2916 - if "-fanalyzer-fine-grained" is active
2917 - encountering certain statements that must appear at the start of
2918 their enode (those for which stmt_requires_new_enode_p returns true)
2919
2920 Update next_state in-place, to get the result of the one
2921 or more stmts that are processed.
2922
2923 Split the node in-place if an sm-state-change occurs, so that
2924 the sm-state-change occurs on an edge where the src enode has
2925 exactly one stmt, the one that caused the change. */
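/* For example (illustrative): given the stmt sequence
     p = malloc (n);  x = a + b;  y = x * 2;  free (p);
   the pure arithmetic in the middle can be consolidated into a single
   enode, whereas the malloc and free calls change sm-state and so each
   end up as the sole stmt of their enode, keeping the state-change
   events attached to the statements that caused them.  */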
2926 program_state next_state (state);
2927 const supernode *snode = point.get_supernode ();
2928 unsigned stmt_idx;
2929 const gimple *prev_stmt = NULL;
2930 for (stmt_idx = point.get_stmt_idx ();
2931 stmt_idx < snode->m_stmts.length ();
2932 stmt_idx++)
2933 {
2934 const gimple *stmt = snode->m_stmts[stmt_idx];
2935
2936 if (stmt_idx > point.get_stmt_idx ())
2937 if (stmt_requires_new_enode_p (stmt, prev_stmt))
2938 {
2939 stmt_idx--;
2940 break;
2941 }
2942 prev_stmt = stmt;
2943
2944 program_state old_state (next_state);
2945
2946 /* Process the stmt. */
2947 exploded_node::on_stmt_flags flags
2948 = node->on_stmt (*this, snode, stmt, &next_state);
2949 node->m_num_processed_stmts++;
2950
2951 /* If flags.m_terminate_path, stop analyzing; any nodes/edges
2952 will have been added by on_stmt (e.g. for handling longjmp). */
2953 if (flags.m_terminate_path)
2954 return;
2955
2956 if (next_state.m_region_model)
2957 {
2958 impl_region_model_context ctxt (*this, node,
2959 &old_state, &next_state, stmt);
2960 program_state::detect_leaks (old_state, next_state, NULL,
2961 get_ext_state (), &ctxt);
2962 }
2963
2964 unsigned next_idx = stmt_idx + 1;
2965 program_point next_point
2966 = (next_idx < point.get_supernode ()->m_stmts.length ()
2967 ? program_point::before_stmt (point.get_supernode (), next_idx,
2968 point.get_call_string ())
2969 : program_point::after_supernode (point.get_supernode (),
2970 point.get_call_string ()));
2971 next_state = next_state.prune_for_point (*this, next_point, node);
2972
2973 if (flags.m_sm_changes || flag_analyzer_fine_grained)
2974 {
2975 program_point split_point
2976 = program_point::before_stmt (point.get_supernode (),
2977 stmt_idx,
2978 point.get_call_string ());
2979 if (split_point != node->get_point ())
2980 {
2981 /* If we're not at the start of NODE, split the enode at
2982 this stmt, so we have:
2983 node -> split_enode
2984 so that when split_enode is processed the next edge
2985 we add will be:
2986 split_enode -> next
2987 and any state change will effectively occur on that
2988 latter edge, and split_enode will contain just stmt. */
2989 if (logger)
2990 logger->log ("getting split_enode");
2991 exploded_node *split_enode
2992 = get_or_create_node (split_point, old_state, node);
2993 if (!split_enode)
2994 return;
2995 /* "stmt" will be reprocessed when split_enode is
2996 processed. */
2997 node->m_num_processed_stmts--;
2998 if (logger)
2999 logger->log ("creating edge to split_enode");
3000 add_edge (node, split_enode, NULL);
3001 return;
3002 }
3003 else
3004 /* If we're at the start of NODE, stop iterating,
3005 so that an edge will be created from NODE to
3006 (next_point, next_state) below. */
3007 break;
3008 }
3009 }
3010 unsigned next_idx = stmt_idx + 1;
3011 program_point next_point
3012 = (next_idx < point.get_supernode ()->m_stmts.length ()
3013 ? program_point::before_stmt (point.get_supernode (), next_idx,
3014 point.get_call_string ())
3015 : program_point::after_supernode (point.get_supernode (),
3016 point.get_call_string ()));
3017 exploded_node *next = get_or_create_node (next_point, next_state, node);
3018 if (next)
3019 add_edge (node, next, NULL);
3020 }
3021 break;
3022 case PK_AFTER_SUPERNODE:
3023 {
3024 /* If this is an EXIT BB, detect leaks, and potentially
3025 create a function summary. */
3026 if (point.get_supernode ()->return_p ())
3027 {
3028 node->detect_leaks (*this);
3029 if (flag_analyzer_call_summaries
3030 && point.get_call_string ().empty_p ())
3031 {
3032 /* TODO: create function summary
3033 There can be more than one; each corresponds to a different
3034 final enode in the function. */
3035 if (logger)
3036 {
3037 pretty_printer *pp = logger->get_printer ();
3038 logger->start_log_line ();
3039 logger->log_partial
3040 ("would create function summary for %qE; state: ",
3041 point.get_fndecl ());
3042 state.dump_to_pp (m_ext_state, true, false, pp);
3043 logger->end_log_line ();
3044 }
3045 per_function_data *per_fn_data
3046 = get_or_create_per_function_data (point.get_function ());
3047 per_fn_data->add_call_summary (node);
3048 }
3049 }
3050 /* Traverse into successors of the supernode. */
3051 int i;
3052 superedge *succ;
3053 FOR_EACH_VEC_ELT (point.get_supernode ()->m_succs, i, succ)
3054 {
3055 if (logger)
3056 logger->log ("considering SN: %i -> SN: %i",
3057 succ->m_src->m_index, succ->m_dest->m_index);
3058
3059 program_point next_point
3060 = program_point::before_supernode (succ->m_dest, succ,
3061 point.get_call_string ());
3062 program_state next_state (state);
3063
3064 if (!node->on_edge (*this, succ, &next_point, &next_state))
3065 {
3066 if (logger)
3067 logger->log ("skipping impossible edge to SN: %i",
3068 succ->m_dest->m_index);
3069 continue;
3070 }
3071
3072 exploded_node *next = get_or_create_node (next_point, next_state,
3073 node);
3074 if (next)
3075 add_edge (node, next, succ);
3076 }
3077 }
3078 break;
3079 }
3080 }
3081
3082 /* Ensure that this graph has a stats instance for FN, and return it.
3083 FN can be NULL, in which case a stats instance is returned covering
3084 the "functionless" parts of the graph (the origin node). */
3085
3086 stats *
3087 exploded_graph::get_or_create_function_stats (function *fn)
3088 {
3089 if (!fn)
3090 return &m_functionless_stats;
3091
3092 if (stats **slot = m_per_function_stats.get (fn))
3093 return *slot;
3094 else
3095 {
3096 int num_supernodes = fn ? n_basic_blocks_for_fn (fn) : 0;
3097 /* Not quite the number of supernodes, but close enough. */
3098 stats *new_stats = new stats (num_supernodes);
3099 m_per_function_stats.put (fn, new_stats);
3100 return new_stats;
3101 }
3102 }
3103
3104 /* Print bar charts to PP showing:
3105 - the number of enodes per function, and
3106 - for each function:
3107 - the number of enodes per supernode/BB
3108 - the number of excess enodes per supernode/BB beyond the
3109 per-program-point limit, if there were any. */
3110
3111 void
3112 exploded_graph::print_bar_charts (pretty_printer *pp) const
3113 {
3114 cgraph_node *cgnode;
3115
3116 pp_string (pp, "enodes per function:");
3117 pp_newline (pp);
3118 bar_chart enodes_per_function;
3119 FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (cgnode)
3120 {
3121 function *fn = cgnode->get_fun ();
3122 const stats * const *s_ptr
3123 = const_cast <function_stat_map_t &> (m_per_function_stats).get (fn);
3124 enodes_per_function.add_item (function_name (fn),
3125 s_ptr ? (*s_ptr)->get_total_enodes () : 0);
3126 }
3127 enodes_per_function.print (pp);
3128
3129 /* Accumulate number of enodes per supernode. */
3130 auto_vec<unsigned> enodes_per_supernode (m_sg.num_nodes ());
3131 for (int i = 0; i < m_sg.num_nodes (); i++)
3132 enodes_per_supernode.quick_push (0);
3133 int i;
3134 exploded_node *enode;
3135 FOR_EACH_VEC_ELT (m_nodes, i, enode)
3136 {
3137 const supernode *iter_snode = enode->get_supernode ();
3138 if (!iter_snode)
3139 continue;
3140 enodes_per_supernode[iter_snode->m_index]++;
3141 }
3142
3143 /* Accumulate excess enodes per supernode. */
3144 auto_vec<unsigned> excess_enodes_per_supernode (m_sg.num_nodes ());
3145 for (int i = 0; i < m_sg.num_nodes (); i++)
3146 excess_enodes_per_supernode.quick_push (0);
3147 for (point_map_t::iterator iter = m_per_point_data.begin ();
3148 iter != m_per_point_data.end (); ++iter)
3149 {
3150 const program_point *point = (*iter).first;
3151 const supernode *iter_snode = point->get_supernode ();
3152 if (!iter_snode)
3153 continue;
3154 const per_program_point_data *point_data = (*iter).second;
3155 excess_enodes_per_supernode[iter_snode->m_index]
3156 += point_data->m_excess_enodes;
3157 }
3158
3159 /* Show per-function bar_charts of enodes per supernode/BB. */
3160 pp_string (pp, "per-function enodes per supernode/BB:");
3161 pp_newline (pp);
3162 FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (cgnode)
3163 {
3164 function *fn = cgnode->get_fun ();
3165 pp_printf (pp, "function: %qs", function_name (fn));
3166 pp_newline (pp);
3167
3168 bar_chart enodes_per_snode;
3169 bar_chart excess_enodes_per_snode;
3170 bool have_excess_enodes = false;
3171 for (int i = 0; i < m_sg.num_nodes (); i++)
3172 {
3173 const supernode *iter_snode = m_sg.get_node_by_index (i);
3174 if (iter_snode->get_function () != fn)
3175 continue;
3176 pretty_printer tmp_pp;
3177 pp_printf (&tmp_pp, "sn %i (bb %i)",
3178 iter_snode->m_index, iter_snode->m_bb->index);
3179 enodes_per_snode.add_item (pp_formatted_text (&tmp_pp),
3180 enodes_per_supernode[iter_snode->m_index]);
3181 const int num_excess
3182 = excess_enodes_per_supernode[iter_snode->m_index];
3183 excess_enodes_per_snode.add_item (pp_formatted_text (&tmp_pp),
3184 num_excess);
3185 if (num_excess)
3186 have_excess_enodes = true;
3187 }
3188 enodes_per_snode.print (pp);
3189 if (have_excess_enodes)
3190 {
3191 pp_printf (pp, "EXCESS ENODES:");
3192 pp_newline (pp);
3193 excess_enodes_per_snode.print (pp);
3194 }
3195 }
3196 }
3197
3198 /* Write all stats information to this graph's logger, if any. */
3199
3200 void
3201 exploded_graph::log_stats () const
3202 {
3203 logger * const logger = get_logger ();
3204 if (!logger)
3205 return;
3206
3207 LOG_SCOPE (logger);
3208
3209 m_ext_state.get_engine ()->log_stats (logger);
3210
3211 logger->log ("m_sg.num_nodes (): %i", m_sg.num_nodes ());
3212 logger->log ("m_nodes.length (): %i", m_nodes.length ());
3213 logger->log ("m_edges.length (): %i", m_edges.length ());
3214 logger->log ("remaining enodes in worklist: %i", m_worklist.length ());
3215
3216 logger->log ("global stats:");
3217 m_global_stats.log (logger);
3218
3219 for (function_stat_map_t::iterator iter = m_per_function_stats.begin ();
3220 iter != m_per_function_stats.end ();
3221 ++iter)
3222 {
3223 function *fn = (*iter).first;
3224 log_scope s (logger, function_name (fn));
3225 (*iter).second->log (logger);
3226 }
3227
3228 print_bar_charts (logger->get_printer ());
3229 }
3230
3231 /* Dump all stats information to OUT. */
3232
3233 void
3234 exploded_graph::dump_stats (FILE *out) const
3235 {
3236 fprintf (out, "m_sg.num_nodes (): %i\n", m_sg.num_nodes ());
3237 fprintf (out, "m_nodes.length (): %i\n", m_nodes.length ());
3238 fprintf (out, "m_edges.length (): %i\n", m_edges.length ());
3239 fprintf (out, "remaining enodes in worklist: %i", m_worklist.length ());
3240
3241 fprintf (out, "global stats:\n");
3242 m_global_stats.dump (out);
3243
3244 for (function_stat_map_t::iterator iter = m_per_function_stats.begin ();
3245 iter != m_per_function_stats.end ();
3246 ++iter)
3247 {
3248 function *fn = (*iter).first;
3249 fprintf (out, "function: %s\n", function_name (fn));
3250 (*iter).second->dump (out);
3251 }
3252
3253 fprintf (out, "PK_AFTER_SUPERNODE per supernode:\n");
3254 for (unsigned i = 0; i < m_PK_AFTER_SUPERNODE_per_snode.length (); i++)
3255 fprintf (out, " SN %i: %3i\n", i, m_PK_AFTER_SUPERNODE_per_snode[i]);
3256 }
3257
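/* Dump the state of each PK_AFTER_SUPERNODE enode for SNODE to OUT,
   along with a count of such enodes. */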
3258 void
3259 exploded_graph::dump_states_for_supernode (FILE *out,
3260 const supernode *snode) const
3261 {
3262 fprintf (out, "PK_AFTER_SUPERNODE nodes for SN: %i\n", snode->m_index);
3263 int i;
3264 exploded_node *enode;
3265 int state_idx = 0;
3266 FOR_EACH_VEC_ELT (m_nodes, i, enode)
3267 {
3268 const supernode *iter_snode = enode->get_supernode ();
3269 if (enode->get_point ().get_kind () == PK_AFTER_SUPERNODE
3270 && iter_snode == snode)
3271 {
3272 pretty_printer pp;
3273 pp_format_decoder (&pp) = default_tree_printer;
3274 enode->get_state ().dump_to_pp (m_ext_state, true, false, &pp);
3275 fprintf (out, "state %i: EN: %i\n %s\n",
3276 state_idx++, enode->m_index,
3277 pp_formatted_text (&pp));
3278 }
3279 }
3280 fprintf (out, "#exploded_node for PK_AFTER_SUPERNODE for SN: %i = %i\n",
3281 snode->m_index, state_idx);
3282 }
3283
3284 /* Return a new json::object of the form
3285 {"nodes" : [objs for enodes],
3286 "edges" : [objs for eedges],
3287 "ext_state": object for extrinsic_state,
3288 "diagnostic_manager": object for diagnostic_manager}. */
3289
3290 json::object *
3291 exploded_graph::to_json () const
3292 {
3293 json::object *egraph_obj = new json::object ();
3294
3295 /* Nodes. */
3296 {
3297 json::array *nodes_arr = new json::array ();
3298 unsigned i;
3299 exploded_node *n;
3300 FOR_EACH_VEC_ELT (m_nodes, i, n)
3301 nodes_arr->append (n->to_json (m_ext_state));
3302 egraph_obj->set ("nodes", nodes_arr);
3303 }
3304
3305 /* Edges. */
3306 {
3307 json::array *edges_arr = new json::array ();
3308 unsigned i;
3309 exploded_edge *n;
3310 FOR_EACH_VEC_ELT (m_edges, i, n)
3311 edges_arr->append (n->to_json ());
3312 egraph_obj->set ("edges", edges_arr);
3313 }
3314
3315 /* m_sg is JSONified at the top-level. */
3316
3317 egraph_obj->set ("ext_state", m_ext_state.to_json ());
3318 egraph_obj->set ("diagnostic_manager", m_diagnostic_manager.to_json ());
3319
3320 /* The following fields aren't yet being JSONified:
3321 worklist m_worklist;
3322 const state_purge_map *const m_purge_map;
3323 const analysis_plan &m_plan;
3324 stats m_global_stats;
3325 function_stat_map_t m_per_function_stats;
3326 stats m_functionless_stats;
3327 call_string_data_map_t m_per_call_string_data;
3328 auto_vec<int> m_PK_AFTER_SUPERNODE_per_snode; */
3329
3330 return egraph_obj;
3331 }
3332
3333 /* Look for the last use of SEARCH_STMT within this path.
3334 If found, write the edge's index to *OUT_IDX and return true; otherwise
3335 return false. */
3336
3337 bool
3338 exploded_path::find_stmt_backwards (const gimple *search_stmt,
3339 int *out_idx) const
3340 {
3341 int i;
3342 const exploded_edge *eedge;
3343 FOR_EACH_VEC_ELT_REVERSE (m_edges, i, eedge)
3344 {
3345 const exploded_node *dst_node = eedge->m_dest;
3346 const program_point &dst_point = dst_node->get_point ();
3347 const gimple *stmt = dst_point.get_stmt ();
3348 if (stmt == search_stmt)
3349 {
3350 *out_idx = i;
3351 return true;
3352 }
3353 }
3354 return false;
3355 }
3356
3357 /* Get the final exploded_node in this path; the path must be non-empty. */
3358
3359 exploded_node *
3360 exploded_path::get_final_enode () const
3361 {
3362 gcc_assert (m_edges.length () > 0);
3363 return m_edges[m_edges.length () - 1]->m_dest;
3364 }
3365
3366 /* Check state along this path, returning true if it is feasible.
3367 If OUT is non-NULL, and the path is infeasible, write a new
3368 feasibility_problem to *OUT. */
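/* In outline (a summary of the loop below): replay the path through a
   fresh region_model:
     - for each eedge, re-run the stmts processed by its source enode
       (assignments and returns);
     - if the eedge follows a superedge, apply that edge's constraints
       via maybe_update_for_edge, and report infeasibility (optionally
       recording a feasibility_problem) if a constraint is rejected;
     - otherwise handle the initial frame push from the origin node, or
       any custom edge info;
     - apply phi nodes for CFG edges, using loop_replay_fixup for
       supernodes already visited on this path.  */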
3369
3370 bool
3371 exploded_path::feasible_p (logger *logger, feasibility_problem **out,
3372 engine *eng, const exploded_graph *eg) const
3373 {
3374 LOG_SCOPE (logger);
3375
3376 auto_sbitmap snodes_visited (eg->get_supergraph ().m_nodes.length ());
3377
3378 /* Traverse the path, updating this model. */
3379 region_model model (eng->get_model_manager ());
3380 for (unsigned edge_idx = 0; edge_idx < m_edges.length (); edge_idx++)
3381 {
3382 const exploded_edge *eedge = m_edges[edge_idx];
3383 if (logger)
3384 logger->log ("considering edge %i: EN:%i -> EN:%i",
3385 edge_idx,
3386 eedge->m_src->m_index,
3387 eedge->m_dest->m_index);
3388 const exploded_node &src_enode = *eedge->m_src;
3389 const program_point &src_point = src_enode.get_point ();
3390 if (logger)
3391 {
3392 logger->start_log_line ();
3393 src_point.print (logger->get_printer (), format (false));
3394 logger->end_log_line ();
3395 }
3396
3397 /* Update state for the stmts that were processed in each enode. */
3398 for (unsigned stmt_idx = 0; stmt_idx < src_enode.m_num_processed_stmts;
3399 stmt_idx++)
3400 {
3401 const gimple *stmt = src_enode.get_processed_stmt (stmt_idx);
3402
3403 /* Update cfun and input_location in case of an ICE: make it easier to
3404 track down which source construct we're failing to handle. */
3405 auto_cfun sentinel (src_point.get_function ());
3406 input_location = stmt->location;
3407
3408 if (const gassign *assign = dyn_cast <const gassign *> (stmt))
3409 model.on_assignment (assign, NULL);
3410 else if (const greturn *return_ = dyn_cast <const greturn *> (stmt))
3411 model.on_return (return_, NULL);
3412 }
3413
3414 const superedge *sedge = eedge->m_sedge;
3415 if (sedge)
3416 {
3417 if (logger)
3418 logger->log (" sedge: SN:%i -> SN:%i %s",
3419 sedge->m_src->m_index,
3420 sedge->m_dest->m_index,
3421 sedge->get_description (false));
3422
3423 const gimple *last_stmt = src_point.get_supernode ()->get_last_stmt ();
3424 rejected_constraint *rc = NULL;
3425 if (!model.maybe_update_for_edge (*sedge, last_stmt, NULL, &rc))
3426 {
3427 if (logger)
3428 {
3429 logger->log ("rejecting due to region model");
3430 model.dump_to_pp (logger->get_printer (), true, false);
3431 }
3432 if (out)
3433 *out = new feasibility_problem (edge_idx, *eedge,
3434 last_stmt, rc);
3435 else
3436 delete rc;
3437 return false;
3438 }
3439 }
3440 else
3441 {
3442 /* Special-case the initial eedge from the origin node to the
3443 initial function by pushing a frame for it. */
3444 if (edge_idx == 0)
3445 {
3446 gcc_assert (eedge->m_src->m_index == 0);
3447 gcc_assert (src_point.get_kind () == PK_ORIGIN);
3448 gcc_assert (eedge->m_dest->get_point ().get_kind ()
3449 == PK_BEFORE_SUPERNODE);
3450 function *fun = eedge->m_dest->get_function ();
3451 gcc_assert (fun);
3452 model.push_frame (fun, NULL, NULL);
3453 if (logger)
3454 logger->log (" pushing frame for %qD", fun->decl);
3455 }
3456 else if (eedge->m_custom_info)
3457 {
3458 eedge->m_custom_info->update_model (&model, *eedge);
3459 }
3460 }
3461
3462 /* Handle phi nodes on an edge leaving a PK_BEFORE_SUPERNODE (to
3463 a PK_BEFORE_STMT, or a PK_AFTER_SUPERNODE if no stmts).
3464 This will typically not be associated with a superedge. */
3465 if (src_point.get_from_edge ())
3466 {
3467 const cfg_superedge *last_cfg_superedge
3468 = src_point.get_from_edge ()->dyn_cast_cfg_superedge ();
3469 const exploded_node &dst_enode = *eedge->m_dest;
3470 const unsigned dst_snode_idx = dst_enode.get_supernode ()->m_index;
3471 if (last_cfg_superedge)
3472 {
3473 if (logger)
3474 logger->log (" update for phis");
3475 model.update_for_phis (src_enode.get_supernode (),
3476 last_cfg_superedge,
3477 NULL);
3478 /* If we're entering an snode that we've already visited on this
3479 epath, then we need to fix things up for loops; see the
3480 comment for store::loop_replay_fixup.
3481 Perhaps we should also verify the callstring,
3482 and track program_points, but hopefully doing it by supernode
3483 is good enough. */
3484 if (bitmap_bit_p (snodes_visited, dst_snode_idx))
3485 model.loop_replay_fixup (dst_enode.get_state ().m_region_model);
3486 }
3487 bitmap_set_bit (snodes_visited, dst_snode_idx);
3488 }
3489
3490 if (logger)
3491 {
3492 logger->log ("state after edge %i: EN:%i -> EN:%i",
3493 edge_idx,
3494 eedge->m_src->m_index,
3495 eedge->m_dest->m_index);
3496 logger->start_log_line ();
3497 model.dump_to_pp (logger->get_printer (), true, false);
3498 logger->end_log_line ();
3499 }
3500 }
3501
3502 return true;
3503 }
3504
3505 /* Dump this path in multiline form to PP. */
3506
3507 void
3508 exploded_path::dump_to_pp (pretty_printer *pp) const
3509 {
3510 for (unsigned i = 0; i < m_edges.length (); i++)
3511 {
3512 const exploded_edge *eedge = m_edges[i];
3513 pp_printf (pp, "m_edges[%i]: EN %i -> EN %i",
3514 i,
3515 eedge->m_src->m_index,
3516 eedge->m_dest->m_index);
3517 pp_newline (pp);
3518 }
3519 }
3520
3521 /* Dump this path in multiline form to FP. */
3522
3523 void
3524 exploded_path::dump (FILE *fp) const
3525 {
3526 pretty_printer pp;
3527 pp_format_decoder (&pp) = default_tree_printer;
3528 pp_show_color (&pp) = pp_show_color (global_dc->printer);
3529 pp.buffer->stream = fp;
3530 dump_to_pp (&pp);
3531 pp_flush (&pp);
3532 }
3533
3534 /* Dump this path in multiline form to stderr. */
3535
3536 DEBUG_FUNCTION void
3537 exploded_path::dump () const
3538 {
3539 dump (stderr);
3540 }
3541
3542 /* class feasibility_problem. */
3543
3544 void
3545 feasibility_problem::dump_to_pp (pretty_printer *pp) const
3546 {
3547 pp_printf (pp, "edge from EN: %i to EN: %i",
3548 m_eedge.m_src->m_index, m_eedge.m_dest->m_index);
3549 if (m_rc)
3550 {
3551 pp_string (pp, "; rejected constraint: ");
3552 m_rc->dump_to_pp (pp);
3553 pp_string (pp, "; rmodel: ");
3554 m_rc->m_model.dump_to_pp (pp, true, false);
3555 }
3556 }
3557
3558 /* A family of cluster subclasses for use when generating .dot output for
3559 exploded graphs (-fdump-analyzer-exploded-graph), for grouping the
3560 enodes into hierarchical boxes.
3561
3562 All functionless enodes appear in the top-level graph.
3563 Every (function, call_string) pair gets its own cluster. Within that
3564 cluster, each supernode gets its own cluster.
3565
3566 Hence all enodes relating to a particular function with a particular
3567 callstring will be in a cluster together; all enodes for the same
3568 function but with a different callstring will be in a different
3569 cluster. */
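/* For example (an illustrative sketch only; the actual subgraph and label
   strings are emitted by function_call_string_cluster::dump_dot and
   supernode_cluster::dump_dot below, using each function's assembler name
   and the supernode/basic-block indices), the .dot output nests subgraphs
   roughly like:

     subgraph "cluster_function_foo" {
       label="call string: [] function: foo";
       subgraph "cluster_supernode_0" {
         label="SN: 0 (bb: 2; scc: 0)";
         ...enodes for SN 0 within "foo" for this call string...
       }
     }
*/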
3570
3571 /* Base class of cluster for clustering exploded_node instances in .dot
3572 output, based on various subclass-specific criteria. */
3573
3574 class exploded_cluster : public cluster<eg_traits>
3575 {
3576 };
3577
3578 /* Cluster containing all exploded_node instances for one supernode. */
3579
3580 class supernode_cluster : public exploded_cluster
3581 {
3582 public:
3583 supernode_cluster (const supernode *supernode) : m_supernode (supernode) {}
3584
3585 // TODO: dtor?
3586
3587 void dump_dot (graphviz_out *gv, const dump_args_t &args) const FINAL OVERRIDE
3588 {
3589 gv->println ("subgraph \"cluster_supernode_%i\" {", m_supernode->m_index);
3590 gv->indent ();
3591 gv->println ("style=\"dashed\";");
3592 gv->println ("label=\"SN: %i (bb: %i; scc: %i)\";",
3593 m_supernode->m_index, m_supernode->m_bb->index,
3594 args.m_eg.get_scc_id (*m_supernode));
3595
3596 int i;
3597 exploded_node *enode;
3598 FOR_EACH_VEC_ELT (m_enodes, i, enode)
3599 enode->dump_dot (gv, args);
3600
3601 /* Terminate subgraph. */
3602 gv->outdent ();
3603 gv->println ("}");
3604 }
3605
3606 void add_node (exploded_node *en) FINAL OVERRIDE
3607 {
3608 m_enodes.safe_push (en);
3609 }
3610
3611 /* Comparator for use by auto_vec<supernode_cluster *>::qsort. */
3612
3613 static int cmp_ptr_ptr (const void *p1, const void *p2)
3614 {
3615 const supernode_cluster *c1
3616 = *(const supernode_cluster * const *)p1;
3617 const supernode_cluster *c2
3618 = *(const supernode_cluster * const *)p2;
3619 return c1->m_supernode->m_index - c2->m_supernode->m_index;
3620 }
3621
3622 private:
3623 const supernode *m_supernode;
3624 auto_vec <exploded_node *> m_enodes;
3625 };
3626
3627 /* Cluster containing all supernode_cluster instances for one
3628 (function, call_string) pair. */
3629
3630 class function_call_string_cluster : public exploded_cluster
3631 {
3632 public:
3633 function_call_string_cluster (function *fun, call_string cs)
3634 : m_fun (fun), m_cs (cs) {}
3635
3636 ~function_call_string_cluster ()
3637 {
3638 for (map_t::iterator iter = m_map.begin ();
3639 iter != m_map.end ();
3640 ++iter)
3641 delete (*iter).second;
3642 }
3643
3644 void dump_dot (graphviz_out *gv, const dump_args_t &args) const FINAL OVERRIDE
3645 {
3646 const char *funcname = function_name (m_fun);
3647
3648 gv->println ("subgraph \"cluster_function_%s\" {",
3649 IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (m_fun->decl)));
3650 gv->indent ();
3651 gv->write_indent ();
3652 gv->print ("label=\"call string: ");
3653 m_cs.print (gv->get_pp ());
3654 gv->print (" function: %s \";", funcname);
3655 gv->print ("\n");
3656
3657 /* Dump m_map, sorting it to avoid churn when comparing dumps. */
3658 auto_vec<supernode_cluster *> child_clusters (m_map.elements ());
3659 for (map_t::iterator iter = m_map.begin ();
3660 iter != m_map.end ();
3661 ++iter)
3662 child_clusters.quick_push ((*iter).second);
3663
3664 child_clusters.qsort (supernode_cluster::cmp_ptr_ptr);
3665
3666 unsigned i;
3667 supernode_cluster *child_cluster;
3668 FOR_EACH_VEC_ELT (child_clusters, i, child_cluster)
3669 child_cluster->dump_dot (gv, args);
3670
3671 /* Terminate subgraph. */
3672 gv->outdent ();
3673 gv->println ("}");
3674 }
3675
3676 void add_node (exploded_node *en) FINAL OVERRIDE
3677 {
3678 const supernode *supernode = en->get_supernode ();
3679 gcc_assert (supernode);
3680 supernode_cluster **slot = m_map.get (supernode);
3681 if (slot)
3682 (*slot)->add_node (en);
3683 else
3684 {
3685 supernode_cluster *child = new supernode_cluster (supernode);
3686 m_map.put (supernode, child);
3687 child->add_node (en);
3688 }
3689 }
3690
3691 /* Comparator for use by auto_vec<function_call_string_cluster *>. */
3692
3693 static int cmp_ptr_ptr (const void *p1, const void *p2)
3694 {
3695 const function_call_string_cluster *c1
3696 = *(const function_call_string_cluster * const *)p1;
3697 const function_call_string_cluster *c2
3698 = *(const function_call_string_cluster * const *)p2;
3699 if (int cmp_names
3700 = strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (c1->m_fun->decl)),
3701 IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (c2->m_fun->decl))))
3702 return cmp_names;
3703 return call_string::cmp (c1->m_cs, c2->m_cs);
3704 }
3705
3706 private:
3707 function *m_fun;
3708 call_string m_cs;
3709 typedef ordered_hash_map<const supernode *, supernode_cluster *> map_t;
3710 map_t m_map;
3711 };
3712
3713 /* Keys for root_cluster. */
3714
3715 struct function_call_string
3716 {
3717 function_call_string (function *fun, call_string cs)
3718 : m_fun (fun), m_cs (cs)
3719 {
3720 gcc_assert (fun);
3721 }
3722
3723 function *m_fun;
3724 call_string m_cs;
3725 };
3726
3727 } // namespace ana
3728
3729 template <> struct default_hash_traits<function_call_string>
3730 : public pod_hash_traits<function_call_string>
3731 {
3732 static const bool empty_zero_p = false;
3733 };
3734
3735 template <>
3736 inline hashval_t
3737 pod_hash_traits<function_call_string>::hash (value_type v)
3738 {
3739 return pointer_hash <function>::hash (v.m_fun) ^ v.m_cs.hash ();
3740 }
3741
3742 template <>
3743 inline bool
3744 pod_hash_traits<function_call_string>::equal (const value_type &existing,
3745 const value_type &candidate)
3746 {
3747 return existing.m_fun == candidate.m_fun && existing.m_cs == candidate.m_cs;
3748 }
3749 template <>
3750 inline void
3751 pod_hash_traits<function_call_string>::mark_deleted (value_type &v)
3752 {
3753 v.m_fun = reinterpret_cast<function *> (1);
3754 }
3755 template <>
3756 inline void
3757 pod_hash_traits<function_call_string>::mark_empty (value_type &v)
3758 {
3759 v.m_fun = NULL;
3760 }
3761 template <>
3762 inline bool
3763 pod_hash_traits<function_call_string>::is_deleted (value_type v)
3764 {
3765 return v.m_fun == reinterpret_cast<function *> (1);
3766 }
3767 template <>
3768 inline bool
3769 pod_hash_traits<function_call_string>::is_empty (value_type v)
3770 {
3771 return v.m_fun == NULL;
3772 }
3773
3774 namespace ana {
3775
3776 /* Top-level cluster for generating .dot output for exploded graphs,
3777 handling the functionless nodes, and grouping the remaining nodes by
3778 callstring. */
3779
3780 class root_cluster : public exploded_cluster
3781 {
3782 public:
3783 ~root_cluster ()
3784 {
3785 for (map_t::iterator iter = m_map.begin ();
3786 iter != m_map.end ();
3787 ++iter)
3788 delete (*iter).second;
3789 }
3790
3791 void dump_dot (graphviz_out *gv, const dump_args_t &args) const FINAL OVERRIDE
3792 {
3793 int i;
3794 exploded_node *enode;
3795 FOR_EACH_VEC_ELT (m_functionless_enodes, i, enode)
3796 enode->dump_dot (gv, args);
3797
3798 /* Dump m_map, sorting it to avoid churn when comparing dumps. */
3799 auto_vec<function_call_string_cluster *> child_clusters (m_map.elements ());
3800 for (map_t::iterator iter = m_map.begin ();
3801 iter != m_map.end ();
3802 ++iter)
3803 child_clusters.quick_push ((*iter).second);
3804
3805 child_clusters.qsort (function_call_string_cluster::cmp_ptr_ptr);
3806
3807 function_call_string_cluster *child_cluster;
3808 FOR_EACH_VEC_ELT (child_clusters, i, child_cluster)
3809 child_cluster->dump_dot (gv, args);
3810 }
3811
3812 void add_node (exploded_node *en) FINAL OVERRIDE
3813 {
3814 function *fun = en->get_function ();
3815 if (!fun)
3816 {
3817 m_functionless_enodes.safe_push (en);
3818 return;
3819 }
3820
3821 const call_string &cs = en->get_point ().get_call_string ();
3822 function_call_string key (fun, cs);
3823 function_call_string_cluster **slot = m_map.get (key);
3824 if (slot)
3825 (*slot)->add_node (en);
3826 else
3827 {
3828 function_call_string_cluster *child
3829 = new function_call_string_cluster (fun, cs);
3830 m_map.put (key, child);
3831 child->add_node (en);
3832 }
3833 }
3834
3835 private:
3836 /* This can't be an ordered_hash_map, as we can't store vec<call_string>,
3837 since it's not a POD; vec<>::quick_push has:
3838 *slot = obj;
3839 and the slot isn't initialized, so the assignment operator dies when
3840 cleaning up the uninitialized *slot (within the truncate call). */
3841 typedef hash_map<function_call_string, function_call_string_cluster *> map_t;
3842 map_t m_map;
3843
3844 /* This should just be the origin exploded_node. */
3845 auto_vec <exploded_node *> m_functionless_enodes;
3846 };
3847
3848 /* Subclass of range_label for use within
3849 exploded_graph::dump_exploded_nodes for implementing
3850 -fdump-analyzer-exploded-nodes: a label for a specific
3851 exploded_node. */
3852
3853 class enode_label : public range_label
3854 {
3855 public:
3856 enode_label (const extrinsic_state &ext_state,
3857 exploded_node *enode)
3858 : m_ext_state (ext_state), m_enode (enode) {}
3859
3860 label_text get_text (unsigned) const FINAL OVERRIDE
3861 {
3862 pretty_printer pp;
3863 pp_format_decoder (&pp) = default_tree_printer;
3864 m_enode->get_state ().dump_to_pp (m_ext_state, true, false, &pp);
3865 return make_label_text (false, "EN: %i: %s",
3866 m_enode->m_index, pp_formatted_text (&pp));
3867 }
3868
3869 private:
3870 const extrinsic_state &m_ext_state;
3871 exploded_node *m_enode;
3872 };
3873
3874 /* Postprocessing support for dumping the exploded nodes.
3875 Handle -fdump-analyzer-exploded-nodes,
3876 -fdump-analyzer-exploded-nodes-2, and the
3877 "__analyzer_dump_exploded_nodes" builtin. */
3878
3879 void
3880 exploded_graph::dump_exploded_nodes () const
3881 {
3882 // TODO
3883 /* Locate calls to __analyzer_dump_exploded_nodes. */
3884 // Print how many egs there are for them?
3885 /* Better: log them as we go, and record the exploded nodes
3886 in question. */
3887
3888 /* Show every enode. */
3889
3890 /* Gather them by stmt, so that we can more clearly see the
3891 "hotspots" requiring numerous exploded nodes. */
3892
3893 /* Alternatively, simply throw them all into one big rich_location
3894 and see if the label-printing will sort it out...
3895 This requires them all to be in the same source file. */
3896
3897 if (flag_dump_analyzer_exploded_nodes)
3898 {
3899 auto_timevar tv (TV_ANALYZER_DUMP);
3900 gcc_rich_location richloc (UNKNOWN_LOCATION);
3901 unsigned i;
3902 exploded_node *enode;
3903 FOR_EACH_VEC_ELT (m_nodes, i, enode)
3904 {
3905 if (const gimple *stmt = enode->get_stmt ())
3906 {
3907 if (get_pure_location (richloc.get_loc ()) == UNKNOWN_LOCATION)
3908 richloc.set_range (0, stmt->location, SHOW_RANGE_WITH_CARET);
3909 else
3910 richloc.add_range (stmt->location,
3911 SHOW_RANGE_WITHOUT_CARET,
3912 new enode_label (m_ext_state, enode));
3913 }
3914 }
3915 warning_at (&richloc, 0, "%i exploded nodes", m_nodes.length ());
3916
3917 /* Repeat the warning without all the labels, so that the message is
3918 visible (the other one may well have scrolled past the terminal limit). */
3919 warning_at (richloc.get_loc (), 0,
3920 "%i exploded nodes", m_nodes.length ());
3921
3922 if (m_worklist.length () > 0)
3923 warning_at (richloc.get_loc (), 0,
3924 "worklist still contains %i nodes", m_worklist.length ());
3925 }
3926
3927 /* Dump the egraph in textual form to a dump file. */
3928 if (flag_dump_analyzer_exploded_nodes_2)
3929 {
3930 auto_timevar tv (TV_ANALYZER_DUMP);
3931 char *filename
3932 = concat (dump_base_name, ".eg.txt", NULL);
3933 FILE *outf = fopen (filename, "w");
3934 if (!outf)
3935 error_at (UNKNOWN_LOCATION, "unable to open %qs for writing", filename);
3936 free (filename);
3937
3938 fprintf (outf, "exploded graph for %s\n", dump_base_name);
3939 fprintf (outf, " nodes: %i\n", m_nodes.length ());
3940 fprintf (outf, " edges: %i\n", m_edges.length ());
3941
3942 unsigned i;
3943 exploded_node *enode;
3944 FOR_EACH_VEC_ELT (m_nodes, i, enode)
3945 {
3946 fprintf (outf, "\nEN %i:\n", enode->m_index);
3947 enode->dump_succs_and_preds (outf);
3948 pretty_printer pp;
3949 enode->get_point ().print (&pp, format (true));
3950 fprintf (outf, "%s\n", pp_formatted_text (&pp));
3951 enode->get_state ().dump_to_file (m_ext_state, false, true, outf);
3952 }
3953
3954 fclose (outf);
3955 }
3956
3957 /* Dump the egraph in textual form to multiple dump files, one per enode. */
3958 if (flag_dump_analyzer_exploded_nodes_3)
3959 {
3960 auto_timevar tv (TV_ANALYZER_DUMP);
3961
3962 unsigned i;
3963 exploded_node *enode;
3964 FOR_EACH_VEC_ELT (m_nodes, i, enode)
3965 {
3966 char *filename
3967 = xasprintf ("%s.en-%i.txt", dump_base_name, i);
3968 FILE *outf = fopen (filename, "w");
3969 if (!outf)
3970 error_at (UNKNOWN_LOCATION, "unable to open %qs for writing", filename);
3971 free (filename);
3972
3973 fprintf (outf, "EN %i:\n", enode->m_index);
3974 enode->dump_succs_and_preds (outf);
3975 pretty_printer pp;
3976 enode->get_point ().print (&pp, format (true));
3977 fprintf (outf, "%s\n", pp_formatted_text (&pp));
3978 enode->get_state ().dump_to_file (m_ext_state, false, true, outf);
3979
3980 fclose (outf);
3981 }
3982 }
3983
3984 /* Emit a warning at any call to "__analyzer_dump_exploded_nodes",
3985 giving the number of processed exploded nodes for "before-stmt",
3986 and the IDs of processed, merger, and worklist enodes.
3987
3988 We highlight the count of *processed* enodes since this is of most
3989 interest in DejaGnu tests for ensuring that state merger has
3990 happened.
3991
3992 We don't show the count of merger and worklist enodes, as this is
3993 more of an implementation detail of the merging/worklist that we
3994 don't want to bake into our expected DejaGnu messages. */
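/* As an illustrative sketch (not a specific testcase), a DejaGnu test can
   call:

     __analyzer_dump_exploded_nodes (0);

   and pair it with a dg-warning directive expecting "1 processed enode"
   (matching the warning_n call below) to verify that state merging has
   collapsed the paths through that statement into a single processed
   enode.  Passing a nonzero argument additionally dumps the state of each
   processed enode to stderr.  */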
3995
3996 unsigned i;
3997 exploded_node *enode;
3998 hash_set<const gimple *> seen;
3999 FOR_EACH_VEC_ELT (m_nodes, i, enode)
4000 {
4001 if (enode->get_point ().get_kind () != PK_BEFORE_STMT)
4002 continue;
4003
4004 if (const gimple *stmt = enode->get_stmt ())
4005 if (const gcall *call = dyn_cast <const gcall *> (stmt))
4006 if (is_special_named_call_p (call, "__analyzer_dump_exploded_nodes",
4007 1))
4008 {
4009 if (seen.contains (stmt))
4010 continue;
4011
4012 auto_vec<exploded_node *> processed_enodes;
4013 auto_vec<exploded_node *> merger_enodes;
4014 auto_vec<exploded_node *> worklist_enodes;
4015 /* This is O(N^2). */
4016 unsigned j;
4017 exploded_node *other_enode;
4018 FOR_EACH_VEC_ELT (m_nodes, j, other_enode)
4019 {
4020 if (other_enode->get_point ().get_kind () != PK_BEFORE_STMT)
4021 continue;
4022 if (other_enode->get_stmt () == stmt)
4023 switch (other_enode->get_status ())
4024 {
4025 default:
4026 gcc_unreachable ();
4027 case exploded_node::STATUS_WORKLIST:
4028 worklist_enodes.safe_push (other_enode);
4029 break;
4030 case exploded_node::STATUS_PROCESSED:
4031 processed_enodes.safe_push (other_enode);
4032 break;
4033 case exploded_node::STATUS_MERGER:
4034 merger_enodes.safe_push (other_enode);
4035 break;
4036 }
4037 }
4038
4039 pretty_printer pp;
4040 pp_character (&pp, '[');
4041 print_enode_indices (&pp, processed_enodes);
4042 if (merger_enodes.length () > 0)
4043 {
4044 pp_string (&pp, "] merger(s): [");
4045 print_enode_indices (&pp, merger_enodes);
4046 }
4047 if (worklist_enodes.length () > 0)
4048 {
4049 pp_string (&pp, "] worklist: [");
4050 print_enode_indices (&pp, worklist_enodes);
4051 }
4052 pp_character (&pp, ']');
4053
4054 warning_n (stmt->location, 0, processed_enodes.length (),
4055 "%i processed enode: %s",
4056 "%i processed enodes: %s",
4057 processed_enodes.length (), pp_formatted_text (&pp));
4058 seen.add (stmt);
4059
4060 /* If the argument is non-zero, then print all of the states
4061 of the various enodes. */
4062 tree t_arg = fold (gimple_call_arg (call, 0));
4063 if (TREE_CODE (t_arg) != INTEGER_CST)
4064 {
4065 error_at (call->location,
4066 "integer constant required for arg 1");
4067 return;
4068 }
4069 int i_arg = TREE_INT_CST_LOW (t_arg);
4070 if (i_arg)
4071 {
4072 exploded_node *other_enode;
4073 FOR_EACH_VEC_ELT (processed_enodes, j, other_enode)
4074 {
4075 fprintf (stderr, "%i of %i: EN %i:\n",
4076 j + 1, processed_enodes.length (),
4077 other_enode->m_index);
4078 other_enode->dump_succs_and_preds (stderr);
4079 /* Dump state. */
4080 other_enode->get_state ().dump (m_ext_state, false);
4081 }
4082 }
4083 }
4084 }
4085 }
4086
4087 DEBUG_FUNCTION exploded_node *
4088 exploded_graph::get_node_by_index (int idx) const
4089 {
4090 exploded_node *enode = m_nodes[idx];
4091 gcc_assert (enode->m_index == idx);
4092 return enode;
4093 }
4094
4095 /* Ensure that there is an exploded_node for a top-level call to FNDECL. */
4096
4097 void
4098 exploded_graph::on_escaped_function (tree fndecl)
4099 {
4100 logger * const logger = get_logger ();
4101 LOG_FUNC_1 (logger, "%qE", fndecl);
4102
4103 cgraph_node *cgnode = cgraph_node::get (fndecl);
4104 if (!cgnode)
4105 return;
4106
4107 function *fun = cgnode->get_fun ();
4108 if (!fun)
4109 return;
4110
4111 if (!gimple_has_body_p (fndecl))
4112 return;
4113
4114 exploded_node *enode = add_function_entry (fun);
4115 if (logger)
4116 {
4117 if (enode)
4118 logger->log ("created EN %i for %qE entrypoint",
4119 enode->m_index, fun->decl);
4120 else
4121 logger->log ("did not create enode for %qE entrypoint", fun->decl);
4122 }
4123 }
4124
4125 /* A collection of classes for visualizing the callgraph in .dot form
4126 (as represented in the supergraph). */
4127
4128 /* Forward decls. */
4129 class viz_callgraph_node;
4130 class viz_callgraph_edge;
4131 class viz_callgraph;
4132 class viz_callgraph_cluster;
4133
4134 /* Traits for using "digraph.h" to visualize the callgraph. */
4135
4136 struct viz_callgraph_traits
4137 {
4138 typedef viz_callgraph_node node_t;
4139 typedef viz_callgraph_edge edge_t;
4140 typedef viz_callgraph graph_t;
4141 struct dump_args_t
4142 {
4143 dump_args_t (const exploded_graph *eg) : m_eg (eg) {}
4144 const exploded_graph *m_eg;
4145 };
4146 typedef viz_callgraph_cluster cluster_t;
4147 };
4148
4149 /* Subclass of dnode representing a function within the callgraph. */
4150
4151 class viz_callgraph_node : public dnode<viz_callgraph_traits>
4152 {
4153 friend class viz_callgraph;
4154
4155 public:
4156 viz_callgraph_node (function *fun, int index)
4157 : m_fun (fun), m_index (index), m_num_supernodes (0), m_num_superedges (0)
4158 {
4159 gcc_assert (fun);
4160 }
4161
4162 void dump_dot (graphviz_out *gv, const dump_args_t &args) const FINAL OVERRIDE
4163 {
4164 pretty_printer *pp = gv->get_pp ();
4165
4166 dump_dot_id (pp);
4167 pp_printf (pp, " [shape=none,margin=0,style=filled,fillcolor=%s,label=<",
4168 "lightgrey");
4169 pp_string (pp, "<TABLE BORDER=\"0\">");
4170 pp_write_text_to_stream (pp);
4171
4172 gv->begin_trtd ();
4173 pp_printf (pp, "VCG: %i: %s", m_index, function_name (m_fun));
4174 gv->end_tdtr ();
4175 pp_newline (pp);
4176
4177 gv->begin_trtd ();
4178 pp_printf (pp, "supernodes: %i\n", m_num_supernodes);
4179 gv->end_tdtr ();
4180 pp_newline (pp);
4181
4182 gv->begin_trtd ();
4183 pp_printf (pp, "superedges: %i\n", m_num_superedges);
4184 gv->end_tdtr ();
4185 pp_newline (pp);
4186
4187 if (args.m_eg)
4188 {
4189 unsigned i;
4190 exploded_node *enode;
4191 unsigned num_enodes = 0;
4192 FOR_EACH_VEC_ELT (args.m_eg->m_nodes, i, enode)
4193 {
4194 if (enode->get_point ().get_function () == m_fun)
4195 num_enodes++;
4196 }
4197 gv->begin_trtd ();
4198 pp_printf (pp, "enodes: %i\n", num_enodes);
4199 gv->end_tdtr ();
4200 pp_newline (pp);
4201
4202 // TODO: also show the per-callstring breakdown
4203 const exploded_graph::call_string_data_map_t *per_cs_data
4204 = args.m_eg->get_per_call_string_data ();
4205 for (exploded_graph::call_string_data_map_t::iterator iter
4206 = per_cs_data->begin ();
4207 iter != per_cs_data->end ();
4208 ++iter)
4209 {
4210 const call_string *cs = (*iter).first;
4211 //per_call_string_data *data = (*iter).second;
4212 num_enodes = 0;
4213 FOR_EACH_VEC_ELT (args.m_eg->m_nodes, i, enode)
4214 {
4215 if (enode->get_point ().get_function () == m_fun
4216 && enode->get_point ().get_call_string () == *cs)
4217 num_enodes++;
4218 }
4219 if (num_enodes > 0)
4220 {
4221 gv->begin_trtd ();
4222 cs->print (pp);
4223 pp_printf (pp, ": %i\n", num_enodes);
4224 pp_write_text_as_html_like_dot_to_stream (pp);
4225 gv->end_tdtr ();
4226 }
4227 }
4228
4229 /* Show any summaries. */
4230 per_function_data *data = args.m_eg->get_per_function_data (m_fun);
4231 if (data)
4232 {
4233 pp_newline (pp);
4234 gv->begin_trtd ();
4235 pp_printf (pp, "summaries: %i\n", data->m_summaries.length ());
4236 pp_write_text_as_html_like_dot_to_stream (pp);
4237 gv->end_tdtr ();
4238 }
4239 }
4240
4241 pp_string (pp, "</TABLE>>];\n\n");
4242 pp_flush (pp);
4243 }
4244
4245 void dump_dot_id (pretty_printer *pp) const
4246 {
4247 pp_printf (pp, "vcg_%i", m_index);
4248 }
4249
4250 private:
4251 function *m_fun;
4252 int m_index;
4253 int m_num_supernodes;
4254 int m_num_superedges;
4255 };
4256
4257 /* Subclass of dedge representing a callgraph edge. */
4258
4259 class viz_callgraph_edge : public dedge<viz_callgraph_traits>
4260 {
4261 public:
4262 viz_callgraph_edge (viz_callgraph_node *src, viz_callgraph_node *dest)
4263 : dedge<viz_callgraph_traits> (src, dest)
4264 {}
4265
4266 void dump_dot (graphviz_out *gv, const dump_args_t &) const
4267 FINAL OVERRIDE
4268 {
4269 pretty_printer *pp = gv->get_pp ();
4270
4271 const char *style = "\"solid,bold\"";
4272 const char *color = "black";
4273 int weight = 10;
4274 const char *constraint = "true";
4275
4276 m_src->dump_dot_id (pp);
4277 pp_string (pp, " -> ");
4278 m_dest->dump_dot_id (pp);
4279 pp_printf (pp,
4280 (" [style=%s, color=%s, weight=%d, constraint=%s,"
4281 " headlabel=\""),
4282 style, color, weight, constraint);
4283 pp_printf (pp, "\"];\n");
4284 }
4285 };
4286
4287 /* Subclass of digraph representing the callgraph. */
4288
4289 class viz_callgraph : public digraph<viz_callgraph_traits>
4290 {
4291 public:
4292 viz_callgraph (const supergraph &sg);
4293
4294 viz_callgraph_node *get_vcg_node_for_function (function *fun)
4295 {
4296 return *m_map.get (fun);
4297 }
4298
4299 viz_callgraph_node *get_vcg_node_for_snode (supernode *snode)
4300 {
4301 return get_vcg_node_for_function (snode->m_fun);
4302 }
4303
4304 private:
4305 hash_map<function *, viz_callgraph_node *> m_map;
4306 };
4307
4308 /* Placeholder subclass of cluster. */
4309
4310 class viz_callgraph_cluster : public cluster<viz_callgraph_traits>
4311 {
4312 };
4313
4314 /* viz_callgraph's ctor. */
4315
4316 viz_callgraph::viz_callgraph (const supergraph &sg)
4317 {
4318 cgraph_node *node;
4319 FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (node)
4320 {
4321 function *fun = node->get_fun ();
4322 viz_callgraph_node *vcg_node
4323 = new viz_callgraph_node (fun, m_nodes.length ());
4324 m_map.put (fun, vcg_node);
4325 add_node (vcg_node);
4326 }
4327
4328 unsigned i;
4329 superedge *sedge;
4330 FOR_EACH_VEC_ELT (sg.m_edges, i, sedge)
4331 {
4332 viz_callgraph_node *vcg_src = get_vcg_node_for_snode (sedge->m_src);
4333 if (vcg_src->m_fun)
4334 get_vcg_node_for_function (vcg_src->m_fun)->m_num_superedges++;
4335 if (sedge->dyn_cast_call_superedge ())
4336 {
4337 viz_callgraph_node *vcg_dest = get_vcg_node_for_snode (sedge->m_dest);
4338 viz_callgraph_edge *vcg_edge
4339 = new viz_callgraph_edge (vcg_src, vcg_dest);
4340 add_edge (vcg_edge);
4341 }
4342 }
4343
4344 supernode *snode;
4345 FOR_EACH_VEC_ELT (sg.m_nodes, i, snode)
4346 {
4347 if (snode->m_fun)
4348 get_vcg_node_for_function (snode->m_fun)->m_num_supernodes++;
4349 }
4350 }
4351
4352 /* Dump the callgraph to FILENAME. */
4353
4354 static void
4355 dump_callgraph (const supergraph &sg, const char *filename,
4356 const exploded_graph *eg)
4357 {
4358 FILE *outf = fopen (filename, "w");
4359 if (!outf)
4360 return;
4361
4362 // TODO
4363 viz_callgraph vcg (sg);
4364 vcg.dump_dot (filename, NULL, viz_callgraph_traits::dump_args_t (eg));
4365
4366 fclose (outf);
4367 }
4368
4369 /* Dump the callgraph to "<srcfile>.callgraph.dot". */
4370
4371 static void
4372 dump_callgraph (const supergraph &sg, const exploded_graph *eg)
4373 {
4374 auto_timevar tv (TV_ANALYZER_DUMP);
4375 char *filename = concat (dump_base_name, ".callgraph.dot", NULL);
4376 dump_callgraph (sg, filename, eg);
4377 free (filename);
4378 }
4379
4380 /* Subclass of dot_annotator for implementing
4381 DUMP_BASE_NAME.supergraph-eg.dot, a post-analysis dump of the supergraph.
4382
4383 Annotate the supergraph nodes by printing the exploded nodes in concise
4384 form within them, next to their pertinent statements where appropriate,
4385 colorizing the exploded nodes based on sm-state.
4386 Also show saved diagnostics within the exploded nodes, giving information
4387 on whether they were feasible, and, if infeasible, where the problem
4388 was. */
4389
4390 class exploded_graph_annotator : public dot_annotator
4391 {
4392 public:
4393 exploded_graph_annotator (const exploded_graph &eg)
4394 : m_eg (eg)
4395 {
4396 /* Avoid O(N^2) by prepopulating m_enodes_per_snodes. */
4397 unsigned i;
4398 supernode *snode;
4399 FOR_EACH_VEC_ELT (eg.get_supergraph ().m_nodes, i, snode)
4400 m_enodes_per_snodes.safe_push (new auto_vec <exploded_node *> ());
4401 exploded_node *enode;
4402 FOR_EACH_VEC_ELT (m_eg.m_nodes, i, enode)
4403 if (enode->get_supernode ())
4404 m_enodes_per_snodes[enode->get_supernode ()->m_index]->safe_push (enode);
4405 }
4406
4407 /* Show exploded nodes for BEFORE_SUPERNODE points before N. */
4408 bool add_node_annotations (graphviz_out *gv, const supernode &n,
4409 bool within_table)
4410 const FINAL OVERRIDE
4411 {
4412 if (!within_table)
4413 return false;
4414 gv->begin_tr ();
4415 pretty_printer *pp = gv->get_pp ();
4416
4417 gv->begin_td ();
4418 pp_string (pp, "BEFORE");
4419 pp_printf (pp, " (scc: %i)", m_eg.get_scc_id (n));
4420 gv->end_td ();
4421
4422 unsigned i;
4423 exploded_node *enode;
4424 bool had_enode = false;
4425 FOR_EACH_VEC_ELT (*m_enodes_per_snodes[n.m_index], i, enode)
4426 {
4427 gcc_assert (enode->get_supernode () == &n);
4428 const program_point &point = enode->get_point ();
4429 if (point.get_kind () != PK_BEFORE_SUPERNODE)
4430 continue;
4431 print_enode (gv, enode);
4432 had_enode = true;
4433 }
4434 if (!had_enode)
4435 pp_string (pp, "<TD BGCOLOR=\"red\">UNREACHED</TD>");
4436 pp_flush (pp);
4437 gv->end_tr ();
4438 return true;
4439 }
4440
4441 /* Show exploded nodes for STMT. */
4442 void add_stmt_annotations (graphviz_out *gv, const gimple *stmt,
4443 bool within_row)
4444 const FINAL OVERRIDE
4445 {
4446 if (!within_row)
4447 return;
4448 pretty_printer *pp = gv->get_pp ();
4449
4450 const supernode *snode
4451 = m_eg.get_supergraph ().get_supernode_for_stmt (stmt);
4452 unsigned i;
4453 exploded_node *enode;
4454 bool had_td = false;
4455 FOR_EACH_VEC_ELT (*m_enodes_per_snodes[snode->m_index], i, enode)
4456 {
4457 const program_point &point = enode->get_point ();
4458 if (point.get_kind () != PK_BEFORE_STMT)
4459 continue;
4460 if (point.get_stmt () != stmt)
4461 continue;
4462 print_enode (gv, enode);
4463 had_td = true;
4464 }
4465 pp_flush (pp);
4466 if (!had_td)
4467 {
4468 gv->begin_td ();
4469 gv->end_td ();
4470 }
4471 }
4472
4473 /* Show exploded nodes for AFTER_SUPERNODE points after N. */
4474 bool add_after_node_annotations (graphviz_out *gv, const supernode &n)
4475 const FINAL OVERRIDE
4476 {
4477 gv->begin_tr ();
4478 pretty_printer *pp = gv->get_pp ();
4479
4480 gv->begin_td ();
4481 pp_string (pp, "AFTER");
4482 gv->end_td ();
4483
4484 unsigned i;
4485 exploded_node *enode;
4486 FOR_EACH_VEC_ELT (*m_enodes_per_snodes[n.m_index], i, enode)
4487 {
4488 gcc_assert (enode->get_supernode () == &n);
4489 const program_point &point = enode->get_point ();
4490 if (point.get_kind () != PK_AFTER_SUPERNODE)
4491 continue;
4492 print_enode (gv, enode);
4493 }
4494 pp_flush (pp);
4495 gv->end_tr ();
4496 return true;
4497 }
4498
4499 private:
4500 /* Concisely print a TD element for ENODE, showing the index, status,
4501 and any saved_diagnostics at the enode. Colorize it to show sm-state.
4502
4503 Ideally we'd dump ENODE's state here, hidden behind some kind of
4504 interactive disclosure method like a tooltip, so that the states
4505 can be explored without overwhelming the graph.
4506 However, I wasn't able to get graphviz/xdot to show tooltips on
4507 individual elements within a HTML-like label. */
4508 void print_enode (graphviz_out *gv, const exploded_node *enode) const
4509 {
4510 pretty_printer *pp = gv->get_pp ();
4511 pp_printf (pp, "<TD BGCOLOR=\"%s\">",
4512 enode->get_dot_fillcolor ());
4513 pp_printf (pp, "<TABLE BORDER=\"0\">");
4514 gv->begin_trtd ();
4515 pp_printf (pp, "EN: %i", enode->m_index);
4516 switch (enode->get_status ())
4517 {
4518 default:
4519 gcc_unreachable ();
4520 case exploded_node::STATUS_WORKLIST:
4521 pp_string (pp, "(W)");
4522 break;
4523 case exploded_node::STATUS_PROCESSED:
4524 break;
4525 case exploded_node::STATUS_MERGER:
4526 pp_string (pp, "(M)");
4527 break;
4528 case exploded_node::STATUS_BULK_MERGED:
4529 pp_string (pp, "(BM)");
4530 break;
4531 }
4532 gv->end_tdtr ();
4533 /* Dump any saved_diagnostics at this enode. */
4534 {
4535 const diagnostic_manager &dm = m_eg.get_diagnostic_manager ();
4536 for (unsigned i = 0; i < dm.get_num_diagnostics (); i++)
4537 {
4538 const saved_diagnostic *sd = dm.get_saved_diagnostic (i);
4539 if (sd->m_enode == enode)
4540 print_saved_diagnostic (gv, sd);
4541 }
4542 }
4543 pp_printf (pp, "</TABLE>");
4544 pp_printf (pp, "</TD>");
4545 }
4546
4547 /* Print a TABLE element for SD, showing the kind, the length of the
4548 exploded_path, whether the path was feasible, and if infeasible,
4549 what the problem was. */
4550 void print_saved_diagnostic (graphviz_out *gv,
4551 const saved_diagnostic *sd) const
4552 {
4553 pretty_printer *pp = gv->get_pp ();
4554 gv->begin_trtd ();
4555 pp_printf (pp, "<TABLE BORDER=\"0\">");
4556 gv->begin_tr ();
4557 pp_string (pp, "<TD BGCOLOR=\"green\">");
4558 pp_printf (pp, "DIAGNOSTIC: %s", sd->m_d->get_kind ());
4559 gv->end_tdtr ();
4560 gv->begin_trtd ();
4561 pp_printf (pp, "epath length: %i", sd->get_epath_length ());
4562 gv->end_tdtr ();
4563 switch (sd->get_status ())
4564 {
4565 default:
4566 case saved_diagnostic::STATUS_NEW:
4567 gcc_unreachable ();
4568 break;
4569 case saved_diagnostic::STATUS_INFEASIBLE_PATH:
4570 {
4571 gv->begin_trtd ();
4572 pp_printf (pp, "INFEASIBLE");
4573 gv->end_tdtr ();
4574 const feasibility_problem *p = sd->get_feasibility_problem ();
4575 gcc_assert (p);
4576 gv->begin_trtd ();
4577 pp_printf (pp, "at eedge %i: EN:%i -> EN:%i",
4578 p->m_eedge_idx,
4579 p->m_eedge.m_src->m_index,
4580 p->m_eedge.m_dest->m_index);
4581 pp_write_text_as_html_like_dot_to_stream (pp);
4582 gv->end_tdtr ();
4583 gv->begin_trtd ();
4584 p->m_eedge.m_sedge->dump (pp);
4585 pp_write_text_as_html_like_dot_to_stream (pp);
4586 gv->end_tdtr ();
4587 gv->begin_trtd ();
4588 pp_gimple_stmt_1 (pp, p->m_last_stmt, 0, (dump_flags_t)0);
4589 pp_write_text_as_html_like_dot_to_stream (pp);
4590 gv->end_tdtr ();
4591 /* Ideally we'd print p->m_model here; see the notes above about
4592 tooltips. */
4593 }
4594 break;
4595 case saved_diagnostic::STATUS_FEASIBLE_PATH:
4596 gv->begin_trtd ();
4597 pp_printf (pp, "FEASIBLE");
4598 gv->end_tdtr ();
4599 break;
4600 }
4601 pp_printf (pp, "</TABLE>");
4602 gv->end_tdtr ();
4603 }
4604
4605 const exploded_graph &m_eg;
4606 auto_delete_vec<auto_vec <exploded_node *> > m_enodes_per_snodes;
4607 };
4608
4609 /* Implement -fdump-analyzer-json. */
4610
4611 static void
4612 dump_analyzer_json (const supergraph &sg,
4613 const exploded_graph &eg)
4614 {
4615 auto_timevar tv (TV_ANALYZER_DUMP);
4616 char *filename = concat (dump_base_name, ".analyzer.json.gz", NULL);
4617 gzFile output = gzopen (filename, "w");
4618 if (!output)
4619 {
4620 error_at (UNKNOWN_LOCATION, "unable to open %qs for writing", filename);
4621 free (filename);
4622 return;
4623 }
4624
4625 json::object *toplev_obj = new json::object ();
4626 toplev_obj->set ("sgraph", sg.to_json ());
4627 toplev_obj->set ("egraph", eg.to_json ());
4628
4629 pretty_printer pp;
4630 toplev_obj->print (&pp);
4631 pp_formatted_text (&pp);
4632
4633 delete toplev_obj;
4634
4635 if (gzputs (output, pp_formatted_text (&pp)) == EOF
4636 || gzclose (output))
4637 error_at (UNKNOWN_LOCATION, "error writing %qs", filename);
4638
4639 free (filename);
4640 }
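/* The resulting DUMP_BASE_NAME.analyzer.json.gz is thus a gzipped JSON
   object with "sgraph" and "egraph" entries (a sketch of the overall
   shape; the nested objects come from supergraph::to_json and
   exploded_graph::to_json respectively):

     {"sgraph": { ... }, "egraph": { ... }}
*/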
4641
4642 /* Concrete subclass of plugin_analyzer_init_iface, allowing plugins
4643 to register new state machines. */
4644
4645 class plugin_analyzer_init_impl : public plugin_analyzer_init_iface
4646 {
4647 public:
4648 plugin_analyzer_init_impl (auto_delete_vec <state_machine> *checkers,
4649 logger *logger)
4650 : m_checkers (checkers),
4651 m_logger (logger)
4652 {}
4653
4654 void register_state_machine (state_machine *sm) FINAL OVERRIDE
4655 {
4656 m_checkers->safe_push (sm);
4657 }
4658
4659 logger *get_logger () const FINAL OVERRIDE
4660 {
4661 return m_logger;
4662 }
4663
4664 private:
4665 auto_delete_vec <state_machine> *m_checkers;
4666 logger *m_logger;
4667 };
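/* A minimal sketch of the plugin side (the callback and the
   make_my_state_machine factory below are illustrative names, not part of
   this file): a plugin registers a callback for PLUGIN_ANALYZER_INIT and,
   when invoked, uses the iface passed as gcc_data to add its state
   machine:

     static void
     my_analyzer_init_cb (void *gcc_data, void *)
     {
       ana::plugin_analyzer_init_iface *iface
         = (ana::plugin_analyzer_init_iface *)gcc_data;
       iface->register_state_machine
         (make_my_state_machine (iface->get_logger ()));
     }

     int
     plugin_init (struct plugin_name_args *plugin_info,
                  struct plugin_gcc_version *)
     {
       register_callback (plugin_info->base_name, PLUGIN_ANALYZER_INIT,
                          my_analyzer_init_cb, NULL);
       return 0;
     }

   The invoke_plugin_callbacks (PLUGIN_ANALYZER_INIT, &data) call in
   impl_run_checkers below triggers such callbacks.  */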
4668
4669 /* Run the analysis "engine". */
4670
4671 void
4672 impl_run_checkers (logger *logger)
4673 {
4674 LOG_SCOPE (logger);
4675
4676 /* If using LTO, ensure that the cgraph nodes have function bodies. */
4677 cgraph_node *node;
4678 FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (node)
4679 node->get_untransformed_body ();
4680
4681 engine eng;
4682
4683 /* Create the supergraph. */
4684 supergraph sg (logger);
4685
4686 state_purge_map *purge_map = NULL;
4687
4688 if (flag_analyzer_state_purge)
4689 purge_map = new state_purge_map (sg, logger);
4690
4691 if (flag_dump_analyzer_supergraph)
4692 {
4693 /* Dump supergraph pre-analysis. */
4694 auto_timevar tv (TV_ANALYZER_DUMP);
4695 char *filename = concat (dump_base_name, ".supergraph.dot", NULL);
4696 supergraph::dump_args_t args ((enum supergraph_dot_flags)0, NULL);
4697 sg.dump_dot (filename, args);
4698 free (filename);
4699 }
4700
4701 if (flag_dump_analyzer_state_purge)
4702 {
4703 auto_timevar tv (TV_ANALYZER_DUMP);
4704 state_purge_annotator a (purge_map);
4705 char *filename = concat (dump_base_name, ".state-purge.dot", NULL);
4706 supergraph::dump_args_t args ((enum supergraph_dot_flags)0, &a);
4707 sg.dump_dot (filename, args);
4708 free (filename);
4709 }
4710
4711 auto_delete_vec <state_machine> checkers;
4712 make_checkers (checkers, logger);
4713
4714 plugin_analyzer_init_impl data (&checkers, logger);
4715 invoke_plugin_callbacks (PLUGIN_ANALYZER_INIT, &data);
4716
4717 if (logger)
4718 {
4719 int i;
4720 state_machine *sm;
4721 FOR_EACH_VEC_ELT (checkers, i, sm)
4722 logger->log ("checkers[%i]: %s", i, sm->get_name ());
4723 }
4724
4725 /* Extrinsic state shared by nodes in the graph. */
4726 const extrinsic_state ext_state (checkers, &eng, logger);
4727
4728 const analysis_plan plan (sg, logger);
4729
4730 /* The exploded graph. */
4731 exploded_graph eg (sg, logger, ext_state, purge_map, plan,
4732 analyzer_verbosity);
4733
4734 /* Add entrypoints to the graph for externally-callable functions. */
4735 eg.build_initial_worklist ();
4736
4737 /* Now process the worklist, exploring the <point, state> graph. */
4738 eg.process_worklist ();
4739
4740 if (flag_dump_analyzer_exploded_graph)
4741 {
4742 auto_timevar tv (TV_ANALYZER_DUMP);
4743 char *filename
4744 = concat (dump_base_name, ".eg.dot", NULL);
4745 exploded_graph::dump_args_t args (eg);
4746 root_cluster c;
4747 eg.dump_dot (filename, &c, args);
4748 free (filename);
4749 }
4750
4751 /* Now emit any saved diagnostics. */
4752 eg.get_diagnostic_manager ().emit_saved_diagnostics (eg);
4753
4754 eg.dump_exploded_nodes ();
4755
4756 eg.log_stats ();
4757
4758 if (flag_dump_analyzer_callgraph)
4759 dump_callgraph (sg, &eg);
4760
4761 if (flag_dump_analyzer_supergraph)
4762 {
4763 /* Dump post-analysis form of supergraph. */
4764 auto_timevar tv (TV_ANALYZER_DUMP);
4765 char *filename = concat (dump_base_name, ".supergraph-eg.dot", NULL);
4766 exploded_graph_annotator a (eg);
4767 supergraph::dump_args_t args ((enum supergraph_dot_flags)0, &a);
4768 sg.dump_dot (filename, args);
4769 free (filename);
4770 }
4771
4772 if (flag_dump_analyzer_json)
4773 dump_analyzer_json (sg, eg);
4774
4775 delete purge_map;
4776 }
4777
4778 /* External entrypoint to the analysis "engine".
4779 Set up any dumps, then call impl_run_checkers. */
4780
4781 void
4782 run_checkers ()
4783 {
4784 /* Save input_location. */
4785 location_t saved_input_location = input_location;
4786
4787 /* Handle -fdump-analyzer and -fdump-analyzer-stderr. */
4788 FILE *dump_fout = NULL;
4789 /* Track if we're responsible for closing dump_fout. */
4790 bool owns_dump_fout = false;
4791 if (flag_dump_analyzer_stderr)
4792 dump_fout = stderr;
4793 else if (flag_dump_analyzer)
4794 {
4795 char *dump_filename = concat (dump_base_name, ".analyzer.txt", NULL);
4796 dump_fout = fopen (dump_filename, "w");
4797 free (dump_filename);
4798 if (dump_fout)
4799 owns_dump_fout = true;
4800 }
4801
4802 {
4803 log_user the_logger (NULL);
4804 if (dump_fout)
4805 the_logger.set_logger (new logger (dump_fout, 0, 0,
4806 *global_dc->printer));
4807 LOG_SCOPE (the_logger.get_logger ());
4808
4809 impl_run_checkers (the_logger.get_logger ());
4810
4811 /* end of lifetime of the_logger (so that dump file is closed after the
4812 various dtors run). */
4813 }
4814
4815 if (owns_dump_fout)
4816 fclose (dump_fout);
4817
4818 /* Restore input_location. Subsequent passes may assume that input_location
4819 is some arbitrary value *not* in the block tree, which might be violated
4820 if we didn't restore it. */
4821 input_location = saved_input_location;
4822 }
4823
4824 } // namespace ana
4825
4826 #endif /* #if ENABLE_ANALYZER */