+2020-01-14 David Malcolm <dmalcolm@redhat.com>
+
+ * Makefile.in (lang_opt_files): Add analyzer.opt.
+ (ANALYZER_OBJS): New.
+ (OBJS): Add digraph.o, graphviz.o, ordered-hash-map-tests.o,
+ tristate.o and ANALYZER_OBJS.
+ (TEXI_GCCINT_FILES): Add analyzer.texi.
+ * common.opt (-fanalyzer): New driver option.
+ * config.in: Regenerate.
+ * configure: Regenerate.
+ * configure.ac (--disable-analyzer, ENABLE_ANALYZER): New option.
+ (gccdepdir): Also create depdir for "analyzer" subdir.
+ * digraph.cc: New file.
+ * digraph.h: New file.
+ * doc/analyzer.texi: New file.
+ * doc/gccint.texi ("Static Analyzer"): New menu item.
+ (analyzer.texi): Include it.
+ * doc/invoke.texi ("Static Analyzer Options"): New list and new section.
+ ("Warning Options"): Add static analysis warnings to the list.
+ (-Wno-analyzer-double-fclose): New option.
+ (-Wno-analyzer-double-free): New option.
+ (-Wno-analyzer-exposure-through-output-file): New option.
+ (-Wno-analyzer-file-leak): New option.
+ (-Wno-analyzer-free-of-non-heap): New option.
+ (-Wno-analyzer-malloc-leak): New option.
+ (-Wno-analyzer-possible-null-argument): New option.
+ (-Wno-analyzer-possible-null-dereference): New option.
+ (-Wno-analyzer-null-argument): New option.
+ (-Wno-analyzer-null-dereference): New option.
+ (-Wno-analyzer-stale-setjmp-buffer): New option.
+ (-Wno-analyzer-tainted-array-index): New option.
+ (-Wno-analyzer-use-after-free): New option.
+ (-Wno-analyzer-use-of-pointer-in-stale-stack-frame): New option.
+ (-Wno-analyzer-use-of-uninitialized-value): New option.
+ (-Wanalyzer-too-complex): New option.
+ (-fanalyzer-call-summaries): New option.
+ (-fanalyzer-checker=): New option.
+ (-fanalyzer-fine-grained): New option.
+ (-fno-analyzer-state-merge): New option.
+ (-fno-analyzer-state-purge): New option.
+ (-fanalyzer-transitivity): New option.
+ (-fanalyzer-verbose-edges): New option.
+ (-fanalyzer-verbose-state-changes): New option.
+ (-fanalyzer-verbosity=): New option.
+ (-fdump-analyzer): New option.
+ (-fdump-analyzer-callgraph): New option.
+ (-fdump-analyzer-exploded-graph): New option.
+ (-fdump-analyzer-exploded-nodes): New option.
+ (-fdump-analyzer-exploded-nodes-2): New option.
+ (-fdump-analyzer-exploded-nodes-3): New option.
+ (-fdump-analyzer-supergraph): New option.
+ * doc/sourcebuild.texi (dg-require-dot): New.
+ (dg-check-dot): New.
+ * gdbinit.in (break-on-saved-diagnostic): New command.
+ * graphviz.cc: New file.
+ * graphviz.h: New file.
+ * ordered-hash-map-tests.cc: New file.
+ * ordered-hash-map.h: New file.
+ * passes.def (pass_analyzer): Add before
+ pass_ipa_whole_program_visibility.
+ * selftest-run-tests.c (selftest::run_tests): Call
+ selftest::ordered_hash_map_tests_cc_tests.
+ * selftest.h (selftest::ordered_hash_map_tests_cc_tests): New
+ decl.
+ * shortest-paths.h: New file.
+ * timevar.def (TV_ANALYZER): New timevar.
+ (TV_ANALYZER_SUPERGRAPH): Likewise.
+ (TV_ANALYZER_STATE_PURGE): Likewise.
+ (TV_ANALYZER_PLAN): Likewise.
+ (TV_ANALYZER_SCC): Likewise.
+ (TV_ANALYZER_WORKLIST): Likewise.
+ (TV_ANALYZER_DUMP): Likewise.
+ (TV_ANALYZER_DIAGNOSTICS): Likewise.
+ (TV_ANALYZER_SHORTEST_PATHS): Likewise.
+ * tree-pass.h (make_pass_analyzer): New decl.
+ * tristate.cc: New file.
+ * tristate.h: New file.
+
2020-01-14 Uroš Bizjak <ubizjak@gmail.com>
PR target/93254
xm_defines=@xm_defines@
lang_checks=
lang_checks_parallelized=
-lang_opt_files=@lang_opt_files@ $(srcdir)/c-family/c.opt $(srcdir)/common.opt $(srcdir)/params.opt
+lang_opt_files=@lang_opt_files@ $(srcdir)/c-family/c.opt $(srcdir)/common.opt $(srcdir)/params.opt $(srcdir)/analyzer/analyzer.opt
lang_specs_files=@lang_specs_files@
lang_tree_files=@lang_tree_files@
target_cpu_default=@target_cpu_default@
c-family/c-ubsan.o c-family/known-headers.o \
c-family/c-attribs.o c-family/c-warn.o c-family/c-spellcheck.o
+# Analyzer object files
+ANALYZER_OBJS = \
+ analyzer/analysis-plan.o \
+ analyzer/analyzer.o \
+ analyzer/analyzer-logging.o \
+ analyzer/analyzer-pass.o \
+ analyzer/analyzer-selftests.o \
+ analyzer/call-string.o \
+ analyzer/checker-path.o \
+ analyzer/constraint-manager.o \
+ analyzer/diagnostic-manager.o \
+ analyzer/engine.o \
+ analyzer/pending-diagnostic.o \
+ analyzer/program-point.o \
+ analyzer/program-state.o \
+ analyzer/region-model.o \
+ analyzer/sm.o \
+ analyzer/sm-file.o \
+ analyzer/sm-malloc.o \
+ analyzer/sm-pattern-test.o \
+ analyzer/sm-sensitive.o \
+ analyzer/sm-signal.o \
+ analyzer/sm-taint.o \
+ analyzer/state-purge.o \
+ analyzer/supergraph.o
+
# Language-independent object files.
# We put the *-match.o and insn-*.o files first so that a parallel make
# will build them sooner, because they are large and otherwise tend to be
df-problems.o \
df-scan.o \
dfp.o \
+ digraph.o \
dojump.o \
dominance.o \
domwalk.o \
godump.o \
graph.o \
graphds.o \
+ graphviz.o \
graphite.o \
graphite-isl-ast-to-gimple.o \
graphite-dependences.o \
optinfo-emit-json.o \
options-save.o \
opts-global.o \
+ ordered-hash-map-tests.o \
passes.o \
plugin.o \
postreload-gcse.o \
tree-vector-builder.o \
tree-vrp.o \
tree.o \
+ tristate.o \
typed-splay-tree.o \
unique-ptr-tests.o \
valtrack.o \
wide-int-print.o \
xcoffout.o \
$(out_object_file) \
+ $(ANALYZER_OBJS) \
$(EXTRA_OBJS) \
$(host_hook_obj)
gnu.texi gpl_v3.texi fdl.texi contrib.texi languages.texi \
sourcebuild.texi gty.texi libgcc.texi cfg.texi tree-ssa.texi \
loop.texi generic.texi gimple.texi plugins.texi optinfo.texi \
- match-and-simplify.texi ux.texi poly-int.texi
+ match-and-simplify.texi analyzer.texi ux.texi poly-int.texi
TEXI_GCCINSTALL_FILES = install.texi install-old.texi fdl.texi \
gcc-common.texi gcc-vers.texi
--- /dev/null
+2020-01-14 David Malcolm <dmalcolm@redhat.com>
+
+ * ChangeLog: New file.
+ * analyzer-selftests.cc: New file.
+ * analyzer-selftests.h: New file.
+ * analyzer.opt: New file.
+ * analysis-plan.cc: New file.
+ * analysis-plan.h: New file.
+ * analyzer-logging.cc: New file.
+ * analyzer-logging.h: New file.
+ * analyzer-pass.cc: New file.
+ * analyzer.cc: New file.
+ * analyzer.h: New file.
+ * call-string.cc: New file.
+ * call-string.h: New file.
+ * checker-path.cc: New file.
+ * checker-path.h: New file.
+ * constraint-manager.cc: New file.
+ * constraint-manager.h: New file.
+ * diagnostic-manager.cc: New file.
+ * diagnostic-manager.h: New file.
+ * engine.cc: New file.
+ * engine.h: New file.
+ * exploded-graph.h: New file.
+ * pending-diagnostic.cc: New file.
+ * pending-diagnostic.h: New file.
+ * program-point.cc: New file.
+ * program-point.h: New file.
+ * program-state.cc: New file.
+ * program-state.h: New file.
+ * region-model.cc: New file.
+ * region-model.h: New file.
+ * sm-file.cc: New file.
+ * sm-malloc.cc: New file.
+ * sm-malloc.dot: New file.
+ * sm-pattern-test.cc: New file.
+ * sm-sensitive.cc: New file.
+ * sm-signal.cc: New file.
+ * sm-taint.cc: New file.
+ * sm.cc: New file.
+ * sm.h: New file.
+ * state-purge.cc: New file.
+ * state-purge.h: New file.
+ * supergraph.cc: New file.
+ * supergraph.h: New file.
+
+2019-12-13 David Malcolm <dmalcolm@redhat.com>
+
+ * Initial creation
+
+\f
+Copyright (C) 2019-2020 Free Software Foundation, Inc.
+
+Copying and distribution of this file, with or without modification,
+are permitted in any medium without royalty provided the copyright
+notice and this notice are preserved.
--- /dev/null
+/* A class to encapsulate decisions about how the analysis should happen.
+ Copyright (C) 2019-2020 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tree.h"
+#include "options.h"
+#include "cgraph.h"
+#include "timevar.h"
+#include "ipa-utils.h"
+#include "function.h"
+#include "analyzer/analyzer.h"
+#include "diagnostic-core.h"
+#include "analyzer/analyzer-logging.h"
+#include "analyzer/analysis-plan.h"
+#include "ordered-hash-map.h"
+#include "options.h"
+#include "cgraph.h"
+#include "function.h"
+#include "cfg.h"
+#include "basic-block.h"
+#include "gimple.h"
+#include "gimple-iterator.h"
+#include "digraph.h"
+#include "analyzer/supergraph.h"
+
+#if ENABLE_ANALYZER
+
+/* class analysis_plan. */
+
+/* analysis_plan's ctor. */
+
+analysis_plan::analysis_plan (const supergraph &sg, logger *logger)
+: log_user (logger), m_sg (sg),
+ m_cgraph_node_postorder (XCNEWVEC (struct cgraph_node *,
+ symtab->cgraph_count)),
+ m_index_by_uid (symtab->cgraph_max_uid)
+{
+ LOG_SCOPE (logger);
+ auto_timevar time (TV_ANALYZER_PLAN);
+
+ m_num_cgraph_nodes = ipa_reverse_postorder (m_cgraph_node_postorder);
+ gcc_assert (m_num_cgraph_nodes == symtab->cgraph_count);
+ if (get_logger_file ())
+ ipa_print_order (get_logger_file (),
+ "analysis_plan", m_cgraph_node_postorder,
+ m_num_cgraph_nodes);
+
+ /* Populate m_index_by_uid. */
+ for (int i = 0; i < symtab->cgraph_max_uid; i++)
+ m_index_by_uid.quick_push (-1);
+ for (int i = 0; i < m_num_cgraph_nodes; i++)
+ {
+ gcc_assert (m_cgraph_node_postorder[i]->get_uid ()
+ < symtab->cgraph_max_uid);
+ m_index_by_uid[m_cgraph_node_postorder[i]->get_uid ()] = i;
+ }
+}
+
+/* analysis_plan's dtor. */
+
+analysis_plan::~analysis_plan ()
+{
+ free (m_cgraph_node_postorder);
+}
+
+/* Comparator for use by the exploded_graph's worklist, to order FUN_A
+ and FUN_B so that functions that are to be summarized are visited
+ before the summary is needed (based on a sort of the callgraph). */
+
+int
+analysis_plan::cmp_function (function *fun_a, function *fun_b) const
+{
+ cgraph_node *node_a = cgraph_node::get (fun_a->decl);
+ cgraph_node *node_b = cgraph_node::get (fun_b->decl);
+
+ int idx_a = m_index_by_uid[node_a->get_uid ()];
+ int idx_b = m_index_by_uid[node_b->get_uid ()];
+
+ return idx_b - idx_a;
+}
+
+/* Return true if the call EDGE should be analyzed using a call summary.
+ Return false if it should be analyzed using a full call and return. */
+
+bool
+analysis_plan::use_summary_p (const cgraph_edge *edge) const
+{
+ /* Don't use call summaries if -fno-analyzer-call-summaries. */
+ if (!flag_analyzer_call_summaries)
+ return false;
+
+ /* TODO: don't count callsites each time. */
+ int num_call_sites = 0;
+ const cgraph_node *callee = edge->callee;
+ for (cgraph_edge *edge = callee->callers; edge; edge = edge->next_caller)
+ ++num_call_sites;
+
+ /* Don't use a call summary if there's only one call site. */
+ if (num_call_sites <= 1)
+ return false;
+
+ /* Require the callee to be sufficiently complex to be worth
+ summarizing. */
+ if ((int)m_sg.get_num_snodes (callee->get_fun ())
+ < param_analyzer_min_snodes_for_call_summary)
+ return false;
+
+ return true;
+}
+
+#endif /* #if ENABLE_ANALYZER */
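
The ordering machinery above is tied to GCC internals (cgraph, ipa_reverse_postorder), so it cannot be exercised outside the compiler. Purely as a hedged, standalone C++ sketch of the same idea — recording each node's position in a precomputed order in a uid-indexed table and comparing by that index — with toy_* names invented for illustration (not GCC APIs):

#include <vector>
#include <cassert>
#include <cstdio>

struct toy_node { int uid; const char *name; };

class toy_plan
{
public:
  /* POSTORDER is the desired visitation order; MAX_UID bounds the uids.  */
  toy_plan (const std::vector<toy_node *> &postorder, int max_uid)
    : m_index_by_uid (max_uid, -1)
  {
    for (size_t i = 0; i < postorder.size (); i++)
      {
        assert (postorder[i]->uid < max_uid);
        m_index_by_uid[postorder[i]->uid] = (int) i;
      }
  }

  /* qsort-style comparator mirroring the "idx_b - idx_a" above.  */
  int cmp (const toy_node *a, const toy_node *b) const
  {
    return m_index_by_uid[b->uid] - m_index_by_uid[a->uid];
  }

private:
  std::vector<int> m_index_by_uid;
};

int main ()
{
  toy_node leaf = {0, "leaf"};
  toy_node caller = {1, "caller"};
  std::vector<toy_node *> order = {&leaf, &caller};
  toy_plan plan (order, 2);
  printf ("cmp (leaf, caller) = %d\n", plan.cmp (&leaf, &caller));
  return 0;
}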
--- /dev/null
+/* A class to encapsulate decisions about how the analysis should happen.
+ Copyright (C) 2019-2020 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_ANALYZER_ANALYSIS_PLAN_H
+#define GCC_ANALYZER_ANALYSIS_PLAN_H
+
+/* A class to encapsulate decisions about how the analysis should happen.
+ Examples:
+ - the order in which functions should be analyzed, so that function
+ summaries are created before analysis of call sites that might use
+ them
+ - which callgraph edges should use call summaries
+ TODO: the above is a work-in-progress. */
+
+class analysis_plan : public log_user
+{
+public:
+ analysis_plan (const supergraph &sg, logger *logger);
+ ~analysis_plan ();
+
+ int cmp_function (function *fun_a, function *fun_b) const;
+
+ bool use_summary_p (const cgraph_edge *edge) const;
+
+private:
+ DISABLE_COPY_AND_ASSIGN (analysis_plan);
+
+ const supergraph &m_sg;
+
+ /* Result of ipa_reverse_postorder. */
+ cgraph_node **m_cgraph_node_postorder;
+ int m_num_cgraph_nodes;
+
+ /* Index of each node within the postorder ordering,
+ accessed via the "m_uid" field. */
+ auto_vec<int> m_index_by_uid;
+};
+
+#endif /* GCC_ANALYZER_ANALYSIS_PLAN_H */
--- /dev/null
+/* Hierarchical log messages for the analyzer.
+ Copyright (C) 2014-2020 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "toplev.h" /* for print_version */
+#include "pretty-print.h" /* for print_version */
+#include "diagnostic.h"
+#include "tree-diagnostic.h"
+
+#include "analyzer/analyzer-logging.h"
+
+#if ENABLE_ANALYZER
+
+/* Implementation of class logger. */
+
+/* ctor for logger. */
+
+logger::logger (FILE *f_out,
+ int, /* flags */
+ int /* verbosity */,
+ const pretty_printer &reference_pp) :
+ m_refcount (0),
+ m_f_out (f_out),
+ m_indent_level (0),
+ m_log_refcount_changes (false),
+ m_pp (reference_pp.clone ())
+{
+ pp_show_color (m_pp) = 0;
+ pp_buffer (m_pp)->stream = f_out;
+
+ /* %qE in logs for SSA_NAMEs should show the ssa names, rather than
+ trying to prettify things by showing the underlying var. */
+ pp_format_decoder (m_pp) = default_tree_printer;
+
+ /* Begin the log by writing the GCC version. */
+ print_version (f_out, "", false);
+}
+
+/* The destructor for logger, invoked via
+ the decref method when the refcount hits zero.
+ Note that we do not close the underlying FILE * (m_f_out). */
+
+logger::~logger ()
+{
+ /* This should be the last message emitted. */
+ log ("%s", __PRETTY_FUNCTION__);
+ gcc_assert (m_refcount == 0);
+ delete m_pp;
+}
+
+/* Increment the reference count of the logger. */
+
+void
+logger::incref (const char *reason)
+{
+ m_refcount++;
+ if (m_log_refcount_changes)
+ log ("%s: reason: %s refcount now %i ",
+ __PRETTY_FUNCTION__, reason, m_refcount);
+}
+
+/* Decrement the reference count of the logger,
+ deleting it if nothing is referring to it. */
+
+void
+logger::decref (const char *reason)
+{
+ gcc_assert (m_refcount > 0);
+ --m_refcount;
+ if (m_log_refcount_changes)
+ log ("%s: reason: %s refcount now %i",
+ __PRETTY_FUNCTION__, reason, m_refcount);
+ if (m_refcount == 0)
+ delete this;
+}
+
+/* Write a formatted message to the log, by calling the log_va method. */
+
+void
+logger::log (const char *fmt, ...)
+{
+ va_list ap;
+ va_start (ap, fmt);
+ log_va (fmt, &ap);
+ va_end (ap);
+}
+
+/* Write an indented line to the log file.
+
+ We explicitly flush after each line: if something crashes the process,
+ we want the logfile/stream to contain the most up-to-date hint about the
+ last thing that was happening, without it being hidden in an in-process
+ buffer. */
+
+void
+logger::log_va (const char *fmt, va_list *ap)
+{
+ start_log_line ();
+ log_va_partial (fmt, ap);
+ end_log_line ();
+}
+
+void
+logger::start_log_line ()
+{
+ for (int i = 0; i < m_indent_level; i++)
+ fputc (' ', m_f_out);
+}
+
+void
+logger::log_partial (const char *fmt, ...)
+{
+ va_list ap;
+ va_start (ap, fmt);
+ log_va_partial (fmt, &ap);
+ va_end (ap);
+}
+
+void
+logger::log_va_partial (const char *fmt, va_list *ap)
+{
+ text_info text;
+ text.format_spec = fmt;
+ text.args_ptr = ap;
+ text.err_no = 0;
+ pp_format (m_pp, &text);
+ pp_output_formatted_text (m_pp);
+}
+
+void
+logger::end_log_line ()
+{
+ pp_flush (m_pp);
+ pp_clear_output_area (m_pp);
+ fprintf (m_f_out, "\n");
+ fflush (m_f_out);
+}
+
+/* Record the entry within a particular scope, indenting subsequent
+ log lines accordingly. */
+
+void
+logger::enter_scope (const char *scope_name)
+{
+ log ("entering: %s", scope_name);
+ m_indent_level += 1;
+}
+
+void
+logger::enter_scope (const char *scope_name, const char *fmt, va_list *ap)
+{
+ start_log_line ();
+ log_partial ("entering: %s: ", scope_name);
+ log_va_partial (fmt, ap);
+ end_log_line ();
+
+ m_indent_level += 1;
+}
+
+
+/* Record the exit from a particular scope, restoring the indent level to
+ before the scope was entered. */
+
+void
+logger::exit_scope (const char *scope_name)
+{
+ if (m_indent_level)
+ m_indent_level -= 1;
+ else
+ log ("(mismatching indentation)");
+ log ("exiting: %s", scope_name);
+}
+
+/* Implementation of class log_user. */
+
+/* The constructor for log_user. */
+
+log_user::log_user (logger *logger) : m_logger (logger)
+{
+ if (m_logger)
+ m_logger->incref("log_user ctor");
+}
+
+/* The destructor for log_user. */
+
+log_user::~log_user ()
+{
+ if (m_logger)
+ m_logger->decref("log_user dtor");
+}
+
+/* Set the logger for a log_user, managing the reference counts
+ of the old and new logger (either of which might be NULL). */
+
+void
+log_user::set_logger (logger *logger)
+{
+ if (logger)
+ logger->incref ("log_user::set_logger");
+ if (m_logger)
+ m_logger->decref ("log_user::set_logger");
+ m_logger = logger;
+}
+
+#endif /* #if ENABLE_ANALYZER */
--- /dev/null
+/* Hierarchical log messages for the analyzer.
+ Copyright (C) 2014-2020 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* Adapted from jit-logging.h. */
+
+#ifndef ANALYZER_LOGGING_H
+#define ANALYZER_LOGGING_H
+
+/* A logger encapsulates a logging stream: a way to send
+ lines of pertinent information to a FILE *. */
+
+class logger
+{
+ public:
+ logger (FILE *f_out, int flags, int verbosity, const pretty_printer &reference_pp);
+ ~logger ();
+
+ void incref (const char *reason);
+ void decref (const char *reason);
+
+ void log (const char *fmt, ...)
+ ATTRIBUTE_GCC_DIAG(2, 3);
+ void log_va (const char *fmt, va_list *ap)
+ ATTRIBUTE_GCC_DIAG(2, 0);
+ void start_log_line ();
+ void log_partial (const char *fmt, ...)
+ ATTRIBUTE_GCC_DIAG(2, 3);
+ void log_va_partial (const char *fmt, va_list *ap)
+ ATTRIBUTE_GCC_DIAG(2, 0);
+ void end_log_line ();
+
+ void enter_scope (const char *scope_name);
+ void enter_scope (const char *scope_name, const char *fmt, va_list *ap)
+ ATTRIBUTE_GCC_DIAG(3, 0);
+ void exit_scope (const char *scope_name);
+
+ pretty_printer *get_printer () const { return m_pp; }
+ FILE *get_file () const { return m_f_out; }
+
+private:
+ DISABLE_COPY_AND_ASSIGN (logger);
+
+ int m_refcount;
+ FILE *m_f_out;
+ int m_indent_level;
+ bool m_log_refcount_changes;
+ pretty_printer *m_pp;
+};
+
+/* The class log_scope is an RAII-style class intended to make
+ it easy to notify a logger about entering and exiting the body of a
+ given function. */
+
+class log_scope
+{
+public:
+ log_scope (logger *logger, const char *name);
+ log_scope (logger *logger, const char *name, const char *fmt, ...)
+ ATTRIBUTE_GCC_DIAG(4, 5);
+ ~log_scope ();
+
+ private:
+ DISABLE_COPY_AND_ASSIGN (log_scope);
+
+ logger *m_logger;
+ const char *m_name;
+};
+
+/* The constructor for log_scope.
+
+ The normal case is that the logger is NULL, in which case this should
+ be largely a no-op.
+
+ If we do have a logger, notify it that we're entering the given scope.
+ We also need to hold a reference on it, to avoid a use-after-free
+ when logging the cleanup of the owner of the logger. */
+
+inline
+log_scope::log_scope (logger *logger, const char *name) :
+ m_logger (logger),
+ m_name (name)
+{
+ if (m_logger)
+ {
+ m_logger->incref ("log_scope ctor");
+ m_logger->enter_scope (m_name);
+ }
+}
+
+inline
+log_scope::log_scope (logger *logger, const char *name, const char *fmt, ...):
+ m_logger (logger),
+ m_name (name)
+{
+ if (m_logger)
+ {
+ m_logger->incref ("log_scope ctor");
+ va_list ap;
+ va_start (ap, fmt);
+ m_logger->enter_scope (m_name, fmt, &ap);
+ va_end (ap);
+ }
+}
+
+
+/* The destructor for log_scope; essentially the opposite of
+ the constructor. */
+
+inline
+log_scope::~log_scope ()
+{
+ if (m_logger)
+ {
+ m_logger->exit_scope (m_name);
+ m_logger->decref ("log_scope dtor");
+ }
+}
+
+/* A log_user is something that potentially uses a logger (which could be NULL).
+
+ The log_user class keeps the reference-count of a logger up-to-date. */
+
+class log_user
+{
+ public:
+ log_user (logger *logger);
+ ~log_user ();
+
+ logger * get_logger () const { return m_logger; }
+ void set_logger (logger * logger);
+
+ void log (const char *fmt, ...) const
+ ATTRIBUTE_GCC_DIAG(2, 3);
+
+ void start_log_line () const;
+ void end_log_line () const;
+
+ void enter_scope (const char *scope_name);
+ void exit_scope (const char *scope_name);
+
+ pretty_printer *get_logger_pp () const
+ {
+ gcc_assert (m_logger);
+ return m_logger->get_printer ();
+ }
+
+ FILE *get_logger_file () const
+ {
+ if (m_logger == NULL)
+ return NULL;
+ return m_logger->get_file ();
+ }
+
+ private:
+ DISABLE_COPY_AND_ASSIGN (log_user);
+
+ logger *m_logger;
+};
+
+/* A shortcut for calling log from a log_user, handling the common
+ case where the underlying logger is NULL via a no-op. */
+
+inline void
+log_user::log (const char *fmt, ...) const
+{
+ if (m_logger)
+ {
+ va_list ap;
+ va_start (ap, fmt);
+ m_logger->log_va (fmt, &ap);
+ va_end (ap);
+ }
+}
+
+/* A shortcut for starting a log line from a log_user,
+ handling the common case where the underlying logger is NULL via
+ a no-op. */
+
+inline void
+log_user::start_log_line () const
+{
+ if (m_logger)
+ m_logger->start_log_line ();
+}
+
+/* A shortcut for ending a log line from a log_user,
+ handling the common case where the underlying logger is NULL via
+ a no-op. */
+
+inline void
+log_user::end_log_line () const
+{
+ if (m_logger)
+ m_logger->end_log_line ();
+}
+
+/* A shortcut for recording entry into a scope from a log_user,
+ handling the common case where the underlying logger is NULL via
+ a no-op. */
+
+inline void
+log_user::enter_scope (const char *scope_name)
+{
+ if (m_logger)
+ m_logger->enter_scope (scope_name);
+}
+
+/* A shortcut for recording exit from a scope from a log_user,
+ handling the common case where the underlying logger is NULL via
+ a no-op. */
+
+inline void
+log_user::exit_scope (const char *scope_name)
+{
+ if (m_logger)
+ m_logger->exit_scope (scope_name);
+}
+
+/* If the given logger is non-NULL, log entry/exit of this scope to
+ it, identifying it using __PRETTY_FUNCTION__. */
+
+#define LOG_SCOPE(LOGGER) \
+ log_scope s (LOGGER, __PRETTY_FUNCTION__)
+
+/* If the given logger is non-NULL, log entry/exit of this scope to
+ it, identifying it using __func__. */
+
+#define LOG_FUNC(LOGGER) \
+ log_scope s (LOGGER, __func__)
+
+#define LOG_FUNC_1(LOGGER, FMT, A0) \
+ log_scope s (LOGGER, __func__, FMT, A0)
+
+#define LOG_FUNC_2(LOGGER, FMT, A0, A1) \
+ log_scope s (LOGGER, __func__, FMT, A0, A1)
+
+#define LOG_FUNC_3(LOGGER, FMT, A0, A1, A2) \
+ log_scope s (LOGGER, __func__, FMT, A0, A1, A2)
+
+#define LOG_FUNC_4(LOGGER, FMT, A0, A1, A2, A3) \
+ log_scope s (LOGGER, __func__, FMT, A0, A1, A2, A3)
+
+#endif /* ANALYZER_LOGGING_H */
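
The logger/log_scope pair above is built on GCC's pretty_printer and cannot be compiled standalone. The following is a rough self-contained C++ sketch of the same reference-counting plus RAII-scope pattern (per-line flushing, indentation by scope depth); all toy_* names are invented for illustration:

#include <cstdio>
#include <cassert>

class toy_logger
{
public:
  explicit toy_logger (FILE *out) : m_refcount (0), m_out (out), m_indent (0) {}

  void incref () { m_refcount++; }
  void decref ()
  {
    assert (m_refcount > 0);
    if (--m_refcount == 0)
      delete this;  /* last reference: self-delete, as logger::decref does */
  }

  void log (const char *msg)
  {
    for (int i = 0; i < m_indent; i++)
      fputc (' ', m_out);
    fprintf (m_out, "%s\n", msg);
    fflush (m_out);  /* flush per line so a crash still leaves a useful log */
  }

  void enter_scope (const char *name) { log (name); m_indent++; }
  void exit_scope () { if (m_indent) m_indent--; }

private:
  ~toy_logger () {}
  int m_refcount;
  FILE *m_out;
  int m_indent;
};

/* RAII guard: holds a reference for its lifetime and indents nested output.  */
class toy_scope
{
public:
  toy_scope (toy_logger *logger, const char *name)
    : m_logger (logger), m_name (name)
  {
    if (m_logger)
      {
        m_logger->incref ();
        m_logger->enter_scope (m_name);
      }
  }
  ~toy_scope ()
  {
    if (m_logger)
      {
        m_logger->exit_scope ();
        m_logger->decref ();
      }
  }
private:
  toy_logger *m_logger;
  const char *m_name;
};

int main ()
{
  toy_logger *logger = new toy_logger (stderr);
  logger->incref ();
  {
    toy_scope outer (logger, "entering: outer");
    logger->log ("inside outer");
    {
      toy_scope inner (logger, "entering: inner");
      logger->log ("inside inner");  /* printed with deeper indentation */
    }
  }
  logger->decref ();  /* drops the last reference; logger deletes itself */
  return 0;
}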
--- /dev/null
+/* Integration of the analyzer with GCC's pass manager.
+ Copyright (C) 2019-2020 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "context.h"
+#include "tree-pass.h"
+#include "diagnostic.h"
+#include "options.h"
+#include "analyzer/engine.h"
+
+namespace {
+
+/* Data for the analyzer pass. */
+
+const pass_data pass_data_analyzer =
+{
+ IPA_PASS, /* type */
+ "analyzer", /* name */
+ OPTGROUP_NONE, /* optinfo_flags */
+ TV_ANALYZER, /* tv_id */
+ PROP_ssa, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ 0, /* todo_flags_finish */
+};
+
+/* The analyzer pass. */
+
+class pass_analyzer : public ipa_opt_pass_d
+{
+public:
+ pass_analyzer(gcc::context *ctxt)
+ : ipa_opt_pass_d (pass_data_analyzer, ctxt,
+ NULL, /* generate_summary */
+ NULL, /* write_summary */
+ NULL, /* read_summary */
+ NULL, /* write_optimization_summary */
+ NULL, /* read_optimization_summary */
+ NULL, /* stmt_fixup */
+ 0, /* function_transform_todo_flags_start */
+ NULL, /* function_transform */
+ NULL) /* variable_transform */
+ {}
+
+ /* opt_pass methods: */
+ bool gate (function *) FINAL OVERRIDE;
+ unsigned int execute (function *) FINAL OVERRIDE;
+}; // class pass_analyzer
+
+/* Only run the analyzer if -fanalyzer. */
+
+bool
+pass_analyzer::gate (function *)
+{
+ return flag_analyzer != 0;
+}
+
+/* Entrypoint for the analyzer pass. */
+
+unsigned int
+pass_analyzer::execute (function *)
+{
+#if ENABLE_ANALYZER
+ run_checkers ();
+#else
+ sorry ("%qs was not enabled in this build of GCC"
+ " (missing configure-time option %qs)",
+ "-fanalyzer", "--enable-analyzer");
+#endif
+
+ return 0;
+}
+
+} // anon namespace
+
+/* Make an instance of the analyzer pass. */
+
+ipa_opt_pass_d *
+make_pass_analyzer (gcc::context *ctxt)
+{
+ return new pass_analyzer (ctxt);
+}
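
pass_analyzer above only runs when -fanalyzer is given, and reports sorry () when the analyzer was configured out. A minimal standalone sketch of that gate/execute shape, using placeholder names (toy_pass, flag_toy_analyzer) rather than GCC's real opt_pass interface:

#include <cstdio>

#define ENABLE_TOY_ANALYZER 1  /* stands in for the configure-time check */

static int flag_toy_analyzer;  /* stands in for the -fanalyzer flag variable */

static void run_toy_checkers ()
{
  printf ("running checkers\n");
}

struct toy_pass
{
  /* Mirror pass_analyzer::gate: only run when the option was given.  */
  bool gate () const { return flag_toy_analyzer != 0; }

  /* Mirror pass_analyzer::execute: run the checkers if the analyzer was
     built in, otherwise report that it is unavailable.  */
  unsigned int execute () const
  {
#if ENABLE_TOY_ANALYZER
    run_toy_checkers ();
#else
    fprintf (stderr, "sorry, analyzer was not enabled in this build\n");
#endif
    return 0;
  }
};

int main ()
{
  flag_toy_analyzer = 1;  /* as if -fanalyzer had been passed */
  toy_pass p;
  if (p.gate ())
    p.execute ();
  return 0;
}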
--- /dev/null
+/* Selftest support for the analyzer.
+ Copyright (C) 2019-2020 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tree.h"
+#include "stringpool.h"
+#include "function.h"
+#include "analyzer/analyzer.h"
+#include "analyzer/analyzer-selftests.h"
+
+#if CHECKING_P
+
+namespace selftest {
+
+/* Build a VAR_DECL named NAME of type TYPE, simulating a file-level
+ static variable. */
+
+tree
+build_global_decl (const char *name, tree type)
+{
+ tree decl = build_decl (UNKNOWN_LOCATION, VAR_DECL,
+ get_identifier (name), type);
+ TREE_STATIC (decl) = 1;
+ return decl;
+}
+
+/* Run all analyzer-specific selftests. */
+
+void
+run_analyzer_selftests ()
+{
+#if ENABLE_ANALYZER
+ analyzer_constraint_manager_cc_tests ();
+ analyzer_program_point_cc_tests ();
+ analyzer_program_state_cc_tests ();
+ analyzer_region_model_cc_tests ();
+#endif /* #if ENABLE_ANALYZER */
+}
+
+} /* end of namespace selftest. */
+
+#endif /* #if CHECKING_P */
--- /dev/null
+/* Selftests for the analyzer.
+ Copyright (C) 2019-2020 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_ANALYZER_SELFTESTS_H
+#define GCC_ANALYZER_SELFTESTS_H
+
+#if CHECKING_P
+
+namespace selftest {
+
+extern tree build_global_decl (const char *name, tree type);
+
+extern void run_analyzer_selftests ();
+
+/* Declarations for specific families of tests (by source file), in
+ alphabetical order. */
+extern void analyzer_checker_script_cc_tests ();
+extern void analyzer_constraint_manager_cc_tests ();
+extern void analyzer_program_point_cc_tests ();
+extern void analyzer_program_state_cc_tests ();
+extern void analyzer_region_model_cc_tests ();
+
+} /* end of namespace selftest. */
+
+#endif /* #if CHECKING_P */
+
+#endif /* GCC_ANALYZER_SELFTESTS_H */
--- /dev/null
+/* Utility functions for the analyzer.
+ Copyright (C) 2019-2020 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tree.h"
+#include "function.h"
+#include "basic-block.h"
+#include "gimple.h"
+#include "diagnostic.h"
+#include "intl.h"
+#include "function.h"
+#include "analyzer/analyzer.h"
+
+#if ENABLE_ANALYZER
+
+/* Helper function for checkers. Is the CALL to the given function name,
+ and with the given number of arguments?
+
+ This doesn't resolve function pointers via the region model;
+ is_named_call_p should be used instead, using a fndecl from
+ get_fndecl_for_call; this function should only be used for special cases
+ where it's not practical to get at the region model, or for special
+ analyzer functions such as __analyzer_dump. */
+
+bool
+is_special_named_call_p (const gcall *call, const char *funcname,
+ unsigned int num_args)
+{
+ gcc_assert (funcname);
+
+ tree fndecl = gimple_call_fndecl (call);
+ if (!fndecl)
+ return false;
+
+ return is_named_call_p (fndecl, funcname, call, num_args);
+}
+
+/* Helper function for checkers. Does FNDECL have the given FUNCNAME? */
+
+bool
+is_named_call_p (tree fndecl, const char *funcname)
+{
+ gcc_assert (fndecl);
+ gcc_assert (funcname);
+
+ return 0 == strcmp (IDENTIFIER_POINTER (DECL_NAME (fndecl)), funcname);
+}
+
+/* Helper function for checkers. Does FNDECL have the given FUNCNAME, and
+ does CALL have the given number of arguments? */
+
+bool
+is_named_call_p (tree fndecl, const char *funcname,
+ const gcall *call, unsigned int num_args)
+{
+ gcc_assert (fndecl);
+ gcc_assert (funcname);
+
+ if (!is_named_call_p (fndecl, funcname))
+ return false;
+
+ if (gimple_call_num_args (call) != num_args)
+ return false;
+
+ return true;
+}
+
+/* Return true if stmt is a setjmp call. */
+
+bool
+is_setjmp_call_p (const gimple *stmt)
+{
+ /* TODO: is there a less hacky way to check for "setjmp"? */
+ if (const gcall *call = dyn_cast <const gcall *> (stmt))
+ if (is_special_named_call_p (call, "_setjmp", 1))
+ return true;
+
+ return false;
+}
+
+/* Return true if CALL is a longjmp call. */
+
+bool
+is_longjmp_call_p (const gcall *call)
+{
+ /* TODO: is there a less hacky way to check for "longjmp"? */
+ if (is_special_named_call_p (call, "longjmp", 2))
+ return true;
+
+ return false;
+}
+
+/* Generate a label_text instance by formatting FMT, using a
+ temporary clone of the global_dc's printer (thus using its
+ formatting callbacks).
+
+ Colorize if the global_dc supports colorization and CAN_COLORIZE is
+ true. */
+
+label_text
+make_label_text (bool can_colorize, const char *fmt, ...)
+{
+ pretty_printer *pp = global_dc->printer->clone ();
+ pp_clear_output_area (pp);
+
+ if (!can_colorize)
+ pp_show_color (pp) = false;
+
+ text_info ti;
+ rich_location rich_loc (line_table, UNKNOWN_LOCATION);
+
+ va_list ap;
+
+ va_start (ap, fmt);
+
+ ti.format_spec = _(fmt);
+ ti.args_ptr = &ap;
+ ti.err_no = 0;
+ ti.x_data = NULL;
+ ti.m_richloc = &rich_loc;
+
+ pp_format (pp, &ti);
+ pp_output_formatted_text (pp);
+
+ va_end (ap);
+
+ label_text result = label_text::take (xstrdup (pp_formatted_text (pp)));
+ delete pp;
+ return result;
+}
+
+#endif /* #if ENABLE_ANALYZER */
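
make_label_text above formats varargs through a cloned pretty_printer and hands back an owned string. As a hedged standalone approximation of that "format into a heap-allocated buffer the caller owns" pattern, using only the C library (toy_make_label_text is an invented name, and plain printf formats stand in for GCC's %qE/%qs diagnostic codes):

#include <cstdarg>
#include <cstdio>
#include <cstdlib>

/* Format FMT/... into a freshly malloc'd string owned by the caller.  */
static char *
toy_make_label_text (const char *fmt, ...)
{
  va_list ap;

  /* First pass: measure the formatted length.  */
  va_start (ap, fmt);
  int len = vsnprintf (NULL, 0, fmt, ap);
  va_end (ap);
  if (len < 0)
    return NULL;

  /* Second pass: format into an owned buffer.  */
  char *buf = (char *) malloc (len + 1);
  if (!buf)
    return NULL;
  va_start (ap, fmt);
  vsnprintf (buf, len + 1, fmt, ap);
  va_end (ap);
  return buf;
}

int main ()
{
  char *label = toy_make_label_text ("entry to '%s'", "foo");
  printf ("%s\n", label);
  free (label);
  return 0;
}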
--- /dev/null
+/* Utility functions for the analyzer.
+ Copyright (C) 2019-2020 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_ANALYZER_ANALYZER_H
+#define GCC_ANALYZER_ANALYZER_H
+
+/* Forward decls of common types, with indentation to show inheritance. */
+
+class graphviz_out;
+class supergraph;
+class supernode;
+class superedge;
+ class cfg_superedge;
+ class switch_cfg_superedge;
+ class callgraph_superedge;
+ class call_superedge;
+ class return_superedge;
+class svalue;
+ class region_svalue;
+ class constant_svalue;
+ class poisoned_svalue;
+ class unknown_svalue;
+ class setjmp_svalue;
+class region;
+ class map_region;
+ class symbolic_region;
+class region_model;
+class region_model_context;
+ class impl_region_model_context;
+class constraint_manager;
+class equiv_class;
+struct model_merger;
+struct svalue_id_merger_mapping;
+struct canonicalization;
+class pending_diagnostic;
+class state_change_event;
+class checker_path;
+class extrinsic_state;
+class sm_state_map;
+class stmt_finder;
+class program_point;
+class program_state;
+class exploded_graph;
+class exploded_node;
+class exploded_edge;
+class exploded_cluster;
+class exploded_path;
+class analysis_plan;
+class state_purge_map;
+class state_purge_per_ssa_name;
+class state_change;
+class rewind_info_t;
+
+extern bool is_special_named_call_p (const gcall *call, const char *funcname,
+ unsigned int num_args);
+extern bool is_named_call_p (tree fndecl, const char *funcname);
+extern bool is_named_call_p (tree fndecl, const char *funcname,
+ const gcall *call, unsigned int num_args);
+extern bool is_setjmp_call_p (const gimple *stmt);
+extern bool is_longjmp_call_p (const gcall *call);
+
+extern void register_analyzer_pass ();
+
+extern label_text make_label_text (bool can_colorize, const char *fmt, ...);
+
+/* An RAII-style class for pushing/popping cfun within a scope.
+ Doing so ensures we get "In function " announcements
+ from the diagnostics subsystem. */
+
+class auto_cfun
+{
+public:
+ auto_cfun (function *fun) { push_cfun (fun); }
+ ~auto_cfun () { pop_cfun (); }
+};
+
+/* Begin suppressing -Wformat and -Wformat-extra-args. */
+
+#define PUSH_IGNORE_WFORMAT \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wformat\"") \
+ _Pragma("GCC diagnostic ignored \"-Wformat-extra-args\"")
+
+/* Finish suppressing -Wformat and -Wformat-extra-args. */
+
+#define POP_IGNORE_WFORMAT \
+ _Pragma("GCC diagnostic pop")
+
+/* A template for creating hash traits for a POD type. */
+
+template <typename Type>
+struct pod_hash_traits : typed_noop_remove<Type>
+{
+ typedef Type value_type;
+ typedef Type compare_type;
+ static inline hashval_t hash (value_type);
+ static inline bool equal (const value_type &existing,
+ const value_type &candidate);
+ static inline void mark_deleted (Type &);
+ static inline void mark_empty (Type &);
+ static inline bool is_deleted (Type);
+ static inline bool is_empty (Type);
+};
+
+#endif /* GCC_ANALYZER_ANALYZER_H */
--- /dev/null
+; analyzer.opt -- Options for the analyzer.
+
+; Copyright (C) 2019-2020 Free Software Foundation, Inc.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free
+; Software Foundation; either version 3, or (at your option) any later
+; version.
+;
+; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+; WARRANTY; without even the implied warranty of MERCHANTABILITY or
+; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+; for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING3. If not see
+; <http://www.gnu.org/licenses/>.
+
+; See the GCC internals manual for a description of this file's format.
+
+; Please try to keep this file in ASCII collating order.
+
+-param=analyzer-bb-explosion-factor=
+Common Joined UInteger Var(param_analyzer_bb_explosion_factor) Init(5) Param
+The maximum number of 'after supernode' exploded nodes within the analyzer per supernode, before terminating analysis.
+
+-param=analyzer-max-enodes-per-program-point=
+Common Joined UInteger Var(param_analyzer_max_enodes_per_program_point) Init(8) Param
+The maximum number of exploded nodes per program point within the analyzer, before terminating analysis of that point.
+
+-param=analyzer-max-recursion-depth=
+Common Joined UInteger Var(param_analyzer_max_recursion_depth) Init(2) Param
+The maximum number of times a callsite can appear in a call stack within the analyzer, before terminating analysis of a call that would recurse deeper.
+
+-param=analyzer-min-snodes-for-call-summary=
+Common Joined UInteger Var(param_analyzer_min_snodes_for_call_summary) Init(10) Param
+The minimum number of supernodes within a function for the analyzer to consider summarizing its effects at call sites.
+
+Wanalyzer-double-fclose
+Common Var(warn_analyzer_double_fclose) Init(1) Warning
+Warn about code paths in which a stdio FILE can be closed more than once.
+
+Wanalyzer-double-free
+Common Var(warn_analyzer_double_free) Init(1) Warning
+Warn about code paths in which a pointer can be freed more than once.
+
+Wanalyzer-exposure-through-output-file
+Common Var(warn_analyzer_exposure_through_output_file) Init(1) Warning
+Warn about code paths in which sensitive data is written to a file.
+
+Wanalyzer-file-leak
+Common Var(warn_analyzer_file_leak) Init(1) Warning
+Warn about code paths in which a stdio FILE is not closed.
+
+Wanalyzer-free-of-non-heap
+Common Var(warn_analyzer_free_of_non_heap) Init(1) Warning
+Warn about code paths in which a non-heap pointer is freed.
+
+Wanalyzer-malloc-leak
+Common Var(warn_analyzer_malloc_leak) Init(1) Warning
+Warn about code paths in which a heap-allocated pointer leaks.
+
+Wanalyzer-possible-null-argument
+Common Var(warn_analyzer_possible_null_argument) Init(1) Warning
+Warn about code paths in which a possibly-NULL value is passed to a must-not-be-NULL function argument.
+
+Wanalyzer-possible-null-dereference
+Common Var(warn_analyzer_possible_null_dereference) Init(1) Warning
+Warn about code paths in which a possibly-NULL pointer is dereferenced.
+
+Wanalyzer-unsafe-call-within-signal-handler
+Common Var(warn_analyzer_unsafe_call_within_signal_handler) Init(1) Warning
+Warn about code paths in which an async-signal-unsafe function is called from a signal handler.
+
+Wanalyzer-null-argument
+Common Var(warn_analyzer_null_argument) Init(1) Warning
+Warn about code paths in which NULL is passed to a must-not-be-NULL function argument.
+
+Wanalyzer-null-dereference
+Common Var(warn_analyzer_null_dereference) Init(1) Warning
+Warn about code paths in which a NULL pointer is dereferenced.
+
+Wanalyzer-stale-setjmp-buffer
+Common Var(warn_analyzer_stale_setjmp_buffer) Init(1) Warning
+Warn about code paths in which a longjmp rewinds to a jmp_buf saved in a stack frame that has returned.
+
+Wanalyzer-tainted-array-index
+Common Var(warn_analyzer_tainted_array_index) Init(1) Warning
+Warn about code paths in which an unsanitized value is used as an array index.
+
+Wanalyzer-use-after-free
+Common Var(warn_analyzer_use_after_free) Init(1) Warning
+Warn about code paths in which a freed value is used.
+
+Wanalyzer-use-of-pointer-in-stale-stack-frame
+Common Var(warn_analyzer_use_of_pointer_in_stale_stack_frame) Init(1) Warning
+Warn about code paths in which a pointer to a stale stack frame is used.
+
+Wanalyzer-use-of-uninitialized-value
+Common Var(warn_analyzer_use_of_uninitialized_value) Init(1) Warning
+Warn about code paths in which an uninitialized value is used.
+
+Wanalyzer-too-complex
+Common Var(warn_analyzer_too_complex) Init(0) Warning
+Warn if the code is too complicated for the analyzer to fully explore.
+
+fanalyzer-checker=
+Common Joined RejectNegative Var(flag_analyzer_checker)
+Restrict the analyzer to run just the named checker.
+
+fanalyzer-fine-grained
+Common Var(flag_analyzer_fine_grained) Init(0)
+Avoid combining multiple statements into one exploded edge.
+
+fanalyzer-state-purge
+Common Var(flag_analyzer_state_purge) Init(1)
+Purge unneeded state during analysis.
+
+fanalyzer-state-merge
+Common Var(flag_analyzer_state_merge) Init(1)
+Merge similar-enough states during analysis.
+
+fanalyzer-transitivity
+Common Var(flag_analyzer_transitivity) Init(0)
+Enable transitivity of constraints during analysis.
+
+fanalyzer-call-summaries
+Common Var(flag_analyzer_call_summaries) Init(0)
+Approximate the effect of function calls to simplify analysis.
+
+fanalyzer-verbose-edges
+Common Var(flag_analyzer_verbose_edges) Init(0)
+Emit more verbose descriptions of control flow in diagnostics.
+
+fanalyzer-verbose-state-changes
+Common Var(flag_analyzer_verbose_state_changes) Init(0)
+Emit more verbose descriptions of state changes in diagnostics.
+
+fanalyzer-verbosity=
+Common Joined UInteger Var(analyzer_verbosity) Init(2)
+Control which events are displayed in diagnostic paths.
+
+fdump-analyzer
+Common RejectNegative Var(flag_dump_analyzer)
+Dump internal details about what the analyzer is doing to SRCFILE.analyzer.txt.
+
+fdump-analyzer-stderr
+Common RejectNegative Var(flag_dump_analyzer_stderr)
+Dump internal details about what the analyzer is doing to stderr.
+
+fdump-analyzer-callgraph
+Common RejectNegative Var(flag_dump_analyzer_callgraph)
+Dump a representation of the call graph to a SRCFILE.callgraph.dot file.
+
+fdump-analyzer-exploded-graph
+Common RejectNegative Var(flag_dump_analyzer_exploded_graph)
+Dump the analyzer exploded graph to a SRCFILE.eg.dot file.
+
+fdump-analyzer-exploded-nodes
+Common RejectNegative Var(flag_dump_analyzer_exploded_nodes)
+Emit diagnostics showing the location of nodes in the exploded graph.
+
+fdump-analyzer-exploded-nodes-2
+Common RejectNegative Var(flag_dump_analyzer_exploded_nodes_2)
+Dump a textual representation of the exploded graph to SRCFILE.eg.txt.
+
+fdump-analyzer-exploded-nodes-3
+Common RejectNegative Var(flag_dump_analyzer_exploded_nodes_3)
+Dump a textual representation of the exploded graph to SRCFILE.eg-ID.txt.
+
+fdump-analyzer-state-purge
+Common RejectNegative Var(flag_dump_analyzer_state_purge)
+Dump state-purging information to a SRCFILE.state-purge.dot file.
+
+fdump-analyzer-supergraph
+Common RejectNegative Var(flag_dump_analyzer_supergraph)
+Dump the analyzer supergraph to a SRCFILE.supergraph.dot file.
+
+; This comment is to ensure we retain the blank line above.
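
For reference, the path-sensitive warnings defined above are aimed at code such as the following (an illustrative snippet, not taken from the patch's test suite), when compiled with the new -fanalyzer option:

/* Examples of the code paths the new -Wanalyzer-* warnings target.  */
#include <stdlib.h>
#include <stdio.h>

void test_double_free (void *p)
{
  free (p);
  free (p);        /* -Wanalyzer-double-free */
}

int test_use_after_free (int *p)
{
  free (p);
  return *p;       /* -Wanalyzer-use-after-free */
}

void test_file_leak (const char *path)
{
  FILE *f = fopen (path, "r");
  if (!f)
    return;
  /* missing fclose (f): -Wanalyzer-file-leak */
}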
--- /dev/null
+/* Call stacks at program points.
+ Copyright (C) 2019-2020 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "pretty-print.h"
+#include "tree.h"
+#include "options.h"
+#include "analyzer/call-string.h"
+#include "ordered-hash-map.h"
+#include "options.h"
+#include "cgraph.h"
+#include "function.h"
+#include "cfg.h"
+#include "basic-block.h"
+#include "gimple.h"
+#include "gimple-iterator.h"
+#include "digraph.h"
+#include "analyzer/supergraph.h"
+
+#if ENABLE_ANALYZER
+
+/* class call_string. */
+
+/* call_string's copy ctor. */
+
+call_string::call_string (const call_string &other)
+: m_return_edges (other.m_return_edges.length ())
+{
+ const return_superedge *e;
+ int i;
+ FOR_EACH_VEC_ELT (other.m_return_edges, i, e)
+ m_return_edges.quick_push (e);
+}
+
+/* call_string's assignment operator. */
+
+call_string&
+call_string::operator= (const call_string &other)
+{
+ // would be much simpler if we could rely on vec<> assignment op
+ m_return_edges.truncate (0);
+ m_return_edges.reserve (other.m_return_edges.length (), true);
+ const return_superedge *e;
+ int i;
+ FOR_EACH_VEC_ELT (other.m_return_edges, i, e)
+ m_return_edges.quick_push (e);
+ return *this;
+}
+
+/* call_string's equality operator. */
+
+bool
+call_string::operator== (const call_string &other) const
+{
+ if (m_return_edges.length () != other.m_return_edges.length ())
+ return false;
+ const return_superedge *e;
+ int i;
+ FOR_EACH_VEC_ELT (m_return_edges, i, e)
+ if (e != other.m_return_edges[i])
+ return false;
+ return true;
+}
+
+/* Print this to PP. */
+
+void
+call_string::print (pretty_printer *pp) const
+{
+ pp_string (pp, "[");
+
+ const return_superedge *e;
+ int i;
+ FOR_EACH_VEC_ELT (m_return_edges, i, e)
+ {
+ if (i > 0)
+ pp_string (pp, ", ");
+ pp_printf (pp, "(SN: %i -> SN: %i in %s)",
+ e->m_src->m_index, e->m_dest->m_index,
+ function_name (e->m_dest->m_fun));
+ }
+
+ pp_string (pp, "]");
+}
+
+/* Generate a hash value for this call_string. */
+
+hashval_t
+call_string::hash () const
+{
+ inchash::hash hstate;
+ int i;
+ const return_superedge *e;
+ FOR_EACH_VEC_ELT (m_return_edges, i, e)
+ hstate.add_ptr (e);
+ return hstate.end ();
+}
+
+/* Push the return superedge for CALL_SEDGE onto the end of this
+ call_string. */
+
+void
+call_string::push_call (const supergraph &sg,
+ const call_superedge *call_sedge)
+{
+ gcc_assert (call_sedge);
+ const return_superedge *return_sedge = call_sedge->get_edge_for_return (sg);
+ gcc_assert (return_sedge);
+ m_return_edges.safe_push (return_sedge);
+}
+
+/* Count the number of times the top-most call site appears in the
+ stack. */
+
+int
+call_string::calc_recursion_depth () const
+{
+ if (m_return_edges.is_empty ())
+ return 0;
+ const return_superedge *top_return_sedge
+ = m_return_edges[m_return_edges.length () - 1];
+
+ int result = 0;
+ const return_superedge *e;
+ int i;
+ FOR_EACH_VEC_ELT (m_return_edges, i, e)
+ if (e == top_return_sedge)
+ ++result;
+ return result;
+}
+
+/* Comparator for call strings.
+ Return negative if A is before B.
+ Return positive if A is after B.
+ Return 0 if they are equal. */
+
+int
+call_string::cmp (const call_string &a,
+ const call_string &b)
+{
+ int result = cmp_1 (a, b);
+
+ /* Check that the ordering is symmetric */
+#if CHECKING_P
+ int reversed = cmp_1 (b, a);
+ gcc_assert (reversed == -result);
+#endif
+
+ /* We should only have 0 for equal pairs. */
+ gcc_assert (result != 0
+ || a == b);
+
+ return result;
+}
+
+/* Implementation of call_string::cmp.
+ This implements a version of lexicographical order. */
+
+int
+call_string::cmp_1 (const call_string &a,
+ const call_string &b)
+{
+ unsigned len_a = a.length ();
+ unsigned len_b = b.length ();
+
+ unsigned i = 0;
+ while (1)
+ {
+ /* Consider index i; the strings have been equal up to it. */
+
+ /* Have both strings run out? */
+ if (i >= len_a && i >= len_b)
+ return 0;
+
+ /* Otherwise, has just one of the strings run out? */
+ if (i >= len_a)
+ return 1;
+ if (i >= len_b)
+ return -1;
+
+ /* Otherwise, compare the edges. */
+ const return_superedge *edge_a = a[i];
+ const return_superedge *edge_b = b[i];
+ int src_cmp = edge_a->m_src->m_index - edge_b->m_src->m_index;
+ if (src_cmp)
+ return src_cmp;
+ int dest_cmp = edge_a->m_dest->m_index - edge_b->m_dest->m_index;
+ if (dest_cmp)
+ return dest_cmp;
+ i++;
+ // TODO: test coverage for this
+ }
+}
+
+/* Assert that this object is sane. */
+
+void
+call_string::validate () const
+{
+ /* Skip this in a release build. */
+#if !CHECKING_P
+ return;
+#endif
+
+ /* Each entry's "caller" should be the "callee" of the previous entry. */
+ const return_superedge *e;
+ int i;
+ FOR_EACH_VEC_ELT (m_return_edges, i, e)
+ if (i > 0)
+ gcc_assert (e->get_caller_function ()
+ == m_return_edges[i - 1]->get_callee_function ());
+}
+
+#endif /* #if ENABLE_ANALYZER */
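
call_string::cmp_1 above is a lexicographical comparison over the stored return edges. A self-contained C++ sketch of the same comparison over plain (source index, destination index) pairs, with invented toy_* types:

#include <vector>
#include <cstdio>

struct toy_return_edge { int src; int dest; };

typedef std::vector<toy_return_edge> toy_call_string;

/* Negative if A orders before B, positive if after, 0 if equal.  */
static int
toy_cmp (const toy_call_string &a, const toy_call_string &b)
{
  unsigned i = 0;
  while (true)
    {
      /* Both strings exhausted: equal.  */
      if (i >= a.size () && i >= b.size ())
        return 0;
      /* If A runs out first it orders after B (mirroring cmp_1 above).  */
      if (i >= a.size ())
        return 1;
      if (i >= b.size ())
        return -1;
      /* Otherwise compare the edge endpoints.  */
      if (int c = a[i].src - b[i].src)
        return c;
      if (int c = a[i].dest - b[i].dest)
        return c;
      i++;
    }
}

int main ()
{
  toy_call_string x = { {1, 2} };
  toy_call_string y = { {1, 2}, {3, 4} };
  printf ("%d %d %d\n", toy_cmp (x, y), toy_cmp (y, x), toy_cmp (x, x));
  return 0;
}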
--- /dev/null
+/* Call stacks at program points.
+ Copyright (C) 2019-2020 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_ANALYZER_CALL_STRING_H
+#define GCC_ANALYZER_CALL_STRING_H
+
+class supergraph;
+class call_superedge;
+class return_superedge;
+
+/* A string of return_superedge pointers, representing a call stack
+ at a program point.
+
+ This is used to ensure that we generate interprocedurally valid paths
+ i.e. that we return to the same callsite that called us.
+
+ The class actually stores the return edges, rather than the call edges,
+ since that's what we need to compare against. */
+
+class call_string
+{
+public:
+ call_string () : m_return_edges () {}
+ call_string (const call_string &other);
+ call_string& operator= (const call_string &other);
+
+ bool operator== (const call_string &other) const;
+
+ void print (pretty_printer *pp) const;
+
+ hashval_t hash () const;
+
+ bool empty_p () const { return m_return_edges.is_empty (); }
+
+ void push_call (const supergraph &sg,
+ const call_superedge *sedge);
+ const return_superedge *pop () { return m_return_edges.pop (); }
+
+ int calc_recursion_depth () const;
+
+ static int cmp (const call_string &a,
+ const call_string &b);
+
+ unsigned length () const { return m_return_edges.length (); }
+ const return_superedge *operator[] (unsigned idx) const
+ {
+ return m_return_edges[idx];
+ }
+
+ void validate () const;
+
+private:
+ static int cmp_1 (const call_string &a,
+ const call_string &b);
+
+ auto_vec<const return_superedge *> m_return_edges;
+};
+
+#endif /* GCC_ANALYZER_CALL_STRING_H */
--- /dev/null
+/* Subclasses of diagnostic_path and diagnostic_event for analyzer diagnostics.
+ Copyright (C) 2019-2020 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tree.h"
+#include "function.h"
+#include "basic-block.h"
+#include "gimple.h"
+#include "gimple-pretty-print.h"
+#include "fold-const.h"
+#include "function.h"
+#include "diagnostic-path.h"
+#include "options.h"
+#include "cgraph.h"
+#include "function.h"
+#include "cfg.h"
+#include "digraph.h"
+#include "alloc-pool.h"
+#include "fibonacci_heap.h"
+#include "diagnostic-event-id.h"
+#include "shortest-paths.h"
+#include "analyzer/analyzer.h"
+#include "analyzer/analyzer-logging.h"
+#include "analyzer/sm.h"
+#include "sbitmap.h"
+#include "tristate.h"
+#include "ordered-hash-map.h"
+#include "selftest.h"
+#include "analyzer/region-model.h"
+#include "analyzer/program-state.h"
+#include "analyzer/checker-path.h"
+#include "gimple-iterator.h"
+#include "analyzer/supergraph.h"
+#include "analyzer/pending-diagnostic.h"
+#include "analyzer/diagnostic-manager.h"
+#include "analyzer/constraint-manager.h"
+#include "analyzer/diagnostic-manager.h"
+#include "analyzer/checker-path.h"
+#include "analyzer/call-string.h"
+#include "analyzer/program-point.h"
+#include "analyzer/exploded-graph.h"
+
+#if ENABLE_ANALYZER
+
+/* Get a string for EK. */
+
+const char *
+event_kind_to_string (enum event_kind ek)
+{
+ switch (ek)
+ {
+ default:
+ gcc_unreachable ();
+ case EK_DEBUG:
+ return "EK_DEBUG";
+ case EK_CUSTOM:
+ return "EK_CUSTOM";
+ case EK_STMT:
+ return "EK_STMT";
+ case EK_FUNCTION_ENTRY:
+ return "EK_FUNCTION_ENTRY";
+ case EK_STATE_CHANGE:
+ return "EK_STATE_CHANGE";
+ case EK_START_CFG_EDGE:
+ return "EK_START_CFG_EDGE";
+ case EK_END_CFG_EDGE:
+ return "EK_END_CFG_EDGE";
+ case EK_CALL_EDGE:
+ return "EK_CALL_EDGE";
+ case EK_RETURN_EDGE:
+ return "EK_RETURN_EDGE";
+ case EK_SETJMP:
+ return "EK_SETJMP";
+ case EK_REWIND_FROM_LONGJMP:
+ return "EK_REWIND_FROM_LONGJMP";
+ case EK_REWIND_TO_SETJMP:
+ return "EK_REWIND_TO_SETJMP";
+ case EK_WARNING:
+ return "EK_WARNING";
+ }
+}
+
+/* class checker_event : public diagnostic_event. */
+
+/* Dump this event to PP (for debugging/logging purposes). */
+
+void
+checker_event::dump (pretty_printer *pp) const
+{
+ label_text event_desc (get_desc (false));
+ pp_printf (pp, "\"%s\" (depth %i, m_loc=%x)",
+ event_desc.m_buffer,
+ get_stack_depth (),
+ get_location ());
+ event_desc.maybe_free ();
+}
+
+/* Hook for being notified when this event has its final id EMISSION_ID
+   and is about to be emitted for PD.
+
+ Base implementation of checker_event::prepare_for_emission vfunc;
+ subclasses that override this should chain up to it.
+
+ Record PD and EMISSION_ID, and call the get_desc vfunc, so that any
+ side-effects of the call to get_desc take place before
+ pending_diagnostic::emit is called.
+
+ For example, state_change_event::get_desc can call
+ pending_diagnostic::describe_state_change; free_of_non_heap can use this
+ to tweak the message (TODO: would be neater to simply capture the
+ pertinent data within the sm-state). */
+
+void
+checker_event::prepare_for_emission (checker_path *,
+ pending_diagnostic *pd,
+ diagnostic_event_id_t emission_id)
+{
+ m_pending_diagnostic = pd;
+ m_emission_id = emission_id;
+
+ label_text desc = get_desc (false);
+ desc.maybe_free ();
+}
+
+/* class debug_event : public checker_event. */
+
+/* Implementation of diagnostic_event::get_desc vfunc for
+ debug_event.
+ Use the saved string as the event's description. */
+
+label_text
+debug_event::get_desc (bool) const
+{
+ return label_text::borrow (m_desc);
+}
+
+/* class custom_event : public checker_event. */
+
+/* Implementation of diagnostic_event::get_desc vfunc for
+ custom_event.
+ Use the saved string as the event's description. */
+
+label_text
+custom_event::get_desc (bool) const
+{
+ return label_text::borrow (m_desc);
+}
+
+/* class statement_event : public checker_event. */
+
+/* statement_event's ctor. */
+
+statement_event::statement_event (const gimple *stmt, tree fndecl, int depth,
+ const program_state &dst_state)
+: checker_event (EK_STMT, gimple_location (stmt), fndecl, depth),
+ m_stmt (stmt),
+ m_dst_state (dst_state)
+{
+}
+
+/* Implementation of diagnostic_event::get_desc vfunc for
+ statement_event.
+ Use the statement's dump form as the event's description. */
+
+label_text
+statement_event::get_desc (bool) const
+{
+ pretty_printer pp;
+ pp_string (&pp, "stmt: ");
+ pp_gimple_stmt_1 (&pp, m_stmt, 0, (dump_flags_t)0);
+ return label_text::take (xstrdup (pp_formatted_text (&pp)));
+}
+
+/* class function_entry_event : public checker_event. */
+
+/* Implementation of diagnostic_event::get_desc vfunc for
+ function_entry_event.
+
+ Use a string such as "entry to 'foo'" as the event's description. */
+
+label_text
+function_entry_event::get_desc (bool can_colorize) const
+{
+ return make_label_text (can_colorize, "entry to %qE", m_fndecl);
+}
+
+/* class state_change_event : public checker_event. */
+
+/* state_change_event's ctor. */
+
+state_change_event::state_change_event (const supernode *node,
+ const gimple *stmt,
+ int stack_depth,
+ const state_machine &sm,
+ tree var,
+ state_machine::state_t from,
+ state_machine::state_t to,
+ tree origin,
+ const program_state &dst_state)
+: checker_event (EK_STATE_CHANGE,
+ stmt->location, node->m_fun->decl,
+ stack_depth),
+ m_node (node), m_stmt (stmt), m_sm (sm),
+ m_var (var), m_from (from), m_to (to),
+ m_origin (origin),
+ m_dst_state (dst_state)
+{
+}
+
+/* Implementation of diagnostic_event::get_desc vfunc for
+ state_change_event.
+
+ Attempt to generate a nicer human-readable description.
+ For greatest precision-of-wording, give the pending diagnostic
+ a chance to describe this state change (in terms of the
+ diagnostic).
+ Note that we only have a pending_diagnostic set on the event once
+   the diagnostic is about to be emitted, so the description for
+ an event can change. */
+
+label_text
+state_change_event::get_desc (bool can_colorize) const
+{
+ if (m_pending_diagnostic)
+ {
+ label_text custom_desc
+ = m_pending_diagnostic->describe_state_change
+ (evdesc::state_change (can_colorize, m_var, m_origin,
+ m_from, m_to, m_emission_id, *this));
+ if (custom_desc.m_buffer)
+ {
+ if (flag_analyzer_verbose_state_changes)
+ {
+ /* Append debug version. */
+ label_text result;
+ if (m_origin)
+ result = make_label_text
+ (can_colorize,
+ "%s (state of %qE: %qs -> %qs, origin: %qE)",
+ custom_desc.m_buffer,
+ m_var,
+ m_sm.get_state_name (m_from),
+ m_sm.get_state_name (m_to),
+ m_origin);
+ else
+ result = make_label_text
+ (can_colorize,
+ "%s (state of %qE: %qs -> %qs, origin: NULL)",
+ custom_desc.m_buffer,
+ m_var,
+ m_sm.get_state_name (m_from),
+ m_sm.get_state_name (m_to));
+ custom_desc.maybe_free ();
+ return result;
+ }
+ else
+ return custom_desc;
+ }
+ }
+
+ /* Fallback description. */
+ if (m_var)
+ {
+ if (m_origin)
+ return make_label_text
+ (can_colorize,
+ "state of %qE: %qs -> %qs (origin: %qE)",
+ m_var,
+ m_sm.get_state_name (m_from),
+ m_sm.get_state_name (m_to),
+ m_origin);
+ else
+ return make_label_text
+ (can_colorize,
+ "state of %qE: %qs -> %qs (origin: NULL)",
+ m_var,
+ m_sm.get_state_name (m_from),
+ m_sm.get_state_name (m_to));
+ }
+ else
+ {
+ gcc_assert (m_origin == NULL_TREE);
+ return make_label_text
+ (can_colorize,
+ "global state: %qs -> %qs",
+ m_sm.get_state_name (m_from),
+ m_sm.get_state_name (m_to));
+ }
+}
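+
+/* For example (illustrative only; the exact wording and state names come
+   from whichever state machine and pending diagnostic are involved): with
+   -fanalyzer-verbose-state-changes a custom description gains a suffix
+   such as
+     "allocated here (state of 'ptr': 'start' -> 'unchecked', origin: NULL)"
+   whereas the fallback form reads
+     "state of 'ptr': 'start' -> 'unchecked' (origin: NULL)".  */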
+
+/* class superedge_event : public checker_event. */
+
+/* Get the callgraph_superedge for this superedge_event, which must be
+ for an interprocedural edge, rather than a CFG edge. */
+
+const callgraph_superedge&
+superedge_event::get_callgraph_superedge () const
+{
+ gcc_assert (m_sedge->m_kind != SUPEREDGE_CFG_EDGE);
+ return *m_sedge->dyn_cast_callgraph_superedge ();
+}
+
+/* Determine if this event should be filtered at the given verbosity
+ level. */
+
+bool
+superedge_event::should_filter_p (int verbosity) const
+{
+ switch (m_sedge->m_kind)
+ {
+ case SUPEREDGE_CFG_EDGE:
+ {
+ if (verbosity < 2)
+ return true;
+
+ if (verbosity == 2)
+ {
+ /* Filter events with empty descriptions. This ought to filter
+ FALLTHRU, but retain true/false/switch edges. */
+ label_text desc = get_desc (false);
+ gcc_assert (desc.m_buffer);
+	    if (desc.m_buffer[0] == '\0')
+	      {
+		desc.maybe_free ();
+		return true;
+	      }
+	    desc.maybe_free ();
+ }
+ }
+ break;
+
+ default:
+ break;
+ }
+ return false;
+}
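+
+/* For example (illustrative only): at verbosity levels below 2 every
+   CFG-edge event is filtered out; at level 2 only those whose description
+   is empty (such as plain fallthrough edges) are dropped, so events like
+   "following 'true' branch..." survive.  */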
+
+/* superedge_event's ctor. */
+
+superedge_event::superedge_event (enum event_kind kind,
+ const exploded_edge &eedge,
+ location_t loc, tree fndecl, int depth)
+: checker_event (kind, loc, fndecl, depth),
+ m_eedge (eedge), m_sedge (eedge.m_sedge),
+ m_var (NULL_TREE), m_critical_state (0)
+{
+}
+
+/* class cfg_edge_event : public superedge_event. */
+
+/* Get the cfg_superedge for this cfg_edge_event. */
+
+const cfg_superedge &
+cfg_edge_event::get_cfg_superedge () const
+{
+ return *m_sedge->dyn_cast_cfg_superedge ();
+}
+
+/* cfg_edge_event's ctor. */
+
+cfg_edge_event::cfg_edge_event (enum event_kind kind,
+ const exploded_edge &eedge,
+ location_t loc, tree fndecl, int depth)
+: superedge_event (kind, eedge, loc, fndecl, depth)
+{
+ gcc_assert (eedge.m_sedge->m_kind == SUPEREDGE_CFG_EDGE);
+}
+
+/* class start_cfg_edge_event : public cfg_edge_event. */
+
+/* Implementation of diagnostic_event::get_desc vfunc for
+ start_cfg_edge_event.
+
+ If -fanalyzer-verbose-edges, then generate low-level descriptions, such
+ as
+ "taking 'true' edge SN:7 -> SN:8".
+
+   Otherwise, generate strings using the label of the underlying CFG edge,
+   if any, such as:
+ "following 'true' branch..." or
+ "following 'case 3' branch..."
+ "following 'default' branch..."
+
+ For conditionals, attempt to supply a description of the condition that
+ holds, such as:
+ "following 'false' branch (when 'ptr' is non-NULL)..."
+
+ Failing that, return an empty description (which will lead to this event
+ being filtered). */
+
+label_text
+start_cfg_edge_event::get_desc (bool can_colorize) const
+{
+ bool user_facing = !flag_analyzer_verbose_edges;
+ char *edge_desc = m_sedge->get_description (user_facing);
+ if (user_facing)
+ {
+ if (edge_desc && strlen (edge_desc) > 0)
+ {
+ label_text cond_desc = maybe_describe_condition (can_colorize);
+ label_text result;
+ if (cond_desc.m_buffer)
+ {
+ result = make_label_text (can_colorize,
+ "following %qs branch (%s)...",
+ edge_desc, cond_desc.m_buffer);
+ cond_desc.maybe_free ();
+ }
+ else
+ {
+ result = make_label_text (can_colorize,
+ "following %qs branch...",
+ edge_desc);
+ }
+ free (edge_desc);
+ return result;
+ }
+ else
+ {
+ free (edge_desc);
+ return label_text::borrow ("");
+ }
+ }
+ else
+ {
+ if (strlen (edge_desc) > 0)
+ {
+ label_text result
+ = make_label_text (can_colorize,
+ "taking %qs edge SN:%i -> SN:%i",
+ edge_desc,
+ m_sedge->m_src->m_index,
+ m_sedge->m_dest->m_index);
+ free (edge_desc);
+ return result;
+ }
+ else
+ {
+ free (edge_desc);
+ return make_label_text (can_colorize,
+ "taking edge SN:%i -> SN:%i",
+ m_sedge->m_src->m_index,
+ m_sedge->m_dest->m_index);
+ }
+ }
+}
+
+/* Attempt to generate a description of any condition that holds at this edge.
+
+ The intent is to make the user-facing messages more clear, especially for
+ cases where there's a single or double-negative, such as
+ when describing the false branch of an inverted condition.
+
+ For example, rather than printing just:
+
+ | if (!ptr)
+ | ~
+ | |
+ | (1) following 'false' branch...
+
+ it's clearer to spell out the condition that holds:
+
+ | if (!ptr)
+ | ~
+ | |
+ | (1) following 'false' branch (when 'ptr' is non-NULL)...
+ ^^^^^^^^^^^^^^^^^^^^^^
+
+ In the above example, this function would generate the highlighted
+ string: "when 'ptr' is non-NULL".
+
+ If the edge is not a condition, or it's not clear that a description of
+ the condition would be helpful to the user, return NULL. */
+
+label_text
+start_cfg_edge_event::maybe_describe_condition (bool can_colorize) const
+{
+ const cfg_superedge& cfg_sedge = get_cfg_superedge ();
+
+ if (cfg_sedge.true_value_p () || cfg_sedge.false_value_p ())
+ {
+ const gimple *last_stmt = m_sedge->m_src->get_last_stmt ();
+ if (const gcond *cond_stmt = dyn_cast <const gcond *> (last_stmt))
+ {
+ enum tree_code op = gimple_cond_code (cond_stmt);
+ tree lhs = gimple_cond_lhs (cond_stmt);
+ tree rhs = gimple_cond_rhs (cond_stmt);
+ if (cfg_sedge.false_value_p ())
+ op = invert_tree_comparison (op, false /* honor_nans */);
+ return maybe_describe_condition (can_colorize,
+ lhs, op, rhs);
+ }
+ }
+ return label_text::borrow (NULL);
+}
+
+/* Subroutine of maybe_describe_condition above.
+
+ Attempt to generate a user-facing description of the condition
+ LHS OP RHS, but only if it is likely to make it easier for the
+ user to understand a condition. */
+
+label_text
+start_cfg_edge_event::maybe_describe_condition (bool can_colorize,
+ tree lhs,
+ enum tree_code op,
+ tree rhs)
+{
+ /* In theory we could just build a tree via
+ fold_build2 (op, boolean_type_node, lhs, rhs)
+ and print it with %qE on it, but this leads to warts such as
+ parenthesizing vars, such as '(i) <= 9', and uses of '<unknown>'. */
+
+ /* Special-case: describe testing the result of strcmp, as figuring
+ out what the "true" or "false" path is can be confusing to the user. */
+ if (TREE_CODE (lhs) == SSA_NAME
+ && zerop (rhs))
+ {
+ if (gcall *call = dyn_cast <gcall *> (SSA_NAME_DEF_STMT (lhs)))
+ if (is_special_named_call_p (call, "strcmp", 2))
+ {
+ if (op == EQ_EXPR)
+ return label_text::borrow ("when the strings are equal");
+ if (op == NE_EXPR)
+ return label_text::borrow ("when the strings are non-equal");
+ }
+ }
+
+ /* Only attempt to generate text for sufficiently simple expressions. */
+ if (!should_print_expr_p (lhs))
+ return label_text::borrow (NULL);
+ if (!should_print_expr_p (rhs))
+ return label_text::borrow (NULL);
+
+ /* Special cases for pointer comparisons against NULL. */
+ if (POINTER_TYPE_P (TREE_TYPE (lhs))
+ && POINTER_TYPE_P (TREE_TYPE (rhs))
+ && zerop (rhs))
+ {
+ if (op == EQ_EXPR)
+ return make_label_text (can_colorize, "when %qE is NULL",
+ lhs);
+ if (op == NE_EXPR)
+ return make_label_text (can_colorize, "when %qE is non-NULL",
+ lhs);
+ }
+
+ return make_label_text (can_colorize, "when %<%E %s %E%>",
+ lhs, op_symbol_code (op), rhs);
+}
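+
+/* Illustrative examples of the above (not exercised by this patch): for a
+   pointer test such as "ptr == 0" this yields "when 'ptr' is NULL", and
+   "ptr != 0" yields "when 'ptr' is non-NULL"; a comparison whose operands
+   are anonymous SSA names yields a NULL label_text, since
+   should_print_expr_p below rejects SSA names with no underlying decl.  */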
+
+/* Subroutine of maybe_describe_condition.
+
+   Return true if we will get suitable user-facing output
+   from %E on EXPR.  */
+
+bool
+start_cfg_edge_event::should_print_expr_p (tree expr)
+{
+ if (TREE_CODE (expr) == SSA_NAME)
+ {
+ if (SSA_NAME_VAR (expr))
+ return should_print_expr_p (SSA_NAME_VAR (expr));
+ else
+ return false;
+ }
+
+ if (DECL_P (expr))
+ return true;
+
+ if (CONSTANT_CLASS_P (expr))
+ return true;
+
+ return false;
+}
+
+/* class call_event : public superedge_event. */
+
+/* call_event's ctor. */
+
+call_event::call_event (const exploded_edge &eedge,
+ location_t loc, tree fndecl, int depth)
+: superedge_event (EK_CALL_EDGE, eedge, loc, fndecl, depth)
+{
+ gcc_assert (eedge.m_sedge->m_kind == SUPEREDGE_CALL);
+}
+
+/* Implementation of diagnostic_event::get_desc vfunc for
+ call_event.
+
+ If this call event passes critical state for an sm-based warning,
+ allow the diagnostic to generate a precise description, such as:
+
+ "passing freed pointer 'ptr' in call to 'foo' from 'bar'"
+
+ Otherwise, generate a description of the form
+ "calling 'foo' from 'bar'". */
+
+label_text
+call_event::get_desc (bool can_colorize) const
+{
+ if (m_critical_state && m_pending_diagnostic)
+ {
+ gcc_assert (m_var);
+ label_text custom_desc
+ = m_pending_diagnostic->describe_call_with_state
+ (evdesc::call_with_state (can_colorize,
+ m_sedge->m_src->m_fun->decl,
+ m_sedge->m_dest->m_fun->decl,
+ m_var,
+ m_critical_state));
+ if (custom_desc.m_buffer)
+ return custom_desc;
+ }
+
+ return make_label_text (can_colorize,
+ "calling %qE from %qE",
+ m_sedge->m_dest->m_fun->decl,
+ m_sedge->m_src->m_fun->decl);
+}
+
+/* Override of checker_event::is_call_p for calls. */
+
+bool
+call_event::is_call_p () const
+{
+ return true;
+}
+
+/* class return_event : public superedge_event. */
+
+/* return_event's ctor. */
+
+return_event::return_event (const exploded_edge &eedge,
+ location_t loc, tree fndecl, int depth)
+: superedge_event (EK_RETURN_EDGE, eedge, loc, fndecl, depth)
+{
+ gcc_assert (eedge.m_sedge->m_kind == SUPEREDGE_RETURN);
+}
+
+/* Implementation of diagnostic_event::get_desc vfunc for
+ return_event.
+
+ If this return event returns critical state for an sm-based warning,
+ allow the diagnostic to generate a precise description, such as:
+
+ "possible of NULL to 'foo' from 'bar'"
+
+ Otherwise, generate a description of the form
+ "returning to 'foo' from 'bar'. */
+
+label_text
+return_event::get_desc (bool can_colorize) const
+{
+ /* For greatest precision-of-wording, if this is returning the
+ state involved in the pending diagnostic, give the pending
+ diagnostic a chance to describe this return (in terms of
+ itself). */
+ if (m_critical_state && m_pending_diagnostic)
+ {
+ label_text custom_desc
+ = m_pending_diagnostic->describe_return_of_state
+ (evdesc::return_of_state (can_colorize,
+ m_sedge->m_dest->m_fun->decl,
+ m_sedge->m_src->m_fun->decl,
+ m_critical_state));
+ if (custom_desc.m_buffer)
+ return custom_desc;
+ }
+ return make_label_text (can_colorize,
+ "returning to %qE from %qE",
+ m_sedge->m_dest->m_fun->decl,
+ m_sedge->m_src->m_fun->decl);
+}
+
+/* Override of checker_event::is_return_p for returns. */
+
+bool
+return_event::is_return_p () const
+{
+ return true;
+}
+
+/* class setjmp_event : public checker_event. */
+
+/* Implementation of diagnostic_event::get_desc vfunc for
+ setjmp_event. */
+
+label_text
+setjmp_event::get_desc (bool can_colorize) const
+{
+ return make_label_text (can_colorize,
+ "%qs called here",
+ "setjmp");
+}
+
+/* Implementation of checker_event::prepare_for_emission vfunc for setjmp_event.
+
+ Record this setjmp's event ID into the path, so that rewind events can
+ use it. */
+
+void
+setjmp_event::prepare_for_emission (checker_path *path,
+ pending_diagnostic *pd,
+ diagnostic_event_id_t emission_id)
+{
+ checker_event::prepare_for_emission (path, pd, emission_id);
+ path->record_setjmp_event (m_enode, emission_id);
+}
+
+/* class rewind_event : public checker_event. */
+
+/* Get the fndecl containing the site of the longjmp call. */
+
+tree
+rewind_event::get_longjmp_caller () const
+{
+ return m_eedge->m_src->get_function ()->decl;
+}
+
+/* Get the fndecl containing the site of the setjmp call. */
+
+tree
+rewind_event::get_setjmp_caller () const
+{
+ return m_eedge->m_dest->get_function ()->decl;
+}
+
+/* rewind_event's ctor. */
+
+rewind_event::rewind_event (const exploded_edge *eedge,
+ enum event_kind kind,
+ location_t loc, tree fndecl, int depth)
+: checker_event (kind, loc, fndecl, depth),
+ m_eedge (eedge)
+{
+ gcc_assert (m_eedge->m_custom_info); // a rewind_info_t
+}
+
+/* class rewind_from_longjmp_event : public rewind_event. */
+
+/* Implementation of diagnostic_event::get_desc vfunc for
+ rewind_from_longjmp_event. */
+
+label_text
+rewind_from_longjmp_event::get_desc (bool can_colorize) const
+{
+ const char *src_name = "longjmp";
+
+ if (get_longjmp_caller () == get_setjmp_caller ())
+ /* Special-case: purely intraprocedural rewind. */
+ return make_label_text (can_colorize,
+ "rewinding within %qE from %qs...",
+ get_longjmp_caller (),
+ src_name);
+ else
+ return make_label_text (can_colorize,
+ "rewinding from %qs in %qE...",
+ src_name,
+ get_longjmp_caller ());
+}
+
+/* class rewind_to_setjmp_event : public rewind_event. */
+
+/* Implementation of diagnostic_event::get_desc vfunc for
+ rewind_to_setjmp_event. */
+
+label_text
+rewind_to_setjmp_event::get_desc (bool can_colorize) const
+{
+ const char *dst_name = "setjmp";
+
+ /* If we can, identify the ID of the setjmp_event. */
+ if (m_original_setjmp_event_id.known_p ())
+ {
+ if (get_longjmp_caller () == get_setjmp_caller ())
+ /* Special-case: purely intraprocedural rewind. */
+ return make_label_text (can_colorize,
+ "...to %qs (saved at %@)",
+ dst_name,
+ &m_original_setjmp_event_id);
+ else
+ return make_label_text (can_colorize,
+ "...to %qs in %qE (saved at %@)",
+ dst_name,
+ get_setjmp_caller (),
+ &m_original_setjmp_event_id);
+ }
+ else
+ {
+ if (get_longjmp_caller () == get_setjmp_caller ())
+ /* Special-case: purely intraprocedural rewind. */
+	return make_label_text (can_colorize,
+				"...to %qs",
+				dst_name);
+ else
+ return make_label_text (can_colorize,
+ "...to %qs in %qE",
+ dst_name,
+ get_setjmp_caller ());
+ }
+}
+
+/* Implementation of checker_event::prepare_for_emission vfunc for
+ rewind_to_setjmp_event.
+
+ Attempt to look up the setjmp event ID that recorded the jmp_buf
+ for this rewind. */
+
+void
+rewind_to_setjmp_event::prepare_for_emission (checker_path *path,
+ pending_diagnostic *pd,
+ diagnostic_event_id_t emission_id)
+{
+ checker_event::prepare_for_emission (path, pd, emission_id);
+ path->get_setjmp_event (m_rewind_info->get_enode_origin (),
+ &m_original_setjmp_event_id);
+}
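+
+/* Taken together (illustrative summary only): when the path is emitted,
+   setjmp_event::prepare_for_emission stores its event id keyed by the
+   exploded node of the setjmp call, and rewind_to_setjmp_event looks that
+   id up again, so its description can read e.g.
+     "...to 'setjmp' in 'foo' (saved at (2))".  */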
+
+/* class warning_event : public checker_event. */
+
+/* Implementation of diagnostic_event::get_desc vfunc for
+ warning_event.
+
+ If the pending diagnostic implements describe_final_event, use it,
+ generating a precise description e.g.
+ "second 'free' here; first 'free' was at (7)"
+
+ Otherwise generate a generic description. */
+
+label_text
+warning_event::get_desc (bool can_colorize) const
+{
+ if (m_pending_diagnostic)
+ {
+ label_text ev_desc
+ = m_pending_diagnostic->describe_final_event
+ (evdesc::final_event (can_colorize, m_var, m_state));
+ if (ev_desc.m_buffer)
+ {
+ if (m_sm && flag_analyzer_verbose_state_changes)
+ {
+ label_text result
+ = make_label_text (can_colorize,
+ "%s (%qE is in state %qs)",
+ ev_desc.m_buffer,
+				 m_var, m_sm->get_state_name (m_state));
+ ev_desc.maybe_free ();
+ return result;
+ }
+ else
+ return ev_desc;
+ }
+ }
+
+ if (m_sm)
+ return make_label_text (can_colorize,
+ "here (%qE is in state %qs)",
+ m_var,
+ m_sm->get_state_name (m_state));
+ else
+ return label_text::borrow ("here");
+}
+
+/* Print a single-line representation of this path to PP. */
+
+void
+checker_path::dump (pretty_printer *pp) const
+{
+ pp_character (pp, '[');
+
+ checker_event *e;
+ int i;
+ FOR_EACH_VEC_ELT (m_events, i, e)
+ {
+ if (i > 0)
+ pp_string (pp, ", ");
+ label_text event_desc (e->get_desc (false));
+ pp_printf (pp, "\"%s\"", event_desc.m_buffer);
+ event_desc.maybe_free ();
+ }
+ pp_character (pp, ']');
+}
+
+/* Print a multiline form of this path to LOGGER, prefixing it with DESC. */
+
+void
+checker_path::maybe_log (logger *logger, const char *desc) const
+{
+ if (!logger)
+ return;
+ logger->start_log_line ();
+ logger->log_partial ("%s: ", desc);
+ dump (logger->get_printer ());
+ logger->end_log_line ();
+ for (unsigned i = 0; i < m_events.length (); i++)
+ {
+ logger->start_log_line ();
+ logger->log_partial ("%s[%i]: %s ", desc, i,
+ event_kind_to_string (m_events[i]->m_kind));
+ m_events[i]->dump (logger->get_printer ());
+ logger->end_log_line ();
+ }
+}
+
+/* Print a multiline form of this path to STDERR. */
+
+DEBUG_FUNCTION void
+checker_path::debug () const
+{
+ checker_event *e;
+ int i;
+ FOR_EACH_VEC_ELT (m_events, i, e)
+ {
+ label_text event_desc (e->get_desc (false));
+ fprintf (stderr,
+ "[%i]: %s \"%s\"\n",
+ i,
+ event_kind_to_string (m_events[i]->m_kind),
+ event_desc.m_buffer);
+ event_desc.maybe_free ();
+ }
+}
+
+/* Add a warning_event to the end of this path. */
+
+void
+checker_path::add_final_event (const state_machine *sm,
+ const exploded_node *enode, const gimple *stmt,
+ tree var, state_machine::state_t state)
+{
+ checker_event *end_of_path
+ = new warning_event (stmt->location,
+ enode->get_function ()->decl,
+ enode->get_stack_depth (),
+ sm, var, state);
+ add_event (end_of_path);
+}
+
+#endif /* #if ENABLE_ANALYZER */
--- /dev/null
+/* Subclasses of diagnostic_path and diagnostic_event for analyzer diagnostics.
+ Copyright (C) 2019-2020 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_ANALYZER_CHECKER_PATH_H
+#define GCC_ANALYZER_CHECKER_PATH_H
+
+/* An enum for discriminating between the concrete subclasses of
+ checker_event. */
+
+enum event_kind
+{
+ EK_DEBUG,
+ EK_CUSTOM,
+ EK_STMT,
+ EK_FUNCTION_ENTRY,
+ EK_STATE_CHANGE,
+ EK_START_CFG_EDGE,
+ EK_END_CFG_EDGE,
+ EK_CALL_EDGE,
+ EK_RETURN_EDGE,
+ EK_SETJMP,
+ EK_REWIND_FROM_LONGJMP,
+ EK_REWIND_TO_SETJMP,
+ EK_WARNING
+};
+
+extern const char *event_kind_to_string (enum event_kind ek);
+
+/* Event subclasses.
+
+ The class hierarchy looks like this (using indentation to show
+ inheritance, and with event_kinds shown for the concrete subclasses):
+
+ diagnostic_event
+ checker_event
+ debug_event (EK_DEBUG)
+ custom_event (EK_CUSTOM)
+ statement_event (EK_STMT)
+ function_entry_event (EK_FUNCTION_ENTRY)
+ state_change_event (EK_STATE_CHANGE)
+ superedge_event
+ cfg_edge_event
+ start_cfg_edge_event (EK_START_CFG_EDGE)
+ end_cfg_edge_event (EK_END_CFG_EDGE)
+ call_event (EK_CALL_EDGE)
+       return_event (EK_RETURN_EDGE)
+ setjmp_event (EK_SETJMP)
+ rewind_event
+ rewind_from_longjmp_event (EK_REWIND_FROM_LONGJMP)
+ rewind_to_setjmp_event (EK_REWIND_TO_SETJMP)
+ warning_event (EK_WARNING). */
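+
+/* Illustrative sketch of how these classes fit together (the real call
+   sites live elsewhere in the analyzer, not in this header; eedge, loc,
+   fndecl, depth, sm, enode, stmt, var, state and pd are placeholders for
+   values obtained from the exploded graph and the pending diagnostic):
+
+     checker_path path;
+     path.add_event (new start_cfg_edge_event (eedge, loc, fndecl, depth));
+     path.add_event (new end_cfg_edge_event (eedge, loc, fndecl, depth));
+     path.add_final_event (sm, enode, stmt, var, state);
+     // ...prune uninteresting events via delete_event...
+     path.prepare_for_emission (pd);
+
+   after which each surviving event has an emission id that later events
+   can refer to in their descriptions.  */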
+
+/* Abstract subclass of diagnostic_event; the base class for use in
+ checker_path (the analyzer's diagnostic_path subclass). */
+
+class checker_event : public diagnostic_event
+{
+public:
+ checker_event (enum event_kind kind,
+ location_t loc, tree fndecl, int depth)
+ : m_kind (kind), m_loc (loc), m_fndecl (fndecl), m_depth (depth),
+ m_pending_diagnostic (NULL), m_emission_id ()
+ {
+ }
+
+ /* Implementation of diagnostic_event. */
+
+ location_t get_location () const FINAL OVERRIDE { return m_loc; }
+ tree get_fndecl () const FINAL OVERRIDE { return m_fndecl; }
+ int get_stack_depth () const FINAL OVERRIDE { return m_depth; }
+
+ /* Additional functionality. */
+
+ virtual checker_event *clone () const = 0;
+
+ virtual void prepare_for_emission (checker_path *,
+ pending_diagnostic *pd,
+ diagnostic_event_id_t emission_id);
+ virtual bool is_call_p () const { return false; }
+ virtual bool is_function_entry_p () const { return false; }
+ virtual bool is_return_p () const { return false; }
+
+ void dump (pretty_printer *pp) const;
+
+ public:
+ const enum event_kind m_kind;
+ protected:
+ location_t m_loc;
+ tree m_fndecl;
+ int m_depth;
+ pending_diagnostic *m_pending_diagnostic;
+ diagnostic_event_id_t m_emission_id; // only set once all pruning has occurred
+};
+
+/* A concrete event subclass for a purely textual event, for use in
+ debugging path creation and filtering. */
+
+class debug_event : public checker_event
+{
+public:
+ debug_event (location_t loc, tree fndecl, int depth,
+ const char *desc)
+ : checker_event (EK_DEBUG, loc, fndecl, depth),
+ m_desc (xstrdup (desc))
+ {
+ }
+ ~debug_event ()
+ {
+ free (m_desc);
+ }
+
+ label_text get_desc (bool) const FINAL OVERRIDE;
+
+ checker_event *clone () const FINAL OVERRIDE
+ {
+ return new debug_event (m_loc, m_fndecl, m_depth, m_desc);
+ }
+
+private:
+ char *m_desc;
+};
+
+/* A concrete event subclass for custom events. These are not filtered,
+ as they are likely to be pertinent to the diagnostic. */
+
+class custom_event : public checker_event
+{
+public:
+ custom_event (location_t loc, tree fndecl, int depth,
+ const char *desc)
+ : checker_event (EK_CUSTOM, loc, fndecl, depth),
+ m_desc (xstrdup (desc))
+ {
+ }
+ ~custom_event ()
+ {
+ free (m_desc);
+ }
+
+ label_text get_desc (bool) const FINAL OVERRIDE;
+
+ checker_event *clone () const FINAL OVERRIDE
+ {
+ return new custom_event (m_loc, m_fndecl, m_depth, m_desc);
+ }
+
+private:
+ char *m_desc;
+};
+
+/* A concrete event subclass describing the execution of a gimple statement,
+ for use at high verbosity levels when debugging paths. */
+
+class statement_event : public checker_event
+{
+public:
+ statement_event (const gimple *stmt, tree fndecl, int depth,
+ const program_state &dst_state);
+
+ label_text get_desc (bool) const FINAL OVERRIDE;
+
+ checker_event *clone () const FINAL OVERRIDE
+ {
+ return new statement_event (m_stmt, m_fndecl, m_depth, m_dst_state);
+ }
+
+ const gimple * const m_stmt;
+ const program_state m_dst_state;
+};
+
+/* An event subclass describing the entry to a function. */
+
+class function_entry_event : public checker_event
+{
+public:
+ function_entry_event (location_t loc, tree fndecl, int depth)
+ : checker_event (EK_FUNCTION_ENTRY, loc, fndecl, depth)
+ {
+ }
+
+ label_text get_desc (bool can_colorize) const FINAL OVERRIDE;
+
+ checker_event *clone () const FINAL OVERRIDE
+ {
+ return new function_entry_event (m_loc, m_fndecl, m_depth);
+ }
+
+ bool is_function_entry_p () const FINAL OVERRIDE { return true; }
+};
+
+/* Subclass of checker_event describing a state change. */
+
+class state_change_event : public checker_event
+{
+public:
+ state_change_event (const supernode *node, const gimple *stmt,
+ int stack_depth,
+ const state_machine &sm,
+ tree var,
+ state_machine::state_t from,
+ state_machine::state_t to,
+ tree origin,
+ const program_state &dst_state);
+
+ label_text get_desc (bool can_colorize) const FINAL OVERRIDE;
+
+ checker_event *clone () const FINAL OVERRIDE
+ {
+ return new state_change_event (m_node, m_stmt, m_depth,
+ m_sm, m_var, m_from, m_to, m_origin,
+ m_dst_state);
+ }
+
+ region_id get_lvalue (tree expr) const
+ {
+ return m_dst_state.m_region_model->get_lvalue (expr, NULL);
+ }
+
+ const supernode *m_node;
+ const gimple *m_stmt;
+ const state_machine &m_sm;
+ tree m_var;
+ state_machine::state_t m_from;
+ state_machine::state_t m_to;
+ tree m_origin;
+ program_state m_dst_state;
+};
+
+/* Subclass of checker_event; parent class for subclasses that relate to
+ a superedge. */
+
+class superedge_event : public checker_event
+{
+public:
+ /* Mark this edge event as being either an interprocedural call or
+ return in which VAR is in STATE, and that this is critical to the
+ diagnostic (so that get_desc can attempt to get a better description
+ from any pending_diagnostic). */
+ void record_critical_state (tree var, state_machine::state_t state)
+ {
+ m_var = var;
+ m_critical_state = state;
+ }
+
+ const callgraph_superedge& get_callgraph_superedge () const;
+
+ bool should_filter_p (int verbosity) const;
+
+ protected:
+ superedge_event (enum event_kind kind, const exploded_edge &eedge,
+ location_t loc, tree fndecl, int depth);
+
+ public:
+ const exploded_edge &m_eedge;
+ const superedge *m_sedge;
+ tree m_var;
+ state_machine::state_t m_critical_state;
+};
+
+/* An abstract event subclass for when a CFG edge is followed; it has two
+ subclasses, representing the start of the edge and the end of the
+ edge, which come in pairs. */
+
+class cfg_edge_event : public superedge_event
+{
+public:
+ const cfg_superedge& get_cfg_superedge () const;
+
+ protected:
+ cfg_edge_event (enum event_kind kind, const exploded_edge &eedge,
+ location_t loc, tree fndecl, int depth);
+};
+
+/* A concrete event subclass for the start of a CFG edge
+ e.g. "following 'false' branch...'. */
+
+class start_cfg_edge_event : public cfg_edge_event
+{
+public:
+ start_cfg_edge_event (const exploded_edge &eedge,
+ location_t loc, tree fndecl, int depth)
+ : cfg_edge_event (EK_START_CFG_EDGE, eedge, loc, fndecl, depth)
+ {
+ }
+
+ label_text get_desc (bool can_colorize) const FINAL OVERRIDE;
+
+ checker_event *clone () const FINAL OVERRIDE
+ {
+ return new start_cfg_edge_event (m_eedge, m_loc, m_fndecl, m_depth);
+ }
+
+ private:
+ label_text maybe_describe_condition (bool can_colorize) const;
+
+ static label_text maybe_describe_condition (bool can_colorize,
+ tree lhs,
+ enum tree_code op,
+ tree rhs);
+ static bool should_print_expr_p (tree);
+};
+
+/* A concrete event subclass for the end of a CFG edge
+ e.g. "...to here'. */
+
+class end_cfg_edge_event : public cfg_edge_event
+{
+public:
+ end_cfg_edge_event (const exploded_edge &eedge,
+ location_t loc, tree fndecl, int depth)
+ : cfg_edge_event (EK_END_CFG_EDGE, eedge, loc, fndecl, depth)
+ {
+ }
+
+ label_text get_desc (bool /*can_colorize*/) const FINAL OVERRIDE
+ {
+ return label_text::borrow ("...to here");
+ }
+
+ checker_event *clone () const FINAL OVERRIDE
+ {
+ return new end_cfg_edge_event (m_eedge, m_loc, m_fndecl, m_depth);
+ }
+};
+
+/* A concrete event subclass for an interprocedural call. */
+
+class call_event : public superedge_event
+{
+public:
+ call_event (const exploded_edge &eedge,
+ location_t loc, tree fndecl, int depth);
+
+ label_text get_desc (bool can_colorize) const FINAL OVERRIDE;
+
+ checker_event *clone () const FINAL OVERRIDE
+ {
+ return new call_event (m_eedge, m_loc, m_fndecl, m_depth);
+ }
+
+ bool is_call_p () const FINAL OVERRIDE;
+};
+
+/* A concrete event subclass for an interprocedural return. */
+
+class return_event : public superedge_event
+{
+public:
+ return_event (const exploded_edge &eedge,
+ location_t loc, tree fndecl, int depth);
+
+ label_text get_desc (bool can_colorize) const FINAL OVERRIDE;
+
+ checker_event *clone () const FINAL OVERRIDE
+ {
+ return new return_event (m_eedge, m_loc, m_fndecl, m_depth);
+ }
+
+ bool is_return_p () const FINAL OVERRIDE;
+};
+
+/* A concrete event subclass for a setjmp call. */
+
+class setjmp_event : public checker_event
+{
+public:
+ setjmp_event (location_t loc, const exploded_node *enode,
+ tree fndecl, int depth)
+ : checker_event (EK_SETJMP, loc, fndecl, depth),
+ m_enode (enode)
+ {
+ }
+
+ setjmp_event *clone () const FINAL OVERRIDE
+ {
+ return new setjmp_event (m_loc, m_enode, m_fndecl, m_depth);
+ }
+
+ label_text get_desc (bool can_colorize) const FINAL OVERRIDE;
+
+ void prepare_for_emission (checker_path *path,
+ pending_diagnostic *pd,
+ diagnostic_event_id_t emission_id) FINAL OVERRIDE;
+
+private:
+ const exploded_node *m_enode;
+};
+
+/* An abstract event subclass for rewinding from a longjmp to a setjmp.
+ Base class for two from/to subclasses, showing the two halves of the
+ rewind. */
+
+class rewind_event : public checker_event
+{
+public:
+ tree get_longjmp_caller () const;
+ tree get_setjmp_caller () const;
+ const exploded_edge *get_eedge () const { return m_eedge; }
+
+ protected:
+ rewind_event (const exploded_edge *eedge,
+ enum event_kind kind,
+ location_t loc, tree fndecl, int depth);
+
+ private:
+ const exploded_edge *m_eedge;
+};
+
+/* A concrete event subclass for rewinding from a longjmp to a setjmp,
+ showing the longjmp. */
+
+class rewind_from_longjmp_event : public rewind_event
+{
+public:
+ rewind_from_longjmp_event (const exploded_edge *eedge,
+ location_t loc, tree fndecl, int depth)
+ : rewind_event (eedge, EK_REWIND_FROM_LONGJMP, loc, fndecl, depth)
+ {
+ }
+
+ label_text get_desc (bool can_colorize) const FINAL OVERRIDE;
+
+ rewind_from_longjmp_event *clone () const FINAL OVERRIDE
+ {
+ return new rewind_from_longjmp_event (get_eedge (),
+ m_loc, m_fndecl, m_depth);
+ }
+};
+
+/* A concrete event subclass for rewinding from a longjmp to a setjmp,
+ showing the setjmp. */
+
+class rewind_to_setjmp_event : public rewind_event
+{
+public:
+ rewind_to_setjmp_event (const exploded_edge *eedge,
+ location_t loc, tree fndecl, int depth,
+ const rewind_info_t *rewind_info)
+ : rewind_event (eedge, EK_REWIND_TO_SETJMP, loc, fndecl, depth),
+ m_rewind_info (rewind_info)
+ {
+ }
+
+ label_text get_desc (bool can_colorize) const FINAL OVERRIDE;
+
+ rewind_to_setjmp_event *clone () const FINAL OVERRIDE
+ {
+ return new rewind_to_setjmp_event (get_eedge (),
+ m_loc, m_fndecl, m_depth,
+ m_rewind_info);
+ }
+
+ void prepare_for_emission (checker_path *path,
+ pending_diagnostic *pd,
+ diagnostic_event_id_t emission_id) FINAL OVERRIDE;
+
+private:
+ diagnostic_event_id_t m_original_setjmp_event_id;
+ const rewind_info_t *m_rewind_info;
+};
+
+/* Concrete subclass of checker_event for use at the end of a path:
+ a repeat of the warning message at the end of the path (perhaps with
+ references to pertinent events that occurred on the way), at the point
+ where the problem occurs. */
+
+class warning_event : public checker_event
+{
+public:
+ warning_event (location_t loc, tree fndecl, int depth,
+ const state_machine *sm,
+ tree var, state_machine::state_t state)
+ : checker_event (EK_WARNING, loc, fndecl, depth),
+ m_sm (sm), m_var (var), m_state (state)
+ {
+ }
+
+ label_text get_desc (bool can_colorize) const FINAL OVERRIDE;
+
+ warning_event *clone () const FINAL OVERRIDE
+ {
+ return new warning_event (m_loc, m_fndecl, m_depth, m_sm, m_var, m_state);
+ }
+
+private:
+ const state_machine *m_sm;
+ tree m_var;
+ state_machine::state_t m_state;
+};
+
+/* Subclass of diagnostic_path for analyzer diagnostics. */
+
+class checker_path : public diagnostic_path
+{
+public:
+ checker_path () : diagnostic_path () {}
+
+ /* Implementation of diagnostic_path vfuncs. */
+
+ unsigned num_events () const FINAL OVERRIDE
+ {
+ return m_events.length ();
+ }
+
+ const diagnostic_event & get_event (int idx) const FINAL OVERRIDE
+ {
+ return *m_events[idx];
+ }
+
+ void dump (pretty_printer *pp) const;
+ void debug () const;
+
+ void maybe_log (logger *logger, const char *desc) const;
+
+ void add_event (checker_event *event)
+ {
+ m_events.safe_push (event);
+ }
+
+ void delete_event (int idx)
+ {
+ checker_event *event = m_events[idx];
+ m_events.ordered_remove (idx);
+ delete event;
+ }
+
+ void add_final_event (const state_machine *sm,
+ const exploded_node *enode, const gimple *stmt,
+ tree var, state_machine::state_t state);
+
+ /* After all event-pruning, a hook for notifying each event what
+ its ID will be. The events are notified in order, allowing
+ for later events to refer to the IDs of earlier events in
+ their descriptions. */
+ void prepare_for_emission (pending_diagnostic *pd)
+ {
+ checker_event *e;
+ int i;
+ FOR_EACH_VEC_ELT (m_events, i, e)
+ e->prepare_for_emission (this, pd, diagnostic_event_id_t (i));
+ }
+
+ void record_setjmp_event (const exploded_node *enode,
+ diagnostic_event_id_t setjmp_emission_id)
+ {
+ m_setjmp_event_ids.put (enode, setjmp_emission_id);
+ }
+
+ bool get_setjmp_event (const exploded_node *enode,
+ diagnostic_event_id_t *out_emission_id)
+ {
+ if (diagnostic_event_id_t *emission_id = m_setjmp_event_ids.get (enode))
+ {
+ *out_emission_id = *emission_id;
+ return true;
+ }
+ return false;
+ }
+
+ /* The events that have occurred along this path. */
+ auto_delete_vec<checker_event> m_events;
+
+  /* During prepare_for_emission (and after), the emission id of the
+     setjmp_event for each relevant exploded_node, so that rewind events
+     can refer to them in their descriptions.  */
+ hash_map <const exploded_node *, diagnostic_event_id_t> m_setjmp_event_ids;
+};
+
+#endif /* GCC_ANALYZER_CHECKER_PATH_H */
--- /dev/null
+/* Tracking equivalence classes and constraints at a point on an execution path.
+ Copyright (C) 2019-2020 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tree.h"
+#include "function.h"
+#include "basic-block.h"
+#include "gimple.h"
+#include "gimple-iterator.h"
+#include "fold-const.h"
+#include "selftest.h"
+#include "graphviz.h"
+#include "function.h"
+#include "analyzer/analyzer.h"
+#include "ordered-hash-map.h"
+#include "options.h"
+#include "cgraph.h"
+#include "cfg.h"
+#include "digraph.h"
+#include "analyzer/supergraph.h"
+#include "sbitmap.h"
+#include "tristate.h"
+#include "analyzer/region-model.h"
+#include "analyzer/constraint-manager.h"
+#include "analyzer/analyzer-selftests.h"
+
+#if ENABLE_ANALYZER
+
+/* One of the end-points of a range. */
+
+struct bound
+{
+ bound () : m_constant (NULL_TREE), m_closed (false) {}
+ bound (tree constant, bool closed)
+ : m_constant (constant), m_closed (closed) {}
+
+ void ensure_closed (bool is_upper);
+
+ const char * get_relation_as_str () const;
+
+ tree m_constant;
+ bool m_closed;
+};
+
+/* A range of values, used for determining if a value has been
+ constrained to just one possible constant value. */
+
+struct range
+{
+ range () : m_lower_bound (), m_upper_bound () {}
+ range (const bound &lower, const bound &upper)
+ : m_lower_bound (lower), m_upper_bound (upper) {}
+
+ void dump (pretty_printer *pp) const;
+
+ bool constrained_to_single_element (tree *out);
+
+ bound m_lower_bound;
+ bound m_upper_bound;
+};
+
+/* struct bound. */
+
+/* Ensure that this bound is closed by converting an open bound to a
+ closed one. */
+
+void
+bound::ensure_closed (bool is_upper)
+{
+ if (!m_closed)
+ {
+ /* Offset by 1 in the appropriate direction.
+ For example, convert 3 < x into 4 <= x,
+ and convert x < 5 into x <= 4. */
+ gcc_assert (CONSTANT_CLASS_P (m_constant));
+ m_constant = fold_build2 (is_upper ? MINUS_EXPR : PLUS_EXPR,
+ TREE_TYPE (m_constant),
+ m_constant, integer_one_node);
+ gcc_assert (CONSTANT_CLASS_P (m_constant));
+ m_closed = true;
+ }
+}
+
+/* Get "<=" vs "<" for this bound. */
+
+const char *
+bound::get_relation_as_str () const
+{
+ if (m_closed)
+ return "<=";
+ else
+ return "<";
+}
+
+/* struct range. */
+
+/* Dump this range to PP, which must support %E for tree. */
+
+void
+range::dump (pretty_printer *pp) const
+{
+PUSH_IGNORE_WFORMAT
+ pp_printf (pp, "%qE %s x %s %qE",
+ m_lower_bound.m_constant,
+ m_lower_bound.get_relation_as_str (),
+ m_upper_bound.get_relation_as_str (),
+ m_upper_bound.m_constant);
+POP_IGNORE_WFORMAT
+}
+
+/* Determine if there is only one possible value for this range.
+ If so, return true and write the constant to *OUT.
+ Otherwise, return false. */
+
+bool
+range::constrained_to_single_element (tree *out)
+{
+ if (!INTEGRAL_TYPE_P (TREE_TYPE (m_lower_bound.m_constant)))
+ return false;
+ if (!INTEGRAL_TYPE_P (TREE_TYPE (m_upper_bound.m_constant)))
+ return false;
+
+ /* Convert any open bounds to closed bounds. */
+ m_lower_bound.ensure_closed (false);
+ m_upper_bound.ensure_closed (true);
+
+ // Are they equal?
+ tree comparison
+ = fold_build2 (EQ_EXPR, boolean_type_node,
+ m_lower_bound.m_constant,
+ m_upper_bound.m_constant);
+ if (comparison == boolean_true_node)
+ {
+ *out = m_lower_bound.m_constant;
+ return true;
+ }
+ else
+ return false;
+}
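+
+/* For example (illustrative only): over an integer type the open range
+   "3 < x < 5" is closed to "4 <= x <= 4" by ensure_closed, so the only
+   possible value 4 is written to *OUT; "3 < x < 6" closes to
+   "4 <= x <= 5", which still covers two values, so false is returned.  */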
+
+/* class equiv_class. */
+
+/* equiv_class's default ctor. */
+
+equiv_class::equiv_class ()
+: m_constant (NULL_TREE), m_cst_sid (svalue_id::null ()),
+ m_vars ()
+{
+}
+
+/* equiv_class's copy ctor. */
+
+equiv_class::equiv_class (const equiv_class &other)
+: m_constant (other.m_constant), m_cst_sid (other.m_cst_sid),
+ m_vars (other.m_vars.length ())
+{
+ int i;
+ svalue_id *sid;
+ FOR_EACH_VEC_ELT (other.m_vars, i, sid)
+ m_vars.quick_push (*sid);
+}
+
+/* Print an all-on-one-line representation of this equiv_class to PP,
+ which must support %E for trees. */
+
+void
+equiv_class::print (pretty_printer *pp) const
+{
+ pp_character (pp, '{');
+ int i;
+ svalue_id *sid;
+ FOR_EACH_VEC_ELT (m_vars, i, sid)
+ {
+ if (i > 0)
+ pp_string (pp, " == ");
+ sid->print (pp);
+ }
+ if (m_constant)
+ {
+ if (i > 0)
+ pp_string (pp, " == ");
+PUSH_IGNORE_WFORMAT
+ pp_printf (pp, "%qE", m_constant);
+POP_IGNORE_WFORMAT
+ }
+ pp_character (pp, '}');
+}
+
+/* Generate a hash value for this equiv_class. */
+
+hashval_t
+equiv_class::hash () const
+{
+ inchash::hash hstate;
+ int i;
+ svalue_id *sid;
+
+ inchash::add_expr (m_constant, hstate);
+ FOR_EACH_VEC_ELT (m_vars, i, sid)
+ inchash::add (*sid, hstate);
+ return hstate.end ();
+}
+
+/* Equality operator for equiv_class. */
+
+bool
+equiv_class::operator== (const equiv_class &other)
+{
+ if (m_constant != other.m_constant)
+ return false; // TODO: use tree equality here?
+
+ /* FIXME: should we compare m_cst_sid? */
+
+ if (m_vars.length () != other.m_vars.length ())
+ return false;
+
+ int i;
+ svalue_id *sid;
+ FOR_EACH_VEC_ELT (m_vars, i, sid)
+ if (! (*sid == other.m_vars[i]))
+ return false;
+
+ return true;
+}
+
+/* Add SID to this equiv_class, using CM to check if it's a constant. */
+
+void
+equiv_class::add (svalue_id sid, const constraint_manager &cm)
+{
+ gcc_assert (!sid.null_p ());
+ if (tree cst = cm.maybe_get_constant (sid))
+ {
+ gcc_assert (CONSTANT_CLASS_P (cst));
+ /* FIXME: should we canonicalize which svalue is the constant
+ when there are multiple equal constants? */
+ m_constant = cst;
+ m_cst_sid = sid;
+ }
+ m_vars.safe_push (sid);
+}
+
+/* Remove SID from this equivalence class.
+ Return true if SID was the last var in the equivalence class (suggesting
+ a possible leak). */
+
+bool
+equiv_class::del (svalue_id sid)
+{
+ gcc_assert (!sid.null_p ());
+ gcc_assert (sid != m_cst_sid);
+
+ int i;
+ svalue_id *iv;
+ FOR_EACH_VEC_ELT (m_vars, i, iv)
+ {
+ if (*iv == sid)
+ {
+ m_vars[i] = m_vars[m_vars.length () - 1];
+ m_vars.pop ();
+ return m_vars.length () == 0;
+ }
+ }
+
+ /* SID must be in the class. */
+ gcc_unreachable ();
+ return false;
+}
+
+/* Get a representative member of this class, for handling cases
+ where the IDs can change mid-traversal. */
+
+svalue_id
+equiv_class::get_representative () const
+{
+ if (!m_cst_sid.null_p ())
+ return m_cst_sid;
+ else
+ {
+ gcc_assert (m_vars.length () > 0);
+ return m_vars[0];
+ }
+}
+
+/* Remap all svalue_ids within this equiv_class using MAP. */
+
+void
+equiv_class::remap_svalue_ids (const svalue_id_map &map)
+{
+ int i;
+ svalue_id *iv;
+ FOR_EACH_VEC_ELT (m_vars, i, iv)
+ map.update (iv);
+ map.update (&m_cst_sid);
+}
+
+/* Comparator for use by equiv_class::canonicalize. */
+
+static int
+svalue_id_cmp_by_id (const void *p1, const void *p2)
+{
+ const svalue_id *sid1 = (const svalue_id *)p1;
+ const svalue_id *sid2 = (const svalue_id *)p2;
+ return sid1->as_int () - sid2->as_int ();
+}
+
+/* Sort the svalue_ids within this equiv_class.  */
+
+void
+equiv_class::canonicalize ()
+{
+ m_vars.qsort (svalue_id_cmp_by_id);
+}
+
+/* Get a debug string for C_OP. */
+
+const char *
+constraint_op_code (enum constraint_op c_op)
+{
+ switch (c_op)
+ {
+ default:
+ gcc_unreachable ();
+ case CONSTRAINT_NE: return "!=";
+ case CONSTRAINT_LT: return "<";
+ case CONSTRAINT_LE: return "<=";
+ }
+}
+
+/* Convert C_OP to an enum tree_code. */
+
+enum tree_code
+constraint_tree_code (enum constraint_op c_op)
+{
+ switch (c_op)
+ {
+ default:
+ gcc_unreachable ();
+ case CONSTRAINT_NE: return NE_EXPR;
+ case CONSTRAINT_LT: return LT_EXPR;
+ case CONSTRAINT_LE: return LE_EXPR;
+ }
+}
+
+/* Given "lhs C_OP rhs", determine "lhs T_OP rhs".
+
+ For example, given "x < y", then "x > y" is false. */
+
+static tristate
+eval_constraint_op_for_op (enum constraint_op c_op, enum tree_code t_op)
+{
+ switch (c_op)
+ {
+ default:
+ gcc_unreachable ();
+ case CONSTRAINT_NE:
+ if (t_op == EQ_EXPR)
+ return tristate (tristate::TS_FALSE);
+ if (t_op == NE_EXPR)
+ return tristate (tristate::TS_TRUE);
+ break;
+ case CONSTRAINT_LT:
+ if (t_op == LT_EXPR || t_op == LE_EXPR || t_op == NE_EXPR)
+ return tristate (tristate::TS_TRUE);
+ if (t_op == EQ_EXPR || t_op == GT_EXPR || t_op == GE_EXPR)
+ return tristate (tristate::TS_FALSE);
+ break;
+ case CONSTRAINT_LE:
+ if (t_op == LE_EXPR)
+ return tristate (tristate::TS_TRUE);
+ if (t_op == GT_EXPR)
+ return tristate (tristate::TS_FALSE);
+ break;
+ }
+ return tristate (tristate::TS_UNKNOWN);
+}
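+
+/* For example (illustrative only): knowing "x < y" (CONSTRAINT_LT),
+   a query for EQ_EXPR evaluates to TS_FALSE and NE_EXPR to TS_TRUE;
+   knowing only "x <= y" (CONSTRAINT_LE), a query for LT_EXPR evaluates
+   to TS_UNKNOWN, since x might equal y.  */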
+
+/* class constraint. */
+
+/* Print this constraint to PP (which must support %E for trees),
+ using CM to look up equiv_class instances from ids. */
+
+void
+constraint::print (pretty_printer *pp, const constraint_manager &cm) const
+{
+ m_lhs.print (pp);
+ pp_string (pp, ": ");
+ m_lhs.get_obj (cm).print (pp);
+ pp_string (pp, " ");
+ pp_string (pp, constraint_op_code (m_op));
+ pp_string (pp, " ");
+ m_rhs.print (pp);
+ pp_string (pp, ": ");
+ m_rhs.get_obj (cm).print (pp);
+}
+
+/* Generate a hash value for this constraint. */
+
+hashval_t
+constraint::hash () const
+{
+ inchash::hash hstate;
+ hstate.add_int (m_lhs.m_idx);
+ hstate.add_int (m_op);
+ hstate.add_int (m_rhs.m_idx);
+ return hstate.end ();
+}
+
+/* Equality operator for constraints. */
+
+bool
+constraint::operator== (const constraint &other) const
+{
+ if (m_lhs != other.m_lhs)
+ return false;
+ if (m_op != other.m_op)
+ return false;
+ if (m_rhs != other.m_rhs)
+ return false;
+ return true;
+}
+
+/* class equiv_class_id. */
+
+/* Get the underlying equiv_class for this ID from CM. */
+
+const equiv_class &
+equiv_class_id::get_obj (const constraint_manager &cm) const
+{
+ return cm.get_equiv_class_by_index (m_idx);
+}
+
+/* Access the underlying equiv_class for this ID from CM. */
+
+equiv_class &
+equiv_class_id::get_obj (constraint_manager &cm) const
+{
+ return cm.get_equiv_class_by_index (m_idx);
+}
+
+/* Print this equiv_class_id to PP. */
+
+void
+equiv_class_id::print (pretty_printer *pp) const
+{
+ if (null_p ())
+ pp_printf (pp, "null");
+ else
+ pp_printf (pp, "ec%i", m_idx);
+}
+
+/* class constraint_manager. */
+
+/* constraint_manager's copy ctor. */
+
+constraint_manager::constraint_manager (const constraint_manager &other)
+: m_equiv_classes (other.m_equiv_classes.length ()),
+ m_constraints (other.m_constraints.length ())
+{
+ int i;
+ equiv_class *ec;
+ FOR_EACH_VEC_ELT (other.m_equiv_classes, i, ec)
+ m_equiv_classes.quick_push (new equiv_class (*ec));
+ constraint *c;
+ FOR_EACH_VEC_ELT (other.m_constraints, i, c)
+ m_constraints.quick_push (*c);
+}
+
+/* constraint_manager's assignment operator. */
+
+constraint_manager&
+constraint_manager::operator= (const constraint_manager &other)
+{
+ gcc_assert (m_equiv_classes.length () == 0);
+ gcc_assert (m_constraints.length () == 0);
+
+ int i;
+ equiv_class *ec;
+ m_equiv_classes.reserve (other.m_equiv_classes.length ());
+ FOR_EACH_VEC_ELT (other.m_equiv_classes, i, ec)
+ m_equiv_classes.quick_push (new equiv_class (*ec));
+ constraint *c;
+ m_constraints.reserve (other.m_constraints.length ());
+ FOR_EACH_VEC_ELT (other.m_constraints, i, c)
+ m_constraints.quick_push (*c);
+
+ return *this;
+}
+
+/* Generate a hash value for this constraint_manager. */
+
+hashval_t
+constraint_manager::hash () const
+{
+ inchash::hash hstate;
+ int i;
+ equiv_class *ec;
+ constraint *c;
+
+ FOR_EACH_VEC_ELT (m_equiv_classes, i, ec)
+ hstate.merge_hash (ec->hash ());
+ FOR_EACH_VEC_ELT (m_constraints, i, c)
+ hstate.merge_hash (c->hash ());
+ return hstate.end ();
+}
+
+/* Equality operator for constraint_manager. */
+
+bool
+constraint_manager::operator== (const constraint_manager &other) const
+{
+ if (m_equiv_classes.length () != other.m_equiv_classes.length ())
+ return false;
+ if (m_constraints.length () != other.m_constraints.length ())
+ return false;
+
+ int i;
+ equiv_class *ec;
+
+ FOR_EACH_VEC_ELT (m_equiv_classes, i, ec)
+ if (!(*ec == *other.m_equiv_classes[i]))
+ return false;
+
+ constraint *c;
+
+ FOR_EACH_VEC_ELT (m_constraints, i, c)
+ if (!(*c == other.m_constraints[i]))
+ return false;
+
+ return true;
+}
+
+/* Print this constraint_manager to PP (which must support %E for trees). */
+
+void
+constraint_manager::print (pretty_printer *pp) const
+{
+ pp_string (pp, "{");
+ int i;
+ equiv_class *ec;
+ FOR_EACH_VEC_ELT (m_equiv_classes, i, ec)
+ {
+ if (i > 0)
+ pp_string (pp, ", ");
+ equiv_class_id (i).print (pp);
+ pp_string (pp, ": ");
+ ec->print (pp);
+ }
+ pp_string (pp, " | ");
+ constraint *c;
+ FOR_EACH_VEC_ELT (m_constraints, i, c)
+ {
+ if (i > 0)
+ pp_string (pp, " && ");
+ c->print (pp, *this);
+ }
+ pp_printf (pp, "}");
+}
+
+/* Dump a multiline representation of this constraint_manager to PP
+ (which must support %E for trees). */
+
+void
+constraint_manager::dump_to_pp (pretty_printer *pp) const
+{
+ // TODO
+ pp_string (pp, " equiv classes:");
+ pp_newline (pp);
+ int i;
+ equiv_class *ec;
+ FOR_EACH_VEC_ELT (m_equiv_classes, i, ec)
+ {
+ pp_string (pp, " ");
+ equiv_class_id (i).print (pp);
+ pp_string (pp, ": ");
+ ec->print (pp);
+ pp_newline (pp);
+ }
+ pp_string (pp, " constraints:");
+ pp_newline (pp);
+ constraint *c;
+ FOR_EACH_VEC_ELT (m_constraints, i, c)
+ {
+ pp_printf (pp, " %i: ", i);
+ c->print (pp, *this);
+ pp_newline (pp);
+ }
+}
+
+/* Dump a multiline representation of this constraint_manager to FP. */
+
+void
+constraint_manager::dump (FILE *fp) const
+{
+ pretty_printer pp;
+ pp_format_decoder (&pp) = default_tree_printer;
+ pp_show_color (&pp) = pp_show_color (global_dc->printer);
+ pp.buffer->stream = fp;
+ dump_to_pp (&pp);
+ pp_flush (&pp);
+}
+
+/* Dump a multiline representation of this constraint_manager to stderr. */
+
+DEBUG_FUNCTION void
+constraint_manager::dump () const
+{
+ dump (stderr);
+}
+
+/* Dump a multiline representation of CM to stderr. */
+
+DEBUG_FUNCTION void
+debug (const constraint_manager &cm)
+{
+ cm.dump ();
+}
+
+/* Attempt to add the constraint LHS OP RHS to this constraint_manager.
+ Return true if the constraint could be added (or is already true).
+ Return false if the constraint contradicts existing knowledge. */
+
+bool
+constraint_manager::add_constraint (svalue_id lhs,
+ enum tree_code op,
+ svalue_id rhs)
+{
+ equiv_class_id lhs_ec_id = get_or_add_equiv_class (lhs);
+ equiv_class_id rhs_ec_id = get_or_add_equiv_class (rhs);
+  return add_constraint (lhs_ec_id, op, rhs_ec_id);
+}
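+
+/* Illustrative sketch of how a caller might use the above (the real
+   callers live elsewhere in the analyzer; "cm", "x_sid" and "y_sid"
+   are placeholder names):
+
+     if (!cm.add_constraint (x_sid, LT_EXPR, y_sid))
+       // "x < y" contradicts what is already known, so this
+       // execution path is infeasible and can be rejected.
+       ...
+
+   A "true" result means the constraint was recorded (or already held).  */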
+
+/* Attempt to add the constraint LHS_EC_ID OP RHS_EC_ID to this
+ constraint_manager.
+ Return true if the constraint could be added (or is already true).
+ Return false if the constraint contradicts existing knowledge. */
+
+bool
+constraint_manager::add_constraint (equiv_class_id lhs_ec_id,
+ enum tree_code op,
+ equiv_class_id rhs_ec_id)
+{
+ tristate t = eval_condition (lhs_ec_id, op, rhs_ec_id);
+
+ /* Discard constraints that are already known. */
+ if (t.is_true ())
+ return true;
+
+ /* Reject unsatisfiable constraints. */
+ if (t.is_false ())
+ return false;
+
+ gcc_assert (lhs_ec_id != rhs_ec_id);
+
+ /* For now, simply accumulate constraints, without attempting any further
+ optimization. */
+ switch (op)
+ {
+ case EQ_EXPR:
+ {
+ /* Merge rhs_ec into lhs_ec. */
+ equiv_class &lhs_ec_obj = lhs_ec_id.get_obj (*this);
+ const equiv_class &rhs_ec_obj = rhs_ec_id.get_obj (*this);
+
+ int i;
+ svalue_id *sid;
+ FOR_EACH_VEC_ELT (rhs_ec_obj.m_vars, i, sid)
+ lhs_ec_obj.add (*sid, *this);
+
+ if (rhs_ec_obj.m_constant)
+ {
+ //gcc_assert (lhs_ec_obj.m_constant == NULL);
+ lhs_ec_obj.m_constant = rhs_ec_obj.m_constant;
+ }
+
+ /* Drop rhs equivalence class, overwriting it with the
+ final ec (which might be the same one). */
+ equiv_class_id final_ec_id = m_equiv_classes.length () - 1;
+ equiv_class *old_ec = m_equiv_classes[rhs_ec_id.m_idx];
+ equiv_class *final_ec = m_equiv_classes.pop ();
+ if (final_ec != old_ec)
+ m_equiv_classes[rhs_ec_id.m_idx] = final_ec;
+ delete old_ec;
+
+ /* Update the constraints. */
+ constraint *c;
+ FOR_EACH_VEC_ELT (m_constraints, i, c)
+ {
+ /* Update references to the rhs_ec so that
+ they refer to the lhs_ec. */
+ if (c->m_lhs == rhs_ec_id)
+ c->m_lhs = lhs_ec_id;
+ if (c->m_rhs == rhs_ec_id)
+ c->m_rhs = lhs_ec_id;
+
+	    /* Renumber all constraints that referred to the final EC
+	       so that they now refer to rhs_ec_id, the slot into which
+	       that EC has just been moved.  */
+ if (c->m_lhs == final_ec_id)
+ c->m_lhs = rhs_ec_id;
+ if (c->m_rhs == final_ec_id)
+ c->m_rhs = rhs_ec_id;
+ }
+ }
+ break;
+ case GE_EXPR:
+ add_constraint_internal (rhs_ec_id, CONSTRAINT_LE, lhs_ec_id);
+ break;
+ case LE_EXPR:
+ add_constraint_internal (lhs_ec_id, CONSTRAINT_LE, rhs_ec_id);
+ break;
+ case NE_EXPR:
+ add_constraint_internal (lhs_ec_id, CONSTRAINT_NE, rhs_ec_id);
+ break;
+ case GT_EXPR:
+ add_constraint_internal (rhs_ec_id, CONSTRAINT_LT, lhs_ec_id);
+ break;
+ case LT_EXPR:
+ add_constraint_internal (lhs_ec_id, CONSTRAINT_LT, rhs_ec_id);
+ break;
+ default:
+ /* do nothing. */
+ break;
+ }
+ validate ();
+ return true;
+}
+
+/* Subroutine of constraint_manager::add_constraint, for handling all
+ operations other than equality (for which equiv classes are merged). */
+
+void
+constraint_manager::add_constraint_internal (equiv_class_id lhs_id,
+ enum constraint_op c_op,
+ equiv_class_id rhs_id)
+{
+ /* Add the constraint. */
+ m_constraints.safe_push (constraint (lhs_id, c_op, rhs_id));
+
+ if (!flag_analyzer_transitivity)
+ return;
+
+ if (c_op != CONSTRAINT_NE)
+ {
+ /* The following can potentially add EQ_EXPR facts, which could lead
+ to ECs being merged, which would change the meaning of the EC IDs.
+ Hence we need to do this via representatives. */
+ svalue_id lhs = lhs_id.get_obj (*this).get_representative ();
+ svalue_id rhs = rhs_id.get_obj (*this).get_representative ();
+
+ /* We have LHS </<= RHS */
+
+ /* Handle transitivity of ordering by adding additional constraints
+ based on what we already knew.
+
+	 So if we already have:
+ (a < b)
+ (c < d)
+ Then adding:
+ (b < c)
+ will also add:
+ (a < c)
+ (b < d)
+ We need to recurse to ensure we also add:
+ (a < d).
+ We call the checked add_constraint to avoid adding constraints
+ that are already present. Doing so also ensures termination
+ in the case of cycles.
+
+ We also check for single-element ranges, adding EQ_EXPR facts
+ where we discover them. For example 3 < x < 5 implies
+ that x == 4 (if x is an integer). */
+ for (unsigned i = 0; i < m_constraints.length (); i++)
+ {
+ const constraint *other = &m_constraints[i];
+ if (other->is_ordering_p ())
+ {
+ /* Refresh the EC IDs, in case any mergers have happened. */
+ lhs_id = get_or_add_equiv_class (lhs);
+ rhs_id = get_or_add_equiv_class (rhs);
+
+ tree lhs_const = lhs_id.get_obj (*this).m_constant;
+ tree rhs_const = rhs_id.get_obj (*this).m_constant;
+ tree other_lhs_const
+ = other->m_lhs.get_obj (*this).m_constant;
+ tree other_rhs_const
+ = other->m_rhs.get_obj (*this).m_constant;
+
+ /* We have "LHS </<= RHS" and "other.lhs </<= other.rhs". */
+
+ /* If we have LHS </<= RHS and RHS </<= LHS, then we have a
+ cycle. */
+ if (rhs_id == other->m_lhs
+ && other->m_rhs == lhs_id)
+ {
+ /* We must have equality for this to be possible. */
+ gcc_assert (c_op == CONSTRAINT_LE
+ && other->m_op == CONSTRAINT_LE);
+ add_constraint (lhs_id, EQ_EXPR, rhs_id);
+ /* Adding an equality will merge the two ECs and potentially
+ reorganize the constraints. Stop iterating. */
+ return;
+ }
+ /* Otherwise, check for transitivity. */
+ if (rhs_id == other->m_lhs)
+ {
+ /* With RHS == other.lhs, we have:
+ "LHS </<= (RHS, other.lhs) </<= other.rhs"
+ and thus this implies "LHS </<= other.rhs". */
+
+ /* Do we have a tightly-constrained range? */
+ if (lhs_const
+ && !rhs_const
+ && other_rhs_const)
+ {
+ range r (bound (lhs_const, c_op == CONSTRAINT_LE),
+ bound (other_rhs_const,
+ other->m_op == CONSTRAINT_LE));
+ tree constant;
+ if (r.constrained_to_single_element (&constant))
+ {
+ svalue_id cst_sid = get_sid_for_constant (constant);
+ add_constraint
+ (rhs_id, EQ_EXPR,
+ get_or_add_equiv_class (cst_sid));
+ return;
+ }
+ }
+
+ /* Otherwise, add the constraint implied by transitivity. */
+ enum tree_code new_op
+ = ((c_op == CONSTRAINT_LE && other->m_op == CONSTRAINT_LE)
+ ? LE_EXPR : LT_EXPR);
+ add_constraint (lhs_id, new_op, other->m_rhs);
+ }
+ else if (other->m_rhs == lhs_id)
+ {
+ /* With other.rhs == LHS, we have:
+ "other.lhs </<= (other.rhs, LHS) </<= RHS"
+ and thus this implies "other.lhs </<= RHS". */
+
+ /* Do we have a tightly-constrained range? */
+ if (other_lhs_const
+ && !lhs_const
+ && rhs_const)
+ {
+ range r (bound (other_lhs_const,
+ other->m_op == CONSTRAINT_LE),
+ bound (rhs_const,
+ c_op == CONSTRAINT_LE));
+ tree constant;
+ if (r.constrained_to_single_element (&constant))
+ {
+ svalue_id cst_sid = get_sid_for_constant (constant);
+ add_constraint
+ (lhs_id, EQ_EXPR,
+ get_or_add_equiv_class (cst_sid));
+ return;
+ }
+ }
+
+ /* Otherwise, add the constraint implied by transitivity. */
+ enum tree_code new_op
+ = ((c_op == CONSTRAINT_LE && other->m_op == CONSTRAINT_LE)
+ ? LE_EXPR : LT_EXPR);
+ add_constraint (other->m_lhs, new_op, rhs_id);
+ }
+ }
+ }
+ }
+}
+
+/* Look for SID within the equivalence classes of this constraint_manager;
+ if found, write the id to *OUT and return true, otherwise return false. */
+
+bool
+constraint_manager::get_equiv_class_by_sid (svalue_id sid, equiv_class_id *out) const
+{
+ /* TODO: should we have a map, rather than these searches? */
+ int i;
+ equiv_class *ec;
+ FOR_EACH_VEC_ELT (m_equiv_classes, i, ec)
+ {
+ int j;
+ svalue_id *iv;
+ FOR_EACH_VEC_ELT (ec->m_vars, j, iv)
+ if (*iv == sid)
+ {
+ *out = equiv_class_id (i);
+ return true;
+ }
+ }
+ return false;
+}
+
+/* Ensure that SID has an equivalence class within this constraint_manager;
+ return the ID of the class. */
+
+equiv_class_id
+constraint_manager::get_or_add_equiv_class (svalue_id sid)
+{
+ equiv_class_id result (-1);
+
+ /* Try svalue_id match. */
+ if (get_equiv_class_by_sid (sid, &result))
+ return result;
+
+ /* Try equality of constants. */
+ if (tree cst = maybe_get_constant (sid))
+ {
+ int i;
+ equiv_class *ec;
+ FOR_EACH_VEC_ELT (m_equiv_classes, i, ec)
+ if (ec->m_constant)
+ {
+ tree eq = fold_build2 (EQ_EXPR, boolean_type_node,
+ cst, ec->m_constant);
+ if (eq == boolean_true_node)
+ {
+ ec->add (sid, *this);
+ return equiv_class_id (i);
+ }
+ }
+ }
+
+ /* Not found. */
+ equiv_class *new_ec = new equiv_class ();
+ new_ec->add (sid, *this);
+ m_equiv_classes.safe_push (new_ec);
+
+ equiv_class_id new_id (m_equiv_classes.length () - 1);
+
+ if (maybe_get_constant (sid))
+ {
+ /* If we have a new EC for a constant, add constraints comparing this
+ to other constants we may have (so that we accumulate the transitive
+ closure of all constraints on constants as the constants are
+ added). */
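+ /* For example, if ECs already exist for the constants 3 and 7, and
+ we are now adding an EC for 5, the loop below records "3 < 5"
+ and "5 < 7". */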
+ for (equiv_class_id other_id (0); other_id.m_idx < new_id.m_idx;
+ other_id.m_idx++)
+ {
+ const equiv_class &other_ec = other_id.get_obj (*this);
+ if (other_ec.m_constant)
+ {
+ /* If we have two ECs, both with constants, the constants must be
+ non-equal (or they would be in the same EC).
+ Determine the direction of the inequality, and record that
+ fact. */
+ tree lt
+ = fold_build2 (LT_EXPR, boolean_type_node,
+ new_ec->m_constant, other_ec.m_constant);
+ /* Note that "lt" is not guaranteed to fold to a boolean constant
+ (e.g. for int vs float comparisons), so we can't assert on it. */
+ if (lt == boolean_true_node)
+ add_constraint_internal (new_id, CONSTRAINT_LT, other_id);
+ else if (lt == boolean_false_node)
+ add_constraint_internal (other_id, CONSTRAINT_LT, new_id);
+ /* Refresh new_id, in case ECs were merged. SID should always
+ be present by now, so this should never lead to a
+ recursion. */
+ new_id = get_or_add_equiv_class (sid);
+ }
+ }
+ }
+
+ return new_id;
+}
+
+/* Evaluate the condition LHS_EC OP RHS_EC. */
+
+tristate
+constraint_manager::eval_condition (equiv_class_id lhs_ec,
+ enum tree_code op,
+ equiv_class_id rhs_ec)
+{
+ if (lhs_ec == rhs_ec)
+ {
+ switch (op)
+ {
+ case EQ_EXPR:
+ case GE_EXPR:
+ case LE_EXPR:
+ return tristate (tristate::TS_TRUE);
+
+ case NE_EXPR:
+ case GT_EXPR:
+ case LT_EXPR:
+ return tristate (tristate::TS_FALSE);
+ default:
+ break;
+ }
+ }
+
+ tree lhs_const = lhs_ec.get_obj (*this).get_any_constant ();
+ tree rhs_const = rhs_ec.get_obj (*this).get_any_constant ();
+ if (lhs_const && rhs_const)
+ {
+ tree comparison
+ = fold_build2 (op, boolean_type_node, lhs_const, rhs_const);
+ if (comparison == boolean_true_node)
+ return tristate (tristate::TS_TRUE);
+ if (comparison == boolean_false_node)
+ return tristate (tristate::TS_FALSE);
+ }
+
+ enum tree_code swapped_op = swap_tree_comparison (op);
+
+ int i;
+ constraint *c;
+ FOR_EACH_VEC_ELT (m_constraints, i, c)
+ {
+ if (c->m_lhs == lhs_ec
+ && c->m_rhs == rhs_ec)
+ {
+ tristate result_for_constraint
+ = eval_constraint_op_for_op (c->m_op, op);
+ if (result_for_constraint.is_known ())
+ return result_for_constraint;
+ }
+ /* Swapped operands. */
+ if (c->m_lhs == rhs_ec
+ && c->m_rhs == lhs_ec)
+ {
+ tristate result_for_constraint
+ = eval_constraint_op_for_op (c->m_op, swapped_op);
+ if (result_for_constraint.is_known ())
+ return result_for_constraint;
+ }
+ }
+
+ return tristate (tristate::TS_UNKNOWN);
+}
+
+/* Evaluate the condition LHS OP RHS, creating equiv_class instances for
+ LHS and RHS if they aren't already in equiv_classes. */
+
+tristate
+constraint_manager::eval_condition (svalue_id lhs,
+ enum tree_code op,
+ svalue_id rhs)
+{
+ return eval_condition (get_or_add_equiv_class (lhs),
+ op,
+ get_or_add_equiv_class (rhs));
+}
+
+/* Delete any information about svalue_id instances identified by P.
+ Such instances are removed from equivalence classes, and any
+ redundant ECs and constraints are also removed.
+ Accumulate stats into STATS. */
+
+void
+constraint_manager::purge (const purge_criteria &p, purge_stats *stats)
+{
+ /* Delete any svalue_ids identified by P within the various equivalence
+ classes. */
+ for (unsigned ec_idx = 0; ec_idx < m_equiv_classes.length (); )
+ {
+ equiv_class *ec = m_equiv_classes[ec_idx];
+
+ int i;
+ svalue_id *pv;
+ bool delete_ec = false;
+ FOR_EACH_VEC_ELT (ec->m_vars, i, pv)
+ {
+ if (*pv == ec->m_cst_sid)
+ continue;
+ if (p.should_purge_p (*pv))
+ {
+ if (ec->del (*pv))
+ if (!ec->m_constant)
+ delete_ec = true;
+ }
+ }
+
+ if (delete_ec)
+ {
+ delete ec;
+ m_equiv_classes.ordered_remove (ec_idx);
+ if (stats)
+ stats->m_num_equiv_classes++;
+
+ /* Update the constraints, potentially removing some. */
+ for (unsigned con_idx = 0; con_idx < m_constraints.length (); )
+ {
+ constraint *c = &m_constraints[con_idx];
+
+ /* Remove constraints that refer to the deleted EC. */
+ if (c->m_lhs == ec_idx
+ || c->m_rhs == ec_idx)
+ {
+ m_constraints.ordered_remove (con_idx);
+ if (stats)
+ stats->m_num_constraints++;
+ }
+ else
+ {
+ /* Renumber constraints that refer to ECs that have
+ had their idx changed. */
+ c->m_lhs.update_for_removal (ec_idx);
+ c->m_rhs.update_for_removal (ec_idx);
+
+ con_idx++;
+ }
+ }
+ }
+ else
+ ec_idx++;
+ }
+
+ /* Now delete any constraints that are purely between constants. */
+ for (unsigned con_idx = 0; con_idx < m_constraints.length (); )
+ {
+ constraint *c = &m_constraints[con_idx];
+ if (m_equiv_classes[c->m_lhs.m_idx]->m_vars.length () == 0
+ && m_equiv_classes[c->m_rhs.m_idx]->m_vars.length () == 0)
+ {
+ m_constraints.ordered_remove (con_idx);
+ if (stats)
+ stats->m_num_constraints++;
+ }
+ else
+ {
+ con_idx++;
+ }
+ }
+
+ /* Finally, delete any ECs that purely contain constants and aren't
+ referenced by any constraints. */
+ for (unsigned ec_idx = 0; ec_idx < m_equiv_classes.length (); )
+ {
+ equiv_class *ec = m_equiv_classes[ec_idx];
+ if (ec->m_vars.length () == 0)
+ {
+ equiv_class_id ec_id (ec_idx);
+ bool has_constraint = false;
+ for (unsigned con_idx = 0; con_idx < m_constraints.length ();
+ con_idx++)
+ {
+ constraint *c = &m_constraints[con_idx];
+ if (c->m_lhs == ec_id
+ || c->m_rhs == ec_id)
+ {
+ has_constraint = true;
+ break;
+ }
+ }
+ if (!has_constraint)
+ {
+ delete ec;
+ m_equiv_classes.ordered_remove (ec_idx);
+ if (stats)
+ stats->m_num_equiv_classes++;
+
+ /* Renumber constraints that refer to ECs that have
+ had their idx changed. */
+ for (unsigned con_idx = 0; con_idx < m_constraints.length ();
+ con_idx++)
+ {
+ constraint *c = &m_constraints[con_idx];
+ c->m_lhs.update_for_removal (ec_idx);
+ c->m_rhs.update_for_removal (ec_idx);
+ }
+ continue;
+ }
+ }
+ ec_idx++;
+ }
+
+ validate ();
+}
+
+/* Remap all svalue_ids within this constraint_manager using MAP. */
+
+void
+constraint_manager::remap_svalue_ids (const svalue_id_map &map)
+{
+ int i;
+ equiv_class *ec;
+ FOR_EACH_VEC_ELT (m_equiv_classes, i, ec)
+ ec->remap_svalue_ids (map);
+}
+
+/* Comparator for use by constraint_manager::canonicalize.
+ Sort a pair of equiv_class instances, using the representative
+ svalue_id as a sort key. */
+
+static int
+equiv_class_cmp (const void *p1, const void *p2)
+{
+ const equiv_class *ec1 = *(const equiv_class * const *)p1;
+ const equiv_class *ec2 = *(const equiv_class * const *)p2;
+
+ svalue_id rep1 = ec1->get_representative ();
+ svalue_id rep2 = ec2->get_representative ();
+
+ return rep1.as_int () - rep2.as_int ();
+}
+
+/* Comparator for use by constraint_manager::canonicalize.
+ Sort a pair of constraint instances. */
+
+static int
+constraint_cmp (const void *p1, const void *p2)
+{
+ const constraint *c1 = (const constraint *)p1;
+ const constraint *c2 = (const constraint *)p2;
+ int lhs_cmp = c1->m_lhs.as_int () - c2->m_lhs.as_int ();
+ if (lhs_cmp)
+ return lhs_cmp;
+ int rhs_cmp = c1->m_rhs.as_int () - c2->m_rhs.as_int ();
+ if (rhs_cmp)
+ return rhs_cmp;
+ return c1->m_op - c2->m_op;
+}
+
+/* Reorder the equivalence classes and constraints within this
+ constraint_manager into a canonical order, to increase the
+ chances of finding equality with another instance. */
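+
+/* For example, two constraint_managers that record the same facts, but
+ which had those facts added in a different order, can end up with
+ their ECs and constraints stored in different orders; canonicalizing
+ both increases the chance that they compare equal. */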
+
+void
+constraint_manager::canonicalize (unsigned num_svalue_ids)
+{
+ /* First, sort svalue_ids within the ECs. */
+ unsigned i;
+ equiv_class *ec;
+ FOR_EACH_VEC_ELT (m_equiv_classes, i, ec)
+ ec->canonicalize ();
+
+ /* Next, sort the ECs into a canonical order. */
+
+ /* We will need to remap the equiv_class_ids in the constraints,
+ so we need to store the original index of each EC.
+ Build a lookup table, mapping from representative svalue_id
+ to the original equiv_class_id of that svalue_id. */
+ auto_vec<equiv_class_id> original_ec_id (num_svalue_ids);
+ for (i = 0; i < num_svalue_ids; i++)
+ original_ec_id.quick_push (equiv_class_id::null ());
+ FOR_EACH_VEC_ELT (m_equiv_classes, i, ec)
+ {
+ svalue_id rep = ec->get_representative ();
+ gcc_assert (!rep.null_p ());
+ original_ec_id[rep.as_int ()] = i;
+ }
+
+ /* Sort the equivalence classes. */
+ m_equiv_classes.qsort (equiv_class_cmp);
+
+ /* Populate ec_id_map based on the old vs new EC ids. */
+ one_way_id_map<equiv_class_id> ec_id_map (m_equiv_classes.length ());
+ FOR_EACH_VEC_ELT (m_equiv_classes, i, ec)
+ {
+ svalue_id rep = ec->get_representative ();
+ ec_id_map.put (original_ec_id[rep.as_int ()], i);
+ }
+
+ /* Update the EC ids within the constraints. */
+ constraint *c;
+ FOR_EACH_VEC_ELT (m_constraints, i, c)
+ {
+ ec_id_map.update (&c->m_lhs);
+ ec_id_map.update (&c->m_rhs);
+ }
+
+ /* Finally, sort the constraints. */
+ m_constraints.qsort (constraint_cmp);
+}
+
+/* A concrete subclass of constraint_manager for use when
+ merging two constraint_manager instances into a third constraint_manager,
+ each of which has its own region_model.
+ Calls are delegated to the constraint_manager for the merged model,
+ and thus affect its region_model. */
+
+class cleaned_constraint_manager : public constraint_manager
+{
+public:
+ cleaned_constraint_manager (constraint_manager *merged) : m_merged (merged) {}
+
+ constraint_manager *clone (region_model *) const FINAL OVERRIDE
+ {
+ gcc_unreachable ();
+ }
+ tree maybe_get_constant (svalue_id sid) const FINAL OVERRIDE
+ {
+ return m_merged->maybe_get_constant (sid);
+ }
+ svalue_id get_sid_for_constant (tree cst) const FINAL OVERRIDE
+ {
+ return m_merged->get_sid_for_constant (cst);
+ }
+ virtual int get_num_svalues () const FINAL OVERRIDE
+ {
+ return m_merged->get_num_svalues ();
+ }
+private:
+ constraint_manager *m_merged;
+};
+
+/* Concrete subclass of fact_visitor for use by constraint_manager::merge.
+ For every fact in CM_A, see if it is also true in *CM_B. Add such
+ facts to *OUT. */
+
+class merger_fact_visitor : public fact_visitor
+{
+public:
+ merger_fact_visitor (constraint_manager *cm_b,
+ constraint_manager *out)
+ : m_cm_b (cm_b), m_out (out)
+ {}
+
+ void on_fact (svalue_id lhs, enum tree_code code, svalue_id rhs)
+ FINAL OVERRIDE
+ {
+ if (m_cm_b->eval_condition (lhs, code, rhs).is_true ())
+ {
+ bool sat = m_out->add_constraint (lhs, code, rhs);
+ gcc_assert (sat);
+ }
+ }
+
+private:
+ constraint_manager *m_cm_b;
+ constraint_manager *m_out;
+};
+
+/* Use MERGER to merge CM_A and CM_B into *OUT.
+ If one thinks of a constraint_manager as a subset of N-dimensional
+ space, this takes the union of the points of CM_A and CM_B, and
+ expresses that into *OUT. Alternatively, it can be thought of
+ as the intersection of the constraints. */
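+
+/* For example, if CM_A records "x == 0" and CM_B records "x == 1", then
+ neither fact holds in the other, so the merged constraint_manager
+ records nothing about "x". If instead both record "x != 0", that
+ shared fact is kept in *OUT (assuming "x" itself survives into the
+ merged model). */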
+
+void
+constraint_manager::merge (const constraint_manager &cm_a,
+ const constraint_manager &cm_b,
+ constraint_manager *out,
+ const model_merger &merger)
+{
+ gcc_assert (merger.m_sid_mapping);
+
+ /* Map svalue_ids in each equiv class from both sources
+ to the merged region_model, dropping ids that don't survive merger,
+ and potentially creating svalues in *OUT for constants. */
+ cleaned_constraint_manager cleaned_cm_a (out);
+ const one_way_svalue_id_map &map_a_to_m
+ = merger.m_sid_mapping->m_map_from_a_to_m;
+ clean_merger_input (cm_a, map_a_to_m, &cleaned_cm_a);
+
+ cleaned_constraint_manager cleaned_cm_b (out);
+ const one_way_svalue_id_map &map_b_to_m
+ = merger.m_sid_mapping->m_map_from_b_to_m;
+ clean_merger_input (cm_b, map_b_to_m, &cleaned_cm_b);
+
+ /* At this point, the two cleaned CMs have ECs and constraints referring
+ to svalues in the merged region model, but both of them have separate
+ ECs. */
+
+ /* Merge the equivalence classes and constraints.
+ The easiest way to do this seems to be to enumerate all of the facts
+ in cleaned_cm_a, see which are also true in cleaned_cm_b,
+ and add those to *OUT. */
+ merger_fact_visitor v (&cleaned_cm_b, out);
+ cleaned_cm_a.for_each_fact (&v);
+}
+
+/* A subroutine of constraint_manager::merge.
+ Use MAP_SID_TO_M to map equivalence classes and constraints from
+ CM_IN to *OUT. Purge any non-constant svalue_ids that don't appear
+ in the result of MAP_SID_TO_M, purging any ECs and their constraints
+ that become empty as a result. Potentially create svalues in
+ the merged region_model for constants that weren't already in use there. */
+
+void
+constraint_manager::
+clean_merger_input (const constraint_manager &cm_in,
+ const one_way_svalue_id_map &map_sid_to_m,
+ constraint_manager *out)
+{
+ one_way_id_map<equiv_class_id> map_ec_to_m
+ (cm_in.m_equiv_classes.length ());
+ unsigned ec_idx;
+ equiv_class *ec;
+ FOR_EACH_VEC_ELT (cm_in.m_equiv_classes, ec_idx, ec)
+ {
+ equiv_class cleaned_ec;
+ if (tree cst = ec->get_any_constant ())
+ {
+ cleaned_ec.m_constant = cst;
+ /* Lazily create the constant in the out region_model. */
+ cleaned_ec.m_cst_sid = out->get_sid_for_constant (cst);
+ }
+ unsigned var_idx;
+ svalue_id *var_in_sid;
+ FOR_EACH_VEC_ELT (ec->m_vars, var_idx, var_in_sid)
+ {
+ svalue_id var_m_sid = map_sid_to_m.get_dst_for_src (*var_in_sid);
+ if (!var_m_sid.null_p ())
+ cleaned_ec.m_vars.safe_push (var_m_sid);
+ }
+ if (cleaned_ec.get_any_constant () || !cleaned_ec.m_vars.is_empty ())
+ {
+ map_ec_to_m.put (ec_idx, out->m_equiv_classes.length ());
+ out->m_equiv_classes.safe_push (new equiv_class (cleaned_ec));
+ }
+ }
+
+ /* Write out to *OUT any constraints for which both sides survived
+ cleaning, using the new EC IDs. */
+ unsigned con_idx;
+ constraint *c;
+ FOR_EACH_VEC_ELT (cm_in.m_constraints, con_idx, c)
+ {
+ equiv_class_id new_lhs = map_ec_to_m.get_dst_for_src (c->m_lhs);
+ if (new_lhs.null_p ())
+ continue;
+ equiv_class_id new_rhs = map_ec_to_m.get_dst_for_src (c->m_rhs);
+ if (new_rhs.null_p ())
+ continue;
+ out->m_constraints.safe_push (constraint (new_lhs,
+ c->m_op,
+ new_rhs));
+ }
+}
+
+/* Call VISITOR's on_fact vfunc repeatedly to express the various
+ equivalence classes and constraints.
+ This is used by constraint_manager::merge to find the common
+ facts between two input constraint_managers. */
+
+void
+constraint_manager::for_each_fact (fact_visitor *visitor) const
+{
+ /* First, emit EQ_EXPR facts relating the members of each equivalence class. */
+ unsigned ec_idx;
+ equiv_class *ec;
+ FOR_EACH_VEC_ELT (m_equiv_classes, ec_idx, ec)
+ {
+ if (!ec->m_cst_sid.null_p ())
+ {
+ unsigned i;
+ svalue_id *sid;
+ FOR_EACH_VEC_ELT (ec->m_vars, i, sid)
+ visitor->on_fact (ec->m_cst_sid, EQ_EXPR, *sid);
+ }
+ for (unsigned i = 0; i < ec->m_vars.length (); i++)
+ for (unsigned j = i + 1; j < ec->m_vars.length (); j++)
+ visitor->on_fact (ec->m_vars[i], EQ_EXPR, ec->m_vars[j]);
+ }
+
+ /* Now, express the various constraints. */
+ unsigned con_idx;
+ constraint *c;
+ FOR_EACH_VEC_ELT (m_constraints, con_idx, c)
+ {
+ const equiv_class &ec_lhs = c->m_lhs.get_obj (*this);
+ const equiv_class &ec_rhs = c->m_rhs.get_obj (*this);
+ enum tree_code code = constraint_tree_code (c->m_op);
+
+ if (!ec_lhs.m_cst_sid.null_p ())
+ {
+ for (unsigned j = 0; j < ec_rhs.m_vars.length (); j++)
+ {
+ visitor->on_fact (ec_lhs.m_cst_sid, code, ec_rhs.m_vars[j]);
+ }
+ }
+ for (unsigned i = 0; i < ec_lhs.m_vars.length (); i++)
+ {
+ if (!ec_rhs.m_cst_sid.null_p ())
+ visitor->on_fact (ec_lhs.m_vars[i], code, ec_rhs.m_cst_sid);
+ for (unsigned j = 0; j < ec_rhs.m_vars.length (); j++)
+ visitor->on_fact (ec_lhs.m_vars[i], code, ec_rhs.m_vars[j]);
+ }
+ }
+}
+
+/* Assert that this object is valid. */
+
+void
+constraint_manager::validate () const
+{
+ /* Skip this in a release build. */
+#if !CHECKING_P
+ return;
+#endif
+
+ int i;
+ equiv_class *ec;
+ FOR_EACH_VEC_ELT (m_equiv_classes, i, ec)
+ {
+ gcc_assert (ec);
+
+ int j;
+ svalue_id *sid;
+ FOR_EACH_VEC_ELT (ec->m_vars, j, sid)
+ {
+ gcc_assert (!sid->null_p ());
+ gcc_assert (sid->as_int () < get_num_svalues ());
+ }
+ if (ec->m_constant)
+ gcc_assert (CONSTANT_CLASS_P (ec->m_constant));
+#if 0
+ else
+ gcc_assert (ec->m_vars.length () > 0);
+#endif
+ }
+
+ constraint *c;
+ FOR_EACH_VEC_ELT (m_constraints, i, c)
+ {
+ gcc_assert (!c->m_lhs.null_p ());
+ gcc_assert (c->m_lhs.as_int () <= (int)m_equiv_classes.length ());
+ gcc_assert (!c->m_rhs.null_p ());
+ gcc_assert (c->m_rhs.as_int () <= (int)m_equiv_classes.length ());
+ }
+}
+
+#if CHECKING_P
+
+namespace selftest {
+
+/* Various constraint_manager selftests.
+ These have to be written in terms of a region_model, since
+ the latter is responsible for managing svalue and svalue_id
+ instances. */
+
+/* Verify that setting and getting simple conditions within a region_model
+ work (thus exercising the underlying constraint_manager). */
+
+static void
+test_constraint_conditions ()
+{
+ tree int_42 = build_int_cst (integer_type_node, 42);
+ tree int_0 = build_int_cst (integer_type_node, 0);
+
+ tree x = build_global_decl ("x", integer_type_node);
+ tree y = build_global_decl ("y", integer_type_node);
+ tree z = build_global_decl ("z", integer_type_node);
+
+ /* Self-comparisons. */
+ {
+ region_model model;
+ ASSERT_CONDITION_TRUE (model, x, EQ_EXPR, x);
+ ASSERT_CONDITION_TRUE (model, x, LE_EXPR, x);
+ ASSERT_CONDITION_TRUE (model, x, GE_EXPR, x);
+ ASSERT_CONDITION_FALSE (model, x, NE_EXPR, x);
+ ASSERT_CONDITION_FALSE (model, x, LT_EXPR, x);
+ ASSERT_CONDITION_FALSE (model, x, GT_EXPR, x);
+ }
+
+ /* x == y. */
+ {
+ region_model model;
+ ASSERT_CONDITION_UNKNOWN (model, x, EQ_EXPR, y);
+
+ ADD_SAT_CONSTRAINT (model, x, EQ_EXPR, y);
+
+ ASSERT_CONDITION_TRUE (model, x, EQ_EXPR, y);
+ ASSERT_CONDITION_TRUE (model, x, LE_EXPR, y);
+ ASSERT_CONDITION_TRUE (model, x, GE_EXPR, y);
+ ASSERT_CONDITION_FALSE (model, x, NE_EXPR, y);
+ ASSERT_CONDITION_FALSE (model, x, LT_EXPR, y);
+ ASSERT_CONDITION_FALSE (model, x, GT_EXPR, y);
+
+ /* Swapped operands. */
+ ASSERT_CONDITION_TRUE (model, y, EQ_EXPR, x);
+ ASSERT_CONDITION_TRUE (model, y, LE_EXPR, x);
+ ASSERT_CONDITION_TRUE (model, y, GE_EXPR, x);
+ ASSERT_CONDITION_FALSE (model, y, NE_EXPR, x);
+ ASSERT_CONDITION_FALSE (model, y, LT_EXPR, x);
+ ASSERT_CONDITION_FALSE (model, y, GT_EXPR, x);
+
+ /* Comparison with other var. */
+ ASSERT_CONDITION_UNKNOWN (model, x, EQ_EXPR, z);
+ ASSERT_CONDITION_UNKNOWN (model, x, LE_EXPR, z);
+ ASSERT_CONDITION_UNKNOWN (model, x, GE_EXPR, z);
+ ASSERT_CONDITION_UNKNOWN (model, x, NE_EXPR, z);
+ ASSERT_CONDITION_UNKNOWN (model, x, LT_EXPR, z);
+ ASSERT_CONDITION_UNKNOWN (model, x, GT_EXPR, z);
+ }
+
+ /* x == y, then y == z */
+ {
+ region_model model;
+ ASSERT_CONDITION_UNKNOWN (model, x, EQ_EXPR, y);
+
+ ADD_SAT_CONSTRAINT (model, x, EQ_EXPR, y);
+ ADD_SAT_CONSTRAINT (model, y, EQ_EXPR, z);
+
+ ASSERT_CONDITION_TRUE (model, x, EQ_EXPR, z);
+ ASSERT_CONDITION_TRUE (model, x, LE_EXPR, z);
+ ASSERT_CONDITION_TRUE (model, x, GE_EXPR, z);
+ ASSERT_CONDITION_FALSE (model, x, NE_EXPR, z);
+ ASSERT_CONDITION_FALSE (model, x, LT_EXPR, z);
+ ASSERT_CONDITION_FALSE (model, x, GT_EXPR, z);
+ }
+
+ /* x != y. */
+ {
+ region_model model;
+
+ ADD_SAT_CONSTRAINT (model, x, NE_EXPR, y);
+
+ ASSERT_CONDITION_TRUE (model, x, NE_EXPR, y);
+ ASSERT_CONDITION_FALSE (model, x, EQ_EXPR, y);
+ ASSERT_CONDITION_UNKNOWN (model, x, LE_EXPR, y);
+ ASSERT_CONDITION_UNKNOWN (model, x, GE_EXPR, y);
+ ASSERT_CONDITION_UNKNOWN (model, x, LT_EXPR, y);
+ ASSERT_CONDITION_UNKNOWN (model, x, GT_EXPR, y);
+
+ /* Swapped operands. */
+ ASSERT_CONDITION_TRUE (model, y, NE_EXPR, x);
+ ASSERT_CONDITION_FALSE (model, y, EQ_EXPR, x);
+ ASSERT_CONDITION_UNKNOWN (model, y, LE_EXPR, x);
+ ASSERT_CONDITION_UNKNOWN (model, y, GE_EXPR, x);
+ ASSERT_CONDITION_UNKNOWN (model, y, LT_EXPR, x);
+ ASSERT_CONDITION_UNKNOWN (model, y, GT_EXPR, x);
+
+ /* Comparison with other var. */
+ ASSERT_CONDITION_UNKNOWN (model, x, EQ_EXPR, z);
+ ASSERT_CONDITION_UNKNOWN (model, x, LE_EXPR, z);
+ ASSERT_CONDITION_UNKNOWN (model, x, GE_EXPR, z);
+ ASSERT_CONDITION_UNKNOWN (model, x, NE_EXPR, z);
+ ASSERT_CONDITION_UNKNOWN (model, x, LT_EXPR, z);
+ ASSERT_CONDITION_UNKNOWN (model, x, GT_EXPR, z);
+ }
+
+ /* x < y. */
+ {
+ region_model model;
+
+ ADD_SAT_CONSTRAINT (model, x, LT_EXPR, y);
+
+ ASSERT_CONDITION_TRUE (model, x, LT_EXPR, y);
+ ASSERT_CONDITION_TRUE (model, x, LE_EXPR, y);
+ ASSERT_CONDITION_TRUE (model, x, NE_EXPR, y);
+ ASSERT_CONDITION_FALSE (model, x, EQ_EXPR, y);
+ ASSERT_CONDITION_FALSE (model, x, GT_EXPR, y);
+ ASSERT_CONDITION_FALSE (model, x, GE_EXPR, y);
+
+ /* Swapped operands. */
+ ASSERT_CONDITION_FALSE (model, y, LT_EXPR, x);
+ ASSERT_CONDITION_FALSE (model, y, LE_EXPR, x);
+ ASSERT_CONDITION_TRUE (model, y, NE_EXPR, x);
+ ASSERT_CONDITION_FALSE (model, y, EQ_EXPR, x);
+ ASSERT_CONDITION_TRUE (model, y, GT_EXPR, x);
+ ASSERT_CONDITION_TRUE (model, y, GE_EXPR, x);
+ }
+
+ /* x <= y. */
+ {
+ region_model model;
+
+ ADD_SAT_CONSTRAINT (model, x, LE_EXPR, y);
+
+ ASSERT_CONDITION_UNKNOWN (model, x, LT_EXPR, y);
+ ASSERT_CONDITION_TRUE (model, x, LE_EXPR, y);
+ ASSERT_CONDITION_UNKNOWN (model, x, NE_EXPR, y);
+ ASSERT_CONDITION_UNKNOWN (model, x, EQ_EXPR, y);
+ ASSERT_CONDITION_FALSE (model, x, GT_EXPR, y);
+ ASSERT_CONDITION_UNKNOWN (model, x, GE_EXPR, y);
+
+ /* Swapped operands. */
+ ASSERT_CONDITION_FALSE (model, y, LT_EXPR, x);
+ ASSERT_CONDITION_UNKNOWN (model, y, LE_EXPR, x);
+ ASSERT_CONDITION_UNKNOWN (model, y, NE_EXPR, x);
+ ASSERT_CONDITION_UNKNOWN (model, y, EQ_EXPR, x);
+ ASSERT_CONDITION_UNKNOWN (model, y, GT_EXPR, x);
+ ASSERT_CONDITION_TRUE (model, y, GE_EXPR, x);
+ }
+
+ /* x > y. */
+ {
+ region_model model;
+
+ ADD_SAT_CONSTRAINT (model, x, GT_EXPR, y);
+
+ ASSERT_CONDITION_TRUE (model, x, GT_EXPR, y);
+ ASSERT_CONDITION_TRUE (model, x, GE_EXPR, y);
+ ASSERT_CONDITION_TRUE (model, x, NE_EXPR, y);
+ ASSERT_CONDITION_FALSE (model, x, EQ_EXPR, y);
+ ASSERT_CONDITION_FALSE (model, x, LT_EXPR, y);
+ ASSERT_CONDITION_FALSE (model, x, LE_EXPR, y);
+
+ /* Swapped operands. */
+ ASSERT_CONDITION_FALSE (model, y, GT_EXPR, x);
+ ASSERT_CONDITION_FALSE (model, y, GE_EXPR, x);
+ ASSERT_CONDITION_TRUE (model, y, NE_EXPR, x);
+ ASSERT_CONDITION_FALSE (model, y, EQ_EXPR, x);
+ ASSERT_CONDITION_TRUE (model, y, LT_EXPR, x);
+ ASSERT_CONDITION_TRUE (model, y, LE_EXPR, x);
+ }
+
+ /* x >= y. */
+ {
+ region_model model;
+
+ ADD_SAT_CONSTRAINT (model, x, GE_EXPR, y);
+
+ ASSERT_CONDITION_UNKNOWN (model, x, GT_EXPR, y);
+ ASSERT_CONDITION_TRUE (model, x, GE_EXPR, y);
+ ASSERT_CONDITION_UNKNOWN (model, x, NE_EXPR, y);
+ ASSERT_CONDITION_UNKNOWN (model, x, EQ_EXPR, y);
+ ASSERT_CONDITION_FALSE (model, x, LT_EXPR, y);
+ ASSERT_CONDITION_UNKNOWN (model, x, LE_EXPR, y);
+
+ /* Swapped operands. */
+ ASSERT_CONDITION_FALSE (model, y, GT_EXPR, x);
+ ASSERT_CONDITION_UNKNOWN (model, y, GE_EXPR, x);
+ ASSERT_CONDITION_UNKNOWN (model, y, NE_EXPR, x);
+ ASSERT_CONDITION_UNKNOWN (model, y, EQ_EXPR, x);
+ ASSERT_CONDITION_UNKNOWN (model, y, LT_EXPR, x);
+ ASSERT_CONDITION_TRUE (model, y, LE_EXPR, x);
+ }
+
+ // TODO: implied orderings
+
+ /* Constants. */
+ {
+ region_model model;
+ ASSERT_CONDITION_FALSE (model, int_0, EQ_EXPR, int_42);
+ ASSERT_CONDITION_TRUE (model, int_0, NE_EXPR, int_42);
+ ASSERT_CONDITION_TRUE (model, int_0, LT_EXPR, int_42);
+ ASSERT_CONDITION_TRUE (model, int_0, LE_EXPR, int_42);
+ ASSERT_CONDITION_FALSE (model, int_0, GT_EXPR, int_42);
+ ASSERT_CONDITION_FALSE (model, int_0, GE_EXPR, int_42);
+ }
+
+ /* x == 0, y == 42. */
+ {
+ region_model model;
+ ADD_SAT_CONSTRAINT (model, x, EQ_EXPR, int_0);
+ ADD_SAT_CONSTRAINT (model, y, EQ_EXPR, int_42);
+
+ ASSERT_CONDITION_TRUE (model, x, NE_EXPR, y);
+ ASSERT_CONDITION_FALSE (model, x, EQ_EXPR, y);
+ ASSERT_CONDITION_TRUE (model, x, LE_EXPR, y);
+ ASSERT_CONDITION_FALSE (model, x, GE_EXPR, y);
+ ASSERT_CONDITION_TRUE (model, x, LT_EXPR, y);
+ ASSERT_CONDITION_FALSE (model, x, GT_EXPR, y);
+ }
+
+ /* Unsatisfiable combinations. */
+
+ /* x == y && x != y. */
+ {
+ region_model model;
+ ADD_SAT_CONSTRAINT (model, x, EQ_EXPR, y);
+ ADD_UNSAT_CONSTRAINT (model, x, NE_EXPR, y);
+ }
+
+ /* x == 0 then x == 42. */
+ {
+ region_model model;
+ ADD_SAT_CONSTRAINT (model, x, EQ_EXPR, int_0);
+ ADD_UNSAT_CONSTRAINT (model, x, EQ_EXPR, int_42);
+ }
+
+ /* x == 0 then x != 0. */
+ {
+ region_model model;
+ ADD_SAT_CONSTRAINT (model, x, EQ_EXPR, int_0);
+ ADD_UNSAT_CONSTRAINT (model, x, NE_EXPR, int_0);
+ }
+
+ /* x == 0 then x > 0. */
+ {
+ region_model model;
+ ADD_SAT_CONSTRAINT (model, x, EQ_EXPR, int_0);
+ ADD_UNSAT_CONSTRAINT (model, x, GT_EXPR, int_0);
+ }
+
+ /* x != y && x == y. */
+ {
+ region_model model;
+ ADD_SAT_CONSTRAINT (model, x, NE_EXPR, y);
+ ADD_UNSAT_CONSTRAINT (model, x, EQ_EXPR, y);
+ }
+
+ /* x <= y && x > y. */
+ {
+ region_model model;
+ ADD_SAT_CONSTRAINT (model, x, LE_EXPR, y);
+ ADD_UNSAT_CONSTRAINT (model, x, GT_EXPR, y);
+ }
+
+ // etc
+}
+
+/* Test transitivity of conditions. */
+
+static void
+test_transitivity ()
+{
+ tree a = build_global_decl ("a", integer_type_node);
+ tree b = build_global_decl ("b", integer_type_node);
+ tree c = build_global_decl ("c", integer_type_node);
+ tree d = build_global_decl ("d", integer_type_node);
+
+ /* a == b, then c == d, then c == b. */
+ {
+ region_model model;
+ ASSERT_CONDITION_UNKNOWN (model, a, EQ_EXPR, b);
+ ASSERT_CONDITION_UNKNOWN (model, b, EQ_EXPR, c);
+ ASSERT_CONDITION_UNKNOWN (model, c, EQ_EXPR, d);
+ ASSERT_CONDITION_UNKNOWN (model, a, EQ_EXPR, d);
+
+ ADD_SAT_CONSTRAINT (model, a, EQ_EXPR, b);
+ ASSERT_CONDITION_TRUE (model, a, EQ_EXPR, b);
+
+ ADD_SAT_CONSTRAINT (model, c, EQ_EXPR, d);
+ ASSERT_CONDITION_TRUE (model, c, EQ_EXPR, d);
+ ASSERT_CONDITION_UNKNOWN (model, a, EQ_EXPR, d);
+
+ ADD_SAT_CONSTRAINT (model, c, EQ_EXPR, b);
+ ASSERT_CONDITION_TRUE (model, c, EQ_EXPR, b);
+ ASSERT_CONDITION_TRUE (model, a, EQ_EXPR, d);
+ }
+
+ /* Transitivity: "a < b", "b < c" should imply "a < c". */
+ {
+ region_model model;
+ ADD_SAT_CONSTRAINT (model, a, LT_EXPR, b);
+ ADD_SAT_CONSTRAINT (model, b, LT_EXPR, c);
+
+ ASSERT_CONDITION_TRUE (model, a, LT_EXPR, c);
+ ASSERT_CONDITION_FALSE (model, a, EQ_EXPR, c);
+ }
+
+ /* Transitivity: "a <= b", "b < c" should imply "a < c". */
+ {
+ region_model model;
+ ADD_SAT_CONSTRAINT (model, a, LE_EXPR, b);
+ ADD_SAT_CONSTRAINT (model, b, LT_EXPR, c);
+
+ ASSERT_CONDITION_TRUE (model, a, LT_EXPR, c);
+ ASSERT_CONDITION_FALSE (model, a, EQ_EXPR, c);
+ }
+
+ /* Transitivity: "a <= b", "b <= c" should imply "a <= c". */
+ {
+ region_model model;
+ ADD_SAT_CONSTRAINT (model, a, LE_EXPR, b);
+ ADD_SAT_CONSTRAINT (model, b, LE_EXPR, c);
+
+ ASSERT_CONDITION_TRUE (model, a, LE_EXPR, c);
+ ASSERT_CONDITION_UNKNOWN (model, a, EQ_EXPR, c);
+ }
+
+ /* Transitivity: "a > b", "b > c" should imply "a > c". */
+ {
+ region_model model;
+ ADD_SAT_CONSTRAINT (model, a, GT_EXPR, b);
+ ADD_SAT_CONSTRAINT (model, b, GT_EXPR, c);
+
+ ASSERT_CONDITION_TRUE (model, a, GT_EXPR, c);
+ ASSERT_CONDITION_FALSE (model, a, EQ_EXPR, c);
+ }
+
+ /* Transitivity: "a >= b", "b > c" should imply " a > c". */
+ {
+ region_model model;
+ ADD_SAT_CONSTRAINT (model, a, GE_EXPR, b);
+ ADD_SAT_CONSTRAINT (model, b, GT_EXPR, c);
+
+ ASSERT_CONDITION_TRUE (model, a, GT_EXPR, c);
+ ASSERT_CONDITION_FALSE (model, a, EQ_EXPR, c);
+ }
+
+ /* Transitivity: "a >= b", "b >= c" should imply "a >= c". */
+ {
+ region_model model;
+ ADD_SAT_CONSTRAINT (model, a, GE_EXPR, b);
+ ADD_SAT_CONSTRAINT (model, b, GE_EXPR, c);
+
+ ASSERT_CONDITION_TRUE (model, a, GE_EXPR, c);
+ ASSERT_CONDITION_UNKNOWN (model, a, EQ_EXPR, c);
+ }
+
+ /* Transitivity: "(a < b)", "(c < d)", "(b < c)" should
+ imply the easy cases:
+ (a < c)
+ (b < d)
+ but also that:
+ (a < d). */
+ {
+ region_model model;
+ ADD_SAT_CONSTRAINT (model, a, LT_EXPR, b);
+ ADD_SAT_CONSTRAINT (model, c, LT_EXPR, d);
+ ADD_SAT_CONSTRAINT (model, b, LT_EXPR, c);
+
+ ASSERT_CONDITION_TRUE (model, a, LT_EXPR, c);
+ ASSERT_CONDITION_TRUE (model, b, LT_EXPR, d);
+ ASSERT_CONDITION_TRUE (model, a, LT_EXPR, d);
+ }
+
+ /* Transitivity: "a >= b", "b >= a" should imply that a == b. */
+ {
+ region_model model;
+ ADD_SAT_CONSTRAINT (model, a, GE_EXPR, b);
+ ADD_SAT_CONSTRAINT (model, b, GE_EXPR, a);
+
+ // TODO:
+ ASSERT_CONDITION_TRUE (model, a, EQ_EXPR, b);
+ }
+
+ /* Transitivity: "a >= b", "b > a" should be impossible. */
+ {
+ region_model model;
+ ADD_SAT_CONSTRAINT (model, a, GE_EXPR, b);
+ ADD_UNSAT_CONSTRAINT (model, b, GT_EXPR, a);
+ }
+
+ /* Transitivity: "a >= b", "b >= c", "c >= a" should imply
+ that a == b == c. */
+ {
+ region_model model;
+ ADD_SAT_CONSTRAINT (model, a, GE_EXPR, b);
+ ADD_SAT_CONSTRAINT (model, b, GE_EXPR, c);
+ ADD_SAT_CONSTRAINT (model, c, GE_EXPR, a);
+
+ ASSERT_CONDITION_TRUE (model, a, EQ_EXPR, c);
+ }
+
+ /* Transitivity: "a > b", "b > c", "c > a"
+ should be impossible. */
+ {
+ region_model model;
+ ADD_SAT_CONSTRAINT (model, a, GT_EXPR, b);
+ ADD_SAT_CONSTRAINT (model, b, GT_EXPR, c);
+ ADD_UNSAT_CONSTRAINT (model, c, GT_EXPR, a);
+ }
+
+}
+
+/* Test various conditionals involving constants where the results
+ ought to be implied based on the values of the constants. */
+
+static void
+test_constant_comparisons ()
+{
+ tree int_3 = build_int_cst (integer_type_node, 3);
+ tree int_4 = build_int_cst (integer_type_node, 4);
+ tree int_5 = build_int_cst (integer_type_node, 5);
+
+ tree int_1023 = build_int_cst (integer_type_node, 1023);
+ tree int_1024 = build_int_cst (integer_type_node, 1024);
+
+ tree a = build_global_decl ("a", integer_type_node);
+ tree b = build_global_decl ("b", integer_type_node);
+
+ /* Given a >= 1024, then a <= 1023 should be impossible. */
+ {
+ region_model model;
+ ADD_SAT_CONSTRAINT (model, a, GE_EXPR, int_1024);
+ ADD_UNSAT_CONSTRAINT (model, a, LE_EXPR, int_1023);
+ }
+
+ /* a > 4. */
+ {
+ region_model model;
+ ADD_SAT_CONSTRAINT (model, a, GT_EXPR, int_4);
+ ASSERT_CONDITION_TRUE (model, a, GT_EXPR, int_4);
+ ASSERT_CONDITION_TRUE (model, a, NE_EXPR, int_3);
+ ASSERT_CONDITION_UNKNOWN (model, a, NE_EXPR, int_5);
+ }
+
+ /* a <= 4. */
+ {
+ region_model model;
+ ADD_SAT_CONSTRAINT (model, a, LE_EXPR, int_4);
+ ASSERT_CONDITION_FALSE (model, a, GT_EXPR, int_4);
+ ASSERT_CONDITION_FALSE (model, a, GT_EXPR, int_5);
+ ASSERT_CONDITION_UNKNOWN (model, a, NE_EXPR, int_3);
+ }
+
+ /* If "a > b" and "a == 3", then "b == 4" ought to be unsatisfiable. */
+ {
+ region_model model;
+ ADD_SAT_CONSTRAINT (model, a, GT_EXPR, b);
+ ADD_SAT_CONSTRAINT (model, a, EQ_EXPR, int_3);
+ ADD_UNSAT_CONSTRAINT (model, b, EQ_EXPR, int_4);
+ }
+
+ /* Various tests of int ranges where there is only one possible candidate. */
+ {
+ /* If "a <= 4" && "a > 3", then "a == 4",
+ assuming a is of integral type. */
+ {
+ region_model model;
+ ADD_SAT_CONSTRAINT (model, a, LE_EXPR, int_4);
+ ADD_SAT_CONSTRAINT (model, a, GT_EXPR, int_3);
+ ASSERT_CONDITION_TRUE (model, a, EQ_EXPR, int_4);
+ }
+
+ /* If "a > 3" && "a <= 4", then "a == 4",
+ assuming a is of integral type. */
+ {
+ region_model model;
+ ADD_SAT_CONSTRAINT (model, a, GT_EXPR, int_3);
+ ADD_SAT_CONSTRAINT (model, a, LE_EXPR, int_4);
+ ASSERT_CONDITION_TRUE (model, a, EQ_EXPR, int_4);
+ }
+ /* If "a > 3" && "a < 5", then "a == 4",
+ assuming a is of integral type. */
+ {
+ region_model model;
+ ADD_SAT_CONSTRAINT (model, a, GT_EXPR, int_3);
+ ADD_SAT_CONSTRAINT (model, a, LT_EXPR, int_5);
+ ASSERT_CONDITION_TRUE (model, a, EQ_EXPR, int_4);
+ }
+ /* If "a >= 4" && "a < 5", then "a == 4",
+ assuming a is of integral type. */
+ {
+ region_model model;
+ ADD_SAT_CONSTRAINT (model, a, GE_EXPR, int_4);
+ ADD_SAT_CONSTRAINT (model, a, LT_EXPR, int_5);
+ ASSERT_CONDITION_TRUE (model, a, EQ_EXPR, int_4);
+ }
+ /* If "a >= 4" && "a <= 4", then "a == 4". */
+ {
+ region_model model;
+ ADD_SAT_CONSTRAINT (model, a, GE_EXPR, int_4);
+ ADD_SAT_CONSTRAINT (model, a, LE_EXPR, int_4);
+ ASSERT_CONDITION_TRUE (model, a, EQ_EXPR, int_4);
+ }
+ }
+
+ /* As above, but for floating-point:
+ if "f > 3" && "f <= 4" we don't know that f == 4. */
+ {
+ tree f = build_global_decl ("f", double_type_node);
+ tree float_3 = build_real_from_int_cst (double_type_node, int_3);
+ tree float_4 = build_real_from_int_cst (double_type_node, int_4);
+
+ region_model model;
+ ADD_SAT_CONSTRAINT (model, f, GT_EXPR, float_3);
+ ADD_SAT_CONSTRAINT (model, f, LE_EXPR, float_4);
+ ASSERT_CONDITION_UNKNOWN (model, f, EQ_EXPR, float_4);
+ ASSERT_CONDITION_UNKNOWN (model, f, EQ_EXPR, int_4);
+ }
+}
+
+/* Verify various lower-level implementation details about
+ constraint_manager. */
+
+static void
+test_constraint_impl ()
+{
+ tree int_42 = build_int_cst (integer_type_node, 42);
+ tree int_0 = build_int_cst (integer_type_node, 0);
+
+ tree x = build_global_decl ("x", integer_type_node);
+ tree y = build_global_decl ("y", integer_type_node);
+ tree z = build_global_decl ("z", integer_type_node);
+
+ /* x == y. */
+ {
+ region_model model;
+
+ ADD_SAT_CONSTRAINT (model, x, EQ_EXPR, y);
+
+ /* Assert various things about the insides of model. */
+ constraint_manager *cm = model.get_constraints ();
+ ASSERT_EQ (cm->m_constraints.length (), 0);
+ ASSERT_EQ (cm->m_equiv_classes.length (), 1);
+ }
+
+ /* y <= z; x == y. */
+ {
+ region_model model;
+ ASSERT_CONDITION_UNKNOWN (model, x, EQ_EXPR, y);
+ ASSERT_CONDITION_UNKNOWN (model, x, GE_EXPR, z);
+
+ ADD_SAT_CONSTRAINT (model, y, GE_EXPR, z);
+ ASSERT_CONDITION_TRUE (model, y, GE_EXPR, z);
+ ASSERT_CONDITION_UNKNOWN (model, x, GE_EXPR, z);
+
+ ADD_SAT_CONSTRAINT (model, x, EQ_EXPR, y);
+
+ /* Assert various things about the insides of model. */
+ constraint_manager *cm = model.get_constraints ();
+ ASSERT_EQ (cm->m_constraints.length (), 1);
+ ASSERT_EQ (cm->m_equiv_classes.length (), 2);
+
+ /* Ensure that we merged the constraints. */
+ ASSERT_CONDITION_TRUE (model, x, GE_EXPR, z);
+ }
+
+ /* y <= z; y == x. */
+ {
+ region_model model;
+ ASSERT_CONDITION_UNKNOWN (model, x, EQ_EXPR, y);
+ ASSERT_CONDITION_UNKNOWN (model, x, GE_EXPR, z);
+
+ ADD_SAT_CONSTRAINT (model, y, GE_EXPR, z);
+ ASSERT_CONDITION_TRUE (model, y, GE_EXPR, z);
+ ASSERT_CONDITION_UNKNOWN (model, x, GE_EXPR, z);
+
+ ADD_SAT_CONSTRAINT (model, y, EQ_EXPR, x);
+
+ /* Assert various things about the insides of model. */
+ constraint_manager *cm = model.get_constraints ();
+ ASSERT_EQ (cm->m_constraints.length (), 1);
+ ASSERT_EQ (cm->m_equiv_classes.length (), 2);
+
+ /* Ensure that we merged the constraints. */
+ ASSERT_CONDITION_TRUE (model, x, GE_EXPR, z);
+ }
+
+ /* x == 0, then x != 42. */
+ {
+ region_model model;
+
+ ADD_SAT_CONSTRAINT (model, x, EQ_EXPR, int_0);
+ ADD_SAT_CONSTRAINT (model, x, NE_EXPR, int_42);
+
+ /* Assert various things about the insides of model. */
+ constraint_manager *cm = model.get_constraints ();
+ ASSERT_EQ (cm->m_constraints.length (), 1);
+ ASSERT_EQ (cm->m_equiv_classes.length (), 2);
+ ASSERT_EQ (cm->m_constraints[0].m_lhs,
+ cm->get_or_add_equiv_class (model.get_rvalue (int_0, NULL)));
+ ASSERT_EQ (cm->m_constraints[0].m_rhs,
+ cm->get_or_add_equiv_class (model.get_rvalue (int_42, NULL)));
+ ASSERT_EQ (cm->m_constraints[0].m_op, CONSTRAINT_LT);
+ }
+
+ // TODO: selftest for merging ecs "in the middle"
+ // where a non-final one gets overwritten
+
+ // TODO: selftest where there are pre-existing constraints
+}
+
+/* Check that operator== and hashing works as expected for the
+ various types. */
+
+static void
+test_equality ()
+{
+ tree x = build_global_decl ("x", integer_type_node);
+ tree y = build_global_decl ("y", integer_type_node);
+
+ {
+ region_model model0;
+ region_model model1;
+
+ constraint_manager *cm0 = model0.get_constraints ();
+ constraint_manager *cm1 = model1.get_constraints ();
+
+ ASSERT_EQ (cm0->hash (), cm1->hash ());
+ ASSERT_EQ (*cm0, *cm1);
+
+ ASSERT_EQ (model0.hash (), model1.hash ());
+ ASSERT_EQ (model0, model1);
+
+ ADD_SAT_CONSTRAINT (model1, x, EQ_EXPR, y);
+ ASSERT_NE (cm0->hash (), cm1->hash ());
+ ASSERT_NE (*cm0, *cm1);
+
+ ASSERT_NE (model0.hash (), model1.hash ());
+ ASSERT_NE (model0, model1);
+
+ region_model model2;
+ constraint_manager *cm2 = model2.get_constraints ();
+ /* Make the same change to cm2. */
+ ADD_SAT_CONSTRAINT (model2, x, EQ_EXPR, y);
+ ASSERT_EQ (cm1->hash (), cm2->hash ());
+ ASSERT_EQ (*cm1, *cm2);
+
+ ASSERT_EQ (model1.hash (), model2.hash ());
+ ASSERT_EQ (model1, model2);
+ }
+}
+
+/* Verify tracking inequality of a variable against many constants. */
+
+static void
+test_many_constants ()
+{
+ tree a = build_global_decl ("a", integer_type_node);
+
+ region_model model;
+ auto_vec<tree> constants;
+ for (int i = 0; i < 20; i++)
+ {
+ tree constant = build_int_cst (integer_type_node, i);
+ constants.safe_push (constant);
+ ADD_SAT_CONSTRAINT (model, a, NE_EXPR, constant);
+
+ /* Merge, and check the result. */
+ region_model other (model);
+
+ region_model merged;
+ ASSERT_TRUE (model.can_merge_with_p (other, &merged));
+ model.canonicalize (NULL);
+ merged.canonicalize (NULL);
+ ASSERT_EQ (model, merged);
+
+ for (int j = 0; j <= i; j++)
+ ASSERT_CONDITION_TRUE (model, a, NE_EXPR, constants[j]);
+ }
+}
+
+/* Run the selftests in this file, temporarily overriding
+ flag_analyzer_transitivity with TRANSITIVITY. */
+
+static void
+run_constraint_manager_tests (bool transitivity)
+{
+ int saved_flag_analyzer_transitivity = flag_analyzer_transitivity;
+ flag_analyzer_transitivity = transitivity;
+
+ test_constraint_conditions ();
+ if (flag_analyzer_transitivity)
+ {
+ /* These selftests assume transitivity. */
+ test_transitivity ();
+ test_constant_comparisons ();
+ }
+ test_constraint_impl ();
+ test_equality ();
+ test_many_constants ();
+
+ flag_analyzer_transitivity = saved_flag_analyzer_transitivity;
+}
+
+/* Run all of the selftests within this file. */
+
+void
+analyzer_constraint_manager_cc_tests ()
+{
+ /* Run the tests twice: with and without transitivity. */
+ run_constraint_manager_tests (true);
+ run_constraint_manager_tests (false);
+}
+
+} // namespace selftest
+
+#endif /* CHECKING_P */
+
+#endif /* #if ENABLE_ANALYZER */
--- /dev/null
+/* Tracking equivalence classes and constraints at a point on an execution path.
+ Copyright (C) 2019-2020 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_ANALYZER_CONSTRAINT_MANAGER_H
+#define GCC_ANALYZER_CONSTRAINT_MANAGER_H
+
+class constraint_manager;
+
+/* Abstract base class for specifying how state should be purged. */
+
+class purge_criteria
+{
+public:
+ virtual ~purge_criteria () {}
+ virtual bool should_purge_p (svalue_id sid) const = 0;
+};
+
+/* An equivalence class within a constraint manager: a set of
+ svalue_ids that are known to all be equal to each other,
+ together with an optional tree constant that they are equal to. */
+
+class equiv_class
+{
+public:
+ equiv_class ();
+ equiv_class (const equiv_class &other);
+
+ hashval_t hash () const;
+ bool operator== (const equiv_class &other);
+
+ void add (svalue_id sid, const constraint_manager &cm);
+ bool del (svalue_id sid);
+
+ tree get_any_constant () const { return m_constant; }
+
+ svalue_id get_representative () const;
+
+ void remap_svalue_ids (const svalue_id_map &map);
+
+ void canonicalize ();
+
+ void print (pretty_printer *pp) const;
+
+ /* An equivalence class can contain multiple constants (e.g. multiple
+ different zeroes, for different types); the fields below record only
+ the most recently added constant. */
+ tree m_constant;
+ svalue_id m_cst_sid;
+
+ // TODO: should this be a set rather than a vec?
+ auto_vec<svalue_id> m_vars;
+};
+
+/* The various kinds of constraint. */
+
+enum constraint_op
+{
+ CONSTRAINT_NE, /* The LHS is known to be not equal to the RHS. */
+ CONSTRAINT_LT, /* The LHS is known to be less than the RHS. */
+ CONSTRAINT_LE /* The LHS is known to be less than or equal to the RHS. */
+};
+
+const char *constraint_op_code (enum constraint_op c_op);
+
+/* An ID for an equiv_class within a constraint_manager. Internally, this
+ is an index into a vector of equiv_class * within the constraint_manager. */
+
+class equiv_class_id
+{
+public:
+ static equiv_class_id null () { return equiv_class_id (-1); }
+
+ equiv_class_id (unsigned idx) : m_idx (idx) {}
+ const equiv_class &get_obj (const constraint_manager &cm) const;
+ equiv_class &get_obj (constraint_manager &cm) const;
+
+ bool operator== (const equiv_class_id &other) const
+ {
+ return m_idx == other.m_idx;
+ }
+ bool operator!= (const equiv_class_id &other) const
+ {
+ return m_idx != other.m_idx;
+ }
+
+ bool null_p () const { return m_idx == -1; }
+
+ static equiv_class_id from_int (int idx) { return equiv_class_id (idx); }
+ int as_int () const { return m_idx; }
+
+ void print (pretty_printer *pp) const;
+
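+ /* Update this ID to reflect the removal of the EC with id OTHER:
+ IDs above it are shifted down by one. */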
+ void update_for_removal (equiv_class_id other)
+ {
+ if (m_idx > other.m_idx)
+ m_idx--;
+ }
+
+ int m_idx;
+};
+
+/* A relationship between two equivalence classes in a constraint_manager. */
+
+class constraint
+{
+ public:
+ constraint (equiv_class_id lhs, enum constraint_op c_op, equiv_class_id rhs)
+ : m_lhs (lhs), m_op (c_op), m_rhs (rhs)
+ {
+ gcc_assert (!lhs.null_p ());
+ gcc_assert (!rhs.null_p ());
+ }
+
+ void print (pretty_printer *pp, const constraint_manager &cm) const;
+
+ hashval_t hash () const;
+ bool operator== (const constraint &other) const;
+
+ /* Is this an ordering constraint (LT or LE), rather than a "!=" constraint? */
+ bool is_ordering_p () const
+ {
+ return m_op != CONSTRAINT_NE;
+ }
+
+ equiv_class_id m_lhs;
+ enum constraint_op m_op;
+ equiv_class_id m_rhs;
+};
+
+/* An abstract base class for use with constraint_manager::for_each_fact. */
+
+class fact_visitor
+{
+ public:
+ virtual ~fact_visitor () {}
+ virtual void on_fact (svalue_id lhs, enum tree_code, svalue_id rhs) = 0;
+};
+
+/* A collection of equivalence classes and constraints on them.
+
+ Given N svalues, this can be thought of as representing a subset of
+ N-dimensional space. When we call add_constraint,
+ we are effectively taking an intersection with that constraint. */
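+
+/* For example, with two svalues X and Y, adding the constraint "X == Y"
+ restricts the represented region to the diagonal X == Y, and then
+ additionally adding "X == 0" intersects it down to the single point
+ (0, 0). */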
+
+class constraint_manager
+{
+public:
+ constraint_manager () {}
+ constraint_manager (const constraint_manager &other);
+ virtual ~constraint_manager () {}
+
+ virtual constraint_manager *clone (region_model *) const = 0;
+ virtual tree maybe_get_constant (svalue_id sid) const = 0;
+ virtual svalue_id get_sid_for_constant (tree cst) const = 0;
+ virtual int get_num_svalues () const = 0;
+
+ constraint_manager& operator= (const constraint_manager &other);
+
+ hashval_t hash () const;
+ bool operator== (const constraint_manager &other) const;
+ bool operator!= (const constraint_manager &other) const
+ {
+ return !(*this == other);
+ }
+
+ void print (pretty_printer *pp) const;
+ void dump_to_pp (pretty_printer *pp) const;
+ void dump (FILE *fp) const;
+ void dump () const;
+
+ const equiv_class &get_equiv_class_by_index (unsigned idx) const
+ {
+ return *m_equiv_classes[idx];
+ }
+ equiv_class &get_equiv_class_by_index (unsigned idx)
+ {
+ return *m_equiv_classes[idx];
+ }
+
+ equiv_class &get_equiv_class (svalue_id sid)
+ {
+ equiv_class_id ec_id = get_or_add_equiv_class (sid);
+ return ec_id.get_obj (*this);
+ }
+
+ bool add_constraint (svalue_id lhs, enum tree_code op, svalue_id rhs);
+
+ bool add_constraint (equiv_class_id lhs_ec_id,
+ enum tree_code op,
+ equiv_class_id rhs_ec_id);
+
+ bool get_equiv_class_by_sid (svalue_id sid, equiv_class_id *out) const;
+ equiv_class_id get_or_add_equiv_class (svalue_id sid);
+ tristate eval_condition (equiv_class_id lhs,
+ enum tree_code op,
+ equiv_class_id rhs);
+ tristate eval_condition (svalue_id lhs,
+ enum tree_code op,
+ svalue_id rhs);
+
+ void purge (const purge_criteria &p, purge_stats *stats);
+
+ void remap_svalue_ids (const svalue_id_map &map);
+
+ void canonicalize (unsigned num_svalue_ids);
+
+ static void merge (const constraint_manager &cm_a,
+ const constraint_manager &cm_b,
+ constraint_manager *out,
+ const model_merger &merger);
+
+ void for_each_fact (fact_visitor *) const;
+
+ void validate () const;
+
+ auto_delete_vec<equiv_class> m_equiv_classes;
+ auto_vec<constraint> m_constraints;
+
+ private:
+ static void clean_merger_input (const constraint_manager &cm_in,
+ const one_way_svalue_id_map &map_sid_to_m,
+ constraint_manager *out);
+
+ void add_constraint_internal (equiv_class_id lhs_id,
+ enum constraint_op c_op,
+ equiv_class_id rhs_id);
+};
+
+#endif /* GCC_ANALYZER_CONSTRAINT_MANAGER_H */
--- /dev/null
+/* Classes for saving, deduplicating, and emitting analyzer diagnostics.
+ Copyright (C) 2019-2020 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tree.h"
+#include "pretty-print.h"
+#include "gcc-rich-location.h"
+#include "gimple-pretty-print.h"
+#include "function.h"
+#include "diagnostic-core.h"
+#include "diagnostic-event-id.h"
+#include "diagnostic-path.h"
+#include "alloc-pool.h"
+#include "fibonacci_heap.h"
+#include "shortest-paths.h"
+#include "sbitmap.h"
+#include "tristate.h"
+#include "selftest.h"
+#include "ordered-hash-map.h"
+#include "analyzer/analyzer.h"
+#include "analyzer/analyzer-logging.h"
+#include "analyzer/sm.h"
+#include "analyzer/pending-diagnostic.h"
+#include "analyzer/diagnostic-manager.h"
+#include "analyzer/region-model.h"
+#include "analyzer/constraint-manager.h"
+#include "cfg.h"
+#include "basic-block.h"
+#include "gimple.h"
+#include "gimple-iterator.h"
+#include "cgraph.h"
+#include "digraph.h"
+#include "analyzer/supergraph.h"
+#include "analyzer/call-string.h"
+#include "analyzer/program-point.h"
+#include "analyzer/program-state.h"
+#include "analyzer/exploded-graph.h"
+#include "analyzer/checker-path.h"
+
+#if ENABLE_ANALYZER
+
+/* class saved_diagnostic. */
+
+/* saved_diagnostic's ctor.
+ Take ownership of D and STMT_FINDER. */
+
+saved_diagnostic::saved_diagnostic (const state_machine *sm,
+ const exploded_node *enode,
+ const supernode *snode, const gimple *stmt,
+ stmt_finder *stmt_finder,
+ tree var, state_machine::state_t state,
+ pending_diagnostic *d)
+: m_sm (sm), m_enode (enode), m_snode (snode), m_stmt (stmt),
+ /* stmt_finder could be on-stack; we want our own copy that can
+ outlive that. */
+ m_stmt_finder (stmt_finder ? stmt_finder->clone () : NULL),
+ m_var (var), m_state (state),
+ m_d (d), m_trailing_eedge (NULL)
+{
+ gcc_assert (m_stmt || m_stmt_finder);
+
+ /* We must have an enode in order to be able to look for paths
+ through the exploded_graph to this diagnostic. */
+ gcc_assert (m_enode);
+}
+
+/* saved_diagnostic's dtor. */
+
+saved_diagnostic::~saved_diagnostic ()
+{
+ delete m_stmt_finder;
+ delete m_d;
+}
+
+/* class diagnostic_manager. */
+
+/* diagnostic_manager's ctor. */
+
+diagnostic_manager::diagnostic_manager (logger *logger, int verbosity)
+: log_user (logger), m_verbosity (verbosity)
+{
+}
+
+/* Queue pending_diagnostic D at ENODE for later emission. */
+
+void
+diagnostic_manager::add_diagnostic (const state_machine *sm,
+ const exploded_node *enode,
+ const supernode *snode, const gimple *stmt,
+ stmt_finder *finder,
+ tree var, state_machine::state_t state,
+ pending_diagnostic *d)
+{
+ LOG_FUNC (get_logger ());
+
+ /* We must have an enode in order to be able to look for paths
+ through the exploded_graph to the diagnostic. */
+ gcc_assert (enode);
+
+ saved_diagnostic *sd
+ = new saved_diagnostic (sm, enode, snode, stmt, finder, var, state, d);
+ m_saved_diagnostics.safe_push (sd);
+ if (get_logger ())
+ log ("adding saved diagnostic %i at SN %i: %qs",
+ m_saved_diagnostics.length () - 1,
+ snode->m_index, d->get_kind ());
+}
+
+/* Queue pending_diagnostic D at ENODE for later emission. */
+
+void
+diagnostic_manager::add_diagnostic (const exploded_node *enode,
+ const supernode *snode, const gimple *stmt,
+ stmt_finder *finder,
+ pending_diagnostic *d)
+{
+ gcc_assert (enode);
+ add_diagnostic (NULL, enode, snode, stmt, finder, NULL_TREE, 0, d);
+}
+
+/* A class for identifying sets of duplicated pending_diagnostics.
+
+ We want to find the simplest dedupe_candidate amongst those that share a
+ dedupe_key. */
+
+class dedupe_key
+{
+public:
+ dedupe_key (const saved_diagnostic &sd,
+ const exploded_path &epath)
+ : m_sd (sd), m_stmt (sd.m_stmt)
+ {
+ /* Support deferring the choice of stmt until after an emission path has
+ been built, using an optional stmt_finder. */
+ if (m_stmt == NULL)
+ {
+ gcc_assert (sd.m_stmt_finder);
+ m_stmt = sd.m_stmt_finder->find_stmt (epath);
+ }
+ gcc_assert (m_stmt);
+ }
+
+ hashval_t hash () const
+ {
+ inchash::hash hstate;
+ hstate.add_ptr (m_stmt);
+ // TODO: m_sd
+ return hstate.end ();
+ }
+ bool operator== (const dedupe_key &other) const
+ {
+ return (m_sd == other.m_sd
+ && m_stmt == other.m_stmt);
+ }
+
+ location_t get_location () const
+ {
+ return m_stmt->location;
+ }
+
+ /* A qsort comparator for use by dedupe_winners::emit_best
+ to sort them into location_t order. */
+
+ static int
+ comparator (const void *p1, const void *p2)
+ {
+ const dedupe_key *pk1 = *(const dedupe_key * const *)p1;
+ const dedupe_key *pk2 = *(const dedupe_key * const *)p2;
+
+ location_t loc1 = pk1->get_location ();
+ location_t loc2 = pk2->get_location ();
+
+ return linemap_compare_locations (line_table, loc2, loc1);
+ }
+
+ const saved_diagnostic &m_sd;
+ const gimple *m_stmt;
+};
+
+/* The value of a slot for a dedupe_key within dedupe_winners:
+ the exploded_path for the best candidate for that key, and the
+ number of duplicates seen so far. */
+
+class dedupe_candidate
+{
+public:
+ /* Construct a dedupe_candidate for SD, using SP to get the shortest
+ exploded_path to SD's exploded_node. */
+ dedupe_candidate (const shortest_exploded_paths &sp,
+ const saved_diagnostic &sd)
+ : m_epath (sp.get_shortest_path (sd.m_enode)),
+ m_num_dupes (0)
+ {
+ }
+
+ unsigned length () const { return m_epath.length (); }
+ const exploded_path &get_path () const { return m_epath; }
+
+ void add_duplicate () { m_num_dupes++; }
+ int get_num_dupes () const { return m_num_dupes; }
+
+private:
+ exploded_path m_epath;
+public:
+ int m_num_dupes;
+};
+
+/* Traits for use by dedupe_winners. */
+
+class dedupe_hash_map_traits
+{
+public:
+ typedef const dedupe_key *key_type;
+ typedef dedupe_candidate *value_type;
+ typedef dedupe_candidate *compare_type;
+
+ static inline hashval_t hash (const key_type &v)
+ {
+ return v->hash ();
+ }
+ static inline bool equal_keys (const key_type &k1, const key_type &k2)
+ {
+ return *k1 == *k2;
+ }
+ template <typename T>
+ static inline void remove (T &)
+ {
+ // TODO
+ }
+ template <typename T>
+ static inline void mark_deleted (T &entry)
+ {
+ entry.m_key = reinterpret_cast<key_type> (1);
+ }
+ template <typename T>
+ static inline void mark_empty (T &entry)
+ {
+ entry.m_key = NULL;
+ }
+ template <typename T>
+ static inline bool is_deleted (const T &entry)
+ {
+ return entry.m_key == reinterpret_cast<key_type> (1);
+ }
+ template <typename T>
+ static inline bool is_empty (const T &entry)
+ {
+ return entry.m_key == NULL;
+ }
+ static const bool empty_zero_p = true;
+};
+
+/* A class for deduplicating diagnostics and finding (and emitting) the
+ best diagnostic within each partition. */
+
+class dedupe_winners
+{
+public:
+ ~dedupe_winners ()
+ {
+ /* Delete all keys and candidates. */
+ for (map_t::iterator iter = m_map.begin ();
+ iter != m_map.end ();
+ ++iter)
+ {
+ delete (*iter).first;
+ delete (*iter).second;
+ }
+ }
+
+ /* Determine an exploded_path for SD using SP and, if it's feasible,
+ determine if it's the best seen so far for its dedupe_key.
+ Retain the winner for each dedupe_key, and discard the rest. */
+
+ void add (logger *logger,
+ const shortest_exploded_paths &sp,
+ const saved_diagnostic &sd)
+ {
+ /* Build a dedupe_candidate for SD.
+ This uses SP to build an exploded_path. */
+ dedupe_candidate *dc = new dedupe_candidate (sp, sd);
+
+ /* Verify that the epath is feasible.
+ State-merging means that not every path through the exploded graph
+ corresponds to a feasible execution w.r.t. the underlying states.
+ Here we simply check each duplicate saved_diagnostic's
+ shortest_path, and reject any that aren't feasible.
+ This could introduce false negatives, as there could be longer
+ feasible paths within the egraph. */
+ if (logger)
+ logger->log ("considering %qs at SN: %i",
+ sd.m_d->get_kind (), sd.m_snode->m_index);
+ if (!dc->get_path ().feasible_p (logger))
+ {
+ if (logger)
+ logger->log ("rejecting %qs at SN: %i"
+ " due to infeasible path",
+ sd.m_d->get_kind (), sd.m_snode->m_index);
+ delete dc;
+ return;
+ }
+ else
+ if (logger)
+ logger->log ("accepting %qs at SN: %i with feasible path",
+ sd.m_d->get_kind (), sd.m_snode->m_index);
+
+ dedupe_key *key = new dedupe_key (sd, dc->get_path ());
+ if (dedupe_candidate **slot = m_map.get (key))
+ {
+ (*slot)->add_duplicate ();
+
+ if (dc->length () < (*slot)->length ())
+ {
+ /* We've got a shorter path for the key; replace
+ the current candidate. */
+ dc->m_num_dupes = (*slot)->get_num_dupes ();
+ delete *slot;
+ *slot = dc;
+ }
+ else
+ /* We haven't beaten the current best candidate;
+ drop the new candidate. */
+ delete dc;
+ delete key;
+ }
+ else
+ /* This is the first candidate for this key. */
+ m_map.put (key, dc);
+ }
+
+ /* Emit the simplest diagnostic within each set. */
+
+ void emit_best (diagnostic_manager *dm,
+ const exploded_graph &eg)
+ {
+ LOG_SCOPE (dm->get_logger ());
+
+ /* Get keys into a vec for sorting. */
+ auto_vec<const dedupe_key *> keys (m_map.elements ());
+ for (map_t::iterator iter = m_map.begin ();
+ iter != m_map.end ();
+ ++iter)
+ keys.quick_push ((*iter).first);
+
+ dm->log ("# keys after de-duplication: %i", keys.length ());
+
+ /* Sort into a good emission order. */
+ keys.qsort (dedupe_key::comparator);
+
+ /* Emit the best candidate for each key. */
+ int i;
+ const dedupe_key *key;
+ FOR_EACH_VEC_ELT (keys, i, key)
+ {
+ dedupe_candidate **slot = m_map.get (key);
+ gcc_assert (*slot);
+ const dedupe_candidate &dc = **slot;
+
+ dm->emit_saved_diagnostic (eg, key->m_sd,
+ dc.get_path (), key->m_stmt,
+ dc.get_num_dupes ());
+ }
+ }
+
+private:
+
+ /* This maps from each dedupe_key to a current best dedupe_candidate. */
+
+ typedef hash_map<const dedupe_key *, dedupe_candidate *,
+ dedupe_hash_map_traits> map_t;
+ map_t m_map;
+};
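+
+/* For example (illustrative): if the same warning -- say, a double-free
+ of the same variable at the same statement -- is saved from two
+ different exploded_nodes, the two saved_diagnostics compare equal and
+ hence share a dedupe_key. dedupe_winners::add keeps whichever candidate
+ has the shorter feasible exploded_path and counts the other via
+ add_duplicate, which emit_saved_diagnostic later reports as a
+ "duplicates" note. */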
+
+/* Emit all saved diagnostics. */
+
+void
+diagnostic_manager::emit_saved_diagnostics (const exploded_graph &eg)
+{
+ LOG_SCOPE (get_logger ());
+ auto_timevar tv (TV_ANALYZER_DIAGNOSTICS);
+ log ("# saved diagnostics: %i", m_saved_diagnostics.length ());
+
+ if (m_saved_diagnostics.length () == 0)
+ return;
+
+ /* Compute the shortest_paths once, sharing it between all diagnostics. */
+ shortest_exploded_paths sp (eg, eg.get_origin ());
+
+ /* Iterate through all saved diagnostics, adding them to a dedupe_winners
+ instance. This partitions the saved diagnostics by dedupe_key,
+ generating exploded_paths for them, and retaining the best one in each
+ partition. */
+ dedupe_winners best_candidates;
+
+ int i;
+ saved_diagnostic *sd;
+ FOR_EACH_VEC_ELT (m_saved_diagnostics, i, sd)
+ best_candidates.add (get_logger (), sp, *sd);
+
+ /* For each dedupe-key, call emit_saved_diagnostic on the "best"
+ saved_diagnostic. */
+ best_candidates.emit_best (this, eg);
+}
+
+/* Given a saved_diagnostic SD at STMT with feasible path EPATH through EG,
+ create a checker_path of suitable events and use it to call
+ SD's underlying pending_diagnostic "emit" vfunc to emit a diagnostic. */
+
+void
+diagnostic_manager::emit_saved_diagnostic (const exploded_graph &eg,
+ const saved_diagnostic &sd,
+ const exploded_path &epath,
+ const gimple *stmt,
+ int num_dupes)
+{
+ LOG_SCOPE (get_logger ());
+ log ("sd: %qs at SN: %i", sd.m_d->get_kind (), sd.m_snode->m_index);
+ log ("num dupes: %i", num_dupes);
+
+ pretty_printer *pp = global_dc->printer->clone ();
+
+ checker_path emission_path;
+
+ /* Populate emission_path with a full description of EPATH. */
+ build_emission_path (eg, epath, &emission_path);
+
+ /* Now prune it to just cover the most pertinent events. */
+ prune_path (&emission_path, sd.m_sm, sd.m_var, sd.m_state);
+
+ /* Add a final event to the path, covering the diagnostic itself.
+ We use the final enode from the epath, which might be different from
+ the sd.m_enode, as the dedupe code doesn't care about enodes, just
+ snodes. */
+ emission_path.add_final_event (sd.m_sm, epath.get_final_enode (), stmt,
+ sd.m_var, sd.m_state);
+
+ /* The "final" event might not be final; if the saved_diagnostic has a
+ trailing eedge stashed, add any events for it. This is for use
+ in handling longjmp, to show where a longjmp is rewinding to. */
+ if (sd.m_trailing_eedge)
+ add_events_for_eedge (*sd.m_trailing_eedge, eg.get_ext_state (),
+ &emission_path);
+
+ emission_path.prepare_for_emission (sd.m_d);
+
+ gcc_rich_location rich_loc (stmt->location);
+ rich_loc.set_path (&emission_path);
+
+ auto_diagnostic_group d;
+ auto_cfun sentinel (sd.m_snode->m_fun);
+ if (sd.m_d->emit (&rich_loc))
+ {
+ if (num_dupes > 0)
+ inform_n (stmt->location, num_dupes,
+ "%i duplicate", "%i duplicates",
+ num_dupes);
+ }
+ delete pp;
+}
+
+/* Given a state change to DST_REP, determine a tree that gives the origin
+ of that state at STMT, using DST_STATE's region model, so that state
+ changes based on assignments can be tracked back to their origins.
+
+ For example, if we have
+
+ (S1) _1 = malloc (64);
+ (S2) EXPR = _1;
+
+ then at stmt S2 we can get the origin of EXPR's state as being _1,
+ and thus track the allocation back to S1. */
+
+static tree
+get_any_origin (const gimple *stmt,
+ tree dst_rep,
+ const program_state &dst_state)
+{
+ if (!stmt)
+ return NULL_TREE;
+
+ gcc_assert (dst_rep);
+
+ if (const gassign *assign = dyn_cast <const gassign *> (stmt))
+ {
+ tree lhs = gimple_assign_lhs (assign);
+ /* Use region IDs to compare lhs with DST_REP. */
+ if (dst_state.m_region_model->get_lvalue (lhs, NULL)
+ == dst_state.m_region_model->get_lvalue (dst_rep, NULL))
+ {
+ tree rhs1 = gimple_assign_rhs1 (assign);
+ enum tree_code op = gimple_assign_rhs_code (assign);
+ switch (op)
+ {
+ default:
+ //gcc_unreachable (); // TODO
+ break;
+ case COMPONENT_REF:
+ case SSA_NAME:
+ return rhs1;
+ }
+ }
+ }
+ return NULL_TREE;
+}
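+
+/* For example (illustrative): given the S1/S2 snippet in the comment
+ above, get_any_origin (S2, EXPR, state) returns _1 via the SSA_NAME
+ case, which lets prune_for_sm_diagnostic (below) retarget the variable
+ of interest from EXPR back to _1, and thus retain the earlier events
+ describing the allocation at S1. */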
+
+/* Emit a "path" of events to EMISSION_PATH describing the exploded path
+ EPATH within EG. */
+
+void
+diagnostic_manager::build_emission_path (const exploded_graph &eg,
+ const exploded_path &epath,
+ checker_path *emission_path) const
+{
+ LOG_SCOPE (get_logger ());
+ const extrinsic_state &ext_state = eg.get_ext_state ();
+ for (unsigned i = 0; i < epath.m_edges.length (); i++)
+ {
+ const exploded_edge *eedge = epath.m_edges[i];
+ add_events_for_eedge (*eedge, ext_state, emission_path);
+ }
+}
+
+/* Subclass of state_change_visitor that creates state_change_event
+ instances. */
+
+class state_change_event_creator : public state_change_visitor
+{
+public:
+ state_change_event_creator (const exploded_edge &eedge,
+ checker_path *emission_path)
+ : m_eedge (eedge),
+ m_emission_path (emission_path)
+ {}
+
+ bool on_global_state_change (const state_machine &sm,
+ state_machine::state_t src_sm_val,
+ state_machine::state_t dst_sm_val)
+ FINAL OVERRIDE
+ {
+ const exploded_node *src_node = m_eedge.m_src;
+ const program_point &src_point = src_node->get_point ();
+ const int src_stack_depth = src_point.get_stack_depth ();
+ const exploded_node *dst_node = m_eedge.m_dest;
+ const gimple *stmt = src_point.get_stmt ();
+ const supernode *supernode = src_point.get_supernode ();
+ const program_state &dst_state = dst_node->get_state ();
+
+ int stack_depth = src_stack_depth;
+
+ m_emission_path->add_event (new state_change_event (supernode,
+ stmt,
+ stack_depth,
+ sm,
+ NULL_TREE,
+ src_sm_val,
+ dst_sm_val,
+ NULL_TREE,
+ dst_state));
+ return false;
+ }
+
+ bool on_state_change (const state_machine &sm,
+ state_machine::state_t src_sm_val,
+ state_machine::state_t dst_sm_val,
+ tree dst_rep,
+ svalue_id dst_origin_sid) FINAL OVERRIDE
+ {
+ const exploded_node *src_node = m_eedge.m_src;
+ const program_point &src_point = src_node->get_point ();
+ const int src_stack_depth = src_point.get_stack_depth ();
+ const exploded_node *dst_node = m_eedge.m_dest;
+ const gimple *stmt = src_point.get_stmt ();
+ const supernode *supernode = src_point.get_supernode ();
+ const program_state &dst_state = dst_node->get_state ();
+
+ int stack_depth = src_stack_depth;
+
+ if (m_eedge.m_sedge
+ && m_eedge.m_sedge->m_kind == SUPEREDGE_CFG_EDGE)
+ {
+ supernode = src_point.get_supernode ();
+ stmt = supernode->get_last_stmt ();
+ stack_depth = src_stack_depth;
+ }
+
+ /* Bulletproofing for state changes at calls/returns;
+ TODO: is there a better way? */
+ if (!stmt)
+ return false;
+
+ tree origin_rep
+ = dst_state.get_representative_tree (dst_origin_sid);
+
+ if (origin_rep == NULL_TREE)
+ origin_rep = get_any_origin (stmt, dst_rep, dst_state);
+ m_emission_path->add_event (new state_change_event (supernode,
+ stmt,
+ stack_depth,
+ sm,
+ dst_rep,
+ src_sm_val,
+ dst_sm_val,
+ origin_rep,
+ dst_state));
+ return false;
+ }
+
+ const exploded_edge &m_eedge;
+ checker_path *m_emission_path;
+};
+
+/* Compare SRC_STATE and DST_STATE (which use EXT_STATE), and call
+ VISITOR's on_state_change for every sm-state change that occurs
+ to a tree, and on_global_state_change for every global state change
+ that occurs.
+
+ This determines the state changes that ought to be reported to
+ the user: a combination of the effects of changes to sm_state_map
+ (which maps svalues to sm-states), and of region_model changes
+ (which map trees to svalues).
+
+ Bail out early and return true if any call to on_global_state_change
+ or on_state_change returns true, otherwise return false.
+
+ This is split out to make it easier to experiment with changes to
+ exploded_node granularity (so that we can observe what state changes
+ lead to state_change_events being emitted). */
+
+bool
+for_each_state_change (const program_state &src_state,
+ const program_state &dst_state,
+ const extrinsic_state &ext_state,
+ state_change_visitor *visitor)
+{
+ gcc_assert (src_state.m_checker_states.length ()
+ == ext_state.m_checkers.length ());
+ gcc_assert (dst_state.m_checker_states.length ()
+ == ext_state.m_checkers.length ());
+ for (unsigned i = 0; i < ext_state.m_checkers.length (); i++)
+ {
+ const state_machine &sm = ext_state.get_sm (i);
+ const sm_state_map &src_smap = *src_state.m_checker_states[i];
+ const sm_state_map &dst_smap = *dst_state.m_checker_states[i];
+
+ /* Add events for any global state changes. */
+ if (src_smap.get_global_state () != dst_smap.get_global_state ())
+ if (visitor->on_global_state_change (sm,
+ src_smap.get_global_state (),
+ dst_smap.get_global_state ()))
+ return true;
+
+ /* Add events for per-svalue state changes. */
+ for (sm_state_map::iterator_t iter = dst_smap.begin ();
+ iter != dst_smap.end ();
+ ++iter)
+ {
+ /* Ideally we'd directly compare the SM state between src state
+ and dst state, but there's no guarantee that the IDs can
+ be meaningfully compared. */
+ svalue_id dst_sid = (*iter).first;
+ state_machine::state_t dst_sm_val = (*iter).second.m_state;
+
+ auto_vec<path_var> dst_pvs;
+ dst_state.m_region_model->get_path_vars_for_svalue (dst_sid,
+ &dst_pvs);
+
+ unsigned j;
+ path_var *dst_pv;
+ FOR_EACH_VEC_ELT (dst_pvs, j, dst_pv)
+ {
+ tree dst_rep = dst_pv->m_tree;
+ gcc_assert (dst_rep);
+ if (dst_pv->m_stack_depth
+ >= src_state.m_region_model->get_stack_depth ())
+ continue;
+ svalue_id src_sid
+ = src_state.m_region_model->get_rvalue (*dst_pv, NULL);
+ if (src_sid.null_p ())
+ continue;
+ state_machine::state_t src_sm_val = src_smap.get_state (src_sid);
+ if (dst_sm_val != src_sm_val)
+ {
+ svalue_id dst_origin_sid = (*iter).second.m_origin;
+ if (visitor->on_state_change (sm, src_sm_val, dst_sm_val,
+ dst_rep, dst_origin_sid))
+ return true;
+ }
+ }
+ }
+ }
+ return false;
+}
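+
+/* For example, a minimal (hypothetical) visitor that merely counts the
+ state changes reported by for_each_state_change could be written as:
+
+     class state_change_counter : public state_change_visitor
+     {
+     public:
+       state_change_counter () : m_count (0) {}
+
+       bool on_global_state_change (const state_machine &,
+                                    state_machine::state_t,
+                                    state_machine::state_t) FINAL OVERRIDE
+       { m_count++; return false; }
+
+       bool on_state_change (const state_machine &,
+                             state_machine::state_t,
+                             state_machine::state_t,
+                             tree, svalue_id) FINAL OVERRIDE
+       { m_count++; return false; }
+
+       int m_count;
+     };
+
+   Returning false from both vfuncs keeps the traversal going, so every
+   change between the two states is visited rather than bailing out
+   early. */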
+
+/* Subroutine of diagnostic_manager::build_emission_path.
+ Add any events for EEDGE to EMISSION_PATH. */
+
+void
+diagnostic_manager::add_events_for_eedge (const exploded_edge &eedge,
+ const extrinsic_state &ext_state,
+ checker_path *emission_path) const
+{
+ const exploded_node *src_node = eedge.m_src;
+ const program_point &src_point = src_node->get_point ();
+ const exploded_node *dst_node = eedge.m_dest;
+ const program_point &dst_point = dst_node->get_point ();
+ const int dst_stack_depth = dst_point.get_stack_depth ();
+ if (get_logger ())
+ {
+ get_logger ()->start_log_line ();
+ pretty_printer *pp = get_logger ()->get_printer ();
+ pp_printf (pp, "EN %i -> EN %i: ",
+ eedge.m_src->m_index,
+ eedge.m_dest->m_index);
+ src_point.print (pp, format (false));
+ pp_string (pp, "-> ");
+ dst_point.print (pp, format (false));
+ get_logger ()->end_log_line ();
+ }
+ const program_state &src_state = src_node->get_state ();
+ const program_state &dst_state = dst_node->get_state ();
+
+ /* Add state change events for the states that have changed.
+ We add these before events for superedges, so that if we have a
+ state_change_event due to following an edge, we'll get this sequence
+ of events:
+
+ | if (!ptr)
+ | ~
+ | |
+ | (1) assuming 'ptr' is non-NULL (state_change_event)
+ | (2) following 'false' branch... (start_cfg_edge_event)
+ ...
+ | do_something (ptr);
+ | ~~~~~~~~~~~~~^~~~~
+ | |
+ | (3) ...to here (end_cfg_edge_event). */
+ state_change_event_creator visitor (eedge, emission_path);
+ for_each_state_change (src_state, dst_state, ext_state,
+ &visitor);
+
+ /* Allow non-standard edges to add events, e.g. when rewinding from
+ longjmp to a setjmp. */
+ if (eedge.m_custom_info)
+ eedge.m_custom_info->add_events_to_path (emission_path, eedge);
+
+ /* Add events for superedges, function entries, and for statements. */
+ switch (dst_point.get_kind ())
+ {
+ default:
+ break;
+ case PK_BEFORE_SUPERNODE:
+ if (src_point.get_kind () == PK_AFTER_SUPERNODE)
+ {
+ if (eedge.m_sedge)
+ add_events_for_superedge (eedge, emission_path);
+ }
+ /* Add function entry events. */
+ if (dst_point.get_supernode ()->entry_p ())
+ {
+ emission_path->add_event
+ (new function_entry_event
+ (dst_point.get_supernode ()->get_start_location (),
+ dst_point.get_fndecl (),
+ dst_stack_depth));
+ }
+ break;
+ case PK_BEFORE_STMT:
+ {
+ const gimple *stmt = dst_point.get_stmt ();
+ if (is_setjmp_call_p (stmt))
+ emission_path->add_event
+ (new setjmp_event (stmt->location,
+ dst_node,
+ dst_point.get_fndecl (),
+ dst_stack_depth));
+ else
+ emission_path->add_event
+ (new statement_event (stmt,
+ dst_point.get_fndecl (),
+ dst_stack_depth, dst_state));
+ }
+ break;
+ }
+}
+
+/* Subroutine of diagnostic_manager::add_events_for_eedge
+ where EEDGE has an underlying superedge i.e. a CFG edge,
+ or an interprocedural call/return.
+ Add any events for the superedge to EMISSION_PATH. */
+
+void
+diagnostic_manager::add_events_for_superedge (const exploded_edge &eedge,
+ checker_path *emission_path)
+ const
+{
+ gcc_assert (eedge.m_sedge);
+
+ const exploded_node *src_node = eedge.m_src;
+ const program_point &src_point = src_node->get_point ();
+ const exploded_node *dst_node = eedge.m_dest;
+ const program_point &dst_point = dst_node->get_point ();
+ const int src_stack_depth = src_point.get_stack_depth ();
+ const int dst_stack_depth = dst_point.get_stack_depth ();
+ const gimple *last_stmt = src_point.get_supernode ()->get_last_stmt ();
+
+ switch (eedge.m_sedge->m_kind)
+ {
+ case SUPEREDGE_CFG_EDGE:
+ {
+ emission_path->add_event
+ (new start_cfg_edge_event (eedge,
+ (last_stmt
+ ? last_stmt->location
+ : UNKNOWN_LOCATION),
+ src_point.get_fndecl (),
+ src_stack_depth));
+ emission_path->add_event
+ (new end_cfg_edge_event (eedge,
+ dst_point.get_supernode ()->get_start_location (),
+ dst_point.get_fndecl (),
+ dst_stack_depth));
+ }
+ break;
+
+ case SUPEREDGE_CALL:
+ {
+ emission_path->add_event
+ (new call_event (eedge,
+ (last_stmt
+ ? last_stmt->location
+ : UNKNOWN_LOCATION),
+ src_point.get_fndecl (),
+ src_stack_depth));
+ }
+ break;
+
+ case SUPEREDGE_INTRAPROCEDURAL_CALL:
+ {
+ /* TODO: add a subclass for this, or generate events for the
+ summary. */
+ emission_path->add_event
+ (new debug_event ((last_stmt
+ ? last_stmt->location
+ : UNKNOWN_LOCATION),
+ src_point.get_fndecl (),
+ src_stack_depth,
+ "call summary"));
+ }
+ break;
+
+ case SUPEREDGE_RETURN:
+ {
+ const return_superedge *return_edge
+ = as_a <const return_superedge *> (eedge.m_sedge);
+
+ const gcall *call_stmt = return_edge->get_call_stmt ();
+ emission_path->add_event
+ (new return_event (eedge,
+ (call_stmt
+ ? call_stmt->location
+ : UNKNOWN_LOCATION),
+ dst_point.get_fndecl (),
+ dst_stack_depth));
+ }
+ break;
+ }
+}
+
+/* Prune PATH, based on the verbosity level, to the most pertinent
+ events for a diagnostic that involves VAR ending in state STATE
+ (for state machine SM).
+
+ PATH is updated in place, and the redundant checker_events are deleted.
+
+ As well as deleting events, call record_critical_state on events in
+ which state critical to the pending_diagnostic is being handled; see
+ the comment for diagnostic_manager::prune_for_sm_diagnostic. */
+
+void
+diagnostic_manager::prune_path (checker_path *path,
+ const state_machine *sm,
+ tree var,
+ state_machine::state_t state) const
+{
+ LOG_FUNC (get_logger ());
+ path->maybe_log (get_logger (), "path");
+ prune_for_sm_diagnostic (path, sm, var, state);
+ prune_interproc_events (path);
+ finish_pruning (path);
+ path->maybe_log (get_logger (), "pruned");
+}
+
+/* First pass of diagnostic_manager::prune_path: apply verbosity level,
+ pruning unrelated state change events.
+
+ Iterate backwards through PATH, skipping state change events that don't
+ affect VAR, and updating the pertinent VAR when state-copying occurs.
+
+ As well as deleting events, call record_critical_state on events in
+ which state critical to the pending_diagnostic is being handled, so
+ that the event's get_desc vfunc can potentially supply a more precise
+ description of the event to the user.
+ e.g. improving
+ "calling 'foo' from 'bar'"
+ to
+ "passing possibly-NULL pointer 'ptr' to 'foo' from 'bar' as param 1"
+ when the diagnostic relates to later dereferencing 'ptr'. */
+
+void
+diagnostic_manager::prune_for_sm_diagnostic (checker_path *path,
+ const state_machine *sm,
+ tree var,
+ state_machine::state_t state) const
+{
+ int idx = path->m_events.length () - 1;
+ while (idx >= 0 && idx < (signed)path->m_events.length ())
+ {
+ checker_event *base_event = path->m_events[idx];
+ if (get_logger ())
+ {
+ if (sm)
+ {
+ if (var)
+ log ("considering event %i, with var: %qE, state: %qs",
+ idx, var, sm->get_state_name (state));
+ else
+ log ("considering event %i, with global state: %qs",
+ idx, sm->get_state_name (state));
+ }
+ else
+ log ("considering event %i", idx);
+ }
+ switch (base_event->m_kind)
+ {
+ default:
+ gcc_unreachable ();
+
+ case EK_DEBUG:
+ if (m_verbosity < 3)
+ {
+ log ("filtering event %i: debug event", idx);
+ path->delete_event (idx);
+ }
+ break;
+
+ case EK_CUSTOM:
+ /* Don't filter custom events. */
+ break;
+
+ case EK_STMT:
+ {
+ /* If this stmt is the origin of "var", update var. */
+ if (var)
+ {
+ statement_event *stmt_event = (statement_event *)base_event;
+ tree new_var = get_any_origin (stmt_event->m_stmt, var,
+ stmt_event->m_dst_state);
+ if (new_var)
+ {
+ log ("event %i: switching var of interest from %qE to %qE",
+ idx, var, new_var);
+ var = new_var;
+ }
+ }
+ if (m_verbosity < 3)
+ {
+ log ("filtering event %i: statement event", idx);
+ path->delete_event (idx);
+ }
+ }
+ break;
+
+ case EK_FUNCTION_ENTRY:
+ if (m_verbosity < 1)
+ {
+ log ("filtering event %i: function entry", idx);
+ path->delete_event (idx);
+ }
+ break;
+
+ case EK_STATE_CHANGE:
+ {
+ state_change_event *state_change = (state_change_event *)base_event;
+ if (state_change->get_lvalue (state_change->m_var)
+ == state_change->get_lvalue (var))
+ {
+ if (state_change->m_origin)
+ {
+ log ("event %i: switching var of interest from %qE to %qE",
+ idx, var, state_change->m_origin);
+ var = state_change->m_origin;
+ }
+ log ("event %i: switching state of interest from %qs to %qs",
+ idx, sm->get_state_name (state_change->m_to),
+ sm->get_state_name (state_change->m_from));
+ state = state_change->m_from;
+ }
+ else if (m_verbosity < 3)
+ {
+ if (var)
+ log ("filtering event %i:"
+ " state change to %qE unrelated to %qE",
+ idx, state_change->m_var, var);
+ else
+ log ("filtering event %i: state change to %qE",
+ idx, state_change->m_var);
+ path->delete_event (idx);
+ }
+ }
+ break;
+
+ case EK_START_CFG_EDGE:
+ {
+ cfg_edge_event *event = (cfg_edge_event *)base_event;
+ const cfg_superedge& cfg_superedge
+ = event->get_cfg_superedge ();
+ const supernode *dest = event->m_sedge->m_dest;
+ /* Do we have an SSA_NAME defined via a phi node in
+ the dest CFG node? */
+ if (var && TREE_CODE (var) == SSA_NAME)
+ if (SSA_NAME_DEF_STMT (var)->bb == dest->m_bb)
+ {
+ if (gphi *phi
+ = dyn_cast <gphi *> (SSA_NAME_DEF_STMT (var)))
+ {
+ /* Update var based on its phi node. */
+ tree old_var = var;
+ var = cfg_superedge.get_phi_arg (phi);
+ log ("updating from %qE to %qE based on phi node",
+ old_var, var);
+ if (get_logger ())
+ {
+ pretty_printer pp;
+ pp_gimple_stmt_1 (&pp, phi, 0, (dump_flags_t)0);
+ log (" phi: %s", pp_formatted_text (&pp));
+ }
+ }
+ }
+
+ /* TODO: is this edge significant to var?
+ See if var can be in other states in the dest, but not
+ in other states in the src?
+ Must have multiple sibling edges. */
+
+ if (event->should_filter_p (m_verbosity))
+ {
+ log ("filtering event %i: CFG edge", idx);
+ path->delete_event (idx);
+ /* Also delete the corresponding EK_END_CFG_EDGE. */
+ gcc_assert (path->m_events[idx]->m_kind == EK_END_CFG_EDGE);
+ path->delete_event (idx);
+ }
+ }
+ break;
+
+ case EK_END_CFG_EDGE:
+ /* These come in pairs with EK_START_CFG_EDGE events and are
+ filtered when their start event is filtered. */
+ break;
+
+ case EK_CALL_EDGE:
+ {
+ call_event *event = (call_event *)base_event;
+ const callgraph_superedge& cg_superedge
+ = event->get_callgraph_superedge ();
+ callsite_expr expr;
+ tree caller_var
+ = cg_superedge.map_expr_from_callee_to_caller (var, &expr);
+ if (caller_var)
+ {
+ log ("event %i:"
+ " switching var of interest"
+ " from %qE in callee to %qE in caller",
+ idx, var, caller_var);
+ var = caller_var;
+ if (expr.param_p ())
+ event->record_critical_state (var, state);
+ }
+ }
+ break;
+
+ case EK_RETURN_EDGE:
+ // TODO: potentially update var/state based on return value,
+ // args etc
+ {
+ if (var)
+ {
+ return_event *event = (return_event *)base_event;
+ const callgraph_superedge& cg_superedge
+ = event->get_callgraph_superedge ();
+ callsite_expr expr;
+ tree callee_var
+ = cg_superedge.map_expr_from_caller_to_callee (var, &expr);
+ if (callee_var)
+ {
+ log ("event %i:"
+ " switching var of interest"
+ " from %qE in caller to %qE in callee",
+ idx, var, callee_var);
+ var = callee_var;
+ if (expr.return_value_p ())
+ event->record_critical_state (var, state);
+ }
+ }
+ }
+ break;
+
+ case EK_SETJMP:
+ /* TODO: only show setjmp_events that matter, i.e. those for which
+ there is a later rewind event using them. */
+ case EK_REWIND_FROM_LONGJMP:
+ case EK_REWIND_TO_SETJMP:
+ break;
+
+ case EK_WARNING:
+ /* Always show the final "warning" event in the path. */
+ break;
+ }
+ idx--;
+ }
+}
+
+/* Second pass of diagnostic_manager::prune_path: remove redundant
+ interprocedural information.
+
+ For example, given:
+ (1)- calling "f2" from "f1"
+ (2)--- entry to "f2"
+ (3)--- calling "f3" from "f2"
+ (4)----- entry to "f3"
+ (5)--- returning to "f2" from "f3"
+ (6)- returning to "f1" from "f2"
+ with no other intervening events, then none of these events are
+ likely to be interesting to the user.
+
+ Prune [..., call, function-entry, return, ...] triples repeatedly
+ until nothing has changed. For the example above, this would
+ remove events (3, 4, 5), and then remove events (1, 2, 6). */
+
+void
+diagnostic_manager::prune_interproc_events (checker_path *path) const
+{
+ bool changed = false;
+ do
+ {
+ changed = false;
+ int idx = path->m_events.length () - 1;
+ while (idx >= 0)
+ {
+ /* Prune [..., call, function-entry, return, ...] triples. */
+ if (idx + 2 < (signed)path->m_events.length ()
+ && path->m_events[idx]->is_call_p ()
+ && path->m_events[idx + 1]->is_function_entry_p ()
+ && path->m_events[idx + 2]->is_return_p ())
+ {
+ if (get_logger ())
+ {
+ label_text desc (path->m_events[idx]->get_desc (false));
+ log ("filtering events %i-%i:"
+ " irrelevant call/entry/return: %s",
+ idx, idx + 2, desc.m_buffer);
+ desc.maybe_free ();
+ }
+ path->delete_event (idx + 2);
+ path->delete_event (idx + 1);
+ path->delete_event (idx);
+ changed = true;
+ idx--;
+ continue;
+ }
+
+ /* Prune [..., call, return, ...] pairs
+ (for -fanalyzer-verbosity=0). */
+ if (idx + 1 < (signed)path->m_events.length ()
+ && path->m_events[idx]->is_call_p ()
+ && path->m_events[idx + 1]->is_return_p ())
+ {
+ if (get_logger ())
+ {
+ label_text desc (path->m_events[idx]->get_desc (false));
+ log ("filtering events %i-%i:"
+ " irrelevant call/return: %s",
+ idx, idx + 1, desc.m_buffer);
+ desc.maybe_free ();
+ }
+ path->delete_event (idx + 1);
+ path->delete_event (idx);
+ changed = true;
+ idx--;
+ continue;
+ }
+
+ idx--;
+ }
+
+ }
+ while (changed);
+}
+
+/* Final pass of diagnostic_manager::prune_path.
+
+ If all we're left with is in one function, then filter function entry
+ events. */
+
+void
+diagnostic_manager::finish_pruning (checker_path *path) const
+{
+ if (!path->interprocedural_p ())
+ {
+ int idx = path->m_events.length () - 1;
+ while (idx >= 0 && idx < (signed)path->m_events.length ())
+ {
+ checker_event *base_event = path->m_events[idx];
+ if (base_event->m_kind == EK_FUNCTION_ENTRY)
+ {
+ log ("filtering event %i:"
+ " function entry for purely intraprocedural path", idx);
+ path->delete_event (idx);
+ }
+ idx--;
+ }
+ }
+}
+
+#endif /* #if ENABLE_ANALYZER */
--- /dev/null
+/* Classes for saving, deduplicating, and emitting analyzer diagnostics.
+ Copyright (C) 2019-2020 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_ANALYZER_DIAGNOSTIC_MANAGER_H
+#define GCC_ANALYZER_DIAGNOSTIC_MANAGER_H
+
+/* A to-be-emitted diagnostic stored within diagnostic_manager. */
+
+class saved_diagnostic
+{
+public:
+ saved_diagnostic (const state_machine *sm,
+ const exploded_node *enode,
+ const supernode *snode, const gimple *stmt,
+ stmt_finder *stmt_finder,
+ tree var, state_machine::state_t state,
+ pending_diagnostic *d);
+ ~saved_diagnostic ();
+
+ bool operator== (const saved_diagnostic &other) const
+ {
+ return (m_sm == other.m_sm
+ /* We don't compare m_enode. */
+ && m_snode == other.m_snode
+ && m_stmt == other.m_stmt
+ /* We don't compare m_stmt_finder. */
+ && m_var == other.m_var
+ && m_state == other.m_state
+ && m_d->equal_p (*other.m_d)
+ && m_trailing_eedge == other.m_trailing_eedge);
+ }
+
+ //private:
+ const state_machine *m_sm;
+ const exploded_node *m_enode;
+ const supernode *m_snode;
+ const gimple *m_stmt;
+ stmt_finder *m_stmt_finder;
+ tree m_var;
+ state_machine::state_t m_state;
+ pending_diagnostic *m_d;
+ exploded_edge *m_trailing_eedge;
+
+private:
+ DISABLE_COPY_AND_ASSIGN (saved_diagnostic);
+};
+
+/* A class with responsibility for saving pending diagnostics, so that
+ they can be emitted after the exploded_graph is complete.
+ This lets us de-duplicate diagnostics, and find the shortest path
+ for each similar diagnostic, potentially using edges that might
+ not have been found when each diagnostic was first saved.
+
+ This also lets us compute shortest_paths once, rather than
+ per-diagnostic. */
+
+class diagnostic_manager : public log_user
+{
+public:
+ diagnostic_manager (logger *logger, int verbosity);
+
+ void add_diagnostic (const state_machine *sm,
+ const exploded_node *enode,
+ const supernode *snode, const gimple *stmt,
+ stmt_finder *finder,
+ tree var, state_machine::state_t state,
+ pending_diagnostic *d);
+
+ void add_diagnostic (const exploded_node *enode,
+ const supernode *snode, const gimple *stmt,
+ stmt_finder *finder,
+ pending_diagnostic *d);
+
+ void emit_saved_diagnostics (const exploded_graph &eg);
+
+ void emit_saved_diagnostic (const exploded_graph &eg,
+ const saved_diagnostic &sd,
+ const exploded_path &epath,
+ const gimple *stmt,
+ int num_dupes);
+
+ unsigned get_num_diagnostics () const
+ {
+ return m_saved_diagnostics.length ();
+ }
+ saved_diagnostic *get_saved_diagnostic (unsigned idx)
+ {
+ return m_saved_diagnostics[idx];
+ }
+
+private:
+ void build_emission_path (const exploded_graph &eg,
+ const exploded_path &epath,
+ checker_path *emission_path) const;
+
+ void add_events_for_eedge (const exploded_edge &eedge,
+ const extrinsic_state &ext_state,
+ checker_path *emission_path) const;
+
+ void add_events_for_superedge (const exploded_edge &eedge,
+ checker_path *emission_path) const;
+
+ void prune_path (checker_path *path,
+ const state_machine *sm,
+ tree var, state_machine::state_t state) const;
+
+ void prune_for_sm_diagnostic (checker_path *path,
+ const state_machine *sm,
+ tree var,
+ state_machine::state_t state) const;
+ void prune_interproc_events (checker_path *path) const;
+ void finish_pruning (checker_path *path) const;
+
+ auto_delete_vec<saved_diagnostic> m_saved_diagnostics;
+ const int m_verbosity;
+};
+
+#endif /* GCC_ANALYZER_DIAGNOSTIC_MANAGER_H */
--- /dev/null
+/* The analysis "engine".
+ Copyright (C) 2019-2020 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tree.h"
+#include "fold-const.h"
+#include "gcc-rich-location.h"
+#include "alloc-pool.h"
+#include "fibonacci_heap.h"
+#include "shortest-paths.h"
+#include "diagnostic-core.h"
+#include "diagnostic-event-id.h"
+#include "diagnostic-path.h"
+#include "function.h"
+#include "pretty-print.h"
+#include "sbitmap.h"
+#include "tristate.h"
+#include "ordered-hash-map.h"
+#include "selftest.h"
+#include "analyzer/analyzer.h"
+#include "analyzer/analyzer-logging.h"
+#include "analyzer/region-model.h"
+#include "analyzer/constraint-manager.h"
+#include "analyzer/sm.h"
+#include "analyzer/pending-diagnostic.h"
+#include "analyzer/diagnostic-manager.h"
+#include "cfg.h"
+#include "basic-block.h"
+#include "gimple.h"
+#include "gimple-iterator.h"
+#include "cgraph.h"
+#include "digraph.h"
+#include "analyzer/supergraph.h"
+#include "analyzer/call-string.h"
+#include "analyzer/program-point.h"
+#include "analyzer/program-state.h"
+#include "analyzer/exploded-graph.h"
+#include "analyzer/analysis-plan.h"
+#include "analyzer/checker-path.h"
+#include "analyzer/state-purge.h"
+
+/* For an overview, see gcc/doc/analyzer.texi. */
+
+#if ENABLE_ANALYZER
+
+static int readability_comparator (const void *p1, const void *p2);
+
+/* class impl_region_model_context : public region_model_context, public log_user. */
+
+impl_region_model_context::
+impl_region_model_context (exploded_graph &eg,
+ const exploded_node *enode_for_diag,
+ const program_state *old_state,
+ program_state *new_state,
+ state_change *change,
+ const gimple *stmt,
+ stmt_finder *stmt_finder)
+: m_eg (&eg), m_logger (eg.get_logger ()),
+ m_enode_for_diag (enode_for_diag),
+ m_old_state (old_state),
+ m_new_state (new_state),
+ m_change (change),
+ m_stmt (stmt),
+ m_stmt_finder (stmt_finder),
+ m_ext_state (eg.get_ext_state ())
+{
+}
+
+impl_region_model_context::
+impl_region_model_context (program_state *state,
+ state_change *change,
+ const extrinsic_state &ext_state)
+: m_eg (NULL), m_logger (NULL), m_enode_for_diag (NULL),
+ m_old_state (NULL),
+ m_new_state (state),
+ m_change (change),
+ m_stmt (NULL),
+ m_stmt_finder (NULL),
+ m_ext_state (ext_state)
+{
+}
+
+void
+impl_region_model_context::warn (pending_diagnostic *d)
+{
+ LOG_FUNC (get_logger ());
+ if (m_eg)
+ m_eg->get_diagnostic_manager ().add_diagnostic
+ (m_enode_for_diag, m_enode_for_diag->get_supernode (),
+ m_stmt, m_stmt_finder, d);
+}
+
+void
+impl_region_model_context::remap_svalue_ids (const svalue_id_map &map)
+{
+ m_new_state->remap_svalue_ids (map);
+ if (m_change)
+ m_change->remap_svalue_ids (map);
+}
+
+int
+impl_region_model_context::on_svalue_purge (svalue_id first_unused_sid,
+ const svalue_id_map &map)
+{
+ int total = 0;
+ int sm_idx;
+ sm_state_map *smap;
+ FOR_EACH_VEC_ELT (m_new_state->m_checker_states, sm_idx, smap)
+ {
+ const state_machine &sm = m_ext_state.get_sm (sm_idx);
+ total += smap->on_svalue_purge (sm, sm_idx, first_unused_sid,
+ map, this);
+ }
+ if (m_change)
+ total += m_change->on_svalue_purge (first_unused_sid);
+ return total;
+}
+
+/* class setjmp_svalue : public svalue. */
+
+/* Compare the fields of this setjmp_svalue with OTHER, returning true
+ if they are equal.
+ For use by svalue::operator==. */
+
+bool
+setjmp_svalue::compare_fields (const setjmp_svalue &other) const
+{
+ return m_enode == other.m_enode;
+}
+
+/* Implementation of svalue::add_to_hash vfunc for setjmp_svalue. */
+
+void
+setjmp_svalue::add_to_hash (inchash::hash &hstate) const
+{
+ hstate.add_int (m_enode->m_index);
+}
+
+/* Get the index of the stored exploded_node. */
+
+int
+setjmp_svalue::get_index () const
+{
+ return m_enode->m_index;
+}
+
+/* Implementation of svalue::print_details vfunc for setjmp_svalue. */
+
+void
+setjmp_svalue::print_details (const region_model &model ATTRIBUTE_UNUSED,
+ svalue_id this_sid ATTRIBUTE_UNUSED,
+ pretty_printer *pp) const
+{
+ pp_printf (pp, "setjmp: EN: %i", m_enode->m_index);
+}
+
+/* Concrete implementation of sm_context, wiring it up to the rest of this
+ file. */
+
+class impl_sm_context : public sm_context
+{
+public:
+ impl_sm_context (exploded_graph &eg,
+ int sm_idx,
+ const state_machine &sm,
+ const exploded_node *enode_for_diag,
+ const program_state *old_state,
+ program_state *new_state,
+ state_change *change,
+ const sm_state_map *old_smap,
+ sm_state_map *new_smap,
+ stmt_finder *stmt_finder = NULL)
+ : sm_context (sm_idx, sm),
+ m_logger (eg.get_logger ()),
+ m_eg (eg), m_enode_for_diag (enode_for_diag),
+ m_old_state (old_state), m_new_state (new_state),
+ m_change (change),
+ m_old_smap (old_smap), m_new_smap (new_smap),
+ m_stmt_finder (stmt_finder)
+ {
+ }
+
+ logger *get_logger () const { return m_logger.get_logger (); }
+
+ tree get_fndecl_for_call (const gcall *call) FINAL OVERRIDE
+ {
+ impl_region_model_context old_ctxt
+ (m_eg, m_enode_for_diag, NULL, NULL/*m_enode->get_state ()*/,
+ m_change, call);
+ region_model *model = m_new_state->m_region_model;
+ return model->get_fndecl_for_call (call, &old_ctxt);
+ }
+
+ void on_transition (const supernode *node ATTRIBUTE_UNUSED,
+ const gimple *stmt ATTRIBUTE_UNUSED,
+ tree var,
+ state_machine::state_t from,
+ state_machine::state_t to,
+ tree origin) FINAL OVERRIDE
+ {
+ logger * const logger = get_logger ();
+ LOG_FUNC (logger);
+ impl_region_model_context old_ctxt
+ (m_eg, m_enode_for_diag, NULL, NULL/*m_enode->get_state ()*/,
+ m_change, stmt);
+ svalue_id var_old_sid
+ = m_old_state->m_region_model->get_rvalue (var, &old_ctxt);
+
+ impl_region_model_context new_ctxt (m_eg, m_enode_for_diag,
+ m_old_state, m_new_state,
+ m_change, NULL);
+ svalue_id var_new_sid
+ = m_new_state->m_region_model->get_rvalue (var, &new_ctxt);
+ svalue_id origin_new_sid
+ = m_new_state->m_region_model->get_rvalue (origin, &new_ctxt);
+
+ state_machine::state_t current = m_old_smap->get_state (var_old_sid);
+ if (current == from)
+ {
+ if (logger)
+ logger->log ("%s: state transition of %qE: %s -> %s",
+ m_sm.get_name (),
+ var,
+ m_sm.get_state_name (from),
+ m_sm.get_state_name (to));
+ m_new_smap->set_state (m_new_state->m_region_model, var_new_sid,
+ to, origin_new_sid);
+ if (m_change)
+ m_change->add_sm_change (m_sm_idx, var_new_sid, from, to);
+ }
+ }
+
+ void warn_for_state (const supernode *snode, const gimple *stmt,
+ tree var, state_machine::state_t state,
+ pending_diagnostic *d) FINAL OVERRIDE
+ {
+ LOG_FUNC (get_logger ());
+ gcc_assert (d); // take ownership
+
+ impl_region_model_context old_ctxt
+ (m_eg, m_enode_for_diag, m_old_state, m_new_state, m_change, NULL);
+ state_machine::state_t current;
+ if (var)
+ {
+ svalue_id var_old_sid
+ = m_old_state->m_region_model->get_rvalue (var, &old_ctxt);
+ current = m_old_smap->get_state (var_old_sid);
+ }
+ else
+ current = m_old_smap->get_global_state ();
+
+ if (state == current)
+ {
+ m_eg.get_diagnostic_manager ().add_diagnostic
+ (&m_sm, m_enode_for_diag, snode, stmt, m_stmt_finder,
+ var, state, d);
+ }
+ else
+ delete d;
+ }
+
+ /* Hook for picking more readable trees for SSA names of temporaries,
+ so that rather than e.g.
+ "double-free of '<unknown>'"
+ we can print:
+ "double-free of 'inbuf.data'". */
+
+ tree get_readable_tree (tree expr) FINAL OVERRIDE
+ {
+ /* Only for SSA_NAMEs of temporaries; otherwise, return EXPR, as it's
+ likely to be the least surprising tree to report. */
+ if (TREE_CODE (expr) != SSA_NAME)
+ return expr;
+ if (SSA_NAME_VAR (expr) != NULL)
+ return expr;
+
+ gcc_assert (m_new_state);
+ svalue_id sid = m_new_state->m_region_model->get_rvalue (expr, NULL);
+ /* Find trees for all regions storing the value. */
+ auto_vec<path_var> pvs;
+ m_new_state->m_region_model->get_path_vars_for_svalue (sid, &pvs);
+ if (pvs.length () < 1)
+ return expr;
+ /* Pick the "best" such tree. */
+ // TODO: should we also consider (and consolidate) equiv classes?
+ pvs.qsort (readability_comparator);
+ return pvs[0].m_tree;
+ }
+
+ state_machine::state_t get_global_state () const FINAL OVERRIDE
+ {
+ return m_old_state->m_checker_states[m_sm_idx]->get_global_state ();
+ }
+
+ void set_global_state (state_machine::state_t state) FINAL OVERRIDE
+ {
+ m_new_state->m_checker_states[m_sm_idx]->set_global_state (state);
+ }
+
+ void on_custom_transition (custom_transition *transition) FINAL OVERRIDE
+ {
+ transition->impl_transition (&m_eg,
+ const_cast<exploded_node *> (m_enode_for_diag),
+ m_sm_idx);
+ }
+
+ log_user m_logger;
+ exploded_graph &m_eg;
+ const exploded_node *m_enode_for_diag;
+ const program_state *m_old_state;
+ program_state *m_new_state;
+ state_change *m_change;
+ const sm_state_map *m_old_smap;
+ sm_state_map *m_new_smap;
+ stmt_finder *m_stmt_finder;
+};
+
+/* Subclass of stmt_finder for finding the best stmt to report the leak at,
+ given the emission path. */
+
+class leak_stmt_finder : public stmt_finder
+{
+public:
+ leak_stmt_finder (const exploded_graph &eg, tree var)
+ : m_eg (eg), m_var (var) {}
+
+ stmt_finder *clone () const FINAL OVERRIDE
+ {
+ return new leak_stmt_finder (m_eg, m_var);
+ }
+
+ const gimple *find_stmt (const exploded_path &epath)
+ FINAL OVERRIDE
+ {
+ logger * const logger = m_eg.get_logger ();
+ LOG_FUNC (logger);
+
+ if (TREE_CODE (m_var) == SSA_NAME)
+ {
+ /* Locate the final write to this SSA name in the path. */
+ const gimple *def_stmt = SSA_NAME_DEF_STMT (m_var);
+
+ int idx_of_def_stmt;
+ bool found = epath.find_stmt_backwards (def_stmt, &idx_of_def_stmt);
+ if (!found)
+ goto not_found;
+
+ /* What was the next write to the underlying var
+ after the SSA name was set? (if any). */
+
+ for (unsigned idx = idx_of_def_stmt + 1;
+ idx < epath.m_edges.length ();
+ ++idx)
+ {
+ const exploded_edge *eedge = epath.m_edges[idx];
+ if (logger)
+ logger->log ("eedge[%i]: EN %i -> EN %i",
+ idx,
+ eedge->m_src->m_index,
+ eedge->m_dest->m_index);
+ const exploded_node *dst_node = eedge->m_dest;
+ const program_point &dst_point = dst_node->get_point ();
+ const gimple *stmt = dst_point.get_stmt ();
+ if (!stmt)
+ continue;
+ if (const gassign *assign = dyn_cast <const gassign *> (stmt))
+ {
+ tree lhs = gimple_assign_lhs (assign);
+ if (TREE_CODE (lhs) == SSA_NAME
+ && SSA_NAME_VAR (lhs) == SSA_NAME_VAR (m_var))
+ return assign;
+ }
+ }
+ }
+
+ not_found:
+
+ /* Look backwards for the first statement with a location. */
+ int i;
+ const exploded_edge *eedge;
+ FOR_EACH_VEC_ELT_REVERSE (epath.m_edges, i, eedge)
+ {
+ if (logger)
+ logger->log ("eedge[%i]: EN %i -> EN %i",
+ i,
+ eedge->m_src->m_index,
+ eedge->m_dest->m_index);
+ const exploded_node *dst_node = eedge->m_dest;
+ const program_point &dst_point = dst_node->get_point ();
+ const gimple *stmt = dst_point.get_stmt ();
+ if (stmt)
+ if (stmt->location != UNKNOWN_LOCATION)
+ return stmt;
+ }
+
+ gcc_unreachable ();
+ return NULL;
+ }
+
+private:
+ const exploded_graph &m_eg;
+ tree m_var;
+};
+
+/* A measurement of how good EXPR is for presenting to the user, so
+ that e.g. we prefer printing
+ "leak of 'tmp.m_ptr'"
+ over:
+ "leak of '<unknown>'". */
+
+static int
+readability (const_tree expr)
+{
+ gcc_assert (expr);
+ switch (TREE_CODE (expr))
+ {
+ case COMPONENT_REF:
+ case MEM_REF:
+ /* Impose a slight readability penalty relative to that of
+ operand 0. */
+ return readability (TREE_OPERAND (expr, 0)) - 1;
+
+ case SSA_NAME:
+ {
+ if (tree var = SSA_NAME_VAR (expr))
+ return readability (var);
+ /* Avoid printing '<unknown>' for SSA names for temporaries. */
+ return -1;
+ }
+ break;
+
+ case VAR_DECL:
+ /* Arbitrarily-chosen "high readability" value. */
+ return 256;
+
+ default:
+ return 0;
+ }
+
+ return 0;
+}
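+
+/* For example (illustrative): for a hypothetical COMPONENT_REF
+ 'tmp.m_ptr' whose operand 0 is a VAR_DECL, readability returns
+ 256 - 1 = 255, whereas an anonymous SSA name such as '_3' (with no
+ SSA_NAME_VAR) scores -1, so readability_comparator below sorts
+ 'tmp.m_ptr' ahead of '_3'. */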
+
+/* A qsort comparator for trees to sort them into most user-readable to
+ least user-readable. */
+
+static int
+readability_comparator (const void *p1, const void *p2)
+{
+ path_var pv1 = *(path_var const *)p1;
+ path_var pv2 = *(path_var const *)p2;
+
+ /* TODO: should we consider stack depths? */
+ int r1 = readability (pv1.m_tree);
+ int r2 = readability (pv2.m_tree);
+
+ return r2 - r1;
+}
+
+/* Create an sm_context and use it to call SM's on_leak vfunc, so that
+ it can potentially complain about a leak of DST_SID (in a new region_model)
+ in the given STATE, where MAP can be used to map DST_SID back to an
+ svalue_id within the "old" region_model. */
+
+void
+impl_region_model_context::on_state_leak (const state_machine &sm,
+ int sm_idx,
+ svalue_id dst_sid,
+ svalue_id first_unused_sid,
+ const svalue_id_map &map,
+ state_machine::state_t state)
+{
+ logger * const logger = get_logger ();
+ LOG_SCOPE (logger);
+ if (logger)
+ logger->log ("considering leak of sv%i", dst_sid.as_int ());
+
+ if (!m_eg)
+ return;
+
+ /* m_old_state also needs to be non-NULL so that the sm_ctxt can look
+ up the old state of the sid. */
+ gcc_assert (m_old_state);
+
+ /* Don't report on sid leaking if it's equal to one of the used sids.
+ For example, given:
+ some_non_trivial_expression = malloc (sizeof (struct foo));
+ we have:
+ _1 = malloc (sizeof (struct foo)); (void *)
+ some_non_trivial_expression = _1; (struct foo *)
+ and at leak-detection time we may have:
+ sv5: {type: 'struct foo *', &r3} (used)
+ sv6: {type: 'void *', &r3} (unused)
+ where both point to the same region. We don't want to report a
+ leak of sv6, so we reject the report due to its equality with sv5. */
+ gcc_assert (m_new_state);
+ gcc_assert (!first_unused_sid.null_p ());
+ for (int i = 0; i < first_unused_sid.as_int (); i++)
+ {
+ svalue_id used_sid = svalue_id::from_int (i);
+
+ /* Use the "_without_cm" form of eval_condition, since
+ we're half-way through purging - we don't want to introduce new
+ equivalence classes into the constraint_manager for "sid" and
+ for each of the used_sids. */
+ const region_model &rm = *m_new_state->m_region_model;
+ tristate eq = rm.eval_condition_without_cm (dst_sid, EQ_EXPR, used_sid);
+ if (eq.is_true ())
+ {
+ if (logger)
+ logger->log ("rejecting leak of sv%i due to equality with sv%i",
+ dst_sid.as_int (), used_sid.as_int ());
+ return;
+ }
+ }
+
+ /* SID has leaked within the new state: no regions use it.
+ We need to convert it back to a tree, but since no regions use it, we
+ have to use MAP to convert it back to an svalue_id within the old state.
+ We can then look that svalue_id up to locate regions and thus tree(s)
+ that use it. */
+
+ svalue_id old_sid = map.get_src_for_dst (dst_sid);
+
+ auto_vec<path_var> leaked_pvs;
+ m_old_state->m_region_model->get_path_vars_for_svalue (old_sid, &leaked_pvs);
+
+ if (leaked_pvs.length () < 1)
+ return;
+
+ /* Find "best" leaked tree.
+ Sort the leaked path_vars from most user-readable through
+ to least user-readable. Given that we only emit one
+ leak per EC, this ought to ensure that we pick the most
+ user-readable description of each leaking EC.
+ This assumes that all vars in the EC have the same state. */
+ leaked_pvs.qsort (readability_comparator);
+
+ tree leaked_tree = leaked_pvs[0].m_tree;
+ if (logger)
+ logger->log ("best leaked_tree: %qE", leaked_tree);
+
+ leak_stmt_finder stmt_finder (*m_eg, leaked_tree);
+ impl_sm_context sm_ctxt (*m_eg, sm_idx, sm, m_enode_for_diag,
+ m_old_state, m_new_state,
+ m_change,
+ m_old_state->m_checker_states[sm_idx],
+ m_new_state->m_checker_states[sm_idx],
+ &stmt_finder);
+ gcc_assert (m_enode_for_diag);
+
+ /* Don't complain about leaks when returning from "main". */
+ if (m_enode_for_diag->get_supernode ()
+ && m_enode_for_diag->get_supernode ()->return_p ())
+ {
+ tree fndecl = m_enode_for_diag->get_function ()->decl;
+ if (0 == strcmp (IDENTIFIER_POINTER (DECL_NAME (fndecl)), "main"))
+ {
+ if (logger)
+ logger->log ("not reporting leak from main");
+ return;
+ }
+ }
+
+ pending_diagnostic *pd = sm.on_leak (leaked_tree);
+ if (pd)
+ m_eg->get_diagnostic_manager ().add_diagnostic
+ (&sm, m_enode_for_diag, m_enode_for_diag->get_supernode (),
+ m_stmt, &stmt_finder,
+ leaked_tree, state, pd);
+}
+
+/* Implementation of region_model_context::on_inherited_svalue vfunc
+ for impl_region_model_context.
+ Notify all checkers that CHILD_SID has been created from PARENT_SID,
+ so that those state machines that inherit state can propagate the state
+ from parent to child. */
+
+void
+impl_region_model_context::on_inherited_svalue (svalue_id parent_sid,
+ svalue_id child_sid)
+{
+ if (!m_new_state)
+ return;
+
+ int sm_idx;
+ sm_state_map *smap;
+ FOR_EACH_VEC_ELT (m_new_state->m_checker_states, sm_idx, smap)
+ {
+ const state_machine &sm = m_ext_state.get_sm (sm_idx);
+ if (sm.inherited_state_p ())
+ smap->on_inherited_svalue (parent_sid, child_sid);
+ }
+}
+
+/* Implementation of region_model_context::on_cast vfunc
+ for impl_region_model_context.
+ Notify all checkers that DST_SID is a cast of SRC_SID, so that sm-state
+ can be propagated from src to dst. */
+
+void
+impl_region_model_context::on_cast (svalue_id src_sid,
+ svalue_id dst_sid)
+{
+ if (!m_new_state)
+ return;
+
+ int sm_idx;
+ sm_state_map *smap;
+ FOR_EACH_VEC_ELT (m_new_state->m_checker_states, sm_idx, smap)
+ smap->on_cast (src_sid, dst_sid);
+}
+
+/* Implementation of region_model_context::on_condition vfunc.
+ Notify all state machines about the condition, which could lead to
+ state transitions. */
+
+void
+impl_region_model_context::on_condition (tree lhs, enum tree_code op, tree rhs)
+{
+ int sm_idx;
+ sm_state_map *smap;
+ FOR_EACH_VEC_ELT (m_new_state->m_checker_states, sm_idx, smap)
+ {
+ const state_machine &sm = m_ext_state.get_sm (sm_idx);
+ impl_sm_context sm_ctxt (*m_eg, sm_idx, sm, m_enode_for_diag,
+ m_old_state, m_new_state,
+ m_change,
+ m_old_state->m_checker_states[sm_idx],
+ m_new_state->m_checker_states[sm_idx]);
+ sm.on_condition (&sm_ctxt,
+ m_enode_for_diag->get_supernode (), m_stmt,
+ lhs, op, rhs);
+ }
+}
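+
+/* For example (illustrative): when the "true" edge of 'if (p)' is
+ followed, the region model reports the condition (p, NE_EXPR, 0) via
+ this vfunc, allowing a state machine such as sm-malloc to transition
+ a pointer it is tracking out of its "unchecked" state. */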
+
+/* struct point_and_state. */
+
+/* Assert that this object is sane. */
+
+void
+point_and_state::validate (const extrinsic_state &ext_state) const
+{
+ /* Skip this in a release build. */
+#if !CHECKING_P
+ return;
+#endif
+
+ m_point.validate ();
+
+ m_state.validate (ext_state);
+
+ /* Verify that the callstring's model of the stack corresponds to that
+ of the region_model. */
+ /* They should have the same depth. */
+ gcc_assert (m_point.get_stack_depth ()
+ == m_state.m_region_model->get_stack_depth ());
+ /* Check the functions in the callstring vs those in the frames
+ at each depth. */
+ for (int depth = 0; depth < m_point.get_stack_depth (); ++depth)
+ {
+ gcc_assert (m_point.get_function_at_depth (depth)
+ == m_state.m_region_model->get_function_at_depth (depth));
+ }
+}
+
+/* Subroutine of print_enode_indices: print a run of indices from START_IDX
+ to END_IDX to PP, using and updating *FIRST_RUN. */
+
+static void
+print_run (pretty_printer *pp, int start_idx, int end_idx,
+ bool *first_run)
+{
+ if (!(*first_run))
+ pp_string (pp, ", ");
+ *first_run = false;
+ if (start_idx == end_idx)
+ pp_printf (pp, "EN: %i", start_idx);
+ else
+ pp_printf (pp, "EN: %i-%i", start_idx, end_idx);
+}
+
+/* Print the indices within ENODES to PP, collecting them as
+ runs/singletons e.g. "EN: 4-7, EN: 20-23, EN: 42". */
+
+static void
+print_enode_indices (pretty_printer *pp,
+ const auto_vec<exploded_node *> &enodes)
+{
+ int cur_start_idx = -1;
+ int cur_finish_idx = -1;
+ bool first_run = true;
+ unsigned i;
+ exploded_node *enode;
+ FOR_EACH_VEC_ELT (enodes, i, enode)
+ {
+ if (cur_start_idx == -1)
+ {
+ gcc_assert (cur_finish_idx == -1);
+ cur_start_idx = cur_finish_idx = enode->m_index;
+ }
+ else
+ {
+ if (enode->m_index == cur_finish_idx + 1)
+ /* Continuation of a run. */
+ cur_finish_idx = enode->m_index;
+ else
+ {
+ /* Finish existing run, start a new one. */
+ gcc_assert (cur_start_idx >= 0);
+ gcc_assert (cur_finish_idx >= 0);
+ print_run (pp, cur_start_idx, cur_finish_idx,
+ &first_run);
+ cur_start_idx = cur_finish_idx = enode->m_index;
+ }
+ }
+ }
+ /* Finish any existing run. */
+ if (cur_start_idx >= 0)
+ {
+ gcc_assert (cur_finish_idx >= 0);
+ print_run (pp, cur_start_idx, cur_finish_idx,
+ &first_run);
+ }
+}
+
+/* For use by dump_dot, get a value for the .dot "fillcolor" attribute.
+ Colorize by sm-state, to make it easier to see how sm-state propagates
+ through the exploded_graph. */
+
+const char *
+exploded_node::get_dot_fillcolor () const
+{
+ const program_state &state = get_state ();
+
+ /* We want to be able to easily distinguish the no-sm-state case,
+ and to be able to distinguish cases where there's a single state
+ from each other.
+
+ Sum the sm_states, and use the result to choose from a table,
+ modulo table-size, special-casing the "no sm-state" case. */
+ int total_sm_state = 0;
+ int i;
+ sm_state_map *smap;
+ FOR_EACH_VEC_ELT (state.m_checker_states, i, smap)
+ {
+ for (sm_state_map::iterator_t iter = smap->begin ();
+ iter != smap->end ();
+ ++iter)
+ total_sm_state += (*iter).second.m_state;
+ total_sm_state += smap->get_global_state ();
+ }
+
+ if (total_sm_state > 0)
+ {
+ /* An arbitrarily-picked collection of light colors. */
+ const char * const colors[]
+ = {"azure", "coral", "cornsilk", "lightblue", "yellow"};
+ const int num_colors = sizeof (colors) / sizeof (colors[0]);
+ return colors[total_sm_state % num_colors];
+ }
+ else
+ /* No sm-state. */
+ return "lightgrey";
+}
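+
+/* For example: a node whose sm-state values sum to 7 is filled with
+ colors[7 % 5], i.e. "cornsilk", whereas a node with no sm-state at all
+ gets "lightgrey". */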
+
+/* Implementation of dnode::dump_dot vfunc for exploded_node. */
+
+void
+exploded_node::dump_dot (graphviz_out *gv, const dump_args_t &args) const
+{
+ pretty_printer *pp = gv->get_pp ();
+
+ dump_dot_id (pp);
+ pp_printf (pp, " [shape=none,margin=0,style=filled,fillcolor=%s,label=\"",
+ get_dot_fillcolor ());
+ pp_write_text_to_stream (pp);
+
+ pp_printf (pp, "EN: %i", m_index);
+ pp_newline (pp);
+
+ format f (true);
+ m_ps.get_point ().print (pp, f);
+ pp_newline (pp);
+
+ const extrinsic_state &ext_state = args.m_eg.get_ext_state ();
+ const program_state &state = m_ps.get_state ();
+ state.dump_to_pp (ext_state, true, pp);
+ pp_newline (pp);
+
+ {
+ int i;
+ sm_state_map *smap;
+ FOR_EACH_VEC_ELT (state.m_checker_states, i, smap)
+ {
+ if (!smap->is_empty_p ())
+ {
+ pp_printf (pp, "%s: ", ext_state.get_name (i));
+ smap->print (ext_state.get_sm (i), pp);
+ pp_newline (pp);
+ }
+ }
+ }
+
+ pp_write_text_as_dot_label_to_stream (pp, /*for_record=*/true);
+
+ pp_string (pp, "\"];\n\n");
+ pp_flush (pp);
+}
+
+/* Dump this to PP in a form suitable for use as an id in .dot output. */
+
+void
+exploded_node::dump_dot_id (pretty_printer *pp) const
+{
+ pp_printf (pp, "exploded_node_%i", m_index);
+}
+
+/* Dump a multiline representation of this node to PP. */
+
+void
+exploded_node::dump_to_pp (pretty_printer *pp,
+ const extrinsic_state &ext_state) const
+{
+ pp_printf (pp, "EN: %i", m_index);
+ pp_newline (pp);
+
+ format f (true);
+ m_ps.get_point ().print (pp, f);
+ pp_newline (pp);
+
+ m_ps.get_state ().dump_to_pp (ext_state, false, pp);
+ pp_newline (pp);
+}
+
+/* Dump a multiline representation of this node to FILE. */
+
+void
+exploded_node::dump (FILE *fp,
+ const extrinsic_state &ext_state) const
+{
+ pretty_printer pp;
+ pp_format_decoder (&pp) = default_tree_printer;
+ pp_show_color (&pp) = pp_show_color (global_dc->printer);
+ pp.buffer->stream = fp;
+ dump_to_pp (&pp, ext_state);
+ pp_flush (&pp);
+}
+
+/* Dump a multiline representation of this node to stderr. */
+
+DEBUG_FUNCTION void
+exploded_node::dump (const extrinsic_state &ext_state) const
+{
+ dump (stderr, ext_state);
+}
+
+/* Return true if FNDECL has a gimple body. */
+// TODO: is there a pre-canned way to do this?
+
+static bool
+fndecl_has_gimple_body_p (tree fndecl)
+{
+ if (fndecl == NULL_TREE)
+ return false;
+
+ cgraph_node *n = cgraph_node::get (fndecl);
+ if (!n)
+ return false;
+
+ return n->has_gimple_body_p ();
+}
+
+/* A pending_diagnostic subclass for implementing "__analyzer_dump_path". */
+
+class dump_path_diagnostic
+ : public pending_diagnostic_subclass<dump_path_diagnostic>
+{
+public:
+ bool emit (rich_location *richloc) FINAL OVERRIDE
+ {
+ inform (richloc, "path");
+ return true;
+ }
+
+ const char *get_kind () const FINAL OVERRIDE { return "dump_path_diagnostic"; }
+
+ bool operator== (const dump_path_diagnostic &) const
+ {
+ return true;
+ }
+};
+
+/* Modify STATE in place, applying the effects of the stmt at this node's
+ point. */
+
+exploded_node::on_stmt_flags
+exploded_node::on_stmt (exploded_graph &eg,
+ const supernode *snode,
+ const gimple *stmt,
+ program_state *state,
+ state_change *change) const
+{
+ /* Preserve the old state. It is used here for looking
+ up old checker states, for determining state transitions, and
+ also within impl_region_model_context and impl_sm_context for
+ going from tree to svalue_id. */
+ const program_state old_state (*state);
+
+ impl_region_model_context ctxt (eg, this,
+ &old_state, state, change,
+ stmt);
+
+ if (const gassign *assign = dyn_cast <const gassign *> (stmt))
+ state->m_region_model->on_assignment (assign, &ctxt);
+
+ if (const greturn *return_ = dyn_cast <const greturn *> (stmt))
+ state->m_region_model->on_return (return_, &ctxt);
+
+ if (const gcall *call = dyn_cast <const gcall *> (stmt))
+ {
+ /* Debugging/test support. */
+ if (is_special_named_call_p (call, "__analyzer_dump", 0))
+ {
+ /* Handle the builtin "__analyzer_dump" by dumping state
+ to stderr. */
+ dump (eg.get_ext_state ());
+ }
+ else if (is_special_named_call_p (call, "__analyzer_dump_path", 0))
+ {
+ /* Handle the builtin "__analyzer_dump_path" by queuing a
+ diagnostic at this exploded_node. */
+ ctxt.warn (new dump_path_diagnostic ());
+ }
+ else if (is_special_named_call_p (call, "__analyzer_dump_region_model", 0))
+ {
+ /* Handle the builtin "__analyzer_dump_region_model" by dumping
+ the region model's state to stderr. */
+ state->m_region_model->dump (false);
+ }
+ else if (is_special_named_call_p (call, "__analyzer_eval", 1))
+ {
+ /* Handle the builtin "__analyzer_eval" by evaluating the input
+ and dumping as a dummy warning, so that test cases can use
+ dg-warning to validate the result (and so unexpected warnings will
+ lead to DejaGnu failures). */
+ tree t_arg = gimple_call_arg (call, 0);
+ tristate t
+ = state->m_region_model->eval_condition (t_arg,
+ NE_EXPR,
+ integer_zero_node,
+ &ctxt);
+ warning_at (call->location, 0, "%s", t.as_string ());
+ }
+ else if (is_special_named_call_p (call, "__analyzer_break", 0))
+ {
+ /* Handle the builtin "__analyzer_break" by triggering a
+ breakpoint. */
+ /* TODO: is there a good cross-platform way to do this? */
+ raise (SIGINT);
+ }
+ else if (is_setjmp_call_p (stmt))
+ state->m_region_model->on_setjmp (call, this, &ctxt);
+ else if (is_longjmp_call_p (call))
+ {
+ on_longjmp (eg, call, state, &ctxt);
+ return on_stmt_flags::terminate_path ();
+ }
+ else
+ state->m_region_model->on_call_pre (call, &ctxt);
+ }
+
+ bool any_sm_changes = false;
+ int sm_idx;
+ sm_state_map *smap;
+ FOR_EACH_VEC_ELT (old_state.m_checker_states, sm_idx, smap)
+ {
+ const state_machine &sm = eg.get_ext_state ().get_sm (sm_idx);
+ const sm_state_map *old_smap
+ = old_state.m_checker_states[sm_idx];
+ sm_state_map *new_smap = state->m_checker_states[sm_idx];
+ impl_sm_context sm_ctxt (eg, sm_idx, sm, this, &old_state, state,
+ change,
+ old_smap, new_smap);
+ /* Allow the state_machine to handle the stmt. */
+ if (!sm.on_stmt (&sm_ctxt, snode, stmt))
+ {
+ /* For those stmts that were not handled by the state machine. */
+ if (const gcall *call = dyn_cast <const gcall *> (stmt))
+ {
+ tree callee_fndecl = gimple_call_fndecl (call);
+ // TODO: maybe we can be smarter about handling function pointers?
+
+ if (!fndecl_has_gimple_body_p (callee_fndecl))
+ new_smap->purge_for_unknown_fncall (eg, sm, call, callee_fndecl,
+ state->m_region_model);
+ }
+ }
+ if (*old_smap != *new_smap)
+ any_sm_changes = true;
+ }
+
+ if (const gcall *call = dyn_cast <const gcall *> (stmt))
+ state->m_region_model->on_call_post (call, &ctxt);
+
+ return on_stmt_flags (any_sm_changes);
+}
+
+/* Consider the effect of following superedge SUCC from this node.
+
+ Return true if it's feasible to follow the edge, or false
+ if it's infeasible.
+
+ Examples: if it's the "true" branch within
+ a CFG and we know the conditional is false, we know it's infeasible.
+ If it's one of multiple interprocedural "return" edges, then only
+ the edge back to the most recent callsite is feasible.
+
+ Update NEXT_STATE accordingly (e.g. to record that a condition was
+ true or false, or that the NULL-ness of a pointer has been checked,
+ pushing/popping stack frames, etc).
+
+ Update NEXT_POINT accordingly (updating the call string). */
+
+bool
+exploded_node::on_edge (exploded_graph &eg,
+ const superedge *succ,
+ program_point *next_point,
+ program_state *next_state,
+ state_change *change) const
+{
+ LOG_FUNC (eg.get_logger ());
+
+ if (!next_point->on_edge (eg, succ))
+ return false;
+
+ if (!next_state->on_edge (eg, *this, succ, change))
+ return false;
+
+ return true;
+}
+
+/* Verify that the stack at LONGJMP_POINT is still valid, given a call
+ to "setjmp" at SETJMP_POINT - the stack frame that "setjmp" was
+ called in must still be valid.
+
+ Caveat: this merely checks the call_strings in the points; it doesn't
+ detect the case where a frame returns and is then called again. */
+
+static bool
+valid_longjmp_stack_p (const program_point &longjmp_point,
+ const program_point &setjmp_point)
+{
+ const call_string &cs_at_longjmp = longjmp_point.get_call_string ();
+ const call_string &cs_at_setjmp = setjmp_point.get_call_string ();
+
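+ /* In other words, the call string at the setjmp must be a prefix of
+    the call string at the longjmp: e.g. (hypothetically) a setjmp
+    reached via calls A -> B is compatible with a longjmp reached via
+    A -> B -> C, but not with one reached via A -> D. */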
+ if (cs_at_longjmp.length () < cs_at_setjmp.length ())
+ return false;
+
+ /* Check that the call strings match, up to the depth of the
+ setjmp point. */
+ for (unsigned depth = 0; depth < cs_at_setjmp.length (); depth++)
+ if (cs_at_longjmp[depth] != cs_at_setjmp[depth])
+ return false;
+
+ return true;
+}
+
+/* A pending_diagnostic subclass for complaining about bad longjmps,
+ where the enclosing function of the "setjmp" has returned (and thus
+ the stack frame no longer exists). */
+
+class stale_jmp_buf : public pending_diagnostic_subclass<stale_jmp_buf>
+{
+public:
+ stale_jmp_buf (const gcall *setjmp_call, const gcall *longjmp_call)
+ : m_setjmp_call (setjmp_call), m_longjmp_call (longjmp_call)
+ {}
+
+ bool emit (rich_location *richloc) FINAL OVERRIDE
+ {
+ return warning_at
+ (richloc, OPT_Wanalyzer_stale_setjmp_buffer,
+ "%qs called after enclosing function of %qs has returned",
+ "longjmp", "setjmp");
+ }
+
+ const char *get_kind () const FINAL OVERRIDE
+ { return "stale_jmp_buf"; }
+
+ bool operator== (const stale_jmp_buf &other) const
+ {
+ return (m_setjmp_call == other.m_setjmp_call
+ && m_longjmp_call == other.m_longjmp_call);
+ }
+
+private:
+ const gcall *m_setjmp_call;
+ const gcall *m_longjmp_call;
+};
+
+/* Handle LONGJMP_CALL, a call to "longjmp".
+
+ Attempt to locate where "setjmp" was called on the jmp_buf and build an
+ exploded_node and exploded_edge to it representing a rewind to that frame,
+ handling the various kinds of failure that can occur. */
+
+void
+exploded_node::on_longjmp (exploded_graph &eg,
+ const gcall *longjmp_call,
+ program_state *new_state,
+ region_model_context *ctxt) const
+{
+ tree buf_ptr = gimple_call_arg (longjmp_call, 0);
+
+ region_model *new_region_model = new_state->m_region_model;
+ region_id buf_rid = new_region_model->deref_rvalue (buf_ptr, ctxt);
+ region *buf = new_region_model->get_region (buf_rid);
+ if (!buf)
+ return;
+
+ svalue_id buf_content_sid
+ = buf->get_value (*new_region_model, false, ctxt);
+ svalue *buf_content_sval = new_region_model->get_svalue (buf_content_sid);
+ if (!buf_content_sval)
+ return;
+ setjmp_svalue *setjmp_sval = buf_content_sval->dyn_cast_setjmp_svalue ();
+ if (!setjmp_sval)
+ return;
+
+ /* Build a custom enode and eedge for rewinding from the longjmp
+ call back to the setjmp. */
+
+ const exploded_node *enode_origin = setjmp_sval->get_exploded_node ();
+ rewind_info_t rewind_info (enode_origin);
+
+ const gcall *setjmp_call = rewind_info.get_setjmp_call ();
+ const program_point &setjmp_point = rewind_info.get_setjmp_point ();
+
+ const program_point &longjmp_point = get_point ();
+
+ /* Verify that the setjmp's call_stack hasn't been popped. */
+ if (!valid_longjmp_stack_p (longjmp_point, setjmp_point))
+ {
+ ctxt->warn (new stale_jmp_buf (setjmp_call, longjmp_call));
+ return;
+ }
+
+ gcc_assert (longjmp_point.get_stack_depth ()
+ >= setjmp_point.get_stack_depth ());
+
+ /* Update the state for use by the destination node. */
+
+ /* Stash the current number of diagnostics so that we can update
+ any that this adds to show where the longjmp is rewinding to. */
+
+ diagnostic_manager *dm = &eg.get_diagnostic_manager ();
+ unsigned prev_num_diagnostics = dm->get_num_diagnostics ();
+
+ new_region_model->on_longjmp (longjmp_call, setjmp_call,
+ setjmp_point.get_stack_depth (), ctxt);
+
+ program_point next_point
+ = program_point::after_supernode (setjmp_point.get_supernode (),
+ setjmp_point.get_call_string ());
+
+ state_change change;
+ exploded_node *next = eg.get_or_create_node (next_point, *new_state, &change);
+
+ /* Create custom exploded_edge for a longjmp. */
+ if (next)
+ {
+ exploded_edge *eedge
+ = eg.add_edge (const_cast<exploded_node *> (this), next, NULL,
+ change,
+ new rewind_info_t (enode_origin));
+
+ /* For any diagnostics that were queued here (such as leaks) we want
+ the checker_path to show the rewinding events after the "final event"
+ so that the user sees where the longjmp is rewinding to (otherwise the
+ path is meaningless).
+
+ For example, we want to emit something like:
+ | NN | {
+ | NN | longjmp (env, 1);
+ | | ~~~~~~~~~~~~~~~~
+ | | |
+ | | (10) 'ptr' leaks here; was allocated at (7)
+ | | (11) rewinding from 'longjmp' in 'inner'...
+ |
+ <-------------+
+ |
+ 'outer': event 12
+ |
+ | NN | i = setjmp(env);
+ | | ^~~~~~
+ | | |
+ | | (12) ...to 'setjmp' in 'outer' (saved at (2))
+
+ where the "final" event above is event (10), but we want to append
+ events (11) and (12) afterwards.
+
+ Do this by setting m_trailing_eedge on any diagnostics that were
+ just saved. */
+ unsigned num_diagnostics = dm->get_num_diagnostics ();
+ for (unsigned i = prev_num_diagnostics; i < num_diagnostics; i++)
+ {
+ saved_diagnostic *sd = dm->get_saved_diagnostic (i);
+ sd->m_trailing_eedge = eedge;
+ }
+ }
+}
+
+/* Subroutine of exploded_graph::process_node for finding the successors
+ of the supernode for a function exit basic block.
+
+ Ensure that pop_frame is called, potentially queuing diagnostics about
+ leaks. */
+
+void
+exploded_node::detect_leaks (exploded_graph &eg) const
+{
+ LOG_FUNC_1 (eg.get_logger (), "EN: %i", m_index);
+
+ gcc_assert (get_point ().get_supernode ()->return_p ());
+
+ /* If we're not a "top-level" function, do nothing; pop_frame
+ will be called when handling the return superedge. */
+ if (get_point ().get_stack_depth () > 1)
+ return;
+
+ /* We have a "top-level" function. */
+ gcc_assert (get_point ().get_stack_depth () == 1);
+
+ const program_state &old_state = get_state ();
+
+ /* Work with a temporary copy of the state: pop the frame, and see
+ what leaks (via purge_unused_svalues). */
+ program_state new_state (old_state);
+
+ gcc_assert (new_state.m_region_model);
+
+ purge_stats stats;
+ impl_region_model_context ctxt (eg, this,
+ &old_state, &new_state,
+ NULL,
+ get_stmt ());
+ new_state.m_region_model->pop_frame (true, &stats, &ctxt);
+}
+
+/* Dump the successors and predecessors of this enode to OUTF. */
+
+void
+exploded_node::dump_succs_and_preds (FILE *outf) const
+{
+ unsigned i;
+ exploded_edge *e;
+ {
+ auto_vec<exploded_node *> preds (m_preds.length ());
+ FOR_EACH_VEC_ELT (m_preds, i, e)
+ preds.quick_push (e->m_src);
+ pretty_printer pp;
+ print_enode_indices (&pp, preds);
+ fprintf (outf, "preds: %s\n",
+ pp_formatted_text (&pp));
+ }
+ {
+ auto_vec<exploded_node *> succs (m_succs.length ());
+ FOR_EACH_VEC_ELT (m_succs, i, e)
+ succs.quick_push (e->m_dest);
+ pretty_printer pp;
+ print_enode_indices (&pp, succs);
+ fprintf (outf, "succs: %s\n",
+ pp_formatted_text (&pp));
+ }
+}
+
+/* class rewind_info_t : public exploded_edge::custom_info_t. */
+
+/* Implementation of exploded_edge::custom_info_t::update_model vfunc
+ for rewind_info_t.
+
+ Update state for the special-case of a rewind of a longjmp
+ to a setjmp (which doesn't have a superedge, but does affect
+ state). */
+
+void
+rewind_info_t::update_model (region_model *model,
+ const exploded_edge &eedge)
+{
+ const exploded_node &src_enode = *eedge.m_src;
+ const program_point &src_point = src_enode.get_point ();
+
+ const gimple *last_stmt
+ = src_point.get_supernode ()->get_last_stmt ();
+ gcc_assert (last_stmt);
+ const gcall *longjmp_call = as_a <const gcall *> (last_stmt);
+
+ const program_point &longjmp_point = eedge.m_src->get_point ();
+ const program_point &setjmp_point = eedge.m_dest->get_point ();
+
+ gcc_assert (longjmp_point.get_stack_depth ()
+ >= setjmp_point.get_stack_depth ());
+
+ model->on_longjmp (longjmp_call,
+ get_setjmp_call (),
+ setjmp_point.get_stack_depth (), NULL);
+}
+
+/* Implementation of exploded_edge::custom_info_t::add_events_to_path vfunc
+ for rewind_info_t. */
+
+void
+rewind_info_t::add_events_to_path (checker_path *emission_path,
+ const exploded_edge &eedge)
+{
+ const exploded_node *src_node = eedge.m_src;
+ const program_point &src_point = src_node->get_point ();
+ const int src_stack_depth = src_point.get_stack_depth ();
+ const exploded_node *dst_node = eedge.m_dest;
+ const program_point &dst_point = dst_node->get_point ();
+ const int dst_stack_depth = dst_point.get_stack_depth ();
+
+ emission_path->add_event
+ (new rewind_from_longjmp_event
+ (&eedge, src_point.get_supernode ()->get_end_location (),
+ src_point.get_fndecl (),
+ src_stack_depth));
+ emission_path->add_event
+ (new rewind_to_setjmp_event
+ (&eedge, get_setjmp_call ()->location,
+ dst_point.get_fndecl (),
+ dst_stack_depth, this));
+}
+
+/* class exploded_edge : public dedge. */
+
+/* exploded_edge's ctor. */
+
+exploded_edge::exploded_edge (exploded_node *src, exploded_node *dest,
+ const superedge *sedge,
+ const state_change &change,
+ custom_info_t *custom_info)
+: dedge (src, dest), m_sedge (sedge), m_change (change),
+ m_custom_info (custom_info)
+{
+ change.validate (dest->get_state ());
+}
+
+/* exploded_edge's dtor. */
+
+exploded_edge::~exploded_edge ()
+{
+ delete m_custom_info;
+}
+
+/* Implementation of dedge::dump_dot vfunc for exploded_edge.
+ Use the label of the underlying superedge, if any. */
+
+void
+exploded_edge::dump_dot (graphviz_out *gv, const dump_args_t &args) const
+{
+ pretty_printer *pp = gv->get_pp ();
+
+ const char *style = "\"solid,bold\"";
+ const char *color = "black";
+ int weight = 10;
+ const char *constraint = "true";
+
+ if (m_sedge)
+ switch (m_sedge->m_kind)
+ {
+ default:
+ gcc_unreachable ();
+ case SUPEREDGE_CFG_EDGE:
+ break;
+ case SUPEREDGE_CALL:
+ color = "red";
+ //constraint = "false";
+ break;
+ case SUPEREDGE_RETURN:
+ color = "green";
+ //constraint = "false";
+ break;
+ case SUPEREDGE_INTRAPROCEDURAL_CALL:
+ style = "\"dotted\"";
+ break;
+ }
+ if (m_custom_info)
+ {
+ color = "red";
+ style = "\"dotted\"";
+ }
+
+ m_src->dump_dot_id (pp);
+ pp_string (pp, " -> ");
+ m_dest->dump_dot_id (pp);
+ pp_printf (pp,
+ (" [style=%s, color=%s, weight=%d, constraint=%s,"
+ " headlabel=\""),
+ style, color, weight, constraint);
+
+ if (m_sedge)
+ m_sedge->dump_label_to_pp (pp, false);
+ else if (m_custom_info)
+ m_custom_info->print (pp);
+
+ m_change.dump (pp, args.m_eg.get_ext_state ());
+ //pp_write_text_as_dot_label_to_stream (pp, /*for_record=*/false);
+
+ pp_printf (pp, "\"];\n");
+}
+
+/* struct stats. */
+
+/* stats' ctor. */
+
+stats::stats (int num_supernodes)
+: m_node_reuse_count (0),
+ m_node_reuse_after_merge_count (0),
+ m_num_supernodes (num_supernodes)
+{
+ for (int i = 0; i < NUM_POINT_KINDS; i++)
+ m_num_nodes[i] = 0;
+}
+
+/* Log these stats in multiline form to LOGGER. */
+
+void
+stats::log (logger *logger) const
+{
+ gcc_assert (logger);
+ for (int i = 0; i < NUM_POINT_KINDS; i++)
+ logger->log ("m_num_nodes[%s]: %i",
+ point_kind_to_string (static_cast <enum point_kind> (i)),
+ m_num_nodes[i]);
+ logger->log ("m_node_reuse_count: %i", m_node_reuse_count);
+ logger->log ("m_node_reuse_after_merge_count: %i",
+ m_node_reuse_after_merge_count);
+}
+
+/* Dump these stats in multiline form to OUT. */
+
+void
+stats::dump (FILE *out) const
+{
+ for (int i = 0; i < NUM_POINT_KINDS; i++)
+ fprintf (out, "m_num_nodes[%s]: %i\n",
+ point_kind_to_string (static_cast <enum point_kind> (i)),
+ m_num_nodes[i]);
+ fprintf (out, "m_node_reuse_count: %i\n", m_node_reuse_count);
+ fprintf (out, "m_node_reuse_after_merge_count: %i\n",
+ m_node_reuse_after_merge_count);
+
+ if (m_num_supernodes > 0)
+ fprintf (out, "PK_AFTER_SUPERNODE nodes per supernode: %.2f\n",
+ (float)m_num_nodes[PK_AFTER_SUPERNODE] / (float)m_num_supernodes);
+}
+
+/* strongly_connected_components's ctor. Tarjan's SCC algorithm. */
+
+strongly_connected_components::
+strongly_connected_components (const supergraph &sg, logger *logger)
+: m_sg (sg), m_per_node (m_sg.num_nodes ())
+{
+ LOG_SCOPE (logger);
+ auto_timevar tv (TV_ANALYZER_SCC);
+
+ for (int i = 0; i < m_sg.num_nodes (); i++)
+ m_per_node.quick_push (per_node_data ());
+
+ for (int i = 0; i < m_sg.num_nodes (); i++)
+ if (m_per_node[i].m_index == -1)
+ strong_connect (i);
+
+ if (0)
+ dump ();
+}
+
+/* Dump this object to stderr. */
+
+DEBUG_FUNCTION void
+strongly_connected_components::dump () const
+{
+ for (int i = 0; i < m_sg.num_nodes (); i++)
+ {
+ const per_node_data &v = m_per_node[i];
+ fprintf (stderr, "SN %i: index: %i lowlink: %i on_stack: %i\n",
+ i, v.m_index, v.m_lowlink, v.m_on_stack);
+ }
+}
+
+/* Subroutine of strongly_connected_components's ctor, part of Tarjan's
+ SCC algorithm. */
+
+void
+strongly_connected_components::strong_connect (unsigned index)
+{
+ supernode *v_snode = m_sg.get_node_by_index (index);
+
+ /* Set the depth index for v to the smallest unused index. */
+ per_node_data *v = &m_per_node[index];
+ v->m_index = index;
+ v->m_lowlink = index;
+ m_stack.safe_push (index);
+ v->m_on_stack = true;
+ index++;
+
+ /* Consider successors of v. */
+ unsigned i;
+ superedge *sedge;
+ FOR_EACH_VEC_ELT (v_snode->m_succs, i, sedge)
+ {
+ supernode *w_snode = sedge->m_dest;
+ per_node_data *w = &m_per_node[w_snode->m_index];
+ if (w->m_index == -1)
+ {
+ /* Successor w has not yet been visited; recurse on it. */
+ strong_connect (w_snode->m_index);
+ v->m_lowlink = MIN (v->m_lowlink, w->m_lowlink);
+ }
+ else if (w->m_on_stack)
+ {
+ /* Successor w is in stack S and hence in the current SCC.
+ If w is not on stack, then (v, w) is a cross-edge in the DFS
+ tree and must be ignored. */
+ v->m_lowlink = MIN (v->m_lowlink, w->m_index);
+ }
+ }
+
+ /* If v is a root node, pop the stack and generate an SCC. */
+
+ if (v->m_lowlink == v->m_index)
+ {
+ per_node_data *w;
+ do {
+ int idx = m_stack.pop ();
+ w = &m_per_node[idx];
+ w->m_on_stack = false;
+ } while (w != v);
+ }
+}
+
+/* worklist's ctor. */
+
+worklist::worklist (const exploded_graph &eg, const analysis_plan &plan)
+: m_eg (eg),
+ m_scc (eg.get_supergraph (), eg.get_logger ()),
+ m_plan (plan),
+ m_queue (key_t (*this, NULL))
+{
+}
+
+/* Return the number of nodes in the worklist. */
+
+unsigned
+worklist::length () const
+{
+ return m_queue.nodes ();
+}
+
+/* Return the next node in the worklist, removing it. */
+
+exploded_node *
+worklist::take_next ()
+{
+ return m_queue.extract_min ();
+}
+
+/* Return the next node in the worklist without removing it. */
+
+exploded_node *
+worklist::peek_next ()
+{
+ return m_queue.min ();
+}
+
+/* Add ENODE to the worklist. */
+
+void
+worklist::add_node (exploded_node *enode)
+{
+ m_queue.insert (key_t (*this, enode), enode);
+}
+
+/* Comparator for implementing worklist::key_t comparison operators.
+ Return negative if KA is before KB
+ Return positive if KA is after KB
+ Return 0 if they are equal. */
+
+int
+worklist::key_t::cmp_1 (const worklist::key_t &ka, const worklist::key_t &kb)
+{
+ const program_point &point_a = ka.m_enode->get_point ();
+ const program_point &point_b = kb.m_enode->get_point ();
+ const call_string &call_string_a = point_a.get_call_string ();
+ const call_string &call_string_b = point_b.get_call_string ();
+
+ /* Order empty-callstring points with different functions based on the
+ analysis_plan, so that we generate summaries before they are used. */
+ if (flag_analyzer_call_summaries
+ && call_string_a.empty_p ()
+ && call_string_b.empty_p ()
+ && point_a.get_function () != NULL
+ && point_b.get_function () != NULL
+ && point_a.get_function () != point_b.get_function ())
+ {
+ return ka.m_worklist.m_plan.cmp_function (point_a.get_function (),
+ point_b.get_function ());
+ }
+
+ /* First, order by SCC. */
+ int scc_id_a = ka.get_scc_id (ka.m_enode);
+ int scc_id_b = kb.get_scc_id (kb.m_enode);
+ if (scc_id_a != scc_id_b)
+ return scc_id_a - scc_id_b;
+
+ /* If in same SCC, order by supernode index (an arbitrary but stable
+ ordering). */
+ const supernode *snode_a = ka.m_enode->get_supernode ();
+ const supernode *snode_b = kb.m_enode->get_supernode ();
+ if (snode_a == NULL)
+ {
+ if (snode_b != NULL)
+ /* One is NULL. */
+ return -1;
+ else
+ /* Both are NULL. */
+ return 0;
+ }
+ if (snode_b == NULL)
+ /* One is NULL. */
+ return 1;
+ /* Neither are NULL. */
+ gcc_assert (snode_a && snode_b);
+ if (snode_a->m_index != snode_b->m_index)
+ return snode_a->m_index - snode_b->m_index;
+
+ gcc_assert (snode_a == snode_b);
+
+ /* Order within supernode via program point. */
+ int within_snode_cmp
+ = function_point::cmp_within_supernode (point_a.get_function_point (),
+ point_b.get_function_point ());
+ if (within_snode_cmp)
+ return within_snode_cmp;
+
+ /* The points might vary by callstring; try sorting by callstring. */
+ int cs_cmp = call_string::cmp (call_string_a, call_string_b);
+ if (cs_cmp)
+ return cs_cmp;
+
+ /* Otherwise, we ought to have the same program_point. */
+ gcc_assert (point_a == point_b);
+
+ const program_state &state_a = ka.m_enode->get_state ();
+ const program_state &state_b = kb.m_enode->get_state ();
+
+ /* Sort by sm-state, so that identical sm-states are grouped
+ together in the worklist.
+ For now, sort by the hash value (might not be deterministic). */
+ for (unsigned sm_idx = 0; sm_idx < state_a.m_checker_states.length ();
+ ++sm_idx)
+ {
+ sm_state_map *smap_a = state_a.m_checker_states[sm_idx];
+ sm_state_map *smap_b = state_b.m_checker_states[sm_idx];
+
+ int sm_cmp = smap_a->hash () - smap_b->hash ();
+ if (sm_cmp)
+ return sm_cmp;
+ }
+
+ /* Otherwise, we have two enodes at the same program point but with
+ different states. We don't have a good total ordering on states,
+ so order them by enode index, so that we at least have a
+ stable sort. */
+ return ka.m_enode->m_index - kb.m_enode->m_index;
+}
+
+/* Comparator for implementing worklist::key_t comparison operators.
+ Return negative if KA is before KB
+ Return positive if KA is after KB
+ Return 0 if they are equal. */
+
+int
+worklist::key_t::cmp (const worklist::key_t &ka, const worklist::key_t &kb)
+{
+ int result = cmp_1 (ka, kb);
+
+ /* Check that the ordering is antisymmetric. */
+#if CHECKING_P
+ int reversed = cmp_1 (kb, ka);
+ gcc_assert (reversed == -result);
+#endif
+
+ /* We should only have 0 for equal (point, state) pairs. */
+ gcc_assert (result != 0
+ || (*ka.m_enode->get_ps_key ()
+ == *kb.m_enode->get_ps_key ()));
+
+ return result;
+}
+
+/* exploded_graph's ctor. */
+
+exploded_graph::exploded_graph (const supergraph &sg, logger *logger,
+ const extrinsic_state &ext_state,
+ const state_purge_map *purge_map,
+ const analysis_plan &plan,
+ int verbosity)
+: m_sg (sg), m_logger (logger),
+ m_worklist (*this, plan),
+ m_ext_state (ext_state),
+ m_purge_map (purge_map),
+ m_plan (plan),
+ m_diagnostic_manager (logger, verbosity),
+ m_global_stats (m_sg.num_nodes ()),
+ m_functionless_stats (m_sg.num_nodes ()),
+ m_PK_AFTER_SUPERNODE_per_snode (m_sg.num_nodes ())
+{
+ m_origin = get_or_create_node (program_point (function_point (NULL, NULL,
+ 0, PK_ORIGIN),
+ call_string ()),
+ program_state (ext_state), NULL);
+ for (int i = 0; i < m_sg.num_nodes (); i++)
+ m_PK_AFTER_SUPERNODE_per_snode.quick_push (0);
+}
+
+/* exploded_graph's dtor. */
+
+exploded_graph::~exploded_graph ()
+{
+ for (function_stat_map_t::iterator iter = m_per_function_stats.begin ();
+ iter != m_per_function_stats.end ();
+ ++iter)
+ delete (*iter).second;
+
+ for (point_map_t::iterator iter = m_per_point_data.begin ();
+ iter != m_per_point_data.end ();
+ ++iter)
+ delete (*iter).second;
+}
+
+/* Ensure that there is an exploded_node representing an external call to
+ FUN, adding it to the worklist if it is newly created.
+
+ Add an edge from the origin exploded_node to the function entrypoint
+ exploded_node.
+
+ Return the exploded_node for the entrypoint to the function. */
+
+exploded_node *
+exploded_graph::add_function_entry (function *fun)
+{
+ program_point point = program_point::from_function_entry (m_sg, fun);
+ program_state state (m_ext_state);
+ state.m_region_model->push_frame (fun, NULL, NULL);
+
+ exploded_node *enode = get_or_create_node (point, state, NULL);
+ /* We should never fail to add such a node. */
+ gcc_assert (enode);
+ state_change change;
+ add_edge (m_origin, enode, NULL, change);
+ return enode;
+}
+
+/* Get or create an exploded_node for (POINT, STATE).
+ If a new node is created, it is added to the worklist.
+ If CHANGE is non-NULL, use it to suppress some purging of state,
+ to make generation of state_change_event instances easier. */
+
+exploded_node *
+exploded_graph::get_or_create_node (const program_point &point,
+ const program_state &state,
+ state_change *change)
+{
+ logger * const logger = get_logger ();
+ LOG_FUNC (logger);
+ if (logger)
+ {
+ format f (false);
+ pretty_printer *pp = logger->get_printer ();
+ logger->start_log_line ();
+ pp_string (pp, "point: ");
+ point.print (pp, f);
+ logger->end_log_line ();
+ logger->start_log_line ();
+ pp_string (pp, "state: ");
+ state.dump_to_pp (m_ext_state, true, pp);
+ logger->end_log_line ();
+ }
+
+ auto_cfun sentinel (point.get_function ());
+
+ state.validate (get_ext_state ());
+
+ //state.dump (get_ext_state ());
+
+ /* Prune state to try to improve the chances of a cache hit,
+ avoiding generating redundant nodes. */
+ program_state pruned_state = state.prune_for_point (*this, point, change);
+
+ pruned_state.validate (get_ext_state ());
+
+ //pruned_state.dump (get_ext_state ());
+
+ if (logger)
+ {
+ pretty_printer *pp = logger->get_printer ();
+ logger->start_log_line ();
+ pp_string (pp, "pruned_state: ");
+ pruned_state.dump_to_pp (m_ext_state, true, pp);
+ logger->end_log_line ();
+ pruned_state.m_region_model->dump_to_pp (logger->get_printer (), true);
+ }
+
+ stats *per_fn_stats = get_or_create_function_stats (point.get_function ());
+
+ stats *per_cs_stats
+ = &get_or_create_per_call_string_data (point.get_call_string ())->m_stats;
+
+ point_and_state ps (point, pruned_state);
+ ps.validate (m_ext_state);
+ if (exploded_node **slot = m_point_and_state_to_node.get (&ps))
+ {
+ /* An exploded_node for PS already exists. */
+ if (logger)
+ logger->log ("reused EN: %i", (*slot)->m_index);
+ m_global_stats.m_node_reuse_count++;
+ per_fn_stats->m_node_reuse_count++;
+ per_cs_stats->m_node_reuse_count++;
+ return *slot;
+ }
+
+ per_program_point_data *per_point_data
+ = get_or_create_per_program_point_data (point);
+
+ /* Consider merging state with another enode at this program_point. */
+ if (flag_analyzer_state_merge)
+ {
+ exploded_node *existing_enode;
+ unsigned i;
+ FOR_EACH_VEC_ELT (per_point_data->m_enodes, i, existing_enode)
+ {
+ if (logger)
+ logger->log ("considering merging with existing EN: %i for point",
+ existing_enode->m_index);
+ gcc_assert (existing_enode->get_point () == point);
+ const program_state &existing_state = existing_enode->get_state ();
+
+ /* See if the pruned state can be merged with this existing enode's state. */
+
+ program_state merged_state (m_ext_state);
+ if (pruned_state.can_merge_with_p (existing_state, m_ext_state,
+ &merged_state))
+ {
+ if (logger)
+ logger->log ("merging new state with that of EN: %i",
+ existing_enode->m_index);
+
+ /* Try again for a cache hit. */
+ ps.set_state (merged_state);
+ if (exploded_node **slot = m_point_and_state_to_node.get (&ps))
+ {
+ /* An exploded_node for PS already exists. */
+ if (logger)
+ logger->log ("reused EN: %i", (*slot)->m_index);
+ m_global_stats.m_node_reuse_after_merge_count++;
+ per_fn_stats->m_node_reuse_after_merge_count++;
+ per_cs_stats->m_node_reuse_after_merge_count++;
+ return *slot;
+ }
+
+ /* Otherwise, continue, using the merged state in "ps".
+ Given that merged_state's svalue_ids have no relationship
+ to those of the input state, and thus to those of CHANGE,
+ purge any svalue_ids from *CHANGE. */
+ if (change)
+ change->on_svalue_purge (svalue_id::from_int (0));
+ }
+ else
+ if (logger)
+ logger->log ("not merging new state with that of EN: %i",
+ existing_enode->m_index);
+ }
+ }
+
+ /* Impose a limit on the number of enodes per program point, and
+ simply stop if we exceed it. */
+ if ((int)per_point_data->m_enodes.length ()
+ > param_analyzer_max_enodes_per_program_point)
+ {
+ if (logger)
+ logger->log ("not creating enode; too many at program point");
+ warning_at (point.get_location (), OPT_Wanalyzer_too_complex,
+ "terminating analysis for this program point");
+ return NULL;
+ }
+
+ ps.validate (m_ext_state);
+
+ /* An exploded_node for "ps" doesn't already exist; create one. */
+ exploded_node *node = new exploded_node (ps, m_nodes.length ());
+ add_node (node);
+ m_point_and_state_to_node.put (node->get_ps_key (), node);
+
+ /* Update per-program_point data. */
+ per_point_data->m_enodes.safe_push (node);
+
+ const enum point_kind node_pk = node->get_point ().get_kind ();
+ m_global_stats.m_num_nodes[node_pk]++;
+ per_fn_stats->m_num_nodes[node_pk]++;
+ per_cs_stats->m_num_nodes[node_pk]++;
+
+ if (node_pk == PK_AFTER_SUPERNODE)
+ m_PK_AFTER_SUPERNODE_per_snode[point.get_supernode ()->m_index]++;
+
+ if (logger)
+ {
+ format f (false);
+ pretty_printer *pp = logger->get_printer ();
+ logger->log ("created EN: %i", node->m_index);
+ logger->start_log_line ();
+ pp_string (pp, "point: ");
+ point.print (pp, f);
+ logger->end_log_line ();
+ logger->start_log_line ();
+ pp_string (pp, "pruned_state: ");
+ pruned_state.dump_to_pp (m_ext_state, true, pp);
+ logger->end_log_line ();
+ }
+
+ /* Add the new node to the worklist. */
+ m_worklist.add_node (node);
+ return node;
+}
+
+/* Add an exploded_edge from SRC to DEST, recording its association
+ with SEDGE (which may be NULL), and, if non-NULL, taking ownership
+ of REWIND_INFO.
+ Return the newly-created eedge. */
+
+exploded_edge *
+exploded_graph::add_edge (exploded_node *src, exploded_node *dest,
+ const superedge *sedge,
+ const state_change &change,
+ exploded_edge::custom_info_t *custom_info)
+{
+ exploded_edge *e = new exploded_edge (src, dest, sedge, change, custom_info);
+ digraph::add_edge (e);
+ return e;
+}
+
+/* Ensure that this graph has per-program_point-data for POINT;
+ borrow a pointer to it. */
+
+per_program_point_data *
+exploded_graph::
+get_or_create_per_program_point_data (const program_point &point)
+{
+ if (per_program_point_data **slot = m_per_point_data.get (&point))
+ return *slot;
+
+ per_program_point_data *per_point_data = new per_program_point_data (point);
+ m_per_point_data.put (&per_point_data->m_key, per_point_data);
+ return per_point_data;
+}
+
+/* Ensure that this graph has per-call_string-data for CS;
+ borrow a pointer to it. */
+
+per_call_string_data *
+exploded_graph::get_or_create_per_call_string_data (const call_string &cs)
+{
+ if (per_call_string_data **slot = m_per_call_string_data.get (&cs))
+ return *slot;
+
+ per_call_string_data *data = new per_call_string_data (cs, m_sg.num_nodes ());
+ m_per_call_string_data.put (&data->m_key,
+ data);
+ return data;
+}
+
+/* Ensure that this graph has per-function-data for FUN;
+ borrow a pointer to it. */
+
+per_function_data *
+exploded_graph::get_or_create_per_function_data (function *fun)
+{
+ if (per_function_data **slot = m_per_function_data.get (fun))
+ return *slot;
+
+ per_function_data *data = new per_function_data ();
+ m_per_function_data.put (fun, data);
+ return data;
+}
+
+/* Get this graph's per-function-data for FUN if there is any,
+ otherwise NULL. */
+
+per_function_data *
+exploded_graph::get_per_function_data (function *fun) const
+{
+ if (per_function_data **slot
+ = const_cast <per_function_data_t &> (m_per_function_data).get (fun))
+ return *slot;
+
+ return NULL;
+}
+
+/* Return true if NODE and FUN should be traversed directly, rather than
+ called via other functions. */
+
+static bool
+toplevel_function_p (cgraph_node *node, function *fun, logger *logger)
+{
+ /* TODO: better logic here
+ e.g. only if more than one caller, and significantly complicated.
+ Perhaps some whole-callgraph analysis to decide if it's worth summarizing
+ an edge, and if so, we need summaries. */
+ if (flag_analyzer_call_summaries)
+ {
+ int num_call_sites = 0;
+ for (cgraph_edge *edge = node->callers; edge; edge = edge->next_caller)
+ ++num_call_sites;
+
+ /* For now, if there's more than one in-edge, and we want call
+ summaries, do it at the top level so that there's a chance
+ we'll have a summary when we need one. */
+ if (num_call_sites > 1)
+ {
+ if (logger)
+ logger->log ("traversing %qE (%i call sites)",
+ fun->decl, num_call_sites);
+ return true;
+ }
+ }
+
+ if (!TREE_PUBLIC (fun->decl))
+ {
+ if (logger)
+ logger->log ("not traversing %qE (static)", fun->decl);
+ return false;
+ }
+
+ if (logger)
+ logger->log ("traversing %qE (all checks passed)", fun->decl);
+
+ return true;
+}
+
+/* Add initial nodes to EG, with entrypoints for externally-callable
+ functions. */
+
+void
+exploded_graph::build_initial_worklist ()
+{
+ logger * const logger = get_logger ();
+ LOG_SCOPE (logger);
+
+ cgraph_node *node;
+ FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (node)
+ {
+ function *fun = node->get_fun ();
+ if (!toplevel_function_p (node, fun, logger))
+ continue;
+ exploded_node *enode = add_function_entry (fun);
+ if (logger)
+ logger->log ("created EN %i for %qE entrypoint",
+ enode->m_index, fun->decl);
+ }
+}
+
+/* The main loop of the analysis.
+ Take freshly-created exploded_nodes from the worklist, calling
+ process_node on them to explore the <point, state> graph.
+ Add edges to their successors, potentially creating new successors
+ (which are also added to the worklist). */
+
+void
+exploded_graph::process_worklist ()
+{
+ logger * const logger = get_logger ();
+ LOG_SCOPE (logger);
+ auto_timevar tv (TV_ANALYZER_WORKLIST);
+
+ while (m_worklist.length () > 0)
+ {
+ exploded_node *node = m_worklist.take_next ();
+ gcc_assert (node->m_succs.length () == 0
+ || node == m_origin);
+
+ if (logger)
+ logger->log ("next to process: EN: %i", node->m_index);
+
+ /* Avoid exponential explosions of nodes by attempting to merge
+ nodes that are at the same program point and which have
+ sufficiently similar state. */
+ if (flag_analyzer_state_merge && node != m_origin)
+ if (exploded_node *node_2 = m_worklist.peek_next ())
+ {
+ gcc_assert (node->m_succs.length () == 0);
+ gcc_assert (node_2->m_succs.length () == 0);
+
+ gcc_assert (node != node_2);
+
+ if (logger)
+ logger->log ("peek worklist: EN: %i", node_2->m_index);
+
+ if (node->get_point () == node_2->get_point ())
+ {
+ if (logger)
+ {
+ format f (false);
+ pretty_printer *pp = logger->get_printer ();
+ logger->start_log_line ();
+ logger->log_partial
+ ("got potential merge EN: %i and EN: %i at ",
+ node->m_index, node_2->m_index);
+ node->get_point ().print (pp, f);
+ logger->end_log_line ();
+ }
+
+ const program_state &state = node->get_state ();
+ const program_state &state_2 = node_2->get_state ();
+
+ /* They shouldn't be equal, or we wouldn't have two
+ separate nodes. */
+ gcc_assert (state != state_2);
+
+ program_state merged_state (m_ext_state);
+ state_change change;
+ if (state.can_merge_with_p (state_2, m_ext_state,
+ &merged_state))
+ {
+ if (logger)
+ logger->log ("merging EN: %i and EN: %i",
+ node->m_index, node_2->m_index);
+
+ if (merged_state == state)
+ {
+ /* Then merge node_2 into node by adding an edge. */
+ add_edge (node_2, node, NULL, change);
+
+ /* Remove node_2 from the worklist. */
+ m_worklist.take_next ();
+
+ /* Continue processing "node" below. */
+ }
+ else if (merged_state == state_2)
+ {
+ /* Then merge node into node_2, and leave node_2
+ in the worklist, to be processed on the next
+ iteration. */
+ add_edge (node, node_2, NULL, change);
+ continue;
+ }
+ else
+ {
+ /* We have a merged state that differs from
+ both state and state_2. */
+
+ /* Remove node_2 from the worklist. */
+ m_worklist.take_next ();
+
+ /* Create (or get) an exploded node for the merged
+ states, adding to the worklist. */
+ exploded_node *merged_enode
+ = get_or_create_node (node->get_point (),
+ merged_state, &change);
+ if (merged_enode == NULL)
+ continue;
+
+ if (logger)
+ logger->log ("merged EN: %i and EN: %i into EN: %i",
+ node->m_index, node_2->m_index,
+ merged_enode->m_index);
+
+ /* "node" and "node_2" have both now been removed
+ from the worklist; we should not process them.
+
+ "merged_enode" may be a new node; if so it will be
+ processed in a subsequent iteration.
+ Alternatively, "merged_enode" could be an existing
+ node; one way the latter can
+ happen is if we end up merging a succession of
+ similar nodes into one. */
+
+ /* If merged_node is one of the two we were merging,
+ add it back to the worklist to ensure it gets
+ processed.
+
+ Add edges from the merged nodes to it (but not a
+ self-edge). */
+ if (merged_enode == node)
+ m_worklist.add_node (merged_enode);
+ else
+ add_edge (node, merged_enode, NULL, change);
+
+ if (merged_enode == node_2)
+ m_worklist.add_node (merged_enode);
+ else
+ add_edge (node_2, merged_enode, NULL, change);
+
+ continue;
+ }
+ }
+
+ /* TODO: should we attempt more than two nodes,
+ or just do pairs of nodes? (and hope that we get
+ a cascade of mergers). */
+ }
+ }
+
+ process_node (node);
+
+ /* Impose a hard limit on the number of exploded nodes, to ensure
+ that the analysis terminates in the face of pathological state
+ explosion (or bugs).
+
+ Specifically, the limit is on the number of PK_AFTER_SUPERNODE
+ exploded nodes, looking at supernode exit events.
+
+ We use exit rather than entry since there can be multiple
+ entry ENs, one per phi; the number of PK_AFTER_SUPERNODE ought
+ to be equivalent to the number of supernodes multiplied by the
+ number of states. */
+ const int limit = m_sg.num_nodes () * param_analyzer_bb_explosion_factor;
+ if (m_global_stats.m_num_nodes[PK_AFTER_SUPERNODE] > limit)
+ {
+ if (logger)
+ logger->log ("bailing out; too many nodes");
+ warning_at (node->get_point ().get_location (),
+ OPT_Wanalyzer_too_complex,
+ "analysis bailed out early"
+ " (%i 'after-snode' enodes; %i enodes)",
+ m_global_stats.m_num_nodes[PK_AFTER_SUPERNODE],
+ m_nodes.length ());
+ return;
+ }
+ }
+}
+
+/* Return true if STMT must appear at the start of its exploded node, and
+ thus we can't consolidate its effects within a run of other statements,
+ where PREV_STMT was the previous statement. */
+
+static bool
+stmt_requires_new_enode_p (const gimple *stmt,
+ const gimple *prev_stmt)
+{
+ /* Stop consolidating at calls to
+ "__analyzer_dump_exploded_nodes", so they always appear at the
+ start of an exploded_node. */
+ if (const gcall *call = dyn_cast <const gcall *> (stmt))
+ if (is_special_named_call_p (call, "__analyzer_dump_exploded_nodes",
+ 1))
+ return true;
+
+ /* If we had a PREV_STMT with an unknown location, and this stmt
+ has a known location, then if a state change happens here, it
+ could be consolidated into PREV_STMT, giving us an event with
+ no location. Ensure that STMT gets its own exploded_node to
+ avoid this. */
+ if (prev_stmt->location == UNKNOWN_LOCATION
+ && stmt->location != UNKNOWN_LOCATION)
+ return true;
+
+ return false;
+}
+
+/* The core of exploded_graph::process_worklist (the main analysis loop),
+ handling one node in the worklist.
+
+ Get successor <point, state> pairs for NODE, calling get_or_create_node
+ on them, and adding an exploded_edge to each successor.
+
+ Freshly-created nodes will be added to the worklist. */
+
+void
+exploded_graph::process_node (exploded_node *node)
+{
+ logger * const logger = get_logger ();
+ LOG_FUNC_1 (logger, "EN: %i", node->m_index);
+
+ const program_point &point = node->get_point ();
+
+ /* Update cfun and input_location in case of an ICE: make it easier to
+ track down which source construct we're failing to handle. */
+ auto_cfun sentinel (node->get_function ());
+ const gimple *stmt = point.get_stmt ();
+ if (stmt)
+ input_location = stmt->location;
+
+ const program_state &state = node->get_state ();
+ if (logger)
+ {
+ pretty_printer *pp = logger->get_printer ();
+ logger->start_log_line ();
+ pp_string (pp, "point: ");
+ point.print (pp, format (false));
+ pp_string (pp, ", state: ");
+ state.dump_to_pp (m_ext_state, true, pp);
+ logger->end_log_line ();
+ }
+
+ switch (point.get_kind ())
+ {
+ default:
+ gcc_unreachable ();
+ case PK_ORIGIN:
+ /* This node exists to simplify finding the shortest path
+ to an exploded_node. */
+ break;
+
+ case PK_BEFORE_SUPERNODE:
+ {
+ program_state next_state (state);
+ state_change change;
+
+ if (point.get_from_edge ())
+ {
+ impl_region_model_context ctxt (*this, node,
+ &state, &next_state, &change,
+ NULL);
+ const cfg_superedge *last_cfg_superedge
+ = point.get_from_edge ()->dyn_cast_cfg_superedge ();
+ if (last_cfg_superedge)
+ next_state.m_region_model->update_for_phis
+ (node->get_supernode (),
+ last_cfg_superedge,
+ &ctxt);
+ }
+
+ if (point.get_supernode ()->m_stmts.length () > 0)
+ {
+ program_point next_point
+ = program_point::before_stmt (point.get_supernode (), 0,
+ point.get_call_string ());
+ exploded_node *next
+ = get_or_create_node (next_point, next_state, &change);
+ if (next)
+ add_edge (node, next, NULL, change);
+ }
+ else
+ {
+ program_point next_point
+ = program_point::after_supernode (point.get_supernode (),
+ point.get_call_string ());
+ exploded_node *next = get_or_create_node (next_point, next_state,
+ &change);
+ if (next)
+ add_edge (node, next, NULL, change);
+ }
+ }
+ break;
+ case PK_BEFORE_STMT:
+ {
+ /* Determine the effect of a run of one or more statements
+ within one supernode, generating an edge to the program_point
+ after the last statement that's processed.
+
+ Stop iterating statements and thus consolidating into one enode
+ when:
+ - reaching the end of the statements in the supernode
+ - if an sm-state-change occurs (so that it gets its own
+ exploded_node)
+ - if "-fanalyzer-fine-grained" is active
+ - encountering certain statements that must appear at the start of
+ their enode (for which stmt_requires_new_enode_p returns true)
+
+ Update next_state in-place, to get the result of the one
+ or more stmts that are processed. */
+ program_state next_state (state);
+ state_change change;
+ const supernode *snode = point.get_supernode ();
+ unsigned stmt_idx;
+ const gimple *prev_stmt = NULL;
+ for (stmt_idx = point.get_stmt_idx ();
+ stmt_idx < snode->m_stmts.length ();
+ stmt_idx++)
+ {
+ const gimple *stmt = snode->m_stmts[stmt_idx];
+
+ if (stmt_idx > point.get_stmt_idx ())
+ if (stmt_requires_new_enode_p (stmt, prev_stmt))
+ {
+ stmt_idx--;
+ break;
+ }
+ prev_stmt = stmt;
+
+ /* Process the stmt. */
+ exploded_node::on_stmt_flags flags
+ = node->on_stmt (*this, snode, stmt, &next_state, &change);
+
+ /* If flags.m_terminate_path, stop analyzing; any nodes/edges
+ will have been added by on_stmt (e.g. for handling longjmp). */
+ if (flags.m_terminate_path)
+ return;
+
+ if (flags.m_sm_changes || flag_analyzer_fine_grained)
+ break;
+ }
+ unsigned next_idx = stmt_idx + 1;
+ program_point next_point
+ = (next_idx < point.get_supernode ()->m_stmts.length ()
+ ? program_point::before_stmt (point.get_supernode (), next_idx,
+ point.get_call_string ())
+ : program_point::after_supernode (point.get_supernode (),
+ point.get_call_string ()));
+ exploded_node *next = get_or_create_node (next_point,
+ next_state, &change);
+ if (next)
+ add_edge (node, next, NULL, change);
+ }
+ break;
+ case PK_AFTER_SUPERNODE:
+ {
+ /* If this is an EXIT BB, detect leaks, and potentially
+ create a function summary. */
+ if (point.get_supernode ()->return_p ())
+ {
+ node->detect_leaks (*this);
+ if (flag_analyzer_call_summaries
+ && point.get_call_string ().empty_p ())
+ {
+ /* TODO: create function summary
+ There can be more than one; each corresponds to a different
+ final enode in the function. */
+ if (logger)
+ {
+ pretty_printer *pp = logger->get_printer ();
+ logger->start_log_line ();
+ logger->log_partial
+ ("would create function summary for %qE; state: ",
+ point.get_fndecl ());
+ state.dump_to_pp (m_ext_state, true, pp);
+ logger->end_log_line ();
+ }
+ per_function_data *per_fn_data
+ = get_or_create_per_function_data (point.get_function ());
+ per_fn_data->add_call_summary (node);
+ }
+ }
+ /* Traverse into successors of the supernode. */
+ int i;
+ superedge *succ;
+ FOR_EACH_VEC_ELT (point.get_supernode ()->m_succs, i, succ)
+ {
+ if (logger)
+ logger->log ("considering SN: %i -> SN: %i",
+ succ->m_src->m_index, succ->m_dest->m_index);
+
+ state_change change;
+
+ program_point next_point
+ = program_point::before_supernode (succ->m_dest, succ,
+ point.get_call_string ());
+ program_state next_state (state);
+
+ if (!node->on_edge (*this, succ, &next_point, &next_state, &change))
+ {
+ if (logger)
+ logger->log ("skipping impossible edge to SN: %i",
+ succ->m_dest->m_index);
+ continue;
+ }
+
+ exploded_node *next = get_or_create_node (next_point, next_state,
+ &change);
+ if (next)
+ add_edge (node, next, succ, change);
+ }
+ }
+ break;
+ }
+}
+
+/* Ensure that this graph has a stats instance for FN, and return it.
+ FN can be NULL, in which case a stats instance is returned covering
+ "functionless" parts of the graph (the origin node). */
+
+stats *
+exploded_graph::get_or_create_function_stats (function *fn)
+{
+ if (!fn)
+ return &m_functionless_stats;
+
+ if (stats **slot = m_per_function_stats.get (fn))
+ return *slot;
+ else
+ {
+ /* Not quite the number of supernodes, but a close approximation. */
+ int num_supernodes = fn ? n_basic_blocks_for_fn (fn) : 0;
+ stats *new_stats = new stats (num_supernodes);
+ m_per_function_stats.put (fn, new_stats);
+ return new_stats;
+ }
+}
+
+/* Write all stats information to this graph's logger, if any. */
+
+void
+exploded_graph::log_stats () const
+{
+ logger * const logger = get_logger ();
+ if (!logger)
+ return;
+
+ LOG_SCOPE (logger);
+
+ logger->log ("m_sg.num_nodes (): %i", m_sg.num_nodes ());
+ logger->log ("m_nodes.length (): %i", m_nodes.length ());
+ logger->log ("m_edges.length (): %i", m_edges.length ());
+
+ logger->log ("global stats:");
+ m_global_stats.log (logger);
+
+ for (function_stat_map_t::iterator iter = m_per_function_stats.begin ();
+ iter != m_per_function_stats.end ();
+ ++iter)
+ {
+ function *fn = (*iter).first;
+ log_scope s (logger, function_name (fn));
+ (*iter).second->log (logger);
+ }
+}
+
+/* Dump all stats information to OUT. */
+
+void
+exploded_graph::dump_stats (FILE *out) const
+{
+ fprintf (out, "m_sg.num_nodes (): %i\n", m_sg.num_nodes ());
+ fprintf (out, "m_nodes.length (): %i\n", m_nodes.length ());
+ fprintf (out, "m_edges.length (): %i\n", m_edges.length ());
+
+ fprintf (out, "global stats:\n");
+ m_global_stats.dump (out);
+
+ for (function_stat_map_t::iterator iter = m_per_function_stats.begin ();
+ iter != m_per_function_stats.end ();
+ ++iter)
+ {
+ function *fn = (*iter).first;
+ fprintf (out, "function: %s\n", function_name (fn));
+ (*iter).second->dump (out);
+ }
+
+ fprintf (out, "PK_AFTER_SUPERNODE per supernode:\n");
+ for (unsigned i = 0; i < m_PK_AFTER_SUPERNODE_per_snode.length (); i++)
+ fprintf (out, " SN %i: %3i\n", i, m_PK_AFTER_SUPERNODE_per_snode[i]);
+}
+
+void
+exploded_graph::dump_states_for_supernode (FILE *out,
+ const supernode *snode) const
+{
+ fprintf (out, "PK_AFTER_SUPERNODE nodes for SN: %i\n", snode->m_index);
+ int i;
+ exploded_node *enode;
+ int state_idx = 0;
+ FOR_EACH_VEC_ELT (m_nodes, i, enode)
+ {
+ const supernode *iter_snode = enode->get_supernode ();
+ if (enode->get_point ().get_kind () == PK_AFTER_SUPERNODE
+ && iter_snode == snode)
+ {
+ pretty_printer pp;
+ pp_format_decoder (&pp) = default_tree_printer;
+ enode->get_state ().dump_to_pp (m_ext_state, true, &pp);
+ fprintf (out, "state %i: EN: %i\n %s\n",
+ state_idx++, enode->m_index,
+ pp_formatted_text (&pp));
+ }
+ }
+ fprintf (out, "#exploded_node for PK_AFTER_SUPERNODE for SN: %i = %i\n",
+ snode->m_index, state_idx);
+}
+
+/* Look for the last use of SEARCH_STMT within this path.
+ If found, write the edge's index to *OUT_IDX and return true; otherwise
+ return false. */
+
+bool
+exploded_path::find_stmt_backwards (const gimple *search_stmt,
+ int *out_idx) const
+{
+ int i;
+ const exploded_edge *eedge;
+ FOR_EACH_VEC_ELT_REVERSE (m_edges, i, eedge)
+ {
+ const exploded_node *dst_node = eedge->m_dest;
+ const program_point &dst_point = dst_node->get_point ();
+ const gimple *stmt = dst_point.get_stmt ();
+ if (stmt == search_stmt)
+ {
+ *out_idx = i;
+ return true;
+ }
+ }
+ return false;
+}
+
+/* Get the final exploded_node in this path, which must be non-empty. */
+
+exploded_node *
+exploded_path::get_final_enode () const
+{
+ gcc_assert (m_edges.length () > 0);
+ return m_edges[m_edges.length () - 1]->m_dest;
+}
+
+/* Check state along this path, returning true if it is feasible. */
+
+bool
+exploded_path::feasible_p (logger *logger) const
+{
+ LOG_SCOPE (logger);
+
+ /* Traverse the path, updating this model. */
+ region_model model;
+ for (unsigned i = 0; i < m_edges.length (); i++)
+ {
+ const exploded_edge *eedge = m_edges[i];
+ if (logger)
+ logger->log ("considering edge %i: EN:%i -> EN:%i",
+ i,
+ eedge->m_src->m_index,
+ eedge->m_dest->m_index);
+ const exploded_node &src_enode = *eedge->m_src;
+ const program_point &src_point = src_enode.get_point ();
+ if (logger)
+ {
+ logger->start_log_line ();
+ src_point.print (logger->get_printer (), format (false));
+ logger->end_log_line ();
+ }
+
+ if (const gimple *stmt = src_point.get_stmt ())
+ {
+ /* Update cfun and input_location in case of ICE: make it easier to
+ track down which source construct we're failing to handle. */
+ auto_cfun sentinel (src_point.get_function ());
+ input_location = stmt->location;
+
+ if (const gassign *assign = dyn_cast <const gassign *> (stmt))
+ model.on_assignment (assign, NULL);
+ else if (const greturn *return_ = dyn_cast <const greturn *> (stmt))
+ model.on_return (return_, NULL);
+ }
+
+ const superedge *sedge = eedge->m_sedge;
+ if (sedge)
+ {
+ if (logger)
+ logger->log (" sedge: SN:%i -> SN:%i %s",
+ sedge->m_src->m_index,
+ sedge->m_dest->m_index,
+ sedge->get_description (false));
+
+ const gimple *last_stmt = src_point.get_supernode ()->get_last_stmt ();
+ if (!model.maybe_update_for_edge (*sedge, last_stmt, NULL))
+ {
+ if (logger)
+ {
+ logger->log ("rejecting due to region model");
+ model.dump_to_pp (logger->get_printer (), false);
+ }
+ return false;
+ }
+ }
+ else
+ {
+ /* Special-case the initial eedge from the origin node to the
+ initial function by pushing a frame for it. */
+ if (i == 0)
+ {
+ gcc_assert (eedge->m_src->m_index == 0);
+ gcc_assert (src_point.get_kind () == PK_ORIGIN);
+ gcc_assert (eedge->m_dest->get_point ().get_kind ()
+ == PK_BEFORE_SUPERNODE);
+ function *fun = eedge->m_dest->get_function ();
+ gcc_assert (fun);
+ model.push_frame (fun, NULL, NULL);
+ if (logger)
+ logger->log (" pushing frame for %qD", fun->decl);
+ }
+ else if (eedge->m_custom_info)
+ eedge->m_custom_info->update_model (&model, *eedge);
+ }
+
+ /* Handle phi nodes on an edge leaving a PK_BEFORE_SUPERNODE (to
+ a PK_BEFORE_STMT, or a PK_AFTER_SUPERNODE if no stmts).
+ This will typically not be associated with a superedge. */
+ if (src_point.get_from_edge ())
+ {
+ const cfg_superedge *last_cfg_superedge
+ = src_point.get_from_edge ()->dyn_cast_cfg_superedge ();
+ if (last_cfg_superedge)
+ {
+ if (logger)
+ logger->log (" update for phis");
+ model.update_for_phis (src_enode.get_supernode (),
+ last_cfg_superedge,
+ NULL);
+ }
+ }
+
+ if (logger)
+ {
+ logger->log ("state after edge %i: EN:%i -> EN:%i",
+ i,
+ eedge->m_src->m_index,
+ eedge->m_dest->m_index);
+ logger->start_log_line ();
+ model.dump_to_pp (logger->get_printer (), true);
+ logger->end_log_line ();
+ }
+ }
+
+ return true;
+}
+
+/* Dump this path in multiline form to PP. */
+
+void
+exploded_path::dump_to_pp (pretty_printer *pp) const
+{
+ for (unsigned i = 0; i < m_edges.length (); i++)
+ {
+ const exploded_edge *eedge = m_edges[i];
+ pp_printf (pp, "m_edges[%i]: EN %i -> EN %i",
+ i,
+ eedge->m_src->m_index,
+ eedge->m_dest->m_index);
+ pp_newline (pp);
+ }
+}
+
+/* Dump this path in multiline form to FP. */
+
+void
+exploded_path::dump (FILE *fp) const
+{
+ pretty_printer pp;
+ pp_format_decoder (&pp) = default_tree_printer;
+ pp_show_color (&pp) = pp_show_color (global_dc->printer);
+ pp.buffer->stream = fp;
+ dump_to_pp (&pp);
+ pp_flush (&pp);
+}
+
+/* Dump this path in multiline form to stderr. */
+
+DEBUG_FUNCTION void
+exploded_path::dump () const
+{
+ dump (stderr);
+}
+
+/* A family of cluster subclasses for use when generating .dot output for
+ exploded graphs (-fdump-analyzer-exploded-graph), for grouping the
+ enodes into hierarchical boxes.
+
+ All functionless enodes appear in the top-level graph.
+ Every (function, call_string) pair gets its own cluster. Within that
+ cluster, each supernode gets its own cluster.
+
+ Hence all enodes relating to a particular function with a particular
+ callstring will be in a cluster together; all enodes for the same
+ function but with a different callstring will be in a different
+ cluster. */
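+/* For example, the nesting might look like this (a hypothetical
+   sketch; the actual labels come from the dump_dot implementations
+   below):
+
+     top-level graph: the functionless origin enode
+       function_call_string_cluster for "foo" with an empty call string
+         supernode_cluster for SN 0: its enodes
+         supernode_cluster for SN 1: its enodes
+       function_call_string_cluster for "bar" as called from "foo"
+         supernode_cluster for SN 4: its enodes  */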
+
+/* Base class of cluster for clustering exploded_node instances in .dot
+ output, based on various subclass-specific criteria. */
+
+class exploded_cluster : public cluster<eg_traits>
+{
+};
+
+/* Cluster containing all exploded_node instances for one supernode. */
+
+class supernode_cluster : public exploded_cluster
+{
+public:
+ supernode_cluster (const supernode *supernode) : m_supernode (supernode) {}
+
+ // TODO: dtor?
+
+ void dump_dot (graphviz_out *gv, const dump_args_t &args) const FINAL OVERRIDE
+ {
+ gv->println ("subgraph \"cluster_supernode_%p\" {",
+ (const void *)this);
+ gv->indent ();
+ gv->println ("style=\"dashed\";");
+ gv->println ("label=\"SN: %i\";", m_supernode->m_index);
+
+ int i;
+ exploded_node *enode;
+ FOR_EACH_VEC_ELT (m_enodes, i, enode)
+ enode->dump_dot (gv, args);
+
+ /* Terminate subgraph. */
+ gv->outdent ();
+ gv->println ("}");
+ }
+
+ void add_node (exploded_node *en) FINAL OVERRIDE
+ {
+ m_enodes.safe_push (en);
+ }
+
+private:
+ const supernode *m_supernode;
+ auto_vec <exploded_node *> m_enodes;
+};
+
+/* Cluster containing all supernode_cluster instances for one
+ (function, call_string) pair. */
+
+class function_call_string_cluster : public exploded_cluster
+{
+public:
+ function_call_string_cluster (function *fun, call_string cs)
+ : m_fun (fun), m_cs (cs) {}
+
+ ~function_call_string_cluster ()
+ {
+ for (map_t::iterator iter = m_map.begin ();
+ iter != m_map.end ();
+ ++iter)
+ delete (*iter).second;
+ }
+
+ void dump_dot (graphviz_out *gv, const dump_args_t &args) const FINAL OVERRIDE
+ {
+ const char *funcname = function_name (m_fun);
+
+ gv->println ("subgraph \"cluster_function_%p\" {", (const void *)this);
+ gv->indent ();
+ gv->write_indent ();
+ gv->print ("label=\"call string: ");
+ m_cs.print (gv->get_pp ());
+ gv->print (" function: %s \";", funcname);
+ gv->print ("\n");
+
+ for (map_t::iterator iter = m_map.begin ();
+ iter != m_map.end ();
+ ++iter)
+ (*iter).second->dump_dot (gv, args);
+
+ /* Terminate subgraph. */
+ gv->outdent ();
+ gv->println ("}");
+ }
+
+ void add_node (exploded_node *en) FINAL OVERRIDE
+ {
+ const supernode *supernode = en->get_supernode ();
+ gcc_assert (supernode);
+ supernode_cluster **slot = m_map.get (supernode);
+ if (slot)
+ (*slot)->add_node (en);
+ else
+ {
+ supernode_cluster *child = new supernode_cluster (supernode);
+ m_map.put (supernode, child);
+ child->add_node (en);
+ }
+ }
+
+private:
+ function *m_fun;
+ call_string m_cs;
+ typedef ordered_hash_map<const supernode *, supernode_cluster *> map_t;
+ map_t m_map;
+};
+
+/* Keys for root_cluster. */
+
+struct function_call_string
+{
+ function_call_string (function *fun, call_string cs)
+ : m_fun (fun), m_cs (cs)
+ {
+ gcc_assert (fun);
+ }
+
+ function *m_fun;
+ call_string m_cs;
+};
+
+template <> struct default_hash_traits<function_call_string>
+: public pod_hash_traits<function_call_string>
+{
+ static const bool empty_zero_p = false;
+};
+
+template <>
+inline hashval_t
+pod_hash_traits<function_call_string>::hash (value_type v)
+{
+ return pointer_hash <function>::hash (v.m_fun) ^ v.m_cs.hash ();
+}
+
+template <>
+inline bool
+pod_hash_traits<function_call_string>::equal (const value_type &existing,
+ const value_type &candidate)
+{
+ return existing.m_fun == candidate.m_fun && existing.m_cs == candidate.m_cs;
+}
+template <>
+inline void
+pod_hash_traits<function_call_string>::mark_deleted (value_type &v)
+{
+ v.m_fun = reinterpret_cast<function *> (1);
+}
+template <>
+inline void
+pod_hash_traits<function_call_string>::mark_empty (value_type &v)
+{
+ v.m_fun = reinterpret_cast<function *> (NULL);
+}
+template <>
+inline bool
+pod_hash_traits<function_call_string>::is_deleted (value_type v)
+{
+ return v.m_fun == reinterpret_cast<function *> (1);
+}
+template <>
+inline bool
+pod_hash_traits<function_call_string>::is_empty (value_type v)
+{
+ return v.m_fun == reinterpret_cast<function *> (NULL);
+}
+
+/* Top-level cluster for generating .dot output for exploded graphs,
+ handling the functionless nodes, and grouping the remaining nodes by
+ (function, call_string) pair. */
+
+class root_cluster : public exploded_cluster
+{
+public:
+ ~root_cluster ()
+ {
+ for (map_t::iterator iter = m_map.begin ();
+ iter != m_map.end ();
+ ++iter)
+ delete (*iter).second;
+ }
+
+ void dump_dot (graphviz_out *gv, const dump_args_t &args) const FINAL OVERRIDE
+ {
+ int i;
+ exploded_node *enode;
+ FOR_EACH_VEC_ELT (m_functionless_enodes, i, enode)
+ enode->dump_dot (gv, args);
+
+ for (map_t::iterator iter = m_map.begin ();
+ iter != m_map.end ();
+ ++iter)
+ (*iter).second->dump_dot (gv, args);
+ }
+
+ void add_node (exploded_node *en) FINAL OVERRIDE
+ {
+ function *fun = en->get_function ();
+ if (!fun)
+ {
+ m_functionless_enodes.safe_push (en);
+ return;
+ }
+
+ const call_string &cs = en->get_point ().get_call_string ();
+ function_call_string key (fun, cs);
+ function_call_string_cluster **slot = m_map.get (key);
+ if (slot)
+ (*slot)->add_node (en);
+ else
+ {
+ function_call_string_cluster *child
+ = new function_call_string_cluster (fun, cs);
+ m_map.put (key, child);
+ child->add_node (en);
+ }
+ }
+
+private:
+ /* This can't be an ordered_hash_map, as we can't store vec<call_string>,
+ since it's not a POD; vec<>::quick_push has:
+ *slot = obj;
+ and the slot isn't initialized, so the assignment op dies when cleaning up
+ un-inited *slot (within the truncate call). */
+ typedef hash_map<function_call_string, function_call_string_cluster *> map_t;
+ map_t m_map;
+
+ /* This should just be the origin exploded_node. */
+ auto_vec <exploded_node *> m_functionless_enodes;
+};
+
+/* Subclass of range_label for use within
+ exploded_graph::dump_exploded_nodes for implementing
+ -fdump-analyzer-exploded-nodes: a label for a specific
+ exploded_node. */
+
+class enode_label : public range_label
+{
+ public:
+ enode_label (const extrinsic_state &ext_state,
+ exploded_node *enode)
+ : m_ext_state (ext_state), m_enode (enode) {}
+
+ label_text get_text (unsigned) const FINAL OVERRIDE
+ {
+ pretty_printer pp;
+ pp_format_decoder (&pp) = default_tree_printer;
+ m_enode->get_state ().dump_to_pp (m_ext_state, true, &pp);
+ return make_label_text (false, "EN: %i: %s",
+ m_enode->m_index, pp_formatted_text (&pp));
+ }
+
+private:
+ const extrinsic_state &m_ext_state;
+ exploded_node *m_enode;
+};
+
+/* Postprocessing support for dumping the exploded nodes.
+ Handle -fdump-analyzer-exploded-nodes,
+ -fdump-analyzer-exploded-nodes-2, and the
+ "__analyzer_dump_exploded_nodes" builtin. */
+
+void
+exploded_graph::dump_exploded_nodes () const
+{
+ // TODO
+ /* Locate calls to __analyzer_dump_exploded_nodes. */
+ // Print how many egs there are for them?
+ /* Better: log them as we go, and record the exploded nodes
+ in question. */
+
+ /* Show every enode. */
+
+ /* Gather them by stmt, so that we can more clearly see the
+ "hotspots" requiring numerous exploded nodes. */
+
+ /* Alternatively, simply throw them all into one big rich_location
+ and see if the label-printing will sort it out...
+ This requires them all to be in the same source file. */
+
+ if (flag_dump_analyzer_exploded_nodes)
+ {
+ auto_timevar tv (TV_ANALYZER_DUMP);
+ gcc_rich_location richloc (UNKNOWN_LOCATION);
+ unsigned i;
+ exploded_node *enode;
+ FOR_EACH_VEC_ELT (m_nodes, i, enode)
+ {
+ if (const gimple *stmt = enode->get_stmt ())
+ {
+ if (richloc.get_loc () == UNKNOWN_LOCATION)
+ richloc.set_range (0, stmt->location, SHOW_RANGE_WITH_CARET);
+ else
+ richloc.add_range (stmt->location,
+ SHOW_RANGE_WITHOUT_CARET,
+ new enode_label (m_ext_state, enode));
+ }
+ }
+ warning_at (&richloc, 0, "%i exploded nodes", m_nodes.length ());
+
+ /* Repeat the warning without all the labels, so that the message is visible
+ (the other one may well have scrolled past the terminal limit). */
+ warning_at (richloc.get_loc (), 0,
+ "%i exploded nodes", m_nodes.length ());
+
+ if (m_worklist.length () > 0)
+ warning_at (richloc.get_loc (), 0,
+ "worklist still contains %i nodes", m_worklist.length ());
+ }
+
+ /* Dump the egraph in textual form to a dump file. */
+ if (flag_dump_analyzer_exploded_nodes_2)
+ {
+ auto_timevar tv (TV_ANALYZER_DUMP);
+ char *filename
+ = concat (dump_base_name, ".eg.txt", NULL);
+ FILE *outf = fopen (filename, "w");
+ if (!outf)
+   {
+     error_at (UNKNOWN_LOCATION, "unable to open %qs for writing", filename);
+     free (filename);
+     return;
+   }
+ free (filename);
+
+ fprintf (outf, "exploded graph for %s\n", dump_base_name);
+ fprintf (outf, " nodes: %i\n", m_nodes.length ());
+ fprintf (outf, " edges: %i\n", m_edges.length ());
+
+ unsigned i;
+ exploded_node *enode;
+ FOR_EACH_VEC_ELT (m_nodes, i, enode)
+ {
+ fprintf (outf, "\nEN %i:\n", enode->m_index);
+ enode->dump_succs_and_preds (outf);
+ pretty_printer pp;
+ enode->get_point ().print (&pp, format (true));
+ fprintf (outf, "%s\n", pp_formatted_text (&pp));
+ enode->get_state ().dump_to_file (m_ext_state, false, outf);
+ }
+
+ fclose (outf);
+ }
+
+ /* Dump the egraph in textual form to multiple dump files, one per enode. */
+ if (flag_dump_analyzer_exploded_nodes_3)
+ {
+ auto_timevar tv (TV_ANALYZER_DUMP);
+
+ unsigned i;
+ exploded_node *enode;
+ FOR_EACH_VEC_ELT (m_nodes, i, enode)
+ {
+ char *filename
+ = xasprintf ("%s.en-%i.txt", dump_base_name, i);
+ FILE *outf = fopen (filename, "w");
+ if (!outf)
+   {
+     error_at (UNKNOWN_LOCATION, "unable to open %qs for writing", filename);
+     free (filename);
+     continue;
+   }
+ free (filename);
+
+ fprintf (outf, "EN %i:\n", enode->m_index);
+ enode->dump_succs_and_preds (outf);
+ pretty_printer pp;
+ enode->get_point ().print (&pp, format (true));
+ fprintf (outf, "%s\n", pp_formatted_text (&pp));
+ enode->get_state ().dump_to_file (m_ext_state, false, outf);
+
+ fclose (outf);
+ }
+ }
+
+ /* Emit a warning at any call to "__analyzer_dump_exploded_nodes",
+ giving the number of exploded nodes for "before-stmt", and their
+ IDs. */
+
+ unsigned i;
+ exploded_node *enode;
+ hash_set<const gimple *> seen;
+ FOR_EACH_VEC_ELT (m_nodes, i, enode)
+ {
+ if (enode->get_point ().get_kind () != PK_BEFORE_STMT)
+ continue;
+
+ if (const gimple *stmt = enode->get_stmt ())
+ if (const gcall *call = dyn_cast <const gcall *> (stmt))
+ if (is_special_named_call_p (call, "__analyzer_dump_exploded_nodes",
+ 1))
+ {
+ if (seen.contains (stmt))
+ continue;
+
+ /* This is O(N^2). */
+ unsigned j;
+ auto_vec<exploded_node *> enodes;
+ exploded_node *other_enode;
+ FOR_EACH_VEC_ELT (m_nodes, j, other_enode)
+ {
+ if (other_enode->get_point ().get_kind () != PK_BEFORE_STMT)
+ continue;
+ if (other_enode->get_stmt () == stmt)
+ enodes.safe_push (other_enode);
+ }
+
+ pretty_printer pp;
+ print_enode_indices (&pp, enodes);
+
+ warning_n (stmt->location, 0, enodes.length (),
+ "%i exploded node: %s",
+ "%i exploded nodes: %s",
+ enodes.length (), pp_formatted_text (&pp));
+ seen.add (stmt);
+
+ /* If the argument is non-zero, then print all of the states
+ of the various enodes. */
+ tree t_arg = fold (gimple_call_arg (call, 0));
+ if (TREE_CODE (t_arg) != INTEGER_CST)
+ {
+ error_at (call->location,
+ "integer constant required for arg 1");
+ return;
+ }
+ int i_arg = TREE_INT_CST_LOW (t_arg);
+ if (i_arg)
+ {
+ exploded_node *other_enode;
+ FOR_EACH_VEC_ELT (enodes, j, other_enode)
+ {
+ fprintf (stderr, "%i of %i: EN %i:\n",
+ j + 1, enodes.length (), other_enode->m_index);
+ other_enode->dump_succs_and_preds (stderr);
+ /* Dump state. */
+ other_enode->get_state ().dump (m_ext_state, false);
+ }
+ }
+ }
+ }
+}
+
+/* A collection of classes for visualizing the callgraph in .dot form
+ (as represented in the supergraph). */
+
+/* Forward decls. */
+class viz_callgraph_node;
+class viz_callgraph_edge;
+class viz_callgraph;
+class viz_callgraph_cluster;
+
+/* Traits for using "digraph.h" to visualize the callgraph. */
+
+struct viz_callgraph_traits
+{
+ typedef viz_callgraph_node node_t;
+ typedef viz_callgraph_edge edge_t;
+ typedef viz_callgraph graph_t;
+ struct dump_args_t
+ {
+ dump_args_t (const exploded_graph *eg) : m_eg (eg) {}
+ const exploded_graph *m_eg;
+ };
+ typedef viz_callgraph_cluster cluster_t;
+};
+
+/* Subclass of dnode representing a function within the callgraph. */
+
+class viz_callgraph_node : public dnode<viz_callgraph_traits>
+{
+ friend class viz_callgraph;
+
+public:
+ viz_callgraph_node (function *fun, int index)
+ : m_fun (fun), m_index (index), m_num_supernodes (0), m_num_superedges (0)
+ {
+ gcc_assert (fun);
+ }
+
+ void dump_dot (graphviz_out *gv, const dump_args_t &args) const FINAL OVERRIDE
+ {
+ pretty_printer *pp = gv->get_pp ();
+
+ dump_dot_id (pp);
+ pp_printf (pp, " [shape=none,margin=0,style=filled,fillcolor=%s,label=<",
+ "lightgrey");
+ pp_string (pp, "<TABLE BORDER=\"0\">");
+ pp_write_text_to_stream (pp);
+
+ gv->begin_tr ();
+ pp_printf (pp, "VCG: %i: %s", m_index, function_name (m_fun));
+ gv->end_tr ();
+ pp_newline (pp);
+
+ gv->begin_tr ();
+ pp_printf (pp, "supernodes: %i\n", m_num_supernodes);
+ gv->end_tr ();
+ pp_newline (pp);
+
+ gv->begin_tr ();
+ pp_printf (pp, "superedges: %i\n", m_num_superedges);
+ gv->end_tr ();
+ pp_newline (pp);
+
+ if (args.m_eg)
+ {
+ unsigned i;
+ exploded_node *enode;
+ unsigned num_enodes = 0;
+ FOR_EACH_VEC_ELT (args.m_eg->m_nodes, i, enode)
+ {
+ if (enode->get_point ().get_function () == m_fun)
+ num_enodes++;
+ }
+ gv->begin_tr ();
+ pp_printf (pp, "enodes: %i\n", num_enodes);
+ gv->end_tr ();
+ pp_newline (pp);
+
+ // TODO: also show the per-callstring breakdown
+ const exploded_graph::call_string_data_map_t *per_cs_data
+ = args.m_eg->get_per_call_string_data ();
+ for (typename exploded_graph::call_string_data_map_t::iterator iter
+ = per_cs_data->begin ();
+ iter != per_cs_data->end ();
+ ++iter)
+ {
+ const call_string *cs = (*iter).first;
+ //per_call_string_data *data = (*iter).second;
+ num_enodes = 0;
+ FOR_EACH_VEC_ELT (args.m_eg->m_nodes, i, enode)
+ {
+ if (enode->get_point ().get_function () == m_fun
+ && enode->get_point ().get_call_string () == *cs)
+ num_enodes++;
+ }
+ if (num_enodes > 0)
+ {
+ gv->begin_tr ();
+ cs->print (pp);
+ pp_printf (pp, ": %i\n", num_enodes);
+ pp_write_text_as_html_like_dot_to_stream (pp);
+ gv->end_tr ();
+ }
+ }
+
+ /* Show any summaries. */
+ per_function_data *data = args.m_eg->get_per_function_data (m_fun);
+ if (data)
+ {
+ pp_newline (pp);
+ gv->begin_tr ();
+ pp_printf (pp, "summaries: %i\n", data->m_summaries.length ());
+ pp_write_text_as_html_like_dot_to_stream (pp);
+ gv->end_tr ();
+ }
+ }
+
+ pp_string (pp, "</TABLE>>];\n\n");
+ pp_flush (pp);
+ }
+
+ void dump_dot_id (pretty_printer *pp) const
+ {
+ pp_printf (pp, "vcg_%i", m_index);
+ }
+
+private:
+ function *m_fun;
+ int m_index;
+ int m_num_supernodes;
+ int m_num_superedges;
+};
+
+/* Subclass of dedge representing a callgraph edge. */
+
+class viz_callgraph_edge : public dedge<viz_callgraph_traits>
+{
+public:
+ viz_callgraph_edge (viz_callgraph_node *src, viz_callgraph_node *dest,
+ const call_superedge *call_sedge)
+ : dedge (src, dest),
+ m_call_sedge (call_sedge)
+ {}
+
+ void dump_dot (graphviz_out *gv, const dump_args_t &) const
+ FINAL OVERRIDE
+ {
+ pretty_printer *pp = gv->get_pp ();
+
+ const char *style = "\"solid,bold\"";
+ const char *color = "black";
+ int weight = 10;
+ const char *constraint = "true";
+
+ m_src->dump_dot_id (pp);
+ pp_string (pp, " -> ");
+ m_dest->dump_dot_id (pp);
+ pp_printf (pp,
+ (" [style=%s, color=%s, weight=%d, constraint=%s,"
+ " headlabel=\""),
+ style, color, weight, constraint);
+ pp_printf (pp, "\"];\n");
+ }
+
+private:
+ const call_superedge * const m_call_sedge;
+};
+
+/* Subclass of digraph representing the callgraph. */
+
+class viz_callgraph : public digraph<viz_callgraph_traits>
+{
+public:
+ viz_callgraph (const supergraph &sg);
+
+ viz_callgraph_node *get_vcg_node_for_function (function *fun)
+ {
+ return *m_map.get (fun);
+ }
+
+ viz_callgraph_node *get_vcg_node_for_snode (supernode *snode)
+ {
+ return get_vcg_node_for_function (snode->m_fun);
+ }
+
+private:
+ const supergraph &m_sg;
+ hash_map<function *, viz_callgraph_node *> m_map;
+};
+
+/* Placeholder subclass of cluster. */
+
+class viz_callgraph_cluster : public cluster<viz_callgraph_traits>
+{
+};
+
+/* viz_callgraph's ctor. */
+
+viz_callgraph::viz_callgraph (const supergraph &sg)
+: m_sg (sg)
+{
+ cgraph_node *node;
+ FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (node)
+ {
+ function *fun = node->get_fun ();
+ viz_callgraph_node *vcg_node
+ = new viz_callgraph_node (fun, m_nodes.length ());
+ m_map.put (fun, vcg_node);
+ add_node (vcg_node);
+ }
+
+ unsigned i;
+ superedge *sedge;
+ FOR_EACH_VEC_ELT (sg.m_edges, i, sedge)
+ {
+ viz_callgraph_node *vcg_src = get_vcg_node_for_snode (sedge->m_src);
+ if (vcg_src->m_fun)
+ get_vcg_node_for_function (vcg_src->m_fun)->m_num_superedges++;
+ if (const call_superedge *call_sedge = sedge->dyn_cast_call_superedge ())
+ {
+ viz_callgraph_node *vcg_dest = get_vcg_node_for_snode (sedge->m_dest);
+ viz_callgraph_edge *vcg_edge
+ = new viz_callgraph_edge (vcg_src, vcg_dest, call_sedge);
+ add_edge (vcg_edge);
+ }
+ }
+
+ supernode *snode;
+ FOR_EACH_VEC_ELT (sg.m_nodes, i, snode)
+ {
+ if (snode->m_fun)
+ get_vcg_node_for_function (snode->m_fun)->m_num_supernodes++;
+ }
+}
+
+/* Dump the callgraph to FILENAME. */
+
+static void
+dump_callgraph (const supergraph &sg, const char *filename,
+ const exploded_graph *eg)
+{
+ FILE *outf = fopen (filename, "w");
+ if (!outf)
+ return;
+
+ // TODO
+ viz_callgraph vcg (sg);
+ vcg.dump_dot (filename, NULL, viz_callgraph_traits::dump_args_t (eg));
+
+ fclose (outf);
+}
+
+/* Dump the callgraph to "<srcfile>.callgraph.dot". */
+
+static void
+dump_callgraph (const supergraph &sg, const exploded_graph *eg)
+{
+ auto_timevar tv (TV_ANALYZER_DUMP);
+ char *filename = concat (dump_base_name, ".callgraph.dot", NULL);
+ dump_callgraph (sg, filename, eg);
+ free (filename);
+}
+
+/* Run the analysis "engine". */
+
+void
+impl_run_checkers (logger *logger)
+{
+ LOG_SCOPE (logger);
+
+ /* If using LTO, ensure that the cgraph nodes have function bodies. */
+ cgraph_node *node;
+ FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (node)
+ node->get_untransformed_body ();
+
+ /* Create the supergraph. */
+ supergraph sg (logger);
+
+ state_purge_map *purge_map = NULL;
+
+ if (flag_analyzer_state_purge)
+ purge_map = new state_purge_map (sg, logger);
+
+ if (flag_dump_analyzer_supergraph)
+ {
+ auto_timevar tv (TV_ANALYZER_DUMP);
+ char *filename = concat (dump_base_name, ".supergraph.dot", NULL);
+ supergraph::dump_args_t args ((enum supergraph_dot_flags)0, NULL);
+ sg.dump_dot (filename, args);
+ free (filename);
+ }
+
+ if (flag_dump_analyzer_state_purge)
+ {
+ auto_timevar tv (TV_ANALYZER_DUMP);
+ state_purge_annotator a (purge_map);
+ char *filename = concat (dump_base_name, ".state-purge.dot", NULL);
+ supergraph::dump_args_t args ((enum supergraph_dot_flags)0, &a);
+ sg.dump_dot (filename, args);
+ free (filename);
+ }
+
+ auto_delete_vec <state_machine> checkers;
+ make_checkers (checkers, logger);
+
+ if (logger)
+ {
+ int i;
+ state_machine *sm;
+ FOR_EACH_VEC_ELT (checkers, i, sm)
+ logger->log ("checkers[%i]: %s", i, sm->get_name ());
+ }
+
+ /* Extrinsic state shared by nodes in the graph. */
+ const extrinsic_state ext_state (checkers);
+
+ const analysis_plan plan (sg, logger);
+
+ /* The exploded graph. */
+ exploded_graph eg (sg, logger, ext_state, purge_map, plan,
+ analyzer_verbosity);
+
+ /* Add entrypoints to the graph for externally-callable functions. */
+ eg.build_initial_worklist ();
+
+ /* Now process the worklist, exploring the <point, state> graph. */
+ eg.process_worklist ();
+
+ if (flag_dump_analyzer_exploded_graph)
+ {
+ auto_timevar tv (TV_ANALYZER_DUMP);
+ char *filename
+ = concat (dump_base_name, ".eg.dot", NULL);
+ exploded_graph::dump_args_t args (eg);
+ root_cluster c;
+ eg.dump_dot (filename, &c, args);
+ free (filename);
+ }
+
+ /* Now emit any saved diagnostics. */
+ eg.get_diagnostic_manager ().emit_saved_diagnostics (eg);
+
+ eg.dump_exploded_nodes ();
+
+ eg.log_stats ();
+
+ if (flag_dump_analyzer_callgraph)
+ dump_callgraph (sg, &eg);
+
+ delete purge_map;
+}
+
+/* External entrypoint to the analysis "engine".
+ Set up any dumps, then call impl_run_checkers. */
+
+void
+run_checkers ()
+{
+ /* Handle -fdump-analyzer and -fdump-analyzer-stderr. */
+ FILE *dump_fout = NULL;
+ /* Track if we're responsible for closing dump_fout. */
+ bool owns_dump_fout = false;
+ if (flag_dump_analyzer_stderr)
+ dump_fout = stderr;
+ else if (flag_dump_analyzer)
+ {
+ char *dump_filename = concat (dump_base_name, ".analyzer.txt", NULL);
+ dump_fout = fopen (dump_filename, "w");
+ free (dump_filename);
+ if (dump_fout)
+ owns_dump_fout = true;
+ }
+
+ {
+ log_user the_logger (NULL);
+ if (dump_fout)
+ the_logger.set_logger (new logger (dump_fout, 0, 0,
+ *global_dc->printer));
+ LOG_SCOPE (the_logger.get_logger ());
+
+ impl_run_checkers (the_logger.get_logger ());
+
+ /* end of lifetime of the_logger (so that dump file is closed after the
+ various dtors run). */
+ }
+
+ if (owns_dump_fout)
+ fclose (dump_fout);
+}
+
+#endif /* #if ENABLE_ANALYZER */
--- /dev/null
+/* The analysis "engine".
+ Copyright (C) 2019-2020 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_ANALYZER_ENGINE_H
+#define GCC_ANALYZER_ENGINE_H
+
+extern void run_checkers ();
+
+#endif /* GCC_ANALYZER_ENGINE_H */
--- /dev/null
+/* Classes for managing a directed graph of <point, state> pairs.
+ Copyright (C) 2019-2020 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_ANALYZER_EXPLODED_GRAPH_H
+#define GCC_ANALYZER_EXPLODED_GRAPH_H
+
+/* Concrete implementation of region_model_context, wiring it up to the
+ rest of the analysis engine. */
+
+class impl_region_model_context : public region_model_context
+{
+ public:
+ impl_region_model_context (exploded_graph &eg,
+ const exploded_node *enode_for_diag,
+
+ /* TODO: should we be getting the ECs from the
+ old state, rather than the new? */
+ const program_state *old_state,
+ program_state *new_state,
+ state_change *change,
+
+ const gimple *stmt,
+ stmt_finder *stmt_finder = NULL);
+
+ impl_region_model_context (program_state *state,
+ state_change *change,
+ const extrinsic_state &ext_state);
+
+ void warn (pending_diagnostic *d) FINAL OVERRIDE;
+
+ void remap_svalue_ids (const svalue_id_map &map) FINAL OVERRIDE;
+
+ int on_svalue_purge (svalue_id first_unused_sid,
+ const svalue_id_map &map) FINAL OVERRIDE;
+
+ logger *get_logger () FINAL OVERRIDE
+ {
+ return m_logger.get_logger ();
+ }
+
+ void on_state_leak (const state_machine &sm,
+ int sm_idx,
+ svalue_id sid,
+ svalue_id first_unused_sid,
+ const svalue_id_map &map,
+ state_machine::state_t state);
+
+ void on_inherited_svalue (svalue_id parent_sid,
+ svalue_id child_sid) FINAL OVERRIDE;
+
+ void on_cast (svalue_id src_sid,
+ svalue_id dst_sid) FINAL OVERRIDE;
+
+ void on_condition (tree lhs, enum tree_code op, tree rhs) FINAL OVERRIDE;
+
+ exploded_graph *m_eg;
+ log_user m_logger;
+ const exploded_node *m_enode_for_diag;
+ const program_state *m_old_state;
+ program_state *m_new_state;
+ state_change *m_change;
+ const gimple *m_stmt;
+ stmt_finder *m_stmt_finder;
+ const extrinsic_state &m_ext_state;
+};
+
+/* A <program_point, program_state> pair, used internally by
+ exploded_node as its immutable data, and as a key for identifying
+ exploded_nodes we've already seen in the graph. */
+
+class point_and_state
+{
+public:
+ point_and_state (const program_point &point,
+ const program_state &state)
+ : m_point (point),
+ m_state (state),
+ m_hash (m_point.hash () ^ m_state.hash ())
+ {
+ }
+
+ hashval_t hash () const
+ {
+ return m_hash;
+ }
+ bool operator== (const point_and_state &other) const
+ {
+ return m_point == other.m_point && m_state == other.m_state;
+ }
+
+ const program_point &get_point () const { return m_point; }
+ const program_state &get_state () const { return m_state; }
+
+ void set_state (const program_state &state)
+ {
+ m_state = state;
+ m_hash = m_point.hash () ^ m_state.hash ();
+ }
+
+ void validate (const extrinsic_state &ext_state) const;
+
+private:
+ program_point m_point;
+ program_state m_state;
+ hashval_t m_hash;
+};
+
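point_and_state combines the hashes of its two components once, up front, so the deduplicating map can probe cheaply while equality still compares the full contents. A standalone sketch of such a cached-hash composite key (not GCC code; "key" is hypothetical, with std::unordered_set in place of the analyzer's hash_map):

#include <cstdio>
#include <functional>
#include <string>
#include <unordered_set>

class key
{
public:
  key (int point, std::string state)
    : m_point (point), m_state (std::move (state)),
      m_hash (std::hash<int> () (m_point)
              ^ std::hash<std::string> () (m_state))
  {}

  size_t hash () const { return m_hash; }   /* cached, O(1) per probe */

  bool operator== (const key &other) const
  {
    return m_point == other.m_point && m_state == other.m_state;
  }

private:
  int m_point;
  std::string m_state;
  size_t m_hash;
};

struct key_hasher
{
  size_t operator() (const key &k) const { return k.hash (); }
};

int main ()
{
  std::unordered_set<key, key_hasher> seen;
  /* Only the first <point, state> pair is new; the duplicate is rejected.  */
  printf ("%d\n", (int) seen.insert (key (1, "x == 0")).second);  /* 1 */
  printf ("%d\n", (int) seen.insert (key (1, "x == 0")).second);  /* 0 */
  return 0;
}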
+/* A traits class for exploded graphs and their nodes and edges. */
+
+struct eg_traits
+{
+ typedef exploded_node node_t;
+ typedef exploded_edge edge_t;
+ typedef exploded_graph graph_t;
+ struct dump_args_t
+ {
+ dump_args_t (const exploded_graph &eg) : m_eg (eg) {}
+ const exploded_graph &m_eg;
+ };
+ typedef exploded_cluster cluster_t;
+};
+
+/* An exploded_node is a unique, immutable <point, state> pair within the
+ exploded_graph.
+ Each exploded_node has a unique index within the graph
+ (for ease of debugging). */
+
+class exploded_node : public dnode<eg_traits>
+{
+ public:
+ exploded_node (point_and_state ps,
+ int index)
+ : m_ps (ps), m_index (index)
+ {
+ gcc_checking_assert (ps.get_state ().m_region_model->canonicalized_p ());
+ }
+
+ hashval_t hash () const { return m_ps.hash (); }
+
+ void dump_dot (graphviz_out *gv, const dump_args_t &args)
+ const FINAL OVERRIDE;
+ void dump_dot_id (pretty_printer *pp) const;
+
+ void dump_to_pp (pretty_printer *pp, const extrinsic_state &ext_state) const;
+ void dump (FILE *fp, const extrinsic_state &ext_state) const;
+ void dump (const extrinsic_state &ext_state) const;
+
+ /* The result of on_stmt. */
+ struct on_stmt_flags
+ {
+ on_stmt_flags (bool sm_changes)
+ : m_sm_changes (sm_changes),
+ m_terminate_path (false)
+ {}
+
+ static on_stmt_flags terminate_path ()
+ {
+ return on_stmt_flags (true, true);
+ }
+
+ static on_stmt_flags state_change (bool any_sm_changes)
+ {
+ return on_stmt_flags (any_sm_changes, false);
+ }
+
+ /* Did any sm-changes occur when handling the stmt? */
+ bool m_sm_changes : 1;
+
+ /* Should we stop analyzing this path (on_stmt may have already
+ added nodes/edges, e.g. when handling longjmp). */
+ bool m_terminate_path : 1;
+
+ private:
+ on_stmt_flags (bool sm_changes,
+ bool terminate_path)
+ : m_sm_changes (sm_changes),
+ m_terminate_path (terminate_path)
+ {}
+ };
+
+ on_stmt_flags on_stmt (exploded_graph &eg,
+ const supernode *snode,
+ const gimple *stmt,
+ program_state *state,
+ state_change *change) const;
+ bool on_edge (exploded_graph &eg,
+ const superedge *succ,
+ program_point *next_point,
+ program_state *next_state,
+ state_change *change) const;
+ void on_longjmp (exploded_graph &eg,
+ const gcall *call,
+ program_state *new_state,
+ region_model_context *ctxt) const;
+
+ void detect_leaks (exploded_graph &eg) const;
+
+ const program_point &get_point () const { return m_ps.get_point (); }
+ const supernode *get_supernode () const
+ {
+ return get_point ().get_supernode ();
+ }
+ function *get_function () const
+ {
+ return get_point ().get_function ();
+ }
+ int get_stack_depth () const
+ {
+ return get_point ().get_stack_depth ();
+ }
+ const gimple *get_stmt () const { return get_point ().get_stmt (); }
+
+ const program_state &get_state () const { return m_ps.get_state (); }
+
+ const point_and_state *get_ps_key () const { return &m_ps; }
+ const program_point *get_point_key () const { return &m_ps.get_point (); }
+
+ void dump_succs_and_preds (FILE *outf) const;
+
+private:
+ DISABLE_COPY_AND_ASSIGN (exploded_node);
+
+ const char * get_dot_fillcolor () const;
+
+ /* The <program_point, program_state> pair. This is const, as it
+ is immutable once the exploded_node has been created. */
+ const point_and_state m_ps;
+
+public:
+ /* The index of this exploded_node. */
+ const int m_index;
+};
+
+/* An edge within the exploded graph.
+ Some exploded_edges have an underlying superedge; others don't. */
+
+class exploded_edge : public dedge<eg_traits>
+{
+ public:
+ /* Abstract base class for associating custom data with an
+ exploded_edge, for handling non-standard edges such as
+ rewinding from a longjmp, signal handlers, etc. */
+ class custom_info_t
+ {
+ public:
+ virtual ~custom_info_t () {}
+
+ /* Hook for making the .dot label more readable. */
+ virtual void print (pretty_printer *pp) = 0;
+
+ /* Hook for updating MODEL within exploded_path::feasible_p. */
+ virtual void update_model (region_model *model,
+ const exploded_edge &eedge) = 0;
+
+ virtual void add_events_to_path (checker_path *emission_path,
+ const exploded_edge &eedge) = 0;
+ };
+
+ exploded_edge (exploded_node *src, exploded_node *dest,
+ const superedge *sedge,
+ const state_change &change,
+ custom_info_t *custom_info);
+ ~exploded_edge ();
+ void dump_dot (graphviz_out *gv, const dump_args_t &args)
+ const FINAL OVERRIDE;
+
+ //private:
+ const superedge *const m_sedge;
+
+ const state_change m_change;
+
+ /* NULL for most edges; will be non-NULL for special cases
+ such as an unwind from a longjmp to a setjmp, or when
+ a signal is delivered to a signal-handler.
+
+ Owned by this class. */
+ custom_info_t *m_custom_info;
+
+private:
+ DISABLE_COPY_AND_ASSIGN (exploded_edge);
+};
+
+/* Extra data for an exploded_edge that represents a rewind from a
+ longjmp to a setjmp. */
+
+class rewind_info_t : public exploded_edge::custom_info_t
+{
+public:
+ rewind_info_t (const exploded_node *enode_origin)
+ : m_enode_origin (enode_origin)
+ {}
+
+ void print (pretty_printer *pp) FINAL OVERRIDE
+ {
+ pp_string (pp, "rewind");
+ }
+
+ void update_model (region_model *model,
+ const exploded_edge &eedge) FINAL OVERRIDE;
+
+ void add_events_to_path (checker_path *emission_path,
+ const exploded_edge &eedge) FINAL OVERRIDE;
+
+ const program_point &get_setjmp_point () const
+ {
+ const program_point &origin_point = m_enode_origin->get_point ();
+
+ /* "origin_point" ought to be before the call to "setjmp". */
+ gcc_assert (origin_point.get_kind () == PK_BEFORE_STMT);
+
+ /* TODO: assert that it's the final stmt in its supernode. */
+
+ return origin_point;
+ }
+
+ const gcall *get_setjmp_call () const
+ {
+ return as_a <const gcall *> (get_setjmp_point ().get_stmt ());
+ }
+
+ const exploded_node *get_enode_origin () const { return m_enode_origin; }
+
+private:
+ const exploded_node *m_enode_origin;
+};
+
+/* Statistics about aspects of an exploded_graph. */
+
+struct stats
+{
+ stats (int num_supernodes);
+ void log (logger *logger) const;
+ void dump (FILE *out) const;
+
+ int m_num_nodes[NUM_POINT_KINDS];
+ int m_node_reuse_count;
+ int m_node_reuse_after_merge_count;
+ int m_num_supernodes;
+};
+
+/* Traits class for ensuring uniqueness of point_and_state data within
+ an exploded_graph. */
+
+struct eg_hash_map_traits
+{
+ typedef const point_and_state *key_type;
+ typedef exploded_node *value_type;
+ typedef exploded_node *compare_type;
+
+ static inline hashval_t hash (const key_type &k)
+ {
+ gcc_assert (k != NULL);
+ gcc_assert (k != reinterpret_cast<key_type> (1));
+ return k->hash ();
+ }
+ static inline bool equal_keys (const key_type &k1, const key_type &k2)
+ {
+ gcc_assert (k1 != NULL);
+ gcc_assert (k2 != NULL);
+ gcc_assert (k1 != reinterpret_cast<key_type> (1));
+ gcc_assert (k2 != reinterpret_cast<key_type> (1));
+ if (k1 && k2)
+ return *k1 == *k2;
+ else
+ /* Otherwise at least one is NULL; they are only equal if both are. */
+ return k1 == k2;
+ }
+ template <typename T>
+ static inline void remove (T &)
+ {
+ /* empty; the nodes are handled elsewhere. */
+ }
+ template <typename T>
+ static inline void mark_deleted (T &entry)
+ {
+ entry.m_key = reinterpret_cast<key_type> (1);
+ }
+ template <typename T>
+ static inline void mark_empty (T &entry)
+ {
+ entry.m_key = NULL;
+ }
+ template <typename T>
+ static inline bool is_deleted (const T &entry)
+ {
+ return entry.m_key == reinterpret_cast<key_type> (1);
+ }
+ template <typename T>
+ static inline bool is_empty (const T &entry)
+ {
+ return entry.m_key == NULL;
+ }
+ static const bool empty_zero_p = false;
+};
+
+/* Per-program_point data for an exploded_graph. */
+
+struct per_program_point_data
+{
+ per_program_point_data (const program_point &key)
+ : m_key (key)
+ {}
+
+ const program_point m_key;
+ auto_vec<exploded_node *> m_enodes;
+};
+
+/* Traits class for storing per-program_point data within
+ an exploded_graph. */
+
+struct eg_point_hash_map_traits
+{
+ typedef const program_point *key_type;
+ typedef per_program_point_data *value_type;
+ typedef per_program_point_data *compare_type;
+
+ static inline hashval_t hash (const key_type &k)
+ {
+ gcc_assert (k != NULL);
+ gcc_assert (k != reinterpret_cast<key_type> (1));
+ return k->hash ();
+ }
+ static inline bool equal_keys (const key_type &k1, const key_type &k2)
+ {
+ gcc_assert (k1 != NULL);
+ gcc_assert (k2 != NULL);
+ gcc_assert (k1 != reinterpret_cast<key_type> (1));
+ gcc_assert (k2 != reinterpret_cast<key_type> (1));
+ if (k1 && k2)
+ return *k1 == *k2;
+ else
+ /* Otherwise at least one is NULL; they are only equal if both are. */
+ return k1 == k2;
+ }
+ template <typename T>
+ static inline void remove (T &)
+ {
+ /* empty; the nodes are handled elsewhere. */
+ }
+ template <typename T>
+ static inline void mark_deleted (T &entry)
+ {
+ entry.m_key = reinterpret_cast<key_type> (1);
+ }
+ template <typename T>
+ static inline void mark_empty (T &entry)
+ {
+ entry.m_key = NULL;
+ }
+ template <typename T>
+ static inline bool is_deleted (const T &entry)
+ {
+ return entry.m_key == reinterpret_cast<key_type> (1);
+ }
+ template <typename T>
+ static inline bool is_empty (const T &entry)
+ {
+ return entry.m_key == NULL;
+ }
+ static const bool empty_zero_p = false;
+};
+
+/* Data about a particular call_string within an exploded_graph. */
+
+struct per_call_string_data
+{
+ per_call_string_data (const call_string &key, int num_supernodes)
+ : m_key (key), m_stats (num_supernodes)
+ {}
+
+ const call_string m_key;
+ stats m_stats;
+};
+
+/* Traits class for storing per-call_string data within
+ an exploded_graph. */
+
+struct eg_call_string_hash_map_traits
+{
+ typedef const call_string *key_type;
+ typedef per_call_string_data *value_type;
+ typedef per_call_string_data *compare_type;
+
+ static inline hashval_t hash (const key_type &k)
+ {
+ gcc_assert (k != NULL);
+ gcc_assert (k != reinterpret_cast<key_type> (1));
+ return k->hash ();
+ }
+ static inline bool equal_keys (const key_type &k1, const key_type &k2)
+ {
+ gcc_assert (k1 != NULL);
+ gcc_assert (k2 != NULL);
+ gcc_assert (k1 != reinterpret_cast<key_type> (1));
+ gcc_assert (k2 != reinterpret_cast<key_type> (1));
+ if (k1 && k2)
+ return *k1 == *k2;
+ else
+ /* Otherwise at least one is NULL; they are only equal if both are. */
+ return k1 == k2;
+ }
+ template <typename T>
+ static inline void remove (T &)
+ {
+ /* empty; the nodes are handled elsewhere. */
+ }
+ template <typename T>
+ static inline void mark_deleted (T &entry)
+ {
+ entry.m_key = reinterpret_cast<key_type> (1);
+ }
+ template <typename T>
+ static inline void mark_empty (T &entry)
+ {
+ entry.m_key = NULL;
+ }
+ template <typename T>
+ static inline bool is_deleted (const T &entry)
+ {
+ return entry.m_key == reinterpret_cast<key_type> (1);
+ }
+ template <typename T>
+ static inline bool is_empty (const T &entry)
+ {
+ return entry.m_key == NULL;
+ }
+ static const bool empty_zero_p = false;
+};
+
+/* Data about a particular function within an exploded_graph. */
+
+struct per_function_data
+{
+ per_function_data () {}
+
+ void add_call_summary (exploded_node *node)
+ {
+ m_summaries.safe_push (node);
+ }
+
+ auto_vec<exploded_node *> m_summaries;
+};
+
+
+/* The strongly connected components of a supergraph.
+ In particular, this allows us to compute a partial ordering
+ of supernodes. */
+
+class strongly_connected_components
+{
+public:
+ strongly_connected_components (const supergraph &sg, logger *logger);
+
+ int get_scc_id (int node_index) const
+ {
+ return m_per_node[node_index].m_lowlink;
+ }
+
+ void dump () const;
+
+private:
+ struct per_node_data
+ {
+ per_node_data ()
+ : m_index (-1), m_lowlink (-1), m_on_stack (false)
+ {}
+
+ int m_index;
+ int m_lowlink;
+ bool m_on_stack;
+ };
+
+ void strong_connect (unsigned index);
+
+ const supergraph &m_sg;
+ auto_vec<unsigned> m_stack;
+ auto_vec<per_node_data> m_per_node;
+};
+
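The per_node_data fields above (m_index, m_lowlink, m_on_stack) are the bookkeeping of Tarjan's SCC algorithm. A standalone sketch of that algorithm over a plain adjacency list, assigning each node an SCC id for later ordering ("scc_finder" is hypothetical; this is a generic sketch, not the GCC implementation):

#include <algorithm>
#include <cstdio>
#include <vector>

class scc_finder
{
public:
  scc_finder (const std::vector<std::vector<int>> &adj)
    : m_adj (adj), m_next_index (0),
      m_index (adj.size (), -1), m_lowlink (adj.size (), -1),
      m_on_stack (adj.size (), false), m_scc_id (adj.size (), -1)
  {
    for (size_t v = 0; v < adj.size (); ++v)
      if (m_index[v] == -1)
        strong_connect ((int) v);
  }

  int get_scc_id (int v) const { return m_scc_id[v]; }

private:
  void strong_connect (int v)
  {
    m_index[v] = m_lowlink[v] = m_next_index++;
    m_stack.push_back (v);
    m_on_stack[v] = true;

    for (int w : m_adj[v])
      if (m_index[w] == -1)
        {
          strong_connect (w);
          m_lowlink[v] = std::min (m_lowlink[v], m_lowlink[w]);
        }
      else if (m_on_stack[w])
        m_lowlink[v] = std::min (m_lowlink[v], m_index[w]);

    if (m_lowlink[v] == m_index[v])
      {
        /* v is the root of an SCC; pop its members off the stack.  */
        int w;
        do
          {
            w = m_stack.back ();
            m_stack.pop_back ();
            m_on_stack[w] = false;
            m_scc_id[w] = v;   /* use the root's index as the SCC id */
          }
        while (w != v);
      }
  }

  const std::vector<std::vector<int>> &m_adj;
  int m_next_index;
  std::vector<int> m_index, m_lowlink;
  std::vector<bool> m_on_stack;
  std::vector<int> m_scc_id;
  std::vector<int> m_stack;
};

int main ()
{
  /* 0 -> 1 -> 2 -> 0 form a cycle; 3 is on its own.  */
  std::vector<std::vector<int>> adj = {{1}, {2}, {0}, {0}};
  scc_finder scc (adj);
  for (int v = 0; v < 4; ++v)
    printf ("node %d: scc %d\n", v, scc.get_scc_id (v));
  return 0;
}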
+/* The worklist of exploded_node instances that have been added to
+ an exploded_graph, but that haven't yet been processed to find
+ their successors (or warnings).
+
+ The enodes are stored in a priority queue, ordered by a topological
+ sort of the SCCs in the supergraph, so that enodes for the same
+ program_point should appear at the front of the queue together.
+ This allows for state-merging at CFG join-points, so that
+ sufficiently-similar enodes can be merged into one. */
+
+class worklist
+{
+public:
+ worklist (const exploded_graph &eg, const analysis_plan &plan);
+ unsigned length () const;
+ exploded_node *take_next ();
+ exploded_node *peek_next ();
+ void add_node (exploded_node *enode);
+
+private:
+ class key_t
+ {
+ public:
+ key_t (const worklist &w, exploded_node *enode)
+ : m_worklist (w), m_enode (enode)
+ {}
+
+ bool operator< (const key_t &other) const
+ {
+ return cmp (*this, other) < 0;
+ }
+
+ bool operator== (const key_t &other) const
+ {
+ return cmp (*this, other) == 0;
+ }
+
+ bool operator> (const key_t &other) const
+ {
+ return !(*this == other || *this < other);
+ }
+
+ private:
+ static int cmp_1 (const key_t &ka, const key_t &kb);
+ static int cmp (const key_t &ka, const key_t &kb);
+
+ int get_scc_id (const exploded_node *enode) const
+ {
+ const supernode *snode = enode->get_supernode ();
+ if (snode == NULL)
+ return 0;
+ return m_worklist.m_scc.get_scc_id (snode->m_index);
+ }
+
+ const worklist &m_worklist;
+ exploded_node *m_enode;
+ };
+
+ /* The order is important here: m_scc needs to stick around
+ until after m_queue has finished being cleaned up (the dtor
+ calls the ordering fns). */
+ const exploded_graph &m_eg;
+ strongly_connected_components m_scc;
+ const analysis_plan &m_plan;
+
+ /* Priority queue, backed by a fibonacci_heap. */
+ typedef fibonacci_heap<key_t, exploded_node> queue_t;
+ queue_t m_queue;
+};
+
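The worklist comment above describes a priority queue whose key brings enodes for the same program point out together, which is what makes state-merging at join points possible. A standalone sketch of that ordering with std::priority_queue and a hypothetical "work_item" key (coarse SCC position first, then the point within it):

#include <cstdio>
#include <queue>
#include <vector>

struct work_item
{
  int scc_id;    /* coarse order: topological position of the node's SCC */
  int point_id;  /* fine order: items at the same point sort adjacently */
  int state_id;  /* payload only; does not affect ordering */
};

struct later_than
{
  bool operator() (const work_item &a, const work_item &b) const
  {
    if (a.scc_id != b.scc_id)
      return a.scc_id > b.scc_id;
    return a.point_id > b.point_id;
  }
};

int main ()
{
  std::priority_queue<work_item, std::vector<work_item>, later_than> q;
  q.push ({2, 7, 0});
  q.push ({1, 3, 1});
  q.push ({1, 3, 2});   /* same point as the previous item */
  q.push ({1, 5, 3});

  /* Items with (scc 1, point 3) come out adjacently, so a driver loop can
     peek ahead and merge sufficiently-similar states before processing.  */
  while (!q.empty ())
    {
      work_item w = q.top ();
      q.pop ();
      printf ("scc %d point %d state %d\n", w.scc_id, w.point_id, w.state_id);
    }
  return 0;
}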
+/* An exploded_graph is a directed graph of unique <point, state> pairs.
+ It also has a worklist of nodes that are waiting for their successors
+ to be added to the graph. */
+
+class exploded_graph : public digraph<eg_traits>
+{
+public:
+ typedef hash_map <const call_string *, per_call_string_data *,
+ eg_call_string_hash_map_traits> call_string_data_map_t;
+
+ exploded_graph (const supergraph &sg, logger *logger,
+ const extrinsic_state &ext_state,
+ const state_purge_map *purge_map,
+ const analysis_plan &plan,
+ int verbosity);
+ ~exploded_graph ();
+
+ logger *get_logger () const { return m_logger.get_logger (); }
+
+ const supergraph &get_supergraph () const { return m_sg; }
+ const extrinsic_state &get_ext_state () const { return m_ext_state; }
+ const state_purge_map *get_purge_map () const { return m_purge_map; }
+ const analysis_plan &get_analysis_plan () const { return m_plan; }
+
+ exploded_node *get_origin () const { return m_origin; }
+
+ exploded_node *add_function_entry (function *fun);
+
+ void build_initial_worklist ();
+ void process_worklist ();
+ void process_node (exploded_node *node);
+
+ exploded_node *get_or_create_node (const program_point &point,
+ const program_state &state,
+ state_change *change);
+ exploded_edge *add_edge (exploded_node *src, exploded_node *dest,
+ const superedge *sedge,
+ const state_change &change,
+ exploded_edge::custom_info_t *custom = NULL);
+
+ per_program_point_data *
+ get_or_create_per_program_point_data (const program_point &);
+
+ per_call_string_data *
+ get_or_create_per_call_string_data (const call_string &);
+
+ per_function_data *
+ get_or_create_per_function_data (function *);
+ per_function_data *get_per_function_data (function *) const;
+
+ void save_diagnostic (const state_machine &sm,
+ const exploded_node *enode,
+ const supernode *node, const gimple *stmt,
+ stmt_finder *finder,
+ tree var, state_machine::state_t state,
+ pending_diagnostic *d);
+
+ diagnostic_manager &get_diagnostic_manager ()
+ {
+ return m_diagnostic_manager;
+ }
+
+ stats *get_global_stats () { return &m_global_stats; }
+ stats *get_or_create_function_stats (function *fn);
+ void log_stats () const;
+ void dump_stats (FILE *) const;
+ void dump_states_for_supernode (FILE *, const supernode *snode) const;
+ void dump_exploded_nodes () const;
+
+ const call_string_data_map_t *get_per_call_string_data () const
+ { return &m_per_call_string_data; }
+
+private:
+ DISABLE_COPY_AND_ASSIGN (exploded_graph);
+
+ const supergraph &m_sg;
+
+ log_user m_logger;
+
+ /* Map from point/state to exploded node.
+ To avoid duplication we store point_and_state
+ *pointers* as keys, rather than point_and_state, using the
+ instance from within the exploded_node, with a custom hasher. */
+ typedef hash_map <const point_and_state *, exploded_node *,
+ eg_hash_map_traits> map_t;
+ map_t m_point_and_state_to_node;
+
+ /* Map from program_point to per-program_point data. */
+ typedef hash_map <const program_point *, per_program_point_data *,
+ eg_point_hash_map_traits> point_map_t;
+ point_map_t m_per_point_data;
+
+ worklist m_worklist;
+
+ exploded_node *m_origin;
+
+ const extrinsic_state &m_ext_state;
+
+ const state_purge_map *const m_purge_map;
+
+ const analysis_plan &m_plan;
+
+ typedef hash_map<function *, per_function_data *> per_function_data_t;
+ per_function_data_t m_per_function_data;
+
+ diagnostic_manager m_diagnostic_manager;
+
+ /* Stats. */
+ stats m_global_stats;
+ typedef ordered_hash_map<function *, stats *> function_stat_map_t;
+ function_stat_map_t m_per_function_stats;
+ stats m_functionless_stats;
+
+ call_string_data_map_t m_per_call_string_data;
+
+ auto_vec<int> m_PK_AFTER_SUPERNODE_per_snode;
+};
+
+/* A path within an exploded_graph: a sequence of edges. */
+
+class exploded_path
+{
+public:
+ exploded_path () : m_edges () {}
+ exploded_path (const exploded_path &other);
+ exploded_path & operator= (const exploded_path &other);
+
+ unsigned length () const { return m_edges.length (); }
+
+ bool find_stmt_backwards (const gimple *search_stmt,
+ int *out_idx) const;
+
+ exploded_node *get_final_enode () const;
+
+ void dump_to_pp (pretty_printer *pp) const;
+ void dump (FILE *fp) const;
+ void dump () const;
+
+ bool feasible_p (logger *logger) const;
+
+ auto_vec<const exploded_edge *> m_edges;
+};
+
+/* Finding the shortest exploded_path within an exploded_graph. */
+
+typedef shortest_paths<eg_traits, exploded_path> shortest_exploded_paths;
+
+/* Abstract base class for use when passing NULL as the stmt for
+ a possible warning, allowing the choice of stmt to be deferred
+ until after we have an emission path (and know we're emitting a
+ warning). */
+
+class stmt_finder
+{
+public:
+ virtual ~stmt_finder () {}
+ virtual stmt_finder *clone () const = 0;
+ virtual const gimple *find_stmt (const exploded_path &epath) = 0;
+};
+
+// TODO: split the above up?
+
+#endif /* GCC_ANALYZER_EXPLODED_GRAPH_H */
--- /dev/null
+/* Classes for analyzer diagnostics.
+ Copyright (C) 2019-2020 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tree.h"
+#include "intl.h"
+#include "diagnostic.h"
+#include "function.h"
+#include "analyzer/analyzer.h"
+#include "diagnostic-event-id.h"
+#include "analyzer/analyzer-logging.h"
+#include "analyzer/sm.h"
+#include "diagnostic-event-id.h"
+#include "analyzer/sm.h"
+#include "analyzer/pending-diagnostic.h"
+
+#if ENABLE_ANALYZER
+
+/* Generate a label_text by printing FMT.
+
+ Use a clone of the global_dc for formatting callbacks.
+
+ Use this evdesc::event_desc's m_colorize flag to control colorization
+ (so that e.g. we can disable it for JSON output). */
+
+label_text
+evdesc::event_desc::formatted_print (const char *fmt, ...) const
+{
+ pretty_printer *pp = global_dc->printer->clone ();
+
+ pp_show_color (pp) = m_colorize;
+
+ text_info ti;
+ rich_location rich_loc (line_table, UNKNOWN_LOCATION);
+ va_list ap;
+ va_start (ap, fmt);
+ ti.format_spec = _(fmt);
+ ti.args_ptr = &ap;
+ ti.err_no = 0;
+ ti.x_data = NULL;
+ ti.m_richloc = &rich_loc;
+ pp_format (pp, &ti);
+ pp_output_formatted_text (pp);
+ va_end (ap);
+
+ label_text result = label_text::take (xstrdup (pp_formatted_text (pp)));
+ delete pp;
+ return result;
+}
+
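formatted_print above forwards its va_list into the pretty-printer machinery and hands back an owned string. A standalone sketch of the same pattern (not GCC code), with a two-pass vsnprintf standing in for pp_format and std::string for label_text:

#include <cstdarg>
#include <cstdio>
#include <string>
#include <vector>

static std::string
formatted_print (const char *fmt, ...)
{
  va_list ap;
  va_start (ap, fmt);
  va_list ap_copy;
  va_copy (ap_copy, ap);

  /* First pass measures the output; second pass formats into a buffer of
     exactly that size.  */
  int len = vsnprintf (nullptr, 0, fmt, ap);
  std::vector<char> buf (len > 0 ? len + 1 : 1, '\0');
  if (len > 0)
    vsnprintf (buf.data (), buf.size (), fmt, ap_copy);

  va_end (ap_copy);
  va_end (ap);
  return std::string (buf.data ());
}

int main ()
{
  std::string s = formatted_print ("allocated %d bytes here", 16);
  printf ("%s\n", s.c_str ());
  return 0;
}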
+#endif /* #if ENABLE_ANALYZER */
--- /dev/null
+/* Classes for analyzer diagnostics.
+ Copyright (C) 2019-2020 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_ANALYZER_PENDING_DIAGNOSTIC_H
+#define GCC_ANALYZER_PENDING_DIAGNOSTIC_H
+
+/* Various bundles of information used for generating more precise
+ messages for events within a diagnostic_path, for passing to the
+ various "describe_*" vfuncs of pending_diagnostic. See those
+ for more information. */
+
+namespace evdesc {
+
+struct event_desc
+{
+ event_desc (bool colorize) : m_colorize (colorize) {}
+
+ label_text formatted_print (const char *fmt, ...) const
+ ATTRIBUTE_GCC_DIAG(2,3);
+
+ bool m_colorize;
+};
+
+/* For use by pending_diagnostic::describe_state_change. */
+
+struct state_change : public event_desc
+{
+ state_change (bool colorize,
+ tree expr,
+ tree origin,
+ state_machine::state_t old_state,
+ state_machine::state_t new_state,
+ diagnostic_event_id_t event_id,
+ const state_change_event &event)
+ : event_desc (colorize),
+ m_expr (expr), m_origin (origin),
+ m_old_state (old_state), m_new_state (new_state),
+ m_event_id (event_id), m_event (event)
+ {}
+
+ bool is_global_p () const { return m_expr == NULL_TREE; }
+
+ tree m_expr;
+ tree m_origin;
+ state_machine::state_t m_old_state;
+ state_machine::state_t m_new_state;
+ diagnostic_event_id_t m_event_id;
+ const state_change_event &m_event;
+};
+
+/* For use by pending_diagnostic::describe_call_with_state. */
+
+struct call_with_state : public event_desc
+{
+ call_with_state (bool colorize,
+ tree caller_fndecl, tree callee_fndecl,
+ tree expr, state_machine::state_t state)
+ : event_desc (colorize),
+ m_caller_fndecl (caller_fndecl),
+ m_callee_fndecl (callee_fndecl),
+ m_expr (expr),
+ m_state (state)
+ {
+ }
+
+ tree m_caller_fndecl;
+ tree m_callee_fndecl;
+ tree m_expr;
+ state_machine::state_t m_state;
+};
+
+/* For use by pending_diagnostic::describe_return_of_state. */
+
+struct return_of_state : public event_desc
+{
+ return_of_state (bool colorize,
+ tree caller_fndecl, tree callee_fndecl,
+ state_machine::state_t state)
+ : event_desc (colorize),
+ m_caller_fndecl (caller_fndecl),
+ m_callee_fndecl (callee_fndecl),
+ m_state (state)
+ {
+ }
+
+ tree m_caller_fndecl;
+ tree m_callee_fndecl;
+ state_machine::state_t m_state;
+};
+
+/* For use by pending_diagnostic::describe_final_event. */
+
+struct final_event : public event_desc
+{
+ final_event (bool colorize,
+ tree expr, state_machine::state_t state)
+ : event_desc (colorize),
+ m_expr (expr), m_state (state)
+ {}
+
+ tree m_expr;
+ state_machine::state_t m_state;
+};
+
+} /* end of namespace evdesc */
+
+/* An abstract base class for capturing information about a diagnostic in
+ a form that is ready to emit at a later point (or be rejected).
+ Each kind of diagnostic will have a concrete subclass of
+ pending_diagnostic.
+
+ Normally, gcc diagnostics are emitted using va_list, which can't be
+ portably stored for later use, so we have to use an "emit" virtual
+ function.
+
+ This class also supports comparison, so that multiple pending_diagnostic
+ instances can be de-duplicated.
+
+ As well as emitting a diagnostic, the class has various "precision of
+ wording" virtual functions, for generating descriptions for events
+ within a diagnostic_path. These are optional, but implementing these
+ allows for more precise wordings than the more generic
+ implementation. */
+
+class pending_diagnostic
+{
+ public:
+ virtual ~pending_diagnostic () {}
+
+ /* Vfunc for emitting the diagnostic. The rich_location will have been
+ populated with a diagnostic_path.
+ Return true if a diagnostic is actually emitted. */
+ virtual bool emit (rich_location *) = 0;
+
+ /* Hand-coded RTTI: get an ID for the subclass. */
+ virtual const char *get_kind () const = 0;
+
+ /* Compare for equality with OTHER, which might be of a different
+ subclass. */
+
+ bool equal_p (const pending_diagnostic &other)
+ {
+ /* Check for pointer equality on the IDs from get_kind. */
+ if (get_kind () != other.get_kind ())
+ return false;
+ /* Call vfunc now we know they have the same ID: */
+ return subclass_equal_p (other);
+ }
+
+ /* A vfunc for testing for equality, where we've already
+ checked they have the same ID. See pending_diagnostic_subclass
+ below for a convenience subclass for implementing this. */
+ virtual bool subclass_equal_p (const pending_diagnostic &other) const = 0;
+
+ /* For greatest precision-of-wording, the various following "describe_*"
+ virtual functions give the pending diagnostic a way to describe events
+ in a diagnostic_path in terms that make sense for that diagnostic.
+
+ In each case, return a non-NULL label_text to give the event a custom
+ description; NULL otherwise (falling back on a more generic
+ description). */
+
+ /* Precision-of-wording vfunc for describing a critical state change
+ within the diagnostic_path.
+
+ For example, a double-free diagnostic might use the descriptions:
+ - "first 'free' happens here"
+ - "second 'free' happens here"
+ for the pertinent events, whereas a use-after-free might use the
+ descriptions:
+ - "freed here"
+ - "use after free here"
+ Note how in both cases the first event is a "free": the best
+ description to use depends on the diagnostic. */
+
+ virtual label_text describe_state_change (const evdesc::state_change &)
+ {
+ /* Default no-op implementation. */
+ return label_text ();
+ }
+
+ /* Precision-of-wording vfunc for describing an interprocedural call
+ carrying critical state for the diagnostic, from caller to callee.
+
+ For example a double-free diagnostic might use:
+ - "passing freed pointer 'ptr' in call to 'deallocator' from 'test'"
+ to make it clearer how the freed value moves from caller to
+ callee. */
+
+ virtual label_text describe_call_with_state (const evdesc::call_with_state &)
+ {
+ /* Default no-op implementation. */
+ return label_text ();
+ }
+
+ /* Precision-of-wording vfunc for describing an interprocedural return
+ within the diagnostic_path that carries critial state for the
+ diagnostic, from callee back to caller.
+
+ For example, a deref-of-unchecked-malloc diagnostic might use:
+ - "returning possibly-NULL pointer to 'make_obj' from 'allocator'"
+ to make it clearer how the unchecked value moves from callee
+ back to caller. */
+
+ virtual label_text describe_return_of_state (const evdesc::return_of_state &)
+ {
+ /* Default no-op implementation. */
+ return label_text ();
+ }
+
+ /* Precision-of-wording vfunc for describing the final event within a
+ diagnostic_path.
+
+ For example a double-free diagnostic might use:
+ - "second 'free' here; first 'free' was at (3)"
+ and a use-after-free might use
+ - "use after 'free' here; memory was freed at (2)". */
+
+ virtual label_text describe_final_event (const evdesc::final_event &)
+ {
+ /* Default no-op implementation. */
+ return label_text ();
+ }
+
+ /* End of precision-of-wording vfuncs. */
+};
+
+/* A template to make it easier to make subclasses of pending_diagnostic.
+
+ This uses the curiously-recurring template pattern, to implement
+ pending_diagnostic::subclass_equal_p by casting and calling
+ the subclass's operator==.
+
+ This assumes that BASE_OTHER has already been checked to have
+ been of the same subclass (which pending_diagnostic::equal_p does). */
+
+template <class Subclass>
+class pending_diagnostic_subclass : public pending_diagnostic
+{
+ public:
+ bool subclass_equal_p (const pending_diagnostic &base_other) const
+ FINAL OVERRIDE
+ {
+ const Subclass &other = (const Subclass &)base_other;
+ return *(const Subclass*)this == other;
+ }
+};
+
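A standalone sketch of the curiously-recurring-template equality idiom above: the base class first compares kind ids by pointer, and the templated helper then downcasts and defers to the subclass's operator== ("diagnostic_base", "diagnostic_subclass" and "double_free" are hypothetical names, not the analyzer's classes):

#include <cstdio>

class diagnostic_base
{
public:
  virtual ~diagnostic_base () {}
  virtual const char *get_kind () const = 0;
  virtual bool subclass_equal_p (const diagnostic_base &other) const = 0;

  bool equal_p (const diagnostic_base &other) const
  {
    if (get_kind () != other.get_kind ())   /* pointer compare of kind ids */
      return false;
    return subclass_equal_p (other);
  }
};

template <class Subclass>
class diagnostic_subclass : public diagnostic_base
{
public:
  bool subclass_equal_p (const diagnostic_base &base_other) const final
  {
    /* Safe: equal_p already checked that both share Subclass's kind.  */
    const Subclass &other = static_cast<const Subclass &> (base_other);
    return static_cast<const Subclass &> (*this) == other;
  }
};

class double_free : public diagnostic_subclass<double_free>
{
public:
  double_free (int var) : m_var (var) {}
  const char *get_kind () const final { return "double_free"; }
  bool operator== (const double_free &other) const
  {
    return m_var == other.m_var;
  }
private:
  int m_var;
};

int main ()
{
  double_free a (1), b (1), c (2);
  printf ("%d %d\n", a.equal_p (b), a.equal_p (c));   /* 1 0 */
  return 0;
}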
+#endif /* GCC_ANALYZER_PENDING_DIAGNOSTIC_H */
--- /dev/null
+/* Classes for representing locations within the program.
+ Copyright (C) 2019-2020 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tree.h"
+#include "gimple-pretty-print.h"
+#include "gcc-rich-location.h"
+#include "analyzer/call-string.h"
+#include "ordered-hash-map.h"
+#include "options.h"
+#include "cgraph.h"
+#include "function.h"
+#include "cfg.h"
+#include "basic-block.h"
+#include "gimple.h"
+#include "gimple-iterator.h"
+#include "digraph.h"
+#include "analyzer/analyzer.h"
+#include "analyzer/analyzer-logging.h"
+#include "analyzer/supergraph.h"
+#include "analyzer/program-point.h"
+#include "sbitmap.h"
+#include "tristate.h"
+#include "selftest.h"
+#include "analyzer/region-model.h"
+#include "analyzer/sm.h"
+#include "analyzer/program-state.h"
+#include "alloc-pool.h"
+#include "fibonacci_heap.h"
+#include "diagnostic-event-id.h"
+#include "analyzer/pending-diagnostic.h"
+#include "analyzer/diagnostic-manager.h"
+#include "shortest-paths.h"
+#include "analyzer/exploded-graph.h"
+#include "analyzer/analysis-plan.h"
+
+#if ENABLE_ANALYZER
+
+/* Get a string for PK. */
+
+const char *
+point_kind_to_string (enum point_kind pk)
+{
+ switch (pk)
+ {
+ default:
+ gcc_unreachable ();
+ case PK_ORIGIN:
+ return "PK_ORIGIN";
+ case PK_BEFORE_SUPERNODE:
+ return "PK_BEFORE_SUPERNODE";
+ case PK_BEFORE_STMT:
+ return "PK_BEFORE_STMT";
+ case PK_AFTER_SUPERNODE:
+ return "PK_AFTER_SUPERNODE";
+ case PK_EMPTY:
+ return "PK_EMPTY";
+ case PK_DELETED:
+ return "PK_DELETED";
+ }
+}
+
+/* class function_point. */
+
+/* Print this function_point to PP. */
+
+void
+function_point::print (pretty_printer *pp, const format &f) const
+{
+ switch (get_kind ())
+ {
+ default:
+ gcc_unreachable ();
+
+ case PK_ORIGIN:
+ pp_printf (pp, "origin");
+ break;
+
+ case PK_BEFORE_SUPERNODE:
+ {
+ if (m_from_edge)
+ pp_printf (pp, "before SN: %i (from SN: %i)",
+ m_supernode->m_index, m_from_edge->m_src->m_index);
+ else
+ pp_printf (pp, "before SN: %i (NULL from-edge)",
+ m_supernode->m_index);
+ f.spacer (pp);
+ for (gphi_iterator gpi
+ = const_cast<supernode *>(get_supernode ())->start_phis ();
+ !gsi_end_p (gpi); gsi_next (&gpi))
+ {
+ const gphi *phi = gpi.phi ();
+ pp_gimple_stmt_1 (pp, phi, 0, (dump_flags_t)0);
+ }
+ }
+ break;
+
+ case PK_BEFORE_STMT:
+ pp_printf (pp, "before (SN: %i stmt: %i): ", m_supernode->m_index,
+ m_stmt_idx);
+ f.spacer (pp);
+ pp_gimple_stmt_1 (pp, get_stmt (), 0, (dump_flags_t)0);
+ if (f.m_newlines)
+ {
+ pp_newline (pp);
+ print_source_line (pp);
+ }
+ break;
+
+ case PK_AFTER_SUPERNODE:
+ pp_printf (pp, "after SN: %i", m_supernode->m_index);
+ break;
+ }
+}
+
+/* Generate a hash value for this function_point. */
+
+hashval_t
+function_point::hash () const
+{
+ inchash::hash hstate;
+ if (m_supernode)
+ hstate.add_int (m_supernode->m_index);
+ hstate.add_ptr (m_from_edge);
+ hstate.add_int (m_stmt_idx);
+ hstate.add_int (m_kind);
+ return hstate.end ();
+}
+
+/* Get the gimple stmt for this function_point, if any. */
+
+const gimple *
+function_point::get_stmt () const
+{
+ if (m_kind == PK_BEFORE_STMT)
+ return m_supernode->m_stmts[m_stmt_idx];
+ else if (m_kind == PK_AFTER_SUPERNODE)
+ return m_supernode->get_last_stmt ();
+ else
+ return NULL;
+}
+
+/* Get a location for this function_point, if any. */
+
+location_t
+function_point::get_location () const
+{
+ const gimple *stmt = get_stmt ();
+ if (stmt)
+ return stmt->location;
+
+ return UNKNOWN_LOCATION;
+}
+
+/* A subclass of diagnostic_context for use by
+ program_point::print_source_line. */
+
+class debug_diagnostic_context : public diagnostic_context
+{
+public:
+ debug_diagnostic_context ()
+ {
+ diagnostic_initialize (this, 0);
+ show_line_numbers_p = true;
+ show_caret = true;
+ }
+ ~debug_diagnostic_context ()
+ {
+ diagnostic_finish (this);
+ }
+};
+
+/* Print the source line (if any) for this function_point to PP. */
+
+void
+function_point::print_source_line (pretty_printer *pp) const
+{
+ const gimple *stmt = get_stmt ();
+ if (!stmt)
+ return;
+ // TODO: monospace font
+ debug_diagnostic_context tmp_dc;
+ gcc_rich_location richloc (stmt->location);
+ diagnostic_show_locus (&tmp_dc, &richloc, DK_ERROR);
+ pp_string (pp, pp_formatted_text (tmp_dc.printer));
+}
+
+/* class program_point. */
+
+/* Print this program_point to PP. */
+
+void
+program_point::print (pretty_printer *pp, const format &f) const
+{
+ pp_string (pp, "callstring: ");
+ m_call_string.print (pp);
+ f.spacer (pp);
+
+ m_function_point.print (pp, f);
+}
+
+/* Dump this point to stderr. */
+
+DEBUG_FUNCTION void
+program_point::dump () const
+{
+ pretty_printer pp;
+ pp_show_color (&pp) = pp_show_color (global_dc->printer);
+ pp.buffer->stream = stderr;
+ print (&pp, format (true));
+ pp_flush (&pp);
+}
+
+/* Generate a hash value for this program_point. */
+
+hashval_t
+program_point::hash () const
+{
+ inchash::hash hstate;
+ hstate.merge_hash (m_function_point.hash ());
+ hstate.merge_hash (m_call_string.hash ());
+ return hstate.end ();
+}
+
+/* Get the function * at DEPTH within the call stack. */
+
+function *
+program_point::get_function_at_depth (unsigned depth) const
+{
+ gcc_assert (depth <= m_call_string.length ());
+ if (depth == m_call_string.length ())
+ return m_function_point.get_function ();
+ else
+ return m_call_string[depth]->get_caller_function ();
+}
+
+/* Assert that this object is sane. */
+
+void
+program_point::validate () const
+{
+ /* Skip this in a release build. */
+#if !CHECKING_P
+ return;
+#endif
+
+ m_call_string.validate ();
+ /* The "callee" of the final entry in the callstring should be the
+ function of the m_function_point. */
+ if (m_call_string.length () > 0)
+ gcc_assert (m_call_string[m_call_string.length () - 1]->get_callee_function ()
+ == get_function ());
+}
+
+/* Check to see if SUCC is a valid edge to take (ensuring that we have
+ interprocedurally valid paths in the exploded graph, and enforcing
+ recursion limits).
+
+ Update the call string if SUCC is a call or a return.
+
+ Return true if SUCC can be taken, or false otherwise.
+
+ This is the "point" half of exploded_node::on_edge. */
+
+bool
+program_point::on_edge (exploded_graph &eg,
+ const superedge *succ)
+{
+ logger * const logger = eg.get_logger ();
+ LOG_FUNC (logger);
+ switch (succ->m_kind)
+ {
+ case SUPEREDGE_CFG_EDGE:
+ {
+ const cfg_superedge *cfg_sedge = as_a <const cfg_superedge *> (succ);
+
+ /* Reject abnormal edges; we special-case setjmp/longjmp. */
+ if (cfg_sedge->get_flags () & EDGE_ABNORMAL)
+ return false;
+ }
+ break;
+
+ case SUPEREDGE_CALL:
+ {
+ const call_superedge *call_sedge = as_a <const call_superedge *> (succ);
+
+ if (eg.get_analysis_plan ().use_summary_p (call_sedge->m_cedge))
+ {
+ if (logger)
+ logger->log ("rejecting call edge: using summary instead");
+ return false;
+ }
+
+ /* Add the callsite to the call string. */
+ m_call_string.push_call (eg.get_supergraph (), call_sedge);
+
+ /* Impose a maximum recursion depth and don't analyze paths
+ that exceed it further.
+ This is something of a blunt workaround, but it only
+ applies to recursion (and mutual recursion), not to
+ general call stacks. */
+ if (m_call_string.calc_recursion_depth ()
+ > param_analyzer_max_recursion_depth)
+ {
+ if (logger)
+ logger->log ("rejecting call edge: recursion limit exceeded");
+ // TODO: issue a sorry for this?
+ return false;
+ }
+ }
+ break;
+
+ case SUPEREDGE_RETURN:
+ {
+ /* Require that we return to the call site in the call string. */
+ if (m_call_string.empty_p ())
+ {
+ if (logger)
+ logger->log ("rejecting return edge: empty call string");
+ return false;
+ }
+ const return_superedge *top_of_stack = m_call_string.pop ();
+ if (top_of_stack != succ)
+ {
+ if (logger)
+ logger->log ("rejecting return edge: return to wrong callsite");
+ return false;
+ }
+ }
+ break;
+
+ case SUPEREDGE_INTRAPROCEDURAL_CALL:
+ {
+ const callgraph_superedge *cg_sedge
+ = as_a <const callgraph_superedge *> (succ);
+ /* Consider turning this edge into a use of an
+ interprocedural summary. */
+ if (eg.get_analysis_plan ().use_summary_p (cg_sedge->m_cedge))
+ {
+ if (logger)
+ logger->log ("using function summary for %qE in %qE",
+ cg_sedge->get_callee_decl (),
+ cg_sedge->get_caller_decl ());
+ return true;
+ }
+ else
+ {
+ /* Otherwise, we ignore these edges */
+ if (logger)
+ logger->log ("rejecting interprocedural edge");
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
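+
+/* A note on the call-string discipline enforced above (an illustrative
+   sketch with hypothetical functions "foo" and "bar"): following the
+   call superedge for a call from foo to bar pushes that callsite onto
+   the call string; a later return superedge is only accepted if it
+   matches the callsite on the top of the stack, so "returning" to any
+   other caller is rejected, keeping paths in the exploded graph
+   interprocedurally valid.  */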
+
+/* Comparator for program points within the same supernode,
+ for implementing worklist::key_t comparison operators.
+   Return negative if POINT_A is before POINT_B.
+   Return positive if POINT_A is after POINT_B.
+   Return 0 if they are equal.  */
+
+int
+function_point::cmp_within_supernode_1 (const function_point &point_a,
+ const function_point &point_b)
+{
+ gcc_assert (point_a.get_supernode () == point_b.get_supernode ());
+
+ switch (point_a.m_kind)
+ {
+ default:
+ gcc_unreachable ();
+ case PK_BEFORE_SUPERNODE:
+ switch (point_b.m_kind)
+ {
+ default:
+ gcc_unreachable ();
+ case PK_BEFORE_SUPERNODE:
+ {
+ int a_src_idx = -1;
+ int b_src_idx = -1;
+ if (point_a.m_from_edge)
+ a_src_idx = point_a.m_from_edge->m_src->m_index;
+ if (point_b.m_from_edge)
+ b_src_idx = point_b.m_from_edge->m_src->m_index;
+ return a_src_idx - b_src_idx;
+ }
+ break;
+
+ case PK_BEFORE_STMT:
+ case PK_AFTER_SUPERNODE:
+ return -1;
+ }
+ break;
+ case PK_BEFORE_STMT:
+ switch (point_b.m_kind)
+ {
+ default:
+ gcc_unreachable ();
+ case PK_BEFORE_SUPERNODE:
+ return 1;
+
+ case PK_BEFORE_STMT:
+ return point_a.m_stmt_idx - point_b.m_stmt_idx;
+
+ case PK_AFTER_SUPERNODE:
+ return -1;
+ }
+ break;
+ case PK_AFTER_SUPERNODE:
+ switch (point_b.m_kind)
+ {
+ default:
+ gcc_unreachable ();
+ case PK_BEFORE_SUPERNODE:
+ case PK_BEFORE_STMT:
+ return 1;
+
+ case PK_AFTER_SUPERNODE:
+ return 0;
+ }
+ break;
+ }
+}
+
+/* Comparator for program points within the same supernode,
+ for implementing worklist::key_t comparison operators.
+   Return negative if POINT_A is before POINT_B.
+   Return positive if POINT_A is after POINT_B.
+   Return 0 if they are equal.  */
+
+int
+function_point::cmp_within_supernode (const function_point &point_a,
+ const function_point &point_b)
+{
+ int result = cmp_within_supernode_1 (point_a, point_b);
+
+ /* Check that the ordering is symmetric */
+#if CHECKING_P
+ int reversed = cmp_within_supernode_1 (point_b, point_a);
+ gcc_assert (reversed == -result);
+#endif
+
+ return result;
+}
+
+#if CHECKING_P
+
+namespace selftest {
+
+/* Verify that function_point::operator== works as expected. */
+
+static void
+test_function_point_equality ()
+{
+ const supernode *snode = NULL;
+
+ function_point a = function_point (snode, NULL, 0,
+ PK_BEFORE_SUPERNODE);
+ function_point b = function_point::before_supernode (snode, NULL);
+ ASSERT_EQ (a, b);
+}
+
+/* Verify that function_point::cmp_within_supernode works as expected. */
+
+static void
+test_function_point_ordering ()
+{
+ const supernode *snode = NULL;
+ const call_string call_string;
+
+ /* Populate an array with various points within the same
+ snode, in order. */
+ auto_vec<function_point> points;
+ points.safe_push (function_point::before_supernode (snode, NULL));
+ points.safe_push (function_point::before_stmt (snode, 0));
+ points.safe_push (function_point::before_stmt (snode, 1));
+ points.safe_push (function_point::after_supernode (snode));
+
+ /* Check all pairs. */
+ unsigned i;
+ function_point *point_a;
+ FOR_EACH_VEC_ELT (points, i, point_a)
+ {
+ unsigned j;
+ function_point *point_b;
+ FOR_EACH_VEC_ELT (points, j, point_b)
+ {
+ int cmp = function_point::cmp_within_supernode (*point_a, *point_b);
+ if (i == j)
+ ASSERT_EQ (cmp, 0);
+ if (i < j)
+ ASSERT_TRUE (cmp < 0);
+ if (i > j)
+ ASSERT_TRUE (cmp > 0);
+ }
+ }
+}
+
+/* Verify that program_point::operator== works as expected. */
+
+static void
+test_program_point_equality ()
+{
+ const supernode *snode = NULL;
+
+ const call_string cs;
+
+ program_point a = program_point::before_supernode (snode, NULL,
+ cs);
+
+ program_point b = program_point::before_supernode (snode, NULL,
+ cs);
+
+ ASSERT_EQ (a, b);
+ // TODO: verify with non-empty callstrings, with different edges
+}
+
+/* Run all of the selftests within this file. */
+
+void
+analyzer_program_point_cc_tests ()
+{
+ test_function_point_equality ();
+ test_function_point_ordering ();
+ test_program_point_equality ();
+}
+
+} // namespace selftest
+
+#endif /* CHECKING_P */
+
+#endif /* #if ENABLE_ANALYZER */
--- /dev/null
+/* Classes for representing locations within the program.
+ Copyright (C) 2019-2020 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_ANALYZER_PROGRAM_POINT_H
+#define GCC_ANALYZER_PROGRAM_POINT_H
+
+class exploded_graph;
+
+/* An enum for distinguishing the various kinds of program_point. */
+
+enum point_kind {
+ /* A "fake" node which has edges to all entrypoints. */
+ PK_ORIGIN,
+
+ PK_BEFORE_SUPERNODE,
+ PK_BEFORE_STMT,
+ PK_AFTER_SUPERNODE,
+
+ /* Special values used for hash_map: */
+ PK_EMPTY,
+ PK_DELETED,
+
+ NUM_POINT_KINDS
+};
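+
+/* An illustrative sketch of the ordering of points within a single
+   supernode: one containing two statements gives rise to the sequence
+
+     PK_BEFORE_SUPERNODE
+       -> PK_BEFORE_STMT (stmt_idx 0)
+       -> PK_BEFORE_STMT (stmt_idx 1)
+       -> PK_AFTER_SUPERNODE
+
+   which is the ordering implemented by
+   function_point::cmp_within_supernode below.  */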
+
+extern const char *point_kind_to_string (enum point_kind pk);
+
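+/* A simple formatting policy for printing points: whether the parts
+   of the dump are separated by newlines or by spaces.  */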
+class format
+{
+public:
+ format (bool newlines) : m_newlines (newlines) {}
+
+ void spacer (pretty_printer *pp) const
+ {
+ if (m_newlines)
+ pp_newline (pp);
+ else
+ pp_space (pp);
+ }
+
+ bool m_newlines;
+};
+
+/* A class for representing a location within the program, without
+ interprocedural information.
+
+ This represents a fine-grained location within the supergraph (or
+ within one of its nodes). */
+
+class function_point
+{
+public:
+ function_point (const supernode *supernode,
+ const superedge *from_edge,
+ unsigned stmt_idx,
+ enum point_kind kind)
+ : m_supernode (supernode), m_from_edge (from_edge),
+ m_stmt_idx (stmt_idx), m_kind (kind)
+ {
+ if (from_edge)
+ {
+ gcc_checking_assert (m_kind == PK_BEFORE_SUPERNODE);
+ gcc_checking_assert (from_edge->get_kind () == SUPEREDGE_CFG_EDGE);
+ }
+ if (stmt_idx)
+ gcc_checking_assert (m_kind == PK_BEFORE_STMT);
+ }
+
+ void print (pretty_printer *pp, const format &f) const;
+ void print_source_line (pretty_printer *pp) const;
+ void dump () const;
+
+ hashval_t hash () const;
+ bool operator== (const function_point &other) const
+ {
+ return (m_supernode == other.m_supernode
+ && m_from_edge == other.m_from_edge
+ && m_stmt_idx == other.m_stmt_idx
+ && m_kind == other.m_kind);
+ }
+
+ /* Accessors. */
+
+ const supernode *get_supernode () const { return m_supernode; }
+ function *get_function () const
+ {
+ if (m_supernode)
+ return m_supernode->m_fun;
+ else
+ return NULL;
+ }
+ const gimple *get_stmt () const;
+ location_t get_location () const;
+ enum point_kind get_kind () const { return m_kind; }
+ const superedge *get_from_edge () const
+ {
+ return m_from_edge;
+ }
+ unsigned get_stmt_idx () const
+ {
+ gcc_assert (m_kind == PK_BEFORE_STMT);
+ return m_stmt_idx;
+ }
+
+  /* Factory functions for making various kinds of function_point.  */
+
+ static function_point from_function_entry (const supergraph &sg,
+ function *fun)
+ {
+ return before_supernode (sg.get_node_for_function_entry (fun),
+ NULL);
+ }
+
+ static function_point before_supernode (const supernode *supernode,
+ const superedge *from_edge)
+ {
+ if (from_edge && from_edge->get_kind () != SUPEREDGE_CFG_EDGE)
+ from_edge = NULL;
+ return function_point (supernode, from_edge, 0, PK_BEFORE_SUPERNODE);
+ }
+
+ static function_point before_stmt (const supernode *supernode,
+ unsigned stmt_idx)
+ {
+ return function_point (supernode, NULL, stmt_idx, PK_BEFORE_STMT);
+ }
+
+ static function_point after_supernode (const supernode *supernode)
+ {
+ return function_point (supernode, NULL, 0, PK_AFTER_SUPERNODE);
+ }
+
+ /* Support for hash_map. */
+
+ static function_point empty ()
+ {
+ return function_point (NULL, NULL, 0, PK_EMPTY);
+ }
+ static function_point deleted ()
+ {
+ return function_point (NULL, NULL, 0, PK_DELETED);
+ }
+
+ static int cmp_within_supernode_1 (const function_point &point_a,
+ const function_point &point_b);
+ static int cmp_within_supernode (const function_point &point_a,
+ const function_point &point_b);
+
+ private:
+ const supernode *m_supernode;
+
+ /* For PK_BEFORE_SUPERNODE, and only for CFG edges. */
+ const superedge *m_from_edge;
+
+ /* Only for PK_BEFORE_STMT. */
+ unsigned m_stmt_idx;
+
+ enum point_kind m_kind;
+};
+
+/* A class for representing a location within the program, including
+ interprocedural information.
+
+ This represents a fine-grained location within the supergraph (or
+ within one of its nodes), along with a call string giving the
+ interprocedural context. */
+
+class program_point
+{
+public:
+ program_point (const function_point &fn_point,
+ const call_string &call_string)
+ : m_function_point (fn_point),
+ m_call_string (call_string)
+ {
+ }
+
+ void print (pretty_printer *pp, const format &f) const;
+ void print_source_line (pretty_printer *pp) const;
+ void dump () const;
+
+ hashval_t hash () const;
+ bool operator== (const program_point &other) const
+ {
+ return (m_function_point == other.m_function_point
+ && m_call_string == other.m_call_string);
+ }
+
+ /* Accessors. */
+
+ const function_point &get_function_point () const { return m_function_point; }
+ const call_string &get_call_string () const { return m_call_string; }
+
+ const supernode *get_supernode () const
+ {
+ return m_function_point.get_supernode ();
+ }
+ function *get_function () const
+ {
+ return m_function_point.get_function ();
+ }
+ function *get_function_at_depth (unsigned depth) const;
+ tree get_fndecl () const
+ {
+ gcc_assert (get_kind () != PK_ORIGIN);
+ return get_function ()->decl;
+ }
+ const gimple *get_stmt () const
+ {
+ return m_function_point.get_stmt ();
+ }
+ location_t get_location () const
+ {
+ return m_function_point.get_location ();
+ }
+ enum point_kind get_kind () const
+ {
+ return m_function_point.get_kind ();
+ }
+ const superedge *get_from_edge () const
+ {
+ return m_function_point.get_from_edge ();
+ }
+ unsigned get_stmt_idx () const
+ {
+ return m_function_point.get_stmt_idx ();
+ }
+
+ /* Get the number of frames we expect at this program point.
+ This will be one more than the length of the call_string
+ (which stores the parent callsites), apart from the origin
+ node, which doesn't have any frames. */
+ int get_stack_depth () const
+ {
+ if (get_kind () == PK_ORIGIN)
+ return 0;
+ return m_call_string.length () + 1;
+ }
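+
+  /* An example of the above (with hypothetical functions): at a point
+     within "baz" reached via "foo" calling "bar" calling "baz", the
+     call string has length 2 (the callsites within "foo" and "bar"),
+     giving a stack depth of 3.  */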
+
+ /* Factory functions for making various kinds of program_point. */
+
+ static program_point from_function_entry (const supergraph &sg,
+ function *fun)
+ {
+ return program_point (function_point::from_function_entry (sg, fun),
+ call_string ());
+ }
+
+ static program_point before_supernode (const supernode *supernode,
+ const superedge *from_edge,
+ const call_string &call_string)
+ {
+ return program_point (function_point::before_supernode (supernode,
+ from_edge),
+ call_string);
+ }
+
+ static program_point before_stmt (const supernode *supernode,
+ unsigned stmt_idx,
+ const call_string &call_string)
+ {
+ return program_point (function_point::before_stmt (supernode, stmt_idx),
+ call_string);
+ }
+
+ static program_point after_supernode (const supernode *supernode,
+ const call_string &call_string)
+ {
+ return program_point (function_point::after_supernode (supernode),
+ call_string);
+ }
+
+ /* Support for hash_map. */
+
+ static program_point empty ()
+ {
+ return program_point (function_point::empty (), call_string ());
+ }
+ static program_point deleted ()
+ {
+ return program_point (function_point::deleted (), call_string ());
+ }
+
+ bool on_edge (exploded_graph &eg, const superedge *succ);
+
+ void validate () const;
+
+ private:
+ const function_point m_function_point;
+ call_string m_call_string;
+};
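+
+/* Example of constructing program_points (an illustrative sketch,
+   where "snode" stands for some supernode and "cs" for a call_string
+   giving the interprocedural context):
+
+     program_point entry
+       = program_point::before_supernode (snode, NULL, cs);
+     program_point exit = program_point::after_supernode (snode, cs);
+
+   Two program_points compare equal iff both their function_point and
+   their call string are equal.  */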
+
+#endif /* GCC_ANALYZER_PROGRAM_POINT_H */
--- /dev/null
+/* Classes for representing the state of interest at a given path of analysis.
+ Copyright (C) 2019-2020 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tree.h"
+#include "diagnostic.h"
+#include "function.h"
+#include "analyzer/analyzer.h"
+#include "analyzer/analyzer-logging.h"
+#include "analyzer/sm.h"
+#include "sbitmap.h"
+#include "tristate.h"
+#include "ordered-hash-map.h"
+#include "selftest.h"
+#include "analyzer/region-model.h"
+#include "analyzer/program-state.h"
+#include "analyzer/constraint-manager.h"
+#include "alloc-pool.h"
+#include "fibonacci_heap.h"
+#include "shortest-paths.h"
+#include "analyzer/constraint-manager.h"
+#include "diagnostic-event-id.h"
+#include "analyzer/pending-diagnostic.h"
+#include "analyzer/diagnostic-manager.h"
+#include "cfg.h"
+#include "basic-block.h"
+#include "gimple.h"
+#include "gimple-iterator.h"
+#include "cgraph.h"
+#include "digraph.h"
+#include "analyzer/supergraph.h"
+#include "analyzer/call-string.h"
+#include "analyzer/program-point.h"
+#include "analyzer/program-state.h"
+#include "analyzer/exploded-graph.h"
+#include "analyzer/state-purge.h"
+#include "analyzer/analyzer-selftests.h"
+
+#if ENABLE_ANALYZER
+
+/* class sm_state_map. */
+
+/* sm_state_map's ctor. */
+
+sm_state_map::sm_state_map ()
+: m_map (), m_global_state (0)
+{
+}
+
+/* Clone the sm_state_map. */
+
+sm_state_map *
+sm_state_map::clone () const
+{
+ return new sm_state_map (*this);
+}
+
+/* Clone this sm_state_map, remapping all svalue_ids within it with ID_MAP.
+
+ Return NULL if there are any svalue_ids that have sm-state for which
+ ID_MAP maps them to svalue_id::null (and thus the clone would have lost
+ the sm-state information). */
+
+sm_state_map *
+sm_state_map::clone_with_remapping (const one_way_svalue_id_map &id_map) const
+{
+ sm_state_map *result = new sm_state_map ();
+ for (typename map_t::iterator iter = m_map.begin ();
+ iter != m_map.end ();
+ ++iter)
+ {
+ svalue_id sid = (*iter).first;
+ gcc_assert (!sid.null_p ());
+ entry_t e = (*iter).second;
+ /* TODO: what should we do if the origin maps from non-null to null?
+ Is that loss of information acceptable? */
+ id_map.update (&e.m_origin);
+
+ svalue_id new_sid = id_map.get_dst_for_src (sid);
+ if (new_sid.null_p ())
+ {
+ delete result;
+ return NULL;
+ }
+ result->m_map.put (new_sid, e);
+ }
+ return result;
+}
+
+/* Print this sm_state_map (for SM) to PP. */
+
+void
+sm_state_map::print (const state_machine &sm, pretty_printer *pp) const
+{
+ bool first = true;
+ pp_string (pp, "{");
+ if (m_global_state != 0)
+ {
+ pp_printf (pp, "global: %s", sm.get_state_name (m_global_state));
+ first = false;
+ }
+ for (typename map_t::iterator iter = m_map.begin ();
+ iter != m_map.end ();
+ ++iter)
+ {
+ if (!first)
+ pp_string (pp, ", ");
+ first = false;
+ svalue_id sid = (*iter).first;
+ sid.print (pp);
+
+ entry_t e = (*iter).second;
+ pp_printf (pp, ": %s (origin: ",
+ sm.get_state_name (e.m_state));
+ e.m_origin.print (pp);
+ pp_string (pp, ")");
+ }
+ pp_string (pp, "}");
+}
+
+/* Dump this object (for SM) to stderr. */
+
+DEBUG_FUNCTION void
+sm_state_map::dump (const state_machine &sm) const
+{
+ pretty_printer pp;
+ pp_show_color (&pp) = pp_show_color (global_dc->printer);
+ pp.buffer->stream = stderr;
+ print (sm, &pp);
+ pp_newline (&pp);
+ pp_flush (&pp);
+}
+
+/* Return true if no states have been set within this map
+   (i.e. all svalues are implicitly in the start state).  */
+
+bool
+sm_state_map::is_empty_p () const
+{
+ return m_map.elements () == 0 && m_global_state == 0;
+}
+
+/* Generate a hash value for this sm_state_map. */
+
+hashval_t
+sm_state_map::hash () const
+{
+ hashval_t result = 0;
+
+ /* Accumulate the result by xoring a hash for each slot, so that the
+ result doesn't depend on the ordering of the slots in the map. */
+
+ for (typename map_t::iterator iter = m_map.begin ();
+ iter != m_map.end ();
+ ++iter)
+ {
+ inchash::hash hstate;
+ inchash::add ((*iter).first, hstate);
+ entry_t e = (*iter).second;
+ hstate.add_int (e.m_state);
+ inchash::add (e.m_origin, hstate);
+ result ^= hstate.end ();
+ }
+ result ^= m_global_state;
+
+ return result;
+}
+
+/* Equality operator for sm_state_map. */
+
+bool
+sm_state_map::operator== (const sm_state_map &other) const
+{
+ if (m_global_state != other.m_global_state)
+ return false;
+
+ if (m_map.elements () != other.m_map.elements ())
+ return false;
+
+ for (typename map_t::iterator iter = m_map.begin ();
+ iter != m_map.end ();
+ ++iter)
+ {
+ svalue_id sid = (*iter).first;
+ entry_t e = (*iter).second;
+ entry_t *other_slot = const_cast <map_t &> (other.m_map).get (sid);
+ if (other_slot == NULL)
+ return false;
+ if (e != *other_slot)
+ return false;
+ }
+
+ gcc_checking_assert (hash () == other.hash ());
+
+ return true;
+}
+
+/* Get the state of SID within this object.
+ States default to the start state. */
+
+state_machine::state_t
+sm_state_map::get_state (svalue_id sid) const
+{
+ gcc_assert (!sid.null_p ());
+
+ if (entry_t *slot
+ = const_cast <map_t &> (m_map).get (sid))
+ return slot->m_state;
+ else
+ return 0;
+}
+
+/* Get the "origin" svalue_id for any state of SID. */
+
+svalue_id
+sm_state_map::get_origin (svalue_id sid) const
+{
+ gcc_assert (!sid.null_p ());
+
+ entry_t *slot
+ = const_cast <map_t &> (m_map).get (sid);
+ if (slot)
+ return slot->m_origin;
+ else
+ return svalue_id::null ();
+}
+
+/* Set the state of SID within MODEL to STATE, recording that
+ the state came from ORIGIN. */
+
+void
+sm_state_map::set_state (region_model *model,
+ svalue_id sid,
+ state_machine::state_t state,
+ svalue_id origin)
+{
+ if (model == NULL)
+ return;
+ equiv_class &ec = model->get_constraints ()->get_equiv_class (sid);
+ set_state (ec, state, origin);
+
+ /* Also do it for all svalues that are equal via non-cm, so that
+ e.g. (void *)&r and (foo *)&r transition together. */
+ for (unsigned i = 0; i < model->get_num_svalues (); i++)
+ {
+ svalue_id other_sid = svalue_id::from_int (i);
+ if (other_sid == sid)
+ continue;
+
+ tristate eq = model->eval_condition_without_cm (sid, EQ_EXPR, other_sid);
+ if (eq.is_true ())
+ impl_set_state (other_sid, state, origin);
+ }
+}
+
+/* Set the state of EC to STATE, recording that the state came from
+ ORIGIN. */
+
+void
+sm_state_map::set_state (const equiv_class &ec,
+ state_machine::state_t state,
+ svalue_id origin)
+{
+ int i;
+ svalue_id *sid;
+ FOR_EACH_VEC_ELT (ec.m_vars, i, sid)
+ impl_set_state (*sid, state, origin);
+}
+
+/* Set state of SID to STATE, bypassing equivalence classes.  */
+
+void
+sm_state_map::impl_set_state (svalue_id sid, state_machine::state_t state,
+ svalue_id origin)
+{
+ /* Special-case state 0 as the default value. */
+ if (state == 0)
+ {
+ if (m_map.get (sid))
+ m_map.remove (sid);
+ return;
+ }
+ gcc_assert (!sid.null_p ());
+ m_map.put (sid, entry_t (state, origin));
+}
+
+/* Set the "global" state within this state map to STATE. */
+
+void
+sm_state_map::set_global_state (state_machine::state_t state)
+{
+ m_global_state = state;
+}
+
+/* Get the "global" state within this state map. */
+
+state_machine::state_t
+sm_state_map::get_global_state () const
+{
+ return m_global_state;
+}
+
+/* Handle CALL to unknown FNDECL with an unknown function body, which
+ could do anything to the states passed to it.
+ Clear any state for SM for the params and any LHS.
+ Note that the function might be known to other state machines, but
+ not to this one. */
+
+void
+sm_state_map::purge_for_unknown_fncall (const exploded_graph &eg,
+ const state_machine &sm,
+ const gcall *call,
+ tree fndecl,
+ region_model *new_model)
+{
+ logger * const logger = eg.get_logger ();
+ if (logger)
+ {
+ if (fndecl)
+ logger->log ("function %qE is unknown to checker %qs",
+ fndecl, sm.get_name ());
+ else
+ logger->log ("unknown function pointer for checker %qs",
+ sm.get_name ());
+ }
+
+ /* Purge any state for parms. */
+ tree iter_param_types = NULL_TREE;
+ if (fndecl)
+ iter_param_types = TYPE_ARG_TYPES (TREE_TYPE (fndecl));
+ for (unsigned arg_idx = 0; arg_idx < gimple_call_num_args (call); arg_idx++)
+ {
+ /* Track expected param type, where available. */
+ if (iter_param_types)
+ {
+ tree param_type = TREE_VALUE (iter_param_types);
+ gcc_assert (param_type);
+ iter_param_types = TREE_CHAIN (iter_param_types);
+
+ /* Don't purge state if it was passed as a const pointer
+ e.g. for things like strlen (PTR). */
+ if (TREE_CODE (param_type) == POINTER_TYPE)
+ if (TYPE_READONLY (TREE_TYPE (param_type)))
+ continue;
+ }
+ tree parm = gimple_call_arg (call, arg_idx);
+ svalue_id parm_sid = new_model->get_rvalue (parm, NULL);
+ set_state (new_model, parm_sid, 0, svalue_id::null ());
+
+ /* Also clear sm-state from svalue_ids that are passed via a
+ pointer. */
+ if (TREE_CODE (parm) == ADDR_EXPR)
+ {
+ tree pointee = TREE_OPERAND (parm, 0);
+ svalue_id parm_sid = new_model->get_rvalue (pointee, NULL);
+ set_state (new_model, parm_sid, 0, svalue_id::null ());
+ }
+ }
+
+ /* Purge any state for any LHS. */
+ if (tree lhs = gimple_call_lhs (call))
+ {
+ svalue_id lhs_sid = new_model->get_rvalue (lhs, NULL);
+ set_state (new_model, lhs_sid, 0, svalue_id::null ());
+ }
+}
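+
+/* As an example of the above: given a call to an unknown function
+   "ext_fn (ptr)" (hypothetical), any sm-state that this checker had
+   recorded for "ptr" (and, if the argument is of the form &var, for
+   "var" itself) is reset to the start state, since the unknown body
+   could have done anything with it; const pointer parameters are left
+   untouched, as handled above.  */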
+
+/* Remap all svalue_ids within this map (both the keys and the origins)
+   using MAP.  */
+
+void
+sm_state_map::remap_svalue_ids (const svalue_id_map &map)
+{
+ map_t tmp_map;
+
+ /* Build an intermediate map, using the new sids. */
+ for (typename map_t::iterator iter = m_map.begin ();
+ iter != m_map.end ();
+ ++iter)
+ {
+ svalue_id sid = (*iter).first;
+ entry_t e = (*iter).second;
+
+ map.update (&sid);
+ map.update (&e.m_origin);
+ tmp_map.put (sid, e);
+ }
+
+ /* Clear the existing values. */
+ m_map.empty ();
+
+ /* Copy over from intermediate map. */
+ for (typename map_t::iterator iter = tmp_map.begin ();
+ iter != tmp_map.end ();
+ ++iter)
+ {
+ svalue_id sid = (*iter).first;
+ entry_t e = (*iter).second;
+
+ impl_set_state (sid, e.m_state, e.m_origin);
+ }
+}
+
+/* Purge any state for svalue_ids >= FIRST_UNUSED_SID.
+ If !SM::can_purge_p, then report the state as leaking,
+ using SM_IDX, CTXT, and MAP.
+ Return the number of states that were purged. */
+
+int
+sm_state_map::on_svalue_purge (const state_machine &sm,
+ int sm_idx,
+ svalue_id first_unused_sid,
+ const svalue_id_map &map,
+ impl_region_model_context *ctxt)
+{
+ /* TODO: ideally remove the slot directly; for now
+ do it in two stages. */
+ auto_vec<svalue_id> to_remove;
+ for (typename map_t::iterator iter = m_map.begin ();
+ iter != m_map.end ();
+ ++iter)
+ {
+ svalue_id dst_sid ((*iter).first);
+ if (dst_sid.as_int () >= first_unused_sid.as_int ())
+ {
+ /* Complain about leaks here. */
+ entry_t e = (*iter).second;
+
+ if (!sm.can_purge_p (e.m_state))
+ ctxt->on_state_leak (sm, sm_idx, dst_sid, first_unused_sid,
+ map, e.m_state);
+
+ to_remove.safe_push (dst_sid);
+ }
+ }
+
+ int i;
+ svalue_id *dst_sid;
+ FOR_EACH_VEC_ELT (to_remove, i, dst_sid)
+ m_map.remove (*dst_sid);
+
+ return to_remove.length ();
+}
+
+/* Set the state of CHILD_SID to that of PARENT_SID. */
+
+void
+sm_state_map::on_inherited_svalue (svalue_id parent_sid,
+ svalue_id child_sid)
+{
+ state_machine::state_t state = get_state (parent_sid);
+ impl_set_state (child_sid, state, parent_sid);
+}
+
+/* Set the state of DST_SID to that of SRC_SID. */
+
+void
+sm_state_map::on_cast (svalue_id src_sid,
+ svalue_id dst_sid)
+{
+ state_machine::state_t state = get_state (src_sid);
+ impl_set_state (dst_sid, state, get_origin (src_sid));
+}
+
+/* Assert that this object is sane. */
+
+void
+sm_state_map::validate (const state_machine &sm,
+ int num_svalues) const
+{
+ /* Skip this in a release build. */
+#if !CHECKING_P
+ return;
+#endif
+
+ for (typename map_t::iterator iter = m_map.begin ();
+ iter != m_map.end ();
+ ++iter)
+ {
+ svalue_id sid = (*iter).first;
+ entry_t e = (*iter).second;
+
+ gcc_assert (sid.as_int () < num_svalues);
+ sm.validate (e.m_state);
+ gcc_assert (e.m_origin.as_int () < num_svalues);
+ }
+}
+
+/* class program_state. */
+
+/* program_state's ctor. */
+
+program_state::program_state (const extrinsic_state &ext_state)
+: m_region_model (new region_model ()),
+ m_checker_states (ext_state.m_checkers.length ())
+{
+ int num_states = ext_state.m_checkers.length ();
+ for (int i = 0; i < num_states; i++)
+ m_checker_states.quick_push (new sm_state_map ());
+}
+
+/* program_state's copy ctor. */
+
+program_state::program_state (const program_state &other)
+: m_region_model (new region_model (*other.m_region_model)),
+ m_checker_states (other.m_checker_states.length ())
+{
+ int i;
+ sm_state_map *smap;
+ FOR_EACH_VEC_ELT (other.m_checker_states, i, smap)
+ m_checker_states.quick_push (smap->clone ());
+}
+
+/* program_state's assignment operator. */
+
+program_state&
+program_state::operator= (const program_state &other)
+{
+ delete m_region_model;
+ m_region_model = new region_model (*other.m_region_model);
+
+ int i;
+ sm_state_map *smap;
+ FOR_EACH_VEC_ELT (m_checker_states, i, smap)
+ delete smap;
+ m_checker_states.truncate (0);
+ gcc_assert (m_checker_states.space (other.m_checker_states.length ()));
+
+ FOR_EACH_VEC_ELT (other.m_checker_states, i, smap)
+ m_checker_states.quick_push (smap->clone ());
+
+ return *this;
+}
+
+#if __cplusplus >= 201103
+/* Move constructor for program_state (when building with C++11). */
+program_state::program_state (program_state &&other)
+: m_region_model (other.m_region_model),
+ m_checker_states (other.m_checker_states.length ())
+{
+ other.m_region_model = NULL;
+
+ int i;
+ sm_state_map *smap;
+ FOR_EACH_VEC_ELT (other.m_checker_states, i, smap)
+ m_checker_states.quick_push (smap);
+ other.m_checker_states.truncate (0);
+}
+#endif
+
+/* program_state's dtor. */
+
+program_state::~program_state ()
+{
+ delete m_region_model;
+}
+
+/* Generate a hash value for this program_state. */
+
+hashval_t
+program_state::hash () const
+{
+ hashval_t result = m_region_model->hash ();
+
+ int i;
+ sm_state_map *smap;
+ FOR_EACH_VEC_ELT (m_checker_states, i, smap)
+ result ^= smap->hash ();
+ return result;
+}
+
+/* Equality operator for program_state.
+ All parts of the program_state (region model, checker states) must
+ equal their counterparts in OTHER for the two program_states to be
+ considered equal. */
+
+bool
+program_state::operator== (const program_state &other) const
+{
+ if (!(*m_region_model == *other.m_region_model))
+ return false;
+
+ int i;
+ sm_state_map *smap;
+ FOR_EACH_VEC_ELT (m_checker_states, i, smap)
+ if (!(*smap == *other.m_checker_states[i]))
+ return false;
+
+ gcc_checking_assert (hash () == other.hash ());
+
+ return true;
+}
+
+/* Print a compact representation of this state to PP. */
+
+void
+program_state::print (const extrinsic_state &ext_state,
+ pretty_printer *pp) const
+{
+ pp_printf (pp, "rmodel: ");
+ m_region_model->print (pp);
+ pp_newline (pp);
+
+ int i;
+ sm_state_map *smap;
+ FOR_EACH_VEC_ELT (m_checker_states, i, smap)
+ {
+ if (!smap->is_empty_p ())
+ {
+ pp_printf (pp, "%s: ", ext_state.get_name (i));
+ smap->print (ext_state.get_sm (i), pp);
+ pp_newline (pp);
+ }
+ }
+}
+
+/* Dump a multiline representation of this state to PP. */
+
+void
+program_state::dump_to_pp (const extrinsic_state &ext_state,
+ bool summarize,
+ pretty_printer *pp) const
+{
+ pp_printf (pp, "rmodel: ");
+ m_region_model->dump_to_pp (pp, summarize);
+
+ int i;
+ sm_state_map *smap;
+ FOR_EACH_VEC_ELT (m_checker_states, i, smap)
+ {
+ if (!smap->is_empty_p ())
+ {
+ pp_printf (pp, "%s: ", ext_state.get_name (i));
+ smap->print (ext_state.get_sm (i), pp);
+ pp_newline (pp);
+ }
+ }
+}
+
+/* Dump a multiline representation of this state to OUTF. */
+
+void
+program_state::dump_to_file (const extrinsic_state &ext_state,
+ bool summarize,
+ FILE *outf) const
+{
+ pretty_printer pp;
+ pp_format_decoder (&pp) = default_tree_printer;
+ if (outf == stderr)
+ pp_show_color (&pp) = pp_show_color (global_dc->printer);
+ pp.buffer->stream = outf;
+ dump_to_pp (ext_state, summarize, &pp);
+ pp_flush (&pp);
+}
+
+/* Dump a multiline representation of this state to stderr. */
+
+DEBUG_FUNCTION void
+program_state::dump (const extrinsic_state &ext_state,
+ bool summarize) const
+{
+ dump_to_file (ext_state, summarize, stderr);
+}
+
+/* Determine if following edge SUCC from ENODE is valid within the graph EG
+ and update this state accordingly in-place.
+
+ Return true if the edge can be followed, or false otherwise.
+
+   For conditionals and switch statements, add the relevant conditions
+   (for the specific edge) to this state.
+ Push/pop frames for interprocedural edges and update params/returned
+ values.
+
+ This is the "state" half of exploded_node::on_edge. */
+
+bool
+program_state::on_edge (exploded_graph &eg,
+ const exploded_node &enode,
+ const superedge *succ,
+ state_change *change)
+{
+ /* Update state. */
+ const program_point &point = enode.get_point ();
+ const gimple *last_stmt = point.get_supernode ()->get_last_stmt ();
+
+ /* For conditionals and switch statements, add the
+     relevant conditions (for the specific edge) to this state;
+ skip edges for which the resulting constraints
+ are impossible.
+ This also updates frame information for call/return superedges.
+ Adding the relevant conditions for the edge could also trigger
+ sm-state transitions (e.g. transitions due to ptrs becoming known
+     to be NULL or non-NULL).  */
+
+ impl_region_model_context ctxt (eg, &enode,
+ &enode.get_state (),
+ this, change,
+ last_stmt);
+ if (!m_region_model->maybe_update_for_edge (*succ,
+ last_stmt,
+ &ctxt))
+ {
+ logger * const logger = eg.get_logger ();
+ if (logger)
+ logger->log ("edge to SN: %i is impossible"
+ " due to region_model constraints",
+ succ->m_dest->m_index);
+ return false;
+ }
+
+ return true;
+}
+
+/* Generate a simpler version of THIS, discarding state that's no longer
+ relevant at POINT.
+ The idea is that we're more likely to be able to consolidate
+ multiple (point, state) into single exploded_nodes if we discard
+ irrelevant state (e.g. at the end of functions).
+
+ Retain state affected by CHANGE, to make it easier to generate
+ state_change_events. */
+
+program_state
+program_state::prune_for_point (exploded_graph &eg,
+ const program_point &point,
+ state_change *change) const
+{
+ logger * const logger = eg.get_logger ();
+ LOG_SCOPE (logger);
+
+ function *fun = point.get_function ();
+ if (!fun)
+ return *this;
+
+ program_state new_state (*this);
+
+ purge_stats stats;
+
+ const state_purge_map *pm = eg.get_purge_map ();
+ if (pm)
+ {
+ region_id_set purgeable_ssa_regions (new_state.m_region_model);
+ region_id frame_rid
+ = new_state.m_region_model->get_current_frame_id ();
+ frame_region *frame
+ = new_state.m_region_model->get_region <frame_region>(frame_rid);
+
+ /* TODO: maybe move to a member of region_model? */
+
+ auto_vec<tree> ssa_names_to_purge;
+ for (frame_region::map_t::iterator iter = frame->begin ();
+ iter != frame->end ();
+ ++iter)
+ {
+ tree var = (*iter).first;
+ region_id rid = (*iter).second;
+ if (TREE_CODE (var) == SSA_NAME)
+ {
+ const state_purge_per_ssa_name &per_ssa
+ = pm->get_data_for_ssa_name (var);
+ if (!per_ssa.needed_at_point_p (point.get_function_point ()))
+ {
+ region *region
+ = new_state.m_region_model->get_region (rid);
+ svalue_id sid = region->get_value_direct ();
+ if (!sid.null_p ())
+ {
+ if (!new_state.can_purge_p (eg.get_ext_state (), sid))
+ {
+ /* (currently only state maps can keep things
+ alive). */
+ if (logger)
+ logger->log ("not purging RID: %i for %qE"
+ " (used by state map)",
+ rid.as_int (), var);
+ continue;
+ }
+
+ /* Don't purge regions containing svalues that
+ have a change of sm-state, to make it easier to
+ generate state_change_event messages. */
+ if (change)
+ if (change->affects_p (sid))
+ {
+ if (logger)
+ logger->log ("not purging RID: %i for %qE"
+ " (affected by change)",
+ rid.as_int (), var);
+ continue;
+ }
+ }
+ purgeable_ssa_regions.add_region (rid);
+ ssa_names_to_purge.safe_push (var);
+ if (logger)
+ logger->log ("purging RID: %i for %qE", rid.as_int (), var);
+ /* We also need to remove the region from the map.
+ We're in mid-traversal, so the removal is done in
+ unbind below. */
+ }
+ }
+ }
+
+ /* Unbind the regions from the frame's map of vars-to-regions. */
+ unsigned i;
+ tree var;
+ FOR_EACH_VEC_ELT (ssa_names_to_purge, i, var)
+ frame->unbind (var);
+
+ /* Purge the regions. Nothing should point to them, and they
+ should have no children, as they are for SSA names. */
+ new_state.m_region_model->purge_regions (purgeable_ssa_regions,
+ &stats,
+ eg.get_logger ());
+ }
+
+ /* Purge unused svalues. */
+ // TODO: which enode to use, if any?
+ impl_region_model_context ctxt (eg, NULL,
+ this,
+ &new_state,
+ change,
+ NULL);
+ new_state.m_region_model->purge_unused_svalues (&stats, &ctxt);
+ if (logger)
+ {
+ logger->log ("num svalues purged: %i", stats.m_num_svalues);
+ logger->log ("num regions purged: %i", stats.m_num_regions);
+ logger->log ("num equiv_classes purged: %i", stats.m_num_equiv_classes);
+ logger->log ("num constraints purged: %i", stats.m_num_constraints);
+ logger->log ("num sm map items purged: %i", stats.m_num_client_items);
+ }
+
+ new_state.m_region_model->canonicalize (&ctxt);
+
+ return new_state;
+}
+
+/* Remap all svalue_ids in this state's m_checker_states according to MAP.
+   The svalue_ids in the region_model are assumed to already have been
+ remapped. */
+
+void
+program_state::remap_svalue_ids (const svalue_id_map &map)
+{
+ int i;
+ sm_state_map *smap;
+ FOR_EACH_VEC_ELT (m_checker_states, i, smap)
+ smap->remap_svalue_ids (map);
+}
+
+/* Attempt to return a tree that represents SID, or return NULL_TREE.
+ Find the first region that stores the value (e.g. a local) and
+ generate a representative tree for it. */
+
+tree
+program_state::get_representative_tree (svalue_id sid) const
+{
+ return m_region_model->get_representative_tree (sid);
+}
+
+/* Attempt to merge this state with OTHER, both using EXT_STATE.
+ Write the result to *OUT.
+ If the states were merged successfully, return true. */
+
+bool
+program_state::can_merge_with_p (const program_state &other,
+ const extrinsic_state &ext_state,
+ program_state *out) const
+{
+ gcc_assert (out);
+
+ /* TODO: initially I had an early reject here if there
+ are sm-differences between the states. However, this was
+ falsely rejecting merger opportunities for states where the
+ only difference was in svalue_id ordering. */
+
+ /* Attempt to merge the region_models. */
+
+ svalue_id_merger_mapping sid_mapping (*m_region_model,
+ *other.m_region_model);
+ if (!m_region_model->can_merge_with_p (*other.m_region_model,
+ out->m_region_model,
+ &sid_mapping))
+ return false;
+
+ /* Copy m_checker_states to result, remapping svalue_ids using
+ sid_mapping. */
+ int i;
+ sm_state_map *smap;
+ FOR_EACH_VEC_ELT (out->m_checker_states, i, smap)
+ delete smap;
+ out->m_checker_states.truncate (0);
+
+ /* Remap this and other's m_checker_states using sid_mapping.
+ Only merge states that have equality between the two end-results:
+ sm-state differences are likely to be interesting to end-users, and
+ hence are worth exploring as separate paths in the exploded graph. */
+ FOR_EACH_VEC_ELT (m_checker_states, i, smap)
+ {
+ sm_state_map *other_smap = other.m_checker_states[i];
+
+ /* If clone_with_remapping returns NULL for one of the input smaps,
+ then it has sm-state for an svalue_id where the svalue_id is
+ being mapped to svalue_id::null in its sid_mapping, meaning that
+ the svalue is to be dropped during the merger. We don't want
+ to lose sm-state during a state merger, so return false for these
+ cases. */
+ sm_state_map *remapped_a_smap
+ = smap->clone_with_remapping (sid_mapping.m_map_from_a_to_m);
+ if (!remapped_a_smap)
+ return false;
+ sm_state_map *remapped_b_smap
+ = other_smap->clone_with_remapping (sid_mapping.m_map_from_b_to_m);
+ if (!remapped_b_smap)
+ {
+ delete remapped_a_smap;
+ return false;
+ }
+
+ /* Both states have sm-state for the same values; now ensure that the
+ states are equal. */
+ if (*remapped_a_smap == *remapped_b_smap)
+ {
+ out->m_checker_states.safe_push (remapped_a_smap);
+ delete remapped_b_smap;
+ }
+ else
+ {
+ /* Don't merge if there are sm-state differences. */
+ delete remapped_a_smap;
+ delete remapped_b_smap;
+ return false;
+ }
+ }
+
+ impl_region_model_context ctxt (out, NULL, ext_state);
+ out->m_region_model->canonicalize (&ctxt);
+
+ return true;
+}
+
+/* Assert that this object is valid. */
+
+void
+program_state::validate (const extrinsic_state &ext_state) const
+{
+ /* Skip this in a release build. */
+#if !CHECKING_P
+ return;
+#endif
+
+ m_region_model->validate ();
+ gcc_assert (m_checker_states.length () == ext_state.get_num_checkers ());
+ int sm_idx;
+ sm_state_map *smap;
+ FOR_EACH_VEC_ELT (m_checker_states, sm_idx, smap)
+ {
+ const state_machine &sm = ext_state.get_sm (sm_idx);
+ smap->validate (sm, m_region_model->get_num_svalues ());
+ }
+}
+
+/* Dump this sm_change to PP. */
+
+void
+state_change::sm_change::dump (pretty_printer *pp,
+ const extrinsic_state &ext_state) const
+{
+ const state_machine &sm = get_sm (ext_state);
+ pp_string (pp, "(");
+ m_new_sid.print (pp);
+ pp_printf (pp, ": %s: %qs -> %qs)",
+ sm.get_name (),
+ sm.get_state_name (m_old_state),
+ sm.get_state_name (m_new_state));
+}
+
+/* Remap all svalue_ids in this change according to MAP. */
+
+void
+state_change::sm_change::remap_svalue_ids (const svalue_id_map &map)
+{
+ map.update (&m_new_sid);
+}
+
+/* Purge any svalue_ids >= FIRST_UNUSED_SID.
+ Return the number of states that were purged. */
+
+int
+state_change::sm_change::on_svalue_purge (svalue_id first_unused_sid)
+{
+ if (m_new_sid.as_int () >= first_unused_sid.as_int ())
+ {
+ m_new_sid = svalue_id::null ();
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Assert that this object is sane. */
+
+void
+state_change::sm_change::validate (const program_state &new_state) const
+{
+ m_new_sid.validate (*new_state.m_region_model);
+}
+
+/* state_change's ctor. */
+
+state_change::state_change ()
+{
+}
+
+/* state_change's copy ctor. */
+
+state_change::state_change (const state_change &other)
+: m_sm_changes (other.m_sm_changes.length ())
+{
+ unsigned i;
+ sm_change *change;
+ FOR_EACH_VEC_ELT (other.m_sm_changes, i, change)
+ m_sm_changes.quick_push (*change);
+}
+
+/* Record a state-machine state change. */
+
+void
+state_change::add_sm_change (int sm_idx,
+ svalue_id new_sid,
+ state_machine::state_t old_state,
+ state_machine::state_t new_state)
+{
+ m_sm_changes.safe_push (sm_change (sm_idx,
+ new_sid,
+ old_state, new_state));
+}
+
+/* Return true if SID (in the new state) was affected by any
+ sm-state changes. */
+
+bool
+state_change::affects_p (svalue_id sid) const
+{
+ unsigned i;
+ sm_change *change;
+ FOR_EACH_VEC_ELT (m_sm_changes, i, change)
+ {
+ if (sid == change->m_new_sid)
+ return true;
+ }
+ return false;
+}
+
+/* Dump this state_change to PP. */
+
+void
+state_change::dump (pretty_printer *pp,
+ const extrinsic_state &ext_state) const
+{
+ unsigned i;
+ sm_change *change;
+ FOR_EACH_VEC_ELT (m_sm_changes, i, change)
+ {
+ if (i > 0)
+ pp_string (pp, ", ");
+ change->dump (pp, ext_state);
+ }
+}
+
+/* Dump this state_change to stderr. */
+
+void
+state_change::dump (const extrinsic_state &ext_state) const
+{
+ pretty_printer pp;
+ pp_show_color (&pp) = pp_show_color (global_dc->printer);
+ pp.buffer->stream = stderr;
+ dump (&pp, ext_state);
+ pp_newline (&pp);
+ pp_flush (&pp);
+}
+
+/* Remap all svalue_ids in this state_change according to MAP. */
+
+void
+state_change::remap_svalue_ids (const svalue_id_map &map)
+{
+ unsigned i;
+ sm_change *change;
+ FOR_EACH_VEC_ELT (m_sm_changes, i, change)
+ change->remap_svalue_ids (map);
+}
+
+/* Purge any svalue_ids >= FIRST_UNUSED_SID.
+ Return the number of states that were purged. */
+
+int
+state_change::on_svalue_purge (svalue_id first_unused_sid)
+{
+ int result = 0;
+ unsigned i;
+ sm_change *change;
+ FOR_EACH_VEC_ELT (m_sm_changes, i, change)
+ result += change->on_svalue_purge (first_unused_sid);
+ return result;
+}
+
+/* Assert that this object is sane. */
+
+void
+state_change::validate (const program_state &new_state) const
+{
+ /* Skip this in a release build. */
+#if !CHECKING_P
+ return;
+#endif
+ unsigned i;
+ sm_change *change;
+ FOR_EACH_VEC_ELT (m_sm_changes, i, change)
+ change->validate (new_state);
+}
+
+#if CHECKING_P
+
+namespace selftest {
+
+/* Tests for sm_state_map. */
+
+static void
+test_sm_state_map ()
+{
+ tree x = build_global_decl ("x", integer_type_node);
+ tree y = build_global_decl ("y", integer_type_node);
+ tree z = build_global_decl ("z", integer_type_node);
+
+ /* Test setting states on svalue_id instances directly. */
+ {
+ region_model model;
+ svalue_id sid_x = model.get_rvalue (x, NULL);
+ svalue_id sid_y = model.get_rvalue (y, NULL);
+ svalue_id sid_z = model.get_rvalue (z, NULL);
+
+ sm_state_map map;
+ ASSERT_TRUE (map.is_empty_p ());
+ ASSERT_EQ (map.get_state (sid_x), 0);
+
+ map.impl_set_state (sid_x, 42, sid_z);
+ ASSERT_EQ (map.get_state (sid_x), 42);
+ ASSERT_EQ (map.get_origin (sid_x), sid_z);
+ ASSERT_EQ (map.get_state (sid_y), 0);
+ ASSERT_FALSE (map.is_empty_p ());
+
+ map.impl_set_state (sid_y, 0, sid_z);
+ ASSERT_EQ (map.get_state (sid_y), 0);
+
+ map.impl_set_state (sid_x, 0, sid_z);
+ ASSERT_EQ (map.get_state (sid_x), 0);
+ ASSERT_TRUE (map.is_empty_p ());
+ }
+
+ /* Test setting states via equivalence classes. */
+ {
+ region_model model;
+ svalue_id sid_x = model.get_rvalue (x, NULL);
+ svalue_id sid_y = model.get_rvalue (y, NULL);
+ svalue_id sid_z = model.get_rvalue (z, NULL);
+
+ sm_state_map map;
+ ASSERT_TRUE (map.is_empty_p ());
+ ASSERT_EQ (map.get_state (sid_x), 0);
+ ASSERT_EQ (map.get_state (sid_y), 0);
+
+ model.add_constraint (x, EQ_EXPR, y, NULL);
+
+ /* Setting x to a state should also update y, as they
+ are in the same equivalence class. */
+ map.set_state (&model, sid_x, 5, sid_z);
+ ASSERT_EQ (map.get_state (sid_x), 5);
+ ASSERT_EQ (map.get_state (sid_y), 5);
+ ASSERT_EQ (map.get_origin (sid_x), sid_z);
+ ASSERT_EQ (map.get_origin (sid_y), sid_z);
+ }
+
+ /* Test equality and hashing. */
+ {
+ region_model model;
+ svalue_id sid_y = model.get_rvalue (y, NULL);
+ svalue_id sid_z = model.get_rvalue (z, NULL);
+
+ sm_state_map map0;
+ sm_state_map map1;
+ sm_state_map map2;
+
+ ASSERT_EQ (map0.hash (), map1.hash ());
+ ASSERT_EQ (map0, map1);
+
+ map1.impl_set_state (sid_y, 5, sid_z);
+ ASSERT_NE (map0.hash (), map1.hash ());
+ ASSERT_NE (map0, map1);
+
+ /* Make the same change to map2. */
+ map2.impl_set_state (sid_y, 5, sid_z);
+ ASSERT_EQ (map1.hash (), map2.hash ());
+ ASSERT_EQ (map1, map2);
+ }
+
+ /* Equality and hashing shouldn't depend on ordering. */
+ {
+ sm_state_map map0;
+ sm_state_map map1;
+ sm_state_map map2;
+
+ ASSERT_EQ (map0.hash (), map1.hash ());
+ ASSERT_EQ (map0, map1);
+
+ map1.impl_set_state (svalue_id::from_int (14), 2, svalue_id::null ());
+ map1.impl_set_state (svalue_id::from_int (16), 3, svalue_id::null ());
+ map1.impl_set_state (svalue_id::from_int (1), 2, svalue_id::null ());
+ map1.impl_set_state (svalue_id::from_int (9), 2, svalue_id::null ());
+
+ map2.impl_set_state (svalue_id::from_int (1), 2, svalue_id::null ());
+ map2.impl_set_state (svalue_id::from_int (16), 3, svalue_id::null ());
+ map2.impl_set_state (svalue_id::from_int (14), 2, svalue_id::null ());
+ map2.impl_set_state (svalue_id::from_int (9), 2, svalue_id::null ());
+
+ ASSERT_EQ (map1.hash (), map2.hash ());
+ ASSERT_EQ (map1, map2);
+ }
+
+ /* Test sm_state_map::remap_svalue_ids. */
+ {
+ sm_state_map map;
+ svalue_id sid_0 = svalue_id::from_int (0);
+ svalue_id sid_1 = svalue_id::from_int (1);
+ svalue_id sid_2 = svalue_id::from_int (2);
+
+ map.impl_set_state (sid_0, 42, sid_2);
+ ASSERT_EQ (map.get_state (sid_0), 42);
+ ASSERT_EQ (map.get_origin (sid_0), sid_2);
+ ASSERT_EQ (map.get_state (sid_1), 0);
+ ASSERT_EQ (map.get_state (sid_2), 0);
+
+ /* Apply a remapping to the IDs. */
+ svalue_id_map remapping (3);
+ remapping.put (sid_0, sid_1);
+ remapping.put (sid_1, sid_2);
+ remapping.put (sid_2, sid_0);
+ map.remap_svalue_ids (remapping);
+
+ /* Verify that the IDs have been remapped. */
+ ASSERT_EQ (map.get_state (sid_1), 42);
+ ASSERT_EQ (map.get_origin (sid_1), sid_0);
+ ASSERT_EQ (map.get_state (sid_2), 0);
+ ASSERT_EQ (map.get_state (sid_0), 0);
+ }
+
+ // TODO: coverage for purging
+}
+
+/* Verify that program_states with identical sm-state can be merged,
+ and that the merged program_state preserves the sm-state. */
+
+static void
+test_program_state_merging ()
+{
+ /* Create a program_state for a global ptr "p" that has
+ malloc sm-state, pointing to a region on the heap. */
+ tree p = build_global_decl ("p", ptr_type_node);
+
+ auto_delete_vec <state_machine> checkers;
+ checkers.safe_push (make_malloc_state_machine (NULL));
+ extrinsic_state ext_state (checkers);
+
+ program_state s0 (ext_state);
+ impl_region_model_context ctxt (&s0, NULL, ext_state);
+
+ region_model *model0 = s0.m_region_model;
+ region_id new_rid = model0->add_new_malloc_region ();
+ svalue_id ptr_sid
+ = model0->get_or_create_ptr_svalue (ptr_type_node, new_rid);
+ model0->set_value (model0->get_lvalue (p, &ctxt),
+ ptr_sid, &ctxt);
+ sm_state_map *smap = s0.m_checker_states[0];
+ const state_machine::state_t TEST_STATE = 3;
+ smap->impl_set_state (ptr_sid, TEST_STATE, svalue_id::null ());
+ ASSERT_EQ (smap->get_state (ptr_sid), TEST_STATE);
+
+ model0->canonicalize (&ctxt);
+
+ /* Verify that canonicalization preserves sm-state. */
+ ASSERT_EQ (smap->get_state (model0->get_rvalue (p, NULL)), TEST_STATE);
+
+ /* Make a copy of the program_state. */
+ program_state s1 (s0);
+ ASSERT_EQ (s0, s1);
+
+ /* We have two identical states with "p" pointing to a heap region
+ with the given sm-state.
+ They ought to be mergeable, preserving the sm-state. */
+ program_state merged (ext_state);
+ ASSERT_TRUE (s0.can_merge_with_p (s1, ext_state, &merged));
+ merged.validate (ext_state);
+
+ /* Verify that the merged state has the sm-state for "p". */
+ region_model *merged_model = merged.m_region_model;
+ sm_state_map *merged_smap = merged.m_checker_states[0];
+ ASSERT_EQ (merged_smap->get_state (merged_model->get_rvalue (p, NULL)),
+ TEST_STATE);
+
+ /* Try canonicalizing. */
+ impl_region_model_context merged_ctxt (&merged, NULL, ext_state);
+ merged.m_region_model->canonicalize (&merged_ctxt);
+ merged.validate (ext_state);
+
+ /* Verify that the merged state still has the sm-state for "p". */
+ ASSERT_EQ (merged_smap->get_state (merged_model->get_rvalue (p, NULL)),
+ TEST_STATE);
+
+ /* After canonicalization, we ought to have equality with the inputs. */
+ ASSERT_EQ (s0, merged);
+}
+
+/* Run all of the selftests within this file. */
+
+void
+analyzer_program_state_cc_tests ()
+{
+ test_sm_state_map ();
+ test_program_state_merging ();
+}
+
+} // namespace selftest
+
+#endif /* CHECKING_P */
+
+#endif /* #if ENABLE_ANALYZER */
--- /dev/null
+/* Classes for representing the state of interest at a given path of analysis.
+ Copyright (C) 2019-2020 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_ANALYZER_PROGRAM_STATE_H
+#define GCC_ANALYZER_PROGRAM_STATE_H
+
+/* Data shared by all program_state instances. */
+
+class extrinsic_state
+{
+public:
+ extrinsic_state (auto_delete_vec <state_machine> &checkers)
+ : m_checkers (checkers)
+ {
+ }
+
+ const state_machine &get_sm (int idx) const
+ {
+ return *m_checkers[idx];
+ }
+
+ const char *get_name (int idx) const
+ {
+ return m_checkers[idx]->get_name ();
+ }
+
+ unsigned get_num_checkers () const { return m_checkers.length (); }
+
+ /* The state machines. */
+ auto_delete_vec <state_machine> &m_checkers;
+};
+
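+/* Hashing/equality traits allowing svalue_id to be used as a hash_map
+   key: svalue_id::null () is the "empty" value and -2 the "deleted"
+   value, so a zero-valued svalue_id remains usable as a key (hence
+   empty_zero_p is false).  */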
+template <> struct default_hash_traits<svalue_id>
+: public pod_hash_traits<svalue_id>
+{
+ static const bool empty_zero_p = false;
+};
+
+template <>
+inline hashval_t
+pod_hash_traits<svalue_id>::hash (value_type v)
+{
+ return v.as_int ();
+}
+
+template <>
+inline bool
+pod_hash_traits<svalue_id>::equal (const value_type &existing,
+ const value_type &candidate)
+{
+ return existing == candidate;
+}
+template <>
+inline void
+pod_hash_traits<svalue_id>::mark_deleted (value_type &v)
+{
+ v = svalue_id::from_int (-2);
+}
+template <>
+inline void
+pod_hash_traits<svalue_id>::mark_empty (value_type &v)
+{
+ v = svalue_id::null ();
+}
+template <>
+inline bool
+pod_hash_traits<svalue_id>::is_deleted (value_type v)
+{
+ return v.as_int () == -2;
+}
+template <>
+inline bool
+pod_hash_traits<svalue_id>::is_empty (value_type v)
+{
+ return v.null_p ();
+}
+
+/* Map from svalue_id to state machine state, also capturing the origin of
+ each state. */
+
+class sm_state_map
+{
+public:
+ /* An entry in the hash_map. */
+ struct entry_t
+ {
+ /* Default ctor needed by hash_map::empty. */
+ entry_t ()
+ : m_state (0), m_origin (svalue_id::null ())
+ {
+ }
+
+ entry_t (state_machine::state_t state,
+ svalue_id origin)
+ : m_state (state), m_origin (origin)
+ {}
+
+ bool operator== (const entry_t &other) const
+ {
+ return (m_state == other.m_state
+ && m_origin == other.m_origin);
+ }
+ bool operator!= (const entry_t &other) const
+ {
+ return !(*this == other);
+ }
+
+ state_machine::state_t m_state;
+ svalue_id m_origin;
+ };
+ typedef hash_map <svalue_id, entry_t> map_t;
+ typedef typename map_t::iterator iterator_t;
+
+ sm_state_map ();
+
+ sm_state_map *clone () const;
+
+ sm_state_map *
+ clone_with_remapping (const one_way_svalue_id_map &id_map) const;
+
+ void print (const state_machine &sm, pretty_printer *pp) const;
+ void dump (const state_machine &sm) const;
+
+ bool is_empty_p () const;
+
+ hashval_t hash () const;
+
+ bool operator== (const sm_state_map &other) const;
+ bool operator!= (const sm_state_map &other) const
+ {
+ return !(*this == other);
+ }
+
+ state_machine::state_t get_state (svalue_id sid) const;
+ svalue_id get_origin (svalue_id sid) const;
+
+ void set_state (region_model *model,
+ svalue_id sid,
+ state_machine::state_t state,
+ svalue_id origin);
+ void set_state (const equiv_class &ec,
+ state_machine::state_t state,
+ svalue_id origin);
+ void impl_set_state (svalue_id sid,
+ state_machine::state_t state,
+ svalue_id origin);
+
+ void set_global_state (state_machine::state_t state);
+ state_machine::state_t get_global_state () const;
+
+ void purge_for_unknown_fncall (const exploded_graph &eg,
+ const state_machine &sm,
+ const gcall *call, tree fndecl,
+ region_model *new_model);
+
+ void remap_svalue_ids (const svalue_id_map &map);
+
+ int on_svalue_purge (const state_machine &sm,
+ int sm_idx,
+ svalue_id first_unused_sid,
+ const svalue_id_map &map,
+ impl_region_model_context *ctxt);
+
+ void on_inherited_svalue (svalue_id parent_sid,
+ svalue_id child_sid);
+
+ void on_cast (svalue_id src_sid,
+ svalue_id dst_sid);
+
+ void validate (const state_machine &sm, int num_svalues) const;
+
+ iterator_t begin () const { return m_map.begin (); }
+ iterator_t end () const { return m_map.end (); }
+
+private:
+ map_t m_map;
+ state_machine::state_t m_global_state;
+};
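+
+/* Example usage (an illustrative sketch; SID_P and SID_Q stand for
+   svalue_ids of interest, and 1 for a state of the relevant
+   state_machine):
+
+     sm_state_map map;
+     map.impl_set_state (sid_p, 1, svalue_id::null ());
+     map.get_state (sid_p);  // 1
+     map.get_state (sid_q);  // 0: the start state
+
+   svalue_ids with no entry are implicitly in the start state (0), and
+   setting an entry back to state 0 removes it, so a map containing
+   only start states compares equal to an empty map.  */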
+
+/* A class for representing the state of interest at a given path of
+ analysis.
+
+ Currently this is a combination of:
+ (a) a region_model, giving:
+ (a.1) a hierarchy of memory regions
+ (a.2) values for the regions
+ (a.3) inequalities between values
+ (b) sm_state_maps per state machine, giving a sparse mapping of
+ values to states. */
+
+class program_state
+{
+public:
+ program_state (const extrinsic_state &ext_state);
+ program_state (const program_state &other);
+ program_state& operator= (const program_state &other);
+
+#if __cplusplus >= 201103
+ program_state (program_state &&other);
+ program_state& operator= (program_state &&other); // doesn't seem to be used
+#endif
+
+ ~program_state ();
+
+ hashval_t hash () const;
+ bool operator== (const program_state &other) const;
+ bool operator!= (const program_state &other) const
+ {
+ return !(*this == other);
+ }
+
+ void print (const extrinsic_state &ext_state,
+ pretty_printer *pp) const;
+
+ void dump_to_pp (const extrinsic_state &ext_state, bool summarize,
+ pretty_printer *pp) const;
+ void dump_to_file (const extrinsic_state &ext_state, bool summarize,
+ FILE *outf) const;
+ void dump (const extrinsic_state &ext_state, bool summarize) const;
+
+ bool on_edge (exploded_graph &eg,
+ const exploded_node &enode,
+ const superedge *succ,
+ state_change *change);
+
+ program_state prune_for_point (exploded_graph &eg,
+ const program_point &point,
+ state_change *change) const;
+
+ void remap_svalue_ids (const svalue_id_map &map);
+
+ tree get_representative_tree (svalue_id sid) const;
+
+ bool can_purge_p (const extrinsic_state &ext_state,
+ svalue_id sid)
+ {
+ /* Don't purge vars that have non-purgeable sm state, to avoid
+ generating false "leak" complaints. */
+ int i;
+ sm_state_map *smap;
+ FOR_EACH_VEC_ELT (m_checker_states, i, smap)
+ {
+ const state_machine &sm = ext_state.get_sm (i);
+ if (!sm.can_purge_p (smap->get_state (sid)))
+ return false;
+ }
+ return true;
+ }
+
+ bool can_merge_with_p (const program_state &other,
+ const extrinsic_state &ext_state,
+ program_state *out) const;
+
+ void validate (const extrinsic_state &ext_state) const;
+
+ /* TODO: lose the pointer here (const-correctness issues?). */
+ region_model *m_region_model;
+ auto_delete_vec<sm_state_map> m_checker_states;
+};
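+
+/* Example of merging two program_states (an illustrative sketch;
+   s0 and s1 are program_states built from the same extrinsic_state
+   ext_state):
+
+     program_state merged (ext_state);
+     if (s0.can_merge_with_p (s1, ext_state, &merged))
+       {
+         ... "merged" now holds the result ...
+       }
+
+   Merging fails (returning false) if the two states have differing
+   sm-state, since such differences are worth exploring as separate
+   paths in the exploded graph.  */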
+
+/* An abstract base class for use with for_each_state_change. */
+
+class state_change_visitor
+{
+public:
+ virtual ~state_change_visitor () {}
+
+ /* Return true for early exit, false to keep iterating. */
+ virtual bool on_global_state_change (const state_machine &sm,
+ state_machine::state_t src_sm_val,
+ state_machine::state_t dst_sm_val) = 0;
+
+ /* Return true for early exit, false to keep iterating. */
+ virtual bool on_state_change (const state_machine &sm,
+ state_machine::state_t src_sm_val,
+ state_machine::state_t dst_sm_val,
+ tree dst_rep,
+ svalue_id dst_origin_sid) = 0;
+};
+
+extern bool for_each_state_change (const program_state &src_state,
+ const program_state &dst_state,
+ const extrinsic_state &ext_state,
+ state_change_visitor *visitor);
+
+/* A class for recording "interesting" state changes.
+ This is used for annotating edges in the GraphViz output of the
+ exploded_graph, and for recording sm-state-changes, so that
+ values that change aren't purged (to make it easier to generate
+ state_change_event instances in the diagnostic_path). */
+
+class state_change
+{
+ public:
+ struct sm_change
+ {
+ sm_change (int sm_idx,
+ svalue_id new_sid,
+ state_machine::state_t old_state,
+ state_machine::state_t new_state)
+ : m_sm_idx (sm_idx),
+ m_new_sid (new_sid),
+ m_old_state (old_state), m_new_state (new_state)
+ {}
+
+ const state_machine &get_sm (const extrinsic_state &ext_state) const
+ {
+ return ext_state.get_sm (m_sm_idx);
+ }
+
+ void dump (pretty_printer *pp, const extrinsic_state &ext_state) const;
+
+ void remap_svalue_ids (const svalue_id_map &map);
+ int on_svalue_purge (svalue_id first_unused_sid);
+
+ void validate (const program_state &new_state) const;
+
+ int m_sm_idx;
+ svalue_id m_new_sid;
+ state_machine::state_t m_old_state;
+ state_machine::state_t m_new_state;
+ };
+
+ state_change ();
+ state_change (const state_change &other);
+
+ void add_sm_change (int sm_idx,
+ svalue_id new_sid,
+ state_machine::state_t old_state,
+ state_machine::state_t new_state);
+
+ bool affects_p (svalue_id sid) const;
+
+ void dump (pretty_printer *pp, const extrinsic_state &ext_state) const;
+ void dump (const extrinsic_state &ext_state) const;
+
+ void remap_svalue_ids (const svalue_id_map &map);
+ int on_svalue_purge (svalue_id first_unused_sid);
+
+ void validate (const program_state &new_state) const;
+
+ private:
+ auto_vec<sm_change> m_sm_changes;
+};
+
+#endif /* GCC_ANALYZER_PROGRAM_STATE_H */
--- /dev/null
+/* Classes for modeling the state of memory.
+ Copyright (C) 2019-2020 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tree.h"
+#include "function.h"
+#include "basic-block.h"
+#include "gimple.h"
+#include "gimple-iterator.h"
+#include "graphviz.h"
+#include "options.h"
+#include "cgraph.h"
+#include "tree-dfa.h"
+#include "stringpool.h"
+#include "convert.h"
+#include "target.h"
+#include "fold-const.h"
+#include "tree-pretty-print.h"
+#include "diagnostic-color.h"
+#include "diagnostic-metadata.h"
+#include "diagnostic-core.h"
+#include "tristate.h"
+#include "selftest.h"
+#include "analyzer/analyzer.h"
+#include "analyzer/analyzer-logging.h"
+#include "ordered-hash-map.h"
+#include "cfg.h"
+#include "digraph.h"
+#include "analyzer/supergraph.h"
+#include "sbitmap.h"
+#include "analyzer/region-model.h"
+#include "analyzer/constraint-manager.h"
+#include "diagnostic-event-id.h"
+#include "analyzer/sm.h"
+#include "analyzer/pending-diagnostic.h"
+#include "analyzer/analyzer-selftests.h"
+
+#if ENABLE_ANALYZER
+
+/* Dump T to PP in language-independent form, for debugging/logging/dumping
+ purposes. */
+
+static void
+dump_tree (pretty_printer *pp, tree t)
+{
+ dump_generic_node (pp, t, 0, TDF_SLIM, 0);
+}
+
+/* Dump this path_var to PP (which must support %E for trees).
+
+ Express the stack depth using an "@DEPTH" suffix, so e.g. given
+ void foo (int j);
+ void bar (int i)
+ {
+ foo (i);
+ }
+ then:
+ - the "i" in "bar" would be "(i @ 0)"
+ - the "j" in "foo" would be "(j @ 1)". */
+
+void
+path_var::dump (pretty_printer *pp) const
+{
+PUSH_IGNORE_WFORMAT
+  if (m_tree == NULL_TREE)
+    pp_string (pp, "NULL");
+  else if (CONSTANT_CLASS_P (m_tree))
+ pp_printf (pp, "%qE", m_tree);
+ else
+ pp_printf (pp, "(%qE @ %i)", m_tree, m_stack_depth);
+POP_IGNORE_WFORMAT
+}
+
+/* For use in printing a comma-separated list. */
+
+static void
+dump_separator (pretty_printer *pp, bool *is_first)
+{
+ if (!*is_first)
+ pp_string (pp, ", ");
+ *is_first = false;
+}
+
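+/* Informal usage sketch for dump_separator: callers typically write
+     bool is_first = true;
+     for each element:
+       {
+         dump_separator (pp, &is_first);
+         print the element to pp;
+       }
+   so that ", " is emitted between elements but not before the first
+   one. */
+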
+/* Concrete subclass of constraint_manager that wires it up to a region_model
+ (whilst allowing the constraint_manager and region_model to be somewhat
+   at arm's length).
+ TODO: revisit this; maybe put the region_model * into the constraint_manager
+ base class. */
+
+class impl_constraint_manager : public constraint_manager
+{
+ public:
+ impl_constraint_manager (region_model *model)
+ : constraint_manager (),
+ m_model (model)
+ {}
+
+ impl_constraint_manager (const impl_constraint_manager &other,
+ region_model *model)
+ : constraint_manager (other),
+ m_model (model)
+ {}
+
+ constraint_manager *clone (region_model *model) const
+ {
+ return new impl_constraint_manager (*this, model);
+ }
+
+ tree maybe_get_constant (svalue_id sid) const FINAL OVERRIDE
+ {
+ svalue *svalue = m_model->get_svalue (sid);
+ return svalue->maybe_get_constant ();
+ }
+
+ svalue_id get_sid_for_constant (tree cst) const FINAL OVERRIDE
+ {
+ gcc_assert (CONSTANT_CLASS_P (cst));
+ return m_model->get_rvalue (cst, NULL);
+ }
+
+ int get_num_svalues () const FINAL OVERRIDE
+ {
+ return m_model->get_num_svalues ();
+ }
+
+ private:
+ region_model *m_model;
+};
+
+/* class svalue_id. */
+
+/* Print this svalue_id to PP. */
+
+void
+svalue_id::print (pretty_printer *pp) const
+{
+ if (null_p ())
+ pp_printf (pp, "null");
+ else
+ pp_printf (pp, "sv%i", m_idx);
+}
+
+/* Print this svalue_id in .dot format to PP. */
+
+void
+svalue_id::dump_node_name_to_pp (pretty_printer *pp) const
+{
+ gcc_assert (!null_p ());
+ pp_printf (pp, "svalue_%i", m_idx);
+}
+
+/* Assert that this object is valid (w.r.t. MODEL). */
+
+void
+svalue_id::validate (const region_model &model) const
+{
+ gcc_assert (null_p () || m_idx < (int)model.get_num_svalues ());
+}
+
+/* class region_id. */
+
+/* Print this region_id to PP. */
+
+void
+region_id::print (pretty_printer *pp) const
+{
+ if (null_p ())
+ pp_printf (pp, "null");
+ else
+ pp_printf (pp, "r%i", m_idx);
+}
+
+/* Print this region_id in .dot format to PP. */
+
+void
+region_id::dump_node_name_to_pp (pretty_printer *pp) const
+{
+ gcc_assert (!null_p ());
+ pp_printf (pp, "region_%i", m_idx);
+}
+
+/* Assert that this object is valid (w.r.t. MODEL). */
+
+void
+region_id::validate (const region_model &model) const
+{
+ gcc_assert (null_p () || m_idx < (int)model.get_num_regions ());
+}
+
+/* class id_set. */
+
+/* id_set<region_id>'s ctor. */
+
+template<>
+id_set<region_id>::id_set (const region_model *model)
+: m_bitmap (model->get_num_regions ())
+{
+ bitmap_clear (m_bitmap);
+}
+
+/* class svalue and its various subclasses. */
+
+/* class svalue. */
+
+/* svalue's equality operator. Most of the work is done by a
+   "compare_fields" implementation on each subclass. */
+
+bool
+svalue::operator== (const svalue &other) const
+{
+ enum svalue_kind this_kind = get_kind ();
+ enum svalue_kind other_kind = other.get_kind ();
+ if (this_kind != other_kind)
+ return false;
+
+ if (m_type != other.m_type)
+ return false;
+
+ switch (this_kind)
+ {
+ default:
+ gcc_unreachable ();
+ case SK_REGION:
+ {
+ const region_svalue &this_sub
+ = (const region_svalue &)*this;
+ const region_svalue &other_sub
+ = (const region_svalue &)other;
+ return this_sub.compare_fields (other_sub);
+ }
+ break;
+ case SK_CONSTANT:
+ {
+ const constant_svalue &this_sub
+ = (const constant_svalue &)*this;
+ const constant_svalue &other_sub
+ = (const constant_svalue &)other;
+ return this_sub.compare_fields (other_sub);
+ }
+ break;
+ case SK_UNKNOWN:
+ {
+ const unknown_svalue &this_sub
+ = (const unknown_svalue &)*this;
+ const unknown_svalue &other_sub
+ = (const unknown_svalue &)other;
+ return this_sub.compare_fields (other_sub);
+ }
+ break;
+ case SK_POISONED:
+ {
+ const poisoned_svalue &this_sub
+ = (const poisoned_svalue &)*this;
+ const poisoned_svalue &other_sub
+ = (const poisoned_svalue &)other;
+ return this_sub.compare_fields (other_sub);
+ }
+ break;
+ case SK_SETJMP:
+ {
+ const setjmp_svalue &this_sub
+ = (const setjmp_svalue &)*this;
+ const setjmp_svalue &other_sub
+ = (const setjmp_svalue &)other;
+ return this_sub.compare_fields (other_sub);
+ }
+ break;
+ }
+}
+
+/* Generate a hash value for this svalue. Most of the work is done by the
+ add_to_hash vfunc. */
+
+hashval_t
+svalue::hash () const
+{
+ inchash::hash hstate;
+ if (m_type)
+ hstate.add_int (TYPE_UID (m_type));
+ add_to_hash (hstate);
+ return hstate.end ();
+}
+
+/* Print this svalue and its ID to PP. */
+
+void
+svalue::print (const region_model &model,
+ svalue_id this_sid,
+ pretty_printer *pp) const
+{
+ this_sid.print (pp);
+ pp_string (pp, ": {");
+
+PUSH_IGNORE_WFORMAT
+ if (m_type)
+ {
+ gcc_assert (TYPE_P (m_type));
+ pp_printf (pp, "type: %qT, ", m_type);
+ }
+POP_IGNORE_WFORMAT
+
+ /* vfunc. */
+ print_details (model, this_sid, pp);
+
+ pp_string (pp, "}");
+}
+
+/* Dump this svalue in the form of a .dot record to PP. */
+
+void
+svalue::dump_dot_to_pp (const region_model &model,
+ svalue_id this_sid,
+ pretty_printer *pp) const
+{
+ this_sid.dump_node_name_to_pp (pp);
+ pp_printf (pp, " [label=\"");
+ pp_write_text_to_stream (pp);
+ this_sid.print (pp);
+ pp_string (pp, ": {");
+ print (model, this_sid, pp);
+ pp_write_text_as_dot_label_to_stream (pp, /*for_record=*/false);
+ pp_string (pp, "}\"];");
+ pp_newline (pp);
+}
+
+/* Base implementation of svalue::remap_region_ids vfunc. */
+
+void
+svalue::remap_region_ids (const region_id_map &)
+{
+ /* Empty. */
+}
+
+/* Base implementation of svalue::walk_for_canonicalization vfunc. */
+
+void
+svalue::walk_for_canonicalization (canonicalization *) const
+{
+ /* Empty. */
+}
+
+/* Base implementation of svalue::get_child_sid vfunc. */
+
+svalue_id
+svalue::get_child_sid (region *parent ATTRIBUTE_UNUSED,
+ region *child,
+ region_model &model,
+ region_model_context *ctxt ATTRIBUTE_UNUSED)
+{
+ svalue *new_child_value = clone ();
+ if (child->get_type ())
+ new_child_value->m_type = child->get_type ();
+ svalue_id new_child_sid = model.add_svalue (new_child_value);
+ return new_child_sid;
+}
+
+/* If this svalue is a constant_svalue, return the underlying tree constant.
+ Otherwise return NULL_TREE. */
+
+tree
+svalue::maybe_get_constant () const
+{
+ if (const constant_svalue *cst_sval = dyn_cast_constant_svalue ())
+ return cst_sval->get_constant ();
+ else
+ return NULL_TREE;
+}
+
+/* class region_svalue : public svalue. */
+
+/* Compare the fields of this region_svalue with OTHER, returning true
+ if they are equal.
+ For use by svalue::operator==. */
+
+bool
+region_svalue::compare_fields (const region_svalue &other) const
+{
+ return m_rid == other.m_rid;
+}
+
+/* Implementation of svalue::add_to_hash vfunc for region_svalue. */
+
+void
+region_svalue::add_to_hash (inchash::hash &hstate) const
+{
+ inchash::add (m_rid, hstate);
+}
+
+/* Implementation of svalue::print_details vfunc for region_svalue. */
+
+void
+region_svalue::print_details (const region_model &model ATTRIBUTE_UNUSED,
+ svalue_id this_sid ATTRIBUTE_UNUSED,
+ pretty_printer *pp) const
+{
+ if (m_rid.null_p ())
+ pp_string (pp, "NULL");
+ else
+ {
+ pp_string (pp, "&");
+ m_rid.print (pp);
+ }
+}
+
+/* Implementation of svalue::dump_dot_to_pp for region_svalue. */
+
+void
+region_svalue::dump_dot_to_pp (const region_model &model,
+ svalue_id this_sid,
+ pretty_printer *pp) const
+{
+ svalue::dump_dot_to_pp (model, this_sid, pp);
+
+ /* If non-NULL, add an edge to the pointed-to region. */
+ if (!m_rid.null_p ())
+ {
+ this_sid.dump_node_name_to_pp (pp);
+ pp_string (pp, " -> ");
+ m_rid.dump_node_name_to_pp (pp);
+ pp_string (pp, ";");
+ pp_newline (pp);
+ }
+}
+
+/* Implementation of svalue::remap_region_ids vfunc for region_svalue. */
+
+void
+region_svalue::remap_region_ids (const region_id_map &map)
+{
+ map.update (&m_rid);
+}
+
+/* Merge REGION_SVAL_A and REGION_SVAL_B using MERGER, writing the result
+ into *MERGED_SID. */
+
+void
+region_svalue::merge_values (const region_svalue ®ion_sval_a,
+ const region_svalue ®ion_sval_b,
+ svalue_id *merged_sid,
+ tree type,
+ model_merger *merger)
+{
+ region_id a_rid = region_sval_a.get_pointee ();
+ region_id b_rid = region_sval_b.get_pointee ();
+
+ /* Both are non-NULL. */
+ gcc_assert (!a_rid.null_p () && !b_rid.null_p ());
+
+ /* Have these ptr-values already been merged? */
+
+ region_id a_rid_in_m
+ = merger->m_map_regions_from_a_to_m.get_dst_for_src (a_rid);
+ region_id b_rid_in_m
+ = merger->m_map_regions_from_b_to_m.get_dst_for_src (b_rid);
+
+ /* "null_p" here means "we haven't seen this ptr-value before".
+ If we've seen one but not the other, or we have different
+ regions, then the merged ptr has to be "unknown". */
+ if (a_rid_in_m != b_rid_in_m)
+ {
+ svalue *merged_sval = new unknown_svalue (type);
+ *merged_sid = merger->m_merged_model->add_svalue (merged_sval);
+ return;
+ }
+
+ /* Have we seen this yet? If so, reuse the value. */
+ if (!a_rid_in_m.null_p ())
+ {
+ *merged_sid
+ = merger->m_merged_model->get_or_create_ptr_svalue (type, a_rid_in_m);
+ return;
+ }
+
+ /* Otherwise we have A/B regions that haven't been referenced yet. */
+
+  /* Are the regions the "same", when seen from the tree point-of-view?
+     If so, create a merged pointer to it. */
+ path_var pv_a = merger->m_model_a->get_representative_path_var (a_rid);
+ path_var pv_b = merger->m_model_b->get_representative_path_var (b_rid);
+ if (pv_a.m_tree
+ && pv_a == pv_b)
+ {
+ region_id merged_pointee_rid
+ = merger->m_merged_model->get_lvalue (pv_a, NULL);
+ *merged_sid
+ = merger->m_merged_model->get_or_create_ptr_svalue (type,
+ merged_pointee_rid);
+ merger->record_regions (a_rid, b_rid, merged_pointee_rid);
+ return;
+ }
+
+ /* Handle an A/B pair of ptrs that both point at heap regions.
+ If they both have a heap region in the merger model, merge them. */
+ region *region_a = merger->m_model_a->get_region (a_rid);
+ region *region_b = merger->m_model_b->get_region (b_rid);
+ region_id a_parent_rid = region_a->get_parent ();
+ region_id b_parent_rid = region_b->get_parent ();
+ region *parent_region_a = merger->m_model_a->get_region (a_parent_rid);
+ region *parent_region_b = merger->m_model_b->get_region (b_parent_rid);
+ if (parent_region_a
+ && parent_region_b
+ && parent_region_a->get_kind () == RK_HEAP
+ && parent_region_b->get_kind () == RK_HEAP)
+ {
+ /* We have an A/B pair of ptrs that both point at heap regions. */
+      /* Presumably we want to see if each A/B heap region already
+         has a merged region and, if so, whether it is the same one;
+         that check has already been done above. */
+
+ region_id merged_pointee_rid
+ = merger->m_merged_model->add_new_malloc_region ();
+ *merged_sid
+ = merger->m_merged_model->get_or_create_ptr_svalue
+ (type, merged_pointee_rid);
+ merger->record_regions (a_rid, b_rid, merged_pointee_rid);
+ return;
+ }
+
+ /* Two different non-NULL pointers? Merge to unknown. */
+ svalue *merged_sval = new unknown_svalue (type);
+ *merged_sid = merger->m_merged_model->add_svalue (merged_sval);
+ return;
+}
+
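+/* Informal example of the above merger logic: when merging the states of
+   two paths that both reach
+     int x;
+     int *p = &x;
+   both pointer values refer to the region for "x", so the merged value
+   for "p" is a pointer to the corresponding region in the merged model.
+   If one path instead had "p = &y", the merged value becomes "unknown".
+   Pointers to two malloc-ed regions are merged as a pointer to a fresh
+   heap region in the merged model.  (Sketch only; "x", "y" and "p" are
+   hypothetical.) */
+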
+/* Implementation of svalue::walk_for_canonicalization vfunc for
+ region_svalue. */
+
+void
+region_svalue::walk_for_canonicalization (canonicalization *c) const
+{
+ c->walk_rid (m_rid);
+}
+
+/* Evaluate the condition LHS OP RHS.
+ Subroutine of region_model::eval_condition for when we have a pair of
+ pointers. */
+
+tristate
+region_svalue::eval_condition (region_svalue *lhs,
+ enum tree_code op,
+ region_svalue *rhs)
+{
+ /* See if they point to the same region. */
+ /* TODO: what about child regions where the child is the first child
+ (or descendent)? */
+ region_id lhs_rid = lhs->get_pointee ();
+ region_id rhs_rid = rhs->get_pointee ();
+ switch (op)
+ {
+ default:
+ gcc_unreachable ();
+
+ case EQ_EXPR:
+ if (lhs_rid == rhs_rid)
+ return tristate::TS_TRUE;
+ else
+ return tristate::TS_FALSE;
+ break;
+
+ case NE_EXPR:
+ if (lhs_rid != rhs_rid)
+ return tristate::TS_TRUE;
+ else
+ return tristate::TS_FALSE;
+ break;
+
+ case GE_EXPR:
+ case LE_EXPR:
+ if (lhs_rid == rhs_rid)
+ return tristate::TS_TRUE;
+ break;
+
+ case GT_EXPR:
+ case LT_EXPR:
+ if (lhs_rid == rhs_rid)
+ return tristate::TS_FALSE;
+ break;
+ }
+
+ return tristate::TS_UNKNOWN;
+}
+
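+/* Informal example of the above: given
+     int a, b;
+     int *p = &a, *q = &a, *r = &b;
+   then "p == q" and "p <= q" evaluate to true and "p < q" to false,
+   since p and q point at the same region, whereas "p == r" is false and
+   "p < r" is unknown, since distinct regions are modeled as having
+   distinct (but unordered) addresses.  (Sketch only; subject to the TODO
+   above about child regions sharing an address.) */
+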
+/* class constant_svalue : public svalue. */
+
+/* Compare the fields of this constant_svalue with OTHER, returning true
+ if they are equal.
+ For use by svalue::operator==. */
+
+bool
+constant_svalue::compare_fields (const constant_svalue &other) const
+{
+ return m_cst_expr == other.m_cst_expr;
+}
+
+/* Implementation of svalue::add_to_hash vfunc for constant_svalue. */
+
+void
+constant_svalue::add_to_hash (inchash::hash &hstate) const
+{
+ inchash::add_expr (m_cst_expr, hstate);
+}
+
+/* Merge the CST_SVAL_A and CST_SVAL_B using MERGER, writing the id of
+ the resulting svalue into *MERGED_SID. */
+
+void
+constant_svalue::merge_values (const constant_svalue &cst_sval_a,
+ const constant_svalue &cst_sval_b,
+ svalue_id *merged_sid,
+ model_merger *merger)
+{
+ tree cst_a = cst_sval_a.get_constant ();
+ tree cst_b = cst_sval_b.get_constant ();
+ svalue *merged_sval;
+ if (cst_a == cst_b)
+ {
+ /* If they are the same constant, merge as that constant value. */
+ merged_sval = new constant_svalue (cst_a);
+ }
+ else
+ {
+ /* Otherwise, we have two different constant values.
+ Merge as an unknown value.
+ TODO: impose constraints on the value?
+ (maybe just based on A, to avoid infinite chains) */
+ merged_sval = new unknown_svalue (TREE_TYPE (cst_a));
+ }
+ *merged_sid = merger->m_merged_model->add_svalue (merged_sval);
+}
+
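+/* Informal example of the above: merging the constant 42 with the
+   constant 42 yields the constant 42, whereas merging 42 with 43 yields
+   an unknown value of the same type.  (Sketch only.) */
+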
+/* Evaluate the condition LHS OP RHS.
+ Subroutine of region_model::eval_condition for when we have a pair of
+ constants. */
+
+tristate
+constant_svalue::eval_condition (constant_svalue *lhs,
+ enum tree_code op,
+ constant_svalue *rhs)
+{
+ tree lhs_const = lhs->get_constant ();
+ tree rhs_const = rhs->get_constant ();
+
+ gcc_assert (CONSTANT_CLASS_P (lhs_const));
+ gcc_assert (CONSTANT_CLASS_P (rhs_const));
+
+ tree comparison
+ = fold_build2 (op, boolean_type_node, lhs_const, rhs_const);
+ if (comparison == boolean_true_node)
+ return tristate (tristate::TS_TRUE);
+ if (comparison == boolean_false_node)
+ return tristate (tristate::TS_FALSE);
+ return tristate::TS_UNKNOWN;
+}
+
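+/* Informal example of the above: for a pair of constants the condition
+   is simply folded, so "3 < 4" evaluates to true and "3 > 4" to false;
+   if fold_build2 cannot reduce the comparison to a boolean constant,
+   the result is unknown.  (Sketch only.) */
+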
+/* Implementation of svalue::print_details vfunc for constant_svalue. */
+
+void
+constant_svalue::print_details (const region_model &model ATTRIBUTE_UNUSED,
+ svalue_id this_sid ATTRIBUTE_UNUSED,
+ pretty_printer *pp) const
+{
+PUSH_IGNORE_WFORMAT
+ pp_printf (pp, "%qE", m_cst_expr);
+POP_IGNORE_WFORMAT
+}
+
+/* Implementation of svalue::get_child_sid vfunc for constant_svalue. */
+
+svalue_id
+constant_svalue::get_child_sid (region *parent ATTRIBUTE_UNUSED,
+ region *child,
+ region_model &model,
+ region_model_context *ctxt ATTRIBUTE_UNUSED)
+{
+  /* TODO: handle the all-zeroes case by returning an all-zeroes value of
+     the child type. */
+
+ /* Otherwise, we don't have a good way to get a child value out of a
+ constant.
+
+ Handle this case by using an unknown value. */
+ svalue *unknown_sval = new unknown_svalue (child->get_type ());
+ return model.add_svalue (unknown_sval);
+}
+
+/* class unknown_svalue : public svalue. */
+
+/* Compare the fields of this unknown_svalue with OTHER, returning true
+ if they are equal.
+ For use by svalue::operator==. */
+
+bool
+unknown_svalue::compare_fields (const unknown_svalue &) const
+{
+ /* I *think* we want to return true here, in that when comparing
+ two region models, we want two peer unknown_svalue instances
+ to be the "same". */
+ return true;
+}
+
+/* Implementation of svalue::add_to_hash vfunc for unknown_svalue. */
+
+void
+unknown_svalue::add_to_hash (inchash::hash &) const
+{
+ /* Empty. */
+}
+
+/* Implementation of svalue::print_details vfunc for unknown_svalue. */
+
+void
+unknown_svalue::print_details (const region_model &model ATTRIBUTE_UNUSED,
+ svalue_id this_sid ATTRIBUTE_UNUSED,
+ pretty_printer *pp) const
+{
+ pp_string (pp, "unknown");
+}
+
+/* Get a string for KIND for use in debug dumps. */
+
+const char *
+poison_kind_to_str (enum poison_kind kind)
+{
+ switch (kind)
+ {
+ default:
+ gcc_unreachable ();
+ case POISON_KIND_UNINIT:
+ return "uninit";
+ case POISON_KIND_FREED:
+ return "freed";
+ case POISON_KIND_POPPED_STACK:
+ return "popped stack";
+ }
+}
+
+/* class poisoned_svalue : public svalue. */
+
+/* Compare the fields of this poisoned_svalue with OTHER, returning true
+ if they are equal.
+ For use by svalue::operator==. */
+
+bool
+poisoned_svalue::compare_fields (const poisoned_svalue &other) const
+{
+ return m_kind == other.m_kind;
+}
+
+/* Implementation of svalue::add_to_hash vfunc for poisoned_svalue. */
+
+void
+poisoned_svalue::add_to_hash (inchash::hash &hstate) const
+{
+ hstate.add_int (m_kind);
+}
+
+/* Implementation of svalue::print_details vfunc for poisoned_svalue. */
+
+void
+poisoned_svalue::print_details (const region_model &model ATTRIBUTE_UNUSED,
+ svalue_id this_sid ATTRIBUTE_UNUSED,
+ pretty_printer *pp) const
+{
+ pp_printf (pp, "poisoned: %s", poison_kind_to_str (m_kind));
+}
+
+/* class setjmp_svalue's implementation is in engine.cc, so that it can use
+ the declaration of exploded_node. */
+
+/* class region and its various subclasses. */
+
+/* Get a string for KIND for use in debug dumps. */
+
+const char *
+region_kind_to_str (enum region_kind kind)
+{
+ switch (kind)
+ {
+ default:
+ gcc_unreachable ();
+ case RK_PRIMITIVE:
+ return "primitive";
+ case RK_STRUCT:
+ return "struct";
+ case RK_UNION:
+ return "union";
+ case RK_ARRAY:
+ return "array";
+ case RK_FRAME:
+ return "frame";
+ case RK_GLOBALS:
+ return "globals";
+ case RK_CODE:
+ return "code";
+ case RK_FUNCTION:
+ return "function";
+ case RK_STACK:
+ return "stack";
+ case RK_HEAP:
+ return "heap";
+ case RK_ROOT:
+ return "root";
+ case RK_SYMBOLIC:
+ return "symbolic";
+ }
+}
+
+/* class region. */
+
+/* Equality operator for region.
+ After comparing base class fields and kind, the rest of the
+   comparison is handed off to a "compare_fields" member function
+ specific to the appropriate subclass. */
+
+bool
+region::operator== (const region &other) const
+{
+ if (m_parent_rid != other.m_parent_rid)
+ return false;
+ if (m_sval_id != other.m_sval_id)
+ return false;
+ if (m_type != other.m_type)
+ return false;
+
+ enum region_kind this_kind = get_kind ();
+ enum region_kind other_kind = other.get_kind ();
+ if (this_kind != other_kind)
+ return false;
+
+ /* Compare views. */
+ if (m_view_rids.length () != other.m_view_rids.length ())
+ return false;
+ int i;
+ region_id *rid;
+ FOR_EACH_VEC_ELT (m_view_rids, i, rid)
+ if (! (*rid == other.m_view_rids[i]))
+ return false;
+
+ switch (this_kind)
+ {
+ default:
+ gcc_unreachable ();
+ case RK_PRIMITIVE:
+ {
+#if 1
+ return true;
+#else
+ const primitive_region &this_sub
+ = (const primitive_region &)*this;
+ const primitive_region &other_sub
+ = (const primitive_region &)other;
+ return this_sub.compare_fields (other_sub);
+#endif
+ }
+ case RK_STRUCT:
+ {
+ const struct_region &this_sub
+ = (const struct_region &)*this;
+ const struct_region &other_sub
+ = (const struct_region &)other;
+ return this_sub.compare_fields (other_sub);
+ }
+ case RK_UNION:
+ {
+ const union_region &this_sub
+ = (const union_region &)*this;
+ const union_region &other_sub
+ = (const union_region &)other;
+ return this_sub.compare_fields (other_sub);
+ }
+ case RK_ARRAY:
+ {
+ const array_region &this_sub
+ = (const array_region &)*this;
+ const array_region &other_sub
+ = (const array_region &)other;
+ return this_sub.compare_fields (other_sub);
+ }
+ case RK_FRAME:
+ {
+ const frame_region &this_sub
+ = (const frame_region &)*this;
+ const frame_region &other_sub
+ = (const frame_region &)other;
+ return this_sub.compare_fields (other_sub);
+ }
+ case RK_GLOBALS:
+ {
+ const globals_region &this_sub
+ = (const globals_region &)*this;
+ const globals_region &other_sub
+ = (const globals_region &)other;
+ return this_sub.compare_fields (other_sub);
+ }
+ case RK_CODE:
+ {
+ const code_region &this_sub
+ = (const code_region &)*this;
+ const code_region &other_sub
+ = (const code_region &)other;
+ return this_sub.compare_fields (other_sub);
+ }
+ case RK_FUNCTION:
+ {
+ const function_region &this_sub
+ = (const function_region &)*this;
+ const function_region &other_sub
+ = (const function_region &)other;
+ return this_sub.compare_fields (other_sub);
+ }
+ case RK_STACK:
+ {
+ const stack_region &this_sub
+ = (const stack_region &)*this;
+ const stack_region &other_sub
+ = (const stack_region &)other;
+ return this_sub.compare_fields (other_sub);
+ }
+ case RK_ROOT:
+ {
+ const root_region &this_sub
+ = (const root_region &)*this;
+ const root_region &other_sub
+ = (const root_region &)other;
+ return this_sub.compare_fields (other_sub);
+ }
+ case RK_SYMBOLIC:
+ {
+ const symbolic_region &this_sub
+ = (const symbolic_region &)*this;
+ const symbolic_region &other_sub
+ = (const symbolic_region &)other;
+ return this_sub.compare_fields (other_sub);
+ }
+ case RK_HEAP:
+ {
+ const heap_region &this_sub
+ = (const heap_region &)*this;
+ const heap_region &other_sub
+ = (const heap_region &)other;
+ return this_sub.compare_fields (other_sub);
+ }
+ }
+}
+
+/* Get the parent region of this region. */
+
+region *
+region::get_parent_region (const region_model &model) const
+{
+ return model.get_region (m_parent_rid);
+}
+
+/* Set this region's value to RHS_SID (or potentially a variant of it,
+ for some kinds of casts). */
+
+void
+region::set_value (region_model &model, region_id this_rid, svalue_id rhs_sid,
+ region_model_context *ctxt)
+{
+ /* Handle some kinds of casting. */
+ if (m_type)
+ {
+ svalue *sval = model.get_svalue (rhs_sid);
+ if (sval->get_type ())
+ rhs_sid = model.maybe_cast (m_type, rhs_sid, ctxt);
+
+ sval = model.get_svalue (rhs_sid);
+ if (sval->get_type ())
+ gcc_assert (m_type == sval->get_type ());
+ }
+
+ m_sval_id = rhs_sid;
+
+ /* Update views.
+ If this is a view, it becomes its parent's active view.
+     If there was already an active view, invalidate its value; otherwise
+ if the parent itself had a value, invalidate it.
+ If it's not a view, then deactivate any view that is active on this
+ region. */
+ {
+ if (m_is_view)
+ become_active_view (model, this_rid);
+ else
+ {
+ deactivate_any_active_view (model);
+ gcc_assert (m_active_view_rid.null_p ());
+ }
+ }
+}
+
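+/* Informal example of the view handling above: if a region of type "int"
+   also has a view of type "float" (e.g. due to accesses through a cast),
+   then writing through the "float" view makes it the active view and
+   invalidates the value stored via the parent (and vice versa), so that
+   type-punned reads yield "unknown" rather than a stale value.
+   (Rough sketch of the intent; see become_active_view and
+   deactivate_view below.) */
+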
+/* Make this region (with id THIS_RID) the "active" view of its parent.
+ Any other active view has its value set to "unknown" and descendent values
+ cleared.
+ If there wasn't an active view, then set the parent's value to unknown, and
+ clear its descendent values (apart from this view). */
+
+void
+region::become_active_view (region_model &model, region_id this_rid)
+{
+ gcc_assert (m_is_view);
+
+ region *parent_reg = model.get_region (m_parent_rid);
+ gcc_assert (parent_reg);
+
+ region_id old_active_view_rid = parent_reg->m_active_view_rid;
+
+ if (old_active_view_rid == this_rid)
+ {
+ /* Already the active view: do nothing. */
+ return;
+ }
+
+ /* We have a change of active view. */
+ parent_reg->m_active_view_rid = this_rid;
+
+ if (old_active_view_rid.null_p ())
+ {
+ /* No previous active view, but the parent and its other children
+ might have values.
+ If so, invalidate those values - but not that of the new view. */
+ region_id_set below_region (&model);
+ model.get_descendents (m_parent_rid, &below_region, this_rid);
+ for (unsigned i = 0; i < model.get_num_regions (); i++)
+ {
+ region_id rid (region_id::from_int (i));
+ if (below_region.region_p (rid))
+ {
+ region *other_reg = model.get_region (rid);
+ other_reg->m_sval_id = svalue_id::null ();
+ }
+ }
+ region *parent = model.get_region (m_parent_rid);
+ parent->m_sval_id
+ = model.add_svalue (new unknown_svalue (parent->get_type ()));
+ }
+ else
+ {
+ /* If there was an active view, invalidate it. */
+ region *old_active_view = model.get_region (old_active_view_rid);
+ old_active_view->deactivate_view (model, old_active_view_rid);
+ }
+}
+
+/* If this region has an active view, deactivate it, clearing
+   m_active_view_rid. */
+
+void
+region::deactivate_any_active_view (region_model &model)
+{
+ if (m_active_view_rid.null_p ())
+ return;
+ region *view = model.get_region (m_active_view_rid);
+ view->deactivate_view (model, m_active_view_rid);
+ m_active_view_rid = region_id::null ();
+}
+
+/* Clear any values for regions below THIS_VIEW_RID.
+   Set the view's value to unknown. */
+
+void
+region::deactivate_view (region_model &model, region_id this_view_rid)
+{
+ gcc_assert (is_view_p ());
+
+  /* Purge values from THIS_VIEW_RID and all its descendents.
+     Potentially we could use a poison value for this, but let's use
+     unknown for now. */
+ region_id_set below_view (&model);
+ model.get_descendents (this_view_rid, &below_view, region_id::null ());
+
+ for (unsigned i = 0; i < model.get_num_regions (); i++)
+ {
+ region_id rid (region_id::from_int (i));
+ if (below_view.region_p (rid))
+ {
+ region *other_reg = model.get_region (rid);
+ other_reg->m_sval_id = svalue_id::null ();
+ }
+ }
+
+ m_sval_id = model.add_svalue (new unknown_svalue (get_type ()));
+}
+
+/* Get a value for this region, either its value if it has one,
+ or, failing that, "inherit" a value from first ancestor with a
+ non-null value.
+
+   For example, when getting the value for a local variable that doesn't
+   yet have one, the enclosing stack frame doesn't have a value either,
+   but the stack region as a whole will have an "uninitialized" poison
+   value, so inherit that. */
+
+svalue_id
+region::get_value (region_model &model, bool non_null,
+ region_model_context *ctxt)
+{
+ /* If this region has a value, use it. */
+ if (!m_sval_id.null_p ())
+ return m_sval_id;
+
+ /* Otherwise, "inherit" value from first ancestor with a
+ non-null value. */
+
+ region *parent = model.get_region (m_parent_rid);
+ if (parent)
+ {
+ svalue_id inherited_sid
+ = parent->get_inherited_child_sid (this, model, ctxt);
+ if (!inherited_sid.null_p ())
+ return inherited_sid;
+ }
+
+ /* If a non-null value has been requested, then generate
+ a new unknown value. Store it, so that repeated reads from this
+ region will yield the same unknown value. */
+ if (non_null)
+ {
+ svalue_id unknown_sid = model.add_svalue (new unknown_svalue (m_type));
+ m_sval_id = unknown_sid;
+ return unknown_sid;
+ }
+
+ return svalue_id::null ();
+}
+
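+/* Informal example of the inheritance above: in
+     void test (void)
+     {
+       int i;
+       use_it (i);
+     }
+   the region for "i" has no value of its own, and neither does the frame
+   for "test", so reading "i" inherits the "uninitialized" poison value
+   from the stack region; this is what lets the analyzer complain about
+   uses of uninitialized values.  (Sketch; "test" and "use_it" are
+   hypothetical.) */
+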
+/* Get a value for CHILD, inheriting from this region.
+
+ Recurse, so this region will inherit a value if it doesn't already
+ have one. */
+
+svalue_id
+region::get_inherited_child_sid (region *child,
+ region_model &model,
+ region_model_context *ctxt)
+{
+ if (m_sval_id.null_p ())
+ {
+ /* Recurse. */
+ if (!m_parent_rid.null_p ())
+ {
+ region *parent = model.get_region (m_parent_rid);
+ m_sval_id = parent->get_inherited_child_sid (this, model, ctxt);
+ }
+ }
+
+ if (!m_sval_id.null_p ())
+ {
+ /* Clone the parent's value, so that attempts to update it
+         (e.g. giving a specific value to an inherited "uninitialized"
+ value) touch the child, and not the parent. */
+ svalue *this_value = model.get_svalue (m_sval_id);
+ svalue_id new_child_sid
+ = this_value->get_child_sid (this, child, model, ctxt);
+ if (ctxt)
+ ctxt->on_inherited_svalue (m_sval_id, new_child_sid);
+ child->m_sval_id = new_child_sid;
+ return new_child_sid;
+ }
+
+ return svalue_id::null ();
+}
+
+/* Generate a hash value for this region. The work is done by the
+ add_to_hash vfunc. */
+
+hashval_t
+region::hash () const
+{
+ inchash::hash hstate;
+ add_to_hash (hstate);
+ return hstate.end ();
+}
+
+/* Print a one-liner representation of this region to PP, assuming
+ that this region is within MODEL and its id is THIS_RID. */
+
+void
+region::print (const region_model &model,
+ region_id this_rid,
+ pretty_printer *pp) const
+{
+ this_rid.print (pp);
+ pp_string (pp, ": {");
+
+ /* vfunc. */
+ print_fields (model, this_rid, pp);
+
+ pp_string (pp, "}");
+}
+
+/* Base class implementation of region::dump_dot_to_pp vfunc. */
+
+void
+region::dump_dot_to_pp (const region_model &model,
+ region_id this_rid,
+ pretty_printer *pp) const
+{
+ this_rid.dump_node_name_to_pp (pp);
+ pp_printf (pp, " [shape=none,margin=0,style=filled,fillcolor=%s,label=\"",
+ "lightgrey");
+ pp_write_text_to_stream (pp);
+ print (model, this_rid, pp);
+ pp_write_text_as_dot_label_to_stream (pp, /*for_record=*/false);
+ pp_string (pp, "\"];");
+ pp_newline (pp);
+
+ /* Add edge to svalue. */
+ if (!m_sval_id.null_p ())
+ {
+ this_rid.dump_node_name_to_pp (pp);
+ pp_string (pp, " -> ");
+ m_sval_id.dump_node_name_to_pp (pp);
+ pp_string (pp, ";");
+ pp_newline (pp);
+ }
+
+ /* Add edge to parent. */
+ if (!m_parent_rid.null_p ())
+ {
+ this_rid.dump_node_name_to_pp (pp);
+ pp_string (pp, " -> ");
+ m_parent_rid.dump_node_name_to_pp (pp);
+ pp_string (pp, ";");
+ pp_newline (pp);
+ }
+}
+
+/* Dump a tree-like ASCII-art representation of this region to PP. */
+
+void
+region::dump_to_pp (const region_model &model,
+ region_id this_rid,
+ pretty_printer *pp,
+ const char *prefix,
+ bool is_last_child) const
+{
+ print (model, this_rid, pp);
+ pp_newline (pp);
+
+ const char *new_prefix;
+ if (!m_parent_rid.null_p ())
+ new_prefix = ACONCAT ((prefix, is_last_child ? " " : "| ", NULL));
+ else
+ new_prefix = prefix;
+
+ const char *begin_color = colorize_start (pp_show_color (pp), "note");
+ const char *end_color = colorize_stop (pp_show_color (pp));
+ char *field_prefix
+ = ACONCAT ((begin_color, new_prefix, "|:", end_color, NULL));
+
+ if (!m_sval_id.null_p ())
+ {
+ pp_printf (pp, "%s sval: ", field_prefix);
+ model.get_svalue (m_sval_id)->print (model, m_sval_id, pp);
+ pp_newline (pp);
+ }
+ if (m_type)
+ {
+PUSH_IGNORE_WFORMAT
+ pp_printf (pp, "%s type: %qT", field_prefix, m_type);
+POP_IGNORE_WFORMAT
+ pp_newline (pp);
+ }
+
+ /* Find the children. */
+
+ auto_vec<region_id> child_rids;
+ unsigned i;
+  for (i = 0; i < model.get_num_regions (); ++i)
+ {
+ region_id rid = region_id::from_int (i);
+ region *child = model.get_region (rid);
+ if (child->m_parent_rid == this_rid)
+ child_rids.safe_push (rid);
+ }
+
+ /* Print the children, using dump_child_label to label them. */
+
+ region_id *child_rid;
+ FOR_EACH_VEC_ELT (child_rids, i, child_rid)
+ {
+ is_last_child = (i == child_rids.length () - 1);
+ if (!this_rid.null_p ())
+ {
+ const char *tail = is_last_child ? "`-" : "|-";
+ pp_printf (pp, "%r%s%s%R", "note", new_prefix, tail);
+ }
+ dump_child_label (model, this_rid, *child_rid, pp);
+ model.get_region (*child_rid)->dump_to_pp (model, *child_rid, pp,
+ new_prefix,
+ is_last_child);
+ }
+}
+
+/* Base implementation of region::dump_child_label vfunc. */
+
+void
+region::dump_child_label (const region_model &model,
+ region_id this_rid ATTRIBUTE_UNUSED,
+ region_id child_rid,
+ pretty_printer *pp) const
+{
+ region *child = model.get_region (child_rid);
+ if (child->m_is_view)
+ {
+ gcc_assert (TYPE_P (child->get_type ()));
+ if (m_active_view_rid == child_rid)
+ pp_string (pp, "active ");
+ else
+ pp_string (pp, "inactive ");
+PUSH_IGNORE_WFORMAT
+ pp_printf (pp, "view as %qT: ", child->get_type ());
+POP_IGNORE_WFORMAT
+ }
+}
+
+/* Assert that this object is valid. */
+
+void
+region::validate (const region_model *model) const
+{
+ m_parent_rid.validate (*model);
+ m_sval_id.validate (*model);
+ unsigned i;
+ region_id *view_rid;
+ FOR_EACH_VEC_ELT (m_view_rids, i, view_rid)
+ {
+ gcc_assert (!view_rid->null_p ());
+ view_rid->validate (*model);
+ }
+ m_active_view_rid.validate (*model);
+}
+
+/* Apply MAP to svalue_ids to this region. This updates the value
+ for the region (if any). */
+
+void
+region::remap_svalue_ids (const svalue_id_map &map)
+{
+ map.update (&m_sval_id);
+}
+
+/* Base implementation of region::remap_region_ids vfunc; subclasses should
+ chain up to this, updating any region_id data. */
+
+void
+region::remap_region_ids (const region_id_map &map)
+{
+ map.update (&m_parent_rid);
+ unsigned i;
+ region_id *view_rid;
+ FOR_EACH_VEC_ELT (m_view_rids, i, view_rid)
+ map.update (view_rid);
+ map.update (&m_active_view_rid);
+}
+
+/* Add a new region with id VIEW_RID as a view of this region. */
+
+void
+region::add_view (region_id view_rid, region_model *model)
+{
+ gcc_assert (!view_rid.null_p ());
+ region *new_view = model->get_region (view_rid);
+ new_view->m_is_view = true;
+ gcc_assert (!new_view->m_parent_rid.null_p ());
+ gcc_assert (new_view->m_sval_id.null_p ());
+
+ //gcc_assert (new_view->get_type () != NULL_TREE);
+ // TODO: this can sometimes be NULL, when viewing through a (void *)
+
+ // TODO: the type ought to not be present yet
+
+ m_view_rids.safe_push (view_rid);
+}
+
+/* Look for a view of type TYPE of this region, returning its id if found,
+ or null otherwise. */
+
+region_id
+region::get_view (tree type, region_model *model) const
+{
+ unsigned i;
+ region_id *view_rid;
+ FOR_EACH_VEC_ELT (m_view_rids, i, view_rid)
+ {
+ region *view = model->get_region (*view_rid);
+ gcc_assert (view->m_is_view);
+ if (view->get_type () == type)
+ return *view_rid;
+ }
+ return region_id::null ();
+}
+
+/* region's ctor. */
+
+region::region (region_id parent_rid, svalue_id sval_id, tree type)
+: m_parent_rid (parent_rid), m_sval_id (sval_id), m_type (type),
+ m_view_rids (), m_is_view (false), m_active_view_rid (region_id::null ())
+{
+ gcc_assert (type == NULL_TREE || TYPE_P (type));
+}
+
+/* region's copy ctor. */
+
+region::region (const region &other)
+: m_parent_rid (other.m_parent_rid), m_sval_id (other.m_sval_id),
+ m_type (other.m_type), m_view_rids (other.m_view_rids.length ()),
+ m_is_view (other.m_is_view), m_active_view_rid (other.m_active_view_rid)
+{
+ int i;
+ region_id *rid;
+ FOR_EACH_VEC_ELT (other.m_view_rids, i, rid)
+ m_view_rids.quick_push (*rid);
+}
+
+/* Base implementation of region::add_to_hash vfunc; subclasses should
+ chain up to this. */
+
+void
+region::add_to_hash (inchash::hash &hstate) const
+{
+ inchash::add (m_parent_rid, hstate);
+ inchash::add (m_sval_id, hstate);
+ hstate.add_ptr (m_type);
+ // TODO: views
+}
+
+/* Base implementation of region::print_fields vfunc. */
+
+void
+region::print_fields (const region_model &model ATTRIBUTE_UNUSED,
+ region_id this_rid ATTRIBUTE_UNUSED,
+ pretty_printer *pp) const
+{
+ pp_printf (pp, "kind: %qs", region_kind_to_str (get_kind ()));
+
+ pp_string (pp, ", parent: ");
+ m_parent_rid.print (pp);
+
+ pp_printf (pp, ", sval: ");
+ m_sval_id.print (pp);
+
+PUSH_IGNORE_WFORMAT
+ if (m_type)
+ pp_printf (pp, ", type: %qT", m_type);
+POP_IGNORE_WFORMAT
+}
+
+/* Determine if a pointer to this region must be non-NULL.
+
+ Generally, pointers to regions must be non-NULL, but pointers
+ to symbolic_regions might, in fact, be NULL.
+
+   This allows us to simulate functions like malloc and calloc with:
+   - only one "outcome" from each statement,
+   - the idea that the pointer is on the heap if non-NULL,
+   - the possibility that the pointer could be NULL,
+   - the idea that successive values returned from malloc are non-equal,
+   - the ability to zero-fill for calloc. */
+
+bool
+region::non_null_p (const region_model &model) const
+{
+ /* Look through views to get at the underlying region. */
+ if (is_view_p ())
+ return model.get_region (m_parent_rid)->non_null_p (model);
+
+ /* Are we within a symbolic_region? If so, it could be NULL. */
+ if (const symbolic_region *sym_reg = dyn_cast_symbolic_region ())
+ {
+ if (sym_reg->m_possibly_null)
+ return false;
+ }
+
+ return true;
+}
+
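+/* Informal example of the above: for
+     void *p = malloc (1);
+   "p" points into a symbolic region that is flagged as possibly null,
+   so non_null_p returns false and the analyzer keeps the possibility
+   that "p" is NULL, whereas a pointer such as "&i" to an ordinary local
+   is treated as definitely non-NULL.  (Sketch only.) */
+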
+/* class primitive_region : public region. */
+
+/* Implementation of region::clone vfunc for primitive_region. */
+
+region *
+primitive_region::clone () const
+{
+ return new primitive_region (*this);
+}
+
+/* Implementation of region::walk_for_canonicalization vfunc for
+ primitive_region. */
+
+void
+primitive_region::walk_for_canonicalization (canonicalization *) const
+{
+ /* Empty. */
+}
+
+/* class map_region : public region. */
+
+/* map_region's copy ctor. */
+
+map_region::map_region (const map_region &other)
+: region (other),
+ m_map (other.m_map)
+{
+}
+
+/* Compare the fields of this map_region with OTHER, returning true
+ if they are equal.
+ For use by region::operator==. */
+
+bool
+map_region::compare_fields (const map_region &other) const
+{
+ if (m_map.elements () != other.m_map.elements ())
+ return false;
+
+ for (map_t::iterator iter = m_map.begin ();
+ iter != m_map.end ();
+ ++iter)
+ {
+ tree key = (*iter).first;
+ region_id e = (*iter).second;
+ region_id *other_slot = const_cast <map_t &> (other.m_map).get (key);
+ if (other_slot == NULL)
+ return false;
+ if (e != *other_slot)
+ return false;
+ }
+ return true;
+}
+
+/* Implementation of region::print_fields vfunc for map_region. */
+
+void
+map_region::print_fields (const region_model &model,
+ region_id this_rid,
+ pretty_printer *pp) const
+{
+ region::print_fields (model, this_rid, pp);
+ pp_string (pp, ", map: {");
+ for (map_t::iterator iter = m_map.begin ();
+ iter != m_map.end ();
+ ++iter)
+ {
+ if (iter != m_map.begin ())
+ pp_string (pp, ", ");
+ tree expr = (*iter).first;
+ region_id child_rid = (*iter).second;
+PUSH_IGNORE_WFORMAT
+ pp_printf (pp, "%qE: ", expr);
+POP_IGNORE_WFORMAT
+ child_rid.print (pp);
+ }
+ pp_string (pp, "}");
+}
+
+/* Implementation of region::dump_dot_to_pp vfunc for map_region. */
+
+void
+map_region::dump_dot_to_pp (const region_model &model,
+ region_id this_rid,
+ pretty_printer *pp) const
+{
+ region::dump_dot_to_pp (model, this_rid, pp);
+ for (map_t::iterator iter = m_map.begin ();
+ iter != m_map.end ();
+ ++iter)
+ {
+ // TODO: add nodes/edges to label things
+
+ tree expr = (*iter).first;
+ region_id child_rid = (*iter).second;
+
+ pp_printf (pp, "rid_label_%i [label=\"", child_rid.as_int ());
+ pp_write_text_to_stream (pp);
+PUSH_IGNORE_WFORMAT
+ pp_printf (pp, "%qE", expr);
+POP_IGNORE_WFORMAT
+ pp_write_text_as_dot_label_to_stream (pp, /*for_record=*/false);
+ pp_string (pp, "\"];");
+ pp_newline (pp);
+
+ pp_printf (pp, "rid_label_%i", child_rid.as_int ());
+ pp_string (pp, " -> ");
+ child_rid.dump_node_name_to_pp (pp);
+ pp_string (pp, ";");
+ pp_newline (pp);
+ }
+}
+
+/* Implementation of region::dump_child_label vfunc for map_region. */
+
+void
+map_region::dump_child_label (const region_model &model,
+ region_id this_rid,
+ region_id child_rid,
+ pretty_printer *pp) const
+{
+ region::dump_child_label (model, this_rid, child_rid, pp);
+
+ for (map_t::iterator iter = m_map.begin ();
+ iter != m_map.end ();
+ ++iter)
+ {
+ if (child_rid == (*iter).second)
+ {
+ tree key = (*iter).first;
+PUSH_IGNORE_WFORMAT
+ if (DECL_P (key))
+ pp_printf (pp, "%qD: ", key);
+ else
+ pp_printf (pp, "%qE: ", key);
+POP_IGNORE_WFORMAT
+ }
+ }
+}
+
+/* Look for a child region for KEY within this map_region.
+   If it doesn't already exist, create a child region, using TYPE for
+   its type.
+ Return the region_id of the child (whether pre-existing, or
+ newly-created). */
+
+region_id
+map_region::get_or_create (region_model *model,
+ region_id this_rid,
+ tree key,
+ tree type)
+{
+ gcc_assert (key);
+ gcc_assert (valid_key_p (key));
+ region_id *slot = m_map.get (key);
+ if (slot)
+ return *slot;
+ region_id child_rid = model->add_region_for_type (this_rid, type);
+ m_map.put (key, child_rid);
+ return child_rid;
+}
+
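+/* Informal usage sketch for the above: when handling an access to a
+   field "s.x", the struct_region for "s" calls get_or_create with the
+   FIELD_DECL for "x", lazily creating a child region for that field on
+   the first access and reusing it thereafter.  ("s" and "x" are
+   hypothetical names.) */
+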
+/* Get the region_id for the child region for KEY within this
+ MAP_REGION, or NULL if there is no such child region. */
+
+region_id *
+map_region::get (tree key)
+{
+ gcc_assert (key);
+ gcc_assert (valid_key_p (key));
+ region_id *slot = m_map.get (key);
+ return slot;
+}
+
+/* Implementation of region::add_to_hash vfunc for map_region. */
+
+void
+map_region::add_to_hash (inchash::hash &hstate) const
+{
+ region::add_to_hash (hstate);
+ // TODO
+}
+
+/* Implementation of region::remap_region_ids vfunc for map_region. */
+
+void
+map_region::remap_region_ids (const region_id_map &map)
+{
+ region::remap_region_ids (map);
+
+ /* Remap the region ids within the map entries. */
+ for (map_t::iterator iter = m_map.begin ();
+ iter != m_map.end (); ++iter)
+ map.update (&(*iter).second);
+}
+
+/* Remove the binding of KEY to its child region (but not the
+ child region itself).
+ For use when purging unneeded SSA names. */
+
+void
+map_region::unbind (tree key)
+{
+ gcc_assert (key);
+ gcc_assert (valid_key_p (key));
+ m_map.remove (key);
+}
+
+/* Look for a child region with id CHILD_RID within this map_region.
+ If one is found, return its tree key, otherwise return NULL_TREE. */
+
+tree
+map_region::get_tree_for_child_region (region_id child_rid) const
+{
+ // TODO: do we want to store an inverse map?
+ for (map_t::iterator iter = m_map.begin ();
+ iter != m_map.end ();
+ ++iter)
+ {
+ tree key = (*iter).first;
+ region_id r = (*iter).second;
+ if (r == child_rid)
+ return key;
+ }
+
+ return NULL_TREE;
+}
+
+/* Look for a child region CHILD within this map_region.
+ If one is found, return its tree key, otherwise return NULL_TREE. */
+
+tree
+map_region::get_tree_for_child_region (region *child,
+ const region_model &model) const
+{
+ // TODO: do we want to store an inverse map?
+ for (map_t::iterator iter = m_map.begin ();
+ iter != m_map.end ();
+ ++iter)
+ {
+ tree key = (*iter).first;
+ region_id r = (*iter).second;
+ if (model.get_region (r) == child)
+ return key;
+ }
+
+ return NULL_TREE;
+}
+
+/* Comparator for trees to impose a deterministic ordering on
+ T1 and T2. */
+
+static int
+tree_cmp (const_tree t1, const_tree t2)
+{
+ gcc_assert (t1);
+ gcc_assert (t2);
+
+ /* Test tree codes first. */
+ if (TREE_CODE (t1) != TREE_CODE (t2))
+ return TREE_CODE (t1) - TREE_CODE (t2);
+
+ /* From this point on, we know T1 and T2 have the same tree code. */
+
+ if (DECL_P (t1))
+ {
+ if (DECL_NAME (t1) && DECL_NAME (t2))
+ return strcmp (IDENTIFIER_POINTER (DECL_NAME (t1)),
+ IDENTIFIER_POINTER (DECL_NAME (t2)));
+ else
+ {
+ if (DECL_NAME (t1))
+ return -1;
+ else if (DECL_NAME (t2))
+ return 1;
+ else
+ return DECL_UID (t1) - DECL_UID (t2);
+ }
+ }
+
+ switch (TREE_CODE (t1))
+ {
+ case SSA_NAME:
+ {
+ if (SSA_NAME_VAR (t1) && SSA_NAME_VAR (t2))
+ {
+ int var_cmp = tree_cmp (SSA_NAME_VAR (t1), SSA_NAME_VAR (t2));
+ if (var_cmp)
+ return var_cmp;
+ return SSA_NAME_VERSION (t1) - SSA_NAME_VERSION (t2);
+ }
+ else
+ {
+ if (SSA_NAME_VAR (t1))
+ return -1;
+ else if (SSA_NAME_VAR (t2))
+ return 1;
+ else
+ return SSA_NAME_VERSION (t1) - SSA_NAME_VERSION (t2);
+ }
+ }
+ break;
+
+ case INTEGER_CST:
+ return tree_int_cst_compare (t1, t2);
+
+ case REAL_CST:
+ {
+ real_value *rv1 = TREE_REAL_CST_PTR (t1);
+ real_value *rv2 = TREE_REAL_CST_PTR (t2);
+ if (real_compare (LT_EXPR, rv1, rv2))
+ return -1;
+ if (real_compare (LT_EXPR, rv2, rv1))
+ return 1;
+ return 0;
+ }
+
+ case STRING_CST:
+ return strcmp (TREE_STRING_POINTER (t1),
+ TREE_STRING_POINTER (t2));
+
+ default:
+ gcc_unreachable ();
+ break;
+ }
+
+ gcc_unreachable ();
+
+ return 0;
+}
+
+/* qsort comparator for trees to impose a deterministic ordering on
+ P1 and P2. */
+
+static int
+tree_cmp (const void *p1, const void *p2)
+{
+ const_tree t1 = *(const_tree const *)p1;
+ const_tree t2 = *(const_tree const *)p2;
+
+ int result = tree_cmp (t1, t2);
+
+  /* Check that the ordering is symmetric. */
+#if CHECKING_P
+ int reversed = tree_cmp (t2, t1);
+ gcc_assert (reversed == -result);
+#endif
+
+ /* We should only have 0 for equal pairs. */
+#if 0
+ gcc_assert (result != 0
+ || t1 == t2);
+#endif
+
+ return result;
+}
+
+/* Attempt to merge MAP_REGION_A and MAP_REGION_B into MERGED_MAP_REGION,
+ which has region_id MERGED_RID, using MERGER.
+ Return true if the merger is possible, false otherwise. */
+
+bool
+map_region::can_merge_p (const map_region *map_region_a,
+ const map_region *map_region_b,
+ map_region *merged_map_region,
+ region_id merged_rid,
+ model_merger *merger)
+{
+ for (map_t::iterator iter = map_region_a->m_map.begin ();
+ iter != map_region_a->m_map.end ();
+ ++iter)
+ {
+ tree key_a = (*iter).first;
+ region_id rid_a = (*iter).second;
+
+ if (const region_id *slot_b
+ = const_cast<map_region *>(map_region_b)->m_map.get (key_a))
+ {
+ region_id rid_b = *slot_b;
+
+ region *child_region_a = merger->get_region_a <region> (rid_a);
+ region *child_region_b = merger->get_region_b <region> (rid_b);
+
+ gcc_assert (child_region_a->get_type ()
+ == child_region_b->get_type ());
+
+ gcc_assert (child_region_a->get_kind ()
+ == child_region_b->get_kind ());
+
+ region_id child_merged_rid
+ = merged_map_region->get_or_create (merger->m_merged_model,
+ merged_rid,
+ key_a,
+ child_region_a->get_type ());
+
+ region *child_merged_region
+ = merger->m_merged_model->get_region (child_merged_rid);
+
+ /* Consider values. */
+ svalue_id child_a_sid = child_region_a->get_value_direct ();
+ svalue_id child_b_sid = child_region_b->get_value_direct ();
+ svalue_id child_merged_sid;
+ if (!merger->can_merge_values_p (child_a_sid, child_b_sid,
+ &child_merged_sid))
+ return false;
+ if (!child_merged_sid.null_p ())
+ child_merged_region->set_value (*merger->m_merged_model,
+ child_merged_rid,
+ child_merged_sid,
+ NULL);
+
+ if (map_region *map_region_a = child_region_a->dyn_cast_map_region ())
+ {
+ /* Recurse. */
+ if (!can_merge_p (map_region_a,
+ as_a <map_region *> (child_region_b),
+ as_a <map_region *> (child_merged_region),
+ child_merged_rid,
+ merger))
+ return false;
+ }
+
+ }
+ else
+ {
+ /* TODO: region is present in A, but absent in B. */
+ }
+ }
+
+ /* TODO: check for keys in B that aren't in A. */
+
+ return true;
+}
+
+/* Implementation of region::walk_for_canonicalization vfunc for
+ map_region. */
+
+void
+map_region::walk_for_canonicalization (canonicalization *c) const
+{
+ auto_vec<tree> keys (m_map.elements ());
+ for (map_t::iterator iter = m_map.begin ();
+ iter != m_map.end ();
+ ++iter)
+ {
+ tree key_a = (*iter).first;
+ keys.quick_push (key_a);
+ }
+ keys.qsort (tree_cmp);
+
+ unsigned i;
+ tree key;
+ FOR_EACH_VEC_ELT (keys, i, key)
+ {
+ region_id rid = *const_cast<map_region *>(this)->m_map.get (key);
+ c->walk_rid (rid);
+ }
+}
+
+/* For debugging purposes: look for a child region for a decl named
+ IDENTIFIER (or an SSA_NAME for such a decl), returning its value,
+ or svalue_id::null if none are found. */
+
+svalue_id
+map_region::get_value_by_name (tree identifier,
+ const region_model &model) const
+{
+ for (map_t::iterator iter = m_map.begin ();
+ iter != m_map.end ();
+ ++iter)
+ {
+ tree key = (*iter).first;
+ if (TREE_CODE (key) == SSA_NAME)
+ if (SSA_NAME_VAR (key))
+ key = SSA_NAME_VAR (key);
+ if (DECL_P (key))
+ if (DECL_NAME (key) == identifier)
+ {
+ region_id rid = (*iter).second;
+ region *region = model.get_region (rid);
+ return region->get_value (const_cast<region_model &>(model),
+ false, NULL);
+ }
+ }
+ return svalue_id::null ();
+}
+
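+/* Informal usage sketch for the above: this is meant for calls from the
+   debugger or from selftests, e.g.
+     frame->get_value_by_name (get_identifier ("p"), model)
+   to peek at the value currently bound to a local named "p" ("frame"
+   and "p" being hypothetical); get_identifier is used to build the
+   IDENTIFIER_NODE to search for. */
+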
+/* class struct_or_union_region : public map_region. */
+
+/* Implementation of map_region::valid_key_p vfunc for
+ struct_or_union_region. */
+
+bool
+struct_or_union_region::valid_key_p (tree key) const
+{
+ return TREE_CODE (key) == FIELD_DECL;
+}
+
+/* Compare the fields of this struct_or_union_region with OTHER, returning
+ true if they are equal.
+ For use by region::operator==. */
+
+bool
+struct_or_union_region::compare_fields (const struct_or_union_region &other)
+ const
+{
+ return map_region::compare_fields (other);
+}
+
+/* class struct_region : public struct_or_union_region. */
+
+/* Implementation of region::clone vfunc for struct_region. */
+
+region *
+struct_region::clone () const
+{
+ return new struct_region (*this);
+}
+
+/* Compare the fields of this struct_region with OTHER, returning true
+ if they are equal.
+ For use by region::operator==. */
+
+bool
+struct_region::compare_fields (const struct_region &other) const
+{
+ return struct_or_union_region::compare_fields (other);
+}
+
+/* class union_region : public struct_or_union_region. */
+
+/* Implementation of region::clone vfunc for union_region. */
+
+region *
+union_region::clone () const
+{
+ return new union_region (*this);
+}
+
+/* Compare the fields of this union_region with OTHER, returning true
+ if they are equal.
+ For use by region::operator==. */
+
+bool
+union_region::compare_fields (const union_region &other) const
+{
+ return struct_or_union_region::compare_fields (other);
+}
+
+/* class frame_region : public map_region. */
+
+/* Compare the fields of this frame_region with OTHER, returning true
+ if they are equal.
+ For use by region::operator==. */
+
+bool
+frame_region::compare_fields (const frame_region &other) const
+{
+ if (!map_region::compare_fields (other))
+ return false;
+ if (m_fun != other.m_fun)
+ return false;
+ if (m_depth != other.m_depth)
+ return false;
+ return true;
+}
+
+/* Implementation of region::clone vfunc for frame_region. */
+
+region *
+frame_region::clone () const
+{
+ return new frame_region (*this);
+}
+
+/* Implementation of map_region::valid_key_p vfunc for frame_region. */
+
+bool
+frame_region::valid_key_p (tree key) const
+{
+ // TODO: could also check that VAR_DECLs are locals
+ return (TREE_CODE (key) == PARM_DECL
+ || TREE_CODE (key) == VAR_DECL
+ || TREE_CODE (key) == SSA_NAME
+ || TREE_CODE (key) == RESULT_DECL);
+}
+
+/* Implementation of region::print_fields vfunc for frame_region. */
+
+void
+frame_region::print_fields (const region_model &model,
+ region_id this_rid,
+ pretty_printer *pp) const
+{
+ map_region::print_fields (model, this_rid, pp);
+ pp_printf (pp, ", function: %qs, depth: %i", function_name (m_fun), m_depth);
+}
+
+/* Implementation of region::add_to_hash vfunc for frame_region. */
+
+void
+frame_region::add_to_hash (inchash::hash &hstate) const
+{
+ map_region::add_to_hash (hstate);
+ hstate.add_ptr (m_fun);
+ hstate.add_int (m_depth);
+}
+
+/* class globals_region : public scope_region. */
+
+/* Compare the fields of this globals_region with OTHER, returning true
+ if they are equal.
+ For use by region::operator==. */
+
+bool
+globals_region::compare_fields (const globals_region &other) const
+{
+ return map_region::compare_fields (other);
+}
+
+/* Implementation of region::clone vfunc for globals_region. */
+
+region *
+globals_region::clone () const
+{
+ return new globals_region (*this);
+}
+
+/* Implementation of map_region::valid_key_p vfunc for globals_region. */
+
+bool
+globals_region::valid_key_p (tree key) const
+{
+ return TREE_CODE (key) == VAR_DECL;
+}
+
+/* class code_region : public map_region. */
+
+/* Compare the fields of this code_region with OTHER, returning true
+ if they are equal.
+ For use by region::operator==. */
+
+bool
+code_region::compare_fields (const code_region &other) const
+{
+ return map_region::compare_fields (other);
+}
+
+/* Implementation of region::clone vfunc for code_region. */
+
+region *
+code_region::clone () const
+{
+ return new code_region (*this);
+}
+
+/* Implementation of map_region::valid_key_p vfunc for code_region. */
+
+bool
+code_region::valid_key_p (tree key) const
+{
+ return TREE_CODE (key) == FUNCTION_DECL;
+}
+
+/* class array_region : public region. */
+
+/* array_region's copy ctor. */
+
+array_region::array_region (const array_region &other)
+: region (other),
+ m_map (other.m_map)
+{
+}
+
+/* Get a child region for the element with index INDEX_SID. */
+
+region_id
+array_region::get_element (region_model *model,
+ region_id this_rid,
+ svalue_id index_sid,
+ region_model_context *ctxt ATTRIBUTE_UNUSED)
+{
+ tree element_type = TREE_TYPE (get_type ());
+ svalue *index_sval = model->get_svalue (index_sid);
+ if (tree cst_index = index_sval->maybe_get_constant ())
+ {
+ key_t key = key_from_constant (cst_index);
+ region_id element_rid
+ = get_or_create (model, this_rid, key, element_type);
+ return element_rid;
+ }
+
+ return model->get_or_create_view (this_rid, element_type);
+}
+
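+/* Informal example of the above: for
+     int arr[10];
+     arr[3] = 42;
+     arr[i] = 17;
+   the access "arr[3]" has a constant index and so gets (or creates) a
+   per-element child region keyed by 3, whereas "arr[i]" with a symbolic
+   index falls back to a view of the element type covering the whole
+   array.  (Sketch; "arr" and "i" are hypothetical.) */
+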
+/* Implementation of region::clone vfunc for array_region. */
+
+region *
+array_region::clone () const
+{
+ return new array_region (*this);
+}
+
+/* Compare the fields of this array_region with OTHER, returning true
+ if they are equal.
+ For use by region::operator==. */
+
+bool
+array_region::compare_fields (const array_region &other) const
+{
+ if (m_map.elements () != other.m_map.elements ())
+ return false;
+
+ for (map_t::iterator iter = m_map.begin ();
+ iter != m_map.end ();
+ ++iter)
+ {
+ int key = (*iter).first;
+ region_id e = (*iter).second;
+ region_id *other_slot = const_cast <map_t &> (other.m_map).get (key);
+ if (other_slot == NULL)
+ return false;
+ if (e != *other_slot)
+ return false;
+ }
+ return true;
+}
+
+/* Implementation of region::print_fields vfunc for array_region. */
+
+void
+array_region::print_fields (const region_model &model,
+ region_id this_rid,
+ pretty_printer *pp) const
+{
+ region::print_fields (model, this_rid, pp);
+ pp_string (pp, ", array: {");
+ for (map_t::iterator iter = m_map.begin ();
+ iter != m_map.end ();
+ ++iter)
+ {
+ if (iter != m_map.begin ())
+ pp_string (pp, ", ");
+ int key = (*iter).first;
+ region_id child_rid = (*iter).second;
+PUSH_IGNORE_WFORMAT
+ pp_printf (pp, "[%i]: ", key);
+POP_IGNORE_WFORMAT
+ child_rid.print (pp);
+ }
+ pp_string (pp, "}");
+}
+
+/* Implementation of region::dump_dot_to_pp vfunc for array_region. */
+
+void
+array_region::dump_dot_to_pp (const region_model &model,
+ region_id this_rid,
+ pretty_printer *pp) const
+{
+ region::dump_dot_to_pp (model, this_rid, pp);
+ for (map_t::iterator iter = m_map.begin ();
+ iter != m_map.end ();
+ ++iter)
+ {
+ // TODO: add nodes/edges to label things
+
+ int key = (*iter).first;
+ region_id child_rid = (*iter).second;
+
+ pp_printf (pp, "rid_label_%i [label=\"", child_rid.as_int ());
+ pp_write_text_to_stream (pp);
+PUSH_IGNORE_WFORMAT
+ pp_printf (pp, "%qi", key);
+POP_IGNORE_WFORMAT
+ pp_write_text_as_dot_label_to_stream (pp, /*for_record=*/false);
+ pp_string (pp, "\"];");
+ pp_newline (pp);
+
+ pp_printf (pp, "rid_label_%i", child_rid.as_int ());
+ pp_string (pp, " -> ");
+ child_rid.dump_node_name_to_pp (pp);
+ pp_string (pp, ";");
+ pp_newline (pp);
+ }
+}
+
+/* Implementation of region::dump_child_label vfunc for array_region. */
+
+void
+array_region::dump_child_label (const region_model &model,
+ region_id this_rid,
+ region_id child_rid,
+ pretty_printer *pp) const
+{
+ region::dump_child_label (model, this_rid, child_rid, pp);
+
+ for (map_t::iterator iter = m_map.begin ();
+ iter != m_map.end ();
+ ++iter)
+ {
+ if (child_rid == (*iter).second)
+ {
+ int key = (*iter).first;
+ pp_printf (pp, "[%i]: ", key);
+ }
+ }
+}
+
+/* Look for a child region for KEY within this array_region.
+ If it doesn't already exist, create a child region, using TYPE for
+ its type.
+ Return the region_id of the child (whether pre-existing, or
+ newly-created). */
+
+region_id
+array_region::get_or_create (region_model *model,
+ region_id this_rid,
+ key_t key,
+ tree type)
+{
+ region_id *slot = m_map.get (key);
+ if (slot)
+ return *slot;
+ region_id child_rid = model->add_region_for_type (this_rid, type);
+ m_map.put (key, child_rid);
+ return child_rid;
+}
+
+/* Get a pointer to the region_id of the child region for KEY within
+ this array_region, or NULL if there is no such child region. */
+
+region_id *
+array_region::get (key_t key)
+{
+ region_id *slot = m_map.get (key);
+ return slot;
+}
+
+/* Implementation of region::add_to_hash vfunc for array_region. */
+
+void
+array_region::add_to_hash (inchash::hash &hstate) const
+{
+ region::add_to_hash (hstate);
+ // TODO
+}
+
+/* Implementation of region::remap_region_ids vfunc for array_region. */
+
+void
+array_region::remap_region_ids (const region_id_map &map)
+{
+ region::remap_region_ids (map);
+
+ /* Remap the region ids within the map entries. */
+ for (map_t::iterator iter = m_map.begin ();
+ iter != m_map.end (); ++iter)
+ map.update (&(*iter).second);
+}
+
+/* Look for a child region with id CHILD_RID within this array_region.
+ If one is found, write its key to *OUT and return true,
+ otherwise return false. */
+
+bool
+array_region::get_key_for_child_region (region_id child_rid, key_t *out) const
+{
+ // TODO: do we want to store an inverse map?
+ for (map_t::iterator iter = m_map.begin ();
+ iter != m_map.end ();
+ ++iter)
+ {
+ key_t key = (*iter).first;
+ region_id r = (*iter).second;
+ if (r == child_rid)
+ {
+ *out = key;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/* qsort comparator for int. */
+
+static int
+int_cmp (const void *p1, const void *p2)
+{
+ int i1 = *(const int *)p1;
+ int i2 = *(const int *)p2;
+
+ return i1 - i2;
+}
+
+/* Implementation of region::walk_for_canonicalization vfunc for
+ array_region. */
+
+void
+array_region::walk_for_canonicalization (canonicalization *c) const
+{
+ auto_vec<int> keys (m_map.elements ());
+ for (map_t::iterator iter = m_map.begin ();
+ iter != m_map.end ();
+ ++iter)
+ {
+ int key_a = (*iter).first;
+ keys.quick_push (key_a);
+ }
+ keys.qsort (int_cmp);
+
+ unsigned i;
+ int key;
+ FOR_EACH_VEC_ELT (keys, i, key)
+ {
+ region_id rid = *const_cast<array_region *>(this)->m_map.get (key);
+ c->walk_rid (rid);
+ }
+}
+
+/* Convert constant CST into an array_region::key_t. */
+
+array_region::key_t
+array_region::key_from_constant (tree cst)
+{
+ gcc_assert (CONSTANT_CLASS_P (cst));
+ wide_int w = wi::to_wide (cst);
+ key_t result = w.to_shwi ();
+ return result;
+}
+
+/* class function_region : public map_region. */
+
+/* Compare the fields of this function_region with OTHER, returning true
+ if they are equal.
+ For use by region::operator==. */
+
+bool
+function_region::compare_fields (const function_region &other) const
+{
+ return map_region::compare_fields (other);
+}
+
+/* Implementation of region::clone vfunc for function_region. */
+
+region *
+function_region::clone () const
+{
+ return new function_region (*this);
+}
+
+/* Implementation of map_region::valid_key_p vfunc for function_region. */
+
+bool
+function_region::valid_key_p (tree key) const
+{
+ return TREE_CODE (key) == LABEL_DECL;
+}
+
+/* class stack_region : public region. */
+
+/* stack_region's copy ctor. */
+
+stack_region::stack_region (const stack_region &other)
+: region (other),
+ m_frame_rids (other.m_frame_rids.length ())
+{
+ int i;
+ region_id *frame_rid;
+ FOR_EACH_VEC_ELT (other.m_frame_rids, i, frame_rid)
+ m_frame_rids.quick_push (*frame_rid);
+}
+
+/* Compare the fields of this stack_region with OTHER, returning true
+ if they are equal.
+ For use by region::operator==. */
+
+bool
+stack_region::compare_fields (const stack_region &other) const
+{
+ if (m_frame_rids.length () != other.m_frame_rids.length ())
+ return false;
+
+ int i;
+ region_id *frame_rid;
+ FOR_EACH_VEC_ELT (m_frame_rids, i, frame_rid)
+ if (m_frame_rids[i] != other.m_frame_rids[i])
+ return false;
+
+ return true;
+}
+
+/* Implementation of region::clone vfunc for stack_region. */
+
+region *
+stack_region::clone () const
+{
+ return new stack_region (*this);
+}
+
+/* Implementation of region::print_fields vfunc for stack_region. */
+
+void
+stack_region::print_fields (const region_model &model,
+ region_id this_rid,
+ pretty_printer *pp) const
+{
+ region::print_fields (model, this_rid, pp);
+ // TODO
+}
+
+/* Implementation of region::dump_child_label vfunc for stack_region. */
+
+void
+stack_region::dump_child_label (const region_model &model,
+ region_id this_rid ATTRIBUTE_UNUSED,
+ region_id child_rid,
+ pretty_printer *pp) const
+{
+ function *fun = model.get_region<frame_region> (child_rid)->get_function ();
+ pp_printf (pp, "frame for %qs: ", function_name (fun));
+}
+
+/* Push FRAME_RID (for a frame_region) onto this stack. */
+
+void
+stack_region::push_frame (region_id frame_rid)
+{
+ m_frame_rids.safe_push (frame_rid);
+}
+
+/* Get the region_id of the top-most frame in this stack, if any. */
+
+region_id
+stack_region::get_current_frame_id () const
+{
+ if (m_frame_rids.length () > 0)
+ return m_frame_rids[m_frame_rids.length () - 1];
+ else
+ return region_id::null ();
+}
+
+/* Pop the topmost frame_region from this stack.
+
+ Purge the frame region and all its descendant regions.
+ Convert any pointers that point into such regions into
+ POISON_KIND_POPPED_STACK svalues.
+
+ Return the ID of any return value from the frame.
+
+ If PURGE, then purge all unused svalues, with the exception of any
+ return value for the frame, which is temporarily
+ preserved in case no regions reference it, so it can
+ be written into a region in the caller.
+
+ Accumulate stats on purged entities into STATS. */
+
+svalue_id
+stack_region::pop_frame (region_model *model, bool purge, purge_stats *stats,
+ region_model_context *ctxt)
+{
+ gcc_assert (m_frame_rids.length () > 0);
+
+ region_id frame_rid = get_current_frame_id ();
+ frame_region *frame = model->get_region<frame_region> (frame_rid);
+
+ /* Evaluate the result, within the callee frame. */
+ svalue_id result_sid;
+ tree fndecl = frame->get_function ()->decl;
+ tree result = DECL_RESULT (fndecl);
+ if (result && TREE_TYPE (result) != void_type_node)
+ result_sid = model->get_rvalue (result, ctxt);
+
+ /* Pop the frame RID. */
+ m_frame_rids.pop ();
+
+ model->delete_region_and_descendents (frame_rid,
+ POISON_KIND_POPPED_STACK,
+ stats,
+ ctxt ? ctxt->get_logger () : NULL);
+
+ /* Delete unused svalues, but don't delete the return value. */
+ if (purge)
+ model->purge_unused_svalues (stats, ctxt, &result_sid);
+
+ model->validate ();
+
+ return result_sid;
+}
+
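+/* Illustrative sketch (placeholder names; not compiled): once a frame is
+ popped, pointers into it are poisoned with POISON_KIND_POPPED_STACK,
+ which is what -Wanalyzer-use-of-pointer-in-stale-stack-frame reports:
+
+   static int *get_local (void)
+   {
+     int i = 42;
+     return &i;               // pointer into the callee frame
+   }
+
+   int test_stale (void)
+   {
+     int *p = get_local ();   // callee frame popped; "p" is now poisoned
+     return *p;               // use of pointer within a stale stack frame
+   }
+*/
+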
+/* Implementation of region::add_to_hash vfunc for stack_region. */
+
+void
+stack_region::add_to_hash (inchash::hash &hstate) const
+{
+ region::add_to_hash (hstate);
+
+ int i;
+ region_id *frame_rid;
+ FOR_EACH_VEC_ELT (m_frame_rids, i, frame_rid)
+ inchash::add (*frame_rid, hstate);
+}
+
+/* Implementation of region::remap_region_ids vfunc for stack_region. */
+
+void
+stack_region::remap_region_ids (const region_id_map &map)
+{
+ region::remap_region_ids (map);
+ int i;
+ region_id *frame_rid;
+ FOR_EACH_VEC_ELT (m_frame_rids, i, frame_rid)
+ map.update (&m_frame_rids[i]);
+}
+
+/* Attempt to merge STACK_REGION_A and STACK_REGION_B using MERGER.
+ Return true if the merger is possible, false otherwise. */
+
+bool
+stack_region::can_merge_p (const stack_region *stack_region_a,
+ const stack_region *stack_region_b,
+ model_merger *merger)
+{
+ if (stack_region_a->get_num_frames ()
+ != stack_region_b->get_num_frames ())
+ return false;
+
+ region_model *merged_model = merger->m_merged_model;
+
+ region_id rid_merged_stack
+ = merged_model->get_root_region ()->ensure_stack_region (merged_model);
+
+ stack_region *merged_stack
+ = merged_model->get_region <stack_region> (rid_merged_stack);
+
+ for (unsigned i = 0; i < stack_region_a->get_num_frames (); i++)
+ {
+ region_id rid_a = stack_region_a->get_frame_rid (i);
+ frame_region *frame_a = merger->get_region_a <frame_region> (rid_a);
+
+ region_id rid_b = stack_region_b->get_frame_rid (i);
+ frame_region *frame_b = merger->get_region_b <frame_region> (rid_b);
+
+ if (frame_a->get_function () != frame_b->get_function ())
+ return false;
+ frame_region *merged_frame = new frame_region (rid_merged_stack,
+ frame_a->get_function (),
+ frame_a->get_depth ());
+ region_id rid_merged_frame = merged_model->add_region (merged_frame);
+ merged_stack->push_frame (rid_merged_frame);
+
+ if (!map_region::can_merge_p (frame_a, frame_b,
+ merged_frame, rid_merged_frame,
+ merger))
+ return false;
+ }
+
+ return true;
+}
+
+/* Implementation of region::walk_for_canonicalization vfunc for
+ stack_region. */
+
+void
+stack_region::walk_for_canonicalization (canonicalization *c) const
+{
+ int i;
+ region_id *frame_rid;
+ FOR_EACH_VEC_ELT (m_frame_rids, i, frame_rid)
+ c->walk_rid (*frame_rid);
+}
+
+/* For debugging purposes: look for a grandchild region within one of
+ the child frame regions, where the grandchild is for a decl named
+ IDENTIFIER (or an SSA_NAME for such a decl):
+
+ stack_region
+ `-frame_region
+ `-region for decl named IDENTIFIER
+
+ returning its value, or svalue_id::null if none are found. */
+
+svalue_id
+stack_region::get_value_by_name (tree identifier,
+ const region_model &model) const
+{
+ int i;
+ region_id *frame_rid;
+ FOR_EACH_VEC_ELT (m_frame_rids, i, frame_rid)
+ {
+ frame_region *frame = model.get_region<frame_region> (*frame_rid);
+ svalue_id sid = frame->get_value_by_name (identifier, model);
+ if (!sid.null_p ())
+ return sid;
+ }
+
+ return svalue_id::null ();
+}
+
+/* class heap_region : public region. */
+
+/* heap_region's copy ctor. */
+
+heap_region::heap_region (const heap_region &other)
+: region (other)
+{
+}
+
+/* Compare the fields of this heap_region with OTHER, returning true
+ if they are equal.
+ For use by region::operator==. */
+
+bool
+heap_region::compare_fields (const heap_region &) const
+{
+ /* Empty. */
+ return true;
+}
+
+/* Implementation of region::clone vfunc for heap_region. */
+
+region *
+heap_region::clone () const
+{
+ return new heap_region (*this);
+}
+
+/* Implementation of region::walk_for_canonicalization vfunc for
+ heap_region. */
+
+void
+heap_region::walk_for_canonicalization (canonicalization *) const
+{
+ /* Empty. */
+}
+
+/* class root_region : public region. */
+
+/* root_region's default ctor. */
+
+root_region::root_region ()
+: region (region_id::null (),
+ svalue_id::null (),
+ NULL_TREE)
+{
+}
+
+/* root_region's copy ctor. */
+
+root_region::root_region (const root_region &other)
+: region (other),
+ m_stack_rid (other.m_stack_rid),
+ m_globals_rid (other.m_globals_rid),
+ m_code_rid (other.m_code_rid),
+ m_heap_rid (other.m_heap_rid)
+{
+}
+
+/* Compare the fields of this root_region with OTHER, returning true
+ if they are equal.
+ For use by region::operator==. */
+
+bool
+root_region::compare_fields (const root_region &other) const
+{
+ if (m_stack_rid != other.m_stack_rid)
+ return false;
+ if (m_globals_rid != other.m_globals_rid)
+ return false;
+ if (m_code_rid != other.m_code_rid)
+ return false;
+ if (m_heap_rid != other.m_heap_rid)
+ return false;
+ return true;
+}
+
+/* Implementation of region::clone vfunc for root_region. */
+
+region *
+root_region::clone () const
+{
+ return new root_region (*this);
+}
+
+/* Implementation of region::print_fields vfunc for root_region. */
+
+void
+root_region::print_fields (const region_model &model,
+ region_id this_rid,
+ pretty_printer *pp) const
+{
+ region::print_fields (model, this_rid, pp);
+ // TODO
+}
+
+/* Implementation of region::dump_child_label vfunc for root_region. */
+
+void
+root_region::dump_child_label (const region_model &model ATTRIBUTE_UNUSED,
+ region_id this_rid ATTRIBUTE_UNUSED,
+ region_id child_rid,
+ pretty_printer *pp) const
+{
+ if (child_rid == m_stack_rid)
+ pp_printf (pp, "stack: ");
+ else if (child_rid == m_globals_rid)
+ pp_printf (pp, "globals: ");
+ else if (child_rid == m_code_rid)
+ pp_printf (pp, "code: ");
+ else if (child_rid == m_heap_rid)
+ pp_printf (pp, "heap: ");
+}
+
+/* Create a new frame_region for a call to FUN and push it onto
+ the stack.
+
+ If ARG_SIDS is non-NULL, use it to populate the parameters
+ in the new frame.
+ Otherwise, populate them with unknown values.
+
+ Return the region_id of the new frame. */
+
+region_id
+root_region::push_frame (region_model *model, function *fun,
+ vec<svalue_id> *arg_sids,
+ region_model_context *ctxt)
+{
+ gcc_assert (fun);
+ /* arg_sids can be NULL. */
+
+ ensure_stack_region (model);
+ stack_region *stack = model->get_region <stack_region> (m_stack_rid);
+
+ frame_region *region = new frame_region (m_stack_rid, fun,
+ stack->get_num_frames ());
+ region_id frame_rid = model->add_region (region);
+
+ // TODO: unify these cases by building a vec of unknown?
+
+ if (arg_sids)
+ {
+ /* Arguments supplied from a caller frame. */
+
+ tree fndecl = fun->decl;
+ unsigned idx = 0;
+ for (tree iter_parm = DECL_ARGUMENTS (fndecl); iter_parm;
+ iter_parm = DECL_CHAIN (iter_parm), ++idx)
+ {
+ /* If there's a mismatching declaration, the call stmt might
+ not have enough args. Handle this case by leaving the
+ rest of the params as uninitialized. */
+ if (idx >= arg_sids->length ())
+ break;
+ svalue_id arg_sid = (*arg_sids)[idx];
+ region_id parm_rid
+ = region->get_or_create (model, frame_rid, iter_parm,
+ TREE_TYPE (iter_parm));
+ model->set_value (parm_rid, arg_sid, ctxt);
+
+ /* Also do it for the default SSA name (sharing the same
+ value as the argument). */
+ tree parm_default_ssa = ssa_default_def (fun, iter_parm);
+ if (parm_default_ssa)
+ {
+ region_id defssa_rid
+ = region->get_or_create (model, frame_rid, parm_default_ssa,
+ TREE_TYPE (iter_parm));
+ model->set_value (defssa_rid, arg_sid, ctxt);
+ }
+ }
+ }
+ else
+ {
+ /* No known arguments (a top-level call within the analysis). */
+
+ /* Params have a defined, unknown value; they should not inherit
+ from the poisoned uninit value. */
+ tree fndecl = fun->decl;
+ for (tree iter_parm = DECL_ARGUMENTS (fndecl); iter_parm;
+ iter_parm = DECL_CHAIN (iter_parm))
+ {
+ region_id parm_rid
+ = region->get_or_create (model, frame_rid, iter_parm,
+ TREE_TYPE (iter_parm));
+ svalue_id parm_sid
+ = model->set_to_new_unknown_value (parm_rid, TREE_TYPE (iter_parm),
+ ctxt);
+
+ /* Also do it for default SSA name (sharing the same unknown
+ value). */
+ tree parm_default_ssa = ssa_default_def (fun, iter_parm);
+ if (parm_default_ssa)
+ {
+ region_id defssa_rid
+ = region->get_or_create (model, frame_rid, parm_default_ssa,
+ TREE_TYPE (iter_parm));
+ model->get_region (defssa_rid)->set_value (*model, defssa_rid,
+ parm_sid, ctxt);
+ }
+ }
+ }
+
+ stack->push_frame (frame_rid);
+
+ return frame_rid;
+}
+
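+/* Illustrative sketch (placeholder names; not compiled): for a call such
+ as the one below, the callee's frame is pushed with its parameter bound
+ to the caller's argument value (and likewise for the parameter's
+ default SSA name); for a top-level analysis entry point the parameters
+ would instead be given fresh unknown values:
+
+   static void set_to_7 (int *out)
+   {
+     *out = 7;
+   }
+
+   void test_call (void)
+   {
+     int i;
+     set_to_7 (&i);   // "out" in the new frame is bound to &i
+   }
+*/
+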
+/* Get the region_id of the top-most frame in this root_region's stack,
+ if any. */
+
+region_id
+root_region::get_current_frame_id (const region_model &model) const
+{
+ stack_region *stack = model.get_region <stack_region> (m_stack_rid);
+ if (stack)
+ return stack->get_current_frame_id ();
+ else
+ return region_id::null ();
+}
+
+/* Pop the topmost frame_region from this root_region's stack;
+ see the comment for stack_region::pop_frame. */
+
+svalue_id
+root_region::pop_frame (region_model *model, bool purge, purge_stats *out,
+ region_model_context *ctxt)
+{
+ stack_region *stack = model->get_region <stack_region> (m_stack_rid);
+ return stack->pop_frame (model, purge, out, ctxt);
+}
+
+/* Return the region_id of the stack region, creating it if it doesn't
+ already exist. */
+
+region_id
+root_region::ensure_stack_region (region_model *model)
+{
+ if (m_stack_rid.null_p ())
+ {
+ svalue_id uninit_sid
+ = model->add_svalue (new poisoned_svalue (POISON_KIND_UNINIT,
+ NULL_TREE));
+ m_stack_rid
+ = model->add_region (new stack_region (model->get_root_rid (),
+ uninit_sid));
+ }
+ return m_stack_rid;
+}
+
+/* Return the stack region (which could be NULL). */
+
+stack_region *
+root_region::get_stack_region (const region_model *model) const
+{
+ return model->get_region <stack_region> (m_stack_rid);
+}
+
+/* Return the region_id of the globals region, creating it if it doesn't
+ already exist. */
+
+region_id
+root_region::ensure_globals_region (region_model *model)
+{
+ if (m_globals_rid.null_p ())
+ m_globals_rid
+ = model->add_region (new globals_region (model->get_root_rid ()));
+ return m_globals_rid;
+}
+
+/* Return the code region (which could be NULL). */
+
+code_region *
+root_region::get_code_region (const region_model *model) const
+{
+ return model->get_region <code_region> (m_code_rid);
+}
+
+/* Return the region_id of the code region, creating it if it doesn't
+ already exist. */
+
+region_id
+root_region::ensure_code_region (region_model *model)
+{
+ if (m_code_rid.null_p ())
+ m_code_rid
+ = model->add_region (new code_region (model->get_root_rid ()));
+ return m_code_rid;
+}
+
+/* Return the globals region (which could be NULL). */
+
+globals_region *
+root_region::get_globals_region (const region_model *model) const
+{
+ return model->get_region <globals_region> (m_globals_rid);
+}
+
+/* Return the region_id of the heap region, creating it if it doesn't
+ already exist. */
+
+region_id
+root_region::ensure_heap_region (region_model *model)
+{
+ if (m_heap_rid.null_p ())
+ {
+ svalue_id uninit_sid
+ = model->add_svalue (new poisoned_svalue (POISON_KIND_UNINIT,
+ NULL_TREE));
+ m_heap_rid
+ = model->add_region (new heap_region (model->get_root_rid (),
+ uninit_sid));
+ }
+ return m_heap_rid;
+}
+
+/* Return the heap region (which could be NULL). */
+
+heap_region *
+root_region::get_heap_region (const region_model *model) const
+{
+ return model->get_region <heap_region> (m_heap_rid);
+}
+
+/* Implementation of region::remap_region_ids vfunc for root_region. */
+
+void
+root_region::remap_region_ids (const region_id_map &map)
+{
+ map.update (&m_stack_rid);
+ map.update (&m_globals_rid);
+ map.update (&m_code_rid);
+ map.update (&m_heap_rid);
+}
+
+/* Attempt to merge ROOT_REGION_A and ROOT_REGION_B into
+ MERGED_ROOT_REGION using MERGER.
+ Return true if the merger is possible, false otherwise. */
+
+bool
+root_region::can_merge_p (const root_region *root_region_a,
+ const root_region *root_region_b,
+ root_region *merged_root_region,
+ model_merger *merger)
+{
+ /* We can only merge if the stacks are sufficiently similar. */
+ stack_region *stack_a = root_region_a->get_stack_region (merger->m_model_a);
+ stack_region *stack_b = root_region_b->get_stack_region (merger->m_model_b);
+ if (stack_a && stack_b)
+ {
+ /* If the two models both have a stack, attempt to merge them. */
+ merged_root_region->ensure_stack_region (merger->m_merged_model);
+ if (!stack_region::can_merge_p (stack_a, stack_b, merger))
+ return false;
+ }
+ else if (stack_a || stack_b)
+ /* Don't attempt to merge if one model has a stack and the other
+ doesn't. */
+ return false;
+
+ map_region *globals_a = root_region_a->get_globals_region (merger->m_model_a);
+ map_region *globals_b = root_region_b->get_globals_region (merger->m_model_b);
+ if (globals_a && globals_b)
+ {
+ /* If both models have globals regions, attempt to merge them. */
+ region_id merged_globals_rid
+ = merged_root_region->ensure_globals_region (merger->m_merged_model);
+ map_region *merged_globals
+ = merged_root_region->get_globals_region (merger->m_merged_model);
+ if (!map_region::can_merge_p (globals_a, globals_b,
+ merged_globals, merged_globals_rid,
+ merger))
+ return false;
+ }
+ /* otherwise, merge as "no globals". */
+
+ map_region *code_a = root_region_a->get_code_region (merger->m_model_a);
+ map_region *code_b = root_region_b->get_code_region (merger->m_model_b);
+ if (code_a && code_b)
+ {
+ /* If both models have code regions, attempt to merge them. */
+ region_id merged_code_rid
+ = merged_root_region->ensure_code_region (merger->m_merged_model);
+ map_region *merged_code
+ = merged_root_region->get_code_region (merger->m_merged_model);
+ if (!map_region::can_merge_p (code_a, code_b,
+ merged_code, merged_code_rid,
+ merger))
+ return false;
+ }
+ /* otherwise, merge as "no code". */
+
+ heap_region *heap_a = root_region_a->get_heap_region (merger->m_model_a);
+ heap_region *heap_b = root_region_b->get_heap_region (merger->m_model_b);
+ if (heap_a && heap_b)
+ {
+ /* If both have a heap, create a "merged" heap.
+ Actually merging the heap contents happens via the region_svalue
+ instances, as needed, when seeing pairs of region_svalue instances. */
+ merged_root_region->ensure_heap_region (merger->m_merged_model);
+ }
+ /* otherwise, merge as "no heap". */
+
+ return true;
+}
+
+/* Implementation of region::add_to_hash vfunc for root_region. */
+
+void
+root_region::add_to_hash (inchash::hash &hstate) const
+{
+ region::add_to_hash (hstate);
+ inchash::add (m_stack_rid, hstate);
+ inchash::add (m_globals_rid, hstate);
+ inchash::add (m_code_rid, hstate);
+ inchash::add (m_heap_rid, hstate);
+}
+
+/* Implementation of region::walk_for_canonicalization vfunc for
+ root_region. */
+
+void
+root_region::walk_for_canonicalization (canonicalization *c) const
+{
+ c->walk_rid (m_stack_rid);
+ c->walk_rid (m_globals_rid);
+ c->walk_rid (m_code_rid);
+ c->walk_rid (m_heap_rid);
+}
+
+/* For debugging purposes: look for a descendant region for a local
+ or global decl named IDENTIFIER (or an SSA_NAME for such a decl),
+ returning its value, or svalue_id::null if none are found. */
+
+svalue_id
+root_region::get_value_by_name (tree identifier,
+ const region_model &model) const
+{
+ if (stack_region *stack = get_stack_region (&model))
+ {
+ svalue_id sid = stack->get_value_by_name (identifier, model);
+ if (!sid.null_p ())
+ return sid;
+ }
+ if (map_region *globals = get_globals_region (&model))
+ {
+ svalue_id sid = globals->get_value_by_name (identifier, model);
+ if (!sid.null_p ())
+ return sid;
+ }
+ return svalue_id::null ();
+}
+
+/* class symbolic_region : public map_region. */
+
+/* symbolic_region's copy ctor. */
+
+symbolic_region::symbolic_region (const symbolic_region &other)
+: region (other),
+ m_possibly_null (other.m_possibly_null)
+{
+}
+
+/* Compare the fields of this symbolic_region with OTHER, returning true
+ if they are equal.
+ For use by region::operator==. */
+
+bool
+symbolic_region::compare_fields (const symbolic_region &other) const
+{
+ return m_possibly_null == other.m_possibly_null;
+}
+
+/* Implementation of region::clone vfunc for symbolic_region. */
+
+region *
+symbolic_region::clone () const
+{
+ return new symbolic_region (*this);
+}
+
+/* Implementation of region::walk_for_canonicalization vfunc for
+ symbolic_region. */
+
+void
+symbolic_region::walk_for_canonicalization (canonicalization *) const
+{
+ /* Empty. */
+}
+
+/* class region_model. */
+
+/* region_model's default ctor. */
+
+region_model::region_model ()
+{
+ m_root_rid = add_region (new root_region ());
+ m_constraints = new impl_constraint_manager (this);
+ // TODO
+}
+
+/* region_model's copy ctor. */
+
+region_model::region_model (const region_model &other)
+: m_svalues (other.m_svalues.length ()),
+ m_regions (other.m_regions.length ()),
+ m_root_rid (other.m_root_rid)
+{
+ /* Clone the svalues and regions. */
+ int i;
+
+ svalue *svalue;
+ FOR_EACH_VEC_ELT (other.m_svalues, i, svalue)
+ m_svalues.quick_push (svalue->clone ());
+
+ region *region;
+ FOR_EACH_VEC_ELT (other.m_regions, i, region)
+ m_regions.quick_push (region->clone ());
+
+ m_constraints = other.m_constraints->clone (this);
+}
+
+/* region_model's dtor. */
+
+region_model::~region_model ()
+{
+ delete m_constraints;
+}
+
+/* region_model's assignment operator. */
+
+region_model &
+region_model::operator= (const region_model &other)
+{
+ unsigned i;
+ svalue *svalue;
+ region *region;
+
+ /* Delete existing content. */
+ FOR_EACH_VEC_ELT (m_svalues, i, svalue)
+ delete svalue;
+ m_svalues.truncate (0);
+
+ FOR_EACH_VEC_ELT (m_regions, i, region)
+ delete region;
+ m_regions.truncate (0);
+
+ delete m_constraints;
+
+ /* Clone the svalues and regions. */
+ m_svalues.reserve (other.m_svalues.length (), true);
+ FOR_EACH_VEC_ELT (other.m_svalues, i, svalue)
+ m_svalues.quick_push (svalue->clone ());
+
+ m_regions.reserve (other.m_regions.length (), true);
+ FOR_EACH_VEC_ELT (other.m_regions, i, region)
+ m_regions.quick_push (region->clone ());
+
+ m_root_rid = other.m_root_rid;
+
+ m_constraints = other.m_constraints->clone (this);
+
+ return *this;
+}
+
+/* Equality operator for region_model.
+
+ Amongst other things this directly compares the svalue and region
+ vectors and so for this to be meaningful both this and OTHER should
+ have been canonicalized. */
+
+bool
+region_model::operator== (const region_model &other) const
+{
+ if (m_root_rid != other.m_root_rid)
+ return false;
+
+ if (m_svalues.length () != other.m_svalues.length ())
+ return false;
+
+ if (m_regions.length () != other.m_regions.length ())
+ return false;
+
+ if (*m_constraints != *other.m_constraints)
+ return false;
+
+ unsigned i;
+ svalue *svalue;
+ FOR_EACH_VEC_ELT (other.m_svalues, i, svalue)
+ if (!(*m_svalues[i] == *other.m_svalues[i]))
+ return false;
+
+ region *region;
+ FOR_EACH_VEC_ELT (other.m_regions, i, region)
+ if (!(*m_regions[i] == *other.m_regions[i]))
+ return false;
+
+ gcc_checking_assert (hash () == other.hash ());
+
+ return true;
+}
+
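+/* Illustrative sketch (not compiled): because the svalue and region
+ vectors are compared element-by-element, two models built up in
+ different orders should be canonicalized before being compared:
+
+   region_model a, b;
+   // ... populate a and b ...
+   a.canonicalize (NULL);
+   b.canonicalize (NULL);
+   if (a == b)
+     {
+       // the models are equivalent
+     }
+*/
+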
+/* Generate a hash value for this region_model. */
+
+hashval_t
+region_model::hash () const
+{
+ hashval_t result = 0;
+ int i;
+
+ svalue *svalue;
+ FOR_EACH_VEC_ELT (m_svalues, i, svalue)
+ result ^= svalue->hash ();
+
+ region *region;
+ FOR_EACH_VEC_ELT (m_regions, i, region)
+ result ^= region->hash ();
+
+ result ^= m_constraints->hash ();
+
+ return result;
+}
+
+/* Print an all-on-one-line representation of this region_model to PP,
+ which must support %E for trees. */
+
+void
+region_model::print (pretty_printer *pp) const
+{
+ int i;
+
+ pp_string (pp, "svalues: [");
+ svalue *svalue;
+ FOR_EACH_VEC_ELT (m_svalues, i, svalue)
+ {
+ if (i > 0)
+ pp_string (pp, ", ");
+ print_svalue (svalue_id::from_int (i), pp);
+ }
+
+ pp_string (pp, "], regions: [");
+
+ region *region;
+ FOR_EACH_VEC_ELT (m_regions, i, region)
+ {
+ if (i > 0)
+ pp_string (pp, ", ");
+ region->print (*this, region_id::from_int (i), pp);
+ }
+
+ pp_string (pp, "], constraints: ");
+
+ m_constraints->print (pp);
+}
+
+/* Print the svalue with id SID to PP. */
+
+void
+region_model::print_svalue (svalue_id sid, pretty_printer *pp) const
+{
+ get_svalue (sid)->print (*this, sid, pp);
+}
+
+/* Dump a .dot representation of this region_model to PP, showing
+ the values and the hierarchy of regions. */
+
+void
+region_model::dump_dot_to_pp (pretty_printer *pp) const
+{
+ graphviz_out gv (pp);
+
+ pp_string (pp, "digraph \"");
+ pp_write_text_to_stream (pp);
+ pp_write_text_as_dot_label_to_stream (pp, /*for_record=*/false);
+ pp_string (pp, "\" {\n");
+
+ gv.indent ();
+
+ pp_string (pp, "overlap=false;\n");
+ pp_string (pp, "compound=true;\n");
+
+ int i;
+
+ svalue *svalue;
+ FOR_EACH_VEC_ELT (m_svalues, i, svalue)
+ svalue->dump_dot_to_pp (*this, svalue_id::from_int (i), pp);
+
+ region *region;
+ FOR_EACH_VEC_ELT (m_regions, i, region)
+ region->dump_dot_to_pp (*this, region_id::from_int (i), pp);
+
+ /* TODO: constraints. */
+
+ /* Terminate "digraph" */
+ gv.outdent ();
+ pp_string (pp, "}");
+ pp_newline (pp);
+}
+
+/* Dump a .dot representation of this region_model to FP. */
+
+void
+region_model::dump_dot_to_file (FILE *fp) const
+{
+ pretty_printer pp;
+ pp_format_decoder (&pp) = default_tree_printer;
+ pp.buffer->stream = fp;
+ dump_dot_to_pp (&pp);
+ pp_flush (&pp);
+}
+
+/* Dump a .dot representation of this region_model to PATH. */
+
+void
+region_model::dump_dot (const char *path) const
+{
+ FILE *fp = fopen (path, "w");
+ dump_dot_to_file (fp);
+ fclose (fp);
+}
+
+/* Dump a multiline representation of this model to PP, showing the
+ region hierarchy, the svalues, and any constraints.
+
+ If SUMMARIZE is true, show only the most pertinent information,
+ in a form that attempts to be less verbose.
+ Otherwise, show all information. */
+
+void
+region_model::dump_to_pp (pretty_printer *pp, bool summarize) const
+{
+ if (summarize)
+ {
+ bool is_first = true;
+ region_id frame_id = get_current_frame_id ();
+ frame_region *frame = get_region <frame_region> (frame_id);
+ if (frame)
+ dump_summary_of_map (pp, frame, &is_first);
+
+ region_id globals_id = get_globals_region_id ();
+ map_region *globals = get_region <map_region> (globals_id);
+ if (globals)
+ dump_summary_of_map (pp, globals, &is_first);
+
+ unsigned i;
+
+ equiv_class *ec;
+ FOR_EACH_VEC_ELT (m_constraints->m_equiv_classes, i, ec)
+ {
+ for (unsigned j = 0; j < ec->m_vars.length (); j++)
+ {
+ svalue_id lhs_sid = ec->m_vars[j];
+ tree lhs_tree = get_representative_tree (lhs_sid);
+ if (lhs_tree == NULL_TREE)
+ continue;
+ for (unsigned k = j + 1; k < ec->m_vars.length (); k++)
+ {
+ svalue_id rhs_sid = ec->m_vars[k];
+ tree rhs_tree = get_representative_tree (rhs_sid);
+ if (rhs_tree
+ && !(CONSTANT_CLASS_P (lhs_tree)
+ && CONSTANT_CLASS_P (rhs_tree)))
+ {
+ dump_separator (pp, &is_first);
+ dump_tree (pp, lhs_tree);
+ pp_string (pp, " == ");
+ dump_tree (pp, rhs_tree);
+ }
+ }
+ }
+ }
+
+ constraint *c;
+ FOR_EACH_VEC_ELT (m_constraints->m_constraints, i, c)
+ {
+ const equiv_class &lhs = c->m_lhs.get_obj (*m_constraints);
+ const equiv_class &rhs = c->m_rhs.get_obj (*m_constraints);
+ svalue_id lhs_sid = lhs.get_representative ();
+ svalue_id rhs_sid = rhs.get_representative ();
+ tree lhs_tree = get_representative_tree (lhs_sid);
+ tree rhs_tree = get_representative_tree (rhs_sid);
+ if (lhs_tree && rhs_tree
+ && !(CONSTANT_CLASS_P (lhs_tree) && CONSTANT_CLASS_P (rhs_tree)))
+ {
+ dump_separator (pp, &is_first);
+ dump_tree (pp, lhs_tree);
+ pp_printf (pp, " %s ", constraint_op_code (c->m_op));
+ dump_tree (pp, rhs_tree);
+ }
+ }
+
+ return;
+ }
+
+ get_region (m_root_rid)->dump_to_pp (*this, m_root_rid, pp, "", true);
+
+ pp_string (pp, "svalues:");
+ pp_newline (pp);
+ int i;
+ svalue *svalue;
+ FOR_EACH_VEC_ELT (m_svalues, i, svalue)
+ {
+ pp_string (pp, " ");
+ svalue_id sid = svalue_id::from_int (i);
+ print_svalue (sid, pp);
+ pp_newline (pp);
+ }
+
+ pp_string (pp, "constraint manager:");
+ pp_newline (pp);
+ m_constraints->dump_to_pp (pp);
+}
+
+/* Dump a multiline representation of this model to FILE. */
+
+void
+region_model::dump (FILE *fp, bool summarize) const
+{
+ pretty_printer pp;
+ pp_format_decoder (&pp) = default_tree_printer;
+ pp_show_color (&pp) = pp_show_color (global_dc->printer);
+ pp.buffer->stream = fp;
+ dump_to_pp (&pp, summarize);
+ pp_flush (&pp);
+}
+
+/* Dump a multiline representation of this model to stderr. */
+
+DEBUG_FUNCTION void
+region_model::dump (bool summarize) const
+{
+ dump (stderr, summarize);
+}
+
+/* Dump this model fully to stderr (i.e. without summarization). */
+
+DEBUG_FUNCTION void
+region_model::debug () const
+{
+ dump (false);
+}
+
+/* Dump VEC to PP, in the form "{VEC elements}: LABEL". */
+
+static void
+dump_vec_of_tree (pretty_printer *pp,
+ bool *is_first,
+ const auto_vec<tree> &vec,
+ const char *label)
+{
+ if (vec.length () == 0)
+ return;
+
+ dump_separator (pp, is_first);
+ pp_printf (pp, "{");
+ unsigned i;
+ tree key;
+ FOR_EACH_VEC_ELT (vec, i, key)
+ {
+ if (i > 0)
+ pp_string (pp, ", ");
+ dump_tree (pp, key);
+ }
+ pp_printf (pp, "}: %s", label);
+}
+
+/* Dump *MAP_REGION to PP in compact form, updating *IS_FIRST.
+ Subroutine of region_model::dump_to_pp for use on stack frames and for
+ the "globals" region. */
+
+void
+region_model::dump_summary_of_map (pretty_printer *pp,
+ map_region *map_region,
+ bool *is_first) const
+{
+ /* Get the keys, sorted by tree_cmp. In particular, this ought
+ to alphabetize any decls. */
+ auto_vec<tree> keys (map_region->elements ());
+ for (map_region::iterator_t iter = map_region->begin ();
+ iter != map_region->end ();
+ ++iter)
+ {
+ tree key_a = (*iter).first;
+ keys.quick_push (key_a);
+ }
+ keys.qsort (tree_cmp);
+
+ /* Print pointers, constants, and poisoned values that aren't "uninit";
+ gather keys for unknown and uninit values. */
+ unsigned i;
+ tree key;
+ auto_vec<tree> unknown_keys;
+ auto_vec<tree> uninit_keys;
+ FOR_EACH_VEC_ELT (keys, i, key)
+ {
+ region_id child_rid = *map_region->get (key);
+
+ region *child_region = get_region (child_rid);
+ if (!child_region)
+ continue;
+ svalue_id sid = child_region->get_value_direct ();
+ if (sid.null_p ())
+ continue;
+ svalue *sval = get_svalue (sid);
+ switch (sval->get_kind ())
+ {
+ default:
+ gcc_unreachable ();
+ case SK_REGION:
+ {
+ region_svalue *region_sval = as_a <region_svalue *> (sval);
+ region_id pointee_rid = region_sval->get_pointee ();
+ tree pointee = get_representative_path_var (pointee_rid).m_tree;
+ dump_separator (pp, is_first);
+ dump_tree (pp, key);
+ pp_string (pp, ": ");
+ if (pointee)
+ {
+ pp_character (pp, '&');
+ dump_tree (pp, pointee);
+ }
+ else
+ pp_string (pp, "NULL");
+ }
+ break;
+ case SK_CONSTANT:
+ dump_separator (pp, is_first);
+ dump_tree (pp, key);
+ pp_string (pp, ": ");
+ dump_tree (pp, sval->dyn_cast_constant_svalue ()->get_constant ());
+ break;
+ case SK_UNKNOWN:
+ unknown_keys.safe_push (key);
+ break;
+ case SK_POISONED:
+ {
+ poisoned_svalue *poisoned_sval = as_a <poisoned_svalue *> (sval);
+ enum poison_kind pkind = poisoned_sval->get_poison_kind ();
+ if (pkind == POISON_KIND_UNINIT)
+ uninit_keys.safe_push (key);
+ else
+ {
+ dump_separator (pp, is_first);
+ dump_tree (pp, key);
+ pp_printf (pp, ": %s", poison_kind_to_str (pkind));
+ }
+ }
+ break;
+ case SK_SETJMP:
+ dump_separator (pp, is_first);
+ pp_printf (pp, "setjmp: EN: %i",
+ sval->dyn_cast_setjmp_svalue ()->get_index ());
+ break;
+ }
+ }
+
+ /* Print unknown and uninitialized values in consolidated form. */
+ dump_vec_of_tree (pp, is_first, unknown_keys, "unknown");
+ dump_vec_of_tree (pp, is_first, uninit_keys, "uninit");
+}
+
+/* Assert that this object is valid. */
+
+void
+region_model::validate () const
+{
+ /* Skip this in a release build. */
+#if !CHECKING_P
+ return;
+#endif
+
+ m_constraints->validate ();
+
+ unsigned i;
+ region *r;
+ FOR_EACH_VEC_ELT (m_regions, i, r)
+ r->validate (this);
+
+ // TODO: anything else?
+
+ /* Verify that the stack region (if any) has an "uninitialized" value. */
+ region *stack_region = get_root_region ()->get_stack_region (this);
+ if (stack_region)
+ {
+ svalue_id stack_value_sid = stack_region->get_value_direct ();
+ svalue *stack_value = get_svalue (stack_value_sid);
+ gcc_assert (stack_value->get_kind () == SK_POISONED);
+ poisoned_svalue *subclass = stack_value->dyn_cast_poisoned_svalue ();
+ gcc_assert (subclass);
+ gcc_assert (subclass->get_poison_kind () == POISON_KIND_UNINIT);
+ }
+}
+
+/* Global data for use by svalue_id_cmp_by_constant_svalue. */
+
+static region_model *svalue_id_cmp_by_constant_svalue_model = NULL;
+
+/* Comparator for use by region_model::canonicalize. */
+
+static int
+svalue_id_cmp_by_constant_svalue (const void *p1, const void *p2)
+{
+ const svalue_id *sid1 = (const svalue_id *)p1;
+ const svalue_id *sid2 = (const svalue_id *)p2;
+ gcc_assert (!sid1->null_p ());
+ gcc_assert (!sid2->null_p ());
+ gcc_assert (svalue_id_cmp_by_constant_svalue_model);
+ const svalue &sval1
+ = *svalue_id_cmp_by_constant_svalue_model->get_svalue (*sid1);
+ const svalue &sval2
+ = *svalue_id_cmp_by_constant_svalue_model->get_svalue (*sid2);
+ gcc_assert (sval1.get_kind () == SK_CONSTANT);
+ gcc_assert (sval2.get_kind () == SK_CONSTANT);
+
+ tree cst1 = ((const constant_svalue &)sval1).get_constant ();
+ tree cst2 = ((const constant_svalue &)sval2).get_constant ();
+ return tree_cmp (cst1, cst2);
+}
+
+/* Reorder the regions and svalues into a deterministic "canonical" order,
+ to maximize the chance of equality.
+ If non-NULL, notify CTXT about the svalue id remapping. */
+
+void
+region_model::canonicalize (region_model_context *ctxt)
+{
+ /* Walk all regions and values in a deterministic order, visiting
+ rids and sids, generating a rid and sid map. */
+ canonicalization c (*this);
+
+ /* (1): Walk all svalues, putting constants first, sorting the constants
+ (thus imposing an ordering on any constants that are purely referenced
+ by constraints).
+ Ignore other svalues for now. */
+ {
+ unsigned i;
+ auto_vec<svalue_id> sids;
+ svalue *sval;
+ FOR_EACH_VEC_ELT (m_svalues, i, sval)
+ {
+ if (sval->get_kind () == SK_CONSTANT)
+ sids.safe_push (svalue_id::from_int (i));
+ }
+ svalue_id_cmp_by_constant_svalue_model = this;
+ sids.qsort (svalue_id_cmp_by_constant_svalue);
+ svalue_id_cmp_by_constant_svalue_model = NULL;
+ svalue_id *sid;
+ FOR_EACH_VEC_ELT (sids, i, sid)
+ c.walk_sid (*sid);
+ }
+
+ /* (2): Walk all regions (and thus their values) in a deterministic
+ order. */
+ c.walk_rid (m_root_rid);
+
+ /* (3): Ensure we've visited everything, as we don't want to purge
+ at this stage. Anything we visit for the first time here has
+ arbitrary order. */
+ {
+ unsigned i;
+ region *region;
+ FOR_EACH_VEC_ELT (m_regions, i, region)
+ c.walk_rid (region_id::from_int (i));
+ svalue *sval;
+ FOR_EACH_VEC_ELT (m_svalues, i, sval)
+ c.walk_sid (svalue_id::from_int (i));
+ }
+
+ /* (4): We now have a reordering of the regions and values.
+ Apply it. */
+ remap_svalue_ids (c.m_sid_map);
+ remap_region_ids (c.m_rid_map);
+ if (ctxt)
+ ctxt->remap_svalue_ids (c.m_sid_map);
+
+ /* (5): Canonicalize the constraint_manager (it has already had its
+ svalue_ids remapped above). This makes use of the new svalue_id
+ values, and so must happen last. */
+ m_constraints->canonicalize (get_num_svalues ());
+
+ validate ();
+}
+
+/* Return true if this region_model is in canonical form. */
+
+bool
+region_model::canonicalized_p () const
+{
+ region_model copy (*this);
+ copy.canonicalize (NULL);
+ return *this == copy;
+}
+
+/* A subclass of pending_diagnostic for complaining about uses of
+ poisoned values. */
+
+class poisoned_value_diagnostic
+: public pending_diagnostic_subclass<poisoned_value_diagnostic>
+{
+public:
+ poisoned_value_diagnostic (tree expr, enum poison_kind pkind)
+ : m_expr (expr), m_pkind (pkind)
+ {}
+
+ const char *get_kind () const FINAL OVERRIDE { return "poisoned_value_diagnostic"; }
+
+ bool operator== (const poisoned_value_diagnostic &other) const
+ {
+ return m_expr == other.m_expr;
+ }
+
+ bool emit (rich_location *rich_loc) FINAL OVERRIDE
+ {
+ switch (m_pkind)
+ {
+ default:
+ gcc_unreachable ();
+ case POISON_KIND_UNINIT:
+ {
+ diagnostic_metadata m;
+ m.add_cwe (457); /* "CWE-457: Use of Uninitialized Variable". */
+ return warning_at (rich_loc, m,
+ OPT_Wanalyzer_use_of_uninitialized_value,
+ "use of uninitialized value %qE",
+ m_expr);
+ }
+ break;
+ case POISON_KIND_FREED:
+ {
+ diagnostic_metadata m;
+ m.add_cwe (416); /* "CWE-416: Use After Free". */
+ return warning_at (rich_loc, m,
+ OPT_Wanalyzer_use_after_free,
+ "use after %<free%> of %qE",
+ m_expr);
+ }
+ break;
+ case POISON_KIND_POPPED_STACK:
+ {
+ diagnostic_metadata m;
+ /* TODO: which CWE? */
+ return warning_at (rich_loc, m,
+ OPT_Wanalyzer_use_of_pointer_in_stale_stack_frame,
+ "use of pointer %qE within stale stack frame",
+ m_expr);
+ }
+ break;
+ }
+ }
+
+ label_text describe_final_event (const evdesc::final_event &ev) FINAL OVERRIDE
+ {
+ switch (m_pkind)
+ {
+ default:
+ gcc_unreachable ();
+ case POISON_KIND_UNINIT:
+ return ev.formatted_print ("use of uninitialized value %qE here",
+ m_expr);
+ case POISON_KIND_FREED:
+ return ev.formatted_print ("use after %<free%> of %qE here",
+ m_expr);
+ case POISON_KIND_POPPED_STACK:
+ return ev.formatted_print
+ ("use of pointer %qE within stale stack frame here",
+ m_expr);
+ }
+ }
+
+private:
+ tree m_expr;
+ enum poison_kind m_pkind;
+};
+
+/* Determine if EXPR is poisoned, and if so, queue a diagnostic to CTXT. */
+
+void
+region_model::check_for_poison (tree expr, region_model_context *ctxt)
+{
+ if (!ctxt)
+ return;
+
+ // TODO: this is disabled for now (too many false positives)
+ return;
+
+ svalue_id expr_sid = get_rvalue (expr, ctxt);
+ gcc_assert (!expr_sid.null_p ());
+ svalue *expr_svalue = get_svalue (expr_sid);
+ gcc_assert (expr_svalue);
+ if (const poisoned_svalue *poisoned_sval
+ = expr_svalue->dyn_cast_poisoned_svalue ())
+ {
+ enum poison_kind pkind = poisoned_sval->get_poison_kind ();
+ ctxt->warn (new poisoned_value_diagnostic (expr, pkind));
+ }
+}
+
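+/* Illustrative sketch (placeholder name; not compiled): the kind of code
+ the (currently disabled) check above is aimed at, where the rvalue is a
+ poisoned_svalue of kind POISON_KIND_UNINIT:
+
+   int test_uninit (void)
+   {
+     int i;
+     return i;   // "i" still has the poisoned "uninit" value here
+   }
+*/
+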
+/* Update this model for the ASSIGN stmt, using CTXT to report any
+ diagnostics. */
+
+void
+region_model::on_assignment (const gassign *assign, region_model_context *ctxt)
+{
+ tree lhs = gimple_assign_lhs (assign);
+ tree rhs1 = gimple_assign_rhs1 (assign);
+
+ region_id lhs_rid = get_lvalue (lhs, ctxt);
+
+ /* Check for uses of poisoned values. */
+ switch (get_gimple_rhs_class (gimple_expr_code (assign)))
+ {
+ case GIMPLE_INVALID_RHS:
+ gcc_unreachable ();
+ break;
+ case GIMPLE_TERNARY_RHS:
+ check_for_poison (gimple_assign_rhs3 (assign), ctxt);
+ /* Fallthru */
+ case GIMPLE_BINARY_RHS:
+ check_for_poison (gimple_assign_rhs2 (assign), ctxt);
+ /* Fallthru */
+ case GIMPLE_UNARY_RHS:
+ case GIMPLE_SINGLE_RHS:
+ check_for_poison (gimple_assign_rhs1 (assign), ctxt);
+ }
+
+ if (lhs_rid.null_p ())
+ return;
+ // TODO: issue a warning for this case
+
+ enum tree_code op = gimple_assign_rhs_code (assign);
+ switch (op)
+ {
+ default:
+ {
+ if (0)
+ sorry_at (assign->location, "unhandled assignment op: %qs",
+ get_tree_code_name (op));
+ set_to_new_unknown_value (lhs_rid, TREE_TYPE (lhs), ctxt);
+ }
+ break;
+
+ case BIT_FIELD_REF:
+ {
+ // TODO
+ }
+ break;
+
+ case CONSTRUCTOR:
+ {
+ /* e.g. "x ={v} {CLOBBER};" */
+ // TODO
+ }
+ break;
+
+ case POINTER_PLUS_EXPR:
+ {
+ /* e.g. "_1 = a_10(D) + 12;" */
+ tree ptr = rhs1;
+ tree offset = gimple_assign_rhs2 (assign);
+
+ svalue_id ptr_sid = get_rvalue (ptr, ctxt);
+ svalue_id offset_sid = get_rvalue (offset, ctxt);
+ region_id element_rid
+ = get_or_create_pointer_plus_expr (TREE_TYPE (TREE_TYPE (ptr)),
+ ptr_sid, offset_sid,
+ ctxt);
+ svalue_id element_ptr_sid
+ = get_or_create_ptr_svalue (TREE_TYPE (ptr), element_rid);
+ set_value (lhs_rid, element_ptr_sid, ctxt);
+ }
+ break;
+
+ case POINTER_DIFF_EXPR:
+ {
+ /* e.g. "_1 = p_2(D) - q_3(D);". */
+
+ /* TODO. */
+
+ set_to_new_unknown_value (lhs_rid, TREE_TYPE (lhs), ctxt);
+ }
+ break;
+
+ case ADDR_EXPR:
+ {
+ /* LHS = &RHS; */
+ svalue_id ptr_sid = get_rvalue (rhs1, ctxt);
+ set_value (lhs_rid, ptr_sid, ctxt);
+ }
+ break;
+
+ case MEM_REF:
+ {
+ region_id rhs_rid = get_lvalue (rhs1, ctxt);
+ svalue_id rhs_sid
+ = get_region (rhs_rid)->get_value (*this, true, ctxt);
+ set_value (lhs_rid, rhs_sid, ctxt);
+ }
+ break;
+
+ case REAL_CST:
+ case INTEGER_CST:
+ case ARRAY_REF:
+ {
+ /* LHS = RHS; */
+ svalue_id cst_sid = get_rvalue (rhs1, ctxt);
+ set_value (lhs_rid, cst_sid, ctxt);
+ }
+ break;
+
+ case FIX_TRUNC_EXPR:
+ case FLOAT_EXPR:
+ case NOP_EXPR:
+ // cast: TODO
+ // fall through for now
+ case SSA_NAME:
+ case VAR_DECL:
+ case PARM_DECL:
+ {
+ /* LHS = VAR; */
+ svalue_id var_sid = get_rvalue (rhs1, ctxt);
+ set_value (lhs_rid, var_sid, ctxt);
+ }
+ break;
+
+ case EQ_EXPR:
+ case GE_EXPR:
+ case LE_EXPR:
+ case NE_EXPR:
+ case GT_EXPR:
+ case LT_EXPR:
+ {
+ tree rhs2 = gimple_assign_rhs2 (assign);
+
+ // TODO: constraints between svalues
+ svalue_id rhs1_sid = get_rvalue (rhs1, ctxt);
+ svalue_id rhs2_sid = get_rvalue (rhs2, ctxt);
+
+ tristate t = eval_condition (rhs1_sid, op, rhs2_sid);
+ if (t.is_known ())
+ set_value (lhs_rid,
+ get_rvalue (t.is_true ()
+ ? boolean_true_node
+ : boolean_false_node,
+ ctxt),
+ ctxt);
+ else
+ set_to_new_unknown_value (lhs_rid, TREE_TYPE (lhs), ctxt);
+ }
+ break;
+
+ case NEGATE_EXPR:
+ case BIT_NOT_EXPR:
+ {
+ // TODO: unary ops
+
+ // TODO: constant?
+
+ set_to_new_unknown_value (lhs_rid, TREE_TYPE (lhs), ctxt);
+ }
+ break;
+
+ case PLUS_EXPR:
+ case MINUS_EXPR:
+ case MULT_EXPR:
+ case TRUNC_DIV_EXPR:
+ case TRUNC_MOD_EXPR:
+ case LSHIFT_EXPR:
+ case RSHIFT_EXPR:
+ case BIT_IOR_EXPR:
+ case BIT_XOR_EXPR:
+ case BIT_AND_EXPR:
+ case MIN_EXPR:
+ case MAX_EXPR:
+ {
+ /* Binary ops. */
+ tree rhs2 = gimple_assign_rhs2 (assign);
+
+ svalue_id rhs1_sid = get_rvalue (rhs1, ctxt);
+ svalue_id rhs2_sid = get_rvalue (rhs2, ctxt);
+
+ if (tree rhs1_cst = maybe_get_constant (rhs1_sid))
+ if (tree rhs2_cst = maybe_get_constant (rhs2_sid))
+ {
+ tree result = fold_build2 (op, TREE_TYPE (lhs),
+ rhs1_cst, rhs2_cst);
+ if (CONSTANT_CLASS_P (result))
+ {
+ svalue_id result_sid
+ = get_or_create_constant_svalue (result);
+ set_value (lhs_rid, result_sid, ctxt);
+ return;
+ }
+ }
+ set_to_new_unknown_value (lhs_rid, TREE_TYPE (lhs), ctxt);
+ }
+ break;
+
+ case COMPONENT_REF:
+ {
+ /* LHS = op0.op1; */
+ region_id child_rid = get_lvalue (rhs1, ctxt);
+ svalue_id child_sid
+ = get_region (child_rid)->get_value (*this, true, ctxt);
+ set_value (lhs_rid, child_sid, ctxt);
+ }
+ break;
+ }
+}
+
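+/* Illustrative sketch (placeholder name; not compiled): for binary ops
+ on two known constants, the switch above folds the result to a
+ constant svalue rather than assigning a fresh unknown value:
+
+   int test_fold (void)
+   {
+     int a = 3;
+     int b = 4;
+     return a + b;   // PLUS_EXPR on constants: lhs gets the constant 7
+   }
+*/
+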
+/* Update this model for the CALL stmt, using CTXT to report any
+ diagnostics - the first half.
+
+ Updates to the region_model that should be made *before* sm-states
+ are updated are done here; other updates to the region_model are done
+ in region_model::on_call_post. */
+
+void
+region_model::on_call_pre (const gcall *call, region_model_context *ctxt)
+{
+ region_id lhs_rid;
+ tree lhs_type = NULL_TREE;
+ if (tree lhs = gimple_call_lhs (call))
+ {
+ lhs_rid = get_lvalue (lhs, ctxt);
+ lhs_type = TREE_TYPE (lhs);
+ }
+
+ /* Check for uses of poisoned values.
+ For now, special-case "free", to avoid warning about "use-after-free"
+ when "double free" would be more precise. */
+ if (!is_special_named_call_p (call, "free", 1))
+ for (unsigned i = 0; i < gimple_call_num_args (call); i++)
+ check_for_poison (gimple_call_arg (call, i), ctxt);
+
+ if (tree callee_fndecl = get_fndecl_for_call (call, ctxt))
+ {
+ if (is_named_call_p (callee_fndecl, "malloc", call, 1))
+ {
+ // TODO: capture size as a svalue?
+ region_id new_rid = add_new_malloc_region ();
+ if (!lhs_rid.null_p ())
+ {
+ svalue_id ptr_sid
+ = get_or_create_ptr_svalue (lhs_type, new_rid);
+ set_value (lhs_rid, ptr_sid, ctxt);
+ }
+ return;
+ }
+ else if (is_named_call_p (callee_fndecl, "__builtin_alloca", call, 1))
+ {
+ region_id frame_rid = get_current_frame_id ();
+ region_id new_rid
+ = add_region (new symbolic_region (frame_rid, false));
+ if (!lhs_rid.null_p ())
+ {
+ svalue_id ptr_sid
+ = get_or_create_ptr_svalue (lhs_type, new_rid);
+ set_value (lhs_rid, ptr_sid, ctxt);
+ }
+ return;
+ }
+ else if (is_named_call_p (callee_fndecl, "strlen", call, 1))
+ {
+ region_id buf_rid = deref_rvalue (gimple_call_arg (call, 0), ctxt);
+ svalue_id buf_sid
+ = get_region (buf_rid)->get_value (*this, true, ctxt);
+ if (tree cst_expr = maybe_get_constant (buf_sid))
+ {
+ if (TREE_CODE (cst_expr) == STRING_CST
+ && !lhs_rid.null_p ())
+ {
+ /* TREE_STRING_LENGTH is sizeof, not strlen. */
+ int sizeof_cst = TREE_STRING_LENGTH (cst_expr);
+ int strlen_cst = sizeof_cst - 1;
+ tree t_cst = build_int_cst (lhs_type, strlen_cst);
+ svalue_id result_sid
+ = get_or_create_constant_svalue (t_cst);
+ set_value (lhs_rid, result_sid, ctxt);
+ return;
+ }
+ }
+ /* Otherwise an unknown value. */
+ }
+ else if (is_named_call_p (callee_fndecl,
+ "__analyzer_dump_num_heap_regions", call, 0))
+ {
+ /* Handle the builtin "__analyzer_dump_num_heap_regions" by emitting
+ a warning (for use in DejaGnu tests). */
+ int num_heap_regions = 0;
+ region_id heap_rid = get_root_region ()->ensure_heap_region (this);
+ unsigned i;
+ region *region;
+ FOR_EACH_VEC_ELT (m_regions, i, region)
+ if (region->get_parent () == heap_rid)
+ num_heap_regions++;
+ /* Use quotes to ensure the output isn't truncated. */
+ warning_at (call->location, 0,
+ "num heap regions: %qi", num_heap_regions);
+ }
+ }
+
+ /* Unrecognized call. */
+
+ /* Unknown return value. */
+ if (!lhs_rid.null_p ())
+ set_to_new_unknown_value (lhs_rid, lhs_type, ctxt);
+
+ /* TODO: also, any pointer arguments might have been written through,
+ or the things they point to (implying a graph traversal, which
+ presumably we need to do before overwriting the old value). */
+}
+
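+/* Illustrative sketch (placeholder name; not compiled): the special
+ cases above in action; strlen of a string literal folds to a constant,
+ and the result of malloc points to a freshly-added heap region:
+
+   #include <stdlib.h>
+   #include <string.h>
+
+   void *test_calls (void)
+   {
+     size_t n = strlen ("foo");   // folded to the constant 3
+     return malloc (n + 1);       // points to a new heap-allocated region
+   }
+*/
+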
+/* Update this model for the CALL stmt, using CTXT to report any
+ diagnostics - the second half.
+
+ Updates to the region_model that should be made *after* sm-states
+ are updated are done here; other updates to the region_model are done
+ in region_model::on_call_pre. */
+
+void
+region_model::on_call_post (const gcall *call, region_model_context *ctxt)
+{
+ /* Update for "free" here, after sm-handling.
+
+ If the ptr points to an underlying heap region, delete the region,
+ poisoning pointers to it and regions within it.
+
+ We delay this until after sm-state has been updated so that the
+ sm-handling can transition all of the various casts of the pointer
+ to a "freed" state *before* we delete the related region here.
+
+ This has to be done here so that the sm-handling can use the fact
+ that they point to the same region to establish that they are equal
+ (in region_model::eval_condition_without_cm), and thus transition
+ all pointers to the region to the "freed" state together, regardless
+ of casts. */
+ if (tree callee_fndecl = get_fndecl_for_call (call, ctxt))
+ if (is_named_call_p (callee_fndecl, "free", call, 1))
+ {
+ tree ptr = gimple_call_arg (call, 0);
+ svalue_id ptr_sid = get_rvalue (ptr, ctxt);
+ svalue *ptr_sval = get_svalue (ptr_sid);
+ if (region_svalue *ptr_to_region_sval
+ = ptr_sval->dyn_cast_region_svalue ())
+ {
+ /* If the ptr points to an underlying heap region, delete it,
+ poisoning pointers. */
+ region_id pointee_rid = ptr_to_region_sval->get_pointee ();
+ region_id heap_rid = get_root_region ()->ensure_heap_region (this);
+ if (!pointee_rid.null_p ()
+ && get_region (pointee_rid)->get_parent () == heap_rid)
+ {
+ purge_stats stats;
+ delete_region_and_descendents (pointee_rid,
+ POISON_KIND_FREED,
+ &stats, ctxt->get_logger ());
+ purge_unused_svalues (&stats, ctxt);
+ validate ();
+ // TODO: do anything with stats?
+ }
+ }
+ return;
+ }
+}
+
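+/* Illustrative sketch (placeholder name; not compiled): why the "free"
+ handling is deferred until after the sm-state updates; both pointers
+ below refer to the same heap region, so they can be transitioned to
+ "freed" together before the region is deleted and poisoned:
+
+   #include <stdlib.h>
+
+   void test_free (void)
+   {
+     void *p = malloc (16);
+     char *q = (char *)p;   // alias of the same heap region
+     free (p);              // region deleted; p and q both become "freed"
+     free (q);              // a second free of the same region
+   }
+*/
+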
+/* Update this model for the RETURN_STMT, using CTXT to report any
+ diagnostics. */
+
+void
+region_model::on_return (const greturn *return_stmt, region_model_context *ctxt)
+{
+ tree callee = get_current_function ()->decl;
+ tree lhs = DECL_RESULT (callee);
+ tree rhs = gimple_return_retval (return_stmt);
+
+ if (lhs && rhs)
+ set_value (get_lvalue (lhs, ctxt), get_rvalue (rhs, ctxt), ctxt);
+}
+
+/* Update this model for a call and return of "setjmp" at CALL within ENODE,
+ using CTXT to report any diagnostics.
+
+ This is for the initial direct invocation of setjmp (which returns 0),
+ as opposed to any second return due to longjmp. */
+
+void
+region_model::on_setjmp (const gcall *call, const exploded_node *enode,
+ region_model_context *ctxt)
+{
+ region_id buf_rid = deref_rvalue (gimple_call_arg (call, 0), ctxt);
+ region *buf = get_region (buf_rid);
+
+ /* Create a setjmp_svalue for ENODE and store it in BUF_RID's region. */
+ if (buf)
+ {
+ svalue *sval = new setjmp_svalue (enode, buf->get_type ());
+ svalue_id new_sid = add_svalue (sval);
+ set_value (buf_rid, new_sid, ctxt);
+ }
+
+ /* Direct calls to setjmp return 0. */
+ if (tree lhs = gimple_call_lhs (call))
+ {
+ tree zero = build_int_cst (TREE_TYPE (lhs), 0);
+ svalue_id new_sid = get_or_create_constant_svalue (zero);
+ region_id lhs_rid = get_lvalue (lhs, ctxt);
+ set_value (lhs_rid, new_sid, ctxt);
+ }
+}
+
+/* Update this region_model for rewinding from a "longjmp" at LONGJMP_CALL
+ to a "setjmp" at SETJMP_CALL where the final stack depth should be
+ SETJMP_STACK_DEPTH. Purge any stack frames, potentially reporting on
+ leaks to CTXT. */
+
+void
+region_model::on_longjmp (const gcall *longjmp_call, const gcall *setjmp_call,
+ int setjmp_stack_depth,
+ region_model_context *ctxt)
+{
+ /* Evaluate the val, using the frame of the "longjmp". */
+ tree fake_retval = gimple_call_arg (longjmp_call, 1);
+ svalue_id fake_retval_sid = get_rvalue (fake_retval, ctxt);
+
+ /* Pop any frames until we reach the stack depth of the function where
+ setjmp was called. */
+ gcc_assert (get_stack_depth () >= setjmp_stack_depth);
+ while (get_stack_depth () > setjmp_stack_depth)
+ {
+ /* Don't purge unused svalues yet, as we're using fake_retval_sid. */
+ pop_frame (false, NULL, ctxt);
+ }
+
+ gcc_assert (get_stack_depth () == setjmp_stack_depth);
+
+ /* Assign to the LHS of the "setjmp" call. */
+ if (tree lhs = gimple_call_lhs (setjmp_call))
+ {
+ /* Passing 0 as the val to longjmp leads to setjmp returning 1. */
+ tree t_zero = build_int_cst (TREE_TYPE (fake_retval), 0);
+ svalue_id zero_sid = get_or_create_constant_svalue (t_zero);
+ tristate eq_zero = eval_condition (fake_retval_sid, EQ_EXPR, zero_sid);
+ /* If we have 0, use 1. */
+ if (eq_zero.is_true ())
+ {
+ tree t_one = build_int_cst (TREE_TYPE (fake_retval), 1);
+ svalue_id one_sid = get_or_create_constant_svalue (t_one);
+ fake_retval_sid = one_sid;
+ }
+ else
+ {
+ /* Otherwise note that the value is nonzero. */
+ m_constraints->add_constraint (fake_retval_sid, NE_EXPR, zero_sid);
+ }
+
+ region_id lhs_rid = get_lvalue (lhs, ctxt);
+ set_value (lhs_rid, fake_retval_sid, ctxt);
+ }
+
+ /* Now that we've assigned the fake_retval, we can purge the unused
+ svalues, which could detect leaks. */
+ purge_unused_svalues (NULL, ctxt, NULL);
+ validate ();
+}
+
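+/* Illustrative sketch (placeholder names; not compiled): the direct call
+ to setjmp is modelled as returning 0, and rewinding via longjmp with a
+ val of 0 is modelled as setjmp returning 1, per the code above:
+
+   #include <setjmp.h>
+
+   static jmp_buf env;
+
+   static void inner (void)
+   {
+     longjmp (env, 0);   // a val of 0 is seen as 1 at the setjmp site
+   }
+
+   void test_setjmp (void)
+   {
+     if (setjmp (env) == 0)   // 0 on the direct call; 1 after the longjmp
+       inner ();
+   }
+*/
+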
+/* Update this region_model for a phi stmt of the form
+ LHS = PHI <...RHS...>.
+ where RHS is for the appropriate edge. */
+
+void
+region_model::handle_phi (tree lhs, tree rhs, bool is_back_edge,
+ region_model_context *ctxt)
+{
+ /* For now, don't bother tracking the .MEM SSA names. */
+ if (tree var = SSA_NAME_VAR (lhs))
+ if (TREE_CODE (var) == VAR_DECL)
+ if (VAR_DECL_IS_VIRTUAL_OPERAND (var))
+ return;
+
+ svalue_id rhs_sid = get_rvalue (rhs, ctxt);
+
+ if (is_back_edge && get_svalue (rhs_sid)->get_kind () != SK_UNKNOWN)
+ {
+ /* If we have a back edge, we probably have a loop.
+ Use an unknown value, to avoid effectively unrolling the
+ loop.
+ To terminate, we need to avoid generating a series of
+ models with an unbounded monotonically increasing number of
+ redundant unknown values; hence we need to purge svalues
+ before inserting the state into the exploded graph, to
+ collect unused svalues. */
+ set_to_new_unknown_value (get_lvalue (lhs, ctxt), TREE_TYPE (lhs), ctxt);
+ }
+ else
+ set_value (get_lvalue (lhs, ctxt), rhs_sid, ctxt);
+}
+
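+/* Illustrative sketch (placeholder name; not compiled): at the head of
+ the loop below there is a phi for "sum" merging the initial value with
+ the back-edge value; the latter is replaced with a fresh unknown value
+ so that the analysis doesn't effectively unroll the loop:
+
+   int test_loop (int n)
+   {
+     int sum = 0;
+     for (int i = 0; i < n; i++)
+       sum += i;
+     return sum;
+   }
+*/
+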
+/* Implementation of region_model::get_lvalue; the latter adds type-checking.
+
+ Get the id of the region for PV within this region_model,
+ emitting any diagnostics to CTXT. */
+
+region_id
+region_model::get_lvalue_1 (path_var pv, region_model_context *ctxt)
+{
+ tree expr = pv.m_tree;
+
+ gcc_assert (expr);
+
+ switch (TREE_CODE (expr))
+ {
+ default:
+ gcc_unreachable ();
+
+ case ARRAY_REF:
+ {
+ tree array = TREE_OPERAND (expr, 0);
+ tree index = TREE_OPERAND (expr, 1);
+#if 0
+ // TODO: operands 2 and 3, if present:
+ gcc_assert (TREE_OPERAND (expr, 2) == NULL_TREE);
+ gcc_assert (TREE_OPERAND (expr, 3) == NULL_TREE);
+#endif
+
+ region_id array_rid = get_lvalue (array, ctxt);
+ svalue_id index_sid = get_rvalue (index, ctxt);
+ array_region *array_reg = get_region<array_region> (array_rid);
+ return array_reg->get_element (this, array_rid, index_sid, ctxt);
+ }
+ break;
+
+ case MEM_REF:
+ {
+ tree ptr = TREE_OPERAND (expr, 0);
+ tree offset = TREE_OPERAND (expr, 1);
+ svalue_id ptr_sid = get_rvalue (ptr, ctxt);
+ svalue_id offset_sid = get_rvalue (offset, ctxt);
+ return get_or_create_mem_ref (TREE_TYPE (expr), ptr_sid,
+ offset_sid, ctxt);
+ }
+ break;
+
+ case VAR_DECL:
+ /* Handle globals. */
+ if (is_global_var (expr))
+ {
+ region_id globals_rid
+ = get_root_region ()->ensure_globals_region (this);
+ map_region *globals = get_region<map_region> (globals_rid);
+ region_id var_rid = globals->get_or_create (this, globals_rid, expr,
+ TREE_TYPE (expr));
+ return var_rid;
+ }
+
+ /* Fall through. */
+
+ case SSA_NAME:
+ case PARM_DECL:
+ case RESULT_DECL:
+ {
+ gcc_assert (TREE_CODE (expr) == SSA_NAME
+ || TREE_CODE (expr) == PARM_DECL
+ || TREE_CODE (expr) == VAR_DECL
+ || TREE_CODE (expr) == RESULT_DECL);
+
+ int stack_depth = pv.m_stack_depth;
+ stack_region *stack = get_root_region ()->get_stack_region (this);
+ gcc_assert (stack);
+ region_id frame_rid = stack->get_frame_rid (stack_depth);
+ frame_region *frame = get_region <frame_region> (frame_rid);
+ gcc_assert (frame);
+ region_id child_rid = frame->get_or_create (this, frame_rid, expr,
+ TREE_TYPE (expr));
+ return child_rid;
+ }
+
+ case COMPONENT_REF:
+ {
+ /* obj.field */
+ tree obj = TREE_OPERAND (expr, 0);
+ tree field = TREE_OPERAND (expr, 1);
+ region_id obj_rid = get_lvalue (obj, ctxt);
+ region_id struct_or_union_rid
+ = get_or_create_view (obj_rid, TREE_TYPE (obj));
+ return get_field_region (struct_or_union_rid, field);
+ }
+ break;
+
+ case STRING_CST:
+ {
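+ /* A string literal: build an array_region of the literal's type
+ and store the constant value within it. */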
+ tree cst_type = TREE_TYPE (expr);
+ array_region *cst_region = new array_region (m_root_rid, cst_type);
+ region_id cst_rid = add_region (cst_region);
+ svalue_id cst_sid = get_or_create_constant_svalue (expr);
+ cst_region->set_value (*this, cst_rid, cst_sid, ctxt);
+ return cst_rid;
+ }
+ break;
+ }
+}
+
+/* Assert that SRC_TYPE can be converted to DST_TYPE as a no-op. */
+
+#define ASSERT_COMPAT_TYPES(SRC_TYPE, DST_TYPE) \
+ gcc_checking_assert (useless_type_conversion_p ((SRC_TYPE), (DST_TYPE)))
+
+/* Get the id of the region for PV within this region_model,
+ emitting any diagnostics to CTXT. */
+
+region_id
+region_model::get_lvalue (path_var pv, region_model_context *ctxt)
+{
+ if (pv.m_tree == NULL_TREE)
+ return region_id::null ();
+
+ region_id result_rid = get_lvalue_1 (pv, ctxt);
+ ASSERT_COMPAT_TYPES (get_region (result_rid)->get_type (),
+ TREE_TYPE (pv.m_tree));
+ return result_rid;
+}
+
+/* Get the region_id for EXPR within this region_model (assuming the most
+ recent stack frame if it's a local). */
+
+region_id
+region_model::get_lvalue (tree expr, region_model_context *ctxt)
+{
+ return get_lvalue (path_var (expr, get_stack_depth () - 1), ctxt);
+}
+
+/* Implementation of region_model::get_rvalue; the latter adds type-checking.
+
+ Get the value of PV within this region_model,
+ emitting any diagnostics to CTXT. */
+
+svalue_id
+region_model::get_rvalue_1 (path_var pv, region_model_context *ctxt)
+{
+ gcc_assert (pv.m_tree);
+
+ switch (TREE_CODE (pv.m_tree))
+ {
+ default:
+ {
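+ /* Any other tree code: fall back to a fresh unknown_svalue. */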
+ svalue *unknown_sval = new unknown_svalue (TREE_TYPE (pv.m_tree));
+ return add_svalue (unknown_sval);
+ }
+ break;
+
+ case ADDR_EXPR:
+ {
+ /* "&EXPR". */
+ tree expr = pv.m_tree;
+ tree op0 = TREE_OPERAND (expr, 0);
+ if (TREE_CODE (op0) == FUNCTION_DECL)
+ return get_svalue_for_fndecl (TREE_TYPE (expr), op0);
+ else if (TREE_CODE (op0) == LABEL_DECL)
+ return get_svalue_for_label (TREE_TYPE (expr), op0);
+ region_id expr_rid = get_lvalue (op0, ctxt);
+ return get_or_create_ptr_svalue (TREE_TYPE (expr), expr_rid);
+ }
+ break;
+
+ case ARRAY_REF:
+ {
+ region_id element_rid = get_lvalue (pv, ctxt);
+ return get_region (element_rid)->get_value (*this, true, ctxt);
+ }
+
+ case INTEGER_CST:
+ case REAL_CST:
+ case STRING_CST:
+ return get_or_create_constant_svalue (pv.m_tree);
+
+ case COMPONENT_REF:
+ case MEM_REF:
+ case SSA_NAME:
+ case VAR_DECL:
+ case PARM_DECL:
+ case RESULT_DECL:
+ {
+ region_id var_rid = get_lvalue (pv, ctxt);
+ return get_region (var_rid)->get_value (*this, true, ctxt);
+ }
+ }
+}
+
+/* Get the value of PV within this region_model,
+ emitting any diagnostics to CTXT. */
+
+svalue_id
+region_model::get_rvalue (path_var pv, region_model_context *ctxt)
+{
+ if (pv.m_tree == NULL_TREE)
+ return svalue_id::null ();
+ svalue_id result_sid = get_rvalue_1 (pv, ctxt);
+
+ ASSERT_COMPAT_TYPES (get_svalue (result_sid)->get_type (),
+ TREE_TYPE (pv.m_tree));
+
+ return result_sid;
+}
+
+/* Get the value of EXPR within this region_model (assuming the most
+ recent stack frame if it's a local). */
+
+svalue_id
+region_model::get_rvalue (tree expr, region_model_context *ctxt)
+{
+ return get_rvalue (path_var (expr, get_stack_depth () - 1), ctxt);
+}
+
+/* Return an svalue_id for a pointer to RID of type PTR_TYPE, reusing
+ an existing pointer value if one of the right type is available. */
+
+svalue_id
+region_model::get_or_create_ptr_svalue (tree ptr_type, region_id rid)
+{
+ /* Reuse existing region_svalue, if one of the right type is
+ available. */
+ /* In theory we could stash a svalue_id in "region", but differing
+ pointer types muddle things.
+ For now, just do a linear search through all existing svalues. */
+ int i;
+ svalue *svalue;
+ FOR_EACH_VEC_ELT (m_svalues, i, svalue)
+ if (region_svalue *ptr_svalue = svalue->dyn_cast_region_svalue ())
+ if (ptr_svalue->get_pointee () == rid
+ && ptr_svalue->get_type () == ptr_type)
+ return svalue_id::from_int (i);
+
+ return add_svalue (new region_svalue (ptr_type, rid));
+}
+
+/* Return an svalue_id for a constant_svalue for CST_EXPR,
+ creating the constant_svalue if necessary.
+ The constant_svalue instances are reused, based on pointer equality
+ of trees. */
+
+svalue_id
+region_model::get_or_create_constant_svalue (tree cst_expr)
+{
+ gcc_assert (cst_expr);
+
+ /* Reuse one if it already exists. */
+ // TODO: maybe store a map, rather than do linear search?
+ int i;
+ svalue *svalue;
+ FOR_EACH_VEC_ELT (m_svalues, i, svalue)
+ if (svalue->maybe_get_constant () == cst_expr)
+ return svalue_id::from_int (i);
+
+ svalue_id cst_sid = add_svalue (new constant_svalue (cst_expr));
+ return cst_sid;
+}
+
+/* Return an svalue_id for a region_svalue for FNDECL,
+ creating the function_region if necessary. */
+
+svalue_id
+region_model::get_svalue_for_fndecl (tree ptr_type, tree fndecl)
+{
+ gcc_assert (TREE_CODE (fndecl) == FUNCTION_DECL);
+ region_id function_rid = get_region_for_fndecl (fndecl);
+ return get_or_create_ptr_svalue (ptr_type, function_rid);
+}
+
+/* Return a region_id for a function_region for FNDECL,
+ creating it if necessary. */
+
+region_id
+region_model::get_region_for_fndecl (tree fndecl)
+{
+ gcc_assert (TREE_CODE (fndecl) == FUNCTION_DECL);
+
+ region_id code_rid = get_root_region ()->ensure_code_region (this);
+ code_region *code = get_root_region ()->get_code_region (this);
+
+ return code->get_or_create (this, code_rid, fndecl, TREE_TYPE (fndecl));
+}
+
+/* Return an svalue_id for a region_svalue for LABEL,
+ creating the label_region if necessary. */
+
+svalue_id
+region_model::get_svalue_for_label (tree ptr_type, tree label)
+{
+ gcc_assert (TREE_CODE (label) == LABEL_DECL);
+ region_id label_rid = get_region_for_label (label);
+ return get_or_create_ptr_svalue (ptr_type, label_rid);
+}
+
+/* Return a region_id for a label_region for LABEL,
+ creating it if necessary. */
+
+region_id
+region_model::get_region_for_label (tree label)
+{
+ gcc_assert (TREE_CODE (label) == LABEL_DECL);
+
+ tree fndecl = DECL_CONTEXT (label);
+ gcc_assert (fndecl && TREE_CODE (fndecl) == FUNCTION_DECL);
+
+ region_id func_rid = get_region_for_fndecl (fndecl);
+ function_region *func_reg = get_region <function_region> (func_rid);
+ return func_reg->get_or_create (this, func_rid, label, TREE_TYPE (label));
+}
+
+/* Build a cast of SRC_EXPR to DST_TYPE, or return NULL_TREE.
+
+ Adapted from gcc::jit::playback::context::build_cast, which in turn is
+ adapted from
+ - c/c-typeck.c:build_c_cast
+ - c/c-convert.c: convert
+ - convert.h
+ Only some kinds of cast are currently supported here. */
+
+static tree
+build_cast (tree dst_type, tree src_expr)
+{
+ tree result = targetm.convert_to_type (dst_type, src_expr);
+ if (result)
+ return result;
+ enum tree_code dst_code = TREE_CODE (dst_type);
+ switch (dst_code)
+ {
+ case INTEGER_TYPE:
+ case ENUMERAL_TYPE:
+ result = convert_to_integer (dst_type, src_expr);
+ goto maybe_fold;
+
+ case BOOLEAN_TYPE:
+ /* Compare with c_objc_common_truthvalue_conversion and
+ c_common_truthvalue_conversion. */
+ /* For now, convert to: (src_expr != 0) */
+ result = build2 (NE_EXPR, dst_type,
+ src_expr,
+ build_int_cst (TREE_TYPE (src_expr), 0));
+ goto maybe_fold;
+
+ case REAL_TYPE:
+ result = convert_to_real (dst_type, src_expr);
+ goto maybe_fold;
+
+ case POINTER_TYPE:
+ result = build1 (NOP_EXPR, dst_type, src_expr);
+ goto maybe_fold;
+
+ default:
+ return NULL_TREE;
+
+ maybe_fold:
+ if (TREE_CODE (result) != C_MAYBE_CONST_EXPR)
+ result = fold (result);
+ return result;
+ }
+}
+
+/* If the type of SID's underlying value is DST_TYPE, return SID.
+ Otherwise, attempt to create (or reuse) an svalue representing an access
+ of SID as a DST_TYPE and return that value's svalue_id. */
+
+svalue_id
+region_model::maybe_cast_1 (tree dst_type, svalue_id sid)
+{
+ svalue *sval = get_svalue (sid);
+ tree src_type = sval->get_type ();
+ if (src_type == dst_type)
+ return sid;
+
+ if (POINTER_TYPE_P (dst_type)
+ && POINTER_TYPE_P (src_type))
+ {
+ /* Pointer to region. */
+ if (region_svalue *ptr_sval = sval->dyn_cast_region_svalue ())
+ return get_or_create_ptr_svalue (dst_type, ptr_sval->get_pointee ());
+
+ /* Unknown pointer? Get or create a new unknown pointer of the
+ correct type, preserving the equality between the pointers. */
+ if (sval->dyn_cast_unknown_svalue ())
+ {
+ equiv_class &ec = m_constraints->get_equiv_class (sid);
+
+ /* Look for an existing pointer of the correct type within the EC. */
+ int i;
+ svalue_id *equiv_sid;
+ FOR_EACH_VEC_ELT (ec.m_vars, i, equiv_sid)
+ {
+ svalue *equiv_val = get_svalue (*equiv_sid);
+ if (equiv_val->get_type () == dst_type)
+ return *equiv_sid;
+ }
+
+ /* Otherwise, create a new unknown pointer of the correct type. */
+ svalue *unknown_sval = new unknown_svalue (dst_type);
+ svalue_id new_ptr_sid = add_svalue (unknown_sval);
+ m_constraints->add_constraint (sid, EQ_EXPR, new_ptr_sid);
+ return new_ptr_sid;
+ }
+ }
+
+ /* Attempt to cast constants. */
+ if (tree src_cst = sval->maybe_get_constant ())
+ {
+ tree dst = build_cast (dst_type, src_cst);
+ gcc_assert (dst != NULL_TREE);
+ if (CONSTANT_CLASS_P (dst))
+ return get_or_create_constant_svalue (dst);
+ }
+
+ /* Otherwise, return a new unknown value. */
+ svalue *unknown_sval = new unknown_svalue (dst_type);
+ return add_svalue (unknown_sval);
+}
+
+/* If the type of SID's underlying value is DST_TYPE, return SID.
+ Otherwise, attempt to create (or reuse) an svalue representing an access
+ of SID as a DST_TYPE and return that value's svalue_id.
+
+ If the result != SID, then call CTXT's on_cast vfunc (if CTXT is non-NULL),
+ so that sm-state can be propagated from SID to the result. */
+
+svalue_id
+region_model::maybe_cast (tree dst_type, svalue_id sid,
+ region_model_context *ctxt)
+{
+ svalue_id result = maybe_cast_1 (dst_type, sid);
+ if (result != sid)
+ if (ctxt)
+ {
+ /* Notify ctxt about a cast, so any sm-state can be copied. */
+ ctxt->on_cast (sid, result);
+ }
+ return result;
+}
+
+/* Ensure that the region for OBJ_RID has a child region for FIELD;
+ return the child region's region_id. */
+
+region_id
+region_model::get_field_region (region_id struct_or_union_rid, tree field)
+{
+ struct_or_union_region *sou_reg
+ = get_region<struct_or_union_region> (struct_or_union_rid);
+
+ /* Inherit constness from parent type. */
+ const int qual_mask = TYPE_QUAL_CONST;
+ int sou_quals = TYPE_QUALS (sou_reg->get_type ()) & qual_mask;
+ tree field_type = TREE_TYPE (field);
+ tree field_type_with_quals = build_qualified_type (field_type, sou_quals);
+
+ // TODO: maybe convert to a vfunc?
+ if (sou_reg->get_kind () == RK_UNION)
+ {
+ /* Union.
+ Get a view of the union as a whole, with the type of the field. */
+ region_id view_rid
+ = get_or_create_view (struct_or_union_rid, field_type_with_quals);
+ return view_rid;
+ }
+ else
+ {
+ /* Struct. */
+ region_id child_rid
+ = sou_reg->get_or_create (this, struct_or_union_rid, field,
+ field_type_with_quals);
+ return child_rid;
+ }
+}
+
+/* Get a region_id for referencing PTR_SID, creating a region if need be, and
+ potentially generating warnings via CTXT. */
+
+region_id
+region_model::deref_rvalue (svalue_id ptr_sid, region_model_context *ctxt)
+{
+ gcc_assert (!ptr_sid.null_p ());
+ svalue *ptr_svalue = get_svalue (ptr_sid);
+ gcc_assert (ptr_svalue);
+
+ switch (ptr_svalue->get_kind ())
+ {
+ case SK_REGION:
+ {
+ region_svalue *region_sval = as_a <region_svalue *> (ptr_svalue);
+ return region_sval->get_pointee ();
+ }
+
+ case SK_CONSTANT:
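+ /* We don't know what a constant pointer points at, so share the
+ symbolic-region handling used for unknown pointers below. */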
+ goto create_symbolic_region;
+
+ case SK_POISONED:
+ {
+ if (ctxt)
+ if (tree ptr = get_representative_tree (ptr_sid))
+ {
+ poisoned_svalue *poisoned_sval
+ = as_a <poisoned_svalue *> (ptr_svalue);
+ enum poison_kind pkind = poisoned_sval->get_poison_kind ();
+ ctxt->warn (new poisoned_value_diagnostic (ptr, pkind));
+ }
+ goto create_symbolic_region;
+ }
+
+ case SK_UNKNOWN:
+ {
+ create_symbolic_region:
+ /* We need a symbolic_region to represent this unknown region.
+ We don't know if it is on the heap, stack, or a global,
+ so use the root region as parent. */
+ region_id new_rid
+ = add_region (new symbolic_region (m_root_rid, false));
+
+ /* We need to write the region back into the pointer,
+ or we'll get a new, different region each time.
+ We do this by changing the meaning of ptr_sid, replacing
+ the unknown value with the ptr to the new region.
+ We replace the meaning of the ID rather than simply writing
+ to PTR's lvalue since there could be several places sharing
+ the same unknown ptr value. */
+ svalue *ptr_val
+ = new region_svalue (ptr_svalue->get_type (), new_rid);
+ replace_svalue (ptr_sid, ptr_val);
+
+ return new_rid;
+ }
+
+ case SK_SETJMP:
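+ /* Likewise fall back to a symbolic region for SK_SETJMP values. */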
+ goto create_symbolic_region;
+ }
+
+ gcc_unreachable ();
+}
+
+/* Get a region_id for referencing PTR, creating a region if need be, and
+ potentially generating warnings via CTXT. */
+
+region_id
+region_model::deref_rvalue (tree ptr, region_model_context *ctxt)
+{
+ svalue_id ptr_sid = get_rvalue (ptr, ctxt);
+ return deref_rvalue (ptr_sid, ctxt);
+}
+
+/* Set the value of the region given by LHS_RID to the value given
+ by RHS_SID. */
+
+void
+region_model::set_value (region_id lhs_rid, svalue_id rhs_sid,
+ region_model_context *ctxt)
+{
+ gcc_assert (!lhs_rid.null_p ());
+ gcc_assert (!rhs_sid.null_p ());
+ get_region (lhs_rid)->set_value (*this, lhs_rid, rhs_sid, ctxt);
+}
+
+/* Determine what is known about the condition "LHS_SID OP RHS_SID" within
+ this model. */
+
+tristate
+region_model::eval_condition (svalue_id lhs_sid,
+ enum tree_code op,
+ svalue_id rhs_sid) const
+{
+ tristate ts = eval_condition_without_cm (lhs_sid, op, rhs_sid);
+
+ if (ts.is_known ())
+ return ts;
+
+ /* Otherwise, try constraints. */
+ return m_constraints->eval_condition (lhs_sid, op, rhs_sid);
+}
+
+/* Determine what is known about the condition "LHS_SID OP RHS_SID" within
+ this model, without resorting to the constraint_manager.
+
+ This is exposed so that impl_region_model_context::on_state_leak can
+ check for equality part-way through region_model::purge_unused_svalues
+ without risking creating new ECs. */
+
+tristate
+region_model::eval_condition_without_cm (svalue_id lhs_sid,
+ enum tree_code op,
+ svalue_id rhs_sid) const
+{
+ svalue *lhs = get_svalue (lhs_sid);
+ svalue *rhs = get_svalue (rhs_sid);
+ gcc_assert (lhs);
+ gcc_assert (rhs);
+
+ /* See what we know based on the values. */
+ if (lhs && rhs)
+ {
+ if (lhs == rhs)
+ {
+ /* If we have the same svalue, then we have equality.
+ TODO: should this definitely be the case for poisoned values? */
+ switch (op)
+ {
+ default:
+ gcc_unreachable ();
+
+ case EQ_EXPR:
+ case GE_EXPR:
+ case LE_EXPR:
+ return tristate::TS_TRUE;
+
+ case NE_EXPR:
+ case GT_EXPR:
+ case LT_EXPR:
+ return tristate::TS_FALSE;
+ }
+ }
+
+ /* If we have a pair of region_svalues, compare them. */
+ if (region_svalue *lhs_ptr = lhs->dyn_cast_region_svalue ())
+ if (region_svalue *rhs_ptr = rhs->dyn_cast_region_svalue ())
+ {
+ tristate res = region_svalue::eval_condition (lhs_ptr, op, rhs_ptr);
+ if (res.is_known ())
+ return res;
+ /* Otherwise, only known through constraints. */
+ }
+
+ /* If we have a pair of constants, compare them. */
+ if (constant_svalue *cst_lhs = lhs->dyn_cast_constant_svalue ())
+ if (constant_svalue *cst_rhs = rhs->dyn_cast_constant_svalue ())
+ return constant_svalue::eval_condition (cst_lhs, op, cst_rhs);
+
+ /* Handle comparison of a region_svalue against zero. */
+ if (region_svalue *ptr = lhs->dyn_cast_region_svalue ())
+ if (constant_svalue *cst_rhs = rhs->dyn_cast_constant_svalue ())
+ if (zerop (cst_rhs->get_constant ()))
+ {
+ /* A region_svalue is a non-NULL pointer, except in certain
+ special cases (see the comment for region::non_null_p). */
+ region *pointee = get_region (ptr->get_pointee ());
+ if (pointee->non_null_p (*this))
+ {
+ switch (op)
+ {
+ default:
+ gcc_unreachable ();
+
+ case EQ_EXPR:
+ case GE_EXPR:
+ case LE_EXPR:
+ return tristate::TS_FALSE;
+
+ case NE_EXPR:
+ case GT_EXPR:
+ case LT_EXPR:
+ return tristate::TS_TRUE;
+ }
+ }
+ }
+ }
+
+ return tristate::TS_UNKNOWN;
+}
+
+/* Attempt to add the constraint "LHS OP RHS" to this region_model.
+ If it is consistent with existing constraints, add it, and return true.
+ Return false if it contradicts existing constraints.
+ Use CTXT for reporting any diagnostics associated with the accesses. */
+
+bool
+region_model::add_constraint (tree lhs, enum tree_code op, tree rhs,
+ region_model_context *ctxt)
+{
+ svalue_id lhs_sid = get_rvalue (lhs, ctxt);
+ svalue_id rhs_sid = get_rvalue (rhs, ctxt);
+
+ tristate t_cond = eval_condition (lhs_sid, op, rhs_sid);
+
+ /* If we already have the condition, do nothing. */
+ if (t_cond.is_true ())
+ return true;
+
+ /* Reject a constraint that would contradict existing knowledge, as
+ unsatisfiable. */
+ if (t_cond.is_false ())
+ return false;
+
+ /* Store the constraint. */
+ m_constraints->add_constraint (lhs_sid, op, rhs_sid);
+
+ add_any_constraints_from_ssa_def_stmt (lhs, op, rhs, ctxt);
+
+ /* Notify the context, if any. This exists so that the state machines
+ in a program_state can be notified about the condition, and so can
+ set sm-state for e.g. unchecked->checked, both for cfg-edges, and
+ when synthesizing constraints as above. */
+ if (ctxt)
+ ctxt->on_condition (lhs, op, rhs);
+
+ return true;
+}
+
+/* Subroutine of region_model::add_constraint for handling optimized
+ && and || conditionals.
+
+ If we have an SSA_NAME for a boolean compared against 0,
+ look at anything implied by the def stmt and call add_constraint
+ for it (which could recurse).
+
+ For example, if we have
+ _1 = p_6 == 0B;
+ _2 = p_8 == 0B;
+ _3 = _1 | _2;
+ and add the constraint
+ (_3 == 0),
+ then the def stmt for _3 implies that _1 and _2 are both false,
+ and hence we can add the constraints:
+ p_6 != 0B
+ p_8 != 0B. */
+
+void
+region_model::add_any_constraints_from_ssa_def_stmt (tree lhs,
+ enum tree_code op,
+ tree rhs,
+ region_model_context *ctxt)
+{
+ if (TREE_CODE (lhs) != SSA_NAME)
+ return;
+
+ if (rhs != boolean_false_node)
+ return;
+
+ if (op != NE_EXPR && op != EQ_EXPR)
+ return;
+
+ /* We have either
+ - "LHS != false" (i.e. LHS is true), or
+ - "LHS == false" (i.e. LHS is false). */
+ bool is_true = op == NE_EXPR;
+
+ gimple *def_stmt = SSA_NAME_DEF_STMT (lhs);
+ gassign *assign = dyn_cast<gassign *> (def_stmt);
+ if (!assign)
+ return;
+
+ enum tree_code rhs_code = gimple_assign_rhs_code (assign);
+
+ switch (rhs_code)
+ {
+ default:
+ break;
+ case BIT_AND_EXPR:
+ {
+ if (is_true)
+ {
+ /* ...and "LHS == (rhs1 & rhs2)", i.e. "(rhs1 & rhs2)" is true,
+ then both rhs1 and rhs2 must be true. */
+ tree rhs1 = gimple_assign_rhs1 (assign);
+ tree rhs2 = gimple_assign_rhs2 (assign);
+ add_constraint (rhs1, NE_EXPR, boolean_false_node, ctxt);
+ add_constraint (rhs2, NE_EXPR, boolean_false_node, ctxt);
+ }
+ }
+ break;
+
+ case BIT_IOR_EXPR:
+ {
+ if (!is_true)
+ {
+ /* ...and "LHS == (rhs1 | rhs2)",
+ i.e. "(rhs1 | rhs2)" is false,
+ then both rhs1 and rhs2 must be false. */
+ tree rhs1 = gimple_assign_rhs1 (assign);
+ tree rhs2 = gimple_assign_rhs2 (assign);
+ add_constraint (rhs1, EQ_EXPR, boolean_false_node, ctxt);
+ add_constraint (rhs2, EQ_EXPR, boolean_false_node, ctxt);
+ }
+ }
+ break;
+
+ case EQ_EXPR:
+ case NE_EXPR:
+ {
+ /* ...and "LHS == (rhs1 OP rhs2)"
+ then rhs1 OP rhs2 must have the same logical value as LHS. */
+ tree rhs1 = gimple_assign_rhs1 (assign);
+ tree rhs2 = gimple_assign_rhs2 (assign);
+ if (!is_true)
+ rhs_code
+ = invert_tree_comparison (rhs_code, false /* honor_nans */);
+ add_constraint (rhs1, rhs_code, rhs2, ctxt);
+ }
+ break;
+ }
+}
+
+/* Determine what is known about the condition "LHS OP RHS" within
+ this model.
+ Use CTXT for reporting any diagnostics associated with the accesses. */
+
+tristate
+region_model::eval_condition (tree lhs,
+ enum tree_code op,
+ tree rhs,
+ region_model_context *ctxt)
+{
+ return eval_condition (get_rvalue (lhs, ctxt), op, get_rvalue (rhs, ctxt));
+}
+
+/* If SID is a constant value, return the underlying tree constant.
+ Otherwise, return NULL_TREE. */
+
+tree
+region_model::maybe_get_constant (svalue_id sid) const
+{
+ gcc_assert (!sid.null_p ());
+ svalue *sval = get_svalue (sid);
+ return sval->maybe_get_constant ();
+}
+
+/* Create a new child region of the heap (creating the heap region if
+ necessary).
+ Return the region_id of the new child region. */
+
+region_id
+region_model::add_new_malloc_region ()
+{
+ region_id heap_rid
+ = get_root_region ()->ensure_heap_region (this);
+ return add_region (new symbolic_region (heap_rid, true));
+}
+
+/* Attempt to return a tree that represents SID, or return NULL_TREE.
+ Find the first region that stores the value (e.g. a local) and
+ generate a representative tree for it. */
+
+tree
+region_model::get_representative_tree (svalue_id sid) const
+{
+ if (sid.null_p ())
+ return NULL_TREE;
+
+ unsigned i;
+ region *region;
+ FOR_EACH_VEC_ELT (m_regions, i, region)
+ if (sid == region->get_value_direct ())
+ {
+ path_var pv = get_representative_path_var (region_id::from_int (i));
+ if (pv.m_tree)
+ return pv.m_tree;
+ }
+
+ return maybe_get_constant (sid);
+}
+
+/* Attempt to return a path_var that represents the region, or return
+ the NULL path_var.
+ For example, a region for a field of a local would be a path_var
+ wrapping a COMPONENT_REF. */
+
+path_var
+region_model::get_representative_path_var (region_id rid) const
+{
+ region *reg = get_region (rid);
+ region *parent_region = get_region (reg->get_parent ());
+ region_id stack_rid = get_stack_region_id ();
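+ /* If the region is a direct child of a frame, use that frame's mapping
+ from child regions back to trees, at the frame's stack depth. */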
+ if (!stack_rid.null_p ())
+ if (parent_region->get_parent () == stack_rid)
+ {
+ frame_region *parent_frame = (frame_region *)parent_region;
+ tree t = parent_frame->get_tree_for_child_region (rid);
+ return path_var (t, parent_frame->get_depth ());
+ }
+ if (reg->get_parent () == get_globals_region_id ())
+ {
+ map_region *globals = get_root_region ()->get_globals_region (this);
+ if (globals)
+ return path_var (globals->get_tree_for_child_region (rid), -1);
+ }
+
+ /* Handle e.g. fields of a local by recursing. */
+ region_id parent_rid = reg->get_parent ();
+ region *parent_reg = get_region (parent_rid);
+ if (parent_reg)
+ {
+ if (parent_reg->get_kind () == RK_STRUCT)
+ {
+ map_region *parent_map_region = (map_region *)parent_reg;
+ /* This can fail if we have a view, rather than a field. */
+ if (tree child_key
+ = parent_map_region->get_tree_for_child_region (rid))
+ {
+ path_var parent_pv = get_representative_path_var (parent_rid);
+ if (parent_pv.m_tree && TREE_CODE (child_key) == FIELD_DECL)
+ return path_var (build3 (COMPONENT_REF,
+ TREE_TYPE (child_key),
+ parent_pv.m_tree, child_key,
+ NULL_TREE),
+ parent_pv.m_stack_depth);
+ }
+ }
+ }
+
+ return path_var (NULL_TREE, 0);
+}
+
+/* Locate all regions that directly have value SID and append representative
+ path_var instances for them into *OUT. */
+
+void
+region_model::get_path_vars_for_svalue (svalue_id sid, vec<path_var> *out) const
+{
+ unsigned i;
+ region *region;
+ FOR_EACH_VEC_ELT (m_regions, i, region)
+ if (sid == region->get_value_direct ())
+ {
+ path_var pv = get_representative_path_var (region_id::from_int (i));
+ if (pv.m_tree)
+ out->safe_push (pv);
+ }
+}
+
+/* Set the value of the region DST_RID to a new unknown value of type TYPE,
+ returning the new value's svalue_id. */
+
+svalue_id
+region_model::set_to_new_unknown_value (region_id dst_rid, tree type,
+ region_model_context *ctxt)
+{
+ gcc_assert (!dst_rid.null_p ());
+ svalue_id new_sid = add_svalue (new unknown_svalue (type));
+ set_value (dst_rid, new_sid, ctxt);
+
+ // TODO: presumably purge all child regions too (but do this in set_value?)
+
+ return new_sid;
+}
+
+/* Update this model for any phis in SNODE, assuming we came from
+ LAST_CFG_SUPEREDGE. */
+
+void
+region_model::update_for_phis (const supernode *snode,
+ const cfg_superedge *last_cfg_superedge,
+ region_model_context *ctxt)
+{
+ gcc_assert (last_cfg_superedge);
+
+ for (gphi_iterator gpi = const_cast<supernode *>(snode)->start_phis ();
+ !gsi_end_p (gpi); gsi_next (&gpi))
+ {
+ gphi *phi = gpi.phi ();
+
+ tree src = last_cfg_superedge->get_phi_arg (phi);
+ tree lhs = gimple_phi_result (phi);
+
+ /* Update next_state based on phi. */
+ bool is_back_edge = last_cfg_superedge->back_edge_p ();
+ handle_phi (lhs, src, is_back_edge, ctxt);
+ }
+}
+
+/* Attempt to update this model for taking EDGE (where the last statement
+ was LAST_STMT), returning true if the edge can be taken, false
+ otherwise.
+
+ For CFG superedges where LAST_STMT is a conditional or a switch
+ statement, attempt to add the relevant conditions for EDGE to this
+ model, returning true if they are feasible, or false if they are
+ impossible.
+
+ For call superedges, push frame information and store arguments
+ into parameters.
+
+ For return superedges, pop frame information and store the returned
+ value into the lhs of the call (if any).
+
+ Rejection of call/return superedges happens elsewhere, in
+ program_point::on_edge (i.e. based on program point, rather
+ than program state). */
+
+bool
+region_model::maybe_update_for_edge (const superedge &edge,
+ const gimple *last_stmt,
+ region_model_context *ctxt)
+{
+ /* Handle frame updates for interprocedural edges. */
+ switch (edge.m_kind)
+ {
+ default:
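+ /* Ordinary CFG edges don't affect the stack of frames. */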
+ break;
+
+ case SUPEREDGE_CALL:
+ {
+ const call_superedge *call_edge = as_a <const call_superedge *> (&edge);
+ update_for_call_superedge (*call_edge, ctxt);
+ }
+ break;
+
+ case SUPEREDGE_RETURN:
+ {
+ const return_superedge *return_edge
+ = as_a <const return_superedge *> (&edge);
+ update_for_return_superedge (*return_edge, ctxt);
+ }
+ break;
+
+ case SUPEREDGE_INTRAPROCEDURAL_CALL:
+ {
+ const callgraph_superedge *cg_sedge
+ = as_a <const callgraph_superedge *> (&edge);
+ update_for_call_summary (*cg_sedge, ctxt);
+ }
+ break;
+ }
+
+ if (last_stmt == NULL)
+ return true;
+
+ /* Apply any constraints for conditionals/switch statements. */
+
+ if (const gcond *cond_stmt = dyn_cast <const gcond *> (last_stmt))
+ {
+ const cfg_superedge *cfg_sedge = as_a <const cfg_superedge *> (&edge);
+ return apply_constraints_for_gcond (*cfg_sedge, cond_stmt, ctxt);
+ }
+
+ if (const gswitch *switch_stmt = dyn_cast <const gswitch *> (last_stmt))
+ {
+ const switch_cfg_superedge *switch_sedge
+ = as_a <const switch_cfg_superedge *> (&edge);
+ return apply_constraints_for_gswitch (*switch_sedge, switch_stmt, ctxt);
+ }
+
+ return true;
+}
+
+/* Push a new frame_region on to the stack region.
+ Populate the frame_region with child regions for the function call's
+ parameters, using values from the arguments at the callsite in the
+ caller's frame. */
+
+void
+region_model::update_for_call_superedge (const call_superedge &call_edge,
+ region_model_context *ctxt)
+{
+ /* Build a vec of argument svalue_id, using the current top
+ frame for resolving tree expressions. */
+ const gcall *call_stmt = call_edge.get_call_stmt ();
+ auto_vec<svalue_id> arg_sids (gimple_call_num_args (call_stmt));
+
+ for (unsigned i = 0; i < gimple_call_num_args (call_stmt); i++)
+ {
+ tree arg = gimple_call_arg (call_stmt, i);
+ arg_sids.quick_push (get_rvalue (arg, ctxt));
+ }
+
+ push_frame (call_edge.get_callee_function (), &arg_sids, ctxt);
+}
+
+/* Pop the top-most frame_region from the stack, and store the svalue
+ for any returned value into the region for the lvalue of the LHS of
+ the call (if any). */
+
+void
+region_model::update_for_return_superedge (const return_superedge &return_edge,
+ region_model_context *ctxt)
+{
+ purge_stats stats;
+ svalue_id result_sid = pop_frame (true, &stats, ctxt);
+ // TODO: do something with the stats?
+
+ /* Set the result of the call, within the caller frame. */
+ const gcall *call_stmt = return_edge.get_call_stmt ();
+ tree lhs = gimple_call_lhs (call_stmt);
+ if (lhs)
+ set_value (get_lvalue (lhs, ctxt), result_sid, ctxt);
+ else if (!result_sid.null_p ())
+ {
+ /* This could be a leak; try purging again, but this time,
+ don't special-case the result_sid. */
+ purge_stats stats;
+ purge_unused_svalues (&stats, ctxt);
+ }
+}
+
+/* Update this region_model with a summary of the effect of calling
+ and returning from CG_SEDGE.
+
+ TODO: Currently this is extremely simplistic: we merely set the
+ return value to "unknown". A proper implementation would e.g. update
+ sm-state, and presumably be reworked to support multiple outcomes. */
+
+void
+region_model::update_for_call_summary (const callgraph_superedge &cg_sedge,
+ region_model_context *ctxt)
+{
+ /* For now, set any return value to "unknown". */
+ const gcall *call_stmt = cg_sedge.get_call_stmt ();
+ tree lhs = gimple_call_lhs (call_stmt);
+ if (lhs)
+ set_to_new_unknown_value (get_lvalue (lhs, ctxt), TREE_TYPE (lhs), ctxt);
+
+ // TODO: actually implement some kind of summary here
+}
+
+/* Given a true or false edge guarded by conditional statement COND_STMT,
+ determine appropriate constraints for the edge to be taken.
+
+ If they are feasible, add the constraints and return true.
+
+ Return false if the constraints contradict existing knowledge
+ (and so the edge should not be taken). */
+
+bool
+region_model::apply_constraints_for_gcond (const cfg_superedge &sedge,
+ const gcond *cond_stmt,
+ region_model_context *ctxt)
+{
+ ::edge cfg_edge = sedge.get_cfg_edge ();
+ gcc_assert (cfg_edge != NULL);
+ gcc_assert (cfg_edge->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE));
+
+ enum tree_code op = gimple_cond_code (cond_stmt);
+ tree lhs = gimple_cond_lhs (cond_stmt);
+ tree rhs = gimple_cond_rhs (cond_stmt);
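+ /* For the false edge, invert the comparison so that the constraint
+ describes the condition under which this edge is taken. */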
+ if (cfg_edge->flags & EDGE_FALSE_VALUE)
+ op = invert_tree_comparison (op, false /* honor_nans */);
+ return add_constraint (lhs, op, rhs, ctxt);
+}
+
+/* Given an EDGE guarded by SWITCH_STMT, determine appropriate constraints
+ for the edge to be taken.
+
+ If they are feasible, add the constraints and return true.
+
+ Return false if the constraints contradict existing knowledge
+ (and so the edge should not be taken). */
+
+bool
+region_model::apply_constraints_for_gswitch (const switch_cfg_superedge &edge,
+ const gswitch *switch_stmt,
+ region_model_context *ctxt)
+{
+ tree index = gimple_switch_index (switch_stmt);
+ tree case_label = edge.get_case_label ();
+ gcc_assert (TREE_CODE (case_label) == CASE_LABEL_EXPR);
+ tree lower_bound = CASE_LOW (case_label);
+ tree upper_bound = CASE_HIGH (case_label);
+ if (lower_bound)
+ {
+ if (upper_bound)
+ {
+ /* Range. */
+ if (!add_constraint (index, GE_EXPR, lower_bound, ctxt))
+ return false;
+ return add_constraint (index, LE_EXPR, upper_bound, ctxt);
+ }
+ else
+ /* Single-value. */
+ return add_constraint (index, EQ_EXPR, lower_bound, ctxt);
+ }
+ else
+ {
+ /* The default case.
+ Add exclusions based on the other cases. */
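+ /* Label index 0 is the default case itself, so start at 1. */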
+ for (unsigned other_idx = 1;
+ other_idx < gimple_switch_num_labels (switch_stmt);
+ other_idx++)
+ {
+ tree other_label = gimple_switch_label (switch_stmt,
+ other_idx);
+ tree other_lower_bound = CASE_LOW (other_label);
+ tree other_upper_bound = CASE_HIGH (other_label);
+ gcc_assert (other_lower_bound);
+ if (other_upper_bound)
+ {
+ /* Exclude this range-valued case.
+ For now, we just exclude the boundary values.
+ TODO: exclude the values within the region. */
+ if (!add_constraint (index, NE_EXPR, other_lower_bound, ctxt))
+ return false;
+ if (!add_constraint (index, NE_EXPR, other_upper_bound, ctxt))
+ return false;
+ }
+ else
+ /* Exclude this single-valued case. */
+ if (!add_constraint (index, NE_EXPR, other_lower_bound, ctxt))
+ return false;
+ }
+ return true;
+ }
+}
+
+/* Get the root_region within this model (guaranteed to be non-null). */
+
+root_region *
+region_model::get_root_region () const
+{
+ return get_region<root_region> (m_root_rid);
+}
+
+/* Get the region_id of this model's stack region (if any). */
+
+region_id
+region_model::get_stack_region_id () const
+{
+ return get_root_region ()->get_stack_region_id ();
+}
+
+/* Create a new frame_region for a call to FUN and push it onto
+ the stack.
+
+ If ARG_SIDS is non-NULL, use it to populate the parameters
+ in the new frame.
+ Otherwise, populate them with unknown values.
+
+ Return the region_id of the new frame_region. */
+
+region_id
+region_model::push_frame (function *fun, vec<svalue_id> *arg_sids,
+ region_model_context *ctxt)
+{
+ return get_root_region ()->push_frame (this, fun, arg_sids, ctxt);
+}
+
+/* Get the region_id of the top-most frame in this region_model's stack,
+ if any. */
+
+region_id
+region_model::get_current_frame_id () const
+{
+ return get_root_region ()->get_current_frame_id (*this);
+}
+
+/* Get the function of the top-most frame in this region_model's stack.
+ There must be such a frame. */
+
+function *
+region_model::get_current_function () const
+{
+ region_id frame_id = get_current_frame_id ();
+ frame_region *frame = get_region<frame_region> (frame_id);
+ return frame->get_function ();
+}
+
+/* Pop the topmost frame_region from this region_model's stack;
+ see the comment for stack_region::pop_frame. */
+
+svalue_id
+region_model::pop_frame (bool purge, purge_stats *out,
+ region_model_context *ctxt)
+{
+ return get_root_region ()->pop_frame (this, purge, out, ctxt);
+}
+
+/* Get the number of frames in this region_model's stack. */
+
+int
+region_model::get_stack_depth () const
+{
+ stack_region *stack = get_root_region ()->get_stack_region (this);
+ if (stack)
+ return stack->get_num_frames ();
+ else
+ return 0;
+}
+
+/* Get the function * at DEPTH within the call stack. */
+
+function *
+region_model::get_function_at_depth (unsigned depth) const
+{
+ stack_region *stack = get_root_region ()->get_stack_region (this);
+ gcc_assert (stack);
+ region_id frame_rid = stack->get_frame_rid (depth);
+ frame_region *frame = get_region <frame_region> (frame_rid);
+ return frame->get_function ();
+}
+
+/* Get the region_id of this model's globals region (if any). */
+
+region_id
+region_model::get_globals_region_id () const
+{
+ return get_root_region ()->get_globals_region_id ();
+}
+
+/* Add SVAL to this model, taking ownership, and returning its new
+ svalue_id. */
+
+svalue_id
+region_model::add_svalue (svalue *sval)
+{
+ gcc_assert (sval);
+ m_svalues.safe_push (sval);
+ return svalue_id::from_int (m_svalues.length () - 1);
+}
+
+/* Change the meaning of SID to be NEW_SVAL
+ (e.g. when dereferencing an unknown pointer, the pointer
+ becomes a pointer to a symbolic region, so that all users
+ of the former unknown pointer are now effectively pointing
+ at the same region). */
+
+void
+region_model::replace_svalue (svalue_id sid, svalue *new_sval)
+{
+ gcc_assert (!sid.null_p ());
+ int idx = sid.as_int ();
+
+ gcc_assert (m_svalues[idx]);
+ gcc_assert (m_svalues[idx]->get_type () == new_sval->get_type ());
+ delete m_svalues[idx];
+
+ m_svalues[idx] = new_sval;
+}
+
+/* Add region R to this model, taking ownership, and returning its new
+ region_id. */
+
+region_id
+region_model::add_region (region *r)
+{
+ gcc_assert (r);
+ m_regions.safe_push (r);
+ return region_id::from_int (m_regions.length () - 1);
+}
+
+/* Return the svalue with id SVAL_ID, or NULL for a null id. */
+
+svalue *
+region_model::get_svalue (svalue_id sval_id) const
+{
+ if (sval_id.null_p ())
+ return NULL;
+ return m_svalues[sval_id.as_int ()];
+}
+
+/* Return the region with id RID, or NULL for a null id. */
+
+region *
+region_model::get_region (region_id rid) const
+{
+ if (rid.null_p ())
+ return NULL;
+ return m_regions[rid.as_int ()];
+}
+
+/* Make a region of an appropriate subclass for TYPE,
+ with parent PARENT_RID. */
+
+static region *
+make_region_for_type (region_id parent_rid, tree type)
+{
+ gcc_assert (TYPE_P (type));
+
+ if (INTEGRAL_TYPE_P (type)
+ || SCALAR_FLOAT_TYPE_P (type)
+ || POINTER_TYPE_P (type)
+ || TREE_CODE (type) == COMPLEX_TYPE)
+ return new primitive_region (parent_rid, type);
+
+ if (TREE_CODE (type) == RECORD_TYPE)
+ return new struct_region (parent_rid, type);
+
+ if (TREE_CODE (type) == ARRAY_TYPE)
+ return new array_region (parent_rid, type);
+
+ if (TREE_CODE (type) == UNION_TYPE)
+ return new union_region (parent_rid, type);
+
+ if (TREE_CODE (type) == FUNCTION_TYPE)
+ return new function_region (parent_rid, type);
+
+ /* If we have a void *, make a new symbolic region. */
+ if (type == void_type_node)
+ return new symbolic_region (parent_rid, false);
+
+ gcc_unreachable ();
+}
+
+/* Add a region with type TYPE and parent PARENT_RID. */
+
+region_id
+region_model::add_region_for_type (region_id parent_rid, tree type)
+{
+ gcc_assert (TYPE_P (type));
+
+ region *new_region = make_region_for_type (parent_rid, type);
+ return add_region (new_region);
+}
+
+/* Helper class for region_model::purge_unused_svalues. */
+
+class restrict_to_used_svalues : public purge_criteria
+{
+public:
+ restrict_to_used_svalues (const auto_sbitmap &used) : m_used (used) {}
+
+ bool should_purge_p (svalue_id sid) const FINAL OVERRIDE
+ {
+ gcc_assert (!sid.null_p ());
+ return !bitmap_bit_p (m_used, sid.as_int ());
+ }
+
+private:
+ const auto_sbitmap &m_used;
+};
+
+/* Remove unused svalues from this model, accumulating stats into STATS.
+ Unused svalues are deleted. Doing so could reorder the svalues, and
+ thus change the meaning of svalue_ids.
+
+ If CTXT is non-NULL, then it is notified about svalue_id remappings,
+ and about svalue_ids that are about to be deleted. This allows e.g.
+ for warning about resource leaks, for the case where the svalue
+ represents a resource handle in the user code (e.g. a FILE * or a malloc
+ buffer).
+
+ Amongst other things, removing unused svalues is important for ensuring
+ that the analysis of loops terminates. Otherwise, we could generate a
+ succession of models with unreferenced "unknown" values, where the
+ number of redundant unknown values could grow without bounds, and each
+ such model would be treated as distinct.
+
+ If KNOWN_USED is non-NULL, treat *KNOWN_USED as used (this is for
+ handling values being returned from functions as their frame is popped,
+ since otherwise we'd have to simultaneously determine both the rvalue
+ of the return expr in the callee frame and the lvalue for the gcall's
+ assignment in the caller frame, and it seems cleaner to express all
+ lvalue and rvalue lookups implicitly relative to a "current" frame). */
+
+void
+region_model::purge_unused_svalues (purge_stats *stats,
+ region_model_context *ctxt,
+ svalue_id *known_used_sid)
+{
+ // TODO: might want to avoid a vfunc call just to do logging here:
+ logger *logger = ctxt ? ctxt->get_logger () : NULL;
+
+ LOG_SCOPE (logger);
+
+ auto_sbitmap used (m_svalues.length ());
+ bitmap_clear (used);
+
+ if (known_used_sid)
+ if (!known_used_sid->null_p ())
+ bitmap_set_bit (used, known_used_sid->as_int ());
+
+ /* Walk the regions, marking sids that are used. */
+ unsigned i;
+ region *r;
+ FOR_EACH_VEC_ELT (m_regions, i, r)
+ {
+ svalue_id sid = r->get_value_direct ();
+ if (!sid.null_p ())
+ bitmap_set_bit (used, sid.as_int ());
+ }
+
+ /* Now purge any constraints involving svalues we don't care about. */
+ restrict_to_used_svalues criterion (used);
+ m_constraints->purge (criterion, stats);
+
+ /* Mark any sids that are in constraints that survived. */
+ {
+ equiv_class *ec;
+ FOR_EACH_VEC_ELT (m_constraints->m_equiv_classes, i, ec)
+ {
+ int j;
+ svalue_id *sid;
+ FOR_EACH_VEC_ELT (ec->m_vars, j, sid)
+ {
+ gcc_assert (!sid->null_p ());
+ bitmap_set_bit (used, sid->as_int ());
+ }
+ }
+ }
+
+ /* Build a mapping from old-sid to new-sid so that we can preserve
+ order of the used IDs and move all redundant ones to the end.
+ Iterate through svalue IDs, adding used ones to the front of
+ the new list, and unused ones to the back. */
+ svalue_id_map map (m_svalues.length ());
+ int next_used_new_sid = 0;
+ int after_next_unused_new_sid = m_svalues.length ();
+ for (unsigned i = 0; i < m_svalues.length (); i++)
+ {
+ svalue_id src (svalue_id::from_int (i));
+ if (bitmap_bit_p (used, i))
+ {
+ if (logger)
+ logger->log ("sv%i is used", i);
+ map.put (src, svalue_id::from_int (next_used_new_sid++));
+ }
+ else
+ {
+ if (logger)
+ logger->log ("sv%i is unused", i);
+ map.put (src, svalue_id::from_int (--after_next_unused_new_sid));
+ }
+ }
+ /* The two insertion points should have met. */
+ gcc_assert (next_used_new_sid == after_next_unused_new_sid);
+
+ /* Now walk the regions and the constraints, remapping sids,
+ so that all the redundant svalues are at the end. */
+ remap_svalue_ids (map);
+
+ if (logger)
+ {
+ logger->start_log_line ();
+ logger->log_partial ("map: ");
+ map.dump_to_pp (logger->get_printer ());
+ logger->end_log_line ();
+ }
+
+ /* Notify any client about the remapping and pending deletion.
+ Potentially this could trigger leak warnings. */
+ if (ctxt)
+ {
+ ctxt->remap_svalue_ids (map);
+ int num_client_items_purged
+ = ctxt->on_svalue_purge (svalue_id::from_int (next_used_new_sid), map);
+ if (stats)
+ stats->m_num_client_items += num_client_items_purged;
+ }
+
+ /* Drop the redundant svalues from the end of the vector. */
+ while ((signed)m_svalues.length () > next_used_new_sid)
+ {
+ if (logger)
+ {
+ svalue_id victim = svalue_id::from_int (m_svalues.length () - 1);
+ logger->log ("deleting sv%i (was sv%i)",
+ victim.as_int (),
+ map.get_src_for_dst (victim).as_int ());
+ }
+ delete m_svalues.pop ();
+ if (stats)
+ stats->m_num_svalues++;
+ }
+
+ if (known_used_sid)
+ map.update (known_used_sid);
+
+ validate ();
+}
+
+/* Renumber the svalues within this model according to MAP. */
+
+void
+region_model::remap_svalue_ids (const svalue_id_map &map)
+{
+ /* Update IDs within regions. */
+ unsigned i;
+ region *r;
+ FOR_EACH_VEC_ELT (m_regions, i, r)
+ r->remap_svalue_ids (map);
+
+ /* Update IDs within ECs within constraints. */
+ m_constraints->remap_svalue_ids (map);
+
+ /* Build a reordered svalues vector. */
+ auto_vec<svalue *> new_svalues (m_svalues.length ());
+ for (unsigned i = 0; i < m_svalues.length (); i++)
+ {
+ svalue_id dst (svalue_id::from_int (i));
+ svalue_id src = map.get_src_for_dst (dst);
+ new_svalues.quick_push (get_svalue (src));
+ }
+
+ /* Copy over the reordered vec to m_svalues. */
+ m_svalues.truncate (0);
+ gcc_assert (m_svalues.space (new_svalues.length ()));
+ svalue *sval;
+ FOR_EACH_VEC_ELT (new_svalues, i, sval)
+ m_svalues.quick_push (sval);
+}
+
+/* Renumber the regions within this model according to MAP. */
+
+void
+region_model::remap_region_ids (const region_id_map &map)
+{
+ /* Update IDs within regions. */
+ unsigned i;
+ region *r;
+ FOR_EACH_VEC_ELT (m_regions, i, r)
+ r->remap_region_ids (map);
+
+ /* Update IDs within svalues. */
+ svalue *sval;
+ FOR_EACH_VEC_ELT (m_svalues, i, sval)
+ sval->remap_region_ids (map);
+
+ /* Build a reordered regions vector. */
+ auto_vec<region *> new_regions (m_regions.length ());
+ for (unsigned i = 0; i < m_regions.length (); i++)
+ {
+ region_id dst (region_id::from_int (i));
+ region_id src = map.get_src_for_dst (dst);
+ new_regions.quick_push (get_region (src));
+ }
+
+ /* Copy over the reordered vec to m_regions. */
+ m_regions.truncate (0);
+ gcc_assert (m_regions.space (new_regions.length ()));
+ FOR_EACH_VEC_ELT (new_regions, i, r)
+ m_regions.quick_push (r);
+}
+
+/* Delete all regions within SET_TO_PURGE, remapping region IDs for
+ other regions. It's required that there are no uses of the
+ regions within the set (or the region IDs will become invalid).
+
+ Accumulate stats to STATS. */
+
+void
+region_model::purge_regions (const region_id_set &set_to_purge,
+ purge_stats *stats,
+ logger *)
+{
+ /* Build a mapping from old-rid to new-rid so that we can preserve
+ order of the used IDs and move all redundant ones to the end.
+ Iterate through region IDs, adding used ones to the front of
+ the new list, and unused ones to the back. */
+ region_id_map map (m_regions.length ());
+ int next_used_new_rid = 0;
+ int after_next_unused_new_rid = m_regions.length ();
+ for (unsigned i = 0; i < m_regions.length (); i++)
+ {
+ region_id src (region_id::from_int (i));
+ if (set_to_purge.region_p (src))
+ map.put (src, region_id::from_int (--after_next_unused_new_rid));
+ else
+ map.put (src, region_id::from_int (next_used_new_rid++));
+ }
+ /* The two insertion points should have met. */
+ gcc_assert (next_used_new_rid == after_next_unused_new_rid);
+
+ /* Now walk the regions and svalues, remapping rids,
+ so that all the redundant regions are at the end. */
+ remap_region_ids (map);
+
+ /* Drop the redundant regions from the end of the vector. */
+ while ((signed)m_regions.length () > next_used_new_rid)
+ {
+ delete m_regions.pop ();
+ if (stats)
+ stats->m_num_regions++;
+ }
+}
+
+/* Populate *OUT with RID and all of its descendents.
+ If EXCLUDE_RID is non-null, then don't add it or its descendents. */
+
+void
+region_model::get_descendents (region_id rid, region_id_set *out,
+ region_id exclude_rid) const
+{
+ out->add_region (rid);
+
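+ /* Iterate to a fixed point: each pass adds any region whose parent is
+ already in the set, until no more additions occur. */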
+ bool changed = true;
+ while (changed)
+ {
+ changed = false;
+ unsigned i;
+ region *r;
+ FOR_EACH_VEC_ELT (m_regions, i, r)
+ {
+ region_id iter_rid = region_id::from_int (i);
+ if (iter_rid == exclude_rid)
+ continue;
+ if (!out->region_p (iter_rid))
+ {
+ region_id parent_rid = r->get_parent ();
+ if (!parent_rid.null_p ())
+ if (out->region_p (parent_rid))
+ {
+ out->add_region (iter_rid);
+ changed = true;
+ }
+ }
+ }
+ }
+}
+
+/* Delete RID and all descendent regions.
+ Find any pointers to such regions; convert them to
+ poisoned values of kind PKIND.
+ Accumulate stats on purged entities into STATS. */
+
+void
+region_model::delete_region_and_descendents (region_id rid,
+ enum poison_kind pkind,
+ purge_stats *stats,
+ logger *logger)
+{
+ /* Find all child and descendent regions. */
+ region_id_set descendents (this);
+ get_descendents (rid, &descendents, region_id::null ());
+
+ /* Find any pointers to such regions; convert to poisoned. */
+ poison_any_pointers_to_bad_regions (descendents, pkind);
+
+ /* Delete all such regions. */
+ purge_regions (descendents, stats, logger);
+}
+
+/* Find any pointers to regions within BAD_REGIONS; convert them to
+ poisoned values of kind PKIND. */
+
+void
+region_model::poison_any_pointers_to_bad_regions (const region_id_set &
+ bad_regions,
+ enum poison_kind pkind)
+{
+ int i;
+ svalue *sval;
+ FOR_EACH_VEC_ELT (m_svalues, i, sval)
+ if (region_svalue *ptr_sval = sval->dyn_cast_region_svalue ())
+ {
+ region_id ptr_dst = ptr_sval->get_pointee ();
+ if (!ptr_dst.null_p ())
+ if (bad_regions.region_p (ptr_dst))
+ replace_svalue
+ (svalue_id::from_int (i),
+ new poisoned_svalue (pkind, sval->get_type ()));
+ }
+}
+
+/* Attempt to merge THIS with OTHER_MODEL, writing the result
+ to OUT_MODEL, and populating SID_MAPPING. */
+
+bool
+region_model::can_merge_with_p (const region_model &other_model,
+ region_model *out_model,
+ svalue_id_merger_mapping *sid_mapping) const
+{
+ gcc_assert (m_root_rid == other_model.m_root_rid);
+ gcc_assert (m_root_rid.as_int () == 0);
+ gcc_assert (sid_mapping);
+ gcc_assert (out_model);
+
+ model_merger merger (this, &other_model, out_model, sid_mapping);
+
+ if (!root_region::can_merge_p (get_root_region (),
+ other_model.get_root_region (),
+ out_model->get_root_region (),
+ &merger))
+ return false;
+
+ /* Merge constraints. */
+ constraint_manager::merge (*m_constraints,
+ *other_model.m_constraints,
+ out_model->m_constraints,
+ merger);
+
+ out_model->validate ();
+
+ /* The merged model should be simpler than (or as simple as) the inputs. */
+#if 0
+ gcc_assert (out_model->m_svalues.length () <= m_svalues.length ());
+ gcc_assert (out_model->m_svalues.length ()
+ <= other_model.m_svalues.length ());
+#endif
+ gcc_assert (out_model->m_regions.length () <= m_regions.length ());
+ gcc_assert (out_model->m_regions.length ()
+ <= other_model.m_regions.length ());
+ // TODO: same, for constraints
+
+ return true;
+}
+
+/* As above, but supply a placeholder svalue_id_merger_mapping
+ instance to be used and receive output. For use in selftests. */
+
+bool
+region_model::can_merge_with_p (const region_model &other_model,
+ region_model *out_model) const
+{
+ svalue_id_merger_mapping sid_mapping (*this, other_model);
+ return can_merge_with_p (other_model, out_model, &sid_mapping);
+}
+
+/* For debugging purposes: look for a region within this region_model
+ for a decl named NAME (or an SSA_NAME for such a decl),
+ returning its value, or svalue_id::null if none are found. */
+
+svalue_id
+region_model::get_value_by_name (const char *name) const
+{
+ gcc_assert (name);
+ tree identifier = get_identifier (name);
+ return get_root_region ()->get_value_by_name (identifier, *this);
+}
+
+/* Generate or reuse an svalue_id within this model for an index into
+ an array whose elements have the type pointed to by PTR_TYPE, based on
+ the byte offset OFFSET_SID. */
+
+svalue_id
+region_model::convert_byte_offset_to_array_index (tree ptr_type,
+ svalue_id offset_sid)
+{
+ gcc_assert (POINTER_TYPE_P (ptr_type));
+
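+ /* For example, a byte offset of 8 through a pointer to a 4-byte
+ element type becomes array index 2. */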
+ if (tree offset_cst = maybe_get_constant (offset_sid))
+ {
+ tree elem_type = TREE_TYPE (ptr_type);
+
+ /* Arithmetic on void-pointers is a GNU C extension, treating the size
+ of a void as 1.
+ https://gcc.gnu.org/onlinedocs/gcc/Pointer-Arith.html
+
+ Returning early for this case avoids a diagnostic from within the
+ call to size_in_bytes. */
+ if (TREE_CODE (elem_type) == VOID_TYPE)
+ return offset_sid;
+
+ /* This might not be a constant. */
+ tree byte_size = size_in_bytes (elem_type);
+
+ tree index
+ = fold_build2 (TRUNC_DIV_EXPR, integer_type_node,
+ offset_cst, byte_size);
+
+ if (CONSTANT_CLASS_P (index))
+ return get_or_create_constant_svalue (index);
+ }
+
+ /* Otherwise, we don't know the array index; generate a new unknown value.
+ TODO: do we need to capture the relationship between two unknown
+ values (the offset and the index)? */
+ return add_svalue (new unknown_svalue (integer_type_node));
+}
+
+/* Get a region of type TYPE for PTR_SID[OFFSET_SID/sizeof (*PTR_SID)].
+
+ If OFFSET_SID is known to be zero, then dereference PTR_SID.
+ Otherwise, impose a view of "typeof(*PTR_SID)[]" on *PTR_SID,
+ and then get a view of type TYPE on the relevant array element. */
+
+region_id
+region_model::get_or_create_mem_ref (tree type,
+ svalue_id ptr_sid,
+ svalue_id offset_sid,
+ region_model_context *ctxt)
+{
+ svalue *ptr_sval = get_svalue (ptr_sid);
+ tree ptr_type = ptr_sval->get_type ();
+ gcc_assert (ptr_type);
+
+ region_id raw_rid = deref_rvalue (ptr_sid, ctxt);
+
+ svalue *offset_sval = get_svalue (offset_sid);
+ tree offset_type = offset_sval->get_type ();
+ gcc_assert (offset_type);
+
+ if (constant_svalue *cst_sval = offset_sval->dyn_cast_constant_svalue ())
+ {
+ if (zerop (cst_sval->get_constant ()))
+ {
+ /* Handle the zero offset case. */
+ return get_or_create_view (raw_rid, type);
+ }
+
+ /* If we're already within an array of the correct type,
+ then we want to reuse that array, rather than starting
+ a new view.
+ If so, figure out our raw_rid's offset from its parent,
+ if we can, and use that to offset OFFSET_SID, and create
+ the element within the parent region. */
+ region *raw_reg = get_region (raw_rid);
+ region_id parent_rid = raw_reg->get_parent ();
+ tree parent_type = get_region (parent_rid)->get_type ();
+ if (parent_type
+ && TREE_CODE (parent_type) == ARRAY_TYPE)
+ {
+ // TODO: check we have the correct parent type
+ array_region *parent_array = get_region <array_region> (parent_rid);
+ array_region::key_t key_for_raw_rid;
+ if (parent_array->get_key_for_child_region (raw_rid,
+ &key_for_raw_rid))
+ {
+ /* Convert from offset to index. */
+ svalue_id index_sid
+ = convert_byte_offset_to_array_index (ptr_type, offset_sid);
+ if (tree index_cst
+ = get_svalue (index_sid)->maybe_get_constant ())
+ {
+ array_region::key_t index_offset
+ = array_region::key_from_constant (index_cst);
+ array_region::key_t index_rel_to_parent
+ = key_for_raw_rid + index_offset;
+ tree index_rel_to_parent_cst
+ = wide_int_to_tree (integer_type_node,
+ index_rel_to_parent);
+ svalue_id index_sid
+ = get_or_create_constant_svalue (index_rel_to_parent_cst);
+
+ /* Carry on, using the parent region and adjusted index. */
+ region_id element_rid
+ = parent_array->get_element (this, raw_rid, index_sid,
+ ctxt);
+ return get_or_create_view (element_rid, type);
+ }
+ }
+ }
+ }
+
+ tree array_type = build_array_type (TREE_TYPE (ptr_type),
+ integer_type_node);
+ region_id array_view_rid = get_or_create_view (raw_rid, array_type);
+ array_region *array_reg = get_region <array_region> (array_view_rid);
+
+ svalue_id index_sid
+ = convert_byte_offset_to_array_index (ptr_type, offset_sid);
+
+ region_id element_rid
+ = array_reg->get_element (this, array_view_rid, index_sid, ctxt);
+
+ return get_or_create_view (element_rid, type);
+}
+
+/* Get a region of type TYPE for PTR_SID + OFFSET_SID.
+
+ If OFFSET_SID is known to be zero, then dereference PTR_SID.
+ Otherwise, impose a view of "typeof(*PTR_SID)[]" on *PTR_SID,
+ and then get a view of type TYPE on the relevant array element. */
+
+region_id
+region_model::get_or_create_pointer_plus_expr (tree type,
+ svalue_id ptr_sid,
+ svalue_id offset_in_bytes_sid,
+ region_model_context *ctxt)
+{
+ return get_or_create_mem_ref (type,
+ ptr_sid,
+ offset_in_bytes_sid,
+ ctxt);
+}
+
+/* Get or create a view of type TYPE of the region with id RAW_RID.
+ Return the id of the view (or RAW_RID itself if the region is already
+ of that type). */
+
+region_id
+region_model::get_or_create_view (region_id raw_rid, tree type)
+{
+ region *raw_region = get_region (raw_rid);
+
+ gcc_assert (TYPE_P (type));
+ if (type != raw_region->get_type ())
+ {
+ /* If the region already has a view of the requested type,
+ reuse it. */
+ region_id existing_view_rid = raw_region->get_view (type, this);
+ if (!existing_view_rid.null_p ())
+ return existing_view_rid;
+
+ /* Otherwise, make one (adding it to the region_model and
+ to the viewed region). */
+ region_id view_rid = add_region_for_type (raw_rid, type);
+ raw_region->add_view (view_rid, this);
+ // TODO: something to signify that this is a "view"
+ return view_rid;
+ }
+
+ return raw_rid;
+}
+
+/* Attempt to get the fndecl used at CALL, if known; return NULL_TREE
+ otherwise. */
+
+tree
+region_model::get_fndecl_for_call (const gcall *call,
+ region_model_context *ctxt)
+{
+ tree fn_ptr = gimple_call_fn (call);
+ if (fn_ptr == NULL_TREE)
+ return NULL_TREE;
+ svalue_id fn_ptr_sid = get_rvalue (fn_ptr, ctxt);
+ svalue *fn_ptr_sval = get_svalue (fn_ptr_sid);
+ if (region_svalue *fn_ptr_ptr = fn_ptr_sval->dyn_cast_region_svalue ())
+ {
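+ /* The function pointer points at a region; if that region is a child
+ of the code region, map it back to its FUNCTION_DECL. */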
+ region_id fn_rid = fn_ptr_ptr->get_pointee ();
+ code_region *code = get_root_region ()->get_code_region (this);
+ if (code)
+ {
+ tree fn_decl = code->get_tree_for_child_region (fn_rid);
+ return fn_decl;
+ }
+ }
+
+ return NULL_TREE;
+}
+
+/* struct model_merger. */
+
+/* Dump a multiline representation of this merger to PP. */
+
+void
+model_merger::dump_to_pp (pretty_printer *pp) const
+{
+ pp_string (pp, "model A:");
+ pp_newline (pp);
+ m_model_a->dump_to_pp (pp, false);
+ pp_newline (pp);
+
+ pp_string (pp, "model B:");
+ pp_newline (pp);
+ m_model_b->dump_to_pp (pp, false);
+ pp_newline (pp);
+
+ pp_string (pp, "merged model:");
+ pp_newline (pp);
+ m_merged_model->dump_to_pp (pp, false);
+ pp_newline (pp);
+
+ pp_string (pp, "region map: model A to merged model:");
+ pp_newline (pp);
+ m_map_regions_from_a_to_m.dump_to_pp (pp);
+ pp_newline (pp);
+
+ pp_string (pp, "region map: model B to merged model:");
+ pp_newline (pp);
+ m_map_regions_from_b_to_m.dump_to_pp (pp);
+ pp_newline (pp);
+
+ m_sid_mapping->dump_to_pp (pp);
+}
+
+/* Dump a multiline representation of this merger to FILE. */
+
+void
+model_merger::dump (FILE *fp) const
+{
+ pretty_printer pp;
+ pp_format_decoder (&pp) = default_tree_printer;
+ pp_show_color (&pp) = pp_show_color (global_dc->printer);
+ pp.buffer->stream = fp;
+ dump_to_pp (&pp);
+ pp_flush (&pp);
+}
+
+/* Dump a multiline representation of this merger to stderr. */
+
+DEBUG_FUNCTION void
+model_merger::dump () const
+{
+ dump (stderr);
+}
+
+/* Attempt to merge the svalues of SID_A and SID_B (from their
+ respective models), writing the id of the resulting svalue
+ into *MERGED_SID.
+ Return true if the merger is possible, false otherwise. */
+
+bool
+model_merger::can_merge_values_p (svalue_id sid_a,
+ svalue_id sid_b,
+ svalue_id *merged_sid)
+{
+ gcc_assert (merged_sid);
+ svalue *sval_a = m_model_a->get_svalue (sid_a);
+ svalue *sval_b = m_model_b->get_svalue (sid_b);
+
+ /* If both are NULL, then the "values" are trivially mergeable. */
+ if (!sval_a && !sval_b)
+ return true;
+
+ /* If one is NULL and the other non-NULL, then the "values"
+ are not mergeable. */
+ if (!(sval_a && sval_b))
+ return false;
+
+ /* Have they both already been mapped to the same new svalue_id?
+ If so, use it. */
+ svalue_id sid_a_in_m
+ = m_sid_mapping->m_map_from_a_to_m.get_dst_for_src (sid_a);
+ svalue_id sid_b_in_m
+ = m_sid_mapping->m_map_from_b_to_m.get_dst_for_src (sid_b);
+ if (!sid_a_in_m.null_p ()
+ && !sid_b_in_m.null_p ()
+ && sid_a_in_m == sid_b_in_m)
+ {
+ *merged_sid = sid_a_in_m;
+ return true;
+ }
+
+ tree type = sval_a->get_type ();
+ if (type == NULL_TREE)
+ type = sval_b->get_type ();
+
+ /* If the values have different kinds, or are both unknown,
+ then merge as "unknown". */
+ if (sval_a->get_kind () != sval_b->get_kind ()
+ || sval_a->get_kind () == SK_UNKNOWN)
+ {
+ svalue *merged_sval = new unknown_svalue (type);
+ *merged_sid = m_merged_model->add_svalue (merged_sval);
+ record_svalues (sid_a, sid_b, *merged_sid);
+ return true;
+ }
+
+ gcc_assert (sval_a->get_kind () == sval_b->get_kind ());
+
+ switch (sval_a->get_kind ())
+ {
+ default:
+ case SK_UNKNOWN: /* SK_UNKNOWN handled above. */
+ gcc_unreachable ();
+
+ case SK_REGION:
+ {
+ /* If we have two region pointers, then we can merge (possibly to
+ "unknown"). */
+ const region_svalue ®ion_sval_a = *as_a <region_svalue *> (sval_a);
+ const region_svalue ®ion_sval_b = *as_a <region_svalue *> (sval_b);
+ region_svalue::merge_values (region_sval_a, region_sval_b,
+ merged_sid, type,
+ this);
+ record_svalues (sid_a, sid_b, *merged_sid);
+ return true;
+ }
+ break;
+ case SK_CONSTANT:
+ {
+ /* If we have two constants, then we can merge. */
+ const constant_svalue &cst_sval_a = *as_a <constant_svalue *> (sval_a);
+ const constant_svalue &cst_sval_b = *as_a <constant_svalue *> (sval_b);
+ constant_svalue::merge_values (cst_sval_a, cst_sval_b,
+ merged_sid, this);
+ record_svalues (sid_a, sid_b, *merged_sid);
+ return true;
+ }
+ break;
+
+ case SK_POISONED:
+ case SK_SETJMP:
+ return false;
+ }
+}
+
+/* Record that A_RID in model A and B_RID in model B
+ correspond to MERGED_RID in the merged model, so
+ that pointers can be accurately merged. */
+
+void
+model_merger::record_regions (region_id a_rid,
+ region_id b_rid,
+ region_id merged_rid)
+{
+ m_map_regions_from_a_to_m.put (a_rid, merged_rid);
+ m_map_regions_from_b_to_m.put (b_rid, merged_rid);
+}
+
+/* Record that A_SID in model A and B_SID in model B
+ correspond to MERGED_SID in the merged model. */
+
+void
+model_merger::record_svalues (svalue_id a_sid,
+ svalue_id b_sid,
+ svalue_id merged_sid)
+{
+ gcc_assert (m_sid_mapping);
+ m_sid_mapping->m_map_from_a_to_m.put (a_sid, merged_sid);
+ m_sid_mapping->m_map_from_b_to_m.put (b_sid, merged_sid);
+}
+
+/* struct svalue_id_merger_mapping. */
+
+/* svalue_id_merger_mapping's ctor. */
+
+svalue_id_merger_mapping::svalue_id_merger_mapping (const region_model &a,
+ const region_model &b)
+: m_map_from_a_to_m (a.get_num_svalues ()),
+ m_map_from_b_to_m (b.get_num_svalues ())
+{
+}
+
+/* Dump a multiline representation of this to PP. */
+
+void
+svalue_id_merger_mapping::dump_to_pp (pretty_printer *pp) const
+{
+ pp_string (pp, "svalue_id map: model A to merged model:");
+ pp_newline (pp);
+ m_map_from_a_to_m.dump_to_pp (pp);
+ pp_newline (pp);
+
+ pp_string (pp, "svalue_id map: model B to merged model:");
+ pp_newline (pp);
+ m_map_from_b_to_m.dump_to_pp (pp);
+ pp_newline (pp);
+}
+
+/* Dump a multiline representation of this to FILE. */
+
+void
+svalue_id_merger_mapping::dump (FILE *fp) const
+{
+ pretty_printer pp;
+ pp_format_decoder (&pp) = default_tree_printer;
+ pp_show_color (&pp) = pp_show_color (global_dc->printer);
+ pp.buffer->stream = fp;
+ dump_to_pp (&pp);
+ pp_flush (&pp);
+}
+
+/* Dump a multiline representation of this to stderr. */
+
+DEBUG_FUNCTION void
+svalue_id_merger_mapping::dump () const
+{
+ dump (stderr);
+}
+
+/* struct canonicalization. */
+
+/* canonicalization's ctor. */
+
+canonicalization::canonicalization (const region_model &model)
+: m_model (model),
+ m_rid_map (model.get_num_regions ()),
+ m_sid_map (model.get_num_svalues ()),
+ m_next_rid_int (0),
+ m_next_sid_int (0)
+{
+}
+
+/* If we've not seen RID yet, assign it a canonicalized region_id,
+ and walk the region's svalue and then the region. */
+
+void
+canonicalization::walk_rid (region_id rid)
+{
+ /* Stop if we've already seen RID. */
+ if (!m_rid_map.get_dst_for_src (rid).null_p ())
+ return;
+
+ region *region = m_model.get_region (rid);
+ if (region)
+ {
+ m_rid_map.put (rid, region_id::from_int (m_next_rid_int++));
+ walk_sid (region->get_value_direct ());
+ region->walk_for_canonicalization (this);
+ }
+}
+
+/* If we've not seen SID yet, assign it a canonicalized svalue_id,
+ and walk the svalue (and potentially regions e.g. for ptr values). */
+
+void
+canonicalization::walk_sid (svalue_id sid)
+{
+ /* Stop if we've already seen SID. */
+ if (!m_sid_map.get_dst_for_src (sid).null_p ())
+ return;
+
+ svalue *sval = m_model.get_svalue (sid);
+ if (sval)
+ {
+ m_sid_map.put (sid, svalue_id::from_int (m_next_sid_int++));
+ /* Potentially walk regions e.g. for ptrs. */
+ sval->walk_for_canonicalization (this);
+ }
+}
+
+/* Dump a multiline representation of this to PP. */
+
+void
+canonicalization::dump_to_pp (pretty_printer *pp) const
+{
+ pp_string (pp, "region_id map:");
+ pp_newline (pp);
+ m_rid_map.dump_to_pp (pp);
+ pp_newline (pp);
+
+ pp_string (pp, "svalue_id map:");
+ pp_newline (pp);
+ m_sid_map.dump_to_pp (pp);
+ pp_newline (pp);
+}
+
+/* Dump a multiline representation of this to FILE. */
+
+void
+canonicalization::dump (FILE *fp) const
+{
+ pretty_printer pp;
+ pp_format_decoder (&pp) = default_tree_printer;
+ pp_show_color (&pp) = pp_show_color (global_dc->printer);
+ pp.buffer->stream = fp;
+ dump_to_pp (&pp);
+ pp_flush (&pp);
+}
+
+/* Dump a multiline representation of this to stderr. */
+
+DEBUG_FUNCTION void
+canonicalization::dump () const
+{
+ dump (stderr);
+}
+
+/* Update HSTATE with a hash of SID. */
+
+void
+inchash::add (svalue_id sid, inchash::hash &hstate)
+{
+ hstate.add_int (sid.as_int ());
+}
+
+/* Update HSTATE with a hash of RID. */
+
+void
+inchash::add (region_id rid, inchash::hash &hstate)
+{
+ hstate.add_int (rid.as_int ());
+}
+
+/* Dump RMODEL fully to stderr (i.e. without summarization). */
+
+DEBUG_FUNCTION void
+debug (const region_model &rmodel)
+{
+ rmodel.dump (false);
+}
+
+#if CHECKING_P
+
+namespace selftest {
+
+/* Implementation detail of the ASSERT_CONDITION_* macros. */
+
+void
+assert_condition (const location &loc,
+ region_model &model,
+ tree lhs, tree_code op, tree rhs,
+ tristate expected)
+{
+ tristate actual = model.eval_condition (lhs, op, rhs, NULL);
+ ASSERT_EQ_AT (loc, actual, expected);
+}
+
+/* Implementation detail of ASSERT_DUMP_EQ. */
+
+static void
+assert_dump_eq (const location &loc,
+ const region_model &model,
+ bool summarize,
+ const char *expected)
+{
+ auto_fix_quotes sentinel;
+ pretty_printer pp;
+ pp_format_decoder (&pp) = default_tree_printer;
+ model.dump_to_pp (&pp, summarize);
+ ASSERT_STREQ_AT (loc, pp_formatted_text (&pp), expected);
+}
+
+/* Assert that MODEL.dump_to_pp (SUMMARIZE) is EXPECTED. */
+
+#define ASSERT_DUMP_EQ(MODEL, SUMMARIZE, EXPECTED) \
+ SELFTEST_BEGIN_STMT \
+ assert_dump_eq ((SELFTEST_LOCATION), (MODEL), (SUMMARIZE), (EXPECTED)); \
+ SELFTEST_END_STMT
+
+/* Smoketest for region_model::dump_to_pp. */
+
+static void
+test_dump ()
+{
+ region_model model;
+ model.get_root_region ()->ensure_stack_region (&model);
+ model.get_root_region ()->ensure_globals_region (&model);
+ model.get_root_region ()->ensure_heap_region (&model);
+
+ ASSERT_DUMP_EQ (model, false,
+ "r0: {kind: `root', parent: null, sval: null}\n"
+ "|-stack: r1: {kind: `stack', parent: r0, sval: sv0}\n"
+ "| |: sval: sv0: {poisoned: uninit}\n"
+ "|-globals: r2: {kind: `globals', parent: r0, sval: null, map: {}}\n"
+ "`-heap: r3: {kind: `heap', parent: r0, sval: sv1}\n"
+ " |: sval: sv1: {poisoned: uninit}\n"
+ "svalues:\n"
+ " sv0: {poisoned: uninit}\n"
+ " sv1: {poisoned: uninit}\n"
+ "constraint manager:\n"
+ " equiv classes:\n"
+ " constraints:\n");
+ ASSERT_DUMP_EQ (model, true, "");
+}
+
+/* Verify that calling region_model::get_rvalue repeatedly on the same
+ tree constant retrieves the same svalue_id. */
+
+static void
+test_unique_constants ()
+{
+ tree int_0 = build_int_cst (integer_type_node, 0);
+ tree int_42 = build_int_cst (integer_type_node, 42);
+
+ test_region_model_context ctxt;
+ region_model model;
+ ASSERT_EQ (model.get_rvalue (int_0, &ctxt), model.get_rvalue (int_0, &ctxt));
+ ASSERT_EQ (model.get_rvalue (int_42, &ctxt),
+ model.get_rvalue (int_42, &ctxt));
+ ASSERT_NE (model.get_rvalue (int_0, &ctxt), model.get_rvalue (int_42, &ctxt));
+ ASSERT_EQ (ctxt.get_num_diagnostics (), 0);
+}
+
+/* Check that operator== and hashing work as expected for the
+ various svalue subclasses. */
+
+static void
+test_svalue_equality ()
+{
+ tree int_42 = build_int_cst (integer_type_node, 42);
+ tree int_0 = build_int_cst (integer_type_node, 0);
+
+ /* Create pairs of instances of the various subclasses of svalue,
+ testing for hash and equality between (this, this) and
+ (this, another of the same subclass). */
+ svalue *ptr_to_r0
+ = new region_svalue (ptr_type_node, region_id::from_int (0));
+ svalue *ptr_to_r1
+ = new region_svalue (ptr_type_node, region_id::from_int (1));
+
+ ASSERT_EQ (ptr_to_r0->hash (), ptr_to_r0->hash ());
+ ASSERT_EQ (*ptr_to_r0, *ptr_to_r0);
+
+ ASSERT_NE (ptr_to_r0->hash (), ptr_to_r1->hash ());
+ ASSERT_NE (*ptr_to_r0, *ptr_to_r1);
+
+ svalue *cst_int_42 = new constant_svalue (int_42);
+ svalue *cst_int_0 = new constant_svalue (int_0);
+
+ ASSERT_EQ (cst_int_42->hash (), cst_int_42->hash ());
+ ASSERT_EQ (*cst_int_42, *cst_int_42);
+
+ ASSERT_NE (cst_int_42->hash (), cst_int_0->hash ());
+ ASSERT_NE (*cst_int_42, *cst_int_0);
+
+ svalue *uninit = new poisoned_svalue (POISON_KIND_UNINIT, NULL_TREE);
+ svalue *freed = new poisoned_svalue (POISON_KIND_FREED, NULL_TREE);
+
+ ASSERT_EQ (uninit->hash (), uninit->hash ());
+ ASSERT_EQ (*uninit, *uninit);
+
+ ASSERT_NE (uninit->hash (), freed->hash ());
+ ASSERT_NE (*uninit, *freed);
+
+ svalue *unknown_0 = new unknown_svalue (ptr_type_node);
+ svalue *unknown_1 = new unknown_svalue (ptr_type_node);
+ ASSERT_EQ (unknown_0->hash (), unknown_0->hash ());
+ ASSERT_EQ (*unknown_0, *unknown_0);
+ ASSERT_EQ (*unknown_1, *unknown_1);
+
+ /* Comparisons between different kinds of svalue. */
+ ASSERT_NE (*ptr_to_r0, *cst_int_42);
+ ASSERT_NE (*ptr_to_r0, *uninit);
+ ASSERT_NE (*ptr_to_r0, *unknown_0);
+ ASSERT_NE (*cst_int_42, *ptr_to_r0);
+ ASSERT_NE (*cst_int_42, *uninit);
+ ASSERT_NE (*cst_int_42, *unknown_0);
+ ASSERT_NE (*uninit, *ptr_to_r0);
+ ASSERT_NE (*uninit, *cst_int_42);
+ ASSERT_NE (*uninit, *unknown_0);
+ ASSERT_NE (*unknown_0, *ptr_to_r0);
+ ASSERT_NE (*unknown_0, *cst_int_42);
+ ASSERT_NE (*unknown_0, *uninit);
+
+ delete ptr_to_r0;
+ delete ptr_to_r1;
+ delete cst_int_42;
+ delete cst_int_0;
+ delete uninit;
+ delete freed;
+ delete unknown_0;
+ delete unknown_1;
+}
+
+/* Check that operator== and hashing work as expected for the
+ various region subclasses. */
+
+static void
+test_region_equality ()
+{
+ region *r0
+ = new primitive_region (region_id::from_int (3), integer_type_node);
+ region *r1
+ = new primitive_region (region_id::from_int (4), integer_type_node);
+
+ ASSERT_EQ (*r0, *r0);
+ ASSERT_EQ (r0->hash (), r0->hash ());
+ ASSERT_NE (*r0, *r1);
+ ASSERT_NE (r0->hash (), r1->hash ());
+
+ delete r0;
+ delete r1;
+
+ // TODO: test coverage for the map within a map_region
+}
+
+/* A subclass of purge_criteria for selftests: purge all svalue_id instances. */
+
+class purge_all_svalue_ids : public purge_criteria
+{
+public:
+ bool should_purge_p (svalue_id) const FINAL OVERRIDE
+ {
+ return true;
+ }
+};
+
+/* A subclass of purge_criteria: purge a specific svalue_id. */
+
+class purge_one_svalue_id : public purge_criteria
+{
+public:
+ purge_one_svalue_id (svalue_id victim) : m_victim (victim) {}
+
+ purge_one_svalue_id (region_model model, tree expr)
+ : m_victim (model.get_rvalue (expr, NULL)) {}
+
+ bool should_purge_p (svalue_id sid) const FINAL OVERRIDE
+ {
+ return sid == m_victim;
+ }
+
+private:
+ svalue_id m_victim;
+};
+
+/* Check that constraint_manager::purge works for individual svalue_ids. */
+
+static void
+test_purging_by_criteria ()
+{
+ tree int_42 = build_int_cst (integer_type_node, 42);
+ tree int_0 = build_int_cst (integer_type_node, 0);
+
+ tree x = build_global_decl ("x", integer_type_node);
+ tree y = build_global_decl ("y", integer_type_node);
+
+ {
+ region_model model0;
+ region_model model1;
+
+ ADD_SAT_CONSTRAINT (model1, x, EQ_EXPR, y);
+ ASSERT_NE (model0, model1);
+
+ purge_stats stats_for_px;
+ purge_one_svalue_id px (model1, x);
+ model1.get_constraints ()->purge (px, &stats_for_px);
+ ASSERT_EQ (stats_for_px.m_num_equiv_classes, 0);
+
+ purge_stats stats_for_py;
+ purge_one_svalue_id py (model1.get_rvalue (y, NULL));
+ model1.get_constraints ()->purge (py, &stats_for_py);
+ ASSERT_EQ (stats_for_py.m_num_equiv_classes, 1);
+
+ ASSERT_EQ (*model0.get_constraints (), *model1.get_constraints ());
+ }
+
+ {
+ region_model model0;
+ region_model model1;
+
+ ADD_SAT_CONSTRAINT (model1, x, EQ_EXPR, int_42);
+ ASSERT_NE (model0, model1);
+ ASSERT_CONDITION_TRUE (model1, x, EQ_EXPR, int_42);
+
+ purge_stats stats;
+ model1.get_constraints ()->purge (purge_one_svalue_id (model1, x), &stats);
+
+ ASSERT_CONDITION_UNKNOWN (model1, x, EQ_EXPR, int_42);
+ }
+
+ {
+ region_model model0;
+ region_model model1;
+
+ ADD_SAT_CONSTRAINT (model1, x, GE_EXPR, int_0);
+ ADD_SAT_CONSTRAINT (model1, x, LE_EXPR, int_42);
+ ASSERT_NE (model0, model1);
+
+ ASSERT_CONDITION_TRUE (model1, x, GE_EXPR, int_0);
+ ASSERT_CONDITION_TRUE (model1, x, LE_EXPR, int_42);
+
+ purge_stats stats;
+ model1.get_constraints ()->purge (purge_one_svalue_id (model1, x), &stats);
+
+ ASSERT_CONDITION_UNKNOWN (model1, x, GE_EXPR, int_0);
+ ASSERT_CONDITION_UNKNOWN (model1, x, LE_EXPR, int_42);
+ }
+
+ {
+ region_model model0;
+ region_model model1;
+
+ ADD_SAT_CONSTRAINT (model1, x, NE_EXPR, int_42);
+ ADD_SAT_CONSTRAINT (model1, y, NE_EXPR, int_0);
+ ASSERT_NE (model0, model1);
+ ASSERT_CONDITION_TRUE (model1, x, NE_EXPR, int_42);
+ ASSERT_CONDITION_TRUE (model1, y, NE_EXPR, int_0);
+
+ purge_stats stats;
+ model1.get_constraints ()->purge (purge_one_svalue_id (model1, x), &stats);
+ ASSERT_NE (model0, model1);
+
+ ASSERT_CONDITION_UNKNOWN (model1, x, NE_EXPR, int_42);
+ ASSERT_CONDITION_TRUE (model1, y, NE_EXPR, int_0);
+ }
+
+ {
+ region_model model0;
+ region_model model1;
+
+ ADD_SAT_CONSTRAINT (model1, x, NE_EXPR, int_42);
+ ADD_SAT_CONSTRAINT (model1, y, NE_EXPR, int_0);
+ ASSERT_NE (model0, model1);
+ ASSERT_CONDITION_TRUE (model1, x, NE_EXPR, int_42);
+ ASSERT_CONDITION_TRUE (model1, y, NE_EXPR, int_0);
+
+ purge_stats stats;
+ model1.get_constraints ()->purge (purge_all_svalue_ids (), &stats);
+ ASSERT_CONDITION_UNKNOWN (model1, x, NE_EXPR, int_42);
+ ASSERT_CONDITION_UNKNOWN (model1, y, NE_EXPR, int_0);
+ }
+
+}
+
+/* Test that region_model::purge_unused_svalues works as expected. */
+
+static void
+test_purge_unused_svalues ()
+{
+ tree int_42 = build_int_cst (integer_type_node, 42);
+ tree int_0 = build_int_cst (integer_type_node, 0);
+ tree x = build_global_decl ("x", integer_type_node);
+ tree y = build_global_decl ("y", integer_type_node);
+
+ test_region_model_context ctxt;
+ region_model model;
+ model.set_to_new_unknown_value (model.get_lvalue (x, &ctxt), TREE_TYPE (x),
+ &ctxt);
+ model.set_to_new_unknown_value (model.get_lvalue (x, &ctxt), TREE_TYPE (x),
+ &ctxt);
+ model.set_to_new_unknown_value (model.get_lvalue (x, &ctxt), TREE_TYPE (x),
+ &ctxt);
+ model.add_constraint (x, NE_EXPR, int_42, &ctxt);
+
+ model.set_value (model.get_lvalue (x, &ctxt),
+ model.get_rvalue (int_42, &ctxt),
+ &ctxt);
+ model.add_constraint (y, GT_EXPR, int_0, &ctxt);
+
+ /* The redundant unknown values should have been purged. */
+ purge_stats purged;
+ model.purge_unused_svalues (&purged, NULL);
+ ASSERT_EQ (purged.m_num_svalues, 3);
+
+ /* and the redundant constraint on an old, unknown value for x should
+ have been purged. */
+ ASSERT_EQ (purged.m_num_equiv_classes, 1);
+ ASSERT_EQ (purged.m_num_constraints, 1);
+ ASSERT_EQ (model.get_constraints ()->m_constraints.length (), 2);
+
+ /* ...but we should still have x == 42. */
+ ASSERT_EQ (model.eval_condition (x, EQ_EXPR, int_42, &ctxt),
+ tristate::TS_TRUE);
+
+ /* ...and we should still have the constraint on y. */
+ ASSERT_EQ (model.eval_condition (y, GT_EXPR, int_0, &ctxt),
+ tristate::TS_TRUE);
+
+ ASSERT_EQ (ctxt.get_num_diagnostics (), 0);
+}
+
+/* Verify that simple assignments work as expected. */
+
+static void
+test_assignment ()
+{
+ tree int_0 = build_int_cst (integer_type_node, 0);
+ tree x = build_global_decl ("x", integer_type_node);
+ tree y = build_global_decl ("y", integer_type_node);
+
+ /* "x == 0", then use of y, then "y = 0;". */
+ region_model model;
+ ADD_SAT_CONSTRAINT (model, x, EQ_EXPR, int_0);
+ ASSERT_CONDITION_UNKNOWN (model, y, EQ_EXPR, int_0);
+ model.set_value (model.get_lvalue (y, NULL),
+ model.get_rvalue (int_0, NULL),
+ NULL);
+ ASSERT_CONDITION_TRUE (model, y, EQ_EXPR, int_0);
+ ASSERT_CONDITION_TRUE (model, y, EQ_EXPR, x);
+
+ ASSERT_DUMP_EQ (model, true, "y: 0, {x}: unknown, x == y");
+}
+
+/* Verify the details of pushing and popping stack frames. */
+
+static void
+test_stack_frames ()
+{
+ tree int_42 = build_int_cst (integer_type_node, 42);
+ tree int_10 = build_int_cst (integer_type_node, 10);
+ tree int_5 = build_int_cst (integer_type_node, 5);
+ tree int_0 = build_int_cst (integer_type_node, 0);
+
+ auto_vec <tree> param_types;
+ tree parent_fndecl = make_fndecl (integer_type_node,
+ "parent_fn",
+ param_types);
+ allocate_struct_function (parent_fndecl, true);
+
+ tree child_fndecl = make_fndecl (integer_type_node,
+ "child_fn",
+ param_types);
+ allocate_struct_function (child_fndecl, true);
+
+ /* "a" and "b" in the parent frame. */
+ tree a = build_decl (UNKNOWN_LOCATION, PARM_DECL,
+ get_identifier ("a"),
+ integer_type_node);
+ tree b = build_decl (UNKNOWN_LOCATION, PARM_DECL,
+ get_identifier ("b"),
+ integer_type_node);
+ /* "x" and "y" in a child frame. */
+ tree x = build_decl (UNKNOWN_LOCATION, PARM_DECL,
+ get_identifier ("x"),
+ integer_type_node);
+ tree y = build_decl (UNKNOWN_LOCATION, PARM_DECL,
+ get_identifier ("y"),
+ integer_type_node);
+
+ /* "p" global. */
+ tree p = build_global_decl ("p", ptr_type_node);
+
+ /* "q" global. */
+ tree q = build_global_decl ("q", ptr_type_node);
+
+ test_region_model_context ctxt;
+ region_model model;
+
+ /* Push stack frame for "parent_fn". */
+ region_id parent_frame_rid
+ = model.push_frame (DECL_STRUCT_FUNCTION (parent_fndecl), NULL, &ctxt);
+ ASSERT_EQ (model.get_current_frame_id (), parent_frame_rid);
+ region_id a_in_parent_rid = model.get_lvalue (a, &ctxt);
+ model.set_value (a_in_parent_rid, model.get_rvalue (int_42, &ctxt), &ctxt);
+ model.set_to_new_unknown_value (model.get_lvalue (b, &ctxt),
+ integer_type_node, &ctxt);
+ model.add_constraint (b, LT_EXPR, int_10, &ctxt);
+ ASSERT_EQ (model.eval_condition (b, LT_EXPR, int_10, &ctxt),
+ tristate (tristate::TS_TRUE));
+
+ /* Push stack frame for "child_fn". */
+ region_id child_frame_rid
+ = model.push_frame (DECL_STRUCT_FUNCTION (child_fndecl), NULL, &ctxt);
+ ASSERT_EQ (model.get_current_frame_id (), child_frame_rid);
+ region_id x_in_child_rid = model.get_lvalue (x, &ctxt);
+ model.set_value (x_in_child_rid, model.get_rvalue (int_0, &ctxt), &ctxt);
+ model.set_to_new_unknown_value (model.get_lvalue (y, &ctxt),
+ integer_type_node, &ctxt);
+ model.add_constraint (y, NE_EXPR, int_5, &ctxt);
+ ASSERT_EQ (model.eval_condition (y, NE_EXPR, int_5, &ctxt),
+ tristate (tristate::TS_TRUE));
+
+ /* Point a global pointer at a local in the child frame: p = &x. */
+ region_id p_in_globals_rid = model.get_lvalue (p, &ctxt);
+ model.set_value (p_in_globals_rid,
+ model.get_or_create_ptr_svalue (ptr_type_node,
+ x_in_child_rid),
+ &ctxt);
+
+ /* Point another global pointer at p: q = &p. */
+ region_id q_in_globals_rid = model.get_lvalue (q, &ctxt);
+ model.set_value (q_in_globals_rid,
+ model.get_or_create_ptr_svalue (ptr_type_node,
+ p_in_globals_rid),
+ &ctxt);
+
+ /* Test get_descendents. */
+ region_id_set descendents (&model);
+ model.get_descendents (child_frame_rid, &descendents, region_id::null ());
+ ASSERT_TRUE (descendents.region_p (child_frame_rid));
+ ASSERT_TRUE (descendents.region_p (x_in_child_rid));
+ ASSERT_FALSE (descendents.region_p (a_in_parent_rid));
+ ASSERT_EQ (descendents.num_regions (), 3);
+#if 0
+ auto_vec<region_id> test_vec;
+ for (region_id_set::iterator_t iter = descendents.begin ();
+ iter != descendents.end ();
+ ++iter)
+ test_vec.safe_push (*iter);
+ gcc_unreachable (); // TODO
+ //ASSERT_EQ ();
+#endif
+
+ ASSERT_DUMP_EQ (model, true,
+ "x: 0, {y}: unknown, p: &x, q: &p, b < 10, y != 5");
+
+ /* Pop the "child_fn" frame from the stack. */
+ purge_stats purged;
+ model.pop_frame (true, &purged, &ctxt);
+
+ /* We should have purged the unknown values for x and y. */
+ ASSERT_EQ (purged.m_num_svalues, 2);
+
+ /* We should have purged the frame region and the regions for x and y. */
+ ASSERT_EQ (purged.m_num_regions, 3);
+
+ /* We should have purged the constraint on y. */
+ ASSERT_EQ (purged.m_num_equiv_classes, 1);
+ ASSERT_EQ (purged.m_num_constraints, 1);
+
+ /* Verify that p (which was pointing at the local "x" in the popped
+ frame) has been poisoned. */
+ svalue *new_p_sval = model.get_svalue (model.get_rvalue (p, &ctxt));
+ ASSERT_EQ (new_p_sval->get_kind (), SK_POISONED);
+ ASSERT_EQ (new_p_sval->dyn_cast_poisoned_svalue ()->get_poison_kind (),
+ POISON_KIND_POPPED_STACK);
+
+ /* Verify that q still points to p, in spite of the region
+ renumbering. */
+ svalue *new_q_sval = model.get_svalue (model.get_rvalue (q, &ctxt));
+ ASSERT_EQ (new_q_sval->get_kind (), SK_REGION);
+ ASSERT_EQ (new_q_sval->dyn_cast_region_svalue ()->get_pointee (),
+ model.get_lvalue (p, &ctxt));
+
+ /* Verify that the top of the stack has been updated. */
+ ASSERT_EQ (model.get_current_frame_id (), parent_frame_rid);
+
+ /* Verify locals in parent frame. */
+ /* Verify "a" still has its value. */
+ svalue *new_a_sval = model.get_svalue (model.get_rvalue (a, &ctxt));
+ ASSERT_EQ (new_a_sval->get_kind (), SK_CONSTANT);
+ ASSERT_EQ (new_a_sval->dyn_cast_constant_svalue ()->get_constant (),
+ int_42);
+ /* Verify "b" still has its constraint. */
+ ASSERT_EQ (model.eval_condition (b, LT_EXPR, int_10, &ctxt),
+ tristate (tristate::TS_TRUE));
+}
+
+/* Verify that get_representative_path_var works as expected: that
+ we can map from region ids to parms and back within a recursive call
+ stack. */
+
+static void
+test_get_representative_path_var ()
+{
+ auto_vec <tree> param_types;
+ tree fndecl = make_fndecl (integer_type_node,
+ "factorial",
+ param_types);
+ allocate_struct_function (fndecl, true);
+
+ /* Parm "n". */
+ tree n = build_decl (UNKNOWN_LOCATION, PARM_DECL,
+ get_identifier ("n"),
+ integer_type_node);
+
+ region_model model;
+
+ /* Push 5 stack frames for "factorial", each with a param. */
+ auto_vec<region_id> parm_rids;
+ auto_vec<svalue_id> parm_sids;
+ for (int depth = 0; depth < 5; depth++)
+ {
+ region_id frame_rid
+ = model.push_frame (DECL_STRUCT_FUNCTION (fndecl), NULL, NULL);
+ region_id rid_n = model.get_lvalue (path_var (n, depth), NULL);
+ parm_rids.safe_push (rid_n);
+
+ ASSERT_EQ (model.get_region (rid_n)->get_parent (), frame_rid);
+
+ svalue_id sid_n
+ = model.set_to_new_unknown_value (rid_n, integer_type_node, NULL);
+ parm_sids.safe_push (sid_n);
+ }
+
+ /* Verify that we can recognize that the regions are the parms,
+ at every depth. */
+ for (int depth = 0; depth < 5; depth++)
+ {
+ ASSERT_EQ (model.get_representative_path_var (parm_rids[depth]),
+ path_var (n, depth));
+ /* ...and that we can lookup lvalues for locals for all frames,
+ not just the top. */
+ ASSERT_EQ (model.get_lvalue (path_var (n, depth), NULL),
+ parm_rids[depth]);
+ /* ...and that we can locate the svalues. */
+ auto_vec<path_var> pvs;
+ model.get_path_vars_for_svalue (parm_sids[depth], &pvs);
+ ASSERT_EQ (pvs.length (), 1);
+ ASSERT_EQ (pvs[0], path_var (n, depth));
+ }
+}
+
+/* Verify that the core regions within a region_model are in a consistent
+ order after canonicalization. */
+
+static void
+test_canonicalization_1 ()
+{
+ region_model model0;
+ model0.get_root_region ()->ensure_stack_region (&model0);
+ model0.get_root_region ()->ensure_globals_region (&model0);
+
+ region_model model1;
+ model1.get_root_region ()->ensure_globals_region (&model1);
+ model1.get_root_region ()->ensure_stack_region (&model1);
+
+ model0.canonicalize (NULL);
+ model1.canonicalize (NULL);
+ ASSERT_EQ (model0, model1);
+}
+
+/* Verify that region models for
+ x = 42; y = 113;
+ and
+ y = 113; x = 42;
+ are equal after canonicalization. */
+
+static void
+test_canonicalization_2 ()
+{
+ tree int_42 = build_int_cst (integer_type_node, 42);
+ tree int_113 = build_int_cst (integer_type_node, 113);
+ tree x = build_global_decl ("x", integer_type_node);
+ tree y = build_global_decl ("y", integer_type_node);
+
+ region_model model0;
+ model0.set_value (model0.get_lvalue (x, NULL),
+ model0.get_rvalue (int_42, NULL),
+ NULL);
+ model0.set_value (model0.get_lvalue (y, NULL),
+ model0.get_rvalue (int_113, NULL),
+ NULL);
+
+ region_model model1;
+ model1.set_value (model1.get_lvalue (y, NULL),
+ model1.get_rvalue (int_113, NULL),
+ NULL);
+ model1.set_value (model1.get_lvalue (x, NULL),
+ model1.get_rvalue (int_42, NULL),
+ NULL);
+
+ model0.canonicalize (NULL);
+ model1.canonicalize (NULL);
+ ASSERT_EQ (model0, model1);
+}
+
+/* Verify that constraints for
+ x > 3 && y > 42
+ and
+ y > 42 && x > 3
+ are equal after canonicalization. */
+
+static void
+test_canonicalization_3 ()
+{
+ tree int_3 = build_int_cst (integer_type_node, 3);
+ tree int_42 = build_int_cst (integer_type_node, 42);
+ tree x = build_global_decl ("x", integer_type_node);
+ tree y = build_global_decl ("y", integer_type_node);
+
+ region_model model0;
+ model0.add_constraint (x, GT_EXPR, int_3, NULL);
+ model0.add_constraint (y, GT_EXPR, int_42, NULL);
+
+ region_model model1;
+ model1.add_constraint (y, GT_EXPR, int_42, NULL);
+ model1.add_constraint (x, GT_EXPR, int_3, NULL);
+
+ model0.canonicalize (NULL);
+ model1.canonicalize (NULL);
+ ASSERT_EQ (model0, model1);
+}
+
+/* Assert that two region_model instances with values VAL_A and VAL_B
+ for EXPR are mergeable. Write the merged model to *OUT_MERGED_MODEL,
+ and the merged svalue ptr to *OUT_MERGED_SVALUE.
+ If VAL_A or VAL_B is NULL_TREE, don't populate EXPR
+ for that region_model. */
+
+static void
+assert_region_models_merge (tree expr, tree val_a, tree val_b,
+ region_model *out_merged_model,
+ svalue **out_merged_svalue)
+{
+ test_region_model_context ctxt;
+ region_model model0;
+ region_model model1;
+ if (val_a)
+ model0.set_value (model0.get_lvalue (expr, &ctxt),
+ model0.get_rvalue (val_a, &ctxt),
+ &ctxt);
+ if (val_b)
+ model1.set_value (model1.get_lvalue (expr, &ctxt),
+ model1.get_rvalue (val_b, &ctxt),
+ &ctxt);
+
+ /* They should be mergeable. */
+ ASSERT_TRUE (model0.can_merge_with_p (model1, out_merged_model));
+
+ svalue_id merged_svalue_sid = out_merged_model->get_rvalue (expr, &ctxt);
+ *out_merged_svalue = out_merged_model->get_svalue (merged_svalue_sid);
+}
+
+/* Verify that we can merge region_model instances. */
+
+static void
+test_state_merging ()
+{
+ tree int_42 = build_int_cst (integer_type_node, 42);
+ tree int_113 = build_int_cst (integer_type_node, 113);
+ tree x = build_global_decl ("x", integer_type_node);
+ tree y = build_global_decl ("y", integer_type_node);
+ tree z = build_global_decl ("z", integer_type_node);
+ tree p = build_global_decl ("p", ptr_type_node);
+
+ tree addr_of_y = build1 (ADDR_EXPR, ptr_type_node, y);
+ tree addr_of_z = build1 (ADDR_EXPR, ptr_type_node, z);
+
+ auto_vec <tree> param_types;
+ tree test_fndecl = make_fndecl (integer_type_node, "test_fn", param_types);
+ allocate_struct_function (test_fndecl, true);
+
+ /* Param "a". */
+ tree a = build_decl (UNKNOWN_LOCATION, PARM_DECL,
+ get_identifier ("a"),
+ integer_type_node);
+ tree addr_of_a = build1 (ADDR_EXPR, ptr_type_node, a);
+
+ {
+ region_model model0;
+ region_model model1;
+ region_model merged;
+ /* Verify empty models can be merged. */
+ ASSERT_TRUE (model0.can_merge_with_p (model1, &merged));
+ ASSERT_EQ (model0, merged);
+ }
+
+ /* Verify that we can merge two contradictory constraints on the
+ value for a global. */
+ /* TODO: verify that the merged model doesn't have a value for
+ the global */
+ {
+ region_model model0;
+ region_model model1;
+ region_model merged;
+ test_region_model_context ctxt;
+ model0.add_constraint (x, EQ_EXPR, int_42, &ctxt);
+ model1.add_constraint (x, EQ_EXPR, int_113, &ctxt);
+ ASSERT_TRUE (model0.can_merge_with_p (model1, &merged));
+ ASSERT_NE (model0, merged);
+ ASSERT_NE (model1, merged);
+ }
+
+ /* Verify handling of a PARM_DECL. */
+ {
+ test_region_model_context ctxt;
+ region_model model0;
+ region_model model1;
+ ASSERT_EQ (model0.get_stack_depth (), 0);
+ model0.push_frame (DECL_STRUCT_FUNCTION (test_fndecl), NULL, &ctxt);
+ ASSERT_EQ (model0.get_stack_depth (), 1);
+ ASSERT_EQ (model0.get_function_at_depth (0),
+ DECL_STRUCT_FUNCTION (test_fndecl));
+ model1.push_frame (DECL_STRUCT_FUNCTION (test_fndecl), NULL, &ctxt);
+
+ svalue_id sid_a
+ = model0.set_to_new_unknown_value (model0.get_lvalue (a, &ctxt),
+ integer_type_node, &ctxt);
+ model1.set_to_new_unknown_value (model1.get_lvalue (a, &ctxt),
+ integer_type_node, &ctxt);
+ ASSERT_EQ (model0, model1);
+
+ /* Check that get_value_by_name works for locals. */
+ ASSERT_EQ (model0.get_value_by_name ("a"), sid_a);
+
+ /* They should be mergeable, and the result should be the same. */
+ region_model merged;
+ ASSERT_TRUE (model0.can_merge_with_p (model1, &merged));
+ ASSERT_EQ (model0, merged);
+ /* In particular, there should be an unknown value for "a". */
+ svalue *merged_a_sval = merged.get_svalue (merged.get_rvalue (a, &ctxt));
+ ASSERT_EQ (merged_a_sval->get_kind (), SK_UNKNOWN);
+ }
+
+ /* Verify handling of a global. */
+ {
+ test_region_model_context ctxt;
+ region_model model0;
+ region_model model1;
+ svalue_id sid_x
+ = model0.set_to_new_unknown_value (model0.get_lvalue (x, &ctxt),
+ integer_type_node, &ctxt);
+ model1.set_to_new_unknown_value (model1.get_lvalue (x, &ctxt),
+ integer_type_node, &ctxt);
+ ASSERT_EQ (model0, model1);
+
+ /* Check that get_value_by_name works for globals. */
+ ASSERT_EQ (model0.get_value_by_name ("x"), sid_x);
+
+ /* They should be mergeable, and the result should be the same. */
+ region_model merged;
+ ASSERT_TRUE (model0.can_merge_with_p (model1, &merged));
+ ASSERT_EQ (model0, merged);
+ /* In particular, there should be an unknown value for "x". */
+ svalue *merged_x_sval = merged.get_svalue (merged.get_rvalue (x, &ctxt));
+ ASSERT_EQ (merged_x_sval->get_kind (), SK_UNKNOWN);
+ }
+
+ /* Use global-handling to verify various combinations of values. */
+
+ /* Two equal constant values. */
+ {
+ region_model merged;
+ svalue *merged_x_sval;
+ assert_region_models_merge (x, int_42, int_42, &merged, &merged_x_sval);
+
+ /* In particular, there should be a constant value for "x". */
+ ASSERT_EQ (merged_x_sval->get_kind (), SK_CONSTANT);
+ ASSERT_EQ (merged_x_sval->dyn_cast_constant_svalue ()->get_constant (),
+ int_42);
+ }
+
+ /* Two non-equal constant values. */
+ {
+ region_model merged;
+ svalue *merged_x_sval;
+ assert_region_models_merge (x, int_42, int_113, &merged, &merged_x_sval);
+
+ /* In particular, there should be an unknown value for "x". */
+ ASSERT_EQ (merged_x_sval->get_kind (), SK_UNKNOWN);
+ }
+
+ /* Uninit and constant. */
+ {
+ region_model merged;
+ svalue *merged_x_sval;
+ assert_region_models_merge (x, NULL_TREE, int_113, &merged, &merged_x_sval);
+
+ /* In particular, there should be an unknown value for "x". */
+ ASSERT_EQ (merged_x_sval->get_kind (), SK_UNKNOWN);
+ }
+
+ /* Constant and uninit. */
+ {
+ region_model merged;
+ svalue *merged_x_sval;
+ assert_region_models_merge (x, int_42, NULL_TREE, &merged, &merged_x_sval);
+
+ /* In particular, there should be an unknown value for "x". */
+ ASSERT_EQ (merged_x_sval->get_kind (), SK_UNKNOWN);
+ }
+
+ /* Unknown and constant. */
+ // TODO
+
+ /* Pointers: NULL and NULL. */
+ // TODO
+
+ /* Pointers: NULL and non-NULL. */
+ // TODO
+
+ /* Pointers: non-NULL and non-NULL: ptr to a local. */
+ {
+ region_model model0;
+ model0.push_frame (DECL_STRUCT_FUNCTION (test_fndecl), NULL, NULL);
+ model0.set_to_new_unknown_value (model0.get_lvalue (a, NULL),
+ integer_type_node, NULL);
+ model0.set_value (model0.get_lvalue (p, NULL),
+ model0.get_rvalue (addr_of_a, NULL), NULL);
+
+ region_model model1 (model0);
+ ASSERT_EQ (model0, model1);
+
+ /* They should be mergeable, and the result should be the same. */
+ region_model merged;
+ ASSERT_TRUE (model0.can_merge_with_p (model1, &merged));
+ ASSERT_EQ (model0, merged);
+ }
+
+ /* Pointers: non-NULL and non-NULL: ptr to a global. */
+ {
+ region_model merged;
+ /* p == &y in both input models. */
+ svalue *merged_p_sval;
+ assert_region_models_merge (p, addr_of_y, addr_of_y, &merged,
+ &merged_p_sval);
+
+ /* We should get p == &y in the merged model. */
+ ASSERT_EQ (merged_p_sval->get_kind (), SK_REGION);
+ region_svalue *merged_p_ptr = merged_p_sval->dyn_cast_region_svalue ();
+ region_id merged_p_star_rid = merged_p_ptr->get_pointee ();
+ ASSERT_EQ (merged_p_star_rid, merged.get_lvalue (y, NULL));
+ }
+
+ /* Pointers: non-NULL ptrs to different globals: should be unknown. */
+ {
+ region_model merged;
+ /* x == &y vs x == &z in the input models. */
+ svalue *merged_x_sval;
+ assert_region_models_merge (x, addr_of_y, addr_of_z, &merged,
+ &merged_x_sval);
+
+ /* We should get x == unknown in the merged model. */
+ ASSERT_EQ (merged_x_sval->get_kind (), SK_UNKNOWN);
+ }
+
+ /* Pointers: non-NULL and non-NULL: ptr to a heap region. */
+ {
+ test_region_model_context ctxt;
+ region_model model0;
+ region_id new_rid = model0.add_new_malloc_region ();
+ svalue_id ptr_sid
+ = model0.get_or_create_ptr_svalue (ptr_type_node, new_rid);
+ model0.set_value (model0.get_lvalue (p, &ctxt),
+ ptr_sid, &ctxt);
+ model0.canonicalize (&ctxt);
+
+ region_model model1 (model0);
+
+ ASSERT_EQ (model0, model1);
+
+ region_model merged;
+ ASSERT_TRUE (model0.can_merge_with_p (model1, &merged));
+
+ merged.canonicalize (&ctxt);
+
+ /* The merged model ought to be identical (after canonicalization,
+ at least). */
+ ASSERT_EQ (model0, merged);
+ }
+
+ /* Two regions sharing the same unknown svalue should continue sharing
+ an unknown svalue after self-merger. */
+ {
+ test_region_model_context ctxt;
+ region_model model0;
+ svalue_id sid
+ = model0.set_to_new_unknown_value (model0.get_lvalue (x, &ctxt),
+ integer_type_node, &ctxt);
+ model0.set_value (model0.get_lvalue (y, &ctxt), sid, &ctxt);
+ region_model model1 (model0);
+
+ /* They should be mergeable, and the result should be the same. */
+ region_model merged;
+ ASSERT_TRUE (model0.can_merge_with_p (model1, &merged));
+ ASSERT_EQ (model0, merged);
+
+ /* In particular, we should have x == y. */
+ ASSERT_EQ (merged.eval_condition (x, EQ_EXPR, y, &ctxt),
+ tristate (tristate::TS_TRUE));
+ }
+
+#if 0
+ {
+ region_model model0;
+ region_model model1;
+ test_region_model_context ctxt;
+ model0.add_constraint (x, EQ_EXPR, int_42, &ctxt);
+ model1.add_constraint (x, NE_EXPR, int_42, &ctxt);
+ ASSERT_TRUE (model0.can_merge_with_p (model1));
+ }
+
+ {
+ region_model model0;
+ region_model model1;
+ test_region_model_context ctxt;
+ model0.add_constraint (x, EQ_EXPR, int_42, &ctxt);
+ model1.add_constraint (x, NE_EXPR, int_42, &ctxt);
+ model1.add_constraint (x, EQ_EXPR, int_113, &ctxt);
+ ASSERT_TRUE (model0.can_merge_with_p (model1));
+ }
+#endif
+
+ // TODO: what can't we merge? need at least one such test
+
+ /* TODO: various things
+ - heap regions
+ - value merging:
+ - every combination, but in particular
+ - pairs of regions
+ */
+
+ /* Views. */
+ {
+ test_region_model_context ctxt;
+ region_model model0;
+
+ region_id x_rid = model0.get_lvalue (x, &ctxt);
+ region_id x_as_ptr = model0.get_or_create_view (x_rid, ptr_type_node);
+ model0.set_value (x_as_ptr, model0.get_rvalue (addr_of_y, &ctxt), &ctxt);
+
+ region_model model1 (model0);
+ ASSERT_EQ (model1, model0);
+
+ /* They should be mergeable, and the result should be the same. */
+ region_model merged;
+ ASSERT_TRUE (model0.can_merge_with_p (model1, &merged));
+ }
+}
+
+/* Verify that constraints are correctly merged when merging region_model
+ instances. */
+
+static void
+test_constraint_merging ()
+{
+ tree int_0 = build_int_cst (integer_type_node, 0);
+ tree int_5 = build_int_cst (integer_type_node, 5);
+ tree x = build_global_decl ("x", integer_type_node);
+ tree y = build_global_decl ("y", integer_type_node);
+ tree z = build_global_decl ("z", integer_type_node);
+ tree n = build_global_decl ("n", integer_type_node);
+
+ test_region_model_context ctxt;
+
+ /* model0: 0 <= (x == y) < n. */
+ region_model model0;
+ model0.set_to_new_unknown_value (model0.get_lvalue (x, &ctxt),
+ integer_type_node, &ctxt);
+ model0.add_constraint (x, EQ_EXPR, y, &ctxt);
+ model0.add_constraint (x, GE_EXPR, int_0, NULL);
+ model0.add_constraint (x, LT_EXPR, n, NULL);
+
+ /* model1: z != 5 && (0 <= x < n). */
+ region_model model1;
+ model1.set_to_new_unknown_value (model1.get_lvalue (x, &ctxt),
+ integer_type_node, &ctxt);
+ model1.add_constraint (z, NE_EXPR, int_5, NULL);
+ model1.add_constraint (x, GE_EXPR, int_0, NULL);
+ model1.add_constraint (x, LT_EXPR, n, NULL);
+
+ /* They should be mergeable; the merged constraints should
+ be: (0 <= x < n). */
+ region_model merged;
+ ASSERT_TRUE (model0.can_merge_with_p (model1, &merged));
+
+ ASSERT_EQ (merged.eval_condition (x, GE_EXPR, int_0, &ctxt),
+ tristate (tristate::TS_TRUE));
+ ASSERT_EQ (merged.eval_condition (x, LT_EXPR, n, &ctxt),
+ tristate (tristate::TS_TRUE));
+
+ ASSERT_EQ (merged.eval_condition (z, NE_EXPR, int_5, &ctxt),
+ tristate (tristate::TS_UNKNOWN));
+ ASSERT_EQ (merged.eval_condition (x, LT_EXPR, y, &ctxt),
+ tristate (tristate::TS_UNKNOWN));
+}
+
+/* Run all of the selftests within this file. */
+
+void
+analyzer_region_model_cc_tests ()
+{
+ test_dump ();
+ test_unique_constants ();
+ test_svalue_equality ();
+ test_region_equality ();
+ test_purging_by_criteria ();
+ test_purge_unused_svalues ();
+ test_assignment ();
+ test_stack_frames ();
+ test_get_representative_path_var ();
+ test_canonicalization_1 ();
+ test_canonicalization_2 ();
+ test_canonicalization_3 ();
+ test_state_merging ();
+ test_constraint_merging ();
+}
+
+} // namespace selftest
+
+#endif /* CHECKING_P */
+
+#endif /* #if ENABLE_ANALYZER */
--- /dev/null
+/* Classes for modeling the state of memory.
+ Copyright (C) 2019-2020 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_ANALYZER_REGION_MODEL_H
+#define GCC_ANALYZER_REGION_MODEL_H
+
+/* Implementation of the region-based ternary model described in:
+ "A Memory Model for Static Analysis of C Programs"
+ (Zhongxing Xu, Ted Kremenek, and Jian Zhang)
+ http://lcs.ios.ac.cn/~xuzb/canalyze/memmodel.pdf */
+
+/* A tree, extended with stack frame information for locals, so that
+ we can distinguish between different values of locals within a potentially
+ recursive callstack. */
+// TODO: would this be better as a new tree code?
+
+class path_var
+{
+public:
+ path_var (tree t, int stack_depth)
+ : m_tree (t), m_stack_depth (stack_depth)
+ {
+ // TODO: ignore stack depth for globals and constants
+ }
+
+ bool operator== (const path_var &other) const
+ {
+ return (m_tree == other.m_tree
+ && m_stack_depth == other.m_stack_depth);
+ }
+
+ void dump (pretty_printer *pp) const;
+
+ tree m_tree;
+ int m_stack_depth; // or -1 for globals?
+};
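+
+/* For illustration only, a rough sketch of how path_var distinguishes
+   recursive frames (the PARM_DECL "n" is hypothetical; compare the
+   test_get_representative_path_var selftest in region-model.cc):
+
+     path_var outer_n (n, 0);  // "n" in the outermost frame
+     path_var inner_n (n, 1);  // "n" one recursive call deeper
+     gcc_assert (!(outer_n == inner_n));  */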
+
+namespace inchash
+{
+ extern void add_path_var (path_var pv, hash &hstate);
+} // namespace inchash
+
+
+/* A region_model is effectively a graph of regions and symbolic values.
+ We store per-model IDs rather than pointers to make it easier to clone
+ and to compare graphs. */
+
+/* An ID for an svalue within a region_model. Internally, this is an index
+ into a vector of svalue * within the region_model. */
+
+class svalue_id
+{
+public:
+ static svalue_id null () { return svalue_id (-1); }
+
+ svalue_id () : m_idx (-1) {}
+
+ bool operator== (const svalue_id &other) const
+ {
+ return m_idx == other.m_idx;
+ }
+
+ bool operator!= (const svalue_id &other) const
+ {
+ return m_idx != other.m_idx;
+ }
+
+ bool null_p () const { return m_idx == -1; }
+
+ static svalue_id from_int (int idx) { return svalue_id (idx); }
+ int as_int () const { return m_idx; }
+
+ void print (pretty_printer *pp) const;
+ void dump_node_name_to_pp (pretty_printer *pp) const;
+
+ void validate (const region_model &model) const;
+
+private:
+ svalue_id (int idx) : m_idx (idx) {}
+
+ int m_idx;
+};
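+
+/* For illustration only, a rough sketch of svalue_id's conventions
+   (the specific index is an assumption):
+
+     svalue_id null_sid;                       // default is the null id
+     gcc_assert (null_sid.null_p ());
+     svalue_id sid = svalue_id::from_int (3);  // wrap an index
+     gcc_assert (sid.as_int () == 3 && !sid.null_p ());
+
+   region_id below follows the same conventions.  */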
+
+/* An ID for a region within a region_model. Internally, this is an index
+ into a vector of region * within the region_model. */
+
+class region_id
+{
+public:
+ static region_id null () { return region_id (-1); }
+
+ region_id () : m_idx (-1) {}
+
+ bool operator== (const region_id &other) const
+ {
+ return m_idx == other.m_idx;
+ }
+
+ bool operator!= (const region_id &other) const
+ {
+ return m_idx != other.m_idx;
+ }
+
+ bool null_p () const { return m_idx == -1; }
+
+ static region_id from_int (int idx) { return region_id (idx); }
+ int as_int () const { return m_idx; }
+
+ void print (pretty_printer *pp) const;
+ void dump_node_name_to_pp (pretty_printer *pp) const;
+
+ void validate (const region_model &model) const;
+
+private:
+ region_id (int idx) : m_idx (idx) {}
+
+ int m_idx;
+};
+
+/* A class for renumbering IDs within a region_model, mapping old IDs
+ to new IDs (e.g. when removing one or more elements, thus needing to
+ renumber). */
+// TODO: could this be useful for equiv_class_ids?
+
+template <typename T>
+class id_map
+{
+ public:
+ id_map (int num_ids);
+ void put (T src, T dst);
+ T get_dst_for_src (T src) const;
+ T get_src_for_dst (T dst) const;
+ void dump_to_pp (pretty_printer *pp) const;
+ void dump () const;
+ void update (T *) const;
+
+ private:
+ auto_vec<T> m_src_to_dst;
+ auto_vec<T> m_dst_to_src;
+};
+
+typedef id_map<svalue_id> svalue_id_map;
+typedef id_map<region_id> region_id_map;
+
+/* class id_map. */
+
+/* id_map's ctor, which populates the map with dummy null values. */
+
+template <typename T>
+inline id_map<T>::id_map (int num_ids)
+: m_src_to_dst (num_ids),
+ m_dst_to_src (num_ids)
+{
+ for (int i = 0; i < num_ids; i++)
+ {
+ m_src_to_dst.quick_push (T::null ());
+ m_dst_to_src.quick_push (T::null ());
+ }
+}
+
+/* Record that SRC is to be mapped to DST. */
+
+template <typename T>
+inline void
+id_map<T>::put (T src, T dst)
+{
+ m_src_to_dst[src.as_int ()] = dst;
+ m_dst_to_src[dst.as_int ()] = src;
+}
+
+/* Get the new value for SRC within the map. */
+
+template <typename T>
+inline T
+id_map<T>::get_dst_for_src (T src) const
+{
+ if (src.null_p ())
+ return src;
+ return m_src_to_dst[src.as_int ()];
+}
+
+/* Given DST, a new value, determine which old value was mapped to it
+ (the inverse of the map). */
+
+template <typename T>
+inline T
+id_map<T>::get_src_for_dst (T dst) const
+{
+ if (dst.null_p ())
+ return dst;
+ return m_dst_to_src[dst.as_int ()];
+}
+
+/* Dump this id_map to PP. */
+
+template <typename T>
+inline void
+id_map<T>::dump_to_pp (pretty_printer *pp) const
+{
+ pp_string (pp, "src to dst: {");
+ unsigned i;
+ T *dst;
+ FOR_EACH_VEC_ELT (m_src_to_dst, i, dst)
+ {
+ if (i > 0)
+ pp_string (pp, ", ");
+ T src (T::from_int (i));
+ src.print (pp);
+ pp_string (pp, " -> ");
+ dst->print (pp);
+ }
+ pp_string (pp, "}");
+ pp_newline (pp);
+
+ pp_string (pp, "dst to src: {");
+ T *src;
+ FOR_EACH_VEC_ELT (m_dst_to_src, i, src)
+ {
+ if (i > 0)
+ pp_string (pp, ", ");
+ T dst (T::from_int (i));
+ dst.print (pp);
+ pp_string (pp, " <- ");
+ src->print (pp);
+ }
+ pp_string (pp, "}");
+ pp_newline (pp);
+}
+
+/* Dump this id_map to stderr. */
+
+template <typename T>
+DEBUG_FUNCTION inline void
+id_map<T>::dump () const
+{
+ pretty_printer pp;
+ pp.buffer->stream = stderr;
+ dump_to_pp (&pp);
+ pp_flush (&pp);
+}
+
+/* Update *ID from the old value to its new value in this map. */
+
+template <typename T>
+inline void
+id_map<T>::update (T *id) const
+{
+ *id = get_dst_for_src (*id);
+}
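+
+/* For illustration only, a rough sketch of renumbering via an id_map
+   (the indices are hypothetical):
+
+     svalue_id_map map (2);
+     map.put (svalue_id::from_int (0), svalue_id::from_int (1));
+     map.put (svalue_id::from_int (1), svalue_id::from_int (0));
+     svalue_id sid = svalue_id::from_int (0);
+     map.update (&sid);
+     gcc_assert (sid.as_int () == 1);
+     gcc_assert (map.get_src_for_dst (svalue_id::from_int (0)).as_int () == 1);  */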
+
+/* Variant of the above, which only stores the mapping in one direction
+ (e.g. for merging, where the number of destination regions is not
+ the same as the number of source regions, and can grow). */
+
+template <typename T>
+class one_way_id_map
+{
+ public:
+ one_way_id_map (int num_ids);
+ void put (T src, T dst);
+ T get_dst_for_src (T src) const;
+ void dump_to_pp (pretty_printer *pp) const;
+ void dump () const;
+ void update (T *) const;
+
+ private:
+ auto_vec<T> m_src_to_dst;
+};
+
+typedef one_way_id_map<svalue_id> one_way_svalue_id_map;
+typedef one_way_id_map<region_id> one_way_region_id_map;
+
+/* class one_way_id_map. */
+
+/* one_way_id_map's ctor, which populates the map with dummy null values. */
+
+template <typename T>
+inline one_way_id_map<T>::one_way_id_map (int num_ids)
+: m_src_to_dst (num_ids)
+{
+ for (int i = 0; i < num_ids; i++)
+ m_src_to_dst.quick_push (T::null ());
+}
+
+/* Record that SRC is to be mapped to DST. */
+
+template <typename T>
+inline void
+one_way_id_map<T>::put (T src, T dst)
+{
+ m_src_to_dst[src.as_int ()] = dst;
+}
+
+/* Get the new value for SRC within the map. */
+
+template <typename T>
+inline T
+one_way_id_map<T>::get_dst_for_src (T src) const
+{
+ if (src.null_p ())
+ return src;
+ return m_src_to_dst[src.as_int ()];
+}
+
+/* Dump this map to PP. */
+
+template <typename T>
+inline void
+one_way_id_map<T>::dump_to_pp (pretty_printer *pp) const
+{
+ pp_string (pp, "src to dst: {");
+ unsigned i;
+ T *dst;
+ FOR_EACH_VEC_ELT (m_src_to_dst, i, dst)
+ {
+ if (i > 0)
+ pp_string (pp, ", ");
+ T src (T::from_int (i));
+ src.print (pp);
+ pp_string (pp, " -> ");
+ dst->print (pp);
+ }
+ pp_string (pp, "}");
+ pp_newline (pp);
+}
+
+/* Dump this map to stderr. */
+
+template <typename T>
+DEBUG_FUNCTION inline void
+one_way_id_map<T>::dump () const
+{
+ pretty_printer pp;
+ pp.buffer->stream = stderr;
+ dump_to_pp (&pp);
+ pp_flush (&pp);
+}
+
+/* Update *ID from the old value to its new value in this map. */
+
+template <typename T>
+inline void
+one_way_id_map<T>::update (T *id) const
+{
+ *id = get_dst_for_src (*id);
+}
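+
+/* For illustration only, a rough sketch of the one-way variant
+   (the indices are hypothetical): destination ids are not bounded by
+   the number of source ids, so the destination model can grow:
+
+     one_way_region_id_map map (1);
+     map.put (region_id::from_int (0), region_id::from_int (7));
+     gcc_assert (map.get_dst_for_src (region_id::from_int (0)).as_int () == 7);  */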
+
+/* A set of IDs within a region_model (either svalue_id or region_id). */
+
+template <typename T>
+class id_set
+{
+public:
+ id_set (const region_model *model);
+
+ void add_region (T id)
+ {
+ if (!id.null_p ())
+ bitmap_set_bit (m_bitmap, id.as_int ());
+ }
+
+ bool region_p (T id) const
+ {
+ gcc_assert (!id.null_p ());
+ return bitmap_bit_p (const_cast <auto_sbitmap &> (m_bitmap),
+ id.as_int ());
+ }
+
+ unsigned int num_regions ()
+ {
+ return bitmap_count_bits (m_bitmap);
+ }
+
+private:
+ auto_sbitmap m_bitmap;
+};
+
+typedef id_set<region_id> region_id_set;
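+
+/* For illustration only, a rough sketch of collecting region ids into
+   a set (the model and the decl "x" are hypothetical):
+
+     region_id_set set (&model);
+     set.add_region (model.get_lvalue (x, NULL));
+     gcc_assert (set.region_p (model.get_lvalue (x, NULL)));
+     gcc_assert (set.num_regions () == 1);  */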
+
+/* Various operations delete information from a region_model.
+
+ This struct tracks how many of each kind of entity were purged (e.g.
+ for selftests, and for debugging). */
+
+struct purge_stats
+{
+ purge_stats ()
+ : m_num_svalues (0),
+ m_num_regions (0),
+ m_num_equiv_classes (0),
+ m_num_constraints (0),
+ m_num_client_items (0)
+ {}
+
+ int m_num_svalues;
+ int m_num_regions;
+ int m_num_equiv_classes;
+ int m_num_constraints;
+ int m_num_client_items;
+};
+
+/* An enum for discriminating between the different concrete subclasses
+ of svalue. */
+
+enum svalue_kind
+{
+ SK_REGION,
+ SK_CONSTANT,
+ SK_UNKNOWN,
+ SK_POISONED,
+ SK_SETJMP
+};
+
+/* svalue and its subclasses.
+
+ The class hierarchy looks like this (using indentation to show
+ inheritance, and with svalue_kinds shown for the concrete subclasses):
+
+ svalue
+ region_svalue (SK_REGION)
+ constant_svalue (SK_CONSTANT)
+ unknown_svalue (SK_UNKNOWN)
+ poisoned_svalue (SK_POISONED)
+ setjmp_svalue (SK_SETJMP). */
+
+/* An abstract base class representing a value held by a region of memory. */
+
+class svalue
+{
+public:
+ virtual ~svalue () {}
+
+ bool operator== (const svalue &other) const;
+ bool operator!= (const svalue &other) const { return !(*this == other); }
+
+ virtual svalue *clone () const = 0;
+
+ tree get_type () const { return m_type; }
+
+ virtual enum svalue_kind get_kind () const = 0;
+
+ hashval_t hash () const;
+
+ void print (const region_model &model,
+ svalue_id this_sid,
+ pretty_printer *pp) const;
+
+ virtual void dump_dot_to_pp (const region_model &model,
+ svalue_id this_sid,
+ pretty_printer *pp) const;
+
+ virtual region_svalue *dyn_cast_region_svalue () { return NULL; }
+ virtual constant_svalue *dyn_cast_constant_svalue () { return NULL; }
+ virtual const constant_svalue *dyn_cast_constant_svalue () const
+ { return NULL; }
+ virtual poisoned_svalue *dyn_cast_poisoned_svalue () { return NULL; }
+ virtual unknown_svalue *dyn_cast_unknown_svalue () { return NULL; }
+ virtual setjmp_svalue *dyn_cast_setjmp_svalue () { return NULL; }
+
+ virtual void remap_region_ids (const region_id_map &map);
+
+ virtual void walk_for_canonicalization (canonicalization *c) const;
+
+ virtual svalue_id get_child_sid (region *parent, region *child,
+ region_model &model,
+ region_model_context *ctxt);
+
+ tree maybe_get_constant () const;
+
+ protected:
+ svalue (tree type) : m_type (type) {}
+
+ virtual void add_to_hash (inchash::hash &hstate) const = 0;
+
+ private:
+ virtual void print_details (const region_model &model,
+ svalue_id this_sid,
+ pretty_printer *pp) const = 0;
+ tree m_type;
+};
+
+/* Concrete subclass of svalue representing a pointer value that points to
+ a known region. */
+
+class region_svalue : public svalue
+{
+public:
+ region_svalue (tree type, region_id rid) : svalue (type), m_rid (rid)
+ {
+ /* Should we support NULL ptrs here? */
+ gcc_assert (!rid.null_p ());
+ }
+
+ bool compare_fields (const region_svalue &other) const;
+
+ svalue *clone () const FINAL OVERRIDE
+ { return new region_svalue (get_type (), m_rid); }
+
+ enum svalue_kind get_kind () const FINAL OVERRIDE { return SK_REGION; }
+
+ void dump_dot_to_pp (const region_model &model,
+ svalue_id this_sid,
+ pretty_printer *pp) const
+ FINAL OVERRIDE;
+
+ region_svalue *dyn_cast_region_svalue () FINAL OVERRIDE { return this; }
+
+ region_id get_pointee () const { return m_rid; }
+
+ void remap_region_ids (const region_id_map &map) FINAL OVERRIDE;
+
+ static void merge_values (const region_svalue ®ion_sval_a,
+ const region_svalue ®ion_sval_b,
+ svalue_id *merged_sid,
+ tree type,
+ model_merger *merger);
+
+ void walk_for_canonicalization (canonicalization *c) const FINAL OVERRIDE;
+
+ static tristate eval_condition (region_svalue *lhs_ptr,
+ enum tree_code op,
+ region_svalue *rhs_ptr);
+
+ void add_to_hash (inchash::hash &hstate) const FINAL OVERRIDE;
+
+ private:
+ void print_details (const region_model &model,
+ svalue_id this_sid,
+ pretty_printer *pp) const
+ FINAL OVERRIDE;
+
+ region_id m_rid;
+};
+
+template <>
+template <>
+inline bool
+is_a_helper <region_svalue *>::test (svalue *sval)
+{
+ return sval->get_kind () == SK_REGION;
+}
+
+/* Concrete subclass of svalue representing a specific constant value. */
+
+class constant_svalue : public svalue
+{
+public:
+ constant_svalue (tree cst_expr)
+ : svalue (TREE_TYPE (cst_expr)), m_cst_expr (cst_expr)
+ {
+ gcc_assert (cst_expr);
+ gcc_assert (CONSTANT_CLASS_P (cst_expr));
+ }
+
+ bool compare_fields (const constant_svalue &other) const;
+
+ svalue *clone () const FINAL OVERRIDE
+ { return new constant_svalue (m_cst_expr); }
+
+ enum svalue_kind get_kind () const FINAL OVERRIDE { return SK_CONSTANT; }
+
+ void add_to_hash (inchash::hash &hstate) const FINAL OVERRIDE;
+
+ constant_svalue *dyn_cast_constant_svalue () FINAL OVERRIDE { return this; }
+ const constant_svalue *dyn_cast_constant_svalue () const FINAL OVERRIDE
+ { return this; }
+
+ tree get_constant () const { return m_cst_expr; }
+
+ static void merge_values (const constant_svalue &cst_sval_a,
+ const constant_svalue &cst_sval_b,
+ svalue_id *merged_sid,
+ model_merger *merger);
+
+ static tristate eval_condition (constant_svalue *lhs,
+ enum tree_code op,
+ constant_svalue *rhs);
+
+ svalue_id get_child_sid (region *parent, region *child,
+ region_model &model,
+ region_model_context *ctxt) FINAL OVERRIDE;
+
+ private:
+ void print_details (const region_model &model,
+ svalue_id this_sid,
+ pretty_printer *pp) const
+ FINAL OVERRIDE;
+
+ tree m_cst_expr;
+};
+
+template <>
+template <>
+inline bool
+is_a_helper <constant_svalue *>::test (svalue *sval)
+{
+ return sval->get_kind () == SK_CONSTANT;
+}
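+
+/* For illustration only, a rough sketch of discriminating between
+   svalue kinds (the constant 42 is an arbitrary example value):
+
+     tree int_42 = build_int_cst (integer_type_node, 42);
+     svalue *sval = new constant_svalue (int_42);
+     gcc_assert (sval->get_kind () == SK_CONSTANT);
+     if (constant_svalue *cst = sval->dyn_cast_constant_svalue ())
+       gcc_assert (cst->get_constant () == int_42);
+     delete sval;  */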
+
+/* Concrete subclass of svalue representing a unique but unknown value.
+ Comparisons of variables that share the same unknown value are known
+ to be equal, even if we don't know what the value is. */
+
+class unknown_svalue : public svalue
+{
+public:
+ unknown_svalue (tree type)
+ : svalue (type)
+ {}
+
+ bool compare_fields (const unknown_svalue &other) const;
+
+ svalue *clone () const FINAL OVERRIDE
+ { return new unknown_svalue (get_type ()); }
+
+ enum svalue_kind get_kind () const FINAL OVERRIDE { return SK_UNKNOWN; }
+
+ void add_to_hash (inchash::hash &hstate) const FINAL OVERRIDE;
+
+ unknown_svalue *dyn_cast_unknown_svalue () FINAL OVERRIDE { return this; }
+
+ private:
+ void print_details (const region_model &model,
+ svalue_id this_sid,
+ pretty_printer *pp) const
+ FINAL OVERRIDE;
+};
+
+/* An enum describing a particular kind of "poisoned" value. */
+
+enum poison_kind
+{
+ /* Used to describe uninitialized memory. */
+ POISON_KIND_UNINIT,
+
+ /* Used to describe freed memory. */
+ POISON_KIND_FREED,
+
+ /* Used for pointers to regions within popped stack frames. */
+ POISON_KIND_POPPED_STACK
+};
+
+extern const char *poison_kind_to_str (enum poison_kind);
+
+/* Concrete subclass of svalue representing a value that should not
+ be used (e.g. uninitialized memory, freed memory). */
+
+class poisoned_svalue : public svalue
+{
+public:
+ poisoned_svalue (enum poison_kind kind, tree type)
+ : svalue (type), m_kind (kind) {}
+
+ bool compare_fields (const poisoned_svalue &other) const;
+
+ svalue *clone () const FINAL OVERRIDE
+ { return new poisoned_svalue (m_kind, get_type ()); }
+
+ enum svalue_kind get_kind () const FINAL OVERRIDE { return SK_POISONED; }
+
+ void add_to_hash (inchash::hash &hstate) const FINAL OVERRIDE;
+
+ poisoned_svalue *dyn_cast_poisoned_svalue () FINAL OVERRIDE { return this; }
+
+ enum poison_kind get_poison_kind () const { return m_kind; }
+
+ private:
+ void print_details (const region_model &model,
+ svalue_id this_sid,
+ pretty_printer *pp) const
+ FINAL OVERRIDE;
+
+ enum poison_kind m_kind;
+};
+
+template <>
+template <>
+inline bool
+is_a_helper <poisoned_svalue *>::test (svalue *sval)
+{
+ return sval->get_kind () == SK_POISONED;
+}
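+
+/* Editorial usage sketch (illustrative only, not part of this patch's
+   code): given a region_model "model" and a tree "type" already in hand,
+   a poisoned value for freed memory could be created and later queried
+   along these lines:
+
+     svalue_id sid
+       = model.add_svalue (new poisoned_svalue (POISON_KIND_FREED, type));
+     svalue *sval = model.get_svalue (sid);
+     if (poisoned_svalue *psval = sval->dyn_cast_poisoned_svalue ())
+       report ("poisoned: %s", poison_kind_to_str (psval->get_poison_kind ()));
+
+   "model", "type" and "report" are placeholders; the real construction
+   sites live in region-model.cc.  */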
+
+/* Concrete subclass of svalue representing setjmp buffers, so that
+ longjmp can potentially "return" to an entirely different function. */
+
+class setjmp_svalue : public svalue
+{
+public:
+ setjmp_svalue (const exploded_node *enode, tree type)
+ : svalue (type), m_enode (enode)
+ {}
+
+ bool compare_fields (const setjmp_svalue &other) const;
+
+ svalue *clone () const FINAL OVERRIDE
+ { return new setjmp_svalue (m_enode, get_type ()); }
+
+ enum svalue_kind get_kind () const FINAL OVERRIDE { return SK_SETJMP; }
+
+ void add_to_hash (inchash::hash &hstate) const FINAL OVERRIDE;
+
+ setjmp_svalue *dyn_cast_setjmp_svalue () FINAL OVERRIDE { return this; }
+
+ int get_index () const;
+
+ const exploded_node *get_exploded_node () const { return m_enode; }
+
+ private:
+ void print_details (const region_model &model,
+ svalue_id this_sid,
+ pretty_printer *pp) const
+ FINAL OVERRIDE;
+
+ const exploded_node *m_enode;
+};
+
+/* An enum for discriminating between the different concrete subclasses
+ of region. */
+
+enum region_kind
+{
+ RK_PRIMITIVE,
+ RK_STRUCT,
+ RK_UNION,
+ RK_FRAME,
+ RK_GLOBALS,
+ RK_CODE,
+ RK_FUNCTION,
+ RK_ARRAY,
+ RK_STACK,
+ RK_HEAP,
+ RK_ROOT,
+ RK_SYMBOLIC
+};
+
+extern const char *region_kind_to_str (enum region_kind);
+
+/* Region and its subclasses.
+
+ The class hierarchy looks like this (using indentation to show
+ inheritance, and with region_kinds shown for the concrete subclasses):
+
+ region
+ primitive_region (RK_PRIMITIVE)
+ map_region
+ struct_or_union_region
+ struct_region (RK_STRUCT)
+ union_region (RK_UNION)
+ scope_region
+ frame_region (RK_FRAME)
+ globals_region (RK_GLOBALS)
+ code_region (RK_CODE)
+ function_region (RK_FUNCTION)
+ array_region (RK_ARRAY)
+ stack_region (RK_STACK)
+ heap_region (RK_HEAP)
+ root_region (RK_ROOT)
+ label_region (RK_FUNCTION)
+ symbolic_region (RK_SYMBOLIC). */
+
+/* Abstract base class representing a chunk of memory.
+
+ Regions form a tree-like hierarchy, with a root region at the base,
+ with memory space regions within it, representing the stack and
+ globals, with frames within the stack, and regions for variables
+ within the frames and the "globals" region. Regions for structs
+ can have subregions for fields.
+
+ A region can optionally have a value, or inherit its value from
+ the first ancestor with a value. For example, the stack region
+ has a "uninitialized" poison value which is inherited by all
+ descendent regions that don't themselves have a value. */
+
+class region
+{
+public:
+ virtual ~region () {}
+
+ bool operator== (const region &other) const;
+ bool operator!= (const region &other) const { return !(*this == other); }
+
+ virtual region *clone () const = 0;
+
+ virtual enum region_kind get_kind () const = 0;
+ virtual map_region *dyn_cast_map_region () { return NULL; }
+ virtual const symbolic_region *dyn_cast_symbolic_region () const
+ { return NULL; }
+
+ region_id get_parent () const { return m_parent_rid; }
+ region *get_parent_region (const region_model &model) const;
+
+ void set_value (region_model &model, region_id this_rid, svalue_id rhs_sid,
+ region_model_context *ctxt);
+ svalue_id get_value (region_model &model, bool non_null,
+ region_model_context *ctxt);
+ svalue_id get_value_direct () const { return m_sval_id; }
+
+ svalue_id get_inherited_child_sid (region *child,
+ region_model &model,
+ region_model_context *ctxt);
+
+ tree get_type () const { return m_type; }
+
+ hashval_t hash () const;
+
+ void print (const region_model &model,
+ region_id this_rid,
+ pretty_printer *pp) const;
+
+ virtual void dump_dot_to_pp (const region_model &model,
+ region_id this_rid,
+ pretty_printer *pp) const;
+
+ void dump_to_pp (const region_model &model,
+ region_id this_rid,
+ pretty_printer *pp,
+ const char *prefix,
+ bool is_last_child) const;
+ virtual void dump_child_label (const region_model &model,
+ region_id this_rid,
+ region_id child_rid,
+ pretty_printer *pp) const;
+
+ void remap_svalue_ids (const svalue_id_map &map);
+ virtual void remap_region_ids (const region_id_map &map);
+
+ virtual void walk_for_canonicalization (canonicalization *c) const = 0;
+
+ void add_view (region_id view_rid, region_model *model);
+ region_id get_view (tree type, region_model *model) const;
+ bool is_view_p () const { return m_is_view; }
+
+ void validate (const region_model *model) const;
+
+ bool non_null_p (const region_model &model) const;
+
+ protected:
+ region (region_id parent_rid, svalue_id sval_id, tree type);
+ region (const region &other);
+
+ virtual void add_to_hash (inchash::hash &hstate) const;
+ virtual void print_fields (const region_model &model,
+ region_id this_rid,
+ pretty_printer *pp) const;
+
+ private:
+ void become_active_view (region_model &model, region_id this_rid);
+ void deactivate_any_active_view (region_model &model);
+ void deactivate_view (region_model &model, region_id this_view_rid);
+
+ region_id m_parent_rid;
+ svalue_id m_sval_id;
+ tree m_type;
+ /* Child regions that are "views" (one per type). */
+ auto_vec<region_id> m_view_rids;
+
+ bool m_is_view;
+ region_id m_active_view_rid;
+};
+
+template <>
+template <>
+inline bool
+is_a_helper <region *>::test (region *)
+{
+ return true;
+}
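+
+/* Editorial aside: the is_a_helper specializations in this header hook the
+   region classes into GCC's is_a/dyn_cast machinery (is-a.h), so callers
+   can branch on a region's dynamic kind.  A hedged sketch, where "reg" is
+   some region * obtained from a region_model and the handle_* callbacks
+   are hypothetical:
+
+     if (map_region *map_reg = reg->dyn_cast_map_region ())
+       handle_map (map_reg, map_reg->elements ());
+     else if (frame_region *frame = dyn_cast <frame_region *> (reg))
+       handle_frame (frame->get_function (), frame->get_depth ());
+     else if (reg->get_kind () == RK_SYMBOLIC)
+       handle_unknown_pointee (reg);
+
+   The analyzer sources use the same dyn_cast pattern (e.g. on gimple
+   statements in the state machines).  */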
+
+/* Concrete region subclass for storing "primitive" types (integral types,
+ pointers, etc). */
+
+class primitive_region : public region
+{
+public:
+ primitive_region (region_id parent_rid, tree type)
+ : region (parent_rid, svalue_id::null (), type)
+ {}
+
+ region *clone () const FINAL OVERRIDE;
+
+ enum region_kind get_kind () const FINAL OVERRIDE { return RK_PRIMITIVE; }
+
+ void walk_for_canonicalization (canonicalization *c) const FINAL OVERRIDE;
+};
+
+/* A region that has children identified by tree keys.
+ For example a stack frame has subregions per local, and a region
+ for a struct has subregions per field. */
+
+class map_region : public region
+{
+public:
+ typedef ordered_hash_map<tree, region_id> map_t;
+ typedef map_t::iterator iterator_t;
+
+ map_region (region_id parent_rid, tree type)
+ : region (parent_rid, svalue_id::null (), type)
+ {}
+ map_region (const map_region &other);
+
+ map_region *dyn_cast_map_region () FINAL OVERRIDE { return this; }
+
+ void dump_dot_to_pp (const region_model &model,
+ region_id this_rid,
+ pretty_printer *pp) const
+ FINAL OVERRIDE;
+
+ void dump_child_label (const region_model &model,
+ region_id this_rid,
+ region_id child_rid,
+ pretty_printer *pp) const
+ FINAL OVERRIDE;
+
+ region_id get_or_create (region_model *model,
+ region_id this_rid,
+ tree expr, tree type);
+ void unbind (tree expr);
+ region_id *get (tree expr);
+
+ void remap_region_ids (const region_id_map &map) FINAL OVERRIDE;
+
+ tree get_tree_for_child_region (region_id child_rid) const;
+
+ tree get_tree_for_child_region (region *child,
+ const region_model &model) const;
+
+ static bool can_merge_p (const map_region *map_region_a,
+ const map_region *map_region_b,
+ map_region *merged_map_region,
+ region_id merged_rid,
+ model_merger *merger);
+
+ void walk_for_canonicalization (canonicalization *c) const FINAL OVERRIDE;
+
+ virtual bool valid_key_p (tree key) const = 0;
+
+ svalue_id get_value_by_name (tree identifier,
+ const region_model &model) const;
+
+ iterator_t begin () { return m_map.begin (); }
+ iterator_t end () { return m_map.end (); }
+ size_t elements () const { return m_map.elements (); }
+
+ protected:
+ bool compare_fields (const map_region &other) const;
+ void add_to_hash (inchash::hash &hstate) const OVERRIDE;
+ void print_fields (const region_model &model,
+ region_id this_rid,
+ pretty_printer *pp) const
+ OVERRIDE;
+
+ private:
+ /* Mapping from tree to child region. */
+ map_t m_map;
+};
+
+template <>
+template <>
+inline bool
+is_a_helper <map_region *>::test (region *reg)
+{
+ return (reg->dyn_cast_map_region () != NULL);
+}
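+
+/* Editorial usage sketch (illustrative only): assuming a region_model
+   "model", a frame's region_id "frame_rid" and a VAR_DECL "decl" are
+   already available, a child region keyed by the decl could be obtained
+   like this:
+
+     map_region *frame = model.get_region <map_region> (frame_rid);
+     region_id var_rid
+       = frame->get_or_create (&model, frame_rid, decl, TREE_TYPE (decl));
+     region_id *lookup = frame->get (decl);
+     gcc_assert (lookup);
+
+   get_or_create creates the child region on first use and returns the
+   existing child on later calls; get yields a pointer to the existing
+   binding, if any.  */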
+
+/* Abstract subclass representing a region with fields
+ (either a struct or a union). */
+
+class struct_or_union_region : public map_region
+{
+public:
+ bool valid_key_p (tree key) const FINAL OVERRIDE;
+
+ protected:
+ struct_or_union_region (region_id parent_rid, tree type)
+ : map_region (parent_rid, type)
+ {}
+
+ bool compare_fields (const struct_or_union_region &other) const;
+};
+
+template <>
+template <>
+inline bool
+is_a_helper <struct_or_union_region *>::test (region *reg)
+{
+ return (reg->get_kind () == RK_STRUCT
+ || reg->get_kind () == RK_UNION);
+}
+
+/* Concrete region subclass. A map_region representing a struct, using
+ FIELD_DECLs for its keys. */
+
+class struct_region : public struct_or_union_region
+{
+public:
+ struct_region (region_id parent_rid, tree type)
+ : struct_or_union_region (parent_rid, type)
+ {
+ gcc_assert (TREE_CODE (type) == RECORD_TYPE);
+ }
+
+ region *clone () const FINAL OVERRIDE;
+
+ enum region_kind get_kind () const FINAL OVERRIDE { return RK_STRUCT; }
+
+ bool compare_fields (const struct_region &other) const;
+};
+
+template <>
+template <>
+inline bool
+is_a_helper <struct_region *>::test (region *reg)
+{
+ return reg->get_kind () == RK_STRUCT;
+}
+
+/* Concrete region subclass. A map_region representing a union, using
+ FIELD_DECLs for its keys. */
+
+class union_region : public struct_or_union_region
+{
+public:
+ union_region (region_id parent_rid, tree type)
+ : struct_or_union_region (parent_rid, type)
+ {
+ gcc_assert (TREE_CODE (type) == UNION_TYPE);
+ }
+
+ region *clone () const FINAL OVERRIDE;
+
+ enum region_kind get_kind () const FINAL OVERRIDE { return RK_UNION; }
+
+ bool compare_fields (const union_region &other) const;
+};
+
+template <>
+template <>
+inline bool
+is_a_helper <union_region *>::test (region *reg)
+{
+ return reg->get_kind () == RK_UNION;
+}
+
+/* Abstract map_region subclass for accessing decls, used as a base class
+ for function frames and for the globals region. */
+
+class scope_region : public map_region
+{
+ public:
+
+ protected:
+ scope_region (region_id parent_rid)
+ : map_region (parent_rid, NULL_TREE)
+ {}
+
+ scope_region (const scope_region &other)
+ : map_region (other)
+ {
+ }
+
+ bool compare_fields (const scope_region &other) const;
+};
+
+/* Concrete region subclass, representing a function frame on the stack,
+ to contain the locals. */
+
+class frame_region : public scope_region
+{
+public:
+ frame_region (region_id parent_rid, function *fun, int depth)
+ : scope_region (parent_rid), m_fun (fun), m_depth (depth)
+ {}
+
+ frame_region (const frame_region &other)
+ : scope_region (other), m_fun (other.m_fun), m_depth (other.m_depth)
+ {
+ }
+
+ /* region vfuncs. */
+ region *clone () const FINAL OVERRIDE;
+ enum region_kind get_kind () const FINAL OVERRIDE { return RK_FRAME; }
+ void print_fields (const region_model &model,
+ region_id this_rid,
+ pretty_printer *pp) const
+ FINAL OVERRIDE;
+ void add_to_hash (inchash::hash &hstate) const FINAL OVERRIDE;
+
+ /* map_region vfuncs. */
+ bool valid_key_p (tree key) const FINAL OVERRIDE;
+
+ /* Accessors. */
+ function *get_function () const { return m_fun; }
+ int get_depth () const { return m_depth; }
+
+ bool compare_fields (const frame_region &other) const;
+
+ private:
+ function *m_fun;
+ int m_depth;
+};
+
+template <>
+template <>
+inline bool
+is_a_helper <frame_region *>::test (region *reg)
+{
+ return reg->get_kind () == RK_FRAME;
+}
+
+/* Concrete region subclass, to hold global variables (data and bss). */
+
+class globals_region : public scope_region
+{
+ public:
+ globals_region (region_id parent_rid)
+ : scope_region (parent_rid)
+ {}
+
+ globals_region (const globals_region &other)
+ : scope_region (other)
+ {
+ }
+
+ /* region vfuncs. */
+ region *clone () const FINAL OVERRIDE;
+ enum region_kind get_kind () const FINAL OVERRIDE { return RK_GLOBALS; }
+
+ /* map_region vfuncs. */
+ bool valid_key_p (tree key) const FINAL OVERRIDE;
+
+ bool compare_fields (const globals_region &other) const;
+};
+
+template <>
+template <>
+inline bool
+is_a_helper <globals_region *>::test (region *reg)
+{
+ return reg->get_kind () == RK_GLOBALS;
+}
+
+/* Concrete region subclass. A map_region representing the code, using
+ FUNCTION_DECLs for its keys. */
+
+class code_region : public map_region
+{
+public:
+ code_region (region_id parent_rid)
+ : map_region (parent_rid, NULL_TREE)
+ {}
+ code_region (const code_region &other)
+ : map_region (other)
+ {}
+
+ /* region vfuncs. */
+ region *clone () const FINAL OVERRIDE;
+ enum region_kind get_kind () const FINAL OVERRIDE { return RK_CODE; }
+
+ /* map_region vfunc. */
+ bool valid_key_p (tree key) const FINAL OVERRIDE;
+
+ region_id get_element (region_model *model,
+ region_id this_rid,
+ svalue_id index_sid,
+ region_model_context *ctxt);
+
+ bool compare_fields (const code_region &other) const;
+};
+
+template <>
+template <>
+inline bool
+is_a_helper <code_region *>::test (region *reg)
+{
+ return reg->get_kind () == RK_CODE;
+}
+
+/* Concrete region subclass. A map_region representing the code for
+ a particular function, using LABEL_DECLs for its keys. */
+
+class function_region : public map_region
+{
+public:
+ function_region (region_id parent_rid, tree type)
+ : map_region (parent_rid, type)
+ {
+ gcc_assert (TREE_CODE (type) == FUNCTION_TYPE);
+ }
+ function_region (const function_region &other)
+ : map_region (other)
+ {}
+
+ /* region vfuncs. */
+ region *clone () const FINAL OVERRIDE;
+ enum region_kind get_kind () const FINAL OVERRIDE { return RK_FUNCTION; }
+
+ /* map_region vfunc. */
+ bool valid_key_p (tree key) const FINAL OVERRIDE;
+
+ region_id get_element (region_model *model,
+ region_id this_rid,
+ svalue_id index_sid,
+ region_model_context *ctxt);
+
+ bool compare_fields (const function_region &other) const;
+};
+
+template <>
+template <>
+inline bool
+is_a_helper <function_region *>::test (region *reg)
+{
+ return reg->get_kind () == RK_FUNCTION;
+}
+
+/* Concrete region subclass representing an array (or an array-like view
+ of a parent region of memory).
+ This can't be a map_region as we can't use trees as the keys: there's
+ no guarantee about the uniqueness of an INTEGER_CST. */
+
+class array_region : public region
+{
+public:
+#if 0
+ wide_int m_test;
+
+ typedef wide_int key_t;
+ typedef int_hash <wide_int, -1, -2> hash_t;
+ typedef ordered_hash_map<hash_t, region_id> map_t;
+#else
+ typedef int key_t;
+ typedef int_hash <int, -1, -2> int_hash_t;
+ typedef ordered_hash_map<int_hash_t, region_id> map_t;
+#endif
+ typedef map_t::iterator iterator_t;
+
+ array_region (region_id parent_rid, tree type)
+ : region (parent_rid, svalue_id::null (), type)
+ {
+ gcc_assert (TREE_CODE (type) == ARRAY_TYPE);
+ }
+ array_region (const array_region &other);
+
+ void dump_dot_to_pp (const region_model &model,
+ region_id this_rid,
+ pretty_printer *pp) const
+ FINAL OVERRIDE;
+
+ void dump_child_label (const region_model &model,
+ region_id this_rid,
+ region_id child_rid,
+ pretty_printer *pp) const
+ FINAL OVERRIDE;
+
+ /* region vfuncs. */
+ region *clone () const FINAL OVERRIDE;
+ enum region_kind get_kind () const FINAL OVERRIDE { return RK_ARRAY; }
+
+ region_id get_element (region_model *model,
+ region_id this_rid,
+ svalue_id index_sid,
+ region_model_context *ctxt);
+
+ bool compare_fields (const array_region &other) const;
+
+ static bool can_merge_p (const array_region *array_region_a,
+ const array_region *array_region_b,
+ array_region *merged_array_region,
+ region_id merged_rid,
+ model_merger *merger);
+
+ void walk_for_canonicalization (canonicalization *c) const FINAL OVERRIDE;
+
+ iterator_t begin () { return m_map.begin (); }
+ iterator_t end () { return m_map.end (); }
+ size_t elements () const { return m_map.elements (); }
+
+ region_id get_or_create (region_model *model,
+ region_id this_rid,
+ key_t key, tree type);
+// void unbind (int expr);
+ region_id *get (key_t key);
+
+ void remap_region_ids (const region_id_map &map) FINAL OVERRIDE;
+
+ bool get_key_for_child_region (region_id child_rid,
+ key_t *out) const;
+
+#if 0
+ bool get_key_for_child_region (region *child,
+ const region_model &model,
+ key_t *out) const;
+#endif
+
+ void add_to_hash (inchash::hash &hstate) const OVERRIDE;
+ void print_fields (const region_model &model,
+ region_id this_rid,
+ pretty_printer *pp) const
+ OVERRIDE;
+
+ static key_t key_from_constant (tree cst);
+
+ private:
+ /* Mapping from integer index to child region. */
+ map_t m_map;
+};
+
+template <>
+template <>
+inline bool
+is_a_helper <array_region *>::test (region *reg)
+{
+ return reg->get_kind () == RK_ARRAY;
+}
+
+/* Concrete region subclass representing a stack, containing all stack
+ frames, and implicitly providing a POISON_KIND_UNINIT value to all
+ child regions by default. */
+
+class stack_region : public region
+{
+public:
+ stack_region (region_id parent_rid, svalue_id sval_id)
+ : region (parent_rid, sval_id, NULL_TREE)
+ {}
+
+ stack_region (const stack_region &other);
+
+ bool compare_fields (const stack_region &other) const;
+
+ region *clone () const FINAL OVERRIDE;
+
+ enum region_kind get_kind () const FINAL OVERRIDE { return RK_STACK; }
+
+ void dump_child_label (const region_model &model,
+ region_id this_rid,
+ region_id child_rid,
+ pretty_printer *pp) const
+ FINAL OVERRIDE;
+
+ void push_frame (region_id frame_rid);
+ region_id get_current_frame_id () const;
+ svalue_id pop_frame (region_model *model, bool purge, purge_stats *stats,
+ region_model_context *ctxt);
+
+ void remap_region_ids (const region_id_map &map) FINAL OVERRIDE;
+
+ unsigned get_num_frames () const { return m_frame_rids.length (); }
+ region_id get_frame_rid (unsigned i) const { return m_frame_rids[i]; }
+
+ static bool can_merge_p (const stack_region *stack_region_a,
+ const stack_region *stack_region_b,
+ model_merger *merger);
+
+ void walk_for_canonicalization (canonicalization *c) const FINAL OVERRIDE;
+
+ svalue_id get_value_by_name (tree identifier,
+ const region_model &model) const;
+
+ private:
+ void add_to_hash (inchash::hash &hstate) const FINAL OVERRIDE;
+ void print_fields (const region_model &model,
+ region_id this_rid,
+ pretty_printer *pp) const
+ FINAL OVERRIDE;
+
+ auto_vec<region_id> m_frame_rids;
+};
+
+template <>
+template <>
+inline bool
+is_a_helper <stack_region *>::test (region *reg)
+{
+ return reg->get_kind () == RK_STACK;
+}
+
+/* Concrete region subclass: a region within which regions can be
+ dynamically allocated. */
+
+class heap_region : public region
+{
+public:
+ heap_region (region_id parent_rid, svalue_id sval_id)
+ : region (parent_rid, sval_id, NULL_TREE)
+ {}
+ heap_region (const heap_region &other);
+
+ bool compare_fields (const heap_region &other) const;
+
+ region *clone () const FINAL OVERRIDE;
+
+ enum region_kind get_kind () const FINAL OVERRIDE { return RK_HEAP; }
+
+ static bool can_merge_p (const heap_region *heap_a, region_id heap_a_rid,
+ const heap_region *heap_b, region_id heap_b_rid,
+ heap_region *merged_heap, region_id merged_heap_rid,
+ model_merger *merger);
+
+ void walk_for_canonicalization (canonicalization *c) const FINAL OVERRIDE;
+
+};
+
+template <>
+template <>
+inline bool
+is_a_helper <heap_region *>::test (region *reg)
+{
+ return reg->get_kind () == RK_HEAP;
+}
+
+/* Concrete region subclass. The root region, containing all regions
+ (either directly, or as descendants).
+ Unique within a region_model. */
+
+class root_region : public region
+{
+public:
+ root_region ();
+ root_region (const root_region &other);
+
+ bool compare_fields (const root_region &other) const;
+
+ region *clone () const FINAL OVERRIDE;
+
+ enum region_kind get_kind () const FINAL OVERRIDE { return RK_ROOT; }
+
+ void dump_child_label (const region_model &model,
+ region_id this_rid,
+ region_id child_rid,
+ pretty_printer *pp) const
+ FINAL OVERRIDE;
+
+ region_id push_frame (region_model *model, function *fun,
+ vec<svalue_id> *arg_sids,
+ region_model_context *ctxt);
+ region_id get_current_frame_id (const region_model &model) const;
+ svalue_id pop_frame (region_model *model, bool purge, purge_stats *stats,
+ region_model_context *ctxt);
+
+ region_id ensure_stack_region (region_model *model);
+ region_id get_stack_region_id () const { return m_stack_rid; }
+ stack_region *get_stack_region (const region_model *model) const;
+
+ region_id ensure_globals_region (region_model *model);
+ region_id get_globals_region_id () const { return m_globals_rid; }
+ globals_region *get_globals_region (const region_model *model) const;
+
+ region_id ensure_code_region (region_model *model);
+ code_region *get_code_region (const region_model *model) const;
+
+ region_id ensure_heap_region (region_model *model);
+ heap_region *get_heap_region (const region_model *model) const;
+
+ void remap_region_ids (const region_id_map &map) FINAL OVERRIDE;
+
+ static bool can_merge_p (const root_region *root_region_a,
+ const root_region *root_region_b,
+ root_region *merged_root_region,
+ model_merger *merger);
+
+ void walk_for_canonicalization (canonicalization *c) const FINAL OVERRIDE;
+
+ svalue_id get_value_by_name (tree identifier,
+ const region_model &model) const;
+
+private:
+ void add_to_hash (inchash::hash &hstate) const FINAL OVERRIDE;
+ void print_fields (const region_model &model,
+ region_id this_rid,
+ pretty_printer *pp) const
+ FINAL OVERRIDE;
+
+ region_id m_stack_rid;
+ region_id m_globals_rid;
+ region_id m_code_rid;
+ region_id m_heap_rid;
+};
+
+template <>
+template <>
+inline bool
+is_a_helper <root_region *>::test (region *reg)
+{
+ return reg->get_kind () == RK_ROOT;
+}
+
+/* Concrete region subclass: a region to use when dereferencing an unknown
+ pointer. */
+
+class symbolic_region : public region
+{
+public:
+ symbolic_region (region_id parent_rid, bool possibly_null)
+ : region (parent_rid, svalue_id::null (), NULL_TREE),
+ m_possibly_null (possibly_null)
+ {}
+ symbolic_region (const symbolic_region &other);
+
+ const symbolic_region *dyn_cast_symbolic_region () const FINAL OVERRIDE
+ { return this; }
+
+ bool compare_fields (const symbolic_region &other) const;
+
+ region *clone () const FINAL OVERRIDE;
+
+ enum region_kind get_kind () const FINAL OVERRIDE { return RK_SYMBOLIC; }
+
+ void walk_for_canonicalization (canonicalization *c) const FINAL OVERRIDE;
+
+ bool m_possibly_null;
+};
+
+/* A region_model encapsulates a representation of the state of memory, with
+ a tree of regions, along with their associated values.
+ The representation is graph-like because values can be pointers to
+ regions.
+ It also stores a constraint_manager, capturing relationships between
+ the values. */
+
+class region_model
+{
+ public:
+ region_model ();
+ region_model (const region_model &other);
+ ~region_model ();
+
+#if 0//__cplusplus >= 201103
+ region_model (region_model &&other);
+#endif
+
+ region_model &operator= (const region_model &other);
+
+ bool operator== (const region_model &other) const;
+ bool operator!= (const region_model &other) const
+ {
+ return !(*this == other);
+ }
+
+ hashval_t hash () const;
+
+ void print (pretty_printer *pp) const;
+
+ void print_svalue (svalue_id sid, pretty_printer *pp) const;
+
+ void dump_dot_to_pp (pretty_printer *pp) const;
+ void dump_dot_to_file (FILE *fp) const;
+ void dump_dot (const char *path) const;
+
+ void dump_to_pp (pretty_printer *pp, bool summarize) const;
+ void dump (FILE *fp, bool summarize) const;
+ void dump (bool summarize) const;
+
+ void debug () const;
+
+ void validate () const;
+
+ void canonicalize (region_model_context *ctxt);
+ bool canonicalized_p () const;
+
+ void check_for_poison (tree expr, region_model_context *ctxt);
+ void on_assignment (const gassign *stmt, region_model_context *ctxt);
+ void on_call_pre (const gcall *stmt, region_model_context *ctxt);
+ void on_call_post (const gcall *stmt, region_model_context *ctxt);
+ void on_return (const greturn *stmt, region_model_context *ctxt);
+ void on_setjmp (const gcall *stmt, const exploded_node *enode,
+ region_model_context *ctxt);
+ void on_longjmp (const gcall *longjmp_call, const gcall *setjmp_call,
+ int setjmp_stack_depth, region_model_context *ctxt);
+
+ void update_for_phis (const supernode *snode,
+ const cfg_superedge *last_cfg_superedge,
+ region_model_context *ctxt);
+
+ void handle_phi (tree lhs, tree rhs, bool is_back_edge,
+ region_model_context *ctxt);
+
+ bool maybe_update_for_edge (const superedge &edge,
+ const gimple *last_stmt,
+ region_model_context *ctxt);
+
+ region_id get_root_rid () const { return m_root_rid; }
+ root_region *get_root_region () const;
+
+ region_id get_stack_region_id () const;
+ region_id push_frame (function *fun, vec<svalue_id> *arg_sids,
+ region_model_context *ctxt);
+ region_id get_current_frame_id () const;
+ function * get_current_function () const;
+ svalue_id pop_frame (bool purge, purge_stats *stats,
+ region_model_context *ctxt);
+ int get_stack_depth () const;
+ function *get_function_at_depth (unsigned depth) const;
+
+ region_id get_globals_region_id () const;
+
+ svalue_id add_svalue (svalue *sval);
+ void replace_svalue (svalue_id sid, svalue *new_sval);
+
+ region_id add_region (region *r);
+
+ region_id add_region_for_type (region_id parent_rid, tree type);
+
+ svalue *get_svalue (svalue_id sval_id) const;
+ region *get_region (region_id rid) const;
+
+ template <typename Subclass>
+ Subclass *get_region (region_id rid) const
+ {
+ region *result = get_region (rid);
+ if (result)
+ gcc_assert (is_a<Subclass *> (result));
+ return (Subclass *)result;
+ }
+
+ region_id get_lvalue (path_var pv, region_model_context *ctxt);
+ region_id get_lvalue (tree expr, region_model_context *ctxt);
+ svalue_id get_rvalue (path_var pv, region_model_context *ctxt);
+ svalue_id get_rvalue (tree expr, region_model_context *ctxt);
+
+ svalue_id get_or_create_ptr_svalue (tree ptr_type, region_id id);
+ svalue_id get_or_create_constant_svalue (tree cst_expr);
+ svalue_id get_svalue_for_fndecl (tree ptr_type, tree fndecl);
+ svalue_id get_svalue_for_label (tree ptr_type, tree label);
+
+ region_id get_region_for_fndecl (tree fndecl);
+ region_id get_region_for_label (tree label);
+
+ svalue_id maybe_cast (tree type, svalue_id sid, region_model_context *ctxt);
+ svalue_id maybe_cast_1 (tree type, svalue_id sid);
+
+ region_id get_field_region (region_id rid, tree field);
+
+ region_id deref_rvalue (svalue_id ptr_sid, region_model_context *ctxt);
+ region_id deref_rvalue (tree ptr, region_model_context *ctxt);
+
+ void set_value (region_id lhs_rid, svalue_id rhs_sid,
+ region_model_context *ctxt);
+ svalue_id set_to_new_unknown_value (region_id dst_rid, tree type,
+ region_model_context *ctxt);
+
+ tristate eval_condition (svalue_id lhs,
+ enum tree_code op,
+ svalue_id rhs) const;
+ tristate eval_condition_without_cm (svalue_id lhs,
+ enum tree_code op,
+ svalue_id rhs) const;
+ tristate eval_condition (tree lhs,
+ enum tree_code op,
+ tree rhs,
+ region_model_context *ctxt);
+ bool add_constraint (tree lhs, enum tree_code op, tree rhs,
+ region_model_context *ctxt);
+
+ tree maybe_get_constant (svalue_id sid) const;
+
+ region_id add_new_malloc_region ();
+
+ tree get_representative_tree (svalue_id sid) const;
+ path_var get_representative_path_var (region_id rid) const;
+ void get_path_vars_for_svalue (svalue_id sid, vec<path_var> *out) const;
+
+ void purge_unused_svalues (purge_stats *out,
+ region_model_context *ctxt,
+ svalue_id *known_used_sid = NULL);
+ void remap_svalue_ids (const svalue_id_map &map);
+ void remap_region_ids (const region_id_map &map);
+
+ void purge_regions (const region_id_set &set,
+ purge_stats *stats,
+ logger *logger);
+
+ unsigned get_num_svalues () const { return m_svalues.length (); }
+ unsigned get_num_regions () const { return m_regions.length (); }
+
+ /* For selftests. */
+ constraint_manager *get_constraints ()
+ {
+ return m_constraints;
+ }
+
+ void get_descendents (region_id rid, region_id_set *out,
+ region_id exclude_rid) const;
+
+ void delete_region_and_descendents (region_id rid,
+ enum poison_kind pkind,
+ purge_stats *stats,
+ logger *logger);
+
+ bool can_merge_with_p (const region_model &other_model,
+ region_model *out_model,
+ svalue_id_merger_mapping *out) const;
+ bool can_merge_with_p (const region_model &other_model,
+ region_model *out_model) const;
+
+ svalue_id get_value_by_name (const char *name) const;
+
+ svalue_id convert_byte_offset_to_array_index (tree ptr_type,
+ svalue_id offset_sid);
+
+ region_id get_or_create_mem_ref (tree type,
+ svalue_id ptr_sid,
+ svalue_id offset_sid,
+ region_model_context *ctxt);
+ region_id get_or_create_pointer_plus_expr (tree type,
+ svalue_id ptr_sid,
+ svalue_id offset_sid,
+ region_model_context *ctxt);
+ region_id get_or_create_view (region_id raw_rid, tree type);
+
+ tree get_fndecl_for_call (const gcall *call,
+ region_model_context *ctxt);
+
+ private:
+ region_id get_lvalue_1 (path_var pv, region_model_context *ctxt);
+ svalue_id get_rvalue_1 (path_var pv, region_model_context *ctxt);
+
+ void add_any_constraints_from_ssa_def_stmt (tree lhs,
+ enum tree_code op,
+ tree rhs,
+ region_model_context *ctxt);
+
+ void update_for_call_superedge (const call_superedge &call_edge,
+ region_model_context *ctxt);
+ void update_for_return_superedge (const return_superedge &return_edge,
+ region_model_context *ctxt);
+ void update_for_call_summary (const callgraph_superedge &cg_sedge,
+ region_model_context *ctxt);
+ bool apply_constraints_for_gcond (const cfg_superedge &edge,
+ const gcond *cond_stmt,
+ region_model_context *ctxt);
+ bool apply_constraints_for_gswitch (const switch_cfg_superedge &edge,
+ const gswitch *switch_stmt,
+ region_model_context *ctxt);
+
+ void poison_any_pointers_to_bad_regions (const region_id_set &bad_regions,
+ enum poison_kind pkind);
+
+ void dump_summary_of_map (pretty_printer *pp, map_region *map_region,
+ bool *is_first) const;
+
+ auto_delete_vec<svalue> m_svalues;
+ auto_delete_vec<region> m_regions;
+ region_id m_root_rid;
+ constraint_manager *m_constraints; // TODO: embed, rather than dynalloc?
+};
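+
+/* Editorial usage sketch (illustrative only; see also the selftest support
+   at the end of this header).  Assuming placeholder trees "x" (a VAR_DECL)
+   and "int_42" (a matching INTEGER_CST):
+
+     region_model model;
+     bool sat = model.add_constraint (x, EQ_EXPR, int_42, NULL);
+     gcc_assert (sat);
+     tristate t = model.eval_condition (x, EQ_EXPR, int_42, NULL);
+     gcc_assert (t.is_true ());
+
+   add_constraint returns false if the new condition would make the model
+   unsatisfiable; eval_condition consults the constraint_manager and
+   returns a tristate (true, false or unknown).  */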
+
+/* Some region_model activity could lead to warnings (e.g. attempts to use an
+ uninitialized value). This abstract base class encapsulates an interface
+ for the region model to use when emitting such warnings.
+
+ It also provides an interface for being notified about svalue_ids being
+ remapped, and being deleted.
+
+ Having this as an abstract base class allows us to support the various
+ operations needed by program_state in the analyzer within region_model,
+ whilst keeping them somewhat modularized. */
+
+class region_model_context
+{
+ public:
+ virtual void warn (pending_diagnostic *d) = 0;
+
+ /* Hook for clients that store svalue_id instances, so that they
+ can remap their IDs when the underlying region_model renumbers
+ the IDs. */
+ virtual void remap_svalue_ids (const svalue_id_map &map) = 0;
+
+#if 0
+ /* Return true if it's OK to purge SID when simplifying state.
+ Subclasses can return false for values that have sm state,
+ to avoid generating "leak" false positives. */
+ virtual bool can_purge_p (svalue_id sid) = 0;
+#endif
+
+ /* Hook for clients to be notified when a range of SIDs has
+ been purged, so that they can purge state relating to those
+ values (and potentially emit warnings about leaks).
+ All SIDs from FIRST_PURGED_SID numerically upwards are being
+ purged.
+ The return value is a count of how many items of data the client
+ has purged (potentially for use in selftests).
+ MAP has already been applied to the IDs, but is provided in case
+ the client needs to figure out the old IDs. */
+ virtual int on_svalue_purge (svalue_id first_purged_sid,
+ const svalue_id_map &map) = 0;
+
+ virtual logger *get_logger () = 0;
+
+ /* Hook for clients to be notified when CHILD_SID is created
+ from PARENT_SID, when "inheriting" a value for a region from a
+ parent region.
+ This exists so that state machines that inherit state can
+ propagate the state from parent to child. */
+ virtual void on_inherited_svalue (svalue_id parent_sid,
+ svalue_id child_sid) = 0;
+
+ /* Hook for clients to be notified when DST_SID is created
+ (or reused) as a cast from SRC_SID.
+ This exists so that state machines can propagate the state
+ from SRC_SID to DST_SID. */
+ virtual void on_cast (svalue_id src_sid,
+ svalue_id dst_sid) = 0;
+
+ /* Hook for clients to be notified when the condition
+ "LHS OP RHS" is added to the region model.
+ This exists so that state machines can detect tests on edges,
+ and use them to trigger sm-state transitions (e.g. transitions due
+ to ptrs becoming known to be NULL or non-NULL, rather than just
+ "unchecked") */
+ virtual void on_condition (tree lhs, enum tree_code op, tree rhs) = 0;
+};
+
+/* A bundle of data for use when attempting to merge two region_model
+ instances to make a third. */
+
+struct model_merger
+{
+ model_merger (const region_model *model_a,
+ const region_model *model_b,
+ region_model *merged_model,
+ svalue_id_merger_mapping *sid_mapping)
+ : m_model_a (model_a), m_model_b (model_b),
+ m_merged_model (merged_model),
+ m_map_regions_from_a_to_m (model_a->get_num_regions ()),
+ m_map_regions_from_b_to_m (model_b->get_num_regions ()),
+ m_sid_mapping (sid_mapping)
+ {
+ gcc_assert (sid_mapping);
+ }
+
+ void dump_to_pp (pretty_printer *pp) const;
+ void dump (FILE *fp) const;
+ void dump () const;
+
+ template <typename Subclass>
+ Subclass *get_region_a (region_id rid_a) const
+ {
+ return m_model_a->get_region <Subclass> (rid_a);
+ }
+
+ template <typename Subclass>
+ Subclass *get_region_b (region_id rid_b) const
+ {
+ return m_model_b->get_region <Subclass> (rid_b);
+ }
+
+ bool can_merge_values_p (svalue_id sid_a,
+ svalue_id sid_b,
+ svalue_id *merged_sid);
+
+ void record_regions (region_id a_rid,
+ region_id b_rid,
+ region_id merged_rid);
+
+ void record_svalues (svalue_id a_sid,
+ svalue_id b_sid,
+ svalue_id merged_sid);
+
+ const region_model *m_model_a;
+ const region_model *m_model_b;
+ region_model *m_merged_model;
+
+ one_way_region_id_map m_map_regions_from_a_to_m;
+ one_way_region_id_map m_map_regions_from_b_to_m;
+ svalue_id_merger_mapping *m_sid_mapping;
+};
+
+/* A bundle of data that can be optionally generated during merger of two
+ region_models that describes how svalue_ids in each of the two inputs
+ are mapped to svalue_ids in the merged output.
+
+ For use when merging sm-states within program_state. */
+
+struct svalue_id_merger_mapping
+{
+ svalue_id_merger_mapping (const region_model &a,
+ const region_model &b);
+
+ void dump_to_pp (pretty_printer *pp) const;
+ void dump (FILE *fp) const;
+ void dump () const;
+
+ one_way_svalue_id_map m_map_from_a_to_m;
+ one_way_svalue_id_map m_map_from_b_to_m;
+};
+
+/* A bundle of data used when canonicalizing a region_model so that the
+ order of regions and svalues is predictable (thus increasing
+ the chance of two region_models being equal).
+
+ This object is used to keep track of a recursive traversal across the
+ svalues and regions within the model, made in a deterministic order,
+ assigning new ids the first time each region or svalue is
+ encountered. */
+
+struct canonicalization
+{
+ canonicalization (const region_model &model);
+ void walk_rid (region_id rid);
+ void walk_sid (svalue_id sid);
+
+ void dump_to_pp (pretty_printer *pp) const;
+ void dump (FILE *fp) const;
+ void dump () const;
+
+ const region_model &m_model;
+ /* Maps from existing IDs to new IDs. */
+ region_id_map m_rid_map;
+ svalue_id_map m_sid_map;
+ /* The next IDs to hand out. */
+ int m_next_rid_int;
+ int m_next_sid_int;
+};
+
+namespace inchash
+{
+ extern void add (svalue_id sid, hash &hstate);
+ extern void add (region_id rid, hash &hstate);
+} // namespace inchash
+
+extern void debug (const region_model &rmodel);
+
+#if CHECKING_P
+
+namespace selftest {
+
+/* An implementation of region_model_context for use in selftests, which
+ stores any pending_diagnostic instances passed to it. */
+
+class test_region_model_context : public region_model_context
+{
+public:
+ void warn (pending_diagnostic *d) FINAL OVERRIDE
+ {
+ m_diagnostics.safe_push (d);
+ }
+
+ void remap_svalue_ids (const svalue_id_map &) FINAL OVERRIDE
+ {
+ /* Empty. */
+ }
+
+#if 0
+ bool can_purge_p (svalue_id) FINAL OVERRIDE
+ {
+ return true;
+ }
+#endif
+
+ int on_svalue_purge (svalue_id, const svalue_id_map &) FINAL OVERRIDE
+ {
+ /* Empty. */
+ return 0;
+ }
+
+ logger *get_logger () FINAL OVERRIDE { return NULL; }
+
+ void on_inherited_svalue (svalue_id parent_sid ATTRIBUTE_UNUSED,
+ svalue_id child_sid ATTRIBUTE_UNUSED)
+ FINAL OVERRIDE
+ {
+ }
+
+ void on_cast (svalue_id src_sid ATTRIBUTE_UNUSED,
+ svalue_id dst_sid ATTRIBUTE_UNUSED) FINAL OVERRIDE
+ {
+ }
+
+ unsigned get_num_diagnostics () const { return m_diagnostics.length (); }
+
+ void on_condition (tree lhs ATTRIBUTE_UNUSED,
+ enum tree_code op ATTRIBUTE_UNUSED,
+ tree rhs ATTRIBUTE_UNUSED) FINAL OVERRIDE
+ {
+ }
+
+private:
+ /* Implicitly delete any diagnostics in the dtor. */
+ auto_delete_vec<pending_diagnostic> m_diagnostics;
+};
+
+/* Attempt to add the constraint (LHS OP RHS) to MODEL.
+ Verify that MODEL remains satisfiable. */
+
+#define ADD_SAT_CONSTRAINT(MODEL, LHS, OP, RHS) \
+ SELFTEST_BEGIN_STMT \
+ bool sat = (MODEL).add_constraint (LHS, OP, RHS, NULL); \
+ ASSERT_TRUE (sat); \
+ SELFTEST_END_STMT
+
+/* Attempt to add the constraint (LHS OP RHS) to MODEL.
+ Verify that the result is not satisfiable. */
+
+#define ADD_UNSAT_CONSTRAINT(MODEL, LHS, OP, RHS) \
+ SELFTEST_BEGIN_STMT \
+ bool sat = (MODEL).add_constraint (LHS, OP, RHS, NULL); \
+ ASSERT_FALSE (sat); \
+ SELFTEST_END_STMT
+
+/* Implementation detail of the ASSERT_CONDITION_* macros. */
+
+void assert_condition (const location &loc,
+ region_model &model,
+ tree lhs, tree_code op, tree rhs,
+ tristate expected);
+
+/* Assert that REGION_MODEL evaluates the condition "LHS OP RHS"
+ as "true". */
+
+#define ASSERT_CONDITION_TRUE(REGION_MODEL, LHS, OP, RHS) \
+ SELFTEST_BEGIN_STMT \
+ assert_condition (SELFTEST_LOCATION, REGION_MODEL, LHS, OP, RHS, \
+ tristate (tristate::TS_TRUE)); \
+ SELFTEST_END_STMT
+
+/* Assert that REGION_MODEL evaluates the condition "LHS OP RHS"
+ as "false". */
+
+#define ASSERT_CONDITION_FALSE(REGION_MODEL, LHS, OP, RHS) \
+ SELFTEST_BEGIN_STMT \
+ assert_condition (SELFTEST_LOCATION, REGION_MODEL, LHS, OP, RHS, \
+ tristate (tristate::TS_FALSE)); \
+ SELFTEST_END_STMT
+
+/* Assert that REGION_MODEL evaluates the condition "LHS OP RHS"
+ as "unknown". */
+
+#define ASSERT_CONDITION_UNKNOWN(REGION_MODEL, LHS, OP, RHS) \
+ SELFTEST_BEGIN_STMT \
+ assert_condition (SELFTEST_LOCATION, REGION_MODEL, LHS, OP, RHS, \
+ tristate (tristate::TS_UNKNOWN)); \
+ SELFTEST_END_STMT
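+
+/* Editorial sketch of the intended use of the macros above inside a
+   selftest (illustrative only; "x" and "int_17" are placeholder trees
+   that a real test would build first):
+
+     static void
+     test_constraints ()
+     {
+       region_model model;
+       ADD_SAT_CONSTRAINT (model, x, GE_EXPR, int_17);
+       ASSERT_CONDITION_TRUE (model, x, GE_EXPR, int_17);
+       ADD_UNSAT_CONSTRAINT (model, x, LT_EXPR, int_17);
+     }
+*/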
+
+} /* end of namespace selftest. */
+
+#endif /* #if CHECKING_P */
+
+#endif /* GCC_ANALYZER_REGION_MODEL_H */
--- /dev/null
+/* A state machine for detecting misuses of <stdio.h>'s FILE * API.
+ Copyright (C) 2019-2020 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tree.h"
+#include "function.h"
+#include "basic-block.h"
+#include "gimple.h"
+#include "options.h"
+#include "diagnostic-path.h"
+#include "diagnostic-metadata.h"
+#include "function.h"
+#include "analyzer/analyzer.h"
+#include "diagnostic-event-id.h"
+#include "analyzer/analyzer-logging.h"
+#include "analyzer/sm.h"
+#include "diagnostic-event-id.h"
+#include "analyzer/sm.h"
+#include "analyzer/pending-diagnostic.h"
+
+#if ENABLE_ANALYZER
+
+namespace {
+
+/* A state machine for detecting misuses of <stdio.h>'s FILE * API. */
+
+class fileptr_state_machine : public state_machine
+{
+public:
+ fileptr_state_machine (logger *logger);
+
+ bool inherited_state_p () const FINAL OVERRIDE { return false; }
+
+ bool on_stmt (sm_context *sm_ctxt,
+ const supernode *node,
+ const gimple *stmt) const FINAL OVERRIDE;
+
+ void on_condition (sm_context *sm_ctxt,
+ const supernode *node,
+ const gimple *stmt,
+ tree lhs,
+ enum tree_code op,
+ tree rhs) const FINAL OVERRIDE;
+
+ bool can_purge_p (state_t s) const FINAL OVERRIDE;
+ pending_diagnostic *on_leak (tree var) const FINAL OVERRIDE;
+
+ /* Start state. */
+ state_t m_start;
+
+ /* State for a FILE * returned from fopen that hasn't been checked for
+ NULL.
+ It could be an open stream, or could be NULL. */
+ state_t m_unchecked;
+
+ /* State for a FILE * that's known to be NULL. */
+ state_t m_null;
+
+ /* State for a FILE * that's known to be a non-NULL open stream. */
+ state_t m_nonnull;
+
+ /* State for a FILE * that's had fclose called on it. */
+ state_t m_closed;
+
+ /* Stop state, for a FILE * we don't want to track any more. */
+ state_t m_stop;
+};
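+
+/* Editorial illustration (user-level code, not analyzer code): the states
+   above correspond to a path such as the following, for which -fanalyzer
+   can emit -Wanalyzer-double-fclose:
+
+     FILE *f = fopen ("test.txt", "r");    start -> unchecked
+     if (!f)
+       return;                             unchecked -> null / nonnull
+     fclose (f);                           nonnull -> closed
+     fclose (f);                           closed: double_fclose diagnosed
+
+   The transitions are implemented in on_stmt and on_condition below.  */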
+
+/* Base class for diagnostics relating to fileptr_state_machine. */
+
+class file_diagnostic : public pending_diagnostic
+{
+public:
+ file_diagnostic (const fileptr_state_machine &sm, tree arg)
+ : m_sm (sm), m_arg (arg)
+ {}
+
+ bool subclass_equal_p (const pending_diagnostic &base_other) const OVERRIDE
+ {
+ return m_arg == ((const file_diagnostic &)base_other).m_arg;
+ }
+
+ label_text describe_state_change (const evdesc::state_change &change)
+ OVERRIDE
+ {
+ if (change.m_old_state == m_sm.m_start
+ && change.m_new_state == m_sm.m_unchecked)
+ // TODO: verify that it's the fopen stmt, not a copy
+ return label_text::borrow ("opened here");
+ if (change.m_old_state == m_sm.m_unchecked
+ && change.m_new_state == m_sm.m_nonnull)
+ return change.formatted_print ("assuming %qE is non-NULL",
+ change.m_expr);
+ if (change.m_new_state == m_sm.m_null)
+ return change.formatted_print ("assuming %qE is NULL",
+ change.m_expr);
+ return label_text ();
+ }
+
+protected:
+ const fileptr_state_machine &m_sm;
+ tree m_arg;
+};
+
+class double_fclose : public file_diagnostic
+{
+public:
+ double_fclose (const fileptr_state_machine &sm, tree arg)
+ : file_diagnostic (sm, arg)
+ {}
+
+ const char *get_kind () const FINAL OVERRIDE { return "double_fclose"; }
+
+ bool emit (rich_location *rich_loc) FINAL OVERRIDE
+ {
+ return warning_at (rich_loc, OPT_Wanalyzer_double_fclose,
+ "double %<fclose%> of FILE %qE",
+ m_arg);
+ }
+
+ label_text describe_state_change (const evdesc::state_change &change)
+ OVERRIDE
+ {
+ if (change.m_new_state == m_sm.m_closed)
+ {
+ m_first_fclose_event = change.m_event_id;
+ return change.formatted_print ("first %qs here", "fclose");
+ }
+ return file_diagnostic::describe_state_change (change);
+ }
+
+ label_text describe_final_event (const evdesc::final_event &ev) FINAL OVERRIDE
+ {
+ if (m_first_fclose_event.known_p ())
+ return ev.formatted_print ("second %qs here; first %qs was at %@",
+ "fclose", "fclose",
+ &m_first_fclose_event);
+ return ev.formatted_print ("second %qs here", "fclose");
+ }
+
+private:
+ diagnostic_event_id_t m_first_fclose_event;
+};
+
+class file_leak : public file_diagnostic
+{
+public:
+ file_leak (const fileptr_state_machine &sm, tree arg)
+ : file_diagnostic (sm, arg)
+ {}
+
+ const char *get_kind () const FINAL OVERRIDE { return "file_leak"; }
+
+ bool emit (rich_location *rich_loc) FINAL OVERRIDE
+ {
+ diagnostic_metadata m;
+ /* CWE-775: "Missing Release of File Descriptor or Handle after
+ Effective Lifetime". */
+ m.add_cwe (775);
+ return warning_at (rich_loc, m, OPT_Wanalyzer_file_leak,
+ "leak of FILE %qE",
+ m_arg);
+ }
+
+ label_text describe_state_change (const evdesc::state_change &change)
+ FINAL OVERRIDE
+ {
+ if (change.m_new_state == m_sm.m_unchecked)
+ {
+ m_fopen_event = change.m_event_id;
+ return label_text::borrow ("opened here");
+ }
+ return file_diagnostic::describe_state_change (change);
+ }
+
+ label_text describe_final_event (const evdesc::final_event &ev) FINAL OVERRIDE
+ {
+ if (m_fopen_event.known_p ())
+ return ev.formatted_print ("%qE leaks here; was opened at %@",
+ ev.m_expr, &m_fopen_event);
+ else
+ return ev.formatted_print ("%qE leaks here", ev.m_expr);
+ }
+
+private:
+ diagnostic_event_id_t m_fopen_event;
+};
+
+/* fileptr_state_machine's ctor. */
+
+fileptr_state_machine::fileptr_state_machine (logger *logger)
+: state_machine ("file", logger)
+{
+ m_start = add_state ("start");
+ m_unchecked = add_state ("unchecked");
+ m_null = add_state ("null");
+ m_nonnull = add_state ("nonnull");
+ m_closed = add_state ("closed");
+ m_stop = add_state ("stop");
+}
+
+/* Implementation of state_machine::on_stmt vfunc for fileptr_state_machine. */
+
+bool
+fileptr_state_machine::on_stmt (sm_context *sm_ctxt,
+ const supernode *node,
+ const gimple *stmt) const
+{
+ if (const gcall *call = dyn_cast <const gcall *> (stmt))
+ if (tree callee_fndecl = sm_ctxt->get_fndecl_for_call (call))
+ {
+ if (is_named_call_p (callee_fndecl, "fopen", call, 2))
+ {
+ tree lhs = gimple_call_lhs (call);
+ if (lhs)
+ {
+ lhs = sm_ctxt->get_readable_tree (lhs);
+ sm_ctxt->on_transition (node, stmt, lhs, m_start, m_unchecked);
+ }
+ else
+ {
+ /* TODO: report leak. */
+ }
+ return true;
+ }
+
+ if (is_named_call_p (callee_fndecl, "fclose", call, 1))
+ {
+ tree arg = gimple_call_arg (call, 0);
+ arg = sm_ctxt->get_readable_tree (arg);
+
+ sm_ctxt->on_transition (node, stmt, arg, m_start, m_closed);
+
+ // TODO: is it safe to call fclose (NULL)?
+ sm_ctxt->on_transition (node, stmt, arg, m_unchecked, m_closed);
+ sm_ctxt->on_transition (node, stmt, arg, m_null, m_closed);
+
+ sm_ctxt->on_transition (node, stmt, arg, m_nonnull, m_closed);
+
+ sm_ctxt->warn_for_state (node, stmt, arg, m_closed,
+ new double_fclose (*this, arg));
+ sm_ctxt->on_transition (node, stmt, arg, m_closed, m_stop);
+ return true;
+ }
+
+ // TODO: operations on closed file
+ // etc
+ }
+
+ return false;
+}
+
+/* Implementation of state_machine::on_condition vfunc for
+ fileptr_state_machine.
+ Potentially transition state 'unchecked' to 'nonnull' or to 'null'. */
+
+void
+fileptr_state_machine::on_condition (sm_context *sm_ctxt,
+ const supernode *node,
+ const gimple *stmt,
+ tree lhs,
+ enum tree_code op,
+ tree rhs) const
+{
+ if (!zerop (rhs))
+ return;
+
+ // TODO: has to be a FILE *, specifically
+ if (TREE_CODE (TREE_TYPE (lhs)) != POINTER_TYPE)
+ return;
+
+ // TODO: has to be a FILE *, specifically
+ if (TREE_CODE (TREE_TYPE (rhs)) != POINTER_TYPE)
+ return;
+
+ if (op == NE_EXPR)
+ {
+ log ("got 'ARG != 0' match");
+ sm_ctxt->on_transition (node, stmt,
+ lhs, m_unchecked, m_nonnull);
+ }
+ else if (op == EQ_EXPR)
+ {
+ log ("got 'ARG == 0' match");
+ sm_ctxt->on_transition (node, stmt,
+ lhs, m_unchecked, m_null);
+ }
+}
+
+/* Implementation of state_machine::can_purge_p vfunc for fileptr_state_machine.
+ Don't allow purging of pointers in state 'unchecked' or 'nonnull'
+ (to avoid false leak reports). */
+
+bool
+fileptr_state_machine::can_purge_p (state_t s) const
+{
+ return s != m_unchecked && s != m_nonnull;
+}
+
+/* Implementation of state_machine::on_leak vfunc for
+ fileptr_state_machine, for complaining about leaks of FILE * in
+ state 'unchecked' and 'nonnull'. */
+
+pending_diagnostic *
+fileptr_state_machine::on_leak (tree var) const
+{
+ return new file_leak (*this, var);
+}
+
+} // anonymous namespace
+
+/* Internal interface to this file. */
+
+state_machine *
+make_fileptr_state_machine (logger *logger)
+{
+ return new fileptr_state_machine (logger);
+}
+
+#endif /* #if ENABLE_ANALYZER */
--- /dev/null
+/* A state machine for detecting misuses of the malloc/free API.
+ Copyright (C) 2019-2020 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tree.h"
+#include "function.h"
+#include "basic-block.h"
+#include "gimple.h"
+#include "options.h"
+#include "bitmap.h"
+#include "diagnostic-path.h"
+#include "diagnostic-metadata.h"
+#include "function.h"
+#include "analyzer/analyzer.h"
+#include "diagnostic-event-id.h"
+#include "analyzer/analyzer-logging.h"
+#include "analyzer/sm.h"
+#include "analyzer/pending-diagnostic.h"
+
+#if ENABLE_ANALYZER
+
+namespace {
+
+/* A state machine for detecting misuses of the malloc/free API.
+
+ See sm-malloc.dot for an overview (keep this in-sync with that file). */
+
+class malloc_state_machine : public state_machine
+{
+public:
+ malloc_state_machine (logger *logger);
+
+ bool inherited_state_p () const FINAL OVERRIDE { return false; }
+
+ bool on_stmt (sm_context *sm_ctxt,
+ const supernode *node,
+ const gimple *stmt) const FINAL OVERRIDE;
+
+ void on_condition (sm_context *sm_ctxt,
+ const supernode *node,
+ const gimple *stmt,
+ tree lhs,
+ enum tree_code op,
+ tree rhs) const FINAL OVERRIDE;
+
+ bool can_purge_p (state_t s) const FINAL OVERRIDE;
+ pending_diagnostic *on_leak (tree var) const FINAL OVERRIDE;
+
+ /* Start state. */
+ state_t m_start;
+
+ /* State for a pointer returned from malloc that hasn't been checked for
+ NULL.
+ It could be a pointer to heap-allocated memory, or could be NULL. */
+ state_t m_unchecked;
+
+ /* State for a pointer that's known to be NULL. */
+ state_t m_null;
+
+ /* State for a pointer to heap-allocated memory, known to be non-NULL. */
+ state_t m_nonnull;
+
+ /* State for a pointer to freed memory. */
+ state_t m_freed;
+
+ /* State for a pointer that's known to not be on the heap (e.g. to a local
+ or global). */
+ state_t m_non_heap; // TODO: or should this be a different state machine?
+ // or do we need child values etc?
+
+ /* Stop state, for pointers we don't want to track any more. */
+ state_t m_stop;
+};
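+
+/* Editorial illustration (user-level code, not analyzer code): a minimal
+   path through the states above, for which -fanalyzer can emit
+   -Wanalyzer-double-free:
+
+     void *p = malloc (16);    start -> unchecked
+     if (!p)
+       return;                 unchecked -> null / nonnull
+     free (p);                 nonnull -> freed
+     free (p);                 freed: double_free diagnosed
+
+   Pointers known to point at locals or globals are instead tracked in
+   m_non_heap.  The transitions are implemented in on_stmt and
+   on_condition below.  */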
+
+/* Class for diagnostics relating to malloc_state_machine. */
+
+class malloc_diagnostic : public pending_diagnostic
+{
+public:
+ malloc_diagnostic (const malloc_state_machine &sm, tree arg)
+ : m_sm (sm), m_arg (arg)
+ {}
+
+ bool subclass_equal_p (const pending_diagnostic &base_other) const OVERRIDE
+ {
+ return m_arg == ((const malloc_diagnostic &)base_other).m_arg;
+ }
+
+ label_text describe_state_change (const evdesc::state_change &change)
+ OVERRIDE
+ {
+ if (change.m_old_state == m_sm.m_start
+ && change.m_new_state == m_sm.m_unchecked)
+ // TODO: verify that it's the allocation stmt, not a copy
+ return label_text::borrow ("allocated here");
+ if (change.m_old_state == m_sm.m_unchecked
+ && change.m_new_state == m_sm.m_nonnull)
+ return change.formatted_print ("assuming %qE is non-NULL",
+ change.m_expr);
+ if (change.m_new_state == m_sm.m_null)
+ return change.formatted_print ("assuming %qE is NULL",
+ change.m_expr);
+ return label_text ();
+ }
+
+protected:
+ const malloc_state_machine &m_sm;
+ tree m_arg;
+};
+
+/* Concrete subclass for reporting double-free diagnostics. */
+
+class double_free : public malloc_diagnostic
+{
+public:
+ double_free (const malloc_state_machine &sm, tree arg)
+ : malloc_diagnostic (sm, arg)
+ {}
+
+ const char *get_kind () const FINAL OVERRIDE { return "double_free"; }
+
+ bool emit (rich_location *rich_loc) FINAL OVERRIDE
+ {
+ auto_diagnostic_group d;
+ diagnostic_metadata m;
+ m.add_cwe (415); /* CWE-415: Double Free. */
+ return warning_at (rich_loc, m, OPT_Wanalyzer_double_free,
+ "double-%<free%> of %qE", m_arg);
+ }
+
+ label_text describe_state_change (const evdesc::state_change &change)
+ FINAL OVERRIDE
+ {
+ if (change.m_new_state == m_sm.m_freed)
+ {
+ m_first_free_event = change.m_event_id;
+ return change.formatted_print ("first %qs here", "free");
+ }
+ return malloc_diagnostic::describe_state_change (change);
+ }
+
+ label_text describe_call_with_state (const evdesc::call_with_state &info)
+ FINAL OVERRIDE
+ {
+ if (info.m_state == m_sm.m_freed)
+ return info.formatted_print
+ ("passing freed pointer %qE in call to %qE from %qE",
+ info.m_expr, info.m_callee_fndecl, info.m_caller_fndecl);
+ return label_text ();
+ }
+
+ label_text describe_final_event (const evdesc::final_event &ev) FINAL OVERRIDE
+ {
+ if (m_first_free_event.known_p ())
+ return ev.formatted_print ("second %qs here; first %qs was at %@",
+ "free", "free",
+ &m_first_free_event);
+ return ev.formatted_print ("second %qs here", "free");
+ }
+
+private:
+ diagnostic_event_id_t m_first_free_event;
+};
+
+/* Abstract subclass for describing possible bad uses of NULL.
+ Responsible for describing the call that could return NULL. */
+
+class possible_null : public malloc_diagnostic
+{
+public:
+ possible_null (const malloc_state_machine &sm, tree arg)
+ : malloc_diagnostic (sm, arg)
+ {}
+
+ label_text describe_state_change (const evdesc::state_change &change)
+ FINAL OVERRIDE
+ {
+ if (change.m_old_state == m_sm.m_start
+ && change.m_new_state == m_sm.m_unchecked)
+ {
+ m_origin_of_unchecked_event = change.m_event_id;
+ return label_text::borrow ("this call could return NULL");
+ }
+ return malloc_diagnostic::describe_state_change (change);
+ }
+
+ label_text describe_return_of_state (const evdesc::return_of_state &info)
+ FINAL OVERRIDE
+ {
+ if (info.m_state == m_sm.m_unchecked)
+ return info.formatted_print ("possible return of NULL to %qE from %qE",
+ info.m_caller_fndecl, info.m_callee_fndecl);
+ return label_text ();
+ }
+
+protected:
+ diagnostic_event_id_t m_origin_of_unchecked_event;
+};
+
+/* Concrete subclass for describing dereference of a possible NULL
+ value. */
+
+class possible_null_deref : public possible_null
+{
+public:
+ possible_null_deref (const malloc_state_machine &sm, tree arg)
+ : possible_null (sm, arg)
+ {}
+
+ const char *get_kind () const FINAL OVERRIDE { return "possible_null_deref"; }
+
+ bool emit (rich_location *rich_loc) FINAL OVERRIDE
+ {
+ /* CWE-690: Unchecked Return Value to NULL Pointer Dereference. */
+ diagnostic_metadata m;
+ m.add_cwe (690);
+ return warning_at (rich_loc, m, OPT_Wanalyzer_possible_null_dereference,
+ "dereference of possibly-NULL %qE", m_arg);
+ }
+
+ label_text describe_final_event (const evdesc::final_event &ev) FINAL OVERRIDE
+ {
+ if (m_origin_of_unchecked_event.known_p ())
+ return ev.formatted_print ("%qE could be NULL: unchecked value from %@",
+ ev.m_expr,
+ &m_origin_of_unchecked_event);
+ else
+ return ev.formatted_print ("%qE could be NULL", ev.m_expr);
+ }
+
+};
+
+/* Subroutine for use by possible_null_arg::emit and null_arg::emit.
+ Issue a note informing that the pertinent argument must be non-NULL. */
+
+static void
+inform_nonnull_attribute (tree fndecl, int arg_idx)
+{
+ inform (DECL_SOURCE_LOCATION (fndecl),
+ "argument %u of %qD must be non-null",
+ arg_idx + 1, fndecl);
+ /* Ideally we would use the location of the parm and underline the
+ attribute also - but we don't have the location_t values at this point
+ in the middle-end.
+ For reference, the C and C++ FEs have get_fndecl_argument_location. */
+}
+
+/* Concrete subclass for describing passing a possibly-NULL value to a
+ function marked with __attribute__((nonnull)). */
+
+class possible_null_arg : public possible_null
+{
+public:
+ possible_null_arg (const malloc_state_machine &sm, tree arg,
+ tree fndecl, int arg_idx)
+ : possible_null (sm, arg),
+ m_fndecl (fndecl), m_arg_idx (arg_idx)
+ {}
+
+ const char *get_kind () const FINAL OVERRIDE { return "possible_null_arg"; }
+
+ bool subclass_equal_p (const pending_diagnostic &base_other) const
+ {
+ const possible_null_arg &sub_other
+ = (const possible_null_arg &)base_other;
+ return (m_arg == sub_other.m_arg
+ && m_fndecl == sub_other.m_fndecl
+ && m_arg_idx == sub_other.m_arg_idx);
+ }
+
+ bool emit (rich_location *rich_loc) FINAL OVERRIDE
+ {
+ /* CWE-690: Unchecked Return Value to NULL Pointer Dereference. */
+ auto_diagnostic_group d;
+ diagnostic_metadata m;
+ m.add_cwe (690);
+ bool warned
+ = warning_at (rich_loc, m, OPT_Wanalyzer_possible_null_argument,
+ "use of possibly-NULL %qE where non-null expected",
+ m_arg);
+ if (warned)
+ inform_nonnull_attribute (m_fndecl, m_arg_idx);
+ return warned;
+ }
+
+ label_text describe_final_event (const evdesc::final_event &ev) FINAL OVERRIDE
+ {
+ if (m_origin_of_unchecked_event.known_p ())
+ return ev.formatted_print ("argument %u (%qE) from %@ could be NULL"
+ " where non-null expected",
+ m_arg_idx + 1, ev.m_expr,
+ &m_origin_of_unchecked_event);
+ else
+ return ev.formatted_print ("argument %u (%qE) could be NULL"
+ " where non-null expected",
+ m_arg_idx + 1, ev.m_expr);
+ }
+
+private:
+ tree m_fndecl;
+ int m_arg_idx;
+};
+
+/* Concrete subclass for describing a dereference of a NULL value. */
+
+class null_deref : public malloc_diagnostic
+{
+public:
+ null_deref (const malloc_state_machine &sm, tree arg)
+ : malloc_diagnostic (sm, arg) {}
+
+ const char *get_kind () const FINAL OVERRIDE { return "null_deref"; }
+
+ bool emit (rich_location *rich_loc) FINAL OVERRIDE
+ {
+ /* CWE-690: Unchecked Return Value to NULL Pointer Dereference. */
+ diagnostic_metadata m;
+ m.add_cwe (690);
+ return warning_at (rich_loc, m, OPT_Wanalyzer_null_dereference,
+ "dereference of NULL %qE", m_arg);
+ }
+
+ label_text describe_return_of_state (const evdesc::return_of_state &info)
+ FINAL OVERRIDE
+ {
+ if (info.m_state == m_sm.m_null)
+ return info.formatted_print ("return of NULL to %qE from %qE",
+ info.m_caller_fndecl, info.m_callee_fndecl);
+ return label_text ();
+ }
+
+ label_text describe_final_event (const evdesc::final_event &ev) FINAL OVERRIDE
+ {
+ return ev.formatted_print ("dereference of NULL %qE", ev.m_expr);
+ }
+};
+
+/* Concrete subclass for describing passing a NULL value to a
+ function marked with __attribute__((nonnull)). */
+
+class null_arg : public malloc_diagnostic
+{
+public:
+ null_arg (const malloc_state_machine &sm, tree arg,
+ tree fndecl, int arg_idx)
+ : malloc_diagnostic (sm, arg),
+ m_fndecl (fndecl), m_arg_idx (arg_idx)
+ {}
+
+ const char *get_kind () const FINAL OVERRIDE { return "null_arg"; }
+
+ bool subclass_equal_p (const pending_diagnostic &base_other) const
+ {
+ const null_arg &sub_other
+ = (const null_arg &)base_other;
+ return (m_arg == sub_other.m_arg
+ && m_fndecl == sub_other.m_fndecl
+ && m_arg_idx == sub_other.m_arg_idx);
+ }
+
+ bool emit (rich_location *rich_loc) FINAL OVERRIDE
+ {
+ /* CWE-690: Unchecked Return Value to NULL Pointer Dereference. */
+ auto_diagnostic_group d;
+ diagnostic_metadata m;
+ m.add_cwe (690);
+ bool warned = warning_at (rich_loc, m, OPT_Wanalyzer_null_argument,
+ "use of NULL %qE where non-null expected", m_arg);
+ if (warned)
+ inform_nonnull_attribute (m_fndecl, m_arg_idx);
+ return warned;
+ }
+
+ label_text describe_final_event (const evdesc::final_event &ev) FINAL OVERRIDE
+ {
+ return ev.formatted_print ("argument %u (%qE) NULL"
+ " where non-null expected",
+ m_arg_idx + 1, ev.m_expr);
+ }
+
+private:
+ tree m_fndecl;
+ int m_arg_idx;
+};
+
+class use_after_free : public malloc_diagnostic
+{
+public:
+ use_after_free (const malloc_state_machine &sm, tree arg)
+ : malloc_diagnostic (sm, arg)
+ {}
+
+ const char *get_kind () const FINAL OVERRIDE { return "use_after_free"; }
+
+ bool emit (rich_location *rich_loc) FINAL OVERRIDE
+ {
+ /* CWE-416: Use After Free. */
+ diagnostic_metadata m;
+ m.add_cwe (416);
+ return warning_at (rich_loc, m, OPT_Wanalyzer_use_after_free,
+ "use after %<free%> of %qE", m_arg);
+ }
+
+ label_text describe_state_change (const evdesc::state_change &change)
+ FINAL OVERRIDE
+ {
+ if (change.m_new_state == m_sm.m_freed)
+ {
+ m_free_event = change.m_event_id;
+ return label_text::borrow ("freed here");
+ }
+ return malloc_diagnostic::describe_state_change (change);
+ }
+
+ label_text describe_final_event (const evdesc::final_event &ev) FINAL OVERRIDE
+ {
+ if (m_free_event.known_p ())
+ return ev.formatted_print ("use after %<free%> of %qE; freed at %@",
+ ev.m_expr, &m_free_event);
+ else
+ return ev.formatted_print ("use after %<free%> of %qE", ev.m_expr);
+ }
+
+private:
+ diagnostic_event_id_t m_free_event;
+};
+
+class malloc_leak : public malloc_diagnostic
+{
+public:
+ malloc_leak (const malloc_state_machine &sm, tree arg)
+ : malloc_diagnostic (sm, arg) {}
+
+ const char *get_kind () const FINAL OVERRIDE { return "malloc_leak"; }
+
+ bool emit (rich_location *rich_loc) FINAL OVERRIDE
+ {
+ diagnostic_metadata m;
+ m.add_cwe (401);
+ return warning_at (rich_loc, m, OPT_Wanalyzer_malloc_leak,
+ "leak of %qE", m_arg);
+ }
+
+ label_text describe_state_change (const evdesc::state_change &change)
+ FINAL OVERRIDE
+ {
+ if (change.m_new_state == m_sm.m_unchecked)
+ {
+ m_malloc_event = change.m_event_id;
+ return label_text::borrow ("allocated here");
+ }
+ return malloc_diagnostic::describe_state_change (change);
+ }
+
+ label_text describe_final_event (const evdesc::final_event &ev) FINAL OVERRIDE
+ {
+ if (m_malloc_event.known_p ())
+ return ev.formatted_print ("%qE leaks here; was allocated at %@",
+ ev.m_expr, &m_malloc_event);
+ else
+ return ev.formatted_print ("%qE leaks here", ev.m_expr);
+ }
+
+private:
+ diagnostic_event_id_t m_malloc_event;
+};
+
+class free_of_non_heap : public malloc_diagnostic
+{
+public:
+ free_of_non_heap (const malloc_state_machine &sm, tree arg)
+ : malloc_diagnostic (sm, arg), m_kind (KIND_UNKNOWN)
+ {
+ }
+
+ const char *get_kind () const FINAL OVERRIDE { return "free_of_non_heap"; }
+
+ bool subclass_equal_p (const pending_diagnostic &base_other) const
+ FINAL OVERRIDE
+ {
+ const free_of_non_heap &other = (const free_of_non_heap &)base_other;
+ return (m_arg == other.m_arg && m_kind == other.m_kind);
+ }
+
+ bool emit (rich_location *rich_loc) FINAL OVERRIDE
+ {
+ auto_diagnostic_group d;
+ diagnostic_metadata m;
+ m.add_cwe (590); /* CWE-590: Free of Memory not on the Heap. */
+ switch (m_kind)
+ {
+ default:
+ gcc_unreachable ();
+ case KIND_UNKNOWN:
+ return warning_at (rich_loc, m, OPT_Wanalyzer_free_of_non_heap,
+ "%<free%> of %qE which points to memory"
+ " not on the heap",
+ m_arg);
+ break;
+ case KIND_ALLOCA:
+ return warning_at (rich_loc, m, OPT_Wanalyzer_free_of_non_heap,
+ "%<free%> of memory allocated on the stack by"
+ " %qs (%qE) will corrupt the heap",
+ "alloca", m_arg);
+ break;
+ }
+ }
+
+ label_text describe_state_change (const evdesc::state_change &change)
+ FINAL OVERRIDE
+ {
+    /* Attempt to reconstruct what kind of pointer is being freed
+       (ideally this would be tracked as part of the state).  */
+ if (TREE_CODE (change.m_expr) == SSA_NAME)
+ {
+ gimple *def_stmt = SSA_NAME_DEF_STMT (change.m_expr);
+ if (gcall *call = dyn_cast <gcall *> (def_stmt))
+ {
+ if (is_special_named_call_p (call, "alloca", 1)
+ || is_special_named_call_p (call, "__builtin_alloca", 1))
+ {
+ m_kind = KIND_ALLOCA;
+ return label_text::borrow
+ ("memory is allocated on the stack here");
+ }
+ }
+ }
+ return label_text::borrow ("pointer is from here");
+ }
+
+ label_text describe_final_event (const evdesc::final_event &ev) FINAL OVERRIDE
+ {
+ return ev.formatted_print ("call to %qs here", "free");
+ }
+
+private:
+ enum kind
+ {
+ KIND_UNKNOWN,
+ KIND_ALLOCA
+ };
+ enum kind m_kind;
+};
+
+/* malloc_state_machine's ctor. */
+
+malloc_state_machine::malloc_state_machine (logger *logger)
+: state_machine ("malloc", logger)
+{
+ m_start = add_state ("start");
+ m_unchecked = add_state ("unchecked");
+ m_null = add_state ("null");
+ m_nonnull = add_state ("nonnull");
+ m_freed = add_state ("freed");
+ m_non_heap = add_state ("non-heap");
+ m_stop = add_state ("stop");
+}
+
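+/* Illustrative example of the transitions implemented below: given code like
+
+     void *p = malloc (n);
+     if (!p)
+       return;
+     free (p);
+     free (p);
+
+   P goes "start" -> "unchecked" at the call to malloc, "unchecked" ->
+   "nonnull" on the path where the early return is not taken, and
+   "nonnull" -> "freed" at the first call to free; the second call to
+   free then triggers the double_free diagnostic above.  */
+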
+/* Implementation of state_machine::on_stmt vfunc for malloc_state_machine. */
+
+bool
+malloc_state_machine::on_stmt (sm_context *sm_ctxt,
+ const supernode *node,
+ const gimple *stmt) const
+{
+ if (const gcall *call = dyn_cast <const gcall *> (stmt))
+ if (tree callee_fndecl = sm_ctxt->get_fndecl_for_call (call))
+ {
+ if (is_named_call_p (callee_fndecl, "malloc", call, 1)
+ || is_named_call_p (callee_fndecl, "calloc", call, 2)
+ || is_named_call_p (callee_fndecl, "__builtin_malloc", call, 1)
+ || is_named_call_p (callee_fndecl, "__builtin_calloc", call, 2))
+ {
+ tree lhs = gimple_call_lhs (call);
+ if (lhs)
+ {
+ lhs = sm_ctxt->get_readable_tree (lhs);
+ sm_ctxt->on_transition (node, stmt, lhs, m_start, m_unchecked);
+ }
+ else
+ {
+ /* TODO: report leak. */
+ }
+ return true;
+ }
+
+ if (is_named_call_p (callee_fndecl, "alloca", call, 1)
+ || is_named_call_p (callee_fndecl, "__builtin_alloca", call, 1))
+ {
+ tree lhs = gimple_call_lhs (call);
+ if (lhs)
+ {
+ lhs = sm_ctxt->get_readable_tree (lhs);
+ sm_ctxt->on_transition (node, stmt, lhs, m_start, m_non_heap);
+ }
+ return true;
+ }
+
+ if (is_named_call_p (callee_fndecl, "free", call, 1)
+ || is_named_call_p (callee_fndecl, "__builtin_free", call, 1))
+ {
+ tree arg = gimple_call_arg (call, 0);
+
+ arg = sm_ctxt->get_readable_tree (arg);
+
+ /* start/unchecked/nonnull -> freed. */
+ sm_ctxt->on_transition (node, stmt, arg, m_start, m_freed);
+ sm_ctxt->on_transition (node, stmt, arg, m_unchecked, m_freed);
+ sm_ctxt->on_transition (node, stmt, arg, m_nonnull, m_freed);
+
+	  /* Keep state "null" as-is, rather than transitioning to "freed";
+	     we don't want to complain about a double-free of NULL.  */
+
+ /* freed -> stop, with warning. */
+ sm_ctxt->warn_for_state (node, stmt, arg, m_freed,
+ new double_free (*this, arg));
+ sm_ctxt->on_transition (node, stmt, arg, m_freed, m_stop);
+
+ /* non-heap -> stop, with warning. */
+ sm_ctxt->warn_for_state (node, stmt, arg, m_non_heap,
+ new free_of_non_heap (*this, arg));
+ sm_ctxt->on_transition (node, stmt, arg, m_non_heap, m_stop);
+ return true;
+ }
+
+ /* Handle "__attribute__((nonnull))". */
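+	  /* E.g. for a callee declared as
+	       extern void frob (void *ptr) __attribute__ ((nonnull));
+	     passing a pointer in the "unchecked" or "null" state as PTR
+	     is diagnosed below.  */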
+ {
+ tree fntype = TREE_TYPE (callee_fndecl);
+ bitmap nonnull_args = get_nonnull_args (fntype);
+ if (nonnull_args)
+ {
+ for (unsigned i = 0; i < gimple_call_num_args (stmt); i++)
+ {
+ tree arg = gimple_call_arg (stmt, i);
+ if (TREE_CODE (TREE_TYPE (arg)) != POINTER_TYPE)
+ continue;
+	      /* An empty nonnull_args bitmap means that all pointer
+		 arguments must be non-NULL; otherwise only the argument
+		 indices present in the bitmap are constrained.  */
+ if (bitmap_empty_p (nonnull_args)
+ || bitmap_bit_p (nonnull_args, i))
+ {
+ sm_ctxt->warn_for_state
+ (node, stmt, arg, m_unchecked,
+ new possible_null_arg (*this, arg, callee_fndecl, i));
+ sm_ctxt->on_transition (node, stmt, arg, m_unchecked,
+ m_nonnull);
+
+ sm_ctxt->warn_for_state
+ (node, stmt, arg, m_null,
+ new null_arg (*this, arg, callee_fndecl, i));
+ sm_ctxt->on_transition (node, stmt, arg, m_null, m_stop);
+ }
+ }
+ BITMAP_FREE (nonnull_args);
+ }
+ }
+ }
+
+ if (tree lhs = is_zero_assignment (stmt))
+ {
+ if (any_pointer_p (lhs))
+ {
+ sm_ctxt->on_transition (node, stmt, lhs, m_start, m_null);
+ sm_ctxt->on_transition (node, stmt, lhs, m_unchecked, m_null);
+ sm_ctxt->on_transition (node, stmt, lhs, m_nonnull, m_null);
+ sm_ctxt->on_transition (node, stmt, lhs, m_freed, m_null);
+ }
+ }
+
+ if (const gassign *assign_stmt = dyn_cast <const gassign *> (stmt))
+ {
+ enum tree_code op = gimple_assign_rhs_code (assign_stmt);
+ if (op == ADDR_EXPR)
+ {
+ tree lhs = gimple_assign_lhs (assign_stmt);
+ if (lhs)
+ {
+ lhs = sm_ctxt->get_readable_tree (lhs);
+ sm_ctxt->on_transition (node, stmt, lhs, m_start, m_non_heap);
+ }
+ }
+ }
+
+ /* Handle dereferences. */
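+  /* E.g. a read through "*ptr" appears in gimple as a MEM_REF of PTR,
+     and "ptr->field" as a COMPONENT_REF whose first operand is such a
+     MEM_REF; in both cases PTR is the tree we transition on below.  */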
+ for (unsigned i = 0; i < gimple_num_ops (stmt); i++)
+ {
+ tree op = gimple_op (stmt, i);
+ if (!op)
+ continue;
+ if (TREE_CODE (op) == COMPONENT_REF)
+ op = TREE_OPERAND (op, 0);
+
+ if (TREE_CODE (op) == MEM_REF)
+ {
+ tree arg = TREE_OPERAND (op, 0);
+ arg = sm_ctxt->get_readable_tree (arg);
+
+ sm_ctxt->warn_for_state (node, stmt, arg, m_unchecked,
+ new possible_null_deref (*this, arg));
+ sm_ctxt->on_transition (node, stmt, arg, m_unchecked, m_nonnull);
+
+ sm_ctxt->warn_for_state (node, stmt, arg, m_null,
+ new null_deref (*this, arg));
+ sm_ctxt->on_transition (node, stmt, arg, m_null, m_stop);
+
+ sm_ctxt->warn_for_state (node, stmt, arg, m_freed,
+ new use_after_free (*this, arg));
+ sm_ctxt->on_transition (node, stmt, arg, m_freed, m_stop);
+ }
+ }
+ return false;
+}
+
+/* Implementation of state_machine::on_condition vfunc for malloc_state_machine.
+ Potentially transition state 'unchecked' to 'nonnull' or to 'null'. */
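+
+/* E.g. for "if (p)", the true edge is seen here as "P != 0" (NE_EXPR) and
+   moves P from 'unchecked' to 'nonnull', whereas the false edge is seen
+   as "P == 0" (EQ_EXPR) and moves P to 'null'.  */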
+
+void
+malloc_state_machine::on_condition (sm_context *sm_ctxt,
+ const supernode *node,
+ const gimple *stmt,
+ tree lhs,
+ enum tree_code op,
+ tree rhs) const
+{
+ if (!zerop (rhs))
+ return;
+
+ if (!any_pointer_p (lhs))
+ return;
+ if (!any_pointer_p (rhs))
+ return;
+
+ if (op == NE_EXPR)
+ {
+ log ("got 'ARG != 0' match");
+ sm_ctxt->on_transition (node, stmt,
+ lhs, m_unchecked, m_nonnull);
+ }
+ else if (op == EQ_EXPR)
+ {
+ log ("got 'ARG == 0' match");
+ sm_ctxt->on_transition (node, stmt,
+ lhs, m_unchecked, m_null);
+ }
+}
+
+/* Implementation of state_machine::can_purge_p vfunc for malloc_state_machine.
+ Don't allow purging of pointers in state 'unchecked' or 'nonnull'
+ (to avoid false leak reports). */
+
+bool
+malloc_state_machine::can_purge_p (state_t s) const
+{
+ return s != m_unchecked && s != m_nonnull;
+}
+
+/* Implementation of state_machine::on_leak vfunc for malloc_state_machine
+ (for complaining about leaks of pointers in state 'unchecked' and
+ 'nonnull'). */
+
+pending_diagnostic *
+malloc_state_machine::on_leak (tree var) const
+{
+ return new malloc_leak (*this, var);
+}
+
+} // anonymous namespace
+
+/* Internal interface to this file. */
+
+state_machine *
+make_malloc_state_machine (logger *logger)
+{
+ return new malloc_state_machine (logger);
+}
+
+#endif /* #if ENABLE_ANALYZER */
--- /dev/null
+/* An overview of the state machine from sm-malloc.cc.
+ Copyright (C) 2019-2020 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* Keep this in sync with sm-malloc.cc.  */
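+
+/* The graph can be viewed with Graphviz, e.g. by running "dot -Tsvg -O"
+   on this file.  */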
+
+digraph "malloc" {
+
+ /* STATES. */
+
+ /* Start state. */
+ start;
+
+ /* State for a pointer returned from malloc that hasn't been checked for
+ NULL.
+ It could be a pointer to heap-allocated memory, or could be NULL. */
+ unchecked;
+
+ /* State for a pointer that's known to be NULL. */
+ null;
+
+ /* State for a pointer to heap-allocated memory, known to be non-NULL. */
+ nonnull;
+
+ /* State for a pointer to freed memory. */
+ freed;
+
+ /* State for a pointer that's known to not be on the heap (e.g. to a local
+ or global). */
+ non_heap;
+
+ /* Stop state, for pointers we don't want to track any more. */
+ stop;
+
+ /* TRANSITIONS. */
+
+ start -> unchecked [label="on 'X=malloc(...);'"];
+ start -> unchecked [label="on 'X=calloc(...);'"];
+
+ start -> non_heap [label="on 'X=alloca(...);'"];
+ start -> non_heap [label="on 'X=__builtin_alloca(...);'"];
+
+ /* On "free". */
+ start -> freed [label="on 'free(X);'"];
+ unchecked -> freed [label="on 'free(X);'"];
+ nonnull -> freed [label="on 'free(X);'"];
+ freed -> stop [label="on 'free(X);':\n Warn('double-free')"];
+ non_heap -> stop [label="on 'free(X);':\n Warn('free of non-heap')"];
+
+ /* Handle "__attribute__((nonnull))". */
+ unchecked -> nonnull [label="on 'FN(X)' with __attribute__((nonnull)):\nWarn('possible NULL arg')"];
+ null -> stop [label="on 'FN(X)' with __attribute__((nonnull)):\nWarn('NULL arg')"];
+
+ /* is_zero_assignment. */
+ start -> null [label="on 'X = 0;'"];
+ unchecked -> null [label="on 'X = 0;'"];
+ nonnull -> null [label="on 'X = 0;'"];
+ freed -> null [label="on 'X = 0;'"];
+
+ start -> non_heap [label="on 'X = &EXPR;'"];
+
+ /* Handle dereferences. */
+ unchecked -> nonnull [label="on '*X':\nWarn('possible NULL deref')"];
+ null -> stop [label="on '*X':\nWarn('NULL deref')"];
+ freed -> stop [label="on '*X':\nWarn('use after free')"];
+
+ /* on_condition. */
+ unchecked -> nonnull [label="on 'X != 0'"];
+ unchecked -> null [label="on 'X == 0'"];
+
+ unchecked -> stop [label="on leak:\nWarn('leak')"];
+ nonnull -> stop [label="on leak:\nWarn('leak')"];
+}
--- /dev/null
+/* A state machine for use in DejaGnu tests, to check that
+ pattern-matching works as expected.
+
+ Copyright (C) 2019-2020 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tree.h"
+#include "function.h"
+#include "basic-block.h"
+#include "gimple.h"
+#include "tree-pretty-print.h"
+#include "diagnostic-path.h"
+#include "diagnostic-metadata.h"
+#include "function.h"
+#include "analyzer/analyzer.h"
+#include "diagnostic-event-id.h"
+#include "analyzer/analyzer-logging.h"
+#include "analyzer/sm.h"
+#include "analyzer/pending-diagnostic.h"
+
+#if ENABLE_ANALYZER
+
+namespace {
+
+/* A state machine for use in DejaGnu tests, to check that
+ pattern-matching works as expected. */
+
+class pattern_test_state_machine : public state_machine
+{
+public:
+ pattern_test_state_machine (logger *logger);
+
+ bool inherited_state_p () const FINAL OVERRIDE { return false; }
+
+ bool on_stmt (sm_context *sm_ctxt,
+ const supernode *node,
+ const gimple *stmt) const FINAL OVERRIDE;
+
+ void on_condition (sm_context *sm_ctxt,
+ const supernode *node,
+ const gimple *stmt,
+ tree lhs,
+ enum tree_code op,
+ tree rhs) const FINAL OVERRIDE;
+
+ bool can_purge_p (state_t s) const FINAL OVERRIDE;
+
+private:
+ state_t m_start;
+};
+
+class pattern_match : public pending_diagnostic_subclass<pattern_match>
+{
+public:
+ pattern_match (tree lhs, enum tree_code op, tree rhs)
+ : m_lhs (lhs), m_op (op), m_rhs (rhs) {}
+
+ const char *get_kind () const FINAL OVERRIDE { return "pattern_match"; }
+
+ bool operator== (const pattern_match &other) const
+ {
+ return (m_lhs == other.m_lhs
+ && m_op == other.m_op
+ && m_rhs == other.m_rhs);
+ }
+
+ bool emit (rich_location *rich_loc) FINAL OVERRIDE
+ {
+ return warning_at (rich_loc, 0, "pattern match on %<%E %s %E%>",
+ m_lhs, op_symbol_code (m_op), m_rhs);
+ }
+
+private:
+ tree m_lhs;
+ enum tree_code m_op;
+ tree m_rhs;
+};
+
+pattern_test_state_machine::pattern_test_state_machine (logger *logger)
+: state_machine ("pattern-test", logger)
+{
+ m_start = add_state ("start");
+}
+
+bool
+pattern_test_state_machine::on_stmt (sm_context *sm_ctxt ATTRIBUTE_UNUSED,
+ const supernode *node ATTRIBUTE_UNUSED,
+ const gimple *stmt ATTRIBUTE_UNUSED) const
+{
+ return false;
+}
+
+/* Implementation of state_machine::on_condition vfunc for
+ pattern_test_state_machine.
+
+ Queue a pattern_match diagnostic for any comparison against a
+ constant. */
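+
+/* E.g. with -fanalyzer-checker=pattern-test, a guard such as "if (i == 42)"
+   gets a "pattern match on 'i == 42'" warning at the comparison.  */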
+
+void
+pattern_test_state_machine::on_condition (sm_context *sm_ctxt,
+ const supernode *node,
+ const gimple *stmt,
+ tree lhs,
+ enum tree_code op,
+ tree rhs) const
+{
+ if (stmt == NULL)
+ return;
+
+ if (!CONSTANT_CLASS_P (rhs))
+ return;
+
+ pending_diagnostic *diag = new pattern_match (lhs, op, rhs);
+ sm_ctxt->warn_for_state (node, stmt, lhs, m_start, diag);
+}
+
+bool
+pattern_test_state_machine::can_purge_p (state_t s ATTRIBUTE_UNUSED) const
+{
+ return true;
+}
+
+} // anonymous namespace
+
+/* Internal interface to this file. */
+
+state_machine *
+make_pattern_test_state_machine (logger *logger)
+{
+ return new pattern_test_state_machine (logger);
+}
+
+#endif /* #if ENABLE_ANALYZER */
--- /dev/null
+/* An experimental state machine, for tracking exposure of sensitive
+ data (e.g. through logging).
+ Copyright (C) 2019-2020 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tree.h"
+#include "function.h"
+#include "function.h"
+#include "basic-block.h"
+#include "gimple.h"
+#include "options.h"
+#include "diagnostic-path.h"
+#include "diagnostic-metadata.h"
+#include "function.h"
+#include "analyzer/analyzer.h"
+#include "diagnostic-event-id.h"
+#include "analyzer/analyzer-logging.h"
+#include "analyzer/sm.h"
+#include "analyzer/pending-diagnostic.h"
+
+#if ENABLE_ANALYZER
+
+namespace {
+
+/* An experimental state machine, for tracking exposure of sensitive
+ data (e.g. through logging). */
+
+class sensitive_state_machine : public state_machine
+{
+public:
+ sensitive_state_machine (logger *logger);
+
+ bool inherited_state_p () const FINAL OVERRIDE { return true; }
+
+ bool on_stmt (sm_context *sm_ctxt,
+ const supernode *node,
+ const gimple *stmt) const FINAL OVERRIDE;
+
+ void on_condition (sm_context *sm_ctxt,
+ const supernode *node,
+ const gimple *stmt,
+ tree lhs,
+ enum tree_code op,
+ tree rhs) const FINAL OVERRIDE;
+
+ bool can_purge_p (state_t s) const FINAL OVERRIDE;
+
+ /* Start state. */
+ state_t m_start;
+
+ /* State for "sensitive" data, such as a password. */
+ state_t m_sensitive;
+
+ /* Stop state, for a value we don't want to track any more. */
+ state_t m_stop;
+
+private:
+ void warn_for_any_exposure (sm_context *sm_ctxt,
+ const supernode *node,
+ const gimple *stmt,
+ tree arg) const;
+};
+
+class exposure_through_output_file
+ : public pending_diagnostic_subclass<exposure_through_output_file>
+{
+public:
+ exposure_through_output_file (const sensitive_state_machine &sm, tree arg)
+ : m_sm (sm), m_arg (arg)
+ {}
+
+ const char *get_kind () const FINAL OVERRIDE
+ {
+ return "exposure_through_output_file";
+ }
+
+ bool operator== (const exposure_through_output_file &other) const
+ {
+ return m_arg == other.m_arg;
+ }
+
+ bool emit (rich_location *rich_loc) FINAL OVERRIDE
+ {
+ diagnostic_metadata m;
+ /* CWE-532: Information Exposure Through Log Files */
+ m.add_cwe (532);
+ return warning_at (rich_loc, m, OPT_Wanalyzer_exposure_through_output_file,
+ "sensitive value %qE written to output file",
+ m_arg);
+ }
+
+ label_text describe_state_change (const evdesc::state_change &change)
+ FINAL OVERRIDE
+ {
+ if (change.m_new_state == m_sm.m_sensitive)
+ {
+ m_first_sensitive_event = change.m_event_id;
+ return change.formatted_print ("sensitive value acquired here");
+ }
+ return label_text ();
+ }
+
+ label_text describe_call_with_state (const evdesc::call_with_state &info)
+ FINAL OVERRIDE
+ {
+ if (info.m_state == m_sm.m_sensitive)
+ return info.formatted_print
+ ("passing sensitive value %qE in call to %qE from %qE",
+ info.m_expr, info.m_callee_fndecl, info.m_caller_fndecl);
+ return label_text ();
+ }
+
+ label_text describe_return_of_state (const evdesc::return_of_state &info)
+ FINAL OVERRIDE
+ {
+ if (info.m_state == m_sm.m_sensitive)
+ return info.formatted_print ("returning sensitive value to %qE from %qE",
+ info.m_caller_fndecl, info.m_callee_fndecl);
+ return label_text ();
+ }
+
+ label_text describe_final_event (const evdesc::final_event &ev) FINAL OVERRIDE
+ {
+ if (m_first_sensitive_event.known_p ())
+ return ev.formatted_print ("sensitive value %qE written to output file"
+ "; acquired at %@",
+ m_arg, &m_first_sensitive_event);
+ else
+ return ev.formatted_print ("sensitive value %qE written to output file",
+ m_arg);
+ }
+
+private:
+ const sensitive_state_machine &m_sm;
+ tree m_arg;
+ diagnostic_event_id_t m_first_sensitive_event;
+};
+
+/* sensitive_state_machine's ctor. */
+
+sensitive_state_machine::sensitive_state_machine (logger *logger)
+: state_machine ("sensitive", logger)
+{
+ m_start = add_state ("start");
+ m_sensitive = add_state ("sensitive");
+ m_stop = add_state ("stop");
+}
+
+/* Warn about an exposure at NODE and STMT if ARG is in the "sensitive"
+ state. */
+
+void
+sensitive_state_machine::warn_for_any_exposure (sm_context *sm_ctxt,
+ const supernode *node,
+ const gimple *stmt,
+ tree arg) const
+{
+ sm_ctxt->warn_for_state (node, stmt, arg, m_sensitive,
+ new exposure_through_output_file (*this, arg));
+}
+
+/* Implementation of state_machine::on_stmt vfunc for
+ sensitive_state_machine. */
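+
+/* E.g. given
+
+     char *password = getpass ("Password: ");
+     fprintf (logfile, "got %s\n", password);
+
+   the value returned by "getpass" is put into the 'sensitive' state, and
+   passing it to "fprintf" triggers the exposure diagnostic above.  */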
+
+bool
+sensitive_state_machine::on_stmt (sm_context *sm_ctxt,
+ const supernode *node,
+ const gimple *stmt) const
+{
+ if (const gcall *call = dyn_cast <const gcall *> (stmt))
+ if (tree callee_fndecl = sm_ctxt->get_fndecl_for_call (call))
+ {
+ if (is_named_call_p (callee_fndecl, "getpass", call, 1))
+ {
+ tree lhs = gimple_call_lhs (call);
+ if (lhs)
+ sm_ctxt->on_transition (node, stmt, lhs, m_start, m_sensitive);
+ return true;
+ }
+ else if (is_named_call_p (callee_fndecl, "fprintf")
+ || is_named_call_p (callee_fndecl, "printf"))
+ {
+ /* Handle a match at any position in varargs. */
+ for (unsigned idx = 1; idx < gimple_call_num_args (call); idx++)
+ {
+ tree arg = gimple_call_arg (call, idx);
+ warn_for_any_exposure (sm_ctxt, node, stmt, arg);
+ }
+ return true;
+ }
+ else if (is_named_call_p (callee_fndecl, "fwrite", call, 4))
+ {
+ tree arg = gimple_call_arg (call, 0);
+ warn_for_any_exposure (sm_ctxt, node, stmt, arg);
+ return true;
+ }
+ // TODO: ...etc. This is just a proof-of-concept at this point.
+ }
+ return false;
+}
+
+void
+sensitive_state_machine::on_condition (sm_context *sm_ctxt ATTRIBUTE_UNUSED,
+ const supernode *node ATTRIBUTE_UNUSED,
+ const gimple *stmt ATTRIBUTE_UNUSED,
+ tree lhs ATTRIBUTE_UNUSED,
+ enum tree_code op ATTRIBUTE_UNUSED,
+ tree rhs ATTRIBUTE_UNUSED) const
+{
+ /* Empty. */
+}
+
+bool
+sensitive_state_machine::can_purge_p (state_t s ATTRIBUTE_UNUSED) const
+{
+ return true;
+}
+
+} // anonymous namespace
+
+/* Internal interface to this file. */
+
+state_machine *
+make_sensitive_state_machine (logger *logger)
+{
+ return new sensitive_state_machine (logger);
+}
+
+#endif /* #if ENABLE_ANALYZER */
--- /dev/null
+/* An experimental state machine, for tracking bad calls from within
+ signal handlers.
+
+ Copyright (C) 2019-2020 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tree.h"
+#include "function.h"
+#include "basic-block.h"
+#include "gimple.h"
+#include "options.h"
+#include "bitmap.h"
+#include "diagnostic-path.h"
+#include "diagnostic-metadata.h"
+#include "function.h"
+#include "analyzer/analyzer.h"
+#include "diagnostic-event-id.h"
+#include "analyzer/analyzer-logging.h"
+#include "analyzer/sm.h"
+#include "analyzer/pending-diagnostic.h"
+#include "sbitmap.h"
+#include "tristate.h"
+#include "ordered-hash-map.h"
+#include "selftest.h"
+#include "analyzer/region-model.h"
+#include "analyzer/program-state.h"
+#include "analyzer/checker-path.h"
+#include "digraph.h"
+#include "cfg.h"
+#include "gimple-iterator.h"
+#include "cgraph.h"
+#include "analyzer/supergraph.h"
+#include "analyzer/call-string.h"
+#include "analyzer/program-point.h"
+#include "alloc-pool.h"
+#include "fibonacci_heap.h"
+#include "analyzer/diagnostic-manager.h"
+#include "shortest-paths.h"
+#include "analyzer/exploded-graph.h"
+
+#if ENABLE_ANALYZER
+
+namespace {
+
+/* An experimental state machine, for tracking calls to async-signal-unsafe
+ functions from within signal handlers. */
+
+class signal_state_machine : public state_machine
+{
+public:
+ signal_state_machine (logger *logger);
+
+ bool inherited_state_p () const FINAL OVERRIDE { return false; }
+
+ bool on_stmt (sm_context *sm_ctxt,
+ const supernode *node,
+ const gimple *stmt) const FINAL OVERRIDE;
+
+ void on_condition (sm_context *sm_ctxt,
+ const supernode *node,
+ const gimple *stmt,
+ tree lhs,
+ enum tree_code op,
+ tree rhs) const FINAL OVERRIDE;
+
+ bool can_purge_p (state_t s) const FINAL OVERRIDE;
+
+ /* These states are "global", rather than per-expression. */
+
+ /* Start state. */
+ state_t m_start;
+
+ /* State for when we're in a signal handler. */
+ state_t m_in_signal_handler;
+
+ /* Stop state. */
+ state_t m_stop;
+};
+
+/* Concrete subclass for describing call to an async-signal-unsafe function
+ from a signal handler. */
+
+class signal_unsafe_call
+ : public pending_diagnostic_subclass<signal_unsafe_call>
+{
+public:
+ signal_unsafe_call (const signal_state_machine &sm, const gcall *unsafe_call,
+ tree unsafe_fndecl)
+ : m_sm (sm), m_unsafe_call (unsafe_call), m_unsafe_fndecl (unsafe_fndecl)
+ {
+ gcc_assert (m_unsafe_fndecl);
+ }
+
+ const char *get_kind () const FINAL OVERRIDE { return "signal_unsafe_call"; }
+
+ bool operator== (const signal_unsafe_call &other) const
+ {
+ return m_unsafe_call == other.m_unsafe_call;
+ }
+
+ bool emit (rich_location *rich_loc) FINAL OVERRIDE
+ {
+ diagnostic_metadata m;
+ /* CWE-479: Signal Handler Use of a Non-reentrant Function. */
+ m.add_cwe (479);
+ return warning_at (rich_loc, m,
+ OPT_Wanalyzer_unsafe_call_within_signal_handler,
+ "call to %qD from within signal handler",
+ m_unsafe_fndecl);
+ }
+
+ label_text describe_state_change (const evdesc::state_change &change)
+ FINAL OVERRIDE
+ {
+ if (change.is_global_p ()
+ && change.m_new_state == m_sm.m_in_signal_handler)
+ {
+ function *handler
+ = change.m_event.m_dst_state.m_region_model->get_current_function ();
+ return change.formatted_print ("registering %qD as signal handler",
+ handler->decl);
+ }
+ return label_text ();
+ }
+
+ label_text describe_final_event (const evdesc::final_event &ev) FINAL OVERRIDE
+ {
+ return ev.formatted_print ("call to %qD from within signal handler",
+ m_unsafe_fndecl);
+ }
+
+private:
+ const signal_state_machine &m_sm;
+ const gcall *m_unsafe_call;
+ tree m_unsafe_fndecl;
+};
+
+/* signal_state_machine's ctor. */
+
+signal_state_machine::signal_state_machine (logger *logger)
+: state_machine ("signal", logger)
+{
+ m_start = add_state ("start");
+ m_in_signal_handler = add_state ("in_signal_handler");
+ m_stop = add_state ("stop");
+}
+
+/* Update MODEL for edges that simulate HANDLER_FUN being called as
+   a signal handler in response to a signal.  */
+
+static void
+update_model_for_signal_handler (region_model *model,
+ function *handler_fun)
+{
+ /* Purge all state within MODEL. */
+ *model = region_model ();
+ model->push_frame (handler_fun, NULL, NULL);
+}
+
+/* Custom exploded_edge info: entry into a signal-handler. */
+
+class signal_delivery_edge_info_t : public exploded_edge::custom_info_t
+{
+public:
+ void print (pretty_printer *pp) FINAL OVERRIDE
+ {
+ pp_string (pp, "signal delivered");
+ }
+
+ void update_model (region_model *model,
+ const exploded_edge &eedge) FINAL OVERRIDE
+ {
+ update_model_for_signal_handler (model, eedge.m_dest->get_function ());
+ }
+
+ void add_events_to_path (checker_path *emission_path,
+ const exploded_edge &eedge ATTRIBUTE_UNUSED)
+ FINAL OVERRIDE
+ {
+ emission_path->add_event
+ (new custom_event (UNKNOWN_LOCATION, NULL_TREE, 0,
+ "later on,"
+ " when the signal is delivered to the process"));
+ }
+};
+
+/* Concrete subclass of custom_transition for modeling registration of a
+ signal handler and the signal handler later being called. */
+
+class register_signal_handler : public custom_transition
+{
+public:
+ register_signal_handler (const signal_state_machine &sm,
+ tree fndecl)
+ : m_sm (sm), m_fndecl (fndecl) {}
+
+ /* Model a signal-handler FNDECL being called at some later point
+ by injecting an edge to a new function-entry node with an empty
+ callstring, setting the 'in-signal-handler' global state
+ on the node. */
+ void impl_transition (exploded_graph *eg,
+ exploded_node *src_enode,
+ int sm_idx) FINAL OVERRIDE
+ {
+ function *handler_fun = DECL_STRUCT_FUNCTION (m_fndecl);
+ if (!handler_fun)
+ return;
+ program_point entering_handler
+ = program_point::from_function_entry (eg->get_supergraph (),
+ handler_fun);
+
+ program_state state_entering_handler (eg->get_ext_state ());
+ update_model_for_signal_handler (state_entering_handler.m_region_model,
+ handler_fun);
+ state_entering_handler.m_checker_states[sm_idx]->set_global_state
+ (m_sm.m_in_signal_handler);
+
+ exploded_node *dst_enode = eg->get_or_create_node (entering_handler,
+ state_entering_handler,
+ NULL);
+ if (dst_enode)
+ eg->add_edge (src_enode, dst_enode, NULL, state_change (),
+ new signal_delivery_edge_info_t ());
+ }
+
+ const signal_state_machine &m_sm;
+ tree m_fndecl;
+};
+
+/* Return true if CALLEE_FNDECL is known to be unsafe to call from
+   a signal handler.  */
+
+static bool
+signal_unsafe_p (tree callee_fndecl)
+{
+ // TODO: maintain a list of known unsafe functions
+ if (is_named_call_p (callee_fndecl, "fprintf"))
+ return true;
+
+ return false;
+}
+
+/* Implementation of state_machine::on_stmt vfunc for signal_state_machine. */
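+
+/* E.g. given
+
+     static void handler (int signum)
+     {
+       fprintf (stderr, "got signal %d\n", signum);
+     }
+     ...
+     signal (SIGINT, handler);
+
+   the call to "signal" injects an exploded edge simulating HANDLER being
+   called later with the global state set to 'in_signal_handler'; the call
+   to "fprintf" on that path is then diagnosed as async-signal-unsafe.  */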
+
+bool
+signal_state_machine::on_stmt (sm_context *sm_ctxt,
+ const supernode *node,
+ const gimple *stmt) const
+{
+ const state_t global_state = sm_ctxt->get_global_state ();
+ if (global_state == m_start)
+ {
+ if (const gcall *call = dyn_cast <const gcall *> (stmt))
+ if (tree callee_fndecl = sm_ctxt->get_fndecl_for_call (call))
+ if (is_named_call_p (callee_fndecl, "signal", call, 2))
+ {
+ tree handler = gimple_call_arg (call, 1);
+ if (TREE_CODE (handler) == ADDR_EXPR
+ && TREE_CODE (TREE_OPERAND (handler, 0)) == FUNCTION_DECL)
+ {
+ tree fndecl = TREE_OPERAND (handler, 0);
+ register_signal_handler rsh (*this, fndecl);
+ sm_ctxt->on_custom_transition (&rsh);
+ }
+ }
+ }
+ else if (global_state == m_in_signal_handler)
+ {
+ if (const gcall *call = dyn_cast <const gcall *> (stmt))
+ if (tree callee_fndecl = sm_ctxt->get_fndecl_for_call (call))
+ if (signal_unsafe_p (callee_fndecl))
+ sm_ctxt->warn_for_state (node, stmt, NULL_TREE, m_in_signal_handler,
+ new signal_unsafe_call (*this, call,
+ callee_fndecl));
+ }
+
+ return false;
+}
+
+/* Implementation of state_machine::on_condition vfunc for
+ signal_state_machine. */
+
+void
+signal_state_machine::on_condition (sm_context *sm_ctxt ATTRIBUTE_UNUSED,
+ const supernode *node ATTRIBUTE_UNUSED,
+ const gimple *stmt ATTRIBUTE_UNUSED,
+ tree lhs ATTRIBUTE_UNUSED,
+ enum tree_code op ATTRIBUTE_UNUSED,
+ tree rhs ATTRIBUTE_UNUSED) const
+{
+ // Empty
+}
+
+bool
+signal_state_machine::can_purge_p (state_t s ATTRIBUTE_UNUSED) const
+{
+ return true;
+}
+
+} // anonymous namespace
+
+/* Internal interface to this file. */
+
+state_machine *
+make_signal_state_machine (logger *logger)
+{
+ return new signal_state_machine (logger);
+}
+
+#endif /* #if ENABLE_ANALYZER */
--- /dev/null
+/* An experimental state machine, for tracking "taint": unsanitized uses
+ of data potentially under an attacker's control.
+
+ Copyright (C) 2019-2020 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tree.h"
+#include "function.h"
+#include "basic-block.h"
+#include "gimple.h"
+#include "options.h"
+#include "diagnostic-path.h"
+#include "diagnostic-metadata.h"
+#include "function.h"
+#include "analyzer/analyzer.h"
+#include "diagnostic-event-id.h"
+#include "analyzer/analyzer-logging.h"
+#include "analyzer/sm.h"
+#include "analyzer/pending-diagnostic.h"
+
+#if ENABLE_ANALYZER
+
+namespace {
+
+/* An experimental state machine, for tracking "taint": unsanitized uses
+ of data potentially under an attacker's control. */
+
+class taint_state_machine : public state_machine
+{
+public:
+ taint_state_machine (logger *logger);
+
+ bool inherited_state_p () const FINAL OVERRIDE { return true; }
+
+ bool on_stmt (sm_context *sm_ctxt,
+ const supernode *node,
+ const gimple *stmt) const FINAL OVERRIDE;
+
+ void on_condition (sm_context *sm_ctxt,
+ const supernode *node,
+ const gimple *stmt,
+ tree lhs,
+ enum tree_code op,
+ tree rhs) const FINAL OVERRIDE;
+
+ bool can_purge_p (state_t s) const FINAL OVERRIDE;
+
+ /* Start state. */
+ state_t m_start;
+
+ /* State for a "tainted" value: unsanitized data potentially under an
+ attacker's control. */
+ state_t m_tainted;
+
+ /* State for a "tainted" value that has a lower bound. */
+ state_t m_has_lb;
+
+ /* State for a "tainted" value that has an upper bound. */
+ state_t m_has_ub;
+
+ /* Stop state, for a value we don't want to track any more. */
+ state_t m_stop;
+};
+
+enum bounds
+{
+ BOUNDS_NONE,
+ BOUNDS_UPPER,
+ BOUNDS_LOWER
+};
+
+class tainted_array_index
+ : public pending_diagnostic_subclass<tainted_array_index>
+{
+public:
+ tainted_array_index (const taint_state_machine &sm, tree arg,
+ enum bounds has_bounds)
+ : m_sm (sm), m_arg (arg), m_has_bounds (has_bounds) {}
+
+ const char *get_kind () const FINAL OVERRIDE { return "tainted_array_index"; }
+
+ bool operator== (const tainted_array_index &other) const
+ {
+ return m_arg == other.m_arg;
+ }
+
+ bool emit (rich_location *rich_loc) FINAL OVERRIDE
+ {
+ diagnostic_metadata m;
+ m.add_cwe (129);
+ switch (m_has_bounds)
+ {
+ default:
+ gcc_unreachable ();
+ case BOUNDS_NONE:
+ return warning_at (rich_loc, m, OPT_Wanalyzer_tainted_array_index,
+ "use of tainted value %qE in array lookup"
+ " without bounds checking",
+ m_arg);
+ break;
+ case BOUNDS_UPPER:
+ return warning_at (rich_loc, m, OPT_Wanalyzer_tainted_array_index,
+ "use of tainted value %qE in array lookup"
+ " without lower-bounds checking",
+ m_arg);
+ break;
+ case BOUNDS_LOWER:
+ return warning_at (rich_loc, m, OPT_Wanalyzer_tainted_array_index,
+ "use of tainted value %qE in array lookup"
+ " without upper-bounds checking",
+ m_arg);
+ break;
+ }
+ }
+
+ label_text describe_state_change (const evdesc::state_change &change)
+ FINAL OVERRIDE
+ {
+ if (change.m_new_state == m_sm.m_tainted)
+ {
+ if (change.m_origin)
+ return change.formatted_print ("%qE has an unchecked value here"
+ " (from %qE)",
+ change.m_expr, change.m_origin);
+ else
+ return change.formatted_print ("%qE gets an unchecked value here",
+ change.m_expr);
+ }
+ else if (change.m_new_state == m_sm.m_has_lb)
+ return change.formatted_print ("%qE has its lower bound checked here",
+ change.m_expr);
+ else if (change.m_new_state == m_sm.m_has_ub)
+ return change.formatted_print ("%qE has its upper bound checked here",
+ change.m_expr);
+ return label_text ();
+ }
+
+ label_text describe_final_event (const evdesc::final_event &ev) FINAL OVERRIDE
+ {
+ switch (m_has_bounds)
+ {
+ default:
+ gcc_unreachable ();
+ case BOUNDS_NONE:
+ return ev.formatted_print ("use of tainted value %qE in array lookup"
+ " without bounds checking",
+ m_arg);
+ case BOUNDS_UPPER:
+ return ev.formatted_print ("use of tainted value %qE in array lookup"
+ " without lower-bounds checking",
+ m_arg);
+ case BOUNDS_LOWER:
+ return ev.formatted_print ("use of tainted value %qE in array lookup"
+ " without upper-bounds checking",
+ m_arg);
+ }
+ }
+
+private:
+ const taint_state_machine &m_sm;
+ tree m_arg;
+ enum bounds m_has_bounds;
+};
+
+/* taint_state_machine's ctor. */
+
+taint_state_machine::taint_state_machine (logger *logger)
+: state_machine ("taint", logger)
+{
+ m_start = add_state ("start");
+ m_tainted = add_state ("tainted");
+ m_has_lb = add_state ("has_lb");
+ m_has_ub = add_state ("has_ub");
+ m_stop = add_state ("stop");
+}
+
+/* Implementation of state_machine::on_stmt vfunc for taint_state_machine. */
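+
+/* E.g. given
+
+     fread (&idx, sizeof (idx), 1, f);
+     return table[idx];
+
+   IDX becomes 'tainted' at the call to "fread", and the array lookup is
+   diagnosed unless both of its bounds have been checked first.  */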
+
+bool
+taint_state_machine::on_stmt (sm_context *sm_ctxt,
+ const supernode *node,
+ const gimple *stmt) const
+{
+ if (const gcall *call = dyn_cast <const gcall *> (stmt))
+ if (tree callee_fndecl = sm_ctxt->get_fndecl_for_call (call))
+ {
+ if (is_named_call_p (callee_fndecl, "fread", call, 4))
+ {
+ tree arg = gimple_call_arg (call, 0);
+ arg = sm_ctxt->get_readable_tree (arg);
+
+ sm_ctxt->on_transition (node, stmt, arg, m_start, m_tainted);
+
+ /* Dereference an ADDR_EXPR. */
+ // TODO: should the engine do this?
+ if (TREE_CODE (arg) == ADDR_EXPR)
+ sm_ctxt->on_transition (node, stmt, TREE_OPERAND (arg, 0),
+ m_start, m_tainted);
+ return true;
+ }
+ }
+ // TODO: ...etc; many other sources of untrusted data
+
+ if (const gassign *assign = dyn_cast <const gassign *> (stmt))
+ {
+ tree rhs1 = gimple_assign_rhs1 (assign);
+ enum tree_code op = gimple_assign_rhs_code (assign);
+
+ /* Check array accesses. */
+ if (op == ARRAY_REF)
+ {
+ tree arg = TREE_OPERAND (rhs1, 1);
+ arg = sm_ctxt->get_readable_tree (arg);
+
+ /* Unsigned types have an implicit lower bound. */
+ bool is_unsigned = false;
+ if (INTEGRAL_TYPE_P (TREE_TYPE (arg)))
+ is_unsigned = TYPE_UNSIGNED (TREE_TYPE (arg));
+
+ /* Complain about missing bounds. */
+ sm_ctxt->warn_for_state
+ (node, stmt, arg, m_tainted,
+ new tainted_array_index (*this, arg,
+ is_unsigned
+ ? BOUNDS_LOWER : BOUNDS_NONE));
+ sm_ctxt->on_transition (node, stmt, arg, m_tainted, m_stop);
+
+ /* Complain about missing upper bound. */
+ sm_ctxt->warn_for_state (node, stmt, arg, m_has_lb,
+ new tainted_array_index (*this, arg,
+ BOUNDS_LOWER));
+ sm_ctxt->on_transition (node, stmt, arg, m_has_lb, m_stop);
+
+ /* Complain about missing lower bound. */
+ if (!is_unsigned)
+ {
+ sm_ctxt->warn_for_state (node, stmt, arg, m_has_ub,
+ new tainted_array_index (*this, arg,
+ BOUNDS_UPPER));
+ sm_ctxt->on_transition (node, stmt, arg, m_has_ub, m_stop);
+ }
+ }
+ }
+
+ return false;
+}
+
+/* Implementation of state_machine::on_condition vfunc for taint_state_machine.
+ Potentially transition state 'tainted' to 'has_ub' or 'has_lb',
+ and states 'has_ub' and 'has_lb' to 'stop'. */
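+
+/* E.g. on the edge where "idx < 128" holds, IDX goes from 'tainted' to
+   'has_ub' (or from 'has_lb' to 'stop', i.e. fully checked); a ">=" or
+   ">" guard analogously establishes the lower bound.  */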
+
+void
+taint_state_machine::on_condition (sm_context *sm_ctxt,
+ const supernode *node,
+ const gimple *stmt,
+ tree lhs,
+ enum tree_code op,
+ tree rhs ATTRIBUTE_UNUSED) const
+{
+ if (stmt == NULL)
+ return;
+
+ // TODO: this doesn't use the RHS; should we make it symmetric?
+
+ // TODO
+ switch (op)
+ {
+ //case NE_EXPR:
+ //case EQ_EXPR:
+ case GE_EXPR:
+ case GT_EXPR:
+ {
+ sm_ctxt->on_transition (node, stmt, lhs, m_tainted,
+ m_has_lb);
+ sm_ctxt->on_transition (node, stmt, lhs, m_has_ub,
+ m_stop);
+ }
+ break;
+ case LE_EXPR:
+ case LT_EXPR:
+ {
+ sm_ctxt->on_transition (node, stmt, lhs, m_tainted,
+ m_has_ub);
+ sm_ctxt->on_transition (node, stmt, lhs, m_has_lb,
+ m_stop);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+bool
+taint_state_machine::can_purge_p (state_t s ATTRIBUTE_UNUSED) const
+{
+ return true;
+}
+
+} // anonymous namespace
+
+/* Internal interface to this file. */
+
+state_machine *
+make_taint_state_machine (logger *logger)
+{
+ return new taint_state_machine (logger);
+}
+
+#endif /* #if ENABLE_ANALYZER */
--- /dev/null
+/* Modeling API uses and misuses via state machines.
+ Copyright (C) 2019-2020 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tree.h"
+#include "function.h"
+#include "basic-block.h"
+#include "gimple.h"
+#include "options.h"
+#include "function.h"
+#include "diagnostic-core.h"
+#include "analyzer/analyzer.h"
+#include "analyzer/analyzer-logging.h"
+#include "analyzer/sm.h"
+
+#if ENABLE_ANALYZER
+
+/* If STMT is an assignment from zero, return its LHS; otherwise return
+   NULL_TREE.  */
+
+tree
+is_zero_assignment (const gimple *stmt)
+{
+ const gassign *assign_stmt = dyn_cast <const gassign *> (stmt);
+ if (!assign_stmt)
+ return NULL_TREE;
+
+ enum tree_code op = gimple_assign_rhs_code (assign_stmt);
+ if (TREE_CODE_CLASS (op) != tcc_constant)
+ return NULL_TREE;
+
+ if (!zerop (gimple_assign_rhs1 (assign_stmt)))
+ return NULL_TREE;
+
+ return gimple_assign_lhs (assign_stmt);
+}
+
+/* Return true if VAR has pointer or reference type. */
+
+bool
+any_pointer_p (tree var)
+{
+ return POINTER_TYPE_P (TREE_TYPE (var));
+}
+
+/* Add a state with name NAME to this state_machine.
+ The string is required to outlive the state_machine.
+
+ Return the state_t for the new state. */
+
+state_machine::state_t
+state_machine::add_state (const char *name)
+{
+ m_state_names.safe_push (name);
+ return m_state_names.length () - 1;
+}
+
+/* Get the name of state S within this state_machine. */
+
+const char *
+state_machine::get_state_name (state_t s) const
+{
+ return m_state_names[s];
+}
+
+/* Assert that S is a valid state for this state_machine. */
+
+void
+state_machine::validate (state_t s) const
+{
+ gcc_assert (s < m_state_names.length ());
+}
+
+/* Create instances of the various state machines, each using LOGGER,
+ and populate OUT with them. */
+
+void
+make_checkers (auto_delete_vec <state_machine> &out, logger *logger)
+{
+ out.safe_push (make_malloc_state_machine (logger));
+ out.safe_push (make_fileptr_state_machine (logger));
+ out.safe_push (make_taint_state_machine (logger));
+ out.safe_push (make_sensitive_state_machine (logger));
+ out.safe_push (make_signal_state_machine (logger));
+
+  /* Only register the pattern-test state machine if a specific checker
+     could have been requested via -fanalyzer-checker= (it exists purely
+     for DejaGnu testing purposes).  */
+ if (flag_analyzer_checker)
+ out.safe_push (make_pattern_test_state_machine (logger));
+
+ if (flag_analyzer_checker)
+ {
+ unsigned read_index, write_index;
+ state_machine **sm;
+
+      /* TODO: this leaks the removed state machines.
+	 It would also be nice to log which ones were removed.  */
+ VEC_ORDERED_REMOVE_IF (out, read_index, write_index, sm,
+ 0 != strcmp (flag_analyzer_checker,
+ (*sm)->get_name ()));
+ }
+}
+
+#endif /* #if ENABLE_ANALYZER */
--- /dev/null
+/* Modeling API uses and misuses via state machines.
+ Copyright (C) 2019-2020 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_ANALYZER_SM_H
+#define GCC_ANALYZER_SM_H
+
+/* Utility functions for use by state machines. */
+
+extern tree is_zero_assignment (const gimple *stmt);
+extern bool any_pointer_p (tree var);
+
+class state_machine;
+class sm_context;
+class pending_diagnostic;
+
+/* An abstract base class for a state machine describing an API.
+ A mapping from state IDs to names, and various virtual functions
+ for pattern-matching on statements. */
+
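+/* Note that states are simply indices into a vector of names (see
+   add_state); index zero is the "start" state, which subclasses create
+   first in their constructors.  */
+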
+class state_machine : public log_user
+{
+public:
+ typedef unsigned state_t;
+
+ state_machine (const char *name, logger *logger)
+ : log_user (logger), m_name (name) {}
+
+ virtual ~state_machine () {}
+
+ /* Should states be inherited from a parent region to a child region,
+ when first accessing a child region?
+ For example we should inherit the taintedness of a subregion,
+ but we should not inherit the "malloc:non-null" state of a field
+ within a heap-allocated struct. */
+ virtual bool inherited_state_p () const = 0;
+
+ const char *get_name () const { return m_name; }
+
+ const char *get_state_name (state_t s) const;
+
+ /* Return true if STMT is a function call recognized by this sm. */
+ virtual bool on_stmt (sm_context *sm_ctxt,
+ const supernode *node,
+ const gimple *stmt) const = 0;
+
+ virtual void on_condition (sm_context *sm_ctxt,
+ const supernode *node,
+ const gimple *stmt,
+ tree lhs, enum tree_code op, tree rhs) const = 0;
+
+  /* Return true if it is safe to discard the given state (to help
+ when simplifying state objects).
+ States that need leak detection should return false. */
+ virtual bool can_purge_p (state_t s) const = 0;
+
+ /* Called when VAR leaks (and !can_purge_p). */
+ virtual pending_diagnostic *on_leak (tree var ATTRIBUTE_UNUSED) const
+ {
+ return NULL;
+ }
+
+ void validate (state_t s) const;
+
+protected:
+ state_t add_state (const char *name);
+
+private:
+ DISABLE_COPY_AND_ASSIGN (state_machine);
+
+ const char *m_name;
+ auto_vec<const char *> m_state_names;
+};
+
+/* Is STATE the start state? (zero is hardcoded as the start state). */
+
+static inline bool
+start_start_p (state_machine::state_t state)
+{
+ return state == 0;
+}
+
+/* Abstract base class for state machines to pass to
+ sm_context::on_custom_transition for handling non-standard transitions
+ (e.g. adding a node and edge to simulate registering a callback and having
+ the callback be called later). */
+
+class custom_transition
+{
+public:
+ virtual ~custom_transition () {}
+ virtual void impl_transition (exploded_graph *eg,
+ exploded_node *src_enode,
+ int sm_idx) = 0;
+};
+
+/* Abstract base class giving an interface for the state machine to call
+ the checker engine, at a particular stmt. */
+
+class sm_context
+{
+public:
+ virtual ~sm_context () {}
+
+ /* Get the fndecl used at call, or NULL_TREE.
+ Use in preference to gimple_call_fndecl (and gimple_call_addr_fndecl),
+ since it can look through function pointer assignments and
+ other callback handling. */
+ virtual tree get_fndecl_for_call (const gcall *call) = 0;
+
+ /* Called by state_machine in response to pattern matches:
+ if VAR is in state FROM, transition it to state TO, potentially
+ recording the "origin" of the state as ORIGIN.
+ Use NODE and STMT for location information. */
+ virtual void on_transition (const supernode *node, const gimple *stmt,
+ tree var,
+ state_machine::state_t from,
+ state_machine::state_t to,
+ tree origin = NULL_TREE) = 0;
+
+ /* Called by state_machine in response to pattern matches:
+ issue a diagnostic D if VAR is in state STATE, using NODE and STMT
+ for location information. */
+ virtual void warn_for_state (const supernode *node, const gimple *stmt,
+ tree var, state_machine::state_t state,
+ pending_diagnostic *d) = 0;
+
+ virtual tree get_readable_tree (tree expr)
+ {
+ return expr;
+ }
+
+ virtual state_machine::state_t get_global_state () const = 0;
+ virtual void set_global_state (state_machine::state_t) = 0;
+
+ /* A vfunc for handling custom transitions, such as when registering
+ a signal handler. */
+ virtual void on_custom_transition (custom_transition *transition) = 0;
+
+protected:
+ sm_context (int sm_idx, const state_machine &sm)
+ : m_sm_idx (sm_idx), m_sm (sm) {}
+
+ int m_sm_idx;
+ const state_machine &m_sm;
+};
+
+
+/* The various state_machine subclasses are hidden in their respective
+ implementation files. */
+
+extern void make_checkers (auto_delete_vec <state_machine> &out,
+ logger *logger);
+
+extern state_machine *make_malloc_state_machine (logger *logger);
+extern state_machine *make_fileptr_state_machine (logger *logger);
+extern state_machine *make_taint_state_machine (logger *logger);
+extern state_machine *make_sensitive_state_machine (logger *logger);
+extern state_machine *make_signal_state_machine (logger *logger);
+extern state_machine *make_pattern_test_state_machine (logger *logger);
+
+#endif /* GCC_ANALYZER_SM_H */
--- /dev/null
+/* Classes for purging state at function_points.
+ Copyright (C) 2019-2020 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tree.h"
+#include "timevar.h"
+#include "tree-ssa-alias.h"
+#include "function.h"
+#include "basic-block.h"
+#include "gimple.h"
+#include "stringpool.h"
+#include "tree-vrp.h"
+#include "gimple-ssa.h"
+#include "tree-ssanames.h"
+#include "tree-phinodes.h"
+#include "options.h"
+#include "ssa-iterators.h"
+#include "gimple-pretty-print.h"
+#include "function.h"
+#include "analyzer/analyzer.h"
+#include "analyzer/call-string.h"
+#include "digraph.h"
+#include "ordered-hash-map.h"
+#include "cfg.h"
+#include "gimple-iterator.h"
+#include "cgraph.h"
+#include "analyzer/supergraph.h"
+#include "analyzer/program-point.h"
+#include "analyzer/analyzer-logging.h"
+#include "analyzer/state-purge.h"
+
+#if ENABLE_ANALYZER
+
+/* state_purge_map's ctor. Walk all SSA names in all functions, building
+ a state_purge_per_ssa_name instance for each. */
+
+state_purge_map::state_purge_map (const supergraph &sg,
+ logger *logger)
+: log_user (logger), m_sg (sg)
+{
+ LOG_FUNC (logger);
+
+ auto_timevar tv (TV_ANALYZER_STATE_PURGE);
+
+ cgraph_node *node;
+ FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (node)
+ {
+ function *fun = node->get_fun ();
+ if (logger)
+ log ("function: %s", function_name (fun));
+ tree name;
+      unsigned int i;
+ FOR_EACH_SSA_NAME (i, name, fun)
+ {
+ /* For now, don't bother tracking the .MEM SSA names. */
+ if (tree var = SSA_NAME_VAR (name))
+ if (TREE_CODE (var) == VAR_DECL)
+ if (VAR_DECL_IS_VIRTUAL_OPERAND (var))
+ continue;
+ m_map.put (name, new state_purge_per_ssa_name (*this, name, fun));
+ }
+ }
+}
+
+/* state_purge_map's dtor. */
+
+state_purge_map::~state_purge_map ()
+{
+ for (iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
+ delete (*iter).second;
+}
+
+/* state_purge_per_ssa_name's ctor.
+
+ Locate all uses of VAR within FUN.
+ Walk backwards from each use, marking program points, until
+ we reach the def stmt, populating m_points_needing_var.
+
+ We have to track program points rather than
+ just stmts since there could be empty basic blocks on the way. */
+
+state_purge_per_ssa_name::state_purge_per_ssa_name (const state_purge_map &map,
+ tree name,
+ function *fun)
+: m_points_needing_name (), m_name (name), m_fun (fun)
+{
+ LOG_FUNC (map.get_logger ());
+
+ if (map.get_logger ())
+ {
+ map.log ("SSA name: %qE within %qD", name, fun->decl);
+
+ /* Show def stmt. */
+ const gimple *def_stmt = SSA_NAME_DEF_STMT (name);
+ pretty_printer pp;
+ pp_gimple_stmt_1 (&pp, def_stmt, 0, (dump_flags_t)0);
+ map.log ("def stmt: %s", pp_formatted_text (&pp));
+ }
+
+ auto_vec<function_point> worklist;
+
+ /* Add all immediate uses of name to the worklist.
+ Compare with debug_immediate_uses. */
+ imm_use_iterator iter;
+ use_operand_p use_p;
+ FOR_EACH_IMM_USE_FAST (use_p, iter, name)
+ {
+ if (USE_STMT (use_p))
+ {
+ const gimple *use_stmt = USE_STMT (use_p);
+ if (map.get_logger ())
+ {
+ pretty_printer pp;
+ pp_gimple_stmt_1 (&pp, use_stmt, 0, (dump_flags_t)0);
+ map.log ("used by stmt: %s", pp_formatted_text (&pp));
+ }
+
+ const supernode *snode
+ = map.get_sg ().get_supernode_for_stmt (use_stmt);
+
+ /* If it's a use within a phi node, then we care about
+ which in-edge we came from. */
+ if (use_stmt->code == GIMPLE_PHI)
+ {
+ for (gphi_iterator gpi
+ = const_cast<supernode *> (snode)->start_phis ();
+ !gsi_end_p (gpi); gsi_next (&gpi))
+ {
+ gphi *phi = gpi.phi ();
+ if (phi == use_stmt)
+ {
+ /* Find arguments (and thus in-edges) which use NAME. */
+ for (unsigned arg_idx = 0;
+ arg_idx < gimple_phi_num_args (phi);
+ ++arg_idx)
+ {
+ if (name == gimple_phi_arg (phi, arg_idx)->def)
+ {
+ edge in_edge = gimple_phi_arg_edge (phi, arg_idx);
+ const superedge *in_sedge
+ = map.get_sg ().get_edge_for_cfg_edge (in_edge);
+ function_point point
+ = function_point::before_supernode
+ (snode, in_sedge);
+ add_to_worklist (point, &worklist,
+ map.get_logger ());
+ m_points_needing_name.add (point);
+ }
+ }
+ }
+ }
+ }
+ else
+ {
+ function_point point = before_use_stmt (map, use_stmt);
+ add_to_worklist (point, &worklist, map.get_logger ());
+ m_points_needing_name.add (point);
+
+ /* We also need to add a use for conditionals and switches, whose
+ stmt "happens" at the after_supernode point, since it is used
+ there to filter the out-edges. */
+ if (use_stmt == snode->get_last_stmt ())
+ {
+ if (map.get_logger ())
+ map.log ("last stmt in BB");
+ function_point point
+ = function_point::after_supernode (snode);
+ add_to_worklist (point, &worklist, map.get_logger ());
+ m_points_needing_name.add (point);
+ }
+ else
+ if (map.get_logger ())
+ map.log ("not last stmt in BB");
+ }
+ }
+ }
+
+ /* Process worklist by walking backwards until we reach the def stmt. */
+ {
+ log_scope s (map.get_logger (), "processing worklist");
+ while (worklist.length () > 0)
+ {
+ function_point point = worklist.pop ();
+ process_point (point, &worklist, map);
+ }
+ }
+
+ if (map.get_logger ())
+ {
+ map.log ("%qE in %qD is needed to process:", name, fun->decl);
+ for (point_set_t::iterator iter = m_points_needing_name.begin ();
+ iter != m_points_needing_name.end ();
+ ++iter)
+ {
+ map.start_log_line ();
+ map.get_logger ()->log_partial (" point: ");
+ (*iter).print (map.get_logger ()->get_printer (), format (false));
+ map.end_log_line ();
+ }
+ }
+}
+
+/* Return true if the SSA name is needed at POINT. */
+
+bool
+state_purge_per_ssa_name::needed_at_point_p (const function_point &point) const
+{
+ return const_cast <point_set_t &> (m_points_needing_name).contains (point);
+}
+
+/* Get the function_point representing immediately before USE_STMT.
+ Subroutine of ctor. */
+
+function_point
+state_purge_per_ssa_name::before_use_stmt (const state_purge_map &map,
+ const gimple *use_stmt)
+{
+ gcc_assert (use_stmt->code != GIMPLE_PHI);
+
+ const supernode *supernode
+ = map.get_sg ().get_supernode_for_stmt (use_stmt);
+ unsigned int stmt_idx = supernode->get_stmt_index (use_stmt);
+ return function_point::before_stmt (supernode, stmt_idx);
+}
+
+/* Add POINT to *WORKLIST if the point has not already been seen.
+ Subroutine of ctor. */
+
+void
+state_purge_per_ssa_name::add_to_worklist (const function_point &point,
+ auto_vec<function_point> *worklist,
+ logger *logger)
+{
+ LOG_FUNC (logger);
+ if (logger)
+ {
+ logger->start_log_line ();
+ logger->log_partial ("point: '");
+ point.print (logger->get_printer (), format (false));
+ logger->log_partial ("' for worklist for %qE", m_name);
+ logger->end_log_line ();
+ }
+
+ gcc_assert (point.get_function () == m_fun);
+ if (point.get_from_edge ())
+ gcc_assert (point.get_from_edge ()->get_kind () == SUPEREDGE_CFG_EDGE);
+
+ if (m_points_needing_name.contains (point))
+ {
+ if (logger)
+ logger->log ("already seen for %qE", m_name);
+ }
+ else
+ {
+ if (logger)
+ logger->log ("not seen; adding to worklist for %qE", m_name);
+ m_points_needing_name.add (point);
+ worklist->safe_push (point);
+ }
+}
+
+/* Process POINT, popped from WORKLIST.
+ Iterate over predecessors of POINT, adding to WORKLIST. */
+
+void
+state_purge_per_ssa_name::process_point (const function_point &point,
+ auto_vec<function_point> *worklist,
+ const state_purge_map &map)
+{
+ logger *logger = map.get_logger ();
+ LOG_FUNC (logger);
+ if (logger)
+ {
+ logger->start_log_line ();
+ logger->log_partial ("considering point: '");
+ point.print (logger->get_printer (), format (false));
+ logger->log_partial ("' for %qE", m_name);
+ logger->end_log_line ();
+ }
+
+ gimple *def_stmt = SSA_NAME_DEF_STMT (m_name);
+
+ const supernode *snode = point.get_supernode ();
+
+ switch (point.get_kind ())
+ {
+ default:
+ gcc_unreachable ();
+
+ case PK_ORIGIN:
+ break;
+
+ case PK_BEFORE_SUPERNODE:
+ {
+ for (gphi_iterator gpi
+ = const_cast<supernode *> (snode)->start_phis ();
+ !gsi_end_p (gpi); gsi_next (&gpi))
+ {
+ gphi *phi = gpi.phi ();
+ if (phi == def_stmt)
+ {
+ if (logger)
+ logger->log ("def stmt within phis; terminating");
+ return;
+ }
+ }
+
+ /* Add given pred to worklist. */
+ if (point.get_from_edge ())
+ {
+ gcc_assert (point.get_from_edge ()->m_src);
+ add_to_worklist
+ (function_point::after_supernode (point.get_from_edge ()->m_src),
+ worklist, logger);
+ }
+ else
+ {
+ /* Add any intraprocedural edge for a call. */
+ if (snode->m_returning_call)
+ {
+ cgraph_edge *cedge
+ = supergraph_call_edge (snode->m_fun,
+ snode->m_returning_call);
+ gcc_assert (cedge);
+ superedge *sedge
+ = map.get_sg ().get_intraprocedural_edge_for_call (cedge);
+ gcc_assert (sedge);
+ add_to_worklist
+ (function_point::after_supernode (sedge->m_src),
+ worklist, logger);
+ }
+ }
+ }
+ break;
+
+ case PK_BEFORE_STMT:
+ {
+ if (def_stmt == point.get_stmt ())
+ {
+ if (logger)
+ logger->log ("def stmt; terminating");
+ return;
+ }
+ if (point.get_stmt_idx () > 0)
+ add_to_worklist (function_point::before_stmt
+ (snode, point.get_stmt_idx () - 1),
+ worklist, logger);
+ else
+ {
+ /* Add before_supernode to worklist. This captures the in-edge,
+ so we have to do it once per in-edge. */
+ unsigned i;
+ superedge *pred;
+ FOR_EACH_VEC_ELT (snode->m_preds, i, pred)
+ add_to_worklist (function_point::before_supernode (snode,
+ pred),
+ worklist, logger);
+ }
+ }
+ break;
+
+ case PK_AFTER_SUPERNODE:
+ {
+ if (snode->m_stmts.length ())
+ add_to_worklist
+ (function_point::before_stmt (snode,
+ snode->m_stmts.length () - 1),
+ worklist, logger);
+ else
+ {
+ /* Add before_supernode to worklist. This captures the in-edge,
+ so we have to do it once per in-edge. */
+ unsigned i;
+ superedge *pred;
+ FOR_EACH_VEC_ELT (snode->m_preds, i, pred)
+ add_to_worklist (function_point::before_supernode (snode,
+ pred),
+ worklist, logger);
+ /* For the ENTRY block, also add its "before supernode" point,
+ so that we don't erroneously purge SSA names for the initial
+ values of parameters. */
+ if (snode->entry_p ())
+ {
+ add_to_worklist
+ (function_point::before_supernode (snode, NULL),
+ worklist, logger);
+ }
+ }
+ }
+ break;
+ }
+}
+
+/* class state_purge_annotator : public dot_annotator. */
+
+/* Implementation of dot_annotator::add_node_annotations vfunc for
+ state_purge_annotator.
+
+ Add an additional record showing which names are purged on entry
+ to the supernode N. */
+
+void
+state_purge_annotator::add_node_annotations (graphviz_out *gv,
+ const supernode &n) const
+{
+ if (m_map == NULL)
+ return;
+
+ pretty_printer *pp = gv->get_pp ();
+
+ pp_printf (pp, "annotation_for_node_%i", n.m_index);
+ pp_printf (pp, " [shape=none,margin=0,style=filled,fillcolor=%s,label=\"",
+ "lightblue");
+ pp_write_text_to_stream (pp);
+
+ // FIXME: passing in a NULL in-edge means we get no hits
+ function_point before_supernode
+ (function_point::before_supernode (&n, NULL));
+
+ for (state_purge_map::iterator iter = m_map->begin ();
+ iter != m_map->end ();
+ ++iter)
+ {
+ tree name = (*iter).first;
+ state_purge_per_ssa_name *per_name_data = (*iter).second;
+ if (per_name_data->get_function () == n.m_fun)
+ {
+PUSH_IGNORE_WFORMAT
+ if (per_name_data->needed_at_point_p (before_supernode))
+ pp_printf (pp, "%qE needed here", name);
+ else
+ pp_printf (pp, "%qE not needed here", name);
+POP_IGNORE_WFORMAT
+ }
+ pp_newline (pp);
+ }
+
+ pp_string (pp, "\"];\n\n");
+ pp_flush (pp);
+}
+
+/* Print V to GV as a comma-separated list in braces within a <TR>,
+ titling it with TITLE.
+
+ Subroutine of state_purge_annotator::add_stmt_annotations. */
+
+static void
+print_vec_of_names (graphviz_out *gv, const char *title,
+ const auto_vec<tree> &v)
+{
+ pretty_printer *pp = gv->get_pp ();
+ tree name;
+ unsigned i;
+ gv->begin_tr ();
+ pp_printf (pp, "%s: {", title);
+ FOR_EACH_VEC_ELT (v, i, name)
+ {
+ if (i > 0)
+ pp_string (pp, ", ");
+PUSH_IGNORE_WFORMAT
+ pp_printf (pp, "%qE", name);
+POP_IGNORE_WFORMAT
+ }
+ pp_printf (pp, "}");
+ pp_write_text_as_html_like_dot_to_stream (pp);
+ gv->end_tr ();
+ pp_newline (pp);
+}
+
+/* Implementation of dot_annotator::add_stmt_annotations for
+ state_purge_annotator.
+
+ Add text showing which names are purged at STMT. */
+
+void
+state_purge_annotator::add_stmt_annotations (graphviz_out *gv,
+ const gimple *stmt) const
+{
+ if (m_map == NULL)
+ return;
+
+ if (stmt->code == GIMPLE_PHI)
+ return;
+
+ pretty_printer *pp = gv->get_pp ();
+
+ pp_newline (pp);
+
+ const supernode *supernode = m_map->get_sg ().get_supernode_for_stmt (stmt);
+ unsigned int stmt_idx = supernode->get_stmt_index (stmt);
+ function_point before_stmt
+ (function_point::before_stmt (supernode, stmt_idx));
+
+ auto_vec<tree> needed;
+ auto_vec<tree> not_needed;
+ for (state_purge_map::iterator iter = m_map->begin ();
+ iter != m_map->end ();
+ ++iter)
+ {
+ tree name = (*iter).first;
+ state_purge_per_ssa_name *per_name_data = (*iter).second;
+ if (per_name_data->get_function () == supernode->m_fun)
+ {
+ if (per_name_data->needed_at_point_p (before_stmt))
+ needed.safe_push (name);
+ else
+ not_needed.safe_push (name);
+ }
+ }
+
+ print_vec_of_names (gv, "needed here", needed);
+ print_vec_of_names (gv, "not needed here", not_needed);
+}
+
+#endif /* #if ENABLE_ANALYZER */
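
The heart of state_purge_per_ssa_name above is a backwards worklist walk: every point that uses the SSA name is seeded into a worklist, predecessors are visited until the defining statement is reached, and every visited point is recorded as needing the name. A minimal standalone sketch of that pattern over a toy predecessor map (integer point ids only, nothing from the analyzer) is:

#include <cstdio>
#include <map>
#include <set>
#include <vector>

int
main ()
{
  /* Toy program points 0..5 with explicit predecessor edges; point 0
     is the "def", points 4 and 5 are "uses" (ids are illustrative).  */
  std::map<int, std::vector<int>> preds
    = { {1, {0}}, {2, {1}}, {3, {1}}, {4, {2}}, {5, {3}} };
  const int def_point = 0;

  std::vector<int> worklist = { 4, 5 };
  std::set<int> needed (worklist.begin (), worklist.end ());

  while (!worklist.empty ())
    {
      int point = worklist.back ();
      worklist.pop_back ();
      if (point == def_point)
        continue; /* Stop at the def, as process_point does.  */
      for (int pred : preds[point])
        if (needed.insert (pred).second)
          worklist.push_back (pred);
    }

  for (int point : needed)
    std::printf ("name needed at point %d\n", point);
  return 0;
}
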
--- /dev/null
+/* Classes for purging state at function_points.
+ Copyright (C) 2019-2020 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_ANALYZER_STATE_PURGE_H
+#define GCC_ANALYZER_STATE_PURGE_H
+
+/* Hash traits for function_point. */
+
+template <> struct default_hash_traits<function_point>
+: public pod_hash_traits<function_point>
+{
+ static const bool empty_zero_p = false;
+};
+
+template <>
+inline hashval_t
+pod_hash_traits<function_point>::hash (value_type v)
+{
+ return v.hash ();
+}
+
+template <>
+inline bool
+pod_hash_traits<function_point>::equal (const value_type &existing,
+ const value_type &candidate)
+{
+ return existing == candidate;
+}
+template <>
+inline void
+pod_hash_traits<function_point>::mark_deleted (value_type &v)
+{
+ v = function_point::deleted ();
+}
+template <>
+inline void
+pod_hash_traits<function_point>::mark_empty (value_type &v)
+{
+ v = function_point::empty ();
+}
+template <>
+inline bool
+pod_hash_traits<function_point>::is_deleted (value_type v)
+{
+ return v.get_kind () == PK_DELETED;
+}
+template <>
+inline bool
+pod_hash_traits<function_point>::is_empty (value_type v)
+{
+ return v.get_kind () == PK_EMPTY;
+}
+
+/* The result of analyzing which SSA names can be purged from state at
+ different points in the program, so that we can simplify program_state
+ objects, in the hope of reducing state-blowup. */
+
+class state_purge_map : public log_user
+{
+public:
+ typedef ordered_hash_map<tree, state_purge_per_ssa_name *> map_t;
+ typedef map_t::iterator iterator;
+
+ state_purge_map (const supergraph &sg, logger *logger);
+ ~state_purge_map ();
+
+ const state_purge_per_ssa_name &get_data_for_ssa_name (tree name) const
+ {
+ gcc_assert (TREE_CODE (name) == SSA_NAME);
+ if (tree var = SSA_NAME_VAR (name))
+ if (TREE_CODE (var) == VAR_DECL)
+ gcc_assert (!VAR_DECL_IS_VIRTUAL_OPERAND (var));
+
+ state_purge_per_ssa_name **slot
+ = const_cast <map_t&> (m_map).get (name);
+ return **slot;
+ }
+
+ const supergraph &get_sg () const { return m_sg; }
+
+ iterator begin () const { return m_map.begin (); }
+ iterator end () const { return m_map.end (); }
+
+private:
+ DISABLE_COPY_AND_ASSIGN (state_purge_map);
+
+ const supergraph &m_sg;
+ map_t m_map;
+};
+
+/* The part of a state_purge_map relating to a specific SSA name.
+
+ The result of analyzing a given SSA name, recording which
+ function_points need to retain state information about it to handle
+ their successor states, so that we can simplify program_state objects,
+ in the hope of reducing state-blowup. */
+
+class state_purge_per_ssa_name
+{
+public:
+ state_purge_per_ssa_name (const state_purge_map &map,
+ tree name,
+ function *fun);
+
+ bool needed_at_point_p (const function_point &point) const;
+
+ function *get_function () const { return m_fun; }
+
+private:
+ static function_point before_use_stmt (const state_purge_map &map,
+ const gimple *use_stmt);
+
+ void add_to_worklist (const function_point &point,
+ auto_vec<function_point> *worklist,
+ logger *logger);
+
+ void process_point (const function_point &point,
+ auto_vec<function_point> *worklist,
+ const state_purge_map &map);
+
+ typedef hash_set<function_point> point_set_t;
+ point_set_t m_points_needing_name;
+ tree m_name;
+ function *m_fun;
+};
+
+/* Subclass of dot_annotator for use by -fdump-analyzer-state-purge.
+ Annotate the .dot output with state-purge information. */
+
+class state_purge_annotator : public dot_annotator
+{
+public:
+ state_purge_annotator (const state_purge_map *map) : m_map (map) {}
+
+ void add_node_annotations (graphviz_out *gv, const supernode &n)
+ const FINAL OVERRIDE;
+
+ void add_stmt_annotations (graphviz_out *gv, const gimple *stmt)
+ const FINAL OVERRIDE;
+
+private:
+ const state_purge_map *m_map;
+};
+
+#endif /* GCC_ANALYZER_STATE_PURGE_H */
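
The pod_hash_traits specializations above exist so that function_point values can be used directly as keys in hash sets and maps. A minimal standalone sketch of the same idea with the standard containers, using a made-up point-like struct rather than GCC's hash-table traits, is:

#include <cstddef>
#include <cstdio>
#include <functional>
#include <unordered_set>

/* A small POD-like key, loosely analogous to a (node, stmt-index) point.  */
struct point
{
  int node_index;
  int stmt_index;
  bool operator== (const point &other) const
  {
    return node_index == other.node_index
           && stmt_index == other.stmt_index;
  }
};

/* Hash functor playing the role of the hash () in the traits above.  */
struct point_hash
{
  std::size_t operator() (const point &p) const
  {
    return std::hash<int> () (p.node_index) * 31 + p.stmt_index;
  }
};

int
main ()
{
  std::unordered_set<point, point_hash> points_needing_name;
  points_needing_name.insert ({1, 0});
  points_needing_name.insert ({1, 0}); /* Duplicate: the set keeps one copy.  */
  std::printf ("%zu distinct points\n", points_needing_name.size ());
  return 0;
}
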
--- /dev/null
+/* "Supergraph" classes that combine CFGs and callgraph into one digraph.
+ Copyright (C) 2019-2020 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tree.h"
+#include "tm.h"
+#include "toplev.h"
+#include "hash-table.h"
+#include "vec.h"
+#include "ggc.h"
+#include "basic-block.h"
+#include "function.h"
+#include "gimple-fold.h"
+#include "tree-eh.h"
+#include "gimple-expr.h"
+#include "is-a.h"
+#include "timevar.h"
+#include "gimple.h"
+#include "gimple-iterator.h"
+#include "gimple-pretty-print.h"
+#include "tree-pretty-print.h"
+#include "graphviz.h"
+#include "cgraph.h"
+#include "tree-dfa.h"
+#include "cfganal.h"
+#include "function.h"
+#include "analyzer/analyzer.h"
+#include "ordered-hash-map.h"
+#include "options.h"
+#include "cgraph.h"
+#include "cfg.h"
+#include "digraph.h"
+#include "analyzer/supergraph.h"
+#include "analyzer/analyzer-logging.h"
+
+#if ENABLE_ANALYZER
+
+/* Get the cgraph_edge, but only if there's an underlying function body. */
+
+cgraph_edge *
+supergraph_call_edge (function *fun, gimple *stmt)
+{
+ gcall *call = dyn_cast<gcall *> (stmt);
+ if (!call)
+ return NULL;
+ cgraph_edge *edge = cgraph_node::get (fun->decl)->get_edge (stmt);
+ if (!edge)
+ return NULL;
+ if (!edge->callee)
+ return NULL; /* e.g. for a function pointer. */
+ if (!edge->callee->get_fun ())
+ return NULL;
+ return edge;
+}
+
+/* supergraph's ctor. Walk the callgraph, building supernodes for each
+ CFG basic block, splitting the basic blocks at callsites. Join
+ together the supernodes with interprocedural and intraprocedural
+ superedges as appropriate. */
+
+supergraph::supergraph (logger *logger)
+{
+ auto_timevar tv (TV_ANALYZER_SUPERGRAPH);
+
+ LOG_FUNC (logger);
+
+ /* First pass: make supernodes. */
+ {
+ /* Sort the cgraph_nodes? */
+ cgraph_node *node;
+ FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (node)
+ {
+ function *fun = node->get_fun ();
+
+ /* Ensure that EDGE_DFS_BACK is correct for every CFG edge in
+ the supergraph (by doing it per-function). */
+ auto_cfun sentinel (fun);
+ mark_dfs_back_edges ();
+
+ const int start_idx = m_nodes.length ();
+
+ basic_block bb;
+ FOR_ALL_BB_FN (bb, fun)
+ {
+ /* The initial supernode for the BB gets the phi nodes (if any). */
+ supernode *node_for_stmts = add_node (fun, bb, NULL, phi_nodes (bb));
+ m_bb_to_initial_node.put (bb, node_for_stmts);
+ for (gphi_iterator gpi = gsi_start_phis (bb); !gsi_end_p (gpi);
+ gsi_next (&gpi))
+ {
+ gimple *stmt = gsi_stmt (gpi);
+ m_stmt_to_node_t.put (stmt, node_for_stmts);
+ }
+
+ /* Append statements from BB to the current supernode, splitting
+ them into a new supernode at each call site; such call statements
+ appear in both supernodes (representing call and return). */
+ gimple_stmt_iterator gsi;
+ for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
+ {
+ gimple *stmt = gsi_stmt (gsi);
+ node_for_stmts->m_stmts.safe_push (stmt);
+ m_stmt_to_node_t.put (stmt, node_for_stmts);
+ if (cgraph_edge *edge = supergraph_call_edge (fun, stmt))
+ {
+ m_cgraph_edge_to_caller_prev_node.put (edge, node_for_stmts);
+ node_for_stmts = add_node (fun, bb, as_a <gcall *> (stmt), NULL);
+ m_cgraph_edge_to_caller_next_node.put (edge, node_for_stmts);
+ }
+ }
+
+ m_bb_to_final_node.put (bb, node_for_stmts);
+ }
+
+ const unsigned num_snodes = m_nodes.length () - start_idx;
+ m_function_to_num_snodes.put (fun, num_snodes);
+
+ if (logger)
+ {
+ const int end_idx = m_nodes.length () - 1;
+ logger->log ("SN: %i...%i: function %qD",
+ start_idx, end_idx, fun->decl);
+ }
+ }
+ }
+
+ /* Second pass: make superedges. */
+ {
+ /* Make superedges for CFG edges. */
+ for (bb_to_node_t::iterator iter = m_bb_to_final_node.begin ();
+ iter != m_bb_to_final_node.end ();
+ ++iter)
+ {
+ basic_block bb = (*iter).first;
+ supernode *src_supernode = (*iter).second;
+
+ ::edge cfg_edge;
+ int idx;
+ if (bb->succs)
+ FOR_EACH_VEC_ELT (*bb->succs, idx, cfg_edge)
+ {
+ basic_block dest_cfg_block = cfg_edge->dest;
+ supernode *dest_supernode
+ = *m_bb_to_initial_node.get (dest_cfg_block);
+ cfg_superedge *cfg_sedge
+ = add_cfg_edge (src_supernode, dest_supernode, cfg_edge, idx);
+ m_cfg_edge_to_cfg_superedge.put (cfg_edge, cfg_sedge);
+ }
+ }
+
+ /* Make interprocedural superedges for calls. */
+ {
+ for (cgraph_edge_to_node_t::iterator iter
+ = m_cgraph_edge_to_caller_prev_node.begin ();
+ iter != m_cgraph_edge_to_caller_prev_node.end ();
+ ++iter)
+ {
+ cgraph_edge *edge = (*iter).first;
+ supernode *caller_prev_supernode = (*iter).second;
+ basic_block callee_cfg_block
+ = ENTRY_BLOCK_PTR_FOR_FN (edge->callee->get_fun ());
+ supernode *callee_supernode
+ = *m_bb_to_initial_node.get (callee_cfg_block);
+ call_superedge *sedge
+ = add_call_superedge (caller_prev_supernode,
+ callee_supernode,
+ edge);
+ m_cgraph_edge_to_call_superedge.put (edge, sedge);
+ }
+ }
+
+ /* Make interprocedural superedges for returns. */
+ {
+ for (cgraph_edge_to_node_t::iterator iter
+ = m_cgraph_edge_to_caller_next_node.begin ();
+ iter != m_cgraph_edge_to_caller_next_node.end ();
+ ++iter)
+ {
+ cgraph_edge *edge = (*iter).first;
+ supernode *caller_next_supernode = (*iter).second;
+ basic_block callee_cfg_block
+ = EXIT_BLOCK_PTR_FOR_FN (edge->callee->get_fun ());
+ supernode *callee_supernode
+ = *m_bb_to_initial_node.get (callee_cfg_block);
+ return_superedge *sedge
+ = add_return_superedge (callee_supernode,
+ caller_next_supernode,
+ edge);
+ m_cgraph_edge_to_return_superedge.put (edge, sedge);
+ }
+ }
+
+ /* Make intraprocedural superedges linking the two halves of a call. */
+ {
+ for (cgraph_edge_to_node_t::iterator iter
+ = m_cgraph_edge_to_caller_prev_node.begin ();
+ iter != m_cgraph_edge_to_caller_prev_node.end ();
+ ++iter)
+ {
+ cgraph_edge *edge = (*iter).first;
+ supernode *caller_prev_supernode = (*iter).second;
+ supernode *caller_next_supernode
+ = *m_cgraph_edge_to_caller_next_node.get (edge);
+ superedge *sedge
+ = new callgraph_superedge (caller_prev_supernode,
+ caller_next_supernode,
+ SUPEREDGE_INTRAPROCEDURAL_CALL,
+ edge);
+ add_edge (sedge);
+ m_cgraph_edge_to_intraproc_superedge.put (edge, sedge);
+ }
+
+ }
+ }
+}
+
+/* Dump this graph in .dot format to PP, using DUMP_ARGS.
+ Cluster the supernodes by function, then by BB from original CFG. */
+
+void
+supergraph::dump_dot_to_pp (pretty_printer *pp,
+ const dump_args_t &dump_args) const
+{
+ graphviz_out gv (pp);
+
+ pp_string (pp, "digraph \"");
+ pp_write_text_to_stream (pp);
+ pp_string (pp, "supergraph");
+ pp_write_text_as_dot_label_to_stream (pp, /*for_record=*/false);
+ pp_string (pp, "\" {\n");
+ gv.indent ();
+
+ gv.println ("overlap=false;");
+ gv.println ("compound=true;");
+
+ /* TODO: maybe (optionally) sub-subdivide by TU, for LTO; see also:
+ https://gcc-python-plugin.readthedocs.io/en/latest/_images/sample-supergraph.png
+ */
+
+ /* Break out the supernodes into clusters by function. */
+ {
+ cgraph_node *node;
+ FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (node)
+ {
+ function *fun = node->get_fun ();
+ const char *funcname = function_name (fun);
+ gv.println ("subgraph \"cluster_%s\" {",
+ funcname);
+ gv.indent ();
+ pp_printf (pp,
+ ("style=\"dashed\";"
+ " color=\"black\";"
+ " label=\"%s\";\n"),
+ funcname);
+
+ /* Break out the nodes into clusters by BB from original CFG. */
+ {
+ basic_block bb;
+ FOR_ALL_BB_FN (bb, fun)
+ {
+ if (dump_args.m_flags & SUPERGRAPH_DOT_SHOW_BBS)
+ {
+ gv.println ("subgraph \"cluster_%s_bb_%i\" {",
+ funcname, bb->index);
+ gv.indent ();
+ pp_printf (pp,
+ ("style=\"dashed\";"
+ " color=\"black\";"
+ " label=\"bb: %i\";\n"),
+ bb->index);
+ }
+
+ // TODO: maybe keep an index per-function/per-bb to speed this up?
+ int i;
+ supernode *n;
+ FOR_EACH_VEC_ELT (m_nodes, i, n)
+ if (n->m_fun == fun && n->m_bb == bb)
+ n->dump_dot (&gv, dump_args);
+
+ if (dump_args.m_flags & SUPERGRAPH_DOT_SHOW_BBS)
+ {
+ /* Terminate per-bb "subgraph" */
+ gv.outdent ();
+ gv.println ("}");
+ }
+ }
+ }
+
+ /* Add an invisible edge from ENTRY to EXIT, to improve the graph layout. */
+ pp_string (pp, "\t");
+ get_node_for_function_entry (fun)->dump_dot_id (pp);
+ pp_string (pp, ":s -> ");
+ get_node_for_function_exit (fun)->dump_dot_id (pp);
+ pp_string (pp, ":n [style=\"invis\",constraint=true];\n");
+
+ /* Terminate per-function "subgraph" */
+ gv.outdent ();
+ gv.println ("}");
+ }
+ }
+
+ /* Superedges. */
+ int i;
+ superedge *e;
+ FOR_EACH_VEC_ELT (m_edges, i, e)
+ e->dump_dot (&gv, dump_args);
+
+ /* Terminate "digraph" */
+ gv.outdent ();
+ gv.println ("}");
+}
+
+/* Dump this graph in .dot format to FP, using DUMP_ARGS. */
+
+void
+supergraph::dump_dot_to_file (FILE *fp, const dump_args_t &dump_args) const
+{
+ pretty_printer *pp = global_dc->printer->clone ();
+ pp_show_color (pp) = 0;
+ /* %qE in logs for SSA_NAMEs should show the ssa names, rather than
+ trying to prettify things by showing the underlying var. */
+ pp_format_decoder (pp) = default_tree_printer;
+
+ pp->buffer->stream = fp;
+ dump_dot_to_pp (pp, dump_args);
+ pp_flush (pp);
+ delete pp;
+}
+
+/* Dump this graph in .dot format to PATH, using DUMP_ARGS. */
+
+void
+supergraph::dump_dot (const char *path, const dump_args_t &dump_args) const
+{
+ FILE *fp = fopen (path, "w");
+ dump_dot_to_file (fp, dump_args);
+ fclose (fp);
+}
+
+/* Create a supernode for BB within FUN and add it to this supergraph.
+
+ If RETURNING_CALL is non-NULL, the supernode represents the resumption
+ of the basic block after returning from that call.
+
+ If PHI_NODES is non-NULL, this is the initial supernode for the basic
+ block, and is responsible for any handling of the phi nodes. */
+
+supernode *
+supergraph::add_node (function *fun, basic_block bb, gcall *returning_call,
+ gimple_seq phi_nodes)
+{
+ supernode *n = new supernode (fun, bb, returning_call, phi_nodes,
+ m_nodes.length ());
+ m_nodes.safe_push (n);
+ return n;
+}
+
+/* Create a new cfg_superedge from SRC to DEST for the underlying CFG edge E,
+ adding it to this supergraph.
+
+ If the edge is for a switch statement, create a switch_cfg_superedge
+ subclass using IDX (the index of E within the out-edges from SRC's
+ underlying basic block). */
+
+cfg_superedge *
+supergraph::add_cfg_edge (supernode *src, supernode *dest, ::edge e, int idx)
+{
+ /* Special-case switch edges. */
+ gimple *stmt = src->get_last_stmt ();
+ cfg_superedge *new_edge;
+ if (stmt && stmt->code == GIMPLE_SWITCH)
+ new_edge = new switch_cfg_superedge (src, dest, e, idx);
+ else
+ new_edge = new cfg_superedge (src, dest, e);
+ add_edge (new_edge);
+ return new_edge;
+}
+
+/* Create and add a call_superedge representing an interprocedural call
+ from SRC to DEST, using CEDGE. */
+
+call_superedge *
+supergraph::add_call_superedge (supernode *src, supernode *dest,
+ cgraph_edge *cedge)
+{
+ call_superedge *new_edge = new call_superedge (src, dest, cedge);
+ add_edge (new_edge);
+ return new_edge;
+}
+
+/* Create and add a return_superedge representing returning from an
+ interprocedural call, returning from SRC to DEST, using CEDGE. */
+
+return_superedge *
+supergraph::add_return_superedge (supernode *src, supernode *dest,
+ cgraph_edge *cedge)
+{
+ return_superedge *new_edge = new return_superedge (src, dest, cedge);
+ add_edge (new_edge);
+ return new_edge;
+}
+
+/* Implementation of dnode::dump_dot vfunc for supernodes.
+
+ Write a cluster for the node, and within it a .dot node showing
+ the phi nodes and stmts. Call into any node annotator from ARGS to
+ potentially add other records to the cluster. */
+
+void
+supernode::dump_dot (graphviz_out *gv, const dump_args_t &args) const
+{
+ gv->println ("subgraph cluster_node_%i {",
+ m_index);
+ gv->indent ();
+
+ gv->println("style=\"solid\";");
+ gv->println("color=\"black\";");
+ gv->println("fillcolor=\"lightgrey\";");
+ gv->println("label=\"sn: %i\";", m_index);
+
+ pretty_printer *pp = gv->get_pp ();
+
+ if (args.m_node_annotator)
+ args.m_node_annotator->add_node_annotations (gv, *this);
+
+ gv->write_indent ();
+ dump_dot_id (pp);
+ pp_printf (pp,
+ " [shape=none,margin=0,style=filled,fillcolor=%s,label=<",
+ "lightgrey");
+ pp_string (pp, "<TABLE BORDER=\"0\">");
+ pp_write_text_to_stream (pp);
+
+ if (m_returning_call)
+ {
+ gv->begin_tr ();
+ pp_string (pp, "returning call: ");
+ gv->end_tr ();
+
+ gv->begin_tr ();
+ pp_gimple_stmt_1 (pp, m_returning_call, 0, (dump_flags_t)0);
+ pp_write_text_as_html_like_dot_to_stream (pp);
+ gv->end_tr ();
+
+ if (args.m_node_annotator)
+ args.m_node_annotator->add_stmt_annotations (gv, m_returning_call);
+ pp_newline (pp);
+ }
+
+ if (entry_p ())
+ {
+ pp_string (pp, "<TR><TD>ENTRY</TD></TR>");
+ pp_newline (pp);
+ }
+
+ if (return_p ())
+ {
+ pp_string (pp, "<TR><TD>EXIT</TD></TR>");
+ pp_newline (pp);
+ }
+
+ /* Phi nodes. */
+ for (gphi_iterator gpi = const_cast<supernode *> (this)->start_phis ();
+ !gsi_end_p (gpi); gsi_next (&gpi))
+ {
+ const gimple *stmt = gsi_stmt (gpi);
+ gv->begin_tr ();
+ pp_gimple_stmt_1 (pp, stmt, 0, (dump_flags_t)0);
+ pp_write_text_as_html_like_dot_to_stream (pp);
+ gv->end_tr ();
+
+ if (args.m_node_annotator)
+ args.m_node_annotator->add_stmt_annotations (gv, stmt);
+
+ pp_newline (pp);
+ }
+
+ /* Statements. */
+ int i;
+ gimple *stmt;
+ FOR_EACH_VEC_ELT (m_stmts, i, stmt)
+ {
+ gv->begin_tr ();
+ pp_gimple_stmt_1 (pp, stmt, 0, (dump_flags_t)0);
+ pp_write_text_as_html_like_dot_to_stream (pp);
+ gv->end_tr ();
+
+ if (args.m_node_annotator)
+ args.m_node_annotator->add_stmt_annotations (gv, stmt);
+
+ pp_newline (pp);
+ }
+
+ pp_string (pp, "</TABLE>>];\n\n");
+ pp_flush (pp);
+
+ /* Terminate "subgraph" */
+ gv->outdent ();
+ gv->println ("}");
+}
+
+/* Write an ID for this node to PP, for use in .dot output. */
+
+void
+supernode::dump_dot_id (pretty_printer *pp) const
+{
+ pp_printf (pp, "node_%i", m_index);
+}
+
+/* Get a location_t for the start of this supernode. */
+
+location_t
+supernode::get_start_location () const
+{
+ if (m_returning_call && m_returning_call->location != UNKNOWN_LOCATION)
+ return m_returning_call->location;
+
+ int i;
+ gimple *stmt;
+ FOR_EACH_VEC_ELT (m_stmts, i, stmt)
+ if (stmt->location != UNKNOWN_LOCATION)
+ return stmt->location;
+
+ if (entry_p ())
+ {
+ // TWEAK: show the decl instead; this leads to more readable output:
+ return DECL_SOURCE_LOCATION (m_fun->decl);
+
+ return m_fun->function_start_locus;
+ }
+ if (return_p ())
+ return m_fun->function_end_locus;
+
+ return UNKNOWN_LOCATION;
+}
+
+/* Get a location_t for the end of this supernode. */
+
+location_t
+supernode::get_end_location () const
+{
+ int i;
+ gimple *stmt;
+ FOR_EACH_VEC_ELT_REVERSE (m_stmts, i, stmt)
+ if (stmt->location != UNKNOWN_LOCATION)
+ return stmt->location;
+
+ if (m_returning_call && m_returning_call->location != UNKNOWN_LOCATION)
+ return m_returning_call->location;
+
+ if (entry_p ())
+ return m_fun->function_start_locus;
+ if (return_p ())
+ return m_fun->function_end_locus;
+
+ return UNKNOWN_LOCATION;
+}
+
+/* Given STMT within this supernode, return its index within m_stmts. */
+
+unsigned int
+supernode::get_stmt_index (const gimple *stmt) const
+{
+ unsigned i;
+ gimple *iter_stmt;
+ FOR_EACH_VEC_ELT (m_stmts, i, iter_stmt)
+ if (iter_stmt == stmt)
+ return i;
+ gcc_unreachable ();
+}
+
+/* Implementation of dedge::dump_dot for superedges.
+ Write a .dot edge to GV representing this superedge. */
+
+void
+superedge::dump_dot (graphviz_out *gv, const dump_args_t &) const
+{
+ const char *style = "\"solid,bold\"";
+ const char *color = "black";
+ int weight = 10;
+ const char *constraint = "true";
+
+ switch (m_kind)
+ {
+ default:
+ gcc_unreachable ();
+ case SUPEREDGE_CFG_EDGE:
+ break;
+ case SUPEREDGE_CALL:
+ color = "red";
+ break;
+ case SUPEREDGE_RETURN:
+ color = "green";
+ break;
+ case SUPEREDGE_INTRAPROCEDURAL_CALL:
+ style = "\"dotted\"";
+ break;
+ }
+
+ /* Adapted from graph.c:draw_cfg_node_succ_edges. */
+ if (::edge cfg_edge = get_any_cfg_edge ())
+ {
+ if (cfg_edge->flags & EDGE_FAKE)
+ {
+ style = "dotted";
+ color = "green";
+ weight = 0;
+ }
+ else if (cfg_edge->flags & EDGE_DFS_BACK)
+ {
+ style = "\"dotted,bold\"";
+ color = "blue";
+ weight = 10;
+ }
+ else if (cfg_edge->flags & EDGE_FALLTHRU)
+ {
+ color = "blue";
+ weight = 100;
+ }
+
+ if (cfg_edge->flags & EDGE_ABNORMAL)
+ color = "red";
+ }
+
+ gv->write_indent ();
+
+ pretty_printer *pp = gv->get_pp ();
+
+ m_src->dump_dot_id (pp);
+ pp_string (pp, " -> ");
+ m_dest->dump_dot_id (pp);
+ pp_printf (pp,
+ (" [style=%s, color=%s, weight=%d, constraint=%s,"
+ " ltail=\"cluster_node_%i\", lhead=\"cluster_node_%i\""
+ " headlabel=\""),
+ style, color, weight, constraint,
+ m_src->m_index, m_dest->m_index);
+
+ dump_label_to_pp (pp, false);
+
+ pp_printf (pp, "\"];\n");
+}
+
+/* If this is an intraprocedural superedge, return the associated
+ CFG edge. Otherwise, return NULL. */
+
+::edge
+superedge::get_any_cfg_edge () const
+{
+ if (const cfg_superedge *sub = dyn_cast_cfg_superedge ())
+ return sub->get_cfg_edge ();
+ return NULL;
+}
+
+/* If this is an interprocedural superedge, return the associated
+ cgraph_edge *. Otherwise, return NULL. */
+
+cgraph_edge *
+superedge::get_any_callgraph_edge () const
+{
+ if (const callgraph_superedge *sub = dyn_cast_callgraph_superedge ())
+ return sub->m_cedge;
+ return NULL;
+}
+
+/* Build a description of this superedge (e.g. "true" for the true
+ edge of a conditional, or "case 42:" for a switch case).
+
+ The caller is responsible for freeing the result.
+
+ If USER_FACING is false, the result also contains any underlying
+ CFG edge flags. e.g. " (flags FALLTHRU | DFS_BACK)". */
+
+char *
+superedge::get_description (bool user_facing) const
+{
+ pretty_printer pp;
+ dump_label_to_pp (&pp, user_facing);
+ return xstrdup (pp_formatted_text (&pp));
+}
+
+/* Implementation of superedge::dump_label_to_pp for non-switch CFG
+ superedges.
+
+ For true/false edges, print "true" or "false" to PP.
+
+ If USER_FACING is false, also print flags on the underlying CFG edge to
+ PP. */
+
+void
+cfg_superedge::dump_label_to_pp (pretty_printer *pp,
+ bool user_facing) const
+{
+ if (true_value_p ())
+ pp_printf (pp, "true");
+ else if (false_value_p ())
+ pp_printf (pp, "false");
+
+ if (user_facing)
+ return;
+
+ /* Express edge flags as a string with " | " separator.
+ e.g. " (flags FALLTHRU | DFS_BACK)". */
+ if (get_flags ())
+ {
+ pp_string (pp, " (flags ");
+ bool seen_flag = false;
+#define DEF_EDGE_FLAG(NAME,IDX) \
+ do { \
+ if (get_flags () & EDGE_##NAME) \
+ { \
+ if (seen_flag) \
+ pp_string (pp, " | "); \
+ pp_printf (pp, "%s", (#NAME)); \
+ seen_flag = true; \
+ } \
+ } while (0);
+#include "cfg-flags.def"
+#undef DEF_EDGE_FLAG
+ pp_string (pp, ")");
+ }
+
+ /* Otherwise, no label. */
+}
+
+/* Get the phi argument for PHI for this CFG edge. */
+
+tree
+cfg_superedge::get_phi_arg (const gphi *phi) const
+{
+ size_t index = m_cfg_edge->dest_idx;
+ return gimple_phi_arg_def (phi, index);
+}
+
+/* Implementation of superedge::dump_label_to_pp for CFG superedges for
+ "switch" statements.
+
+ Print "case VAL:", "case LOWER ... UPPER:", or "default:" to PP. */
+
+void
+switch_cfg_superedge::dump_label_to_pp (pretty_printer *pp,
+ bool user_facing ATTRIBUTE_UNUSED) const
+{
+ tree case_label = get_case_label ();
+ gcc_assert (TREE_CODE (case_label) == CASE_LABEL_EXPR);
+ tree lower_bound = CASE_LOW (case_label);
+ tree upper_bound = CASE_HIGH (case_label);
+ if (lower_bound)
+ {
+ pp_printf (pp, "case ");
+ dump_generic_node (pp, lower_bound, 0, (dump_flags_t)0, false);
+ if (upper_bound)
+ {
+ pp_printf (pp, " ... ");
+ dump_generic_node (pp, upper_bound, 0, (dump_flags_t)0, false);
+ }
+ pp_printf (pp, ":");
+ }
+ else
+ pp_printf (pp, "default:");
+}
+
+/* Get the case label for this "switch" superedge. */
+
+tree
+switch_cfg_superedge::get_case_label () const
+{
+ return gimple_switch_label (get_switch_stmt (), m_idx);
+}
+
+/* Implementation of superedge::dump_label_to_pp for interprocedural
+ superedges. */
+
+void
+callgraph_superedge::dump_label_to_pp (pretty_printer *pp,
+ bool user_facing ATTRIBUTE_UNUSED) const
+{
+ switch (m_kind)
+ {
+ default:
+ case SUPEREDGE_CFG_EDGE:
+ gcc_unreachable ();
+
+ case SUPEREDGE_CALL:
+ pp_printf (pp, "call");
+ break;
+
+ case SUPEREDGE_RETURN:
+ pp_printf (pp, "return");
+ break;
+
+ case SUPEREDGE_INTRAPROCEDURAL_CALL:
+ pp_printf (pp, "intraproc link");
+ break;
+ }
+}
+
+/* Get the function that was called at this interprocedural call/return
+ edge. */
+
+function *
+callgraph_superedge::get_callee_function () const
+{
+ return m_cedge->callee->get_fun ();
+}
+
+/* Get the calling function at this interprocedural call/return edge. */
+
+function *
+callgraph_superedge::get_caller_function () const
+{
+ return m_cedge->caller->get_fun ();
+}
+
+/* Get the fndecl that was called at this interprocedural call/return
+ edge. */
+
+tree
+callgraph_superedge::get_callee_decl () const
+{
+ return get_callee_function ()->decl;
+}
+
+/* Get the calling fndecl at this interprocedural call/return edge. */
+
+tree
+callgraph_superedge::get_caller_decl () const
+{
+ return get_caller_function ()->decl;
+}
+
+/* Given PARM_TO_FIND, a PARM_DECL, identify its index (writing it
+ to *OUT if OUT is non-NULL), and return the corresponding argument
+ at the callsite. */
+
+tree
+callgraph_superedge::get_arg_for_parm (tree parm_to_find,
+ callsite_expr *out) const
+{
+ gcc_assert (TREE_CODE (parm_to_find) == PARM_DECL);
+
+ tree callee = get_callee_decl ();
+
+ int i = 0;
+ for (tree iter_parm = DECL_ARGUMENTS (callee); iter_parm;
+ iter_parm = DECL_CHAIN (iter_parm), ++i)
+ {
+ if (iter_parm == parm_to_find)
+ {
+ if (out)
+ *out = callsite_expr::from_zero_based_param (i);
+ return gimple_call_arg (get_call_stmt (), i);
+ }
+ }
+
+ /* Not found. */
+ return NULL_TREE;
+}
+
+/* Look for a use of ARG_TO_FIND as an argument at this callsite.
+ If found, return the default SSA def of the corresponding parm within
+ the callee, and if OUT is non-NULL, write the index to *OUT.
+ Only the first match is handled. */
+
+tree
+callgraph_superedge::get_parm_for_arg (tree arg_to_find,
+ callsite_expr *out) const
+{
+ tree callee = get_callee_decl ();
+
+ int i = 0;
+ for (tree iter_parm = DECL_ARGUMENTS (callee); iter_parm;
+ iter_parm = DECL_CHAIN (iter_parm), ++i)
+ {
+ tree param = gimple_call_arg (get_call_stmt (), i);
+ if (arg_to_find == param)
+ {
+ if (out)
+ *out = callsite_expr::from_zero_based_param (i);
+ return ssa_default_def (get_callee_function (), iter_parm);
+ }
+ }
+
+ /* Not found. */
+ return NULL_TREE;
+}
+
+/* Map caller_expr back to an expr within the callee, or return NULL_TREE.
+ If non-NULL is returned, populate OUT. */
+
+tree
+callgraph_superedge::map_expr_from_caller_to_callee (tree caller_expr,
+ callsite_expr *out) const
+{
+ /* Is it an argument (actual param)? If so, convert to
+ parameter (formal param). */
+ tree parm = get_parm_for_arg (caller_expr, out);
+ if (parm)
+ return parm;
+ /* Otherwise try return value. */
+ if (caller_expr == gimple_call_lhs (get_call_stmt ()))
+ {
+ if (out)
+ *out = callsite_expr::from_return_value ();
+ return DECL_RESULT (get_callee_decl ());
+ }
+
+ return NULL_TREE;
+}
+
+/* Map callee_expr back to an expr within the caller, or return NULL_TREE.
+ If non-NULL is returned, populate OUT. */
+
+tree
+callgraph_superedge::map_expr_from_callee_to_caller (tree callee_expr,
+ callsite_expr *out) const
+{
+ if (callee_expr == NULL_TREE)
+ return NULL_TREE;
+
+ /* If it's a parameter (formal param), get the argument (actual param). */
+ if (TREE_CODE (callee_expr) == PARM_DECL)
+ return get_arg_for_parm (callee_expr, out);
+
+ /* Similar for the default SSA name of the PARM_DECL. */
+ if (TREE_CODE (callee_expr) == SSA_NAME
+ && SSA_NAME_IS_DEFAULT_DEF (callee_expr)
+ && TREE_CODE (SSA_NAME_VAR (callee_expr)) == PARM_DECL)
+ return get_arg_for_parm (SSA_NAME_VAR (callee_expr), out);
+
+ /* Otherwise try return value. */
+ if (callee_expr == DECL_RESULT (get_callee_decl ()))
+ {
+ if (out)
+ *out = callsite_expr::from_return_value ();
+ return gimple_call_lhs (get_call_stmt ());
+ }
+
+ return NULL_TREE;
+}
+
+#endif /* #if ENABLE_ANALYZER */
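
The first pass of the supergraph constructor above splits each basic block at call statements, so that a call ends one supernode and also marks the start of the next one (the resumption after the call returns). A minimal standalone sketch of just that splitting step, over a toy list of statement strings rather than gimple, is:

#include <cstdio>
#include <string>
#include <vector>

/* A toy "supernode": just a list of statement strings.  */
struct toy_supernode
{
  std::vector<std::string> stmts;
};

static bool
is_call (const std::string &stmt)
{
  return stmt.find ("call") == 0;
}

int
main ()
{
  /* One basic block's statements; "call foo" should split the block.  */
  std::vector<std::string> bb
    = { "x = 1", "call foo", "y = x + 2", "return y" };

  std::vector<toy_supernode> snodes (1);
  for (const std::string &stmt : bb)
    {
      snodes.back ().stmts.push_back (stmt);
      if (is_call (stmt))
        {
          /* Start a new supernode; the call appears in both halves,
             representing the call and the resumption after returning.  */
          snodes.push_back (toy_supernode ());
          snodes.back ().stmts.push_back (stmt);
        }
    }

  for (size_t i = 0; i < snodes.size (); ++i)
    {
      std::printf ("supernode %zu:\n", i);
      for (const std::string &stmt : snodes[i].stmts)
        std::printf ("  %s\n", stmt.c_str ());
    }
  return 0;
}
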
--- /dev/null
+/* "Supergraph" classes that combine CFGs and callgraph into one digraph.
+ Copyright (C) 2019-2020 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_ANALYZER_SUPERGRAPH_H
+#define GCC_ANALYZER_SUPERGRAPH_H
+
+/* Forward decls, using indentation to show inheritance. */
+
+class supergraph;
+class supernode;
+class superedge;
+ class callgraph_superedge;
+ class call_superedge;
+ class return_superedge;
+ class cfg_superedge;
+ class switch_cfg_superedge;
+class supercluster;
+class dot_annotator;
+
+class logger;
+
+/* An enum for discriminating between superedge subclasses. */
+
+enum edge_kind
+{
+ SUPEREDGE_CFG_EDGE,
+ SUPEREDGE_CALL,
+ SUPEREDGE_RETURN,
+ SUPEREDGE_INTRAPROCEDURAL_CALL
+};
+
+/* Flags for controlling the appearance of .dot dumps. */
+
+enum supergraph_dot_flags
+{
+ SUPERGRAPH_DOT_SHOW_BBS = (1 << 0)
+};
+
+/* A traits struct describing the family of node, edge and digraph
+ classes for supergraphs. */
+
+struct supergraph_traits
+{
+ typedef supernode node_t;
+ typedef superedge edge_t;
+ typedef supergraph graph_t;
+ struct dump_args_t
+ {
+ dump_args_t (enum supergraph_dot_flags flags,
+ const dot_annotator *node_annotator)
+ : m_flags (flags),
+ m_node_annotator (node_annotator)
+ {}
+
+ enum supergraph_dot_flags m_flags;
+ const dot_annotator *m_node_annotator;
+ };
+ typedef supercluster cluster_t;
+};
+
+/* A "supergraph" is a directed graph formed by joining together all CFGs,
+ linking them via interprocedural call and return edges.
+
+ Basic blocks are split at callsites, so that a call statement occurs
+ twice: once at the end of a supernode, and a second instance at the
+ start of the next supernode (to handle the return). */
+
+class supergraph : public digraph<supergraph_traits>
+{
+public:
+ supergraph (logger *logger);
+
+ supernode *get_node_for_function_entry (function *fun) const
+ {
+ return get_node_for_block (ENTRY_BLOCK_PTR_FOR_FN (fun));
+ }
+
+ supernode *get_node_for_function_exit (function *fun) const
+ {
+ return get_node_for_block (EXIT_BLOCK_PTR_FOR_FN (fun));
+ }
+
+ supernode *get_node_for_block (basic_block bb) const
+ {
+ return *const_cast <bb_to_node_t &> (m_bb_to_initial_node).get (bb);
+ }
+
+ /* Get the supernode containing the second half of the gcall *
+ at an interprocedural call, within the caller. */
+ supernode *get_caller_next_node (cgraph_edge *edge) const
+ {
+ return (*const_cast <cgraph_edge_to_node_t &>
+ (m_cgraph_edge_to_caller_next_node).get (edge));
+ }
+
+ call_superedge *get_edge_for_call (cgraph_edge *edge) const
+ {
+ return (*const_cast <cgraph_edge_to_call_superedge_t &>
+ (m_cgraph_edge_to_call_superedge).get (edge));
+ }
+
+ return_superedge *get_edge_for_return (cgraph_edge *edge) const
+ {
+ return (*const_cast <cgraph_edge_to_return_superedge_t &>
+ (m_cgraph_edge_to_return_superedge).get (edge));
+ }
+
+ superedge *get_intraprocedural_edge_for_call (cgraph_edge *edge) const
+ {
+ return (*const_cast <cgraph_edge_to_intraproc_superedge_t &>
+ (m_cgraph_edge_to_intraproc_superedge).get (edge));
+ }
+
+ cfg_superedge *get_edge_for_cfg_edge (edge e) const
+ {
+ return (*const_cast <cfg_edge_to_cfg_superedge_t &>
+ (m_cfg_edge_to_cfg_superedge).get (e));
+ }
+
+ supernode *get_supernode_for_stmt (const gimple *stmt) const
+ {
+ return (*const_cast <stmt_to_node_t &>(m_stmt_to_node_t).get
+ (const_cast <gimple *> (stmt)));
+ }
+
+ void dump_dot_to_pp (pretty_printer *pp, const dump_args_t &) const;
+ void dump_dot_to_file (FILE *fp, const dump_args_t &) const;
+ void dump_dot (const char *path, const dump_args_t &) const;
+
+ int num_nodes () const { return m_nodes.length (); }
+ int num_edges () const { return m_edges.length (); }
+
+ supernode *get_node_by_index (int idx) const
+ {
+ return m_nodes[idx];
+ }
+
+ unsigned get_num_snodes (function *fun) const
+ {
+ function_to_num_snodes_t &map
+ = const_cast <function_to_num_snodes_t &>(m_function_to_num_snodes);
+ return *map.get (fun);
+ }
+
+private:
+ supernode *add_node (function *fun, basic_block bb, gcall *returning_call,
+ gimple_seq phi_nodes);
+ cfg_superedge *add_cfg_edge (supernode *src, supernode *dest, ::edge e, int idx);
+ call_superedge *add_call_superedge (supernode *src, supernode *dest,
+ cgraph_edge *cedge);
+ return_superedge *add_return_superedge (supernode *src, supernode *dest,
+ cgraph_edge *cedge);
+
+ /* Data. */
+
+ typedef ordered_hash_map<basic_block, supernode *> bb_to_node_t;
+ bb_to_node_t m_bb_to_initial_node;
+ bb_to_node_t m_bb_to_final_node;
+
+ typedef ordered_hash_map<cgraph_edge *, supernode *> cgraph_edge_to_node_t;
+ cgraph_edge_to_node_t m_cgraph_edge_to_caller_prev_node;
+ cgraph_edge_to_node_t m_cgraph_edge_to_caller_next_node;
+
+ typedef ordered_hash_map< ::edge, cfg_superedge *>
+ cfg_edge_to_cfg_superedge_t;
+ cfg_edge_to_cfg_superedge_t m_cfg_edge_to_cfg_superedge;
+
+ typedef ordered_hash_map<cgraph_edge *, call_superedge *>
+ cgraph_edge_to_call_superedge_t;
+ cgraph_edge_to_call_superedge_t m_cgraph_edge_to_call_superedge;
+
+ typedef ordered_hash_map<cgraph_edge *, return_superedge *>
+ cgraph_edge_to_return_superedge_t;
+ cgraph_edge_to_return_superedge_t m_cgraph_edge_to_return_superedge;
+
+ typedef ordered_hash_map<cgraph_edge *, superedge *>
+ cgraph_edge_to_intraproc_superedge_t;
+ cgraph_edge_to_intraproc_superedge_t m_cgraph_edge_to_intraproc_superedge;
+
+ typedef ordered_hash_map<gimple *, supernode *> stmt_to_node_t;
+ stmt_to_node_t m_stmt_to_node_t;
+
+ typedef hash_map<function *, unsigned> function_to_num_snodes_t;
+ function_to_num_snodes_t m_function_to_num_snodes;
+};
+
+/* A node within a supergraph. */
+
+class supernode : public dnode<supergraph_traits>
+{
+ public:
+ supernode (function *fun, basic_block bb, gcall *returning_call,
+ gimple_seq phi_nodes, int index)
+ : m_fun (fun), m_bb (bb), m_returning_call (returning_call),
+ m_phi_nodes (phi_nodes), m_index (index)
+ {}
+
+ bool entry_p () const
+ {
+ return m_bb == ENTRY_BLOCK_PTR_FOR_FN (m_fun);
+ }
+
+ bool return_p () const
+ {
+ return m_bb == EXIT_BLOCK_PTR_FOR_FN (m_fun);
+ }
+
+ void dump_dot (graphviz_out *gv, const dump_args_t &args) const OVERRIDE;
+ void dump_dot_id (pretty_printer *pp) const;
+
+ location_t get_start_location () const;
+ location_t get_end_location () const;
+
+ /* Return an iterator at the start of the list of phi nodes, if any. */
+ gphi_iterator start_phis ()
+ {
+ gimple_seq *pseq = &m_phi_nodes;
+
+ /* Adapted from gsi_start_1. */
+ gphi_iterator i;
+
+ i.ptr = gimple_seq_first (*pseq);
+ i.seq = pseq;
+ i.bb = i.ptr ? gimple_bb (i.ptr) : NULL;
+
+ return i;
+ }
+
+ gimple *get_last_stmt () const
+ {
+ if (m_stmts.length () == 0)
+ return NULL;
+ return m_stmts[m_stmts.length () - 1];
+ }
+
+ gcall *get_final_call () const
+ {
+ gimple *stmt = get_last_stmt ();
+ if (stmt == NULL)
+ return NULL;
+ return dyn_cast<gcall *> (stmt);
+ }
+
+ unsigned int get_stmt_index (const gimple *stmt) const;
+
+ function * const m_fun; // alternatively could be stored as runs of indices within the supergraph
+ const basic_block m_bb;
+ gcall * const m_returning_call; // for handling the result of a returned call
+ gimple_seq m_phi_nodes; // ptr to that of the underlying BB, for the first supernode for the BB
+ auto_vec<gimple *> m_stmts;
+ const int m_index; /* unique within the supergraph as a whole. */
+};
+
+/* An abstract base class encapsulating an edge within a supergraph.
+ Edges can be CFG edges, or calls/returns for callgraph edges. */
+
+class superedge : public dedge<supergraph_traits>
+{
+ public:
+ virtual ~superedge () {}
+
+ void dump_dot (graphviz_out *gv, const dump_args_t &args) const;
+
+ virtual void dump_label_to_pp (pretty_printer *pp,
+ bool user_facing) const = 0;
+
+ enum edge_kind get_kind () const { return m_kind; }
+
+ virtual cfg_superedge *dyn_cast_cfg_superedge () { return NULL; }
+ virtual const cfg_superedge *dyn_cast_cfg_superedge () const { return NULL; }
+ virtual const switch_cfg_superedge *dyn_cast_switch_cfg_superedge () const { return NULL; }
+ virtual callgraph_superedge *dyn_cast_callgraph_superedge () { return NULL; }
+ virtual const callgraph_superedge *dyn_cast_callgraph_superedge () const { return NULL; }
+ virtual call_superedge *dyn_cast_call_superedge () { return NULL; }
+ virtual const call_superedge *dyn_cast_call_superedge () const { return NULL; }
+ virtual return_superedge *dyn_cast_return_superedge () { return NULL; }
+ virtual const return_superedge *dyn_cast_return_superedge () const { return NULL; }
+
+ ::edge get_any_cfg_edge () const;
+ cgraph_edge *get_any_callgraph_edge () const;
+
+ char *get_description (bool user_facing) const;
+
+ protected:
+ superedge (supernode *src, supernode *dest, enum edge_kind kind)
+ : dedge (src, dest),
+ m_kind (kind)
+ {}
+
+ public:
+ const enum edge_kind m_kind;
+};
+
+/* An ID representing an expression at a callsite:
+ either a parameter index, or the return value (or unknown). */
+
+class callsite_expr
+{
+ public:
+ callsite_expr () : m_val (-1) {}
+
+ static callsite_expr from_zero_based_param (int idx)
+ {
+ return callsite_expr (idx + 1);
+ }
+
+ static callsite_expr from_return_value ()
+ {
+ return callsite_expr (0);
+ }
+
+ bool param_p () const
+ {
+ return m_val > 0;
+ }
+
+ bool return_value_p () const
+ {
+ return m_val == 0;
+ }
+
+ private:
+ callsite_expr (int val) : m_val (val) {}
+
+ int m_val; /* 1-based parm, 0 for return value, or -1 for "unknown". */
+};
+
+/* A subclass of superedge with an associated callgraph edge (either a
+ call or a return). */
+
+class callgraph_superedge : public superedge
+{
+ public:
+ callgraph_superedge (supernode *src, supernode *dst, enum edge_kind kind,
+ cgraph_edge *cedge)
+ : superedge (src, dst, kind),
+ m_cedge (cedge)
+ {}
+
+ void dump_label_to_pp (pretty_printer *pp, bool user_facing) const
+ FINAL OVERRIDE;
+
+ function *get_callee_function () const;
+ function *get_caller_function () const;
+ tree get_callee_decl () const;
+ tree get_caller_decl () const;
+ gcall *get_call_stmt () const { return m_cedge->call_stmt; }
+ tree get_arg_for_parm (tree parm, callsite_expr *out) const;
+ tree get_parm_for_arg (tree arg, callsite_expr *out) const;
+ tree map_expr_from_caller_to_callee (tree caller_expr,
+ callsite_expr *out) const;
+ tree map_expr_from_callee_to_caller (tree callee_expr,
+ callsite_expr *out) const;
+
+ cgraph_edge *const m_cedge;
+};
+
+template <>
+template <>
+inline bool
+is_a_helper <const callgraph_superedge *>::test (const superedge *sedge)
+{
+ return (sedge->get_kind () == SUPEREDGE_INTRAPROCEDURAL_CALL
+ || sedge->get_kind () == SUPEREDGE_CALL
+ || sedge->get_kind () == SUPEREDGE_RETURN);
+}
+
+/* A subclass of superedge representing an interprocedural call. */
+
+class call_superedge : public callgraph_superedge
+{
+ public:
+ call_superedge (supernode *src, supernode *dst, cgraph_edge *cedge)
+ : callgraph_superedge (src, dst, SUPEREDGE_CALL, cedge)
+ {}
+
+ callgraph_superedge *dyn_cast_callgraph_superedge () FINAL OVERRIDE
+ {
+ return this;
+ }
+ const callgraph_superedge *dyn_cast_callgraph_superedge () const
+ FINAL OVERRIDE
+ {
+ return this;
+ }
+
+ call_superedge *dyn_cast_call_superedge () FINAL OVERRIDE
+ {
+ return this;
+ }
+ const call_superedge *dyn_cast_call_superedge () const FINAL OVERRIDE
+ {
+ return this;
+ }
+
+ return_superedge *get_edge_for_return (const supergraph &sg) const
+ {
+ return sg.get_edge_for_return (m_cedge);
+ }
+};
+
+template <>
+template <>
+inline bool
+is_a_helper <const call_superedge *>::test (const superedge *sedge)
+{
+ return sedge->get_kind () == SUPEREDGE_CALL;
+}
+
+/* A subclass of superedge representing an interprocedural return. */
+
+class return_superedge : public callgraph_superedge
+{
+ public:
+ return_superedge (supernode *src, supernode *dst, cgraph_edge *cedge)
+ : callgraph_superedge (src, dst, SUPEREDGE_RETURN, cedge)
+ {}
+
+ callgraph_superedge *dyn_cast_callgraph_superedge () FINAL OVERRIDE
+ {
+ return this;
+ }
+ const callgraph_superedge *dyn_cast_callgraph_superedge () const
+ FINAL OVERRIDE
+ { return this; }
+
+ return_superedge *dyn_cast_return_superedge () FINAL OVERRIDE { return this; }
+ const return_superedge *dyn_cast_return_superedge () const FINAL OVERRIDE
+ {
+ return this;
+ }
+
+ call_superedge *get_edge_for_call (const supergraph &sg) const
+ {
+ return sg.get_edge_for_call (m_cedge);
+ }
+};
+
+template <>
+template <>
+inline bool
+is_a_helper <const return_superedge *>::test (const superedge *sedge)
+{
+ return sedge->get_kind () == SUPEREDGE_RETURN;
+}
+
+/* A subclass of superedge that corresponds to a CFG edge. */
+
+class cfg_superedge : public superedge
+{
+ public:
+ cfg_superedge (supernode *src, supernode *dst, ::edge e)
+ : superedge (src, dst, SUPEREDGE_CFG_EDGE),
+ m_cfg_edge (e)
+ {}
+
+ void dump_label_to_pp (pretty_printer *pp, bool user_facing) const OVERRIDE;
+ cfg_superedge *dyn_cast_cfg_superedge () FINAL OVERRIDE { return this; }
+ const cfg_superedge *dyn_cast_cfg_superedge () const FINAL OVERRIDE { return this; }
+
+ ::edge get_cfg_edge () const { return m_cfg_edge; }
+ int get_flags () const { return m_cfg_edge->flags; }
+ int true_value_p () const { return get_flags () & EDGE_TRUE_VALUE; }
+ int false_value_p () const { return get_flags () & EDGE_FALSE_VALUE; }
+ int back_edge_p () const { return get_flags () & EDGE_DFS_BACK; }
+
+ tree get_phi_arg (const gphi *phi) const;
+
+ private:
+ const ::edge m_cfg_edge;
+};
+
+template <>
+template <>
+inline bool
+is_a_helper <const cfg_superedge *>::test (const superedge *sedge)
+{
+ return sedge->get_kind () == SUPEREDGE_CFG_EDGE;
+}
+
+/* A subclass for edges from switch statements, retaining enough
+ information to identify the pertinent case, and for adding labels
+ when rendering via graphviz. */
+
+class switch_cfg_superedge : public cfg_superedge
+{
+ public:
+ switch_cfg_superedge (supernode *src, supernode *dst, ::edge e, int idx)
+ : cfg_superedge (src, dst, e),
+ m_idx (idx)
+ {}
+
+ const switch_cfg_superedge *dyn_cast_switch_cfg_superedge () const
+ FINAL OVERRIDE
+ {
+ return this;
+ }
+
+ void dump_label_to_pp (pretty_printer *pp, bool user_facing) const
+ FINAL OVERRIDE;
+
+ gswitch *get_switch_stmt () const
+ {
+ return as_a <gswitch *> (m_src->get_last_stmt ());
+ }
+
+ tree get_case_label () const;
+
+ private:
+ const int m_idx;
+};
+
+template <>
+template <>
+inline bool
+is_a_helper <const switch_cfg_superedge *>::test (const superedge *sedge)
+{
+ return sedge->dyn_cast_switch_cfg_superedge () != NULL;
+}
+
+/* Base class for adding additional content to the .dot output
+ for a supergraph. */
+
+class dot_annotator
+{
+ public:
+ virtual ~dot_annotator () {}
+ virtual void add_node_annotations (graphviz_out *gv ATTRIBUTE_UNUSED,
+ const supernode &n ATTRIBUTE_UNUSED)
+ const {}
+ virtual void add_stmt_annotations (graphviz_out *gv ATTRIBUTE_UNUSED,
+ const gimple *stmt ATTRIBUTE_UNUSED)
+ const {}
+};
+
+extern cgraph_edge *supergraph_call_edge (function *fun, gimple *stmt);
+
+#endif /* GCC_ANALYZER_SUPERGRAPH_H */
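
supergraph, supernode and superedge above instantiate the generic digraph template from the new digraph.h via the supergraph_traits struct. A minimal standalone sketch of that traits pattern, with made-up toy_* names and none of the dump or cluster machinery, is:

#include <cstdio>
#include <vector>

/* Forward declarations for a toy node/edge/graph family.  */
struct toy_node;
struct toy_edge;
struct toy_graph;

/* The traits struct names the family members, as supergraph_traits does.  */
struct toy_traits
{
  typedef toy_node node_t;
  typedef toy_edge edge_t;
  typedef toy_graph graph_t;
};

struct toy_node
{
  int m_index;
};

struct toy_edge
{
  toy_node *m_src;
  toy_node *m_dest;
};

/* A generic digraph over any traits providing node_t and edge_t.  */
template <typename Traits>
struct toy_digraph
{
  std::vector<typename Traits::node_t *> m_nodes;
  std::vector<typename Traits::edge_t *> m_edges;

  ~toy_digraph ()
  {
    for (auto *n : m_nodes) delete n;
    for (auto *e : m_edges) delete e;
  }
};

struct toy_graph : toy_digraph<toy_traits> {};

int
main ()
{
  toy_graph g;
  toy_node *a = new toy_node {0};
  toy_node *b = new toy_node {1};
  g.m_nodes.push_back (a);
  g.m_nodes.push_back (b);
  g.m_edges.push_back (new toy_edge {a, b});
  std::printf ("%zu nodes, %zu edges\n",
               g.m_nodes.size (), g.m_edges.size ());
  return 0;
}
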
Common Report Var(flag_store_data_races) Optimization
Allow the compiler to introduce new data races on stores.
+fanalyzer
+Common Var(flag_analyzer)
+Enable static analysis pass.
+
fargument-alias
Common Ignore
Does nothing. Preserved for backward compatibility.
#endif
+/* Define 0/1 if static analyzer feature is enabled. */
+#ifndef USED_FOR_TARGET
+#undef ENABLE_ANALYZER
+#endif
+
+
/* Define if you want assertions enabled. This is a cheap check. */
#ifndef USED_FOR_TARGET
#undef ENABLE_ASSERT_CHECKING
enable_threads
enable_tls
enable_vtable_verify
+enable_analyzer
enable_objc_gc
with_dwarf2
enable_shared
--enable-tls enable or disable generation of tls code overriding
the assembler check for tls support
--enable-vtable-verify enable vtable verification feature
+ --disable-analyzer disable -fanalyzer static analyzer
--enable-objc-gc enable the use of Boehm's garbage collector with the
GNU Objective-C runtime
--disable-shared don't provide a shared libgcc
_ACEOF
+# Check whether --enable-analyzer was given.
+if test "${enable_analyzer+set}" = set; then :
+ enableval=$enable_analyzer; if test x$enable_analyzer = xno; then
+ analyzer=0
+else
+ analyzer=1
+fi
+else
+ analyzer=1
+fi
+
+
+cat >>confdefs.h <<_ACEOF
+#define ENABLE_ANALYZER $analyzer
+_ACEOF
+
+
# Check whether --enable-objc-gc was given.
if test "${enable_objc_gc+set}" = set; then :
enableval=$enable_objc_gc; if test x$enable_objc_gc = xno; then
lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
lt_status=$lt_dlunknown
cat > conftest.$ac_ext <<_LT_EOF
-#line 18941 "configure"
+#line 18960 "configure"
#include "confdefs.h"
#if HAVE_DLFCN_H
lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
lt_status=$lt_dlunknown
cat > conftest.$ac_ext <<_LT_EOF
-#line 19047 "configure"
+#line 19066 "configure"
#include "confdefs.h"
#if HAVE_DLFCN_H
"depdir":C) $SHELL $ac_aux_dir/mkinstalldirs $DEPDIR ;;
"gccdepdir":C)
${CONFIG_SHELL-/bin/sh} $ac_aux_dir/mkinstalldirs build/$DEPDIR
- for lang in $subdirs c-family common
+ for lang in $subdirs c-family common analyzer
do
${CONFIG_SHELL-/bin/sh} $ac_aux_dir/mkinstalldirs $lang/$DEPDIR
done ;;
AC_DEFINE_UNQUOTED(ENABLE_VTABLE_VERIFY, $vtable_verify,
[Define 0/1 if vtable verification feature is enabled.])
+AC_ARG_ENABLE(analyzer,
+[AS_HELP_STRING([--disable-analyzer],
+ [disable -fanalyzer static analyzer])],
+if test x$enable_analyzer = xno; then
+ analyzer=0
+else
+ analyzer=1
+fi,
+analyzer=1)
+AC_DEFINE_UNQUOTED(ENABLE_ANALYZER, $analyzer,
+[Define 0/1 if static analyzer feature is enabled.])
+
AC_ARG_ENABLE(objc-gc,
[AS_HELP_STRING([--enable-objc-gc],
[enable the use of Boehm's garbage collector with
ZW_CREATE_DEPDIR
AC_CONFIG_COMMANDS([gccdepdir],[
${CONFIG_SHELL-/bin/sh} $ac_aux_dir/mkinstalldirs build/$DEPDIR
- for lang in $subdirs c-family common
+ for lang in $subdirs c-family common analyzer
do
${CONFIG_SHELL-/bin/sh} $ac_aux_dir/mkinstalldirs $lang/$DEPDIR
done], [subdirs="$subdirs" ac_aux_dir=$ac_aux_dir DEPDIR=$DEPDIR])
--- /dev/null
+/* Template classes for directed graphs.
+ Copyright (C) 2019-2020 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "diagnostic.h"
+#include "graphviz.h"
+#include "digraph.h"
+#include "shortest-paths.h"
+#include "selftest.h"
+
+#if CHECKING_P
+
+namespace selftest {
+
+/* A family of digraph classes for writing selftests. */
+
+struct test_node;
+struct test_edge;
+struct test_graph;
+struct test_dump_args_t {};
+struct test_cluster;
+
+struct test_graph_traits
+{
+ typedef test_node node_t;
+ typedef test_edge edge_t;
+ typedef test_graph graph_t;
+ typedef test_dump_args_t dump_args_t;
+ typedef test_cluster cluster_t;
+};
+
+struct test_node : public dnode<test_graph_traits>
+{
+ test_node (const char *name, int index) : m_name (name), m_index (index) {}
+ void dump_dot (graphviz_out *, const dump_args_t &) const OVERRIDE
+ {
+ }
+
+ const char *m_name;
+ int m_index;
+};
+
+struct test_edge : public dedge<test_graph_traits>
+{
+ test_edge (node_t *src, node_t *dest)
+ : dedge (src, dest)
+ {}
+
+ void dump_dot (graphviz_out *gv, const dump_args_t &) const OVERRIDE
+ {
+ gv->println ("%s -> %s;", m_src->m_name, m_dest->m_name);
+ }
+};
+
+struct test_graph : public digraph<test_graph_traits>
+{
+ test_node *add_test_node (const char *name)
+ {
+ test_node *result = new test_node (name, m_nodes.length ());
+ add_node (result);
+ return result;
+ }
+
+ test_edge *add_test_edge (test_node *src, test_node *dst)
+ {
+ test_edge *result = new test_edge (src, dst);
+ add_edge (result);
+ return result;
+ }
+};
+
+struct test_cluster : public cluster<test_graph_traits>
+{
+};
+
+struct test_path
+{
+ auto_vec<const test_edge *> m_edges;
+};
+
+/* Smoketest of digraph dumping. */
+
+static void
+test_dump_to_dot ()
+{
+ test_graph g;
+ test_node *a = g.add_test_node ("a");
+ test_node *b = g.add_test_node ("b");
+ g.add_test_edge (a, b);
+
+ pretty_printer pp;
+ pp.buffer->stream = NULL;
+ test_dump_args_t dump_args;
+ g.dump_dot_to_pp (&pp, NULL, dump_args);
+
+ ASSERT_STR_CONTAINS (pp_formatted_text (&pp),
+ "a -> b;\n");
+}
+
+/* Test shortest paths from A in this digraph,
+ where edges run top-to-bottom if not otherwise labeled:
+
+ A
+ / \
+ B C-->D
+ | |
+ E |
+ \ /
+ F. */
+
+static void
+test_shortest_paths ()
+{
+ test_graph g;
+ test_node *a = g.add_test_node ("a");
+ test_node *b = g.add_test_node ("b");
+  test_node *c = g.add_test_node ("c");
+ test_node *d = g.add_test_node ("d");
+ test_node *e = g.add_test_node ("e");
+ test_node *f = g.add_test_node ("f");
+
+ test_edge *ab = g.add_test_edge (a, b);
+ test_edge *ac = g.add_test_edge (a, c);
+ test_edge *cd = g.add_test_edge (c, d);
+ test_edge *be = g.add_test_edge (b, e);
+ g.add_test_edge (e, f);
+ test_edge *cf = g.add_test_edge (c, f);
+
+ shortest_paths<test_graph_traits, test_path> sp (g, a);
+
+ test_path path_to_a = sp.get_shortest_path (a);
+ ASSERT_EQ (path_to_a.m_edges.length (), 0);
+
+ test_path path_to_b = sp.get_shortest_path (b);
+ ASSERT_EQ (path_to_b.m_edges.length (), 1);
+ ASSERT_EQ (path_to_b.m_edges[0], ab);
+
+ test_path path_to_c = sp.get_shortest_path (c);
+ ASSERT_EQ (path_to_c.m_edges.length (), 1);
+ ASSERT_EQ (path_to_c.m_edges[0], ac);
+
+ test_path path_to_d = sp.get_shortest_path (d);
+ ASSERT_EQ (path_to_d.m_edges.length (), 2);
+ ASSERT_EQ (path_to_d.m_edges[0], ac);
+ ASSERT_EQ (path_to_d.m_edges[1], cd);
+
+ test_path path_to_e = sp.get_shortest_path (e);
+ ASSERT_EQ (path_to_e.m_edges.length (), 2);
+ ASSERT_EQ (path_to_e.m_edges[0], ab);
+ ASSERT_EQ (path_to_e.m_edges[1], be);
+
+ test_path path_to_f = sp.get_shortest_path (f);
+ ASSERT_EQ (path_to_f.m_edges.length (), 2);
+ ASSERT_EQ (path_to_f.m_edges[0], ac);
+ ASSERT_EQ (path_to_f.m_edges[1], cf);
+}
+
+/* Run all of the selftests within this file. */
+
+void
+digraph_cc_tests ()
+{
+ test_dump_to_dot ();
+ test_shortest_paths ();
+}
+
+} // namespace selftest
+
+#endif /* #if CHECKING_P */
--- /dev/null
+/* Template classes for directed graphs.
+ Copyright (C) 2019-2020 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_DIGRAPH_H
+#define GCC_DIGRAPH_H
+
+#include "diagnostic.h"
+#include "tree-diagnostic.h" /* for default_tree_printer. */
+#include "graphviz.h"
+
+/* Templates for a family of classes: digraph, node, edge, and cluster.
+ This assumes a traits type with the following typedefs:
+ node_t: the node class
+ edge_t: the edge class
+ dump_args_t: additional args for dot-dumps
+ cluster_t: the cluster class (for use when generating .dot files).
+
+ Using a template allows for typesafe nodes and edges: a node's
+ predecessor and successor edges can be of a node-specific edge
+ subclass, without needing casting. */
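+
+/* As an illustrative sketch (the "my_" names below are placeholders,
+   not part of this API; see the selftests in digraph.cc for a real
+   instance), a client might provide:
+
+     struct my_node; struct my_edge; struct my_cluster;
+     struct my_dump_args {};
+
+     struct my_traits
+     {
+       typedef my_node node_t;
+       typedef my_edge edge_t;
+       typedef my_dump_args dump_args_t;
+       typedef my_cluster cluster_t;
+     };
+
+   with my_node derived from dnode<my_traits>, my_edge from
+   dedge<my_traits>, and the graph itself from digraph<my_traits>,
+   each implementing the dump_dot vfunc.  */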
+
+/* Abstract base class for a node in a directed graph. */
+
+template <typename GraphTraits>
+class dnode
+{
+ public:
+ typedef typename GraphTraits::edge_t edge_t;
+ typedef typename GraphTraits::dump_args_t dump_args_t;
+
+ virtual ~dnode () {}
+ virtual void dump_dot (graphviz_out *gv, const dump_args_t &args) const = 0;
+
+ auto_vec<edge_t *> m_preds;
+ auto_vec<edge_t *> m_succs;
+};
+
+/* Abstract base class for an edge in a directed graph. */
+
+template <typename GraphTraits>
+class dedge
+{
+ public:
+ typedef typename GraphTraits::node_t node_t;
+ typedef typename GraphTraits::dump_args_t dump_args_t;
+
+ dedge (node_t *src, node_t *dest)
+ : m_src (src), m_dest (dest) {}
+
+ virtual ~dedge () {}
+
+ virtual void dump_dot (graphviz_out *gv, const dump_args_t &args) const = 0;
+
+ node_t *const m_src;
+ node_t *const m_dest;
+};
+
+/* Abstract base class for a directed graph.
+ This class maintains the vectors of nodes and edges,
+ and owns the nodes and edges. */
+
+template <typename GraphTraits>
+class digraph
+{
+ public:
+ typedef typename GraphTraits::node_t node_t;
+ typedef typename GraphTraits::edge_t edge_t;
+ typedef typename GraphTraits::dump_args_t dump_args_t;
+ typedef typename GraphTraits::cluster_t cluster_t;
+
+ digraph () {}
+ virtual ~digraph () {}
+
+ void dump_dot_to_pp (pretty_printer *pp,
+ cluster_t *root_cluster,
+ const dump_args_t &args) const;
+ void dump_dot_to_file (FILE *fp,
+ cluster_t *root_cluster,
+ const dump_args_t &args) const;
+ void dump_dot (const char *path,
+ cluster_t *root_cluster,
+ const dump_args_t &args) const;
+
+ void add_node (node_t *node);
+ void add_edge (edge_t *edge);
+
+ auto_delete_vec<node_t> m_nodes;
+ auto_delete_vec<edge_t> m_edges;
+};
+
+/* Abstract base class for splitting dnodes into hierarchical clusters
+ in the generated .dot file.
+
+ See "Subgraphs and Clusters" within
+ https://www.graphviz.org/doc/info/lang.html
+ and e.g.
+ https://graphviz.gitlab.io/_pages/Gallery/directed/cluster.html
+
+ If a root_cluster is passed to dump_dot*, then all nodes will be
+ added to it at the start of dumping, via calls to add_node.
+
+ The root cluster can organize the nodes into a hierarchy of
+ child clusters.
+
+ After all nodes are added to the root cluster, dump_dot will then
+ be called on it (and not on the nodes themselves). */
+
+template <typename GraphTraits>
+class cluster
+{
+ public:
+ typedef typename GraphTraits::node_t node_t;
+ typedef typename GraphTraits::dump_args_t dump_args_t;
+
+ virtual ~cluster () {}
+
+ virtual void add_node (node_t *node) = 0;
+
+ /* Recursively dump the cluster, all nodes, and child clusters. */
+ virtual void dump_dot (graphviz_out *gv, const dump_args_t &) const = 0;
+};
+
+/* Write .dot information for this graph to PP, passing ARGS to the nodes
+ and edges.
+ If ROOT_CLUSTER is non-NULL, use it to organize the nodes into clusters. */
+
+template <typename GraphTraits>
+inline void
+digraph<GraphTraits>::dump_dot_to_pp (pretty_printer *pp,
+ cluster_t *root_cluster,
+ const dump_args_t &args) const
+{
+ graphviz_out gv (pp);
+
+ pp_string (pp, "digraph \"");
+ pp_string (pp, "base");
+ pp_string (pp, "\" {\n");
+
+ gv.indent ();
+
+ pp_string (pp, "overlap=false;\n");
+ pp_string (pp, "compound=true;\n");
+
+ /* If using clustering, emit all nodes via clusters. */
+ if (root_cluster)
+ {
+ int i;
+ node_t *n;
+ FOR_EACH_VEC_ELT (m_nodes, i, n)
+ root_cluster->add_node (n);
+ root_cluster->dump_dot (&gv, args);
+ }
+ else
+ {
+ /* Otherwise, display all nodes at top level. */
+ int i;
+ node_t *n;
+ FOR_EACH_VEC_ELT (m_nodes, i, n)
+ n->dump_dot (&gv, args);
+ }
+
+ /* Edges. */
+ int i;
+ edge_t *e;
+ FOR_EACH_VEC_ELT (m_edges, i, e)
+ e->dump_dot (&gv, args);
+
+ /* Terminate "digraph" */
+ gv.outdent ();
+ pp_string (pp, "}");
+ pp_newline (pp);
+}
+
+/* Write .dot information for this graph to FP, passing ARGS to the nodes
+ and edges.
+ If ROOT_CLUSTER is non-NULL, use it to organize the nodes into clusters. */
+
+template <typename GraphTraits>
+inline void
+digraph<GraphTraits>::dump_dot_to_file (FILE *fp,
+ cluster_t *root_cluster,
+ const dump_args_t &args) const
+{
+ pretty_printer pp;
+ // TODO:
+ pp_format_decoder (&pp) = default_tree_printer;
+ pp.buffer->stream = fp;
+ dump_dot_to_pp (&pp, root_cluster, args);
+ pp_flush (&pp);
+}
+
+/* Write .dot information for this graph to a file at PATH, passing ARGS
+ to the nodes and edges.
+ If ROOT_CLUSTER is non-NULL, use it to organize the nodes into clusters. */
+
+template <typename GraphTraits>
+inline void
+digraph<GraphTraits>::dump_dot (const char *path,
+ cluster_t *root_cluster,
+ const dump_args_t &args) const
+{
+ FILE *fp = fopen (path, "w");
+ dump_dot_to_file (fp, root_cluster, args);
+ fclose (fp);
+}
+
+/* Add NODE to this DIGRAPH, taking ownership. */
+
+template <typename GraphTraits>
+inline void
+digraph<GraphTraits>::add_node (node_t *node)
+{
+ m_nodes.safe_push (node);
+}
+
+/* Add EDGE to this digraph, and to the preds/succs of its endpoints.
+ Take ownership of EDGE. */
+
+template <typename GraphTraits>
+inline void
+digraph<GraphTraits>::add_edge (edge_t *edge)
+{
+ m_edges.safe_push (edge);
+ edge->m_dest->m_preds.safe_push (edge);
+ edge->m_src->m_succs.safe_push (edge);
+}
+
+#endif /* GCC_DIGRAPH_H */
--- /dev/null
+@c Copyright (C) 2019 Free Software Foundation, Inc.
+@c This is part of the GCC manual.
+@c For copying conditions, see the file gcc.texi.
+@c Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+@node Static Analyzer
+@chapter Static Analyzer
+@cindex analyzer
+@cindex static analysis
+@cindex static analyzer
+
+@menu
+* Analyzer Internals:: Analyzer Internals
+* Debugging the Analyzer:: Useful debugging tips
+@end menu
+
+@node Analyzer Internals
+@section Analyzer Internals
+@cindex analyzer, internals
+@cindex static analyzer, internals
+
+@subsection Overview
+
+The analyzer implementation works on the gimple-SSA representation.
+(I chose this in the hopes of making it easy to work with LTO to
+do whole-program analysis).
+
+The implementation is read-only: it doesn't attempt to change anything,
+just emit warnings.
+
+First, we build a @code{supergraph} which combines the callgraph and all
+of the CFGs into a single directed graph, with both interprocedural and
+intraprocedural edges. The nodes and edges in the supergraph are called
+``supernodes'' and ``superedges'', and often referred to in code as
+@code{snodes} and @code{sedges}. Basic blocks in the CFGs are split at
+interprocedural calls, so there can be more than one supernode per
+basic block. Most statements will be in just one supernode, but a call
+statement can appear in two supernodes: at the end of one for the call,
+and again at the start of another for the return.
+
+The supergraph can be seen using @option{-fdump-analyzer-supergraph}.
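+
+For example, a hypothetical invocation along the lines of:
+
+@smallexample
+gcc -fanalyzer -fdump-analyzer-supergraph -c input.c
+dot -Tsvg input.c.supergraph.dot -o supergraph.svg
+@end smallexample
+
+writes the graph to a @file{.supergraph.dot} dump file and renders it
+with GraphViz (the exact dump filename shown here is illustrative; see
+the option's entry in @file{invoke.texi}).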
+
+We then build an @code{analysis_plan} which walks the callgraph to
+determine which calls might be suitable for being summarized (rather
+than fully explored) and thus in what order to explore the functions.
+
+Next is the heart of the analyzer: we use a worklist to explore state
+within the supergraph, building an "exploded graph".
+Nodes in the exploded graph correspond to <point,@w{ }state> pairs, as in
+ "Precise Interprocedural Dataflow Analysis via Graph Reachability"
+ (Thomas Reps, Susan Horwitz and Mooly Sagiv).
+
+We reuse nodes for <point, state> pairs we've already seen, and avoid
+tracking state too closely, so that (hopefully) we rapidly converge
+on a final exploded graph, and terminate the analysis. We also bail
+out if the number of exploded <end-of-basic-block, state> nodes gets
+larger than a particular multiple of the total number of basic blocks
+(to ensure termination in the face of pathological state-explosion
+cases, or bugs). We also stop exploring a point once we hit a limit
+of states for that point.
+
+We can identify problems directly when processing a <point,@w{ }state>
+instance. For example, if we're finding the successors of
+
+@smallexample
+ <point: before-stmt: "free (ptr);",
+ state: @{"ptr": freed@}>
+@end smallexample
+
+then we can detect a double-free of "ptr". We can then emit a path
+to reach the problem by finding the simplest route through the graph.
+
+Program points in the analysis are much more fine-grained than in the
+CFG and supergraph, with points (and thus potentially exploded nodes)
+for various events, including before individual statements.
+By default the exploded graph merges multiple consecutive statements
+in a supernode into one exploded edge to minimize the size of the
+exploded graph. This can be suppressed via
+@option{-fanalyzer-fine-grained}.
+The fine-grained approach seems to make things simpler and more debuggable
+than other approaches I tried, in that each point is responsible for one
+thing.
+
+Program points in the analysis also have a "call string" identifying the
+stack of callsites below them, so that paths in the exploded graph
+correspond to interprocedurally valid paths: we always return to the
+correct call site, propagating state information accordingly.
+We avoid infinite recursion by stopping the analysis if a callsite
+appears more than @code{analyzer-max-recursion-depth} times in a call string
+(defaulting to 2).
+
+@subsection Graphs
+
+Nodes and edges in the exploded graph are called ``exploded nodes'' and
+``exploded edges'' and often referred to in the code as
+@code{enodes} and @code{eedges} (especially when distinguishing them
+from the @code{snodes} and @code{sedges} in the supergraph).
+
+Each graph numbers its nodes, giving unique identifiers - supernodes
+are referred to throughout dumps in the form @samp{SN: @var{index}} and
+exploded nodes in the form @samp{EN: @var{index}} (e.g. @samp{SN: 2} and
+@samp{EN: 29}).
+
+The supergraph can be seen using @option{-fdump-analyzer-supergraph}.
+
+The exploded graph can be seen using @option{-fdump-analyzer-exploded-graph}
+and other dump options. Exploded nodes are color-coded in the .dot output
+based on state-machine states to make it easier to see state changes at
+a glance.
+
+@subsection State Tracking
+
+There's a tension between:
+@itemize @bullet
+@item
+precision of analysis in the straight-line case, vs
+@item
+exponential blow-up in the face of control flow.
+@end itemize
+
+For example, in general, given this CFG:
+
+@smallexample
+ A
+ / \
+ B C
+ \ /
+ D
+ / \
+ E F
+ \ /
+ G
+@end smallexample
+
+we want to avoid differences in state-tracking in B and C from
+leading to blow-up. If we don't prevent state blowup, we end up
+with exponential growth of the exploded graph like this:
+
+@smallexample
+
+ 1:A
+ / \
+ / \
+ / \
+ 2:B 3:C
+ | |
+ 4:D 5:D (2 exploded nodes for D)
+ / \ / \
+ 6:E 7:F 8:E 9:F
+ | | | |
+ 10:G 11:G 12:G 13:G (4 exploded nodes for G)
+
+@end smallexample
+
+Similar issues arise with loops.
+
+To prevent this, we follow various approaches:
+
+@enumerate a
+@item
+state pruning, which tries to discard state that won't be relevant
+later on within the function.
+This can be disabled via @option{-fno-analyzer-state-purge}.
+
+@item
+state merging. We can try to find the commonality between two
+program_state instances to make a third, simpler program_state.
+We have two strategies here:
+
+ @enumerate
+ @item
+ the worklist keeps new nodes for the same program_point together,
+ and tries to merge them before processing, and thus before they have
+ successors. Hence, in the above, the two nodes for D (4 and 5) reach
+ the front of the worklist together, and we create a node for D with
+ the merger of the incoming states.
+
+ @item
+ try merging with the state of existing enodes for the program_point
+ (which may have already been explored). There will be duplication,
+ but only one set of duplication; subsequent duplicates are more likely
+ to hit the cache. In particular, (hopefully) all merger chains are
+ finite, and so we guarantee termination.
+ This is intended to help with loops: we ought to explore the first
+ iteration, and then have a "subsequent iterations" exploration,
+ which uses a state merged from that of the first, to be more abstract.
+ @end enumerate
+
+We avoid merging pairs of states that have state-machine differences,
+as these are the kinds of differences that are likely to be most
+interesting. So, for example, given:
+
+@smallexample
+ if (condition)
+ ptr = malloc (size);
+ else
+ ptr = local_buf;
+
+ .... do things with 'ptr'
+
+ if (condition)
+ free (ptr);
+
+ ...etc
+@end smallexample
+
+then we end up with an exploded graph that looks like this:
+
+@smallexample
+
+ if (condition)
+ / T \ F
+ --------- ----------
+ / \
+ ptr = malloc (size) ptr = local_buf
+ | |
+ copy of copy of
+ "do things with 'ptr'" "do things with 'ptr'"
+ with ptr: heap-allocated with ptr: stack-allocated
+ | |
+ if (condition) if (condition)
+ | known to be T | known to be F
+ free (ptr); |
+ \ /
+ -----------------------------
+ | ('ptr' is pruned, so states can be merged)
+ etc
+
+@end smallexample
+
+where some duplication has occurred, but only for the places where the
+different paths are worth exploring separately.
+
+Merging can be disabled via @option{-fno-analyzer-state-merge}.
+@end enumerate
+
+@subsection Region Model
+
+Part of the state stored at a @code{exploded_node} is a @code{region_model}.
+This is an implementation of the region-based ternary model described in
+@url{http://lcs.ios.ac.cn/~xuzb/canalyze/memmodel.pdf,
+"A Memory Model for Static Analysis of C Programs"}
+(Zhongxing Xu, Ted Kremenek, and Jian Zhang).
+
+A @code{region_model} encapsulates a representation of the state of
+memory, with a tree of @code{region} instances, along with their associated
+values. The representation is graph-like because values can be pointers
+to regions. It also stores a constraint_manager, capturing relationships
+between the values.
+
+Because each node in the @code{exploded_graph} has a @code{region_model},
+and each of the latter is graph-like, the @code{exploded_graph} is in some
+ways a graph of graphs.
+
+Here's an example of printing a @code{region_model}, showing the ASCII-art
+used to visualize the region hierarchy (colorized when printing to stderr):
+
+@smallexample
+(gdb) call debug (*this)
+r0: @{kind: 'root', parent: null, sval: null@}
+|-stack: r1: @{kind: 'stack', parent: r0, sval: sv1@}
+| |: sval: sv1: @{poisoned: uninit@}
+| |-frame for 'test': r2: @{kind: 'frame', parent: r1, sval: null, map: @{'ptr_3': r3@}, function: 'test', depth: 0@}
+| | `-'ptr_3': r3: @{kind: 'map', parent: r2, sval: sv3, type: 'void *', map: @{@}@}
+| | |: sval: sv3: @{type: 'void *', unknown@}
+| | |: type: 'void *'
+| `-frame for 'calls_malloc': r4: @{kind: 'frame', parent: r1, sval: null, map: @{'result_3': r7, '_4': r8, '<anonymous>': r5@}, function: 'calls_malloc', depth: 1@}
+| |-'<anonymous>': r5: @{kind: 'map', parent: r4, sval: sv4, type: 'void *', map: @{@}@}
+| | |: sval: sv4: @{type: 'void *', &r6@}
+| | |: type: 'void *'
+| |-'result_3': r7: @{kind: 'map', parent: r4, sval: sv4, type: 'void *', map: @{@}@}
+| | |: sval: sv4: @{type: 'void *', &r6@}
+| | |: type: 'void *'
+| `-'_4': r8: @{kind: 'map', parent: r4, sval: sv4, type: 'void *', map: @{@}@}
+| |: sval: sv4: @{type: 'void *', &r6@}
+| |: type: 'void *'
+`-heap: r9: @{kind: 'heap', parent: r0, sval: sv2@}
+ |: sval: sv2: @{poisoned: uninit@}
+ `-r6: @{kind: 'symbolic', parent: r9, sval: null, map: @{@}@}
+svalues:
+ sv0: @{type: 'size_t', '1024'@}
+ sv1: @{poisoned: uninit@}
+ sv2: @{poisoned: uninit@}
+ sv3: @{type: 'void *', unknown@}
+ sv4: @{type: 'void *', &r6@}
+constraint manager:
+ equiv classes:
+ ec0: @{sv0 == '1024'@}
+ ec1: @{sv4@}
+ constraints:
+@end smallexample
+
+This is the state at the point of returning from @code{calls_malloc} back
+to @code{test} in the following:
+
+@smallexample
+void *
+calls_malloc (void)
+@{
+ void *result = malloc (1024);
+ return result;
+@}
+
+void test (void)
+@{
+ void *ptr = calls_malloc ();
+ /* etc. */
+@}
+@end smallexample
+
+The ``root'' region (``r0'') has a ``stack'' child (``r1''), with two
+children: a frame for @code{test} (``r2''), and a frame for
+@code{calls_malloc} (``r4''). These frame regions have child regions for
+storing their local variables.  For example, the region for the return
+value and various other regions within the ``calls_malloc'' frame all have
+value ``sv4'', a pointer to a heap-allocated region ``r6''. Within the parent
+frame, @code{ptr_3} has value ``sv3'', an unknown @code{void *}.
+
+@subsection Analyzer Paths
+
+We need to explain to the user what the problem is, and to persuade them
+that there really is a problem. Hence having a @code{diagnostic_path}
+isn't just an incidental detail of the analyzer; it's required.
+
+Paths ought to be:
+@itemize @bullet
+@item
+interprocedurally-valid
+@item
+feasible
+@end itemize
+
+Without state-merging, all paths in the exploded graph are feasible
+(in terms of constraints being satisfied).
+With state-merging, paths in the exploded graph can be infeasible.
+
+We collate warnings and only emit them for the simplest path
+e.g. for a bug in a utility function, with lots of routes to calling it,
+we only emit the simplest path (which could be intraprocedural, if
+it can be reproduced without a caller). We apply a check that
+each duplicate warning's shortest path is feasible, rejecting any
+warnings for which the shortest path is infeasible (which could lead to
+false negatives).
+
+We use the shortest feasible @code{exploded_path} through the
+@code{exploded_graph} (a list of @code{exploded_edge *}) to build a
+@code{diagnostic_path} (a list of events for the diagnostic subsystem) -
+specifically a @code{checker_path}.
+
+Having built the @code{checker_path}, we prune it to try to eliminate
+events that aren't relevant, to minimize how much the user has to read.
+
+After pruning, we notify each event in the path of its ID and record the
+IDs of interesting events, allowing for events to refer to other events
+in their descriptions. The @code{pending_diagnostic} class has various
+vfuncs to support emitting more precise descriptions, so that e.g.
+
+@itemize @bullet
+@item
+a deref-of-unchecked-malloc diagnostic might use:
+@smallexample
+ returning possibly-NULL pointer to 'make_obj' from 'allocator'
+@end smallexample
+for a @code{return_event} to make it clearer how the unchecked value moves
+from callee back to caller
+@item
+a double-free diagnostic might use:
+@smallexample
+ second 'free' here; first 'free' was at (3)
+@end smallexample
+and a use-after-free might use
+@smallexample
+ use after 'free' here; memory was freed at (2)
+@end smallexample
+@end itemize
+
+At this point we can emit the diagnostic.
+
+@subsection Limitations
+
+@itemize @bullet
+@item
+Only for C so far
+@item
+The implementation of call summaries is currently very simplistic.
+@item
+Lack of function pointer analysis
+@item
+The region model code creates lots of little mutable objects at each
+@code{region_model} (and thus per @code{exploded_node}) rather than
+sharing immutable objects and having the mutable state in the
+@code{program_state} or @code{region_model}. The latter approach might be
+more efficient, and might avoid dealing with IDs rather than pointers
+(which requires us to impose an ordering to get meaningful equality).
+@item
+The region model code doesn't yet support @code{memcpy}. At the
+gimple-ssa level these have been optimized to statements like this:
+@smallexample
+_10 = MEM <long unsigned int> [(char * @{ref-all@})&c];
+MEM <long unsigned int> [(char * @{ref-all@})&d] = _10;
+@end smallexample
+Perhaps they could be supported via a new @code{compound_svalue} type.
+@item
+There are various other limitations in the region model (grep for TODO/xfail
+in the testsuite).
+@item
+The constraint_manager's implementation of transitivity is currently too
+expensive to enable by default and so must be manually enabled via
+@option{-fanalyzer-transitivity}.
+@item
+The checkers are currently hardcoded and don't allow for user extensibility
+(e.g. adding allocate/release pairs).
+@item
+Although the analyzer's test suite has a proof-of-concept test case for
+LTO, LTO support hasn't had extensive testing. There are various
+lang-specific things in the analyzer that assume C rather than LTO.
+For example, SSA names are printed to the user in ``raw'' form, rather
+than printing the underlying variable name.
+@end itemize
+
+Some ideas for other checkers
+@itemize @bullet
+@item
+File-descriptor-based APIs
+@item
+Linux kernel internal APIs
+@item
+Signal handling
+@end itemize
+
+@node Debugging the Analyzer
+@section Debugging the Analyzer
+@cindex analyzer, debugging
+@cindex static analyzer, debugging
+
+@subsection Special Functions for Debugging the Analyzer
+
+The analyzer recognizes various special functions by name, for use
+in debugging the analyzer. Declarations can be seen in the testsuite
+in @file{analyzer-decls.h}. None of these functions are actually
+implemented.
+
+Add:
+@smallexample
+ __analyzer_break ();
+@end smallexample
+to the source being analyzed to trigger a breakpoint in the analyzer when
+that source is reached. By putting a series of these in the source, it's
+much easier to effectively step through the program state as it's analyzed.
+
+@smallexample
+__analyzer_dump ();
+@end smallexample
+
+will dump copious information about the analyzer's state each time it
+reaches the call in its traversal of the source.
+
+@smallexample
+__analyzer_dump_path ();
+@end smallexample
+
+will emit a placeholder ``note'' diagnostic with a path to that call site,
+if the analyzer finds a feasible path to it.
+
+The builtin @code{__analyzer_dump_exploded_nodes} will dump information
+after analysis on all of the exploded nodes at that program point:
+
+@smallexample
+ __analyzer_dump_exploded_nodes (0);
+@end smallexample
+
+will dump just the number of nodes, and their IDs.
+
+@smallexample
+ __analyzer_dump_exploded_nodes (1);
+@end smallexample
+
+will also dump all of the states within those nodes.
+
+@smallexample
+ __analyzer_dump_region_model ();
+@end smallexample
+will dump the region_model's state to stderr.
+
+@smallexample
+__analyzer_eval (expr);
+@end smallexample
+will emit a warning with text "TRUE", "FALSE" or "UNKNOWN" based on the
+truthfulness of the argument. This is useful for writing DejaGnu tests.
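+
+For instance, a test might contain something like the following sketch,
+where the DejaGnu directive checks for the ``TRUE'' text:
+
+@smallexample
+  int i = 42;
+  __analyzer_eval (i == 42); /* @{ dg-warning "TRUE" @} */
+@end smallexample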
+
+
+@subsection Other Debugging Techniques
+
+One approach when tracking down where a particular bogus state is
+introduced into the @code{exploded_graph} is to add custom code to
+@code{region_model::validate}.
+
+For example, this custom code (added to @code{region_model::validate})
+breaks with an assertion failure when a variable called @code{ptr}
+acquires a value that's unknown, using
+@code{region_model::get_value_by_name} to locate the variable
+
+@smallexample
+ /* Find a variable matching "ptr". */
+ svalue_id sid = get_value_by_name ("ptr");
+ if (!sid.null_p ())
+ @{
+ svalue *sval = get_svalue (sid);
+ gcc_assert (sval->get_kind () != SK_UNKNOWN);
+ @}
+@end smallexample
+
+making it easier to investigate further in a debugger when this occurs.
* LTO:: Using Link-Time Optimization.
* Match and Simplify:: How to write expression simplification patterns for GIMPLE and GENERIC
+* Static Analyzer:: Working with the static analyzer.
* User Experience Guidelines:: Guidelines for implementing diagnostics and options.
* Funding:: How to help assure funding for free software.
* GNU Project:: The GNU Project and GNU/Linux.
@include plugins.texi
@include lto.texi
@include match-and-simplify.texi
+@include analyzer.texi
@include ux.texi
@include funding.texi
* Diagnostic Message Formatting Options:: Controlling how diagnostics should
be formatted.
* Warning Options:: How picky should the compiler be?
+* Static Analyzer Options:: Options that enable and control the static analyzer.
* Debugging Options:: Producing debuggable code.
* Optimize Options:: How much optimization?
* Instrumentation Options:: Enabling profiling and extra run-time error checking.
@item Warning Options
@xref{Warning Options,,Options to Request or Suppress Warnings}.
-@gccoptlist{-fsyntax-only -fmax-errors=@var{n} -Wpedantic @gol
+@gccoptlist{-fanalyzer -fsyntax-only -fmax-errors=@var{n} -Wpedantic @gol
-pedantic-errors @gol
-w -Wextra -Wall -Waddress -Waddress-of-packed-member @gol
-Waggregate-return -Waligned-new @gol
-Walloc-zero -Walloc-size-larger-than=@var{byte-size} @gol
-Walloca -Walloca-larger-than=@var{byte-size} @gol
--Wno-aggressive-loop-optimizations -Warray-bounds -Warray-bounds=@var{n} @gol
+-Wno-aggressive-loop-optimizations @gol
+-Wno-analyzer-double-fclose @gol
+-Wno-analyzer-double-free @gol
+-Wno-analyzer-exposure-through-output-file @gol
+-Wno-analyzer-file-leak @gol
+-Wno-analyzer-free-of-non-heap @gol
+-Wno-analyzer-malloc-leak @gol
+-Wno-analyzer-possible-null-argument @gol
+-Wno-analyzer-possible-null-dereference @gol
+-Wno-analyzer-null-argument @gol
+-Wno-analyzer-null-dereference @gol
+-Wno-analyzer-stale-setjmp-buffer @gol
+-Wno-analyzer-tainted-array-index @gol
+-Wno-analyzer-unsafe-call-within-signal-handler @gol
+-Wno-analyzer-use-after-free @gol
+-Wno-analyzer-use-of-pointer-in-stale-stack-frame @gol
+-Wno-analyzer-use-of-uninitialized-value @gol
+-Wanalyzer-too-complex @gol
+-Warray-bounds -Warray-bounds=@var{n} @gol
-Wno-attributes -Wattribute-alias=@var{n} @gol
-Wbool-compare -Wbool-operation @gol
-Wno-builtin-declaration-mismatch @gol
-Wwrite-strings @gol
-Wzero-as-null-pointer-constant}
+@item Static Analyzer Options
+@gccoptlist{-Wanalyzer-double-fclose @gol
+-Wanalyzer-double-free @gol
+-Wanalyzer-exposure-through-output-file @gol
+-Wanalyzer-file-leak @gol
+-Wanalyzer-free-of-non-heap @gol
+-Wanalyzer-malloc-leak @gol
+-Wanalyzer-null-argument @gol
+-Wanalyzer-null-dereference @gol
+-Wanalyzer-possible-null-argument @gol
+-Wanalyzer-possible-null-dereference @gol
+-Wanalyzer-stale-setjmp-buffer @gol
+-Wanalyzer-tainted-array-index @gol
+-Wanalyzer-unsafe-call-within-signal-handler @gol
+-Wanalyzer-use-after-free @gol
+-Wanalyzer-use-of-pointer-in-stale-stack-frame @gol
+-Wanalyzer-use-of-uninitialized-value @gol
+-Wanalyzer-too-complex @gol
+-fanalyzer-call-summaries @gol
+-fanalyzer-checker=@var{name} @gol
+-fanalyzer-fine-grained @gol
+-fno-analyzer-state-merge @gol
+-fno-analyzer-state-purge @gol
+-fanalyzer-transitivity @gol
+-fanalyzer-verbose-edges @gol
+-fanalyzer-verbose-state-changes @gol
+-fanalyzer-verbosity=@var{level} @gol
+-fdump-analyzer @gol
+-fdump-analyzer-stderr @gol
+-fdump-analyzer-callgraph @gol
+-fdump-analyzer-exploded-graph @gol
+-fdump-analyzer-exploded-nodes @gol
+-fdump-analyzer-exploded-nodes-2 @gol
+-fdump-analyzer-exploded-nodes-3 @gol
+-fdump-analyzer-state-purge @gol
+-fdump-analyzer-supergraph @gol
+}
+
@item C and Objective-C-only Warning Options
@gccoptlist{-Wbad-function-cast -Wmissing-declarations @gol
-Wmissing-parameter-type -Wmissing-prototypes -Wnested-externs @gol
Disable @option{-Walloca-larger-than=} warnings. The option is
equivalent to @option{-Walloca-larger-than=}@samp{SIZE_MAX} or larger.
+@item -Wno-analyzer-double-fclose
+@opindex Wanalyzer-double-fclose
+@opindex Wno-analyzer-double-fclose
+This warning requires @option{-fanalyzer}, which enables it; use
+@option{-Wno-analyzer-double-fclose} to disable it.
+
+This diagnostic warns for paths through the code in which a @code{FILE *}
+can have @code{fclose} called on it more than once.
+
+@item -Wno-analyzer-double-free
+@opindex Wanalyzer-double-free
+@opindex Wno-analyzer-double-free
+This warning requires @option{-fanalyzer}, which enables it; use
+@option{-Wno-analyzer-double-free} to disable it.
+
+This diagnostic warns for paths through the code in which a pointer
+can have @code{free} called on it more than once.
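+
+For example, the following sketch (not taken from the GCC testsuite)
+contains a path on which @code{free} is called twice on the same pointer:
+
+@smallexample
+#include <stdlib.h>
+
+void test (void *p)
+@{
+  free (p);
+  free (p); /* double-'free' diagnosed on this path */
+@}
+@end smallexample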
+
+@item -Wno-analyzer-exposure-through-output-file
+@opindex Wanalyzer-exposure-through-output-file
+@opindex Wno-analyzer-exposure-through-output-file
+This warning requires @option{-fanalyzer}, which enables it; use
+@option{-Wno-analyzer-exposure-through-output-file}
+to disable it.
+
+This diagnostic warns for paths through the code in which a
+security-sensitive value is written to an output file
+(such as writing a password to a log file).
+
+@item -Wno-analyzer-file-leak
+@opindex Wanalyzer-file-leak
+@opindex Wno-analyzer-file-leak
+This warning requires @option{-fanalyzer}, which enables it; use
+@option{-Wno-analyzer-file-leak}
+to disable it.
+
+This diagnostic warns for paths through the code in which a
+@code{<stdio.h>} @code{FILE *} stream object is leaked.
+
+@item -Wno-analyzer-free-of-non-heap
+@opindex Wanalyzer-free-of-non-heap
+@opindex Wno-analyzer-free-of-non-heap
+This warning requires @option{-fanalyzer}, which enables it; use
+@option{-Wno-analyzer-free-of-non-heap}
+to disable it.
+
+This diagnostic warns for paths through the code in which @code{free}
+is called on a non-heap pointer (e.g. an on-stack buffer, or a global).
+
+@item -Wno-analyzer-malloc-leak
+@opindex Wanalyzer-malloc-leak
+@opindex Wno-analyzer-malloc-leak
+This warning requires @option{-fanalyzer}, which enables it; use
+@option{-Wno-analyzer-malloc-leak}
+to disable it.
+
+This diagnostic warns for paths through the code in which a
+pointer allocated via @code{malloc} is leaked.
+
+@item -Wno-analyzer-possible-null-argument
+@opindex Wanalyzer-possible-null-argument
+@opindex Wno-analyzer-possible-null-argument
+This warning requires @option{-fanalyzer}, which enables it; use
+@option{-Wno-analyzer-possible-null-argument} to disable it.
+
+This diagnostic warns for paths through the code in which a
+possibly-NULL value is passed to a function argument marked
+with @code{__attribute__((nonnull))} as requiring a non-NULL
+value.
+
+@item -Wno-analyzer-possible-null-dereference
+@opindex Wanalyzer-possible-null-dereference
+@opindex Wno-analyzer-possible-null-dereference
+This warning requires @option{-fanalyzer}, which enables it; use
+@option{-Wno-analyzer-possible-null-dereference} to disable it.
+
+This diagnostic warns for paths through the code in which a
+possibly-NULL value is dereferenced.
+
+@item -Wno-analyzer-null-argument
+@opindex Wanalyzer-null-argument
+@opindex Wno-analyzer-null-argument
+This warning requires @option{-fanalyzer}, which enables it; use
+@option{-Wno-analyzer-null-argument} to disable it.
+
+This diagnostic warns for paths through the code in which a
+value known to be NULL is passed to a function argument marked
+with @code{__attribute__((nonnull))} as requiring a non-NULL
+value.
+
+@item -Wno-analyzer-null-dereference
+@opindex Wanalyzer-null-dereference
+@opindex Wno-analyzer-null-dereference
+This warning requires @option{-fanalyzer}, which enables it; use
+@option{-Wno-analyzer-null-dereference} to disable it.
+
+This diagnostic warns for paths through the code in which a
+value known to be NULL is dereferenced.
+
+@item -Wno-analyzer-stale-setjmp-buffer
+@opindex Wanalyzer-stale-setjmp-buffer
+@opindex Wno-analyzer-stale-setjmp-buffer
+This warning requires @option{-fanalyzer}, which enables it; use
+@option{-Wno-analyzer-stale-setjmp-buffer} to disable it.
+
+This diagnostic warns for paths through the code in which
+@code{longjmp} is called to rewind to a @code{jmp_buf} relating
+to a @code{setjmp} call in a function that has returned.
+
+When @code{setjmp} is called on a @code{jmp_buf} to record a rewind
+location, it records the stack frame. The stack frame becomes invalid
+when the function containing the @code{setjmp} call returns. Attempting
+to rewind to it via @code{longjmp} would reference a stack frame that
+no longer exists, and likely lead to a crash (or worse).
+
+@item -Wno-analyzer-tainted-array-index
+@opindex Wanalyzer-tainted-array-index
+@opindex Wno-analyzer-tainted-array-index
+This warning requires @option{-fanalyzer}, which enables it; use
+@option{-Wno-analyzer-tainted-array-index} to disable it.
+
+This diagnostic warns for paths through the code in which a value
+that could be under an attacker's control is used as the index
+of an array access without being sanitized.
+
+@item -Wno-analyzer-unsafe-call-within-signal-handler
+@opindex Wanalyzer-unsafe-call-within-signal-handler
+@opindex Wno-analyzer-unsafe-call-within-signal-handler
+This warning requires @option{-fanalyzer}, which enables it; use
+@option{-Wno-analyzer-unsafe-call-within-signal-handler} to disable it.
+
+This diagnostic warns for paths through the code in which a
+function known to be async-signal-unsafe (such as @code{fprintf}) is
+called from a signal handler.
+
+@item -Wno-analyzer-use-after-free
+@opindex Wanalyzer-use-after-free
+@opindex Wno-analyzer-use-after-free
+This warning requires @option{-fanalyzer}, which enables it; use
+@option{-Wno-analyzer-use-after-free} to disable it.
+
+This diagnostic warns for paths through the code in which a
+pointer is used after @code{free} is called on it.
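+
+For example, the following sketch (not taken from the GCC testsuite)
+uses the pointer after freeing it:
+
+@smallexample
+#include <stdlib.h>
+
+void test (int *p)
+@{
+  free (p);
+  *p = 42; /* use after 'free' diagnosed on this path */
+@}
+@end smallexample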
+
+@item -Wno-analyzer-use-of-pointer-in-stale-stack-frame
+@opindex Wanalyzer-use-of-pointer-in-stale-stack-frame
+@opindex Wno-analyzer-use-of-pointer-in-stale-stack-frame
+This warning requires @option{-fanalyzer}, which enables it; use
+@option{-Wno-analyzer-use-of-pointer-in-stale-stack-frame}
+to disable it.
+
+This diagnostic warns for paths through the code in which a pointer
+is dereferenced that points to a variable in a stale stack frame.
+
+@item -Wno-analyzer-use-of-uninitialized-value
+@opindex Wanalyzer-use-of-uninitialized-value
+@opindex Wno-analyzer-use-of-uninitialized-value
+This warning requires @option{-fanalyzer}, which enables it; use
+@option{-Wno-analyzer-use-of-uninitialized-value} to disable it.
+
+This diagnostic warns for paths through the code in which an uninitialized
+value is used.
+
@item -Warray-bounds
@itemx -Warray-bounds=@var{n}
@opindex Wno-array-bounds
@end table
+@node Static Analyzer Options
+@section Options That Control Static Analysis
+
+@table @gcctabopt
+@item -fanalyzer
+@opindex analyzer
+@opindex fanalyzer
+@opindex fno-analyzer
+This option enables a static analysis of program flow which looks
+for ``interesting'' interprocedural paths through the
+code, and issues warnings for problems found on them.
+
+This analysis is much more expensive than other GCC warnings.
+
+Enabling this option effectively enables the following warnings:
+
+@gccoptlist{ @gol
+-Wanalyzer-double-fclose @gol
+-Wanalyzer-double-free @gol
+-Wanalyzer-exposure-through-output-file @gol
+-Wanalyzer-file-leak @gol
+-Wanalyzer-free-of-non-heap @gol
+-Wanalyzer-malloc-leak @gol
+-Wanalyzer-possible-null-argument @gol
+-Wanalyzer-possible-null-dereference @gol
+-Wanalyzer-null-argument @gol
+-Wanalyzer-null-dereference @gol
+-Wanalyzer-tainted-array-index @gol
+-Wanalyzer-unsafe-call-within-signal-handler @gol
+-Wanalyzer-use-after-free @gol
+-Wanalyzer-use-of-uninitialized-value @gol
+-Wanalyzer-use-of-pointer-in-stale-stack-frame @gol
+}
+
+This option is only available if GCC was configured with analyzer
+support enabled.
+
+@item -Wanalyzer-too-complex
+@opindex Wanalyzer-too-complex
+@opindex Wno-analyzer-too-complex
+If @option{-fanalyzer} is enabled, the analyzer uses various heuristics
+to attempt to explore the control flow and data flow in the program,
+but these can be defeated by sufficiently complicated code.
+
+By default, the analysis will silently stop if the code is too
+complicated for the analyzer to fully explore and it reaches an internal
+limit.
+
+The @option{-Wanalyzer-too-complex} option will warn if this occurs.
+
+@end table
+
+Pertinent parameters for controlling the exploration are:
+@option{--param analyzer-bb-explosion-factor=@var{value}},
+@option{--param analyzer-max-enodes-per-program-point=@var{value}},
+@option{--param analyzer-max-recursion-depth=@var{value}}, and
+@option{--param analyzer-min-snodes-for-call-summary=@var{value}}.
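+
+For example, a hypothetical invocation raising the recursion limit
+might be:
+
+@smallexample
+gcc -fanalyzer --param analyzer-max-recursion-depth=4 -c input.c
+@end smallexample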
+
+The following options control the analyzer.
+
+@table @gcctabopt
+
+@item -fanalyzer-call-summaries
+@opindex fanalyzer-call-summaries
+@opindex fno-analyzer-call-summaries
+Simplify interprocedural analysis by computing the effect of certain calls,
+rather than exploring all paths through the function from callsite to each
+possible return.
+
+If enabled, call summaries are only used for functions with more than one
+call site, and that are sufficiently complicated (as per
+@option{--param analyzer-min-snodes-for-call-summary=@var{value}}).
+
+@item -fanalyzer-checker=@var{name}
+@opindex fanalyzer-checker
+Restrict the analyzer to run just the named checker.
+
+@item -fanalyzer-fine-grained
+@opindex fanalyzer-fine-grained
+@opindex fno-analyzer-fine-grained
+This option is intended for analyzer developers.
+
+Internally the analyzer builds an ``exploded graph'' that combines
+control flow graphs with data flow information.
+
+By default, an edge in this graph can contain the effects of a run
+of multiple statements within a basic block. With
+@option{-fanalyzer-fine-grained}, each statement gets its own edge.
+
+@item -fno-analyzer-state-merge
+@opindex fanalyzer-state-merge
+@opindex fno-analyzer-state-merge
+This option is intended for analyzer developers.
+
+By default the analyzer will attempt to simplify analysis by merging
+sufficiently similar states at each program point as it builds its
+``exploded graph''. With @option{-fno-analyzer-state-merge} this
+merging can be suppressed, for debugging state-handling issues.
+
+@item -fno-analyzer-state-purge
+@opindex fanalyzer-state-purge
+@opindex fno-analyzer-state-purge
+This option is intended for analyzer developers.
+
+By default the analyzer will attempt to simplify analysis by purging
+aspects of state at a program point that appear to no longer be relevant,
+e.g. the values of locals that aren't accessed later in the function
+and which aren't relevant to leak analysis.
+
+With @option{-fno-analyzer-state-purge} this purging of state can
+be suppressed, for debugging state-handling issues.
+
+@item -fanalyzer-transitivity
+@opindex fanalyzer-transitivity
+@opindex fno-analyzer-transitivity
+This option enables transitivity of constraints within the analyzer.
+
+@item -fanalyzer-verbose-edges
+This option is intended for analyzer developers. It enables more
+verbose, lower-level detail in the descriptions of control flow
+within diagnostic paths.
+
+@item -fanalyzer-verbose-state-changes
+This option is intended for analyzer developers. It enables more
+verbose, lower-level detail in the descriptions of events relating
+to state machines within diagnostic paths.
+
+@item -fanalyzer-verbosity=@var{level}
+This option controls the complexity of the control flow paths that are
+emitted for analyzer diagnostics.
+
+The @var{level} can be one of:
+
+@table @samp
+@item 0
+At this level, interprocedural call and return events are displayed,
+along with the most pertinent state-change events relating to
+a diagnostic. For example, for a double-@code{free} diagnostic,
+both calls to @code{free} will be shown.
+
+@item 1
+As per the previous level, but also show events for the entry
+to each function.
+
+@item 2
+As per the previous level, but also show events relating to
+control flow (e.g. ``true path taken'' at a conditional).
+
+This level is the default.
+
+@item 3
+This level is intended for analyzer developers; it adds various
+other events intended for debugging the analyzer.
+
+@end table
+
+@item -fdump-analyzer
+@opindex fdump-analyzer
+Dump internal details about what the analyzer is doing to
+@file{@var{file}.analyzer.txt}.
+This option is overridden by @option{-fdump-analyzer-stderr}.
+
+@item -fdump-analyzer-stderr
+@opindex fdump-analyzer-stderr
+Dump internal details about what the analyzer is doing to stderr.
+This option overrides @option{-fdump-analyzer}.
+
+@item -fdump-analyzer-callgraph
+@opindex fdump-analyzer-callgraph
+Dump a representation of the call graph suitable for viewing with
+GraphViz to @file{@var{file}.callgraph.dot}.
+
+@item -fdump-analyzer-exploded-graph
+@opindex fdump-analyzer-exploded-graph
+Dump a representation of the ``exploded graph'' suitable for viewing with
+GraphViz to @file{@var{file}.eg.dot}.
+Nodes are color-coded based on state-machine states to emphasize
+state changes.
+
+@item -fdump-analyzer-exploded-nodes
+@opindex fdump-analyzer-exploded-nodes
+Emit diagnostics showing where nodes in the ``exploded graph'' are
+in relation to the program source.
+
+@item -fdump-analyzer-exploded-nodes-2
+@opindex fdump-analyzer-exploded-nodes-2
+Dump a textual representation of the ``exploded graph'' to
+@file{@var{file}.eg.txt}.
+
+@item -fdump-analyzer-exploded-nodes-3
+@opindex fdump-analyzer-exploded-nodes-3
+Dump a textual representation of the ``exploded graph'' to
+one dump file per node, to @file{@var{file}.eg-@var{id}.txt}.
+This is typically a large number of dump files.
+
+@item -fdump-analyzer-state-purge
+@opindex fdump-analyzer-state-purge
+As per @option{-fdump-analyzer-supergraph}, dump a representation of the
+``supergraph'' suitable for viewing with GraphViz, but annotate the
+graph with information on what state will be purged at each node.
+The graph is written to @file{@var{file}.state-purge.dot}.
+
+@item -fdump-analyzer-supergraph
+@opindex fdump-analyzer-supergraph
+Dump a representation of the ``supergraph'' suitable for viewing with
+GraphViz to @file{@var{file}.supergraph.dot}. This shows all of the
+control flow graphs in the program, with interprocedural edges for
+calls and returns.
+
+@end table
+
@node Debugging Options
@section Options for Debugging Your Program
@cindex options, debugging
@item dg-require-dll ""
Skip the test if the target does not support DLL attributes.
+@item dg-require-dot ""
+Skip the test if the host does not have @command{dot}.
+
@item dg-require-fork ""
Skip the test if the target does not support @code{fork}.
Passes if @var{regexp} does not match text in @var{filename}.
@item scan-module @var{module} @var{regexp} [@{ target/xfail @var{selector} @}]
Passes if @var{regexp} matches in Fortran module @var{module}.
+@item dg-check-dot @var{filename}
+Passes if @var{filename} is a valid @file{.dot} file (by running
+@code{dot -Tpng} on it, and verifying the exit code is 0).
@end table
@subsubsection Scan the assembly output
command-line options).
end
+define break-on-saved-diagnostic
+break diagnostic_manager::add_diagnostic
+end
+
+document break-on-saved-diagnostic
+Put a breakpoint on diagnostic_manager::add_diagnostic, called within
+the analyzer whenever a diagnostic is saved for later de-duplication and
+possible emission.
+end
+
define reload-gdbhooks
python import imp; imp.reload(gdbhooks)
end
--- /dev/null
+/* Helper code for graphviz output.
+ Copyright (C) 2019-2020 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "graphviz.h"
+
+/* graphviz_out's ctor, wrapping PP. */
+
+graphviz_out::graphviz_out (pretty_printer *pp)
+: m_pp (pp),
+ m_indent (0)
+{
+}
+
+/* Formatted print of FMT. */
+
+void
+graphviz_out::print (const char *fmt, ...)
+{
+ text_info text;
+ va_list ap;
+
+ va_start (ap, fmt);
+ text.err_no = errno;
+  text.args_ptr = &ap;
+ text.format_spec = fmt;
+ pp_format (m_pp, &text);
+ pp_output_formatted_text (m_pp);
+ va_end (ap);
+}
+
+/* Formatted print of FMT. The text is indented by the current
+ indent, and a newline is added. */
+
+void
+graphviz_out::println (const char *fmt, ...)
+{
+ text_info text;
+ va_list ap;
+
+ write_indent ();
+
+ va_start (ap, fmt);
+ text.err_no = errno;
+  text.args_ptr = &ap;
+ text.format_spec = fmt;
+ pp_format (m_pp, &text);
+ pp_output_formatted_text (m_pp);
+ va_end (ap);
+
+ pp_newline (m_pp);
+}
+
+/* Print the current indent to the underlying pp. */
+
+void
+graphviz_out::write_indent ()
+{
+ for (int i = 0; i < m_indent * 2; ++i)
+ pp_space (m_pp);
+}
+
+/* Write the start of an HTML-like row via <TR><TD>, writing to the stream
+ so that followup text can be escaped. */
+
+void
+graphviz_out::begin_tr ()
+{
+ pp_string (m_pp, "<TR><TD ALIGN=\"LEFT\">");
+ pp_write_text_to_stream (m_pp);
+}
+
+/* Write the end of an HTML-like row via </TD></TR>, writing to the stream
+ so that followup text can be escaped. */
+
+void
+graphviz_out::end_tr ()
+{
+ pp_string (m_pp, "</TD></TR>");
+ pp_write_text_to_stream (m_pp);
+}
--- /dev/null
+/* Helper code for graphviz output.
+ Copyright (C) 2019-2020 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_GRAPHVIZ_H
+#define GCC_GRAPHVIZ_H
+
+#include "pretty-print.h" /* for ATTRIBUTE_GCC_PPDIAG. */
+
+/* A class for writing .dot output to a pretty_printer with
+ indentation to show nesting. */
+
+class graphviz_out {
+ public:
+ graphviz_out (pretty_printer *pp);
+
+ void print (const char *fmt, ...)
+ ATTRIBUTE_GCC_PPDIAG(2,3);
+ void println (const char *fmt, ...)
+ ATTRIBUTE_GCC_PPDIAG(2,3);
+
+ void indent () { m_indent++; }
+ void outdent () { m_indent--; }
+
+ void write_indent ();
+
+ void begin_tr ();
+ void end_tr ();
+
+ pretty_printer *get_pp () const { return m_pp; }
+
+ private:
+ pretty_printer *m_pp;
+ int m_indent;
+};
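+
+/* An illustrative usage sketch (PP here is any pretty_printer; the
+   output shown is what the calls below would produce):
+
+     graphviz_out gv (pp);
+     gv.println ("digraph \"example\" {");
+     gv.indent ();
+     gv.println ("%s -> %s;", "a", "b");
+     gv.outdent ();
+     gv.println ("}");
+
+   emitting:
+
+     digraph "example" {
+       a -> b;
+     }
+*/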
+
+#endif /* GCC_GRAPHVIZ_H */
--- /dev/null
+/* Unit tests for ordered-hash-map.h.
+ Copyright (C) 2015-2020 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "opts.h"
+#include "hash-set.h"
+#include "fixed-value.h"
+#include "alias.h"
+#include "flags.h"
+#include "symtab.h"
+#include "tree-core.h"
+#include "stor-layout.h"
+#include "tree.h"
+#include "stringpool.h"
+#include "ordered-hash-map.h"
+#include "selftest.h"
+
+#if CHECKING_P
+
+namespace selftest {
+
+/* Populate *OUT_KVS with the key/value pairs of M. */
+
+template <typename HashMap, typename Key, typename Value>
+static void
+get_kv_pairs (const HashMap &m,
+ auto_vec<std::pair<Key, Value> > *out_kvs)
+{
+ for (typename HashMap::iterator iter = m.begin ();
+ iter != m.end ();
+ ++iter)
+ out_kvs->safe_push (std::make_pair ((*iter).first, (*iter).second));
+}
+
+/* Construct an ordered_hash_map <const char *, int> and verify that
+ various operations work correctly. */
+
+static void
+test_map_of_strings_to_int ()
+{
+ ordered_hash_map <const char *, int> m;
+
+ const char *ostrich = "ostrich";
+ const char *elephant = "elephant";
+ const char *ant = "ant";
+ const char *spider = "spider";
+ const char *millipede = "Illacme plenipes";
+ const char *eric = "half a bee";
+
+ /* A fresh hash_map should be empty. */
+ ASSERT_EQ (0, m.elements ());
+ ASSERT_EQ (NULL, m.get (ostrich));
+
+ /* Populate the hash_map. */
+ ASSERT_EQ (false, m.put (ostrich, 2));
+ ASSERT_EQ (false, m.put (elephant, 4));
+ ASSERT_EQ (false, m.put (ant, 6));
+ ASSERT_EQ (false, m.put (spider, 8));
+ ASSERT_EQ (false, m.put (millipede, 750));
+ ASSERT_EQ (false, m.put (eric, 3));
+
+ /* Verify that we can recover the stored values. */
+ ASSERT_EQ (6, m.elements ());
+ ASSERT_EQ (2, *m.get (ostrich));
+ ASSERT_EQ (4, *m.get (elephant));
+ ASSERT_EQ (6, *m.get (ant));
+ ASSERT_EQ (8, *m.get (spider));
+ ASSERT_EQ (750, *m.get (millipede));
+ ASSERT_EQ (3, *m.get (eric));
+
+ /* Verify that the order of insertion is preserved. */
+ auto_vec<std::pair<const char *, int> > kvs;
+ get_kv_pairs (m, &kvs);
+ ASSERT_EQ (kvs.length (), 6);
+ ASSERT_EQ (kvs[0].first, ostrich);
+ ASSERT_EQ (kvs[0].second, 2);
+ ASSERT_EQ (kvs[1].first, elephant);
+ ASSERT_EQ (kvs[1].second, 4);
+ ASSERT_EQ (kvs[2].first, ant);
+ ASSERT_EQ (kvs[2].second, 6);
+ ASSERT_EQ (kvs[3].first, spider);
+ ASSERT_EQ (kvs[3].second, 8);
+ ASSERT_EQ (kvs[4].first, millipede);
+ ASSERT_EQ (kvs[4].second, 750);
+ ASSERT_EQ (kvs[5].first, eric);
+ ASSERT_EQ (kvs[5].second, 3);
+}
+
+/* Construct an ordered_hash_map using int_hash and verify that various
+ operations work correctly. */
+
+static void
+test_map_of_int_to_strings ()
+{
+ const int EMPTY = -1;
+ const int DELETED = -2;
+ typedef int_hash <int, EMPTY, DELETED> int_hash_t;
+ ordered_hash_map <int_hash_t, const char *> m;
+
+ const char *ostrich = "ostrich";
+ const char *elephant = "elephant";
+ const char *ant = "ant";
+ const char *spider = "spider";
+ const char *millipede = "Illacme plenipes";
+ const char *eric = "half a bee";
+
+ /* A fresh hash_map should be empty. */
+ ASSERT_EQ (0, m.elements ());
+ ASSERT_EQ (NULL, m.get (2));
+
+ /* Populate the hash_map. */
+ ASSERT_EQ (false, m.put (2, ostrich));
+ ASSERT_EQ (false, m.put (4, elephant));
+ ASSERT_EQ (false, m.put (6, ant));
+ ASSERT_EQ (false, m.put (8, spider));
+ ASSERT_EQ (false, m.put (750, millipede));
+ ASSERT_EQ (false, m.put (3, eric));
+
+ /* Verify that we can recover the stored values. */
+ ASSERT_EQ (6, m.elements ());
+ ASSERT_EQ (*m.get (2), ostrich);
+ ASSERT_EQ (*m.get (4), elephant);
+ ASSERT_EQ (*m.get (6), ant);
+ ASSERT_EQ (*m.get (8), spider);
+ ASSERT_EQ (*m.get (750), millipede);
+ ASSERT_EQ (*m.get (3), eric);
+
+ /* Verify that the order of insertion is preserved. */
+ auto_vec<std::pair<int, const char *> > kvs;
+ get_kv_pairs (m, &kvs);
+ ASSERT_EQ (kvs.length (), 6);
+ ASSERT_EQ (kvs[0].first, 2);
+ ASSERT_EQ (kvs[0].second, ostrich);
+ ASSERT_EQ (kvs[1].first, 4);
+ ASSERT_EQ (kvs[1].second, elephant);
+ ASSERT_EQ (kvs[2].first, 6);
+ ASSERT_EQ (kvs[2].second, ant);
+ ASSERT_EQ (kvs[3].first, 8);
+ ASSERT_EQ (kvs[3].second, spider);
+ ASSERT_EQ (kvs[4].first, 750);
+ ASSERT_EQ (kvs[4].second, millipede);
+ ASSERT_EQ (kvs[5].first, 3);
+ ASSERT_EQ (kvs[5].second, eric);
+}
+
+/* Verify that we can remove items from an ordered_hash_map. */
+
+static void
+test_removal ()
+{
+ ordered_hash_map <const char *, int> m;
+
+ const char *ostrich = "ostrich";
+ ASSERT_EQ (false, m.put (ostrich, 2));
+
+ ASSERT_EQ (1, m.elements ());
+ ASSERT_EQ (2, *m.get (ostrich));
+
+ {
+ auto_vec<std::pair<const char *, int> > kvs;
+ get_kv_pairs (m, &kvs);
+ ASSERT_EQ (kvs.length (), 1);
+ ASSERT_EQ (kvs[0].first, ostrich);
+ ASSERT_EQ (kvs[0].second, 2);
+ }
+
+ m.remove (ostrich);
+
+ ASSERT_EQ (0, m.elements ());
+ {
+ auto_vec<std::pair<const char *, int> > kvs;
+ get_kv_pairs (m, &kvs);
+ ASSERT_EQ (kvs.length (), 0);
+ }
+
+ /* Reinsertion (with a different value). */
+ ASSERT_EQ (false, m.put (ostrich, 42));
+ ASSERT_EQ (1, m.elements ());
+ ASSERT_EQ (42, *m.get (ostrich));
+ {
+ auto_vec<std::pair<const char *, int> > kvs;
+ get_kv_pairs (m, &kvs);
+ ASSERT_EQ (kvs.length (), 1);
+ ASSERT_EQ (kvs[0].first, ostrich);
+ ASSERT_EQ (kvs[0].second, 42);
+ }
+}
+
+/* Verify that ordered_hash_map's copy-ctor works. */
+
+static void
+test_copy_ctor ()
+{
+ ordered_hash_map <const char *, int> m;
+
+ const char *ostrich = "ostrich";
+ ASSERT_EQ (false, m.put (ostrich, 2));
+
+ ASSERT_EQ (1, m.elements ());
+ ASSERT_EQ (2, *m.get (ostrich));
+
+ ordered_hash_map <const char *, int> copy (m);
+ ASSERT_EQ (1, copy.elements ());
+ ASSERT_EQ (2, *copy.get (ostrich));
+
+ /* Remove from source. */
+ m.remove (ostrich);
+ ASSERT_EQ (0, m.elements ());
+
+ /* Copy should be unaffected. */
+ ASSERT_EQ (1, copy.elements ());
+ ASSERT_EQ (2, *copy.get (ostrich));
+}
+
+/* Run all of the selftests within this file. */
+
+void
+ordered_hash_map_tests_cc_tests ()
+{
+ test_map_of_strings_to_int ();
+ test_map_of_int_to_strings ();
+ test_removal ();
+ test_copy_ctor ();
+}
+
+} // namespace selftest
+
+#endif /* CHECKING_P */
--- /dev/null
+/* A type-safe hash map that retains the insertion order of keys.
+ Copyright (C) 2019-2020 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+
+#ifndef GCC_ORDERED_HASH_MAP_H
+#define GCC_ORDERED_HASH_MAP_H
+
+/* Notes:
+ - The keys must be PODs, since vec<> uses assignment to populate slots
+ without properly initializing them.
+ - Doesn't have GTY support.
+ - Supports removal, but retains the original insertion order.
+ (Removal might be better handled by using a doubly-linked list
+ of nodes, holding the values). */
+
+template<typename KeyId, typename Value,
+ typename Traits>
+class ordered_hash_map
+{
+ typedef typename Traits::key_type Key;
+
+public:
+ ordered_hash_map () {}
+
+ ordered_hash_map (const ordered_hash_map &other)
+ : m_map (other.m_map),
+ m_keys (other.m_keys.length ()),
+ m_key_index (other.m_key_index)
+ {
+ unsigned i;
+ Key key;
+ FOR_EACH_VEC_ELT (other.m_keys, i, key)
+ m_keys.quick_push (key);
+ }
+
+ /* If key K isn't already in the map, add key K with value V to the map
+ and return false. Otherwise set the value of the entry for key K to V
+ and return true. */
+
+ bool put (const Key &k, const Value &v)
+ {
+ bool existed = m_map.put (k, v);
+ if (!existed)
+ {
+ bool key_present;
+ int &slot = m_key_index.get_or_insert (k, &key_present);
+ if (!key_present)
+ {
+ slot = m_keys.length ();
+ m_keys.safe_push (k);
+ }
+ }
+ return existed;
+ }
+
+ /* If key K is in the map, return a pointer to its value;
+ otherwise return NULL. */
+
+ Value *get (const Key &k)
+ {
+ return m_map.get (k);
+ }
+
+ /* Remove key K from the map; the insertion order of the remaining
+ keys is retained. */
+
+ void remove (const Key &k)
+ {
+ m_map.remove (k);
+ }
+
+ size_t elements () const { return m_map.elements (); }
+
+ class iterator
+ {
+ public:
+ explicit iterator (const ordered_hash_map &map, unsigned idx) :
+ m_ordered_hash_map (map), m_idx (idx) {}
+
+ iterator &operator++ ()
+ {
+ /* Increment m_idx until we find a non-deleted element, or go beyond
+ the end. */
+ while (1)
+ {
+ ++m_idx;
+ if (valid_index_p ())
+ break;
+ }
+ return *this;
+ }
+
+ /* Can't use std::pair here, because GCC releases before 4.3 don't
+ handle std::pair well when the template parameters are references.
+ See PR86739. */
+ struct reference_pair {
+ const Key &first;
+ Value &second;
+
+ reference_pair (const Key &key, Value &value)
+ : first (key), second (value) {}
+
+ template <typename K, typename V>
+ operator std::pair<K, V> () const { return std::pair<K, V> (first, second); }
+ };
+
+ reference_pair operator* ()
+ {
+ const Key &k = m_ordered_hash_map.m_keys[m_idx];
+ Value *slot
+ = const_cast<ordered_hash_map &> (m_ordered_hash_map).get (k);
+ gcc_assert (slot);
+ return reference_pair (k, *slot);
+ }
+
+ bool
+ operator != (const iterator &other) const
+ {
+ return m_idx != other.m_idx;
+ }
+
+ /* Treat one-beyond-the-end as valid, for handling the "end" case. */
+
+ bool valid_index_p () const
+ {
+ if (m_idx > m_ordered_hash_map.m_keys.length ())
+ return false;
+ if (m_idx == m_ordered_hash_map.m_keys.length ())
+ return true;
+ const Key &k = m_ordered_hash_map.m_keys[m_idx];
+ Value *slot
+ = const_cast<ordered_hash_map &> (m_ordered_hash_map).get (k);
+ return slot != NULL;
+ }
+
+ const ordered_hash_map &m_ordered_hash_map;
+ unsigned m_idx;
+ };
+
+ /* Standard iterator retrieval methods. */
+
+ iterator begin () const
+ {
+ iterator i = iterator (*this, 0);
+ while (!i.valid_index_p () && i != end ())
+ ++i;
+ return i;
+ }
+ iterator end () const { return iterator (*this, m_keys.length ()); }
+
+private:
+ /* The assignment operator is not yet implemented; prevent erroneous
+ use of the unsafe compiler-generated one. */
+ void operator= (const ordered_hash_map &);
+
+ /* The underlying map. */
+ hash_map<KeyId, Value, Traits> m_map;
+
+ /* The ordering of the keys. */
+ auto_vec<Key> m_keys;
+
+ /* For each key that's ever been in the map, its index within m_keys. */
+ hash_map<KeyId, int> m_key_index;
+};
+
+/* Two-argument form: redeclare the template with a default Traits
+ argument, so that ordered_hash_map<Key, Value> can be used directly. */
+
+template<typename Key, typename Value,
+ typename Traits = simple_hashmap_traits<default_hash_traits<Key>,
+ Value> >
+class ordered_hash_map;
+
+#endif /* GCC_ORDERED_HASH_MAP_H */
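
As a usage reference, a sketch of the two-argument form in action, mirroring what the selftests earlier in the patch exercise; the function name and values are illustrative, and gcc_assert is assumed to be available via the usual GCC includes:

/* Illustrative sketch: values are retrievable by key, and iteration
   visits keys in insertion order.  */
static void
sketch_ordered_map_usage ()
{
  ordered_hash_map<const char *, int> m;
  const char *first = "first", *second = "second", *third = "third";
  m.put (first, 1);
  m.put (second, 2);
  m.put (third, 3);

  /* get () returns a pointer to the stored value, or NULL if absent.  */
  gcc_assert (*m.get (second) == 2);

  /* Iteration yields the values 1, 2, 3 in that order.  */
  int digits = 0;
  for (ordered_hash_map<const char *, int>::iterator it = m.begin ();
       it != m.end (); ++it)
    digits = digits * 10 + (*it).second;
  gcc_assert (digits == 123);
}
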
TERMINATE_PASS_LIST (all_small_ipa_passes)
INSERT_PASSES_AFTER (all_regular_ipa_passes)
+ NEXT_PASS (pass_analyzer);
NEXT_PASS (pass_ipa_whole_program_visibility);
NEXT_PASS (pass_ipa_profile);
NEXT_PASS (pass_ipa_icf);
#include "options.h"
#include "stringpool.h"
#include "attribs.h"
+#include "analyzer/analyzer-selftests.h"
/* This function needed to be split out from selftest.c as it references
tests from the whole source tree, and so is within
cgraph_c_tests ();
optinfo_emit_json_cc_tests ();
opt_problem_cc_tests ();
+ ordered_hash_map_tests_cc_tests ();
/* Mid-level data structures. */
input_c_tests ();
gimple_c_tests ();
rtl_tests_c_tests ();
read_rtl_function_c_tests ();
+ digraph_cc_tests ();
+ tristate_cc_tests ();
/* Higher-level tests, or for components that other selftests don't
rely on. */
/* Run any lang-specific selftests. */
lang_hooks.run_lang_selftests ();
+ /* Run the analyzer selftests (if enabled). */
+ run_analyzer_selftests ();
+
/* Force a GC at the end of the selftests, to shake out GC-related
issues. For example, if any GC-managed items have buggy (or missing)
finalizers, this last collection will ensure that things that were
extern void diagnostic_c_tests ();
extern void diagnostic_format_json_cc_tests ();
extern void diagnostic_show_locus_c_tests ();
+extern void digraph_cc_tests ();
extern void dumpfile_c_tests ();
extern void edit_context_c_tests ();
extern void et_forest_c_tests ();
extern void json_cc_tests ();
extern void opt_problem_cc_tests ();
extern void optinfo_emit_json_cc_tests ();
+extern void ordered_hash_map_tests_cc_tests ();
extern void predict_c_tests ();
extern void pretty_print_c_tests ();
extern void range_tests ();
extern void tree_c_tests ();
extern void tree_cfg_c_tests ();
extern void tree_diagnostic_path_cc_tests ();
+extern void tristate_cc_tests ();
extern void typed_splay_tree_c_tests ();
extern void unique_ptr_tests_cc_tests ();
extern void vec_c_tests ();
--- /dev/null
+/* Template class for Dijkstra's algorithm on directed graphs.
+ Copyright (C) 2019-2020 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_SHORTEST_PATHS_H
+#define GCC_SHORTEST_PATHS_H
+
+#include "timevar.h"
+
+/* A record of the shortest path to each node in a graph
+ from the origin node.
+ The constructor runs Dijkstra's algorithm, and the results are
+ stored in this class. */
+
+template <typename GraphTraits, typename Path_t>
+class shortest_paths
+{
+public:
+ typedef typename GraphTraits::graph_t graph_t;
+ typedef typename GraphTraits::node_t node_t;
+ typedef typename GraphTraits::edge_t edge_t;
+ typedef Path_t path_t;
+
+ shortest_paths (const graph_t &graph, const node_t *origin);
+
+ path_t get_shortest_path (const node_t *to) const;
+
+private:
+ const graph_t &m_graph;
+
+ /* For each node (by index), the minimal distance to that node from the
+ origin. */
+ auto_vec<int> m_dist;
+
+ /* For each node (by index), the previous edge in the shortest
+ path from the origin. */
+ auto_vec<const edge_t *> m_prev;
+};
+
+/* shortest_paths's constructor.
+
+ Use Dijkstra's algorithm relative to ORIGIN to populate m_dist and
+ m_prev with enough information to generate Path_t instances giving
+ the shortest path from ORIGIN to any node in GRAPH. */
+
+template <typename GraphTraits, typename Path_t>
+inline
+shortest_paths<GraphTraits, Path_t>::shortest_paths (const graph_t &graph,
+ const node_t *origin)
+: m_graph (graph),
+ m_dist (graph.m_nodes.length ()),
+ m_prev (graph.m_nodes.length ())
+{
+ auto_timevar tv (TV_ANALYZER_SHORTEST_PATHS);
+
+ auto_vec<int> queue (graph.m_nodes.length ());
+
+ for (unsigned i = 0; i < graph.m_nodes.length (); i++)
+ {
+ m_dist.quick_push (INT_MAX);
+ m_prev.quick_push (NULL);
+ queue.quick_push (i);
+ }
+ m_dist[origin->m_index] = 0;
+
+ while (queue.length () > 0)
+ {
+ /* Get minimal distance in queue.
+ FIXME: this is O(N^2); replace with a priority queue. */
+ int idx_with_min_dist = -1;
+ int idx_in_queue_with_min_dist = -1;
+ int min_dist = INT_MAX;
+ for (unsigned i = 0; i < queue.length (); i++)
+ {
+ int idx = queue[i];
+ if (m_dist[queue[i]] < min_dist)
+ {
+ min_dist = m_dist[idx];
+ idx_with_min_dist = idx;
+ idx_in_queue_with_min_dist = i;
+ }
+ }
+ gcc_assert (idx_with_min_dist != -1);
+ gcc_assert (idx_in_queue_with_min_dist != -1);
+
+ // FIXME: this is confusing: there are two indices here
+
+ queue.unordered_remove (idx_in_queue_with_min_dist);
+
+ node_t *n
+ = static_cast <node_t *> (m_graph.m_nodes[idx_with_min_dist]);
+
+ int i;
+ edge_t *succ;
+ FOR_EACH_VEC_ELT (n->m_succs, i, succ)
+ {
+ // TODO: only for dest still in queue
+ node_t *dest = succ->m_dest;
+ int alt = m_dist[n->m_index] + 1;
+ if (alt < m_dist[dest->m_index])
+ {
+ m_dist[dest->m_index] = alt;
+ m_prev[dest->m_index] = succ;
+ }
+ }
+ }
+}
+
+/* Generate a Path_t instance giving the shortest path to the node
+ TO from the origin node. */
+
+template <typename GraphTraits, typename Path_t>
+inline Path_t
+shortest_paths<GraphTraits, Path_t>::get_shortest_path (const node_t *to) const
+{
+ Path_t result;
+
+ while (m_prev[to->m_index])
+ {
+ result.m_edges.safe_push (m_prev[to->m_index]);
+ to = m_prev[to->m_index]->m_src;
+ }
+
+ result.m_edges.reverse ();
+
+ return result;
+}
+
+#endif /* GCC_SHORTEST_PATHS_H */
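
To make the template's requirements concrete, here is a sketch of the members a GraphTraits/Path_t pair needs, inferred from what shortest_paths accesses above; every type and member name below is hypothetical (within GCC the real instantiations are the analyzer's graph classes):

/* Illustrative sketch of the contract assumed by shortest_paths.  */

struct sketch_node;
struct sketch_edge;

struct sketch_graph
{
  /* Indexed by sketch_node::m_index.  */
  auto_vec<sketch_node *> m_nodes;
};

struct sketch_node
{
  unsigned m_index;                 /* This node's slot within m_nodes.  */
  auto_vec<sketch_edge *> m_succs;  /* Outgoing edges.  */
};

struct sketch_edge
{
  sketch_node *m_src;
  sketch_node *m_dest;
};

struct sketch_path
{
  auto_vec<const sketch_edge *> m_edges;  /* In origin-to-target order.  */
};

struct sketch_graph_traits
{
  typedef sketch_graph graph_t;
  typedef sketch_node node_t;
  typedef sketch_edge edge_t;
};

/* Typical use:
     shortest_paths<sketch_graph_traits, sketch_path> sp (g, &origin);
     sketch_path p (sp.get_shortest_path (&target));  */
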
+2020-01-14 David Malcolm <dmalcolm@redhat.com>
+
+ * gcc.dg/analyzer/CVE-2005-1689-minimal.c: New test.
+ * gcc.dg/analyzer/abort.c: New test.
+ * gcc.dg/analyzer/alloca-leak.c: New test.
+ * gcc.dg/analyzer/analyzer-decls.h: New header.
+ * gcc.dg/analyzer/analyzer-verbosity-0.c: New test.
+ * gcc.dg/analyzer/analyzer-verbosity-1.c: New test.
+ * gcc.dg/analyzer/analyzer-verbosity-2.c: New test.
+ * gcc.dg/analyzer/analyzer.exp: New suite.
+ * gcc.dg/analyzer/attribute-nonnull.c: New test.
+ * gcc.dg/analyzer/call-summaries-1.c: New test.
+ * gcc.dg/analyzer/conditionals-2.c: New test.
+ * gcc.dg/analyzer/conditionals-3.c: New test.
+ * gcc.dg/analyzer/conditionals-notrans.c: New test.
+ * gcc.dg/analyzer/conditionals-trans.c: New test.
+ * gcc.dg/analyzer/data-model-1.c: New test.
+ * gcc.dg/analyzer/data-model-2.c: New test.
+ * gcc.dg/analyzer/data-model-3.c: New test.
+ * gcc.dg/analyzer/data-model-4.c: New test.
+ * gcc.dg/analyzer/data-model-5.c: New test.
+ * gcc.dg/analyzer/data-model-5b.c: New test.
+ * gcc.dg/analyzer/data-model-5c.c: New test.
+ * gcc.dg/analyzer/data-model-5d.c: New test.
+ * gcc.dg/analyzer/data-model-6.c: New test.
+ * gcc.dg/analyzer/data-model-7.c: New test.
+ * gcc.dg/analyzer/data-model-8.c: New test.
+ * gcc.dg/analyzer/data-model-9.c: New test.
+ * gcc.dg/analyzer/data-model-11.c: New test.
+ * gcc.dg/analyzer/data-model-12.c: New test.
+ * gcc.dg/analyzer/data-model-13.c: New test.
+ * gcc.dg/analyzer/data-model-14.c: New test.
+ * gcc.dg/analyzer/data-model-15.c: New test.
+ * gcc.dg/analyzer/data-model-16.c: New test.
+ * gcc.dg/analyzer/data-model-17.c: New test.
+ * gcc.dg/analyzer/data-model-18.c: New test.
+ * gcc.dg/analyzer/data-model-19.c: New test.
+ * gcc.dg/analyzer/data-model-path-1.c: New test.
+ * gcc.dg/analyzer/disabling.c: New test.
+ * gcc.dg/analyzer/dot-output.c: New test.
+ * gcc.dg/analyzer/double-free-lto-1-a.c: New test.
+ * gcc.dg/analyzer/double-free-lto-1-b.c: New test.
+ * gcc.dg/analyzer/double-free-lto-1.h: New header.
+ * gcc.dg/analyzer/equivalence.c: New test.
+ * gcc.dg/analyzer/explode-1.c: New test.
+ * gcc.dg/analyzer/explode-2.c: New test.
+ * gcc.dg/analyzer/factorial.c: New test.
+ * gcc.dg/analyzer/fibonacci.c: New test.
+ * gcc.dg/analyzer/fields.c: New test.
+ * gcc.dg/analyzer/file-1.c: New test.
+ * gcc.dg/analyzer/file-2.c: New test.
+ * gcc.dg/analyzer/function-ptr-1.c: New test.
+ * gcc.dg/analyzer/function-ptr-2.c: New test.
+ * gcc.dg/analyzer/function-ptr-3.c: New test.
+ * gcc.dg/analyzer/gzio-2.c: New test.
+ * gcc.dg/analyzer/gzio-3.c: New test.
+ * gcc.dg/analyzer/gzio-3a.c: New test.
+ * gcc.dg/analyzer/gzio.c: New test.
+ * gcc.dg/analyzer/infinite-recursion.c: New test.
+ * gcc.dg/analyzer/loop-2.c: New test.
+ * gcc.dg/analyzer/loop-2a.c: New test.
+ * gcc.dg/analyzer/loop-3.c: New test.
+ * gcc.dg/analyzer/loop-4.c: New test.
+ * gcc.dg/analyzer/loop.c: New test.
+ * gcc.dg/analyzer/malloc-1.c: New test.
+ * gcc.dg/analyzer/malloc-2.c: New test.
+ * gcc.dg/analyzer/malloc-3.c: New test.
+ * gcc.dg/analyzer/malloc-callbacks.c: New test.
+ * gcc.dg/analyzer/malloc-dce.c: New test.
+ * gcc.dg/analyzer/malloc-dedupe-1.c: New test.
+ * gcc.dg/analyzer/malloc-ipa-1.c: New test.
+ * gcc.dg/analyzer/malloc-ipa-10.c: New test.
+ * gcc.dg/analyzer/malloc-ipa-11.c: New test.
+ * gcc.dg/analyzer/malloc-ipa-12.c: New test.
+ * gcc.dg/analyzer/malloc-ipa-13.c: New test.
+ * gcc.dg/analyzer/malloc-ipa-2.c: New test.
+ * gcc.dg/analyzer/malloc-ipa-3.c: New test.
+ * gcc.dg/analyzer/malloc-ipa-4.c: New test.
+ * gcc.dg/analyzer/malloc-ipa-5.c: New test.
+ * gcc.dg/analyzer/malloc-ipa-6.c: New test.
+ * gcc.dg/analyzer/malloc-ipa-7.c: New test.
+ * gcc.dg/analyzer/malloc-ipa-8-double-free.c: New test.
+ * gcc.dg/analyzer/malloc-ipa-8-lto-a.c: New test.
+ * gcc.dg/analyzer/malloc-ipa-8-lto-b.c: New test.
+ * gcc.dg/analyzer/malloc-ipa-8-lto-c.c: New test.
+ * gcc.dg/analyzer/malloc-ipa-8-lto.h: New header.
+ * gcc.dg/analyzer/malloc-ipa-8-unchecked.c: New test.
+ * gcc.dg/analyzer/malloc-ipa-9.c: New test.
+ * gcc.dg/analyzer/malloc-macro-inline-events.c: New test.
+ * gcc.dg/analyzer/malloc-macro-separate-events.c: New test.
+ * gcc.dg/analyzer/malloc-macro.h: New header.
+ * gcc.dg/analyzer/malloc-many-paths-1.c: New test.
+ * gcc.dg/analyzer/malloc-many-paths-2.c: New test.
+ * gcc.dg/analyzer/malloc-many-paths-3.c: New test.
+ * gcc.dg/analyzer/malloc-paths-1.c: New test.
+ * gcc.dg/analyzer/malloc-paths-10.c: New test.
+ * gcc.dg/analyzer/malloc-paths-2.c: New test.
+ * gcc.dg/analyzer/malloc-paths-3.c: New test.
+ * gcc.dg/analyzer/malloc-paths-4.c: New test.
+ * gcc.dg/analyzer/malloc-paths-5.c: New test.
+ * gcc.dg/analyzer/malloc-paths-6.c: New test.
+ * gcc.dg/analyzer/malloc-paths-7.c: New test.
+ * gcc.dg/analyzer/malloc-paths-8.c: New test.
+ * gcc.dg/analyzer/malloc-paths-9.c: New test.
+ * gcc.dg/analyzer/malloc-vs-local-1a.c: New test.
+ * gcc.dg/analyzer/malloc-vs-local-1b.c: New test.
+ * gcc.dg/analyzer/malloc-vs-local-2.c: New test.
+ * gcc.dg/analyzer/malloc-vs-local-3.c: New test.
+ * gcc.dg/analyzer/malloc-vs-local-4.c: New test.
+ * gcc.dg/analyzer/operations.c: New test.
+ * gcc.dg/analyzer/params-2.c: New test.
+ * gcc.dg/analyzer/params.c: New test.
+ * gcc.dg/analyzer/paths-1.c: New test.
+ * gcc.dg/analyzer/paths-1a.c: New test.
+ * gcc.dg/analyzer/paths-2.c: New test.
+ * gcc.dg/analyzer/paths-3.c: New test.
+ * gcc.dg/analyzer/paths-4.c: New test.
+ * gcc.dg/analyzer/paths-5.c: New test.
+ * gcc.dg/analyzer/paths-6.c: New test.
+ * gcc.dg/analyzer/paths-7.c: New test.
+ * gcc.dg/analyzer/pattern-test-1.c: New test.
+ * gcc.dg/analyzer/pattern-test-2.c: New test.
+ * gcc.dg/analyzer/pointer-merging.c: New test.
+ * gcc.dg/analyzer/pr61861.c: New test.
+ * gcc.dg/analyzer/pragma-1.c: New test.
+ * gcc.dg/analyzer/scope-1.c: New test.
+ * gcc.dg/analyzer/sensitive-1.c: New test.
+ * gcc.dg/analyzer/setjmp-1.c: New test.
+ * gcc.dg/analyzer/setjmp-2.c: New test.
+ * gcc.dg/analyzer/setjmp-3.c: New test.
+ * gcc.dg/analyzer/setjmp-4.c: New test.
+ * gcc.dg/analyzer/setjmp-5.c: New test.
+ * gcc.dg/analyzer/setjmp-6.c: New test.
+ * gcc.dg/analyzer/setjmp-7.c: New test.
+ * gcc.dg/analyzer/setjmp-7a.c: New test.
+ * gcc.dg/analyzer/setjmp-8.c: New test.
+ * gcc.dg/analyzer/setjmp-9.c: New test.
+ * gcc.dg/analyzer/signal-1.c: New test.
+ * gcc.dg/analyzer/signal-2.c: New test.
+ * gcc.dg/analyzer/signal-3.c: New test.
+ * gcc.dg/analyzer/signal-4a.c: New test.
+ * gcc.dg/analyzer/signal-4b.c: New test.
+ * gcc.dg/analyzer/strcmp-1.c: New test.
+ * gcc.dg/analyzer/switch.c: New test.
+ * gcc.dg/analyzer/taint-1.c: New test.
+ * gcc.dg/analyzer/zlib-1.c: New test.
+ * gcc.dg/analyzer/zlib-2.c: New test.
+ * gcc.dg/analyzer/zlib-3.c: New test.
+ * gcc.dg/analyzer/zlib-4.c: New test.
+ * gcc.dg/analyzer/zlib-5.c: New test.
+ * gcc.dg/analyzer/zlib-6.c: New test.
+ * lib/gcc-defs.exp (dg-check-dot): New procedure.
+ * lib/target-supports.exp (check_dot_available): New procedure.
+ (check_effective_target_analyzer): New.
+ * lib/target-supports-dg.exp (dg-require-dot): New procedure.
+
2020-01-14 Jason Merrill <jason@redhat.com>
* lib/prune.exp (prune_gcc_output): Adjust constexpr pattern.
--- /dev/null
+#include <stdlib.h>
+
+typedef struct _krb5_data {
+ char *data;
+} krb5_data;
+
+void
+test_1 (krb5_data inbuf, int flag)
+{
+ free(inbuf.data); /* { dg-message "first 'free' here" } */
+ free(inbuf.data); /* { dg-warning "double-'free' of 'inbuf.data'" } */
+}
+
+void
+test_2 (krb5_data inbuf, int flag)
+{
+ if (flag) {
+ free(inbuf.data); /* { dg-message "first 'free' here" } */
+ }
+ free(inbuf.data); /* { dg-warning "double-'free' of 'inbuf.data'" } */
+}
+
+void
+test_3 (krb5_data inbuf, int flag)
+{
+ if (flag) {
+ free((char *)inbuf.data); /* { dg-message "first 'free' here" } */
+ }
+ free((char *)inbuf.data); /* { dg-warning "double-'free' of 'inbuf.data'" } */
+}
--- /dev/null
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include "analyzer-decls.h"
+
+extern void foo ();
+extern void bar ();
+
+void test_1 (int i)
+{
+ if (i == 42)
+ abort ();
+
+ __analyzer_eval (i != 42); /* { dg-warning "TRUE" } */
+}
+
+void test_2 (int i)
+{
+ if (i)
+ foo ();
+ else
+ bar ();
+
+ foo ();
+
+ if (i)
+ foo ();
+ else
+ abort ();
+
+ __analyzer_eval (i != 0); /* { dg-warning "TRUE" } */
+}
+
+/**************************************************************************/
+
+void calls_abort (const char *msg)
+{
+ fprintf (stderr, "%s", msg);
+ abort ();
+}
+
+void test_3 (void *ptr)
+{
+ if (!ptr)
+ calls_abort ("ptr was NULL");
+
+ __analyzer_eval (ptr != 0); /* { dg-warning "TRUE" } */
+}
+
+/**************************************************************************/
+
+extern void marked_noreturn (const char *msg)
+ __attribute__ ((__noreturn__));
+
+void test_4 (void *ptr)
+{
+ if (!ptr)
+ marked_noreturn ("ptr was NULL");
+
+ __analyzer_eval (ptr != 0); /* { dg-warning "TRUE" } */
+}
+
+/**************************************************************************/
+
+void test_5 (int i)
+{
+ assert (i < 10);
+
+ /* We have not defined NDEBUG, so this will call __assert_fail if
+ i >= 10, which is labelled with __attribute__ ((__noreturn__)). */
+ __analyzer_eval (i < 10); /* { dg-warning "TRUE" } */
+}
--- /dev/null
+#include <alloca.h>
+
+void *test (void)
+{
+ void *ptr = alloca (64);
+ return ptr;
+}
+/* TODO: warn about escaping alloca. */
--- /dev/null
+#ifndef ANALYZER_DECLS_H
+#define ANALYZER_DECLS_H
+
+/* Function decls with special meaning to the analyzer.
+ None of these are actually implemented. */
+
+/* Trigger a breakpoint in the analyzer when reached. */
+extern void __analyzer_break (void);
+
+/* Dump copious information about the analyzer's state when reached. */
+extern void __analyzer_dump (void);
+
+/* Dump information after analysis on all of the exploded nodes at this
+ program point.
+
+ __analyzer_dump_exploded_nodes (0);
+ will dump just the number of nodes, and their IDs.
+
+ __analyzer_dump_exploded_nodes (1);
+ will also dump all of the states within those nodes. */
+extern void __analyzer_dump_exploded_nodes (int);
+
+extern void __analyzer_dump_num_heap_regions (void);
+
+/* Emit a placeholder "note" diagnostic with a path to this call site,
+ if the analyzer finds a feasible path to it. */
+extern void __analyzer_dump_path (void);
+
+/* Dump the region_model's state to stderr. */
+extern void __analyzer_dump_region_model (void);
+
+/* Emit a warning with text "TRUE", "FALSE" or "UNKNOWN" based on the
+ truthfulness of the argument. */
+extern void __analyzer_eval (int);
+
+#endif /* #ifndef ANALYZER_DECLS_H. */
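
A minimal sketch of how a testcase combines these declarations with the DejaGnu directives used throughout this suite; the test name is illustrative, and the pattern matches the tests elsewhere in the patch:

/* Illustrative sketch: within the "true" branch the analyzer can prove the
   condition, so __analyzer_eval reports "TRUE", which dg-warning matches.  */
#include "analyzer-decls.h"

void sketch_test (int i)
{
  if (i == 42)
    __analyzer_eval (i == 42); /* { dg-warning "TRUE" } */
}
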
--- /dev/null
+/* { dg-additional-options "-fdiagnostics-show-line-numbers -fdiagnostics-path-format=inline-events -fdiagnostics-show-caret -fanalyzer-verbosity=0" } */
+/* { dg-enable-nn-line-numbers "" } */
+
+#include <stdlib.h>
+
+void calls_free_1 (void *ptr)
+{
+ free (ptr); /* { dg-warning "double-'free' of 'ptr'" } */
+}
+
+void test_1 (void *ptr, int a, int b)
+{
+ if (a)
+ calls_free_1 (ptr);
+
+ if (b)
+ {
+ }
+ else
+ calls_free_1 (ptr);
+}
+
+/* { dg-begin-multiline-output "" }
+ NN | free (ptr);
+ | ^~~~~~~~~~
+ 'test_1': event 1
+ |
+ | NN | calls_free_1 (ptr);
+ | | ^~~~~~~~~~~~~~~~~~
+ | | |
+ | | (1) calling 'calls_free_1' from 'test_1'
+ |
+ +--> 'calls_free_1': event 2
+ |
+ | NN | free (ptr);
+ | | ^~~~~~~~~~
+ | | |
+ | | (2) first 'free' here
+ |
+ <------+
+ |
+ 'test_1': events 3-4
+ |
+ | NN | calls_free_1 (ptr);
+ | | ^~~~~~~~~~~~~~~~~~
+ | | |
+ | | (3) returning to 'test_1' from 'calls_free_1'
+ |......
+ | NN | calls_free_1 (ptr);
+ | | ~~~~~~~~~~~~~~~~~~
+ | | |
+ | | (4) passing freed pointer 'ptr' in call to 'calls_free_1' from 'test_1'
+ |
+ +--> 'calls_free_1': event 5
+ |
+ | NN | free (ptr);
+ | | ^~~~~~~~~~
+ | | |
+ | | (5) second 'free' here; first 'free' was at (2)
+ |
+ { dg-end-multiline-output "" } */
+
+void calls_free_2 (void *ptr)
+{
+ free (ptr); /* { dg-warning "double-'free' of 'ptr'" } */
+}
+
+void test_2 (void *ptr, int a, int b)
+{
+ switch (a)
+ {
+ default:
+ break;
+ case 1:
+ break;
+ case 3:
+ calls_free_2 (ptr);
+ break;
+ }
+
+ switch (b)
+ {
+ default:
+ calls_free_2 (ptr);
+ break;
+ case 1:
+ break;
+ case 42:
+ break;
+ }
+}
+
+/* { dg-begin-multiline-output "" }
+ NN | free (ptr);
+ | ^~~~~~~~~~
+ 'test_2': event 1
+ |
+ | NN | calls_free_2 (ptr);
+ | | ^~~~~~~~~~~~~~~~~~
+ | | |
+ | | (1) calling 'calls_free_2' from 'test_2'
+ |
+ +--> 'calls_free_2': event 2
+ |
+ | NN | free (ptr);
+ | | ^~~~~~~~~~
+ | | |
+ | | (2) first 'free' here
+ |
+ <------+
+ |
+ 'test_2': events 3-4
+ |
+ | NN | calls_free_2 (ptr);
+ | | ^~~~~~~~~~~~~~~~~~
+ | | |
+ | | (3) returning to 'test_2' from 'calls_free_2'
+ |......
+ | NN | calls_free_2 (ptr);
+ | | ~~~~~~~~~~~~~~~~~~
+ | | |
+ | | (4) passing freed pointer 'ptr' in call to 'calls_free_2' from 'test_2'
+ |
+ +--> 'calls_free_2': event 5
+ |
+ | NN | free (ptr);
+ | | ^~~~~~~~~~
+ | | |
+ | | (5) second 'free' here; first 'free' was at (2)
+ |
+ { dg-end-multiline-output "" } */
+
+// TODO: range cases
+
+/* The call/return to this function shouldn't appear in the path. */
+
+void called_by_test_3 (void)
+{
+}
+
+void test_3 (void *ptr)
+{
+ free (ptr);
+ called_by_test_3 ();
+ free (ptr); /* { dg-warning "double-'free' of 'ptr'" } */
+}
+
+/* { dg-begin-multiline-output "" }
+ NN | free (ptr);
+ | ^~~~~~~~~~
+ 'test_3': events 1-2
+ |
+ | NN | free (ptr);
+ | | ^~~~~~~~~~
+ | | |
+ | | (1) first 'free' here
+ | NN | called_by_test_3 ();
+ | NN | free (ptr);
+ | | ~~~~~~~~~~
+ | | |
+ | | (2) second 'free' here; first 'free' was at (1)
+ |
+ { dg-end-multiline-output "" } */
--- /dev/null
+/* { dg-additional-options "-fdiagnostics-show-line-numbers -fdiagnostics-path-format=inline-events -fdiagnostics-show-caret -fanalyzer-verbosity=1" } */
+/* { dg-enable-nn-line-numbers "" } */
+
+#include <stdlib.h>
+
+void calls_free_1 (void *ptr)
+{
+ free (ptr); /* { dg-warning "double-'free' of 'ptr'" } */
+}
+
+void test_1 (void *ptr, int a, int b)
+{
+ if (a)
+ calls_free_1 (ptr);
+
+ if (b)
+ {
+ }
+ else
+ calls_free_1 (ptr);
+}
+
+/* { dg-begin-multiline-output "" }
+ NN | free (ptr);
+ | ^~~~~~~~~~
+ 'test_1': events 1-2
+ |
+ | NN | void test_1 (void *ptr, int a, int b)
+ | | ^~~~~~
+ | | |
+ | | (1) entry to 'test_1'
+ |......
+ | NN | calls_free_1 (ptr);
+ | | ~~~~~~~~~~~~~~~~~~
+ | | |
+ | | (2) calling 'calls_free_1' from 'test_1'
+ |
+ +--> 'calls_free_1': events 3-4
+ |
+ | NN | void calls_free_1 (void *ptr)
+ | | ^~~~~~~~~~~~
+ | | |
+ | | (3) entry to 'calls_free_1'
+ | NN | {
+ | NN | free (ptr);
+ | | ~~~~~~~~~~
+ | | |
+ | | (4) first 'free' here
+ |
+ <------+
+ |
+ 'test_1': events 5-6
+ |
+ | NN | calls_free_1 (ptr);
+ | | ^~~~~~~~~~~~~~~~~~
+ | | |
+ | | (5) returning to 'test_1' from 'calls_free_1'
+ |......
+ | NN | calls_free_1 (ptr);
+ | | ~~~~~~~~~~~~~~~~~~
+ | | |
+ | | (6) passing freed pointer 'ptr' in call to 'calls_free_1' from 'test_1'
+ |
+ +--> 'calls_free_1': events 7-8
+ |
+ | NN | void calls_free_1 (void *ptr)
+ | | ^~~~~~~~~~~~
+ | | |
+ | | (7) entry to 'calls_free_1'
+ | NN | {
+ | NN | free (ptr);
+ | | ~~~~~~~~~~
+ | | |
+ | | (8) second 'free' here; first 'free' was at (4)
+ |
+ { dg-end-multiline-output "" } */
+
+void calls_free_2 (void *ptr)
+{
+ free (ptr); /* { dg-warning "double-'free' of 'ptr'" } */
+}
+
+void test_2 (void *ptr, int a, int b)
+{
+ switch (a)
+ {
+ default:
+ break;
+ case 1:
+ break;
+ case 3:
+ calls_free_2 (ptr);
+ break;
+ }
+
+ switch (b)
+ {
+ default:
+ calls_free_2 (ptr);
+ break;
+ case 1:
+ break;
+ case 42:
+ break;
+ }
+}
+
+/* { dg-begin-multiline-output "" }
+ NN | free (ptr);
+ | ^~~~~~~~~~
+ 'test_2': events 1-2
+ |
+ | NN | void test_2 (void *ptr, int a, int b)
+ | | ^~~~~~
+ | | |
+ | | (1) entry to 'test_2'
+ |......
+ | NN | calls_free_2 (ptr);
+ | | ~~~~~~~~~~~~~~~~~~
+ | | |
+ | | (2) calling 'calls_free_2' from 'test_2'
+ |
+ +--> 'calls_free_2': events 3-4
+ |
+ | NN | void calls_free_2 (void *ptr)
+ | | ^~~~~~~~~~~~
+ | | |
+ | | (3) entry to 'calls_free_2'
+ | NN | {
+ | NN | free (ptr);
+ | | ~~~~~~~~~~
+ | | |
+ | | (4) first 'free' here
+ |
+ <------+
+ |
+ 'test_2': events 5-6
+ |
+ | NN | calls_free_2 (ptr);
+ | | ^~~~~~~~~~~~~~~~~~
+ | | |
+ | | (5) returning to 'test_2' from 'calls_free_2'
+ |......
+ | NN | calls_free_2 (ptr);
+ | | ~~~~~~~~~~~~~~~~~~
+ | | |
+ | | (6) passing freed pointer 'ptr' in call to 'calls_free_2' from 'test_2'
+ |
+ +--> 'calls_free_2': events 7-8
+ |
+ | NN | void calls_free_2 (void *ptr)
+ | | ^~~~~~~~~~~~
+ | | |
+ | | (7) entry to 'calls_free_2'
+ | NN | {
+ | NN | free (ptr);
+ | | ~~~~~~~~~~
+ | | |
+ | | (8) second 'free' here; first 'free' was at (4)
+ |
+ { dg-end-multiline-output "" } */
+
+/* The call/return to this function shouldn't appear in the path. */
+
+void called_by_test_3 (void)
+{
+}
+
+void test_3 (void *ptr)
+{
+ free (ptr);
+ called_by_test_3 ();
+ free (ptr); /* { dg-warning "double-'free' of 'ptr'" } */
+}
+
+/* { dg-begin-multiline-output "" }
+ NN | free (ptr);
+ | ^~~~~~~~~~
+ 'test_3': events 1-2
+ |
+ | NN | free (ptr);
+ | | ^~~~~~~~~~
+ | | |
+ | | (1) first 'free' here
+ | NN | called_by_test_3 ();
+ | NN | free (ptr);
+ | | ~~~~~~~~~~
+ | | |
+ | | (2) second 'free' here; first 'free' was at (1)
+ |
+ { dg-end-multiline-output "" } */
--- /dev/null
+/* { dg-additional-options "-fdiagnostics-show-line-numbers -fdiagnostics-path-format=inline-events -fdiagnostics-show-caret -fanalyzer-verbosity=2" } */
+/* { dg-enable-nn-line-numbers "" } */
+
+#include <stdlib.h>
+
+void calls_free_1 (void *ptr)
+{
+ free (ptr); /* { dg-warning "double-'free' of 'ptr'" } */
+}
+
+void test_1 (void *ptr, int a, int b)
+{
+ if (a)
+ calls_free_1 (ptr);
+
+ if (b)
+ {
+ }
+ else
+ calls_free_1 (ptr);
+}
+
+/* { dg-begin-multiline-output "" }
+ NN | free (ptr);
+ | ^~~~~~~~~~
+ 'test_1': events 1-4
+ |
+ | NN | void test_1 (void *ptr, int a, int b)
+ | | ^~~~~~
+ | | |
+ | | (1) entry to 'test_1'
+ | NN | {
+ | NN | if (a)
+ | | ~
+ | | |
+ | | (2) following 'true' branch (when 'a != 0')...
+ | NN | calls_free_1 (ptr);
+ | | ~~~~~~~~~~~~~~~~~~
+ | | |
+ | | (3) ...to here
+ | | (4) calling 'calls_free_1' from 'test_1'
+ |
+ +--> 'calls_free_1': events 5-6
+ |
+ | NN | void calls_free_1 (void *ptr)
+ | | ^~~~~~~~~~~~
+ | | |
+ | | (5) entry to 'calls_free_1'
+ | NN | {
+ | NN | free (ptr);
+ | | ~~~~~~~~~~
+ | | |
+ | | (6) first 'free' here
+ |
+ <------+
+ |
+ 'test_1': events 7-10
+ |
+ | NN | calls_free_1 (ptr);
+ | | ^~~~~~~~~~~~~~~~~~
+ | | |
+ | | (7) returning to 'test_1' from 'calls_free_1'
+ | NN |
+ | NN | if (b)
+ | | ~
+ | | |
+ | | (8) following 'false' branch (when 'b == 0')...
+ |......
+ | NN | calls_free_1 (ptr);
+ | | ~~~~~~~~~~~~~~~~~~
+ | | |
+ | | (9) ...to here
+ | | (10) passing freed pointer 'ptr' in call to 'calls_free_1' from 'test_1'
+ |
+ +--> 'calls_free_1': events 11-12
+ |
+ | NN | void calls_free_1 (void *ptr)
+ | | ^~~~~~~~~~~~
+ | | |
+ | | (11) entry to 'calls_free_1'
+ | NN | {
+ | NN | free (ptr);
+ | | ~~~~~~~~~~
+ | | |
+ | | (12) second 'free' here; first 'free' was at (6)
+ |
+ { dg-end-multiline-output "" } */
+
+void calls_free_2 (void *ptr)
+{
+ free (ptr); /* { dg-warning "double-'free' of 'ptr'" } */
+}
+
+void test_2 (void *ptr, int a, int b)
+{
+ switch (a)
+ {
+ default:
+ break;
+ case 1:
+ break;
+ case 3:
+ calls_free_2 (ptr);
+ break;
+ }
+
+ switch (b)
+ {
+ default:
+ calls_free_2 (ptr);
+ break;
+ case 1:
+ break;
+ case 42:
+ break;
+ }
+}
+
+/* { dg-begin-multiline-output "" }
+ NN | free (ptr);
+ | ^~~~~~~~~~
+ 'test_2': events 1-4
+ |
+ | NN | void test_2 (void *ptr, int a, int b)
+ | | ^~~~~~
+ | | |
+ | | (1) entry to 'test_2'
+ | NN | {
+ | NN | switch (a)
+ | | ~~~~~~
+ | | |
+ | | (2) following 'case 3:' branch...
+ |......
+ | NN | case 3:
+ | | ~~~~
+ | | |
+ | | (3) ...to here
+ | NN | calls_free_2 (ptr);
+ | | ~~~~~~~~~~~~~~~~~~
+ | | |
+ | | (4) calling 'calls_free_2' from 'test_2'
+ |
+ +--> 'calls_free_2': events 5-6
+ |
+ | NN | void calls_free_2 (void *ptr)
+ | | ^~~~~~~~~~~~
+ | | |
+ | | (5) entry to 'calls_free_2'
+ | NN | {
+ | NN | free (ptr);
+ | | ~~~~~~~~~~
+ | | |
+ | | (6) first 'free' here
+ |
+ <------+
+ |
+ 'test_2': events 7-10
+ |
+ | NN | calls_free_2 (ptr);
+ | | ^~~~~~~~~~~~~~~~~~
+ | | |
+ | | (7) returning to 'test_2' from 'calls_free_2'
+ |......
+ | NN | switch (b)
+ | | ~~~~~~
+ | | |
+ | | (8) following 'default:' branch...
+ | NN | {
+ | NN | default:
+ | | ~~~~~~~
+ | | |
+ | | (9) ...to here
+ | NN | calls_free_2 (ptr);
+ | | ~~~~~~~~~~~~~~~~~~
+ | | |
+ | | (10) passing freed pointer 'ptr' in call to 'calls_free_2' from 'test_2'
+ |
+ +--> 'calls_free_2': events 11-12
+ |
+ | NN | void calls_free_2 (void *ptr)
+ | | ^~~~~~~~~~~~
+ | | |
+ | | (11) entry to 'calls_free_2'
+ | NN | {
+ | NN | free (ptr);
+ | | ~~~~~~~~~~
+ | | |
+ | | (12) second 'free' here; first 'free' was at (6)
+ |
+ { dg-end-multiline-output "" } */
+
+// TODO: range cases
+
+/* The call/return to this function shouldn't appear in the path. */
+
+void called_by_test_3 (void)
+{
+}
+
+void test_3 (void *ptr)
+{
+ free (ptr);
+ called_by_test_3 ();
+ free (ptr); /* { dg-warning "double-'free' of 'ptr'" } */
+}
+
+/* { dg-begin-multiline-output "" }
+ NN | free (ptr);
+ | ^~~~~~~~~~
+ 'test_3': events 1-2
+ |
+ | NN | free (ptr);
+ | | ^~~~~~~~~~
+ | | |
+ | | (1) first 'free' here
+ | NN | called_by_test_3 ();
+ | NN | free (ptr);
+ | | ~~~~~~~~~~
+ | | |
+ | | (2) second 'free' here; first 'free' was at (1)
+ |
+ { dg-end-multiline-output "" } */
--- /dev/null
+# Copyright (C) 2019 Free Software Foundation, Inc.
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+# GCC testsuite that uses the `dg.exp' driver.
+
+# Load support procs.
+load_lib gcc-dg.exp
+
+# If the analyzer has not been enabled, bail.
+if { ![check_effective_target_analyzer] } {
+ return
+}
+
+global DEFAULT_CFLAGS
+if [info exists DEFAULT_CFLAGS] then {
+ set save_default_cflags $DEFAULT_CFLAGS
+}
+
+# If a testcase doesn't have special options, use these.
+set DEFAULT_CFLAGS "-fanalyzer -fdiagnostics-path-format=separate-events -Wanalyzer-too-complex -fanalyzer-call-summaries"
+
+# Initialize `dg'.
+dg-init
+
+# Main loop.
+dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.\[cS\]]] \
+ "" $DEFAULT_CFLAGS
+
+# All done.
+dg-finish
+
+if [info exists save_default_cflags] {
+ set DEFAULT_CFLAGS $save_default_cflags
+} else {
+ unset DEFAULT_CFLAGS
+}
--- /dev/null
+#include <stdlib.h>
+
+extern void foo(void *ptrA, void *ptrB, void *ptrC) /* { dg-message "argument 1 of 'foo' must be non-null" } */
+ __attribute__((nonnull (1, 3)));
+
+extern void bar(void *ptrA, void *ptrB, void *ptrC) /* { dg-message "argument 1 of 'bar' must be non-null" } */
+ __attribute__((nonnull));
+
+// TODO: complain about NULL and possible NULL args
+// FIXME: ought to complain about NULL args
+
+void test_1 (void *p, void *q, void *r)
+{
+ foo(p, q, r);
+ foo(NULL, q, r);
+ foo(p, NULL, r);
+ foo(p, q, NULL);
+}
+
+void test_1a (void *q, void *r)
+{
+ void *p = NULL;
+ foo(p, q, r); /* { dg-warning "use of NULL 'p' where non-null expected" } */
+ /* { dg-message "argument 1 \\('p'\\) NULL where non-null expected" "" { target *-*-* } .-1 } */
+}
+
+void test_2 (void *p, void *q, void *r)
+{
+ bar(p, q, r);
+ bar(NULL, q, r);
+ bar(p, NULL, r);
+ bar(p, q, NULL);
+}
+
+void test_3 (void *q, void *r)
+{
+ void *p = malloc(1024); /* { dg-message "\\(1\\) this call could return NULL" } */
+
+ foo(p, q, r); /* { dg-warning "use of possibly-NULL 'p' where non-null expected" } */
+ /* { dg-message "argument 1 \\('p'\\) from \\(1\\) could be NULL where non-null expected" "" { target *-*-* } .-1 } */
+
+ foo(p, q, r);
+
+ free(p);
+}
+
+void test_4 (void *q, void *r)
+{
+ void *p = malloc(1024); /* { dg-message "\\(1\\) this call could return NULL" } */
+
+ bar(p, q, r); /* { dg-warning "use of possibly-NULL 'p' where non-null expected" } */
+ /* { dg-message "argument 1 \\('p'\\) from \\(1\\) could be NULL where non-null expected" "" { target *-*-* } .-1 } */
+
+ bar(p, q, r);
+
+ free(p);
+}
+
+/* Verify that we detect passing NULL to a __attribute__((nonnull)) function
+ when it's called via a function pointer. */
+
+typedef void (*bar_t)(void *ptrA, void *ptrB, void *ptrC);
+
+static bar_t __attribute__((noinline))
+get_bar (void)
+{
+ return bar;
+}
+
+void test_5 (void *q, void *r)
+{
+ void *p = malloc(1024); /* { dg-message "\\(1\\) this call could return NULL" } */
+ bar_t cb = get_bar ();
+ cb(p, q, r); /* { dg-warning "use of possibly-NULL 'p' where non-null expected" } */
+ /* { dg-message "argument 1 \\('p'\\) from \\(1\\) could be NULL where non-null expected" "" { target *-*-* } .-1 } */
+ /* TODO: do we want an event showing where cb is assigned "bar"? */
+
+ cb(p, q, r);
+
+ free(p);
+}
--- /dev/null
+/* { dg-additional-options "-fanalyzer-call-summaries" } */
+
+#include <stdlib.h>
+
+void calls_free (void *p)
+{
+ free (p); /* { dg-warning "double-'free' of 'p'" } */
+}
+
+void test (void *q)
+{
+ calls_free (q);
+ calls_free (q);
+}
--- /dev/null
+// TODO: run this test case at every optimization level
+/* { dg-additional-options "-O2" } */
+
+#include <stddef.h>
+#include "analyzer-decls.h"
+
+#define Z_NULL 0
+
+static void __attribute__((noinline))
+test_1_callee (void *p, void *q)
+{
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 exploded node" } */
+
+ __analyzer_eval (p == Z_NULL); /* { dg-warning "FALSE" } */
+ __analyzer_eval (p != Z_NULL); /* { dg-warning "TRUE" } */
+
+ __analyzer_eval (q == Z_NULL); /* { dg-warning "FALSE" } */
+ __analyzer_eval (q != Z_NULL); /* { dg-warning "TRUE" } */
+}
+
+void test_1 (void *p, void *q)
+{
+ if (p == Z_NULL || q == Z_NULL)
+ return;
+
+ test_1_callee (p, q);
+}
+
+static void __attribute__((noinline))
+test_2_callee (void *p, void *q)
+{
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 exploded node" } */
+
+ __analyzer_eval (p == Z_NULL); /* { dg-warning "FALSE" } */
+ __analyzer_eval (p != Z_NULL); /* { dg-warning "TRUE" } */
+
+ __analyzer_eval (q == Z_NULL); /* { dg-warning "FALSE" } */
+ __analyzer_eval (q != Z_NULL); /* { dg-warning "TRUE" } */
+}
+
+void test_2 (void *p, void *q)
+{
+ if (p != Z_NULL && q != Z_NULL)
+ test_2_callee (p, q);
+}
--- /dev/null
+/* { dg-additional-options "-fno-analyzer-state-merge" } */
+
+#include "analyzer-decls.h"
+
+static void only_called_when_flag_a_true (int i)
+{
+ __analyzer_eval (i == 42); /* { dg-warning "TRUE" } */
+}
+
+static void only_called_when_flag_b_true (int i)
+{
+ __analyzer_eval (i == 17); /* { dg-warning "TRUE" } */
+}
+
+int test_1 (int flag_a, int flag_b)
+{
+ int i = 17;
+
+ __analyzer_eval (flag_a); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (flag_b); /* { dg-warning "UNKNOWN" } */
+
+ if (flag_a)
+ {
+ __analyzer_eval (flag_a); /* { dg-warning "TRUE" } */
+ __analyzer_eval (flag_b); /* { dg-warning "UNKNOWN" } */
+ i = 42;
+ }
+
+ __analyzer_eval (flag_b); /* { dg-warning "UNKNOWN" } */
+
+ if (flag_a)
+ {
+ __analyzer_eval (flag_a); /* { dg-warning "TRUE" } */
+ __analyzer_eval (flag_b); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (i == 42); /* { dg-warning "TRUE" } */
+ __analyzer_eval (i == 17); /* { dg-warning "FALSE" } */
+ only_called_when_flag_a_true (i);
+ }
+ else
+ {
+ __analyzer_eval (flag_a); /* { dg-warning "FALSE" } */
+ __analyzer_eval (flag_b); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (i == 42); /* { dg-warning "FALSE" } */
+ __analyzer_eval (i == 17); /* { dg-warning "TRUE" } */
+ only_called_when_flag_b_true (i);
+ }
+}
--- /dev/null
+/* { dg-additional-options "-fno-analyzer-transitivity" } */
+#include "analyzer-decls.h"
+
+void test (int i, int j)
+{
+ if (i > 4)
+ {
+ __analyzer_eval (i > 4); /* { dg-warning "TRUE" } */
+ __analyzer_eval (i <= 4); /* { dg-warning "FALSE" } */
+ __analyzer_eval (i > 3); /* { dg-warning "TRUE" "" { xfail *-*-* } } */
+ /* { dg-bogus "UNKNOWN" "" { xfail *-*-* } .-1 } */
+
+ __analyzer_eval (i > 5); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (i != 3); /* { dg-warning "TRUE" "" { xfail *-*-* } } */
+ /* { dg-bogus "UNKNOWN" "" { xfail *-*-* } .-1 } */
+
+ __analyzer_eval (i == 3); /* { dg-warning "FALSE" "" { xfail *-*-* } } */
+ /* { dg-bogus "UNKNOWN" "" { xfail *-*-* } .-1 } */
+
+ __analyzer_eval (i != 4); /* { dg-warning "TRUE" } */
+ __analyzer_eval (i == 4); /* { dg-warning "FALSE" } */
+ __analyzer_eval (i == 5); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (i != 5); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (i < 5); /* { dg-warning "FALSE" } */
+ __analyzer_eval (i <= 5); /* { dg-warning "UNKNOWN" } */
+
+ /* Tests of transitivity. */
+ if (j < i)
+ {
+ __analyzer_eval (j < i); /* { dg-warning "TRUE" } */
+ __analyzer_eval (j <= 4); /* { dg-warning "UNKNOWN" } */
+ }
+ else
+ {
+ __analyzer_eval (j >= i); /* { dg-warning "TRUE" } */
+ __analyzer_eval (j > 4); /* { dg-warning "TRUE" "" { xfail *-*-* } } */
+ /* { dg-bogus "UNKNOWN" "" { xfail *-*-* } .-1 } */
+ }
+ }
+ else
+ {
+ __analyzer_eval (i > 4); /* { dg-warning "FALSE" } */
+ __analyzer_eval (i <= 4); /* { dg-warning "TRUE" } */
+ __analyzer_eval (i > 3); /* { dg-warning "UNKNOWN" } */
+
+ __analyzer_eval (i > 5); /* { dg-warning "FALSE" "" { xfail *-*-* } } */
+ /* { dg-bogus "UNKNOWN" "" { xfail *-*-* } .-1 } */
+ __analyzer_eval (i != 3); /* { dg-warning "UNKNOWN" } */
+
+ __analyzer_eval (i == 3); /* { dg-warning "UNKNOWN" } */
+
+ __analyzer_eval (i != 4); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (i == 4); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (i == 5); /* { dg-warning "FALSE" "" { xfail *-*-* } } */
+ /* { dg-bogus "UNKNOWN" "" { xfail *-*-* } .-1 } */
+ __analyzer_eval (i != 5); /* { dg-warning "TRUE" "" { xfail *-*-* } } */
+ /* { dg-bogus "UNKNOWN" "" { xfail *-*-* } .-1 } */
+ __analyzer_eval (i < 5); /* { dg-warning "TRUE" } */
+ __analyzer_eval (i <= 5); /* { dg-warning "TRUE" "" { xfail *-*-* } } */
+ /* { dg-bogus "UNKNOWN" "" { xfail *-*-* } .-1 } */
+ }
+}
+
+void test_2 (int i, int j, int k)
+{
+ if (i >= j)
+ {
+ __analyzer_eval (i == k); /* { dg-warning "UNKNOWN" } */
+ if (j >= k)
+ {
+ __analyzer_eval (i >= k); /* { dg-warning "TRUE" "" { xfail *-*-* } } */
+ /* { dg-bogus "UNKNOWN" "" { xfail *-*-* } .-1 } */
+ __analyzer_eval (i == k); /* { dg-warning "UNKNOWN" } */
+ if (k >= i)
+ __analyzer_eval (i == k); /* { dg-warning "TRUE" "" { xfail *-*-* } } */
+ /* { dg-bogus "UNKNOWN" "" { xfail *-*-* } .-1 } */
+ }
+ }
+}
+
+void test_3 (int flag, unsigned int i)
+{
+ if (!flag) {
+ return;
+ }
+
+ __analyzer_eval (flag); /* { dg-warning "TRUE" } */
+
+ if (i>0) {
+ __analyzer_eval (i > 0); /* { dg-warning "TRUE" } */
+ __analyzer_eval (flag); /* { dg-warning "TRUE" } */
+ } else {
+ __analyzer_eval (i <= 0); /* { dg-warning "TRUE" } */
+ __analyzer_eval (flag); /* { dg-warning "TRUE" } */
+ }
+
+ __analyzer_eval (flag); /* { dg-warning "TRUE" } */
+}
+
+void test_range_int_gt_lt (int i)
+{
+ if (i > 3)
+ if (i < 5)
+ __analyzer_eval (i == 4); /* { dg-warning "TRUE" "" { xfail *-*-* } } */
+ /* { dg-bogus "UNKNOWN" "" { xfail *-*-* } .-1 } */
+}
+
+void test_range_float_gt_lt (float f)
+{
+ if (f > 3)
+ if (f < 5)
+ __analyzer_eval (f == 4); /* { dg-warning "UNKNOWN" } */
+}
+
+void test_range_int_ge_lt (int i)
+{
+ if (i >= 4)
+ if (i < 5)
+ __analyzer_eval (i == 4); /* { dg-warning "TRUE" "" { xfail *-*-* } } */
+ /* { dg-bogus "UNKNOWN" "" { xfail *-*-* } .-1 } */
+}
+
+void test_range_float_ge_lt (float f)
+{
+ if (f >= 4)
+ if (f < 5)
+ __analyzer_eval (f == 4); /* { dg-warning "UNKNOWN" } */
+}
+
+void test_range_int_gt_le (int i)
+{
+ if (i > 3)
+ if (i <= 4)
+ __analyzer_eval (i == 4); /* { dg-warning "TRUE" "" { xfail *-*-* } } */
+ /* { dg-bogus "UNKNOWN" "" { xfail *-*-* } .-1 } */
+}
+
+void test_range_float_gt_le (float f)
+{
+ if (f > 3)
+ if (f <= 4)
+ __analyzer_eval (f == 4); /* { dg-warning "UNKNOWN" } */
+}
+
+void test_range_int_ge_le (int i)
+{
+ if (i >= 4)
+ if (i <= 4)
+ __analyzer_eval (i == 4); /* { dg-warning "TRUE" "" { xfail *-*-* } } */
+ /* { dg-bogus "UNKNOWN" "" { xfail *-*-* } .-1 } */
+}
+
+void test_range_float_ge_le (float f)
+{
+ if (f >= 4)
+ if (f <= 4)
+ __analyzer_eval (f == 4); /* { dg-warning "TRUE" "" { xfail *-*-* } } */
+ /* { dg-bogus "UNKNOWN" "" { xfail *-*-* } .-1 } */
+}
--- /dev/null
+/* { dg-additional-options "-fanalyzer-transitivity" } */
+#include "analyzer-decls.h"
+
+void test (int i, int j)
+{
+ if (i > 4)
+ {
+ __analyzer_eval (i > 4); /* { dg-warning "TRUE" } */
+ __analyzer_eval (i <= 4); /* { dg-warning "FALSE" } */
+ __analyzer_eval (i > 3); /* { dg-warning "TRUE" } */
+
+ __analyzer_eval (i > 5); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (i != 3); /* { dg-warning "TRUE" } */
+
+ __analyzer_eval (i == 3); /* { dg-warning "FALSE" } */
+
+ __analyzer_eval (i != 4); /* { dg-warning "TRUE" } */
+ __analyzer_eval (i == 4); /* { dg-warning "FALSE" } */
+ __analyzer_eval (i == 5); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (i != 5); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (i < 5); /* { dg-warning "FALSE" } */
+ __analyzer_eval (i <= 5); /* { dg-warning "UNKNOWN" } */
+
+ /* Tests of transitivity. */
+ if (j < i)
+ {
+ __analyzer_eval (j < i); /* { dg-warning "TRUE" } */
+ __analyzer_eval (j <= 4); /* { dg-warning "UNKNOWN" } */
+ }
+ else
+ {
+ __analyzer_eval (j >= i); /* { dg-warning "TRUE" } */
+ __analyzer_eval (j > 4); /* { dg-warning "TRUE" } */
+ }
+ }
+ else
+ {
+ __analyzer_eval (i > 4); /* { dg-warning "FALSE" } */
+ __analyzer_eval (i <= 4); /* { dg-warning "TRUE" } */
+ __analyzer_eval (i > 3); /* { dg-warning "UNKNOWN" } */
+
+ __analyzer_eval (i > 5); /* { dg-warning "FALSE" } */
+ __analyzer_eval (i != 3); /* { dg-warning "UNKNOWN" } */
+
+ __analyzer_eval (i == 3); /* { dg-warning "UNKNOWN" } */
+
+ __analyzer_eval (i != 4); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (i == 4); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (i == 5); /* { dg-warning "FALSE" } */
+ __analyzer_eval (i != 5); /* { dg-warning "TRUE" } */
+ __analyzer_eval (i < 5); /* { dg-warning "TRUE" } */
+ __analyzer_eval (i <= 5); /* { dg-warning "TRUE" } */
+ }
+}
+
+void test_2 (int i, int j, int k)
+{
+ if (i >= j)
+ {
+ __analyzer_eval (i == k); /* { dg-warning "UNKNOWN" } */
+ if (j >= k)
+ {
+ __analyzer_eval (i >= k); /* { dg-warning "TRUE" } */
+ __analyzer_eval (i == k); /* { dg-warning "UNKNOWN" } */
+ if (k >= i)
+ __analyzer_eval (i == k); /* { dg-warning "TRUE" } */
+ }
+ }
+}
+
+void test_3 (int flag, unsigned int i)
+{
+ if (!flag) {
+ return;
+ }
+
+ __analyzer_eval (flag); /* { dg-warning "TRUE" } */
+
+ if (i>0) {
+ __analyzer_eval (i > 0); /* { dg-warning "TRUE" } */
+ __analyzer_eval (flag); /* { dg-warning "TRUE" } */
+ } else {
+ __analyzer_eval (i <= 0); /* { dg-warning "TRUE" } */
+ __analyzer_eval (flag); /* { dg-warning "TRUE" } */
+ }
+
+ __analyzer_eval (flag); /* { dg-warning "TRUE" } */
+}
+
+void test_range_int_gt_lt (int i)
+{
+ if (i > 3)
+ if (i < 5)
+ __analyzer_eval (i == 4); /* { dg-warning "TRUE" } */
+}
+
+void test_range_float_gt_lt (float f)
+{
+ if (f > 3)
+ if (f < 5)
+ __analyzer_eval (f == 4); /* { dg-warning "UNKNOWN" } */
+}
+
+void test_range_int_ge_lt (int i)
+{
+ if (i >= 4)
+ if (i < 5)
+ __analyzer_eval (i == 4); /* { dg-warning "TRUE" } */
+}
+
+void test_range_float_ge_lt (float f)
+{
+ if (f >= 4)
+ if (f < 5)
+ __analyzer_eval (f == 4); /* { dg-warning "UNKNOWN" } */
+}
+
+void test_range_int_gt_le (int i)
+{
+ if (i > 3)
+ if (i <= 4)
+ __analyzer_eval (i == 4); /* { dg-warning "TRUE" } */
+}
+
+void test_range_float_gt_le (float f)
+{
+ if (f > 3)
+ if (f <= 4)
+ __analyzer_eval (f == 4); /* { dg-warning "UNKNOWN" } */
+}
+
+void test_range_int_ge_le (int i)
+{
+ if (i >= 4)
+ if (i <= 4)
+ __analyzer_eval (i == 4); /* { dg-warning "TRUE" } */
+}
+
+void test_range_float_ge_le (float f)
+{
+ if (f >= 4)
+ if (f <= 4)
+ __analyzer_eval (f == 4); /* { dg-warning "TRUE" } */
+}
--- /dev/null
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+#include "analyzer-decls.h"
+
+struct foo
+{
+ int i;
+};
+
+/* Fields of a local. */
+
+void test_1 (void)
+{
+ struct foo f;
+ f.i = 1;
+ __analyzer_eval (f.i == 1); /* { dg-warning "TRUE" } */
+}
+
+/* Fields of a param. */
+
+void test_2 (struct foo f)
+{
+ __analyzer_eval (f.i == 2); /* { dg-warning "UNKNOWN" } */
+ f.i = 2;
+ __analyzer_eval (f.i == 2); /* { dg-warning "TRUE" } */
+}
+
+/* Fields of a param ptr. */
+
+void test_3 (struct foo *f)
+{
+ __analyzer_eval (f->i == 3); /* { dg-warning "UNKNOWN" } */
+ f->i = 3;
+ __analyzer_eval (f->i == 3); /* { dg-warning "TRUE" } */
+}
+
+/* Fields of a global ptr. */
+struct foo *global_foo_ptr;
+
+void test_3a (void)
+{
+ struct foo *tmp = global_foo_ptr;
+ __analyzer_eval (global_foo_ptr->i == 3); /* { dg-warning "UNKNOWN" } */
+ global_foo_ptr->i = 3;
+ __analyzer_eval (global_foo_ptr->i == 3); /* { dg-warning "TRUE" } */
+}
+
+/* Pointer to a local. */
+
+void test_4 (void)
+{
+ int i;
+ int *p = &i;
+ i = 1;
+ *p = 2;
+ __analyzer_eval (i == 2); /* { dg-warning "TRUE" } */
+}
+
+/* Local array. */
+
+void test_5 (void)
+{
+ int a[10];
+ a[3] = 5; /* ARRAY_REF. */
+ __analyzer_eval (a[3] == 5); /* { dg-warning "TRUE" } */
+}
+
+/* Local array, but using an unknown index. */
+
+void test_5a (int idx)
+{
+ int a[10];
+ a[idx] = 5; /* ARRAY_REF. */
+ __analyzer_eval (a[idx] == 5); /* { dg-warning "TRUE" } */
+}
+
+/* Array passed in as a param. */
+
+void test_6 (int a[10])
+{
+ /* POINTER_PLUS_EXPR then a MEM_REF. */
+ __analyzer_eval (a[3] == 42); /* { dg-warning "UNKNOWN" } */
+ a[3] = 42;
+ __analyzer_eval (a[3] == 42); /* { dg-warning "TRUE" } */
+}
+
+/* Array passed in as a param ptr. */
+
+void test_7 (int *a)
+{
+ __analyzer_eval (a[3] == 42); /* { dg-warning "UNKNOWN" } */
+ a[3] = 42;
+ __analyzer_eval (a[3] == 42); /* { dg-warning "TRUE" } */
+}
+
+/* Globals. */
+
+int glob_a;
+
+void test_10 (void)
+{
+ __analyzer_eval (glob_a == 42); /* { dg-warning "UNKNOWN" } */
+ glob_a = 42;
+ __analyzer_eval (glob_a == 42); /* { dg-warning "TRUE" } */
+}
+
+/* malloc. */
+
+void test_11 (void)
+{
+ void *p = malloc (256);
+ void *q = malloc (256);
+
+ /* malloc results should be unique. */
+ __analyzer_eval (p == q); /* { dg-warning "FALSE" } */
+ __analyzer_eval (p != q); /* { dg-warning "TRUE" } */
+ __analyzer_eval (p <= q); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (p >= q); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (p < q); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (p > q); /* { dg-warning "UNKNOWN" } */
+
+ __analyzer_eval (p == p); /* { dg-warning "TRUE" } */
+ __analyzer_eval (p != p); /* { dg-warning "FALSE" } */
+ __analyzer_eval (p <= p); /* { dg-warning "TRUE" } */
+ __analyzer_eval (p >= p); /* { dg-warning "TRUE" } */
+ __analyzer_eval (p < p); /* { dg-warning "FALSE" } */
+ __analyzer_eval (p > p); /* { dg-warning "FALSE" } */
+
+ free (p);
+ free (q);
+ // TODO: mark freed memory as freed
+ //__analyzer_break ();
+}
+
+/* alloca. */
+
+void test_12 (void)
+{
+ void *p = alloca (256);
+ void *q = alloca (256);
+
+ /* alloca results should be unique. */
+ __analyzer_eval (p == q); /* { dg-warning "FALSE" } */
+
+ // FIXME: complain about uses of poisoned values
+}
+
+/* Use of uninit value. */
+int test_12a (void)
+{
+ int i;
+ return i; // FIXME: do we see the return stmt?
+}
+
+void test_12b (void *p, void *q)
+{
+ __analyzer_eval (p == q); /* { dg-warning "UNKNOWN" } */
+}
+
+int test_12c (void)
+{
+ int i;
+ int j;
+
+ j = i; // FIXME: should complain about this
+
+ return j;
+}
+
+struct coord
+{
+ int x;
+ int y;
+};
+
+int test_12d (struct coord c)
+{
+ struct coord d;
+ d = c;
+ __analyzer_eval (d.x == c.x); /* { dg-warning "TRUE" "" { xfail *-*-* } } */
+ /* { dg-warning "UNKNOWN" "" { target *-*-* } .-1 } */
+ /* TODO(xfail): c and d share the same unknown value of type "coord", but
+ attempts to access the fields lead to different unknown values. */
+
+ __analyzer_eval (d.y == c.y); /* { dg-warning "TRUE" "" { xfail *-*-* } } */
+ /* { dg-warning "UNKNOWN" "" { target *-*-* } .-1 } */
+ // TODO(xfail): likewise
+
+ __analyzer_eval (d.x == d.y); /* { dg-warning "UNKNOWN" } */
+ /* d and c share an unknown value of type "struct coord".
+ But d.x and d.y should be different unknown values (although they inherit
+ from d's region). */
+}
+
+/* Nested structs. */
+
+struct outer
+{
+ struct middle {
+ struct inner {
+ float f;
+ } in;
+ } mid;
+};
+
+void test_13 (struct outer *o)
+{
+ __analyzer_eval (o->mid.in.f == 0.f); /* { dg-warning "UNKNOWN" } */
+ o->mid.in.f = 0.f;
+ __analyzer_eval (o->mid.in.f == 0.f); /* { dg-warning "TRUE" } */
+}
+
+void test_14 (struct outer o)
+{
+ __analyzer_eval (o.mid.in.f == 0.f); /* { dg-warning "UNKNOWN" } */
+ o.mid.in.f = 0.f;
+ __analyzer_eval (o.mid.in.f == 0.f); /* { dg-warning "TRUE" } */
+}
+
+void test_15 (const char *str)
+{
+ char ch = str[0];
+ __analyzer_eval (ch == 'a'); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (ch == str[0]); /* { dg-warning "TRUE" "" { xfail *-*-* } } */
+ /* { dg-warning "UNKNOWN" "" { target *-*-* } .-1 } */
+ // TODO(xfail)
+
+ ch = 'a';
+ __analyzer_eval (ch == 'a'); /* { dg-warning "TRUE" } */
+ __analyzer_eval (str[0] == 'a'); /* { dg-warning "UNKNOWN" } */
+}
+
+void test_16 (void)
+{
+ const char *msg = "hello world";
+
+ __analyzer_eval (msg != NULL); /* { dg-warning "TRUE" } */
+
+ __analyzer_eval (msg[0] == 'h'); /* { dg-warning "TRUE" "" { xfail *-*-* } } */
+ /* { dg-warning "UNKNOWN" "" { target *-*-* } .-1 } */
+ // TODO(xfail)
+
+ __analyzer_eval (msg[1] == 'e'); /* { dg-warning "TRUE" "" { xfail *-*-* } } */
+ /* { dg-warning "UNKNOWN" "" { target *-*-* } .-1 } */
+ // TODO(xfail)
+
+ __analyzer_eval (strlen (msg) == 11); /* { dg-warning "TRUE" } */
+}
+
+static const char *__attribute__((noinline))
+get_hello_world (void)
+{
+ return "hello world";
+}
+
+void test_16_alt (void)
+{
+ const char *msg = get_hello_world ();
+
+ __analyzer_eval (msg != NULL); /* { dg-warning "TRUE" } */
+
+ __analyzer_eval (msg[0] == 'h'); /* { dg-warning "TRUE" "" { xfail *-*-* } } */
+ /* { dg-warning "UNKNOWN" "" { target *-*-* } .-1 } */
+ // TODO(xfail)
+
+ __analyzer_eval (msg[1] == 'e'); /* { dg-warning "TRUE" "" { xfail *-*-* } } */
+ /* { dg-warning "UNKNOWN" "" { target *-*-* } .-1 } */
+ // TODO(xfail)
+
+ __analyzer_eval (strlen (msg) == 11); /* { dg-warning "TRUE" } */
+}
+
+void test_16a (const char *msg)
+{
+ __analyzer_eval (strlen (msg) == 11); /* { dg-warning "UNKNOWN" } */
+}
+
+void test_16b (const char *msg)
+{
+ __analyzer_eval (strlen (msg) == strlen (msg)); /* { dg-warning "TRUE" "" { xfail *-*-* } } */
+ /* { dg-warning "UNKNOWN" "" { target *-*-* } .-1 } */
+ // TODO(xfail)
+}
+
+extern int unknown_result (void);
+
+void test_16c (int i)
+{
+ int j;
+
+ j = i;
+ __analyzer_eval (j == i); /* { dg-warning "TRUE" } */
+
+ j = unknown_result ();
+ __analyzer_eval (j == i); /* { dg-warning "UNKNOWN" } */
+}
+
+void test_16c_a (void)
+{
+ int i, j;
+
+ i = unknown_result ();
+ j = unknown_result ();
+ __analyzer_eval (i == j); /* { dg-warning "UNKNOWN" } */
+}
+
+int global_int_16d;
+
+void test_16d (int i)
+{
+ global_int_16d = i;
+ __analyzer_eval (global_int_16d == i); /* { dg-warning "TRUE" } */
+
+ global_int_16d = unknown_result ();
+ __analyzer_eval (global_int_16d == i); /* { dg-warning "UNKNOWN" } */
+}
+
+extern void might_write_to (int *);
+
+void test_16e (int i)
+{
+ int j;
+
+ j = i;
+ __analyzer_eval (j == i); /* { dg-warning "TRUE" } */
+
+ might_write_to (&j);
+ __analyzer_eval (j == i); /* { dg-warning "UNKNOWN" "" { xfail *-*-* } } */
+ /* { dg-warning "TRUE" "" { target *-*-* } .-1 } */
+ // TODO(xfail)
+}
+
+/* TODO: and more complicated graph-like examples, where anything that's
+ reachable from the pointer might be modified. */
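+
+/* A hypothetical sketch (added purely as illustration, not asserting the
+   current analyzer behavior) of the kind of graph-like case the TODO above
+   has in mind: "struct node", "might_write_to_graph" and "test_16f" are
+   names invented here.  After handing might_write_to_graph a pointer to one
+   node, every field reachable from that pointer could have been written to,
+   so the earlier known values ideally ought to become UNKNOWN.  The evals
+   are left commented out rather than expecting specific results.  */
+
+struct node
+{
+  struct node *next;
+  int val;
+};
+
+extern void might_write_to_graph (struct node *);
+
+void test_16f (void)
+{
+  struct node a, b;
+  a.next = &b;
+  b.next = NULL;
+  a.val = 1;
+  b.val = 2;
+
+  might_write_to_graph (&a);
+
+  /* Ideally both of these would be UNKNOWN, since b is reachable from a:  */
+  //__analyzer_eval (a.val == 1);
+  //__analyzer_eval (b.val == 2);
+}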
+
+void test_17 (int i)
+{
+ int j = 42;
+ __analyzer_eval (j == 42); /* { dg-warning "TRUE" } */
+
+ __analyzer_eval (i == j); /* { dg-warning "UNKNOWN" } */
+ i = j;
+ __analyzer_eval (i == j); /* { dg-warning "TRUE" } */
+}
+
+void test_18 (int i)
+{
+ int j;
+ __analyzer_eval (i == 42); /* { dg-warning "UNKNOWN" } */
+
+ j = i;
+
+ __analyzer_eval (i == j); /* { dg-warning "TRUE" } */
+ __analyzer_eval (i >= j); /* { dg-warning "TRUE" } */
+ __analyzer_eval (i <= j); /* { dg-warning "TRUE" } */
+
+ __analyzer_eval (i != j); /* { dg-warning "FALSE" } */
+ __analyzer_eval (i > j); /* { dg-warning "FALSE" } */
+ __analyzer_eval (i < j); /* { dg-warning "FALSE" } */
+}
+
+void test_19 (void)
+{
+ int i, j;
+ /* Compare two uninitialized locals. */
+ __analyzer_eval (i == j); /* { dg-warning "UNKNOWN" } */
+}
+
+void test_20 (int i, int j)
+{
+ __analyzer_eval (i + 1); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (i + j); /* { dg-warning "UNKNOWN" } */
+
+ __analyzer_eval (i - 1); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (i - j); /* { dg-warning "UNKNOWN" } */
+
+ __analyzer_eval (i * 2); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (i * j); /* { dg-warning "UNKNOWN" } */
+
+ __analyzer_eval (i / 2); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (i / j); /* { dg-warning "UNKNOWN" } */
+
+ __analyzer_eval (i % 2); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (i % j); /* { dg-warning "UNKNOWN" } */
+
+ __analyzer_eval (i & 1); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (i & j); /* { dg-warning "UNKNOWN" } */
+
+ __analyzer_eval (i | 1); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (i | j); /* { dg-warning "UNKNOWN" } */
+
+ __analyzer_eval (i ^ 1); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (i ^ j); /* { dg-warning "UNKNOWN" } */
+
+ __analyzer_eval (i >> 1); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (i >> j); /* { dg-warning "UNKNOWN" } */
+
+ __analyzer_eval (i << 1); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (i << j); /* { dg-warning "UNKNOWN" } */
+
+ __analyzer_eval (i && 0); /* { dg-warning "FALSE" } */
+ __analyzer_eval (i && 1); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (i && j); /* { dg-warning "UNKNOWN" } */
+
+ __analyzer_eval (i || 0); /* { dg-warning "UNKNOWN" } */
+
+ __analyzer_eval (i || 1); /* { dg-warning "TRUE" } */
+ __analyzer_eval (i || j); /* { dg-warning "UNKNOWN" } */
+
+ __analyzer_eval (~i); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (-i); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (+i); /* { dg-warning "UNKNOWN" } */
+
+ /* Anything added above should be added to the next function also. */
+}
+
+/* As above, but where the values are known due to the region model,
+ but not known to GCC's regular optimizations (folding and SSA). */
+
+void test_21 (void)
+{
+ int i, j, zero;
+ int *pi = &i;
+ int *pj = &j;
+ int *pzero = &zero;
+ *pi = 5;
+ *pj = 3;
+ *pzero = 0;
+
+ __analyzer_eval (i + j == 8); /* { dg-warning "TRUE" } */
+ __analyzer_eval (i - j == 2); /* { dg-warning "TRUE" } */
+ __analyzer_eval (i * j == 15); /* { dg-warning "TRUE" } */
+ __analyzer_eval (i / j == 1); /* { dg-warning "TRUE" } */
+ __analyzer_eval (i % j == 2); /* { dg-warning "TRUE" } */
+
+ /* Division by zero. */
+ // TODO: should we warn for this?
+ __analyzer_eval (i / zero); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (i % zero); /* { dg-warning "UNKNOWN" } */
+
+ __analyzer_eval ((i & 1) == (5 & 1)); /* { dg-warning "TRUE" } */
+ __analyzer_eval ((i & j) == (5 & 3)); /* { dg-warning "TRUE" } */
+ __analyzer_eval ((i | 1) == (5 | 1)); /* { dg-warning "TRUE" } */
+ __analyzer_eval ((i | j) == (5 | 3)); /* { dg-warning "TRUE" } */
+ __analyzer_eval ((i ^ 1) == (5 ^ 1)); /* { dg-warning "TRUE" } */
+ __analyzer_eval ((i ^ j) == (5 ^ 3)); /* { dg-warning "TRUE" } */
+ __analyzer_eval ((i >> 1) == (5 >> 1)); /* { dg-warning "TRUE" } */
+ __analyzer_eval ((i >> j) == (5 >> 3)); /* { dg-warning "TRUE" } */
+ __analyzer_eval ((i << 1) == (5 << 1)); /* { dg-warning "TRUE" } */
+ __analyzer_eval ((i << j) == (5 << 3)); /* { dg-warning "TRUE" } */
+ __analyzer_eval (i && 0); /* { dg-warning "FALSE" } */
+ __analyzer_eval (i && 1); /* { dg-warning "TRUE" } */
+ __analyzer_eval (i && j); /* { dg-warning "TRUE" } */
+
+ __analyzer_eval (i || 0); /* { dg-warning "TRUE" } */
+ __analyzer_eval (i || 1); /* { dg-warning "TRUE" } */
+ __analyzer_eval (i || j); /* { dg-warning "TRUE" } */
+
+ __analyzer_eval (~i == ~5); /* { dg-warning "TRUE" } */
+ __analyzer_eval (-i == -5); /* { dg-warning "TRUE" } */
+ __analyzer_eval (+i == +5); /* { dg-warning "TRUE" } */
+}
+
+void test_22 (int i, int j)
+{
+ __analyzer_eval (i + j == i + j); /* { dg-warning "TRUE" } */
+ // FIXME: this is getting folded; can we build a non-folded equivalent?
+}
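+
+/* A hypothetical sketch of one way the FIXME above might be approached:
+   reuse the pointer-indirection trick from test_21 to try to keep the
+   additions away from GCC's folding ("test_22a" is a name invented here).
+   Whether this is actually enough to avoid folding is not asserted, so the
+   eval is left commented out rather than expecting a specific result.  */
+
+void test_22a (int i, int j)
+{
+  int a, b;
+  int *pa = &a;
+  int *pb = &b;
+
+  /* Copy the unknown params through pointers, as test_21 does for
+     constants, so the later sums are built from memory loads rather
+     than directly from the parameters.  */
+  *pa = i;
+  *pb = j;
+
+  /* Ideally this would be TRUE, as both sides are the same symbolic sum:  */
+  //__analyzer_eval (*pa + *pb == *pa + *pb);
+}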
+
+void test_23 (struct foo *f, struct foo *g)
+{
+ int i, j, k;
+ i = f->i + g->i;
+ j = f->i + g->i;
+ k = f->i * g->i;
+ __analyzer_eval (i == j); /* { dg-warning "TRUE" "" { xfail *-*-* } } */
+ /* { dg-warning "UNKNOWN" "" { target *-*-* } .-1 } */
+ /* TODO(xfail): we'd need to record that the two unknown values are both
+ the sum of the two unknown input values (and thus are the same); not
+ yet sure if we want arbitrary expression trees in the representation
+ (analysis termination concerns). */
+
+ __analyzer_eval (i == k); /* { dg-warning "UNKNOWN" } */
+}
+
+void test_24 (struct foo *f)
+{
+ struct foo g;
+ g.i = 42;
+ __analyzer_eval (g.i == 42); /* { dg-warning "TRUE" } */
+
+ /* Overwriting a whole struct should invalidate our knowledge
+ about fields within it. */
+ g = *f;
+ __analyzer_eval (g.i == 42); /* { dg-warning "UNKNOWN" "" { xfail *-*-* } } */
+ /* { dg-warning "TRUE" "" { target *-*-* } .-1 } */
+ // TODO(xfail)
+}
+
+void test_25 (struct foo *f)
+{
+ struct foo g;
+ g.i = 42;
+ f->i = 43;
+ __analyzer_eval (f->i == 43); /* { dg-warning "TRUE" } */
+ __analyzer_eval (g.i == 42); /* { dg-warning "TRUE" } */
+
+ /* Overwriting a whole struct where we know things about the
+ source value should update our knowledge about fields within
+ the dest value. */
+ g = *f;
+ __analyzer_eval (g.i == 43); /* { dg-warning "TRUE" "" { xfail *-*-* } } */
+ /* { dg-warning "FALSE" "" { target *-*-* } .-1 } */
+ // TODO(xfail)
+}
+
+void test_26 (struct coord *p, struct coord *q)
+{
+ p->x = 42;
+ q->y = 17;
+ __analyzer_eval (p->x == 42); /* { dg-warning "TRUE" } */
+ __analyzer_eval (p->y); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (q->x); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (q->y == 17); /* { dg-warning "TRUE" } */
+
+ /* Overwriting a whole struct where we know some things about the
+ source value should update our knowledge about fields within
+ the dest value. */
+ *p = *q;
+ __analyzer_eval (p->x); /* { dg-warning "UNKNOWN" "" { xfail *-*-* } } */
+ /* { dg-warning "TRUE" "" { target *-*-* } .-1 } */
+ // TODO(xfail): should have been overwritten
+ __analyzer_eval (p->y == 17); /* { dg-warning "TRUE" "" { xfail *-*-* } } */
+ /* { dg-warning "UNKNOWN" "" { target *-*-* } .-1 } */
+ // TODO(xfail): should have been overwritten with q->y
+
+ __analyzer_eval (q->x); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (q->y == 17); /* { dg-warning "TRUE" } */
+}
+
+void test_27 (struct coord *p)
+{
+ memset (p, 0, sizeof (struct coord));
+ __analyzer_eval (p->x == 0); /* { dg-warning "TRUE" "" { xfail *-*-* } } */
+ /* { dg-warning "UNKNOWN" "" { target *-*-* } .-1 } */
+ // TODO(xfail):
+ __analyzer_eval (p->y == 0); /* { dg-warning "TRUE" "" { xfail *-*-* } } */
+ /* { dg-warning "UNKNOWN" "" { target *-*-* } .-1 } */
+ // TODO(xfail):
+}
+
+void test_28 (struct coord *p)
+{
+ memset (p, 0, sizeof (struct coord) * 10);
+ __analyzer_eval (p[0].x == 0); /* { dg-warning "TRUE" "" { xfail *-*-* } } */
+ /* { dg-warning "UNKNOWN" "" { target *-*-* } .-1 } */
+ // TODO(xfail):
+ __analyzer_eval (p[0].y == 0); /* { dg-warning "TRUE" "" { xfail *-*-* } } */
+ /* { dg-warning "UNKNOWN" "" { target *-*-* } .-1 } */
+ // TODO(xfail):
+
+ __analyzer_eval (p[9].x == 0); /* { dg-warning "TRUE" "" { xfail *-*-* } } */
+ /* { dg-warning "UNKNOWN" "" { target *-*-* } .-1 } */
+ // TODO(xfail):
+ __analyzer_eval (p[9].y == 0); /* { dg-warning "TRUE" "" { xfail *-*-* } } */
+ /* { dg-warning "UNKNOWN" "" { target *-*-* } .-1 } */
+ // TODO(xfail):
+
+ __analyzer_eval (p[10].x == 0); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (p[10].y == 0); /* { dg-warning "UNKNOWN" } */
+}
+
+void test_29 (struct coord *p)
+{
+ struct coord *q;
+
+ p[0].x = 100024;
+ p[0].y = 100025;
+
+ p[7].x = 107024;
+ p[7].y = 107025;
+
+ p[9].x = 109024;
+ p[9].y = 109025;
+
+ __analyzer_eval (p[0].x == 100024); /* { dg-warning "TRUE" } */
+ __analyzer_eval (p[0].y == 100025); /* { dg-warning "TRUE" } */
+
+ __analyzer_eval (p[7].x == 107024); /* { dg-warning "TRUE" } */
+ __analyzer_eval (p[7].y == 107025); /* { dg-warning "TRUE" } */
+
+ __analyzer_eval (p[9].x == 109024); /* { dg-warning "TRUE" } */
+ __analyzer_eval (p[9].y == 109025); /* { dg-warning "TRUE" } */
+
+ __analyzer_eval (p[10].x == 0); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (p[10].y == 0); /* { dg-warning "UNKNOWN" } */
+
+ q = &p[7];
+
+ __analyzer_eval (q->x == 107024); /* { dg-warning "TRUE" } */
+ __analyzer_eval (q->y == 107025); /* { dg-warning "TRUE" } */
+
+ __analyzer_eval (q[2].x == 109024); /* { dg-warning "TRUE" } */
+ __analyzer_eval (q[2].y == 109025); /* { dg-warning "TRUE" } */
+
+ q += 2;
+
+ __analyzer_eval (q->x == 109024); /* { dg-warning "TRUE" } */
+ __analyzer_eval (q->y == 109025); /* { dg-warning "TRUE" } */
+
+ __analyzer_eval (q[-2].x == 107024); /* { dg-warning "TRUE" } */
+ __analyzer_eval (q[-2].y == 107025); /* { dg-warning "TRUE" } */
+
+ q -= 2;
+
+ __analyzer_eval (q->x == 107024); /* { dg-warning "TRUE" } */
+ __analyzer_eval (q->y == 107025); /* { dg-warning "TRUE" } */
+}
+
+void test_29a (struct coord p[])
+{
+ struct coord *q;
+
+ p[0].x = 100024;
+ p[0].y = 100025;
+
+ p[7].x = 107024;
+ p[7].y = 107025;
+
+ p[9].x = 109024;
+ p[9].y = 109025;
+
+ __analyzer_eval (p[0].x == 100024); /* { dg-warning "TRUE" } */
+ __analyzer_eval (p[0].y == 100025); /* { dg-warning "TRUE" } */
+
+ __analyzer_eval (p[7].x == 107024); /* { dg-warning "TRUE" } */
+ __analyzer_eval (p[7].y == 107025); /* { dg-warning "TRUE" } */
+
+ __analyzer_eval (p[9].x == 109024); /* { dg-warning "TRUE" } */
+ __analyzer_eval (p[9].y == 109025); /* { dg-warning "TRUE" } */
+
+ __analyzer_eval (p[10].x == 0); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (p[10].y == 0); /* { dg-warning "UNKNOWN" } */
+
+ q = &p[7];
+
+ __analyzer_eval (q->x == 107024); /* { dg-warning "TRUE" } */
+ __analyzer_eval (q->y == 107025); /* { dg-warning "TRUE" } */
+
+ __analyzer_eval (q[2].x == 109024); /* { dg-warning "TRUE" } */
+ __analyzer_eval (q[2].y == 109025); /* { dg-warning "TRUE" } */
+
+ q += 2;
+
+ __analyzer_eval (q->x == 109024); /* { dg-warning "TRUE" } */
+ __analyzer_eval (q->y == 109025); /* { dg-warning "TRUE" } */
+
+ __analyzer_eval (q[-2].x == 107024); /* { dg-warning "TRUE" } */
+ __analyzer_eval (q[-2].y == 107025); /* { dg-warning "TRUE" } */
+
+ q -= 2;
+
+ __analyzer_eval (q->x == 107024); /* { dg-warning "TRUE" } */
+ __analyzer_eval (q->y == 107025); /* { dg-warning "TRUE" } */
+}
+
+void test_29b (void)
+{
+ struct coord p[11];
+ struct coord *q;
+
+ p[0].x = 100024;
+ p[0].y = 100025;
+
+ p[7].x = 107024;
+ p[7].y = 107025;
+
+ p[9].x = 109024;
+ p[9].y = 109025;
+
+ __analyzer_eval (p[0].x == 100024); /* { dg-warning "TRUE" } */
+ __analyzer_eval (p[0].y == 100025); /* { dg-warning "TRUE" } */
+
+ __analyzer_eval (p[7].x == 107024); /* { dg-warning "TRUE" } */
+ __analyzer_eval (p[7].y == 107025); /* { dg-warning "TRUE" } */
+
+ __analyzer_eval (p[9].x == 109024); /* { dg-warning "TRUE" } */
+ __analyzer_eval (p[9].y == 109025); /* { dg-warning "TRUE" } */
+
+ __analyzer_eval (p[10].x == 0); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (p[10].y == 0); /* { dg-warning "UNKNOWN" } */
+
+ q = &p[7];
+
+ __analyzer_eval (q->x == 107024); /* { dg-warning "TRUE" } */
+ __analyzer_eval (q->y == 107025); /* { dg-warning "TRUE" } */
+
+ __analyzer_eval (q[2].x == 109024); /* { dg-warning "TRUE" } */
+ __analyzer_eval (q[2].y == 109025); /* { dg-warning "TRUE" } */
+
+ q += 2;
+
+ __analyzer_eval (q->x == 109024); /* { dg-warning "TRUE" } */
+ __analyzer_eval (q->y == 109025); /* { dg-warning "TRUE" } */
+
+ __analyzer_eval (q[-2].x == 107024); /* { dg-warning "TRUE" } */
+ __analyzer_eval (q[-2].y == 107025); /* { dg-warning "TRUE" } */
+
+ q -= 2;
+
+ __analyzer_eval (q->x == 107024); /* { dg-warning "TRUE" } */
+ __analyzer_eval (q->y == 107025); /* { dg-warning "TRUE" } */
+}
+
+void test_29c (int len)
+{
+ struct coord p[len];
+ struct coord *q;
+
+ p[0].x = 100024;
+ p[0].y = 100025;
+
+ p[7].x = 107024;
+ p[7].y = 107025;
+
+ p[9].x = 109024;
+ p[9].y = 109025;
+
+ __analyzer_eval (p[0].x == 100024); /* { dg-warning "TRUE" } */
+ __analyzer_eval (p[0].y == 100025); /* { dg-warning "TRUE" } */
+
+ __analyzer_eval (p[7].x == 107024); /* { dg-warning "TRUE" } */
+ __analyzer_eval (p[7].y == 107025); /* { dg-warning "TRUE" } */
+
+ __analyzer_eval (p[9].x == 109024); /* { dg-warning "TRUE" } */
+ __analyzer_eval (p[9].y == 109025); /* { dg-warning "TRUE" } */
+
+ __analyzer_eval (p[10].x == 0); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (p[10].y == 0); /* { dg-warning "UNKNOWN" } */
+
+ q = &p[7];
+
+ __analyzer_eval (q->x == 107024); /* { dg-warning "TRUE" } */
+ __analyzer_eval (q->y == 107025); /* { dg-warning "TRUE" } */
+
+ __analyzer_eval (q[2].x == 109024); /* { dg-warning "TRUE" } */
+ __analyzer_eval (q[2].y == 109025); /* { dg-warning "TRUE" } */
+
+ q += 2;
+
+ __analyzer_eval (q->x == 109024); /* { dg-warning "TRUE" } */
+ __analyzer_eval (q->y == 109025); /* { dg-warning "TRUE" } */
+
+ __analyzer_eval (q[-2].x == 107024); /* { dg-warning "TRUE" } */
+ __analyzer_eval (q[-2].y == 107025); /* { dg-warning "TRUE" } */
+
+ q -= 2;
+
+ __analyzer_eval (q->x == 107024); /* { dg-warning "TRUE" } */
+ __analyzer_eval (q->y == 107025); /* { dg-warning "TRUE" } */
+}
+
+void test_30 (void *ptr)
+{
+ struct coord *p = (struct coord *)ptr;
+ struct coord *q = (struct coord *)ptr;
+
+ p->x = 42;
+
+ __analyzer_eval (p->x == 42); /* { dg-warning "TRUE" } */
+ __analyzer_eval (q->x == 42); /* { dg-warning "TRUE" } */
+}
+
+void test_31 (unsigned i)
+{
+ int j, k;
+
+ j = i < 100 ? i : 100; /* MIN_EXPR. */
+ k = i < 100 ? 100 : i; /* MAX_EXPR. */
+}
+
+enum color
+{
+ RED,
+ GREEN,
+ BLUE
+};
+
+void test_32 (enum color c)
+{
+ __analyzer_eval (c == GREEN); /* { dg-warning "UNKNOWN" } */
+
+ c = RED;
+
+ __analyzer_eval (c == RED); /* { dg-warning "TRUE" } */
+ __analyzer_eval (c == GREEN); /* { dg-warning "FALSE" } */
+}
+
+void test_33 (void)
+{
+ static int s;
+
+ __analyzer_eval (s == 42); /* { dg-warning "UNKNOWN" } */
+
+ s = 42;
+
+ __analyzer_eval (s == 42); /* { dg-warning "TRUE" } */
+}
+
+static int __attribute__((noinline))
+only_called_by_test_34 (int parm)
+{
+ __analyzer_eval (parm == 42); /* { dg-warning "TRUE" } */
+
+ return parm * 2;
+}
+
+void test_34 (void)
+{
+ int result = only_called_by_test_34 (42);
+ __analyzer_eval (result == 84); /* { dg-warning "TRUE" } */
+}
+
+void test_35 (int i, int j)
+{
+ __analyzer_eval (&i == &i); /* { dg-warning "TRUE" } */
+ __analyzer_eval (&i != &j); /* { dg-warning "TRUE" } */
+}
+
+static void __attribute__((noinline))
+write_through_ptr (int *dst, int val)
+{
+ *dst = val;
+}
+
+void test_36 (int i)
+{
+ __analyzer_eval (i == 42); /* { dg-warning "UNKNOWN" } */
+
+ write_through_ptr (&i, 42);
+
+ __analyzer_eval (i == 42); /* { dg-warning "TRUE" } */
+}
+
+/* Read through uninitialized pointer. */
+
+int test_37 (void)
+{
+ int *ptr;
+ return *ptr; /* { dg-warning "use of uninitialized value 'ptr'" } */
+}
+
+/* Write through uninitialized pointer. */
+
+void test_37a (int i)
+{
+ int *ptr;
+ *ptr = i; /* { dg-warning "use of uninitialized value 'ptr'" } */
+}
+
+// TODO: the various other ptr deref poisonings
+
+/* Read through NULL pointer. */
+
+int test_38 (void)
+{
+ int *ptr = NULL;
+ return *ptr; /* { dg-warning "dereference of NULL 'ptr'" } */
+}
+
+/* Write through NULL pointer. */
+
+int test_38a (int i)
+{
+ int *ptr = NULL;
+ *ptr = i; /* { dg-warning "dereference of NULL 'ptr'" } */
+}
+
+/* Read through non-NULL constant pointer. */
+
+int test_39 (void)
+{
+ int *ptr = (int *)0x1000;
+ return *ptr;
+}
+
+int test_40 (int flag)
+{
+ int i;
+ if (flag)
+ i = 43;
+ else
+ i = 17;
+
+ /* With state-merging, we lose the relationship between 'flag' and 'i'. */
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 exploded nodes" } */
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 exploded node" } */
+
+ if (flag)
+ __analyzer_eval (i == 43); /* { dg-warning "UNKNOWN" } */
+ else
+ __analyzer_eval (i == 17); /* { dg-warning "UNKNOWN" } */
+}
+
+struct link
+{
+ struct link *next;
+ int f;
+};
+
+/* Traversing a singly-linked list. */
+
+void foo (struct link *in)
+{
+ struct link a;
+ struct link b;
+ struct link c;
+ a.next = &b;
+ b.next = &c;
+ in->next = &a;
+ c.f = 42;
+ __analyzer_eval (in->next->next->next->f == 42); /* { dg-warning "TRUE" } */
+}
+
+union u
+{
+ int i;
+ int *ptr;
+};
+
+void test_41 (void)
+{
+ union u u;
+ u.i = 42;
+ __analyzer_eval (u.i == 42); /* { dg-warning "TRUE" } */
+ __analyzer_eval (u.ptr == NULL); /* { dg-warning "UNKNOWN" } */
+
+ /* Writes to a union member should invalidate knowledge about other members. */
+ u.ptr = NULL;
+ __analyzer_eval (u.ptr == NULL); /* { dg-warning "TRUE" } */
+ __analyzer_eval (u.i == 42); /* { dg-warning "UNKNOWN" } */
+}
+
+void test_42 (void)
+{
+ int i;
+ float f;
+ i = 42;
+ f = i;
+ __analyzer_eval (f == 42.0); /* { dg-warning "TRUE" } */
+}
+
+void test_43 (void)
+{
+ int i;
+ float f;
+ f = 42.0f;
+ i = f;
+ __analyzer_eval (i == 42); /* { dg-warning "TRUE" } */
+}
+
+struct sbits
+{
+ int b0 : 1;
+ int b123 : 3;
+ int b456 : 3;
+ int b7 : 1;
+};
+
+void test_44 (void)
+{
+ struct sbits bits;
+ bits.b0 = 1;
+ __analyzer_eval (bits.b0 == 1); /* { dg-warning "TRUE" "" { xfail *-*-* } } */
+ /* { dg-warning "FALSE" "" { target *-*-* } .-1 } */
+ // TODO(xfail): ^^^^
+
+ bits.b456 = 5;
+ __analyzer_eval (bits.b456 == 5); /* { dg-warning "TRUE" "" { xfail *-*-* } } */
+ /* { dg-warning "FALSE" "" { target *-*-* } .-1 } */
+ // TODO(xfail): ^^^^
+}
+
+struct ubits
+{
+ unsigned int b0 : 1;
+ unsigned int b123 : 3;
+ unsigned int b456 : 3;
+ unsigned int b7 : 1;
+};
+
+/* FIXME: this requires BIT_FIELD_REF to work. */
+
+void test_45 (void)
+{
+ struct ubits bits;
+ bits.b0 = 1;
+ __analyzer_eval (bits.b0 == 1); /* { dg-warning "TRUE" "" { xfail *-*-* } } */
+ /* { dg-warning "UNKNOWN" "" { target *-*-* } .-1 } */
+ // TODO(xfail): ^^^^
+
+ bits.b456 = 5;
+ __analyzer_eval (bits.b456 == 5); /* { dg-warning "TRUE" "" { xfail *-*-* } } */
+ /* { dg-warning "UNKNOWN" "" { target *-*-* } .-1 } */
+ // TODO(xfail): ^^^^
+}
+
+extern const char *char_ptr;
+
+int test_46 (void)
+{
+ if (strcmp("literal", char_ptr))
+ return 1;
+ return 0;
+}
+
+char test_47 (void)
+{
+ static const char* my_version = "1.1.3";
+ return my_version[0];
+}
+
+unsigned test_48 (unsigned char *p, unsigned char *q)
+{
+ return (unsigned int)(p - q);
+}
+
+typedef struct {
+ const char *filename;
+ short lineno;
+} loc;
+
+static loc loc_last;
+
+void test_49 (void)
+{
+ loc_last = __extension__(loc) { "", 328 };
+ loc_last = __extension__(loc) { "", 333 };
+}
+
+void test_50 (void *p, void *q)
+{
+ __analyzer_eval (p == q); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (p == p); /* { dg-warning "TRUE" } */
+ __analyzer_eval (q == q); /* { dg-warning "TRUE" } */
+ __analyzer_eval (p == (struct coord *)p); /* { dg-warning "TRUE" } */
+ __analyzer_eval (p == (const struct coord *)p); /* { dg-warning "TRUE" } */
+
+ struct coord *cp = (struct coord *)p;
+ __analyzer_eval (p == cp); /* { dg-warning "TRUE" } */
+
+ struct coord *cq = (struct coord *)q;
+ __analyzer_eval (q == cq); /* { dg-warning "TRUE" } */
+
+ __analyzer_eval (cp == cq); /* { dg-warning "UNKNOWN" } */
+}
+
+void test_51 (struct coord c)
+{
+ struct coord d;
+ memcpy (&d, &c, sizeof (struct coord));
+ __analyzer_eval (c.x == d.x); /* { dg-warning "TRUE" "" { xfail *-*-* } } */
+ /* { dg-warning "UNKNOWN" "" { target *-*-* } .-1 } */
+ __analyzer_eval (c.y == d.y); /* { dg-warning "TRUE" "" { xfail *-*-* } } */
+ /* { dg-warning "UNKNOWN" "" { target *-*-* } .-1 } */
+}
+
+struct big
+{
+ int ia[1024];
+};
+
+void test_52 (struct big b)
+{
+ struct big d;
+ memcpy (&d, &b, sizeof (struct big));
+ __analyzer_eval (b.ia[0] == d.ia[0]); /* { dg-warning "TRUE" "" { xfail *-*-* } } */
+ /* { dg-warning "UNKNOWN" "" { target *-*-* } .-1 } */
+}
+
+void test_53 (const char *msg)
+{
+ (void)fprintf(stderr, "LOG: %s", msg);
+}
--- /dev/null
+#include <stdlib.h>
+
+struct foo
+{
+ char **m_f;
+};
+
+struct foo *
+test (void)
+{
+ struct foo *new_table = (struct foo *) malloc(sizeof(struct foo));
+ if (!new_table)
+ return NULL;
+ new_table->m_f = (char **)malloc(sizeof(char **));
+ *new_table->m_f = NULL; /* { dg-warning "dereference of possibly-NULL '<unknown>'" } */ // FIXME: something better than "unknown" here
+ return new_table;
+}
--- /dev/null
+int test (void)
+{
+ unsigned char *s = "abc";
+ char *t = "xyz";
+ return s[1] + t[1];
+}
--- /dev/null
+/* Mismatching decl of foo. */
+
+int foo ();
+
+int bar (void)
+{
+ return foo() + 1;
+}
+
+int foo (int x, int y)
+{
+ return x * y;
+}
--- /dev/null
+#include <stdlib.h>
+
+union
+{
+ void *ptr_val;
+ int int_val;
+} global_union;
+
+void test_1 (void)
+{
+ global_union.ptr_val = malloc (1024);
+}
+
+void test_2 (void)
+{
+ global_union.ptr_val = malloc (1024); /* { dg-message "allocated here" } */
+ global_union.int_val = 0;
+} /* { dg-warning "leak of '<unknown>' " } */
+/* TODO: something better than "<unknown>". */
+/* TODO: better location for the leak. */
+
--- /dev/null
+/* FIXME: we shouldn't need this. */
+/* { dg-additional-options "-fanalyzer-fine-grained" } */
+
+#include <stdlib.h>
+
+void *global_ptr;
+
+void test_1 (int i)
+{
+ global_ptr = malloc (1024); /* { dg-message "allocated here" } */
+ *(int *)&global_ptr = i; /* { dg-warning "leak of '<unknown>'" } */
+ // TODO: something better than "<unknown>" here ^^^
+}
+
+void test_2 (int i)
+{
+ void *p = malloc (1024); /* { dg-message "allocated here" "" { xfail *-*-* } } */
+ // TODO(xfail)
+ global_ptr = p;
+ *(int *)&p = i;
+ p = global_ptr;
+ free (p);
+ free (global_ptr); /* { dg-warning "double-'free' of 'p'" } */
+}
--- /dev/null
+#include <string.h>
+
+struct coord
+{
+ double x;
+ double y;
+ double z;
+};
+
+struct tri {
+ struct coord verts[3];
+};
+
+double test_1 (void)
+{
+ struct tri t;
+ memset (&t, 0, sizeof (struct tri));
+ return t.verts[1].y;
+}
+
+int test_2 (const struct coord *c1, const struct coord *c2, double r_squared)
+{
+ double dx = c1->x - c2->x;
+ double dy = c1->y - c2->y;
+ double dz = c1->z - c2->z;
+ return (dx * dx) + (dy * dy) + (dz * dz) <= r_squared;
+}
+
+int test_3 (const struct coord *c1, const struct coord *c2, struct coord *out)
+{
+ out->x = c1->x + c2->x;
+ out->y = c1->y + c2->y;
+ out->z = c1->z + c2->z;
+}
--- /dev/null
+/* Labels as values. */
+
+#include "analyzer-decls.h"
+
+extern void foo (void);
+
+void *x, *y, *z;
+
+void test (void)
+{
+ label0:
+ foo ();
+ label1:
+ foo ();
+ label2:
+ foo ();
+
+ x = &&label0;
+ y = &&label1;
+ z = &&label2;
+
+ __analyzer_eval (x == x); /* { dg-warning "TRUE" } */
+ __analyzer_eval (x == y); /* { dg-warning "FALSE" } */
+}
+
+void test_2 (int i)
+{
+ static void *array[] = { &&label0, &&label1, &&label2 };
+ goto *array[i];
+
+ label0:
+ foo ();
+ label1:
+ foo ();
+ label2:
+ foo ();
+}
+
+void test_3 (int i)
+{
+ static const int array[] = { &&label0 - &&label0,
+ &&label1 - &&label0,
+ &&label2 - &&label0 };
+ goto *(&&label0 + array[i]);
+
+ label0:
+ foo ();
+ label1:
+ foo ();
+ label2:
+ foo ();
+}
--- /dev/null
+typedef struct foo {} foo_t;
+
+typedef void (*func_t)(foo_t *s);
+
+void cb_1 (foo_t *s);
+void cb_2 (foo_t *s);
+
+typedef struct config_s {
+ func_t func;
+} config;
+
+static const config table[2] = {
+ { cb_1 },
+ { cb_2 }
+};
+
+int deflate (foo_t *s, int which)
+{
+ (*(table[which].func))(s);
+}
--- /dev/null
+#include "analyzer-decls.h"
+
+void test (int *p, int i, int j)
+{
+ p[3] = 42;
+ __analyzer_eval (p[3] == 42); /* { dg-warning "TRUE" } */
+ __analyzer_eval (*(p + 3) == 42); /* { dg-warning "TRUE" } */
+ __analyzer_eval (p[i] == 42); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (p[j] == 42); /* { dg-warning "UNKNOWN" } */
+
+ //__analyzer_dump ();
+
+ p[i] = 17;
+
+ //__analyzer_dump ();
+
+ __analyzer_eval (p[3] == 42); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (p[i] == 17); /* { dg-warning "TRUE" } */
+ __analyzer_eval (p[j] == 17); /* { dg-warning "UNKNOWN" "" { xfail *-*-* } } */
+ /* { dg-bogus "TRUE" "" { xfail *-*-* } .-1 } */
+ // FIXME(xfails) ^^^
+}
--- /dev/null
+/* { dg-additional-options "-fgimple" } */
+
+typedef long long int i64;
+
+int __GIMPLE (ssa)
+test (i64 * pA, i64 iB)
+{
+ __complex__ long long int D_37702;
+ int D_37701;
+ long long int _1;
+ long long int _2;
+ long long int _3;
+ _Bool _4;
+ __complex__ long long int _8;
+ int _10;
+
+ __BB(2):
+ _1 = __MEM <i64> (pA_6(D));
+ _8 = .ADD_OVERFLOW (_1, iB_7(D));
+ _2 = __real _8;
+ __MEM <i64> (pA_6(D)) = _2;
+ _3 = __imag _8;
+ _4 = (_Bool) _3;
+ _10 = (int) _4;
+ goto __BB3;
+
+ __BB(3):
+L0:
+ return _10;
+
+}
--- /dev/null
+/* { dg-additional-options "-O2" } */
+/* TODO: is there a way to automatically run the tests at various
+ optimization levels, and with/without debuginfo, rather than
+ hardcoding options? Adapt from torture .exp, presumably. */
+
+
+#include <stdlib.h>
+#include <string.h>
+
+int test_1 (void)
+{
+ return 0;
+}
--- /dev/null
+/* { dg-additional-options "-O2" } */
+/* TODO: is there a way to automatically run the tests at various
+ optimization levels, and with/without debuginfo, rather than
+ hardcoding options? Adapt from torture .exp, presumably. */
+
+#include <stdio.h>
+int
+main ()
+{
+ FILE *f = fopen ("conftest.out", "w");
+ return ferror (f) || fclose (f) != 0;
+
+ ;
+ return 0;
+}
--- /dev/null
+/* { dg-additional-options "-fexceptions" } */
+/* TODO: is there a way to automatically run the tests at various
+ optimization levels, and with/without debuginfo, rather than
+ hardcoding options? Adapt from torture .exp, presumably. */
+
+#include <stdio.h>
+int
+main ()
+{
+ FILE *f = fopen ("conftest.out", "w");
+ return ferror (f) || fclose (f) != 0;
+
+ ;
+ return 0;
+}
+
--- /dev/null
+/* A toy re-implementation of CPython's object model. */
+
+#include <stddef.h>
+#include <string.h>
+#include <stdlib.h>
+
+typedef struct base_obj
+{
+ struct type_obj *ob_type;
+ int ob_refcnt;
+} base_obj;
+
+typedef struct type_obj
+{
+ base_obj tp_base;
+ void (*tp_dealloc) (base_obj *);
+} type_obj;
+
+typedef struct tuple_obj
+{
+ base_obj tup_base;
+ int num_elements;
+ base_obj elements[];
+} tuple_obj;
+
+typedef struct list_obj
+{
+ base_obj list_base;
+ int num_elements;
+ base_obj *elements;
+} list_obj;
+
+typedef struct string_obj
+{
+ base_obj str_base;
+ size_t str_len;
+ char str_buf[];
+} string_obj;
+
+extern void type_del (base_obj *);
+extern void tuple_del (base_obj *);
+extern void str_del (base_obj *);
+
+type_obj type_type = {
+ { &type_type, 1},
+ type_del
+};
+
+type_obj tuple_type = {
+ { &type_type, 1},
+ tuple_del
+};
+
+type_obj str_type = {
+ { &str_type, 1},
+ str_del
+};
+
+base_obj *alloc_obj (type_obj *ob_type, size_t sz)
+{
+ base_obj *obj = (base_obj *)malloc (sz);
+ if (!obj)
+ return NULL;
+ obj->ob_type = ob_type;
+ obj->ob_refcnt = 1;
+ return obj;
+}
+
+base_obj *new_string_obj (const char *str)
+{
+ //__analyzer_dump ();
+ size_t len = strlen (str);
+#if 1
+ string_obj *str_obj
+ = (string_obj *)alloc_obj (&str_type, sizeof (string_obj) + len + 1);
+#else
+ string_obj *str_obj = (string_obj *)malloc (sizeof (string_obj) + len + 1);
+ if (!str_obj)
+ return NULL;
+ str_obj->str_base.ob_type = &str_type;
+ str_obj->str_base.ob_refcnt = 1;
+#endif
+ str_obj->str_len = len; /* { dg-warning "dereference of NULL 'str_obj'" } */
+ memcpy (str_obj->str_buf, str, len);
+ str_obj->str_buf[len] = '\0';
+ return (base_obj *)str_obj;
+}
+
+void unref (base_obj *obj)
+{
+ if (--obj->ob_refcnt == 0) /* { dg-bogus "dereference of uninitialized pointer 'obj'" } */
+ obj->ob_type->tp_dealloc (obj);
+}
+
+void test_1 (const char *str)
+{
+ base_obj *obj = new_string_obj (str);
+ //__analyzer_dump();
+ unref (obj);
+} /* { dg-bogus "leak" } */
--- /dev/null
+/* A toy re-implementation of CPython's object model. */
+
+#include <stddef.h>
+#include <string.h>
+#include <stdlib.h>
+
+typedef struct base_obj base_obj;
+typedef struct type_obj type_obj;
+typedef struct string_obj string_obj;
+
+struct base_obj
+{
+ struct type_obj *ob_type;
+ int ob_refcnt;
+};
+
+struct type_obj
+{
+ base_obj tp_base;
+ void (*tp_dealloc) (base_obj *);
+};
+
+struct string_obj
+{
+ base_obj str_base;
+ size_t str_len;
+ char str_buf[];
+};
+
+extern void type_del (base_obj *);
+extern void str_del (base_obj *);
+
+type_obj type_type = {
+ { &type_type, 1},
+ type_del
+};
+
+type_obj str_type = {
+ { &str_type, 1},
+ str_del
+};
+
+base_obj *alloc_obj (type_obj *ob_type, size_t sz)
+{
+ base_obj *obj = (base_obj *)malloc (sz);
+ if (!obj)
+ return NULL;
+ obj->ob_type = ob_type;
+ obj->ob_refcnt = 1;
+ return obj;
+}
+
+string_obj *new_string_obj (const char *str)
+{
+ //__analyzer_dump ();
+ size_t len = strlen (str);
+#if 1
+ string_obj *str_obj
+ = (string_obj *)alloc_obj (&str_type, sizeof (string_obj) + len + 1);
+#else
+ string_obj *str_obj = (string_obj *)malloc (sizeof (string_obj) + len + 1);
+ if (!str_obj)
+ return NULL;
+ str_obj->str_base.ob_type = &str_type;
+ str_obj->str_base.ob_refcnt = 1;
+#endif
+ str_obj->str_len = len; /* { dg-warning "dereference of NULL 'str_obj'" } */
+ memcpy (str_obj->str_buf, str, len);
+ str_obj->str_buf[len] = '\0';
+ return str_obj;
+}
+
+void unref (string_obj *obj)
+{
+ //__analyzer_dump();
+ if (--obj->str_base.ob_refcnt == 0)
+ {
+ //__analyzer_dump();
+ obj->str_base.ob_type->tp_dealloc ((base_obj *)obj); /* { dg-bogus "use of uninitialized value '<unknown>'" "" { xfail *-*-* } } */
+ // TODO (xfail): not sure what's going on here
+ }
+}
+
+void test_1 (const char *str)
+{
+ string_obj *obj = new_string_obj (str);
+ //__analyzer_dump();
+ if (obj)
+ unref (obj);
+} /* { dg-bogus "leak of 'obj'" "" { xfail *-*-* } } */
+// TODO (xfail): not sure why this is treated as leaking
--- /dev/null
+/* A toy re-implementation of CPython's object model. */
+
+#include <stddef.h>
+#include <string.h>
+#include <stdlib.h>
+
+typedef struct base_obj base_obj;
+typedef struct type_obj type_obj;
+typedef struct string_obj string_obj;
+
+struct base_obj
+{
+ struct type_obj *ob_type;
+ int ob_refcnt;
+};
+
+struct type_obj
+{
+ base_obj tp_base;
+};
+
+struct string_obj
+{
+ base_obj str_base;
+ size_t str_len;
+ char str_buf[];
+};
+
+type_obj type_type = {
+ { &type_type, 1},
+};
+
+type_obj str_type = {
+ { &str_type, 1},
+};
+
+base_obj *alloc_obj (type_obj *ob_type, size_t sz)
+{
+ base_obj *obj = (base_obj *)malloc (sz);
+ if (!obj)
+ return NULL;
+ obj->ob_type = ob_type;
+ obj->ob_refcnt = 1;
+ return obj;
+}
+
+string_obj *new_string_obj (const char *str)
+{
+ //__analyzer_dump ();
+ size_t len = strlen (str);
+#if 1
+ string_obj *str_obj
+ = (string_obj *)alloc_obj (&str_type, sizeof (string_obj) + len + 1);
+#else
+ string_obj *str_obj = (string_obj *)malloc (sizeof (string_obj) + len + 1);
+ if (!str_obj)
+ return NULL;
+ str_obj->str_base.ob_type = &str_type;
+ str_obj->str_base.ob_refcnt = 1;
+#endif
+ str_obj->str_len = len; /* { dg-warning "dereference of NULL 'str_obj'" } */
+ memcpy (str_obj->str_buf, str, len);
+ str_obj->str_buf[len] = '\0';
+ return str_obj;
+}
+
+void unref (string_obj *obj)
+{
+ //__analyzer_dump();
+ if (--obj->str_base.ob_refcnt == 0)
+ {
+ //__analyzer_dump();
+ free (obj);
+ }
+}
+
+void test_1 (const char *str)
+{
+ string_obj *obj = new_string_obj (str);
+ //__analyzer_dump();
+ if (obj)
+ unref (obj);
+} /* { dg-bogus "leak of 'obj'" "" { xfail *-*-* } } */
+// TODO (xfail): not sure why this is treated as leaking
--- /dev/null
+/* A toy re-implementation of CPython's object model. */
+
+#include <stddef.h>
+#include <string.h>
+#include <stdlib.h>
+#include "analyzer-decls.h"
+
+typedef struct base_obj base_obj;
+typedef struct type_obj type_obj;
+typedef struct string_obj string_obj;
+
+struct base_obj
+{
+ struct type_obj *ob_type;
+ int ob_refcnt;
+};
+
+struct type_obj
+{
+ base_obj tp_base;
+};
+
+struct string_obj
+{
+ base_obj str_base;
+ size_t str_len;
+ char str_buf[];
+};
+
+type_obj type_type = {
+ { &type_type, 1},
+};
+
+type_obj str_type = {
+ { &str_type, 1},
+};
+
+base_obj *alloc_obj (type_obj *ob_type, size_t sz)
+{
+ base_obj *obj = (base_obj *)malloc (sz);
+ if (!obj)
+ return NULL;
+ obj->ob_type = ob_type;
+ obj->ob_refcnt = 1;
+ return obj;
+}
+
+void unref (base_obj *obj)
+{
+ //__analyzer_dump();
+ if (--obj->ob_refcnt == 0)
+ free (obj);
+}
+
+void test_1 ()
+{
+ base_obj *obj = alloc_obj (&str_type, sizeof (string_obj));
+ if (obj)
+ {
+ __analyzer_dump_num_heap_regions (); /* { dg-warning "num heap regions: '1'" } */
+ unref (obj);
+ __analyzer_dump_num_heap_regions (); /* { dg-warning "num heap regions: '0'" } */
+ }
+}
--- /dev/null
+#include <stdlib.h>
+#include "analyzer-decls.h"
+
+/* Verify that we don't accumulate state after a malloc/free pair. */
+
+void test (void)
+{
+ void *ptr;
+ __analyzer_dump_num_heap_regions (); /* { dg-warning "num heap regions: '0'" } */
+ ptr = malloc (1024);
+ __analyzer_dump_num_heap_regions (); /* { dg-warning "num heap regions: '1'" } */
+ free (ptr);
+ __analyzer_dump_num_heap_regions (); /* { dg-warning "num heap regions: '0'" } */
+}
--- /dev/null
+/* { dg-additional-options "-fno-analyzer-state-merge" } */
+#include "analyzer-decls.h"
+
+int test_40 (int flag)
+{
+ int i;
+ if (flag)
+ i = 43;
+ else
+ i = 17;
+
+ /* Without state-merging, we retain the relationship between 'flag' and 'i'. */
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 exploded nodes" } */
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 exploded nodes" } */
+
+ if (flag)
+ __analyzer_eval (i == 43); /* { dg-warning "TRUE" } */
+ else
+ __analyzer_eval (i == 17); /* { dg-warning "TRUE" } */
+}
--- /dev/null
+#include "analyzer-decls.h"
+
+struct base
+{
+ int i;
+};
+
+struct sub
+{
+ struct base b;
+ int j;
+};
+
+void test (void)
+{
+ struct sub s;
+ s.b.i = 3;
+ s.j = 4;
+ __analyzer_eval (s.b.i == 3); /* { dg-warning "TRUE" } */
+ __analyzer_eval (s.j == 4); /* { dg-warning "TRUE" } */
+
+ struct base *bp = (struct base *)&s;
+
+ __analyzer_eval (bp->i == 3); /* { dg-warning "TRUE" "" { xfail *-*-* } } */
+ /* { dg-warning "UNKNOWN" "" { target *-*-* } .-1 } */
+}
--- /dev/null
+#include <stdlib.h>
+#include <string.h>
+#include "analyzer-decls.h"
+
+struct foo
+{
+ int i;
+};
+
+/* TODO: verify that we know that calloc zeros its memory. */
+
+void test_1 (void)
+{
+ struct foo *f = calloc (1, sizeof (struct foo));
+ if (f == NULL)
+ return;
+ __analyzer_eval (f->i == 0); /* { dg-warning "TRUE" "" { xfail *-*-* } } */
+ /* { dg-bogus "UNKNOWN" "" { xfail *-*-* } .-1 } */
+ free (f);
+}
+
+/* TODO: verify that we know the behavior of memset. */
+
+void test_2 (void)
+{
+ struct foo *f = malloc (sizeof (struct foo));
+ if (f == NULL)
+ return;
+ memset (f, 0, sizeof (struct foo));
+ __analyzer_eval (f->i == 0); /* { dg-warning "TRUE" "" { xfail *-*-* } } */
+ /* { dg-bogus "UNKNOWN" "" { xfail *-*-* } .-1 } */
+ free (f);
+}
--- /dev/null
+#include <stddef.h>
+
+static int *__attribute__((noinline))
+callee (void)
+{
+ return NULL;
+}
+
+void test_1 (void)
+{
+ int *p = callee (); /* { dg-message "return of NULL to 'test_1' from 'callee'" } */
+ *p = 42; /* { dg-warning "dereference of NULL 'p'" } */
+}
--- /dev/null
+/* Verify that we can override -fanalyzer with -fno-analyzer. */
+/* { dg-additional-options "-fno-analyzer" } */
+
+#include <stdlib.h>
+
+void test (void *ptr)
+{
+ free (ptr);
+ free (ptr); /* { dg-bogus "free" } */
+}
--- /dev/null
+/* Verify that the various .dot output files from the analyzer are readable
+ by .dot. */
+
+/* { dg-require-dot "" } */
+/* { dg-additional-options "-fdump-analyzer-callgraph -fdump-analyzer-exploded-graph -fdump-analyzer-state-purge -fdump-analyzer-supergraph" } */
+
+#include <stdlib.h>
+
+int some_call (int i, char ch)
+{
+ return i * i;
+}
+
+int *test (int *buf, int n, int *out)
+{
+ int i;
+ int *result = malloc (sizeof (int) * n);
+
+ /* A loop, to ensure we have phi nodes. */
+ for (i = 0; i < n; i++)
+ result[i] = buf[i] + i; /* { dg-warning "possibly-NULL" "" { xfail *-*-* } } */
+ /* TODO(xfail): why isn't the warning appearing? */
+
+ /* Example of a "'" (to test quoting). */
+ *out = some_call (i, 'a');
+
+ return result;
+}
+
+/* { dg-final { dg-check-dot "dot-output.c.callgraph.dot" } } */
+/* { dg-final { dg-check-dot "dot-output.c.eg.dot" } } */
+/* { dg-final { dg-check-dot "dot-output.c.state-purge.dot" } } */
+/* { dg-final { dg-check-dot "dot-output.c.supergraph.dot" } } */
--- /dev/null
+/* { dg-do link } */
+/* { dg-require-effective-target lto } */
+/* { dg-additional-options "-flto" } */
+/* { dg-additional-sources double-free-lto-1-b.c } */
+
+#include <stdlib.h>
+#include "double-free-lto-1.h"
+
+void test (void *ptr)
+{
+ calls_free (ptr); /* { dg-message "calling 'calls_free' from 'test'" } */
+ free (ptr); /* { dg-warning "double-'free' of 'ptr_.+'" } */
+ // TODO: report "ptr", rather than an SSA name
+}
+
+int main() { return 0; }
--- /dev/null
+#include <stdlib.h>
+
+#include "double-free-lto-1.h"
+
+extern void calls_free (void *ptr)
+{
+ free (ptr);
+}
--- /dev/null
+extern void calls_free (void *ptr);
--- /dev/null
+#include "analyzer-decls.h"
+
+void test (int p, int q, int r)
+{
+ if (p == 42)
+ {
+ __analyzer_eval (p == 42); /* { dg-warning "TRUE" } */
+ __analyzer_eval (p != 42); /* { dg-warning "FALSE" } */
+ if (q == 42)
+ {
+ __analyzer_eval (p == q); /* { dg-warning "TRUE" } */
+ }
+ else
+ {
+ __analyzer_eval (p != q); /* { dg-warning "TRUE" } */
+ }
+ }
+ else
+ {
+ __analyzer_eval (p == 42); /* { dg-warning "FALSE" } */
+ __analyzer_eval (p != 42); /* { dg-warning "TRUE" } */
+ if (q == 42)
+ {
+ __analyzer_eval (p == q); /* { dg-warning "FALSE" } */
+ }
+ else
+ {
+ __analyzer_eval (p == q); /* { dg-warning "UNKNOWN" } */
+ }
+ }
+}
--- /dev/null
+/* { dg-additional-options "-Wno-analyzer-too-complex" } */
+
+#include <stdlib.h>
+
+extern int get (void);
+
+/* In theory each of p0...p8 can be in various malloc states,
+ independently, so the total combined number of states
+ at any program point within the loop is NUM_STATES to the power of NUM_VARS. */
+
+void test (void)
+{
+ void *p0, *p1, *p2, *p3, *p4, *p5, *p6, *p7, *p8;
+ void **pp;
+ while (get ())
+ {
+ switch (get ())
+ {
+ default:
+ case 0:
+ pp = &p0;
+ break;
+ case 1:
+ pp = &p1;
+ break;
+ case 2:
+ pp = &p2;
+ break;
+ case 3:
+ pp = &p3;
+ break;
+ case 4:
+ pp = &p4;
+ break;
+ case 5:
+ pp = &p5;
+ break;
+ case 6:
+ pp = &p6;
+ break;
+ case 7:
+ pp = &p7;
+ break;
+ }
+
+ switch (get ())
+ {
+ default:
+ case 0:
+ *pp = malloc (16);
+ break;
+ case 1:
+ free (*pp);
+ break;
+ case 2:
+ /* no-op. */
+ break;
+ }
+ }
+}
--- /dev/null
+/* In theory each of p0...p3 can be in various malloc states,
+ independently, so the total combined number of states
+ at any program point within the loop is NUM_STATES to the power of NUM_VARS.
+
+ Set the limits high enough that we can fully explore this. */
+
+/* { dg-additional-options "--param analyzer-max-enodes-per-program-point=200 --param analyzer-bb-explosion-factor=50" } */
+
+#include <stdlib.h>
+
+extern int get (void);
+
+void test (void)
+{
+ void *p0, *p1, *p2, *p3;
+ while (get ())
+ {
+ switch (get ())
+ {
+ default:
+ case 0:
+ p0 = malloc (16);
+ break;
+ case 1:
+ free (p0); /* { dg-warning "double-'free' of 'p0'" } */
+ break;
+
+ case 2:
+ p1 = malloc (16);
+ break;
+ case 3:
+ free (p1); /* { dg-warning "double-'free' of 'p1'" } */
+ break;
+
+ case 4:
+ p2 = malloc (16);
+ break;
+ case 5:
+ free (p2); /* { dg-warning "double-'free' of 'p2'" } */
+ break;
+
+ case 6:
+ p3 = malloc (16);
+ break;
+ case 7:
+ free (p3); /* { dg-warning "double-'free' of 'p3'" } */
+ break;
+ }
+ }
+}
--- /dev/null
+int factorial (int n)
+{
+ if (n > 1)
+ return n * factorial (n - 1);
+ else
+ return 1;
+}
--- /dev/null
+int fib (int n)
+{
+ if (n > 1)
+ return fib (n - 1) + fib (n - 2);
+ else
+ return n;
+}
+
+/* { dg-regexp "\[^\n\r\]+: warning: analysis bailed out early \\(\[0-9\]+ 'after-snode' enodes; \[0-9\]+ enodes\\) \[^\n\r\]*" } */
--- /dev/null
+typedef long unsigned int size_t;
+
+extern size_t strlen (const char *__s)
+ __attribute__ ((__nothrow__ , __leaf__))
+ __attribute__ ((__pure__))
+ __attribute__ ((__nonnull__ (1)));
+
+extern void *malloc (size_t __size)
+ __attribute__ ((__nothrow__ , __leaf__))
+ __attribute__ ((__malloc__)) ;
+
+extern void free (void *__ptr)
+ __attribute__ ((__nothrow__ , __leaf__));
+
+typedef struct _krb5_data {
+ unsigned int length;
+ char *data;
+} krb5_data;
+
+typedef struct _krb5_error {
+ krb5_data text;
+} krb5_error;
+
+extern const char *error_message (int);
+
+int
+recvauth_common (int problem)
+{
+ if (problem) {
+ krb5_error error;
+ const char *message = error_message(problem);
+ error.text.length = strlen(message) + 1;
+ if (!(error.text.data = malloc(error.text.length))) {
+ goto cleanup;
+ }
+ free(error.text.data);
+ }
+
+ cleanup:
+ return problem; /* { dg-bogus "leak" } */
+}
--- /dev/null
+#include <stdio.h>
+
+void
+test_1 (const char *path)
+{
+ FILE *f = fopen (path, "r"); /* { dg-message "opened here" } */
+ if (!f)
+ return;
+
+ fclose (f); /* { dg-message "\\(4\\) \\.\\.\\.to here" } */
+ /* { dg-message "\\(5\\) first 'fclose' here" "" { target *-*-* } .-1 } */
+ fclose (f); /* { dg-warning "double 'fclose' of FILE 'f'" } */
+ /* { dg-message "second 'fclose' here; first 'fclose' was at \\(5\\)" "" { target *-*-* } .-1 } */
+}
+
+void
+test_2 (const char *src, const char *dst)
+{
+ FILE *f_in = fopen (src, "r"); /* { dg-message "\\(1\\) opened here" } */
+ if (!f_in)
+ return;
+
+ FILE *f_out = fopen (src, "w");
+ if (!f_out)
+ return; /* { dg-warning "leak of FILE 'f_in'" } */
+ /* { dg-message "\\(7\\) 'f_in' leaks here; was opened at \\(1\\)" "" { target *-*-* } .-1 } */
+
+ fclose (f_out);
+ fclose (f_in);
+}
+
+void
+test_3 (const char *path)
+{
+ FILE *f = fopen (path, "r"); /* { dg-message "opened here" } */
+ return; /* { dg-warning "leak of FILE 'f'" } */
+}
--- /dev/null
+#include <stdio.h>
+
+struct foo
+{
+ FILE *m_f;
+};
+
+void test (const char *path)
+{
+ struct foo f;
+ f.m_f = fopen (path, "r");
+
+ if (!f.m_f)
+ return; /* { dg-bogus "leak of FILE" } */
+
+ fclose (f.m_f);
+ fclose (f.m_f); /* { dg-warning "double 'fclose' of FILE 'f.m_f'" } */
+}
--- /dev/null
+#include <stdlib.h>
+
+typedef void *(*fn_ptr_t) (void *);
+
+void *test_1 (fn_ptr_t fn_ptr, void *data)
+{
+ return fn_ptr (data);
+}
--- /dev/null
+#include <stdlib.h>
+#include "analyzer-decls.h"
+
+typedef void (*fn_ptr_t) (void *);
+
+void
+calls_free (void *victim)
+{
+ free (victim);
+}
+
+void
+no_op (void *ptr)
+{
+}
+
+void test_1 (void *ptr)
+{
+ fn_ptr_t fn_ptr = calls_free;
+ __analyzer_eval (fn_ptr == calls_free); /* { dg-warning "TRUE" } */
+ __analyzer_eval (fn_ptr != NULL); /* { dg-warning "TRUE" } */
+ __analyzer_eval (fn_ptr == NULL); /* { dg-warning "FALSE" } */
+ __analyzer_eval (fn_ptr == no_op); /* { dg-warning "FALSE" } */
+
+ fn_ptr (ptr);
+ fn_ptr (ptr);
+}
+// TODO: issue a double-'free' warning at 2nd call to fn_ptr.
+
+/* As above, but with an extra indirection to try to thwart
+ the optimizer. */
+
+void test_2 (void *ptr, fn_ptr_t *fn_ptr)
+{
+ *fn_ptr = calls_free;
+ __analyzer_eval (*fn_ptr == calls_free); /* { dg-warning "TRUE" } */
+ __analyzer_eval (*fn_ptr != NULL); /* { dg-warning "TRUE" } */
+ __analyzer_eval (*fn_ptr == NULL); /* { dg-warning "FALSE" } */
+ __analyzer_eval (*fn_ptr == no_op); /* { dg-warning "FALSE" } */
+
+ (*fn_ptr) (ptr);
+ (*fn_ptr) (ptr);
+}
+// TODO: issue a double-'free' warning at 2nd call to fn_ptr.
--- /dev/null
+#include <stdlib.h>
+
+typedef void *(*alloc_func_t) (size_t);
+typedef void (*free_func_t) (void *);
+
+typedef struct callbacks
+{
+ alloc_func_t alloc_cb;
+ free_func_t dealloc_cb;
+} callbacks_t;
+
+void test (void)
+{
+ callbacks_t cb;
+ cb.alloc_cb = (alloc_func_t)0;
+ cb.dealloc_cb = (free_func_t)0;
+}
--- /dev/null
+void gzseek (long offset, int whence)
+{
+ if (whence == 2)
+ return;
+ if (whence == 0)
+ offset -= 1;
+ if (offset < 0)
+ return;
+ while (offset > 0) {
+ }
+}
--- /dev/null
+typedef long unsigned int size_t;
+typedef struct _IO_FILE FILE;
+extern size_t fread(void *__restrict __ptr, size_t __size, size_t __n,
+ FILE *__restrict __stream);
+typedef unsigned char Byte;
+typedef unsigned int uInt;
+typedef unsigned long uLong;
+
+typedef struct z_stream_s {
+ uInt avail_in;
+ uInt avail_out;
+} z_stream;
+
+typedef struct gz_stream {
+ z_stream stream;
+ FILE *file;
+} gz_stream;
+
+void test_1_callee(gz_stream *s, Byte *buf) {
+ Byte *next_out = buf;
+ uInt n = s->stream.avail_in;
+ if (n > 0) {
+ next_out += n;
+ }
+ s->stream.avail_out -= fread(next_out, 1, s->stream.avail_out, s->file);
+}
+
+void test_1_caller(gz_stream *s) {
+ unsigned char c;
+ test_1_callee(s, &c);
+}
--- /dev/null
+typedef long unsigned int size_t;
+typedef struct _IO_FILE FILE;
+extern size_t fread(void *__restrict __ptr, size_t __size, size_t __n,
+ FILE *__restrict __stream);
+typedef unsigned char Byte;
+typedef unsigned int uInt;
+typedef unsigned long uLong;
+
+typedef struct gz_stream {
+ FILE *file;
+ uInt avail_in;
+ uInt avail_out;
+} gz_stream;
+
+void test_1_callee(gz_stream *s, Byte *buf) {
+ Byte *next_out = buf;
+ uInt n = s->avail_in;
+ if (n > 0) {
+ next_out += n;
+ }
+ s->avail_out -= fread(next_out, 1, s->avail_out, s->file);
+}
+
+void test_1_caller(gz_stream *s) {
+ unsigned char c;
+ test_1_callee(s, &c);
+}
--- /dev/null
+#include <stdlib.h>
+typedef struct z_stream_s {
+ unsigned char *next_out;
+} z_stream;
+typedef struct gz_stream {
+ z_stream stream;
+ unsigned char *outbuf;
+} gz_stream;
+gz_stream *s;
+static void gz_open(const char *path)
+{
+ s->stream.next_out = s->outbuf = (unsigned char *)malloc(16384); /* { dg-bogus "leak" } */
+}
+void gzopen(const char *path)
+{
+ gz_open(path);
+}
--- /dev/null
+extern void marker_A(void);
+extern void marker_B(void);
+extern void marker_C(void);
+extern void marker_D(void);
+
+void test(int flag)
+{
+ marker_A();
+
+ if (flag) {
+ marker_B();
+
+ /* Recurse, infinitely, as it happens: */
+ test(flag);
+
+ marker_C();
+ }
+
+ marker_D();
+}
+
+/* A cycle of 4 mutually-recursive functions (but only for certain inputs). */
+
+extern void mutual_test_1 (int flag);
+extern void mutual_test_2 (int flag);
+extern void mutual_test_3 (int flag);
+extern void mutual_test_4 (int flag);
+
+void mutual_test_1 (int flag)
+{
+ marker_A ();
+ if (flag)
+ mutual_test_2 (flag);
+}
+
+void mutual_test_2 (int flag)
+{
+ marker_B ();
+ if (flag)
+ mutual_test_3 (flag);
+}
+
+void mutual_test_3 (int flag)
+{
+ marker_C ();
+ if (flag)
+ mutual_test_4 (flag);
+}
+
+void mutual_test_4 (int flag)
+{
+ marker_D ();
+ if (flag)
+ mutual_test_1 (flag);
+}
--- /dev/null
+/* { dg-additional-options "-fno-analyzer-state-purge" } */
+#include "analyzer-decls.h"
+
+struct s
+{
+ int i;
+};
+
+void test(void)
+{
+ struct s s;
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 exploded node" } */
+
+
+ for (s.i=0; s.i<256; s.i++) {
+ __analyzer_eval (s.i < 256); /* { dg-warning "TRUE" } */
+ /* (should report TRUE twice). */
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 exploded nodes" } */
+
+ //__analyzer_eval (s.i == 0); /* { d-todo-g-warning "UNKNOWN" "" { xfail *-*-* } } */
+ /* { d-todo-g-warning "TRUE" "" { target *-*-* } .-1 } */
+ /* TODO(xfail^^^): we're only capturing the first iteration, so
+ we erroneously get i == 0. */
+
+ //__analyzer_eval (s.i >= 0); /* { d-todo-g-warning "TRUE" } */
+ }
+
+ __analyzer_eval (s.i >= 256); /* { dg-warning "TRUE" } */
+
+ __analyzer_eval (s.i == 256); /* { dg-warning "TRUE" "" { xfail *-*-* } } */
+ /* { dg-warning "UNKNOWN" "" { target *-*-* } .-1 } */
+ /* TODO(xfail^^^): ideally it should figure out i == 256 at exit. */
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 exploded node" } */
+}
--- /dev/null
+/* { dg-additional-options "-fno-analyzer-state-purge" } */
+#include "analyzer-decls.h"
+
+union u
+{
+ int i;
+};
+
+void test(void)
+{
+ union u u;
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 exploded node" } */
+
+
+ for (u.i=0; u.i<256; u.i++) {
+ __analyzer_eval (u.i < 256); /* { dg-warning "TRUE" } */
+ /* { dg-warning "TRUE" "" { xfail *-*-* } .-1 } */
+ /* { dg-bogus "UNKNOWN" "" { xfail *-*-* } .-2 } */
+ /* (should report TRUE twice). */
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 exploded nodes" } */
+
+ //__analyzer_eval (u.i == 0); /* { d-todo-g-warning "UNKNOWN" "" { xfail *-*-* } } */
+ /* { d-todo-g-warning "TRUE" "" { target *-*-* } .-1 } */
+ /* TODO(xfail^^^): we're only capturing the first iteration, so
+ we erroneously get i == 0. */
+
+ //__analyzer_eval (u.i >= 0); /* { d-todo-g-warning "TRUE" } */
+ }
+
+ __analyzer_eval (u.i >= 256); /* { dg-warning "TRUE" "" { xfail *-*-* } } */
+ /* { dg-warning "UNKNOWN" "" { target *-*-* } .-1 } */
+
+ __analyzer_eval (u.i == 256); /* { dg-warning "TRUE" "" { xfail *-*-* } } */
+ /* { dg-warning "UNKNOWN" "" { target *-*-* } .-1 } */
+ /* TODO(xfail^^^): ideally it should figure out i == 256 at exit. */
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 exploded node" } */
+}
--- /dev/null
+#include <stdlib.h>
+
+void test(int c)
+{
+ int i;
+ char *buffer = (char*)malloc(256);
+
+ for (i=0; i<255; i++) {
+ buffer[i] = c; /* { dg-warning "use after 'free' of 'buffer'" } */
+ /* BUG: the malloc could have failed.
+ TODO: the checker doesn't yet report this, perhaps because
+ the allocation state isn't tracked through the pointer
+ arithmetic. */
+ free(buffer); /* { dg-warning "double-'free' of 'buffer'" } */
+ }
+
+}
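+
+/* A hypothetical sketch (not part of the original test) of how the loop
+   above could be written to address the BUG note: check the result of
+   malloc before writing through it, and free the buffer once, after the
+   loop.  "test_fixed" is a name invented here; the function is intended
+   to be warning-free, so it carries no dg directives.  */
+
+void test_fixed (int c)
+{
+  int i;
+  char *buffer = (char *) malloc (256);
+  if (!buffer)
+    return;
+
+  for (i = 0; i < 255; i++)
+    buffer[i] = c;
+
+  free (buffer);
+}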
--- /dev/null
+// FIXME:
+/* { dg-additional-options "-fno-analyzer-state-purge" } */
+
+/* Example of nested loops. */
+
+#include "analyzer-decls.h"
+
+void test(void)
+{
+ int i, j, k;
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 exploded node" } */
+
+ for (i=0; i<256; i++) {
+
+ __analyzer_eval (i >= 0); /* { dg-warning "TRUE" } */
+ /* { dg-warning "UNKNOWN" "" { target *-*-* } .-1 } */
+
+ __analyzer_eval (i < 256); /* { dg-warning "TRUE" } */
+
+ for (j=0; j<256; j++) {
+
+ __analyzer_eval (j >= 0); /* { dg-warning "TRUE" } */
+ /* { dg-warning "UNKNOWN" "" { target *-*-* } .-1 } */
+
+ __analyzer_eval (j < 256); /* { dg-warning "TRUE" } */
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "3 exploded nodes" } */
+
+ for (k=0; k<256; k++) {
+
+ __analyzer_eval (k >= 0); /* { dg-warning "TRUE" } */
+ /* { dg-warning "UNKNOWN" "" { target *-*-* } .-1 } */
+
+ __analyzer_eval (k < 256); /* { dg-warning "TRUE" } */
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "4 exploded nodes" } */
+ }
+ }
+ }
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 exploded node" } */
+}
--- /dev/null
+/* { dg-additional-options "-fno-analyzer-state-purge" } */
+
+#include "analyzer-decls.h"
+
+void test(void)
+{
+ int i;
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 exploded node" } */
+
+ for (i=0; i<256; i++) {
+ __analyzer_eval (i < 256); /* { dg-warning "TRUE" } */
+ /* (should report TRUE twice). */
+
+ __analyzer_eval (i == 0); /* { dg-warning "TRUE" } */
+ /* { dg-warning "FALSE" "" { xfail *-*-* } .-1 } */
+ /* { dg-warning "UNKNOWN" "" { target *-*-* } .-2 } */
+ /* TODO(xfail^^^): ideally we ought to figure out i > 0 after 1st iteration. */
+
+ __analyzer_eval (i >= 0); /* { dg-warning "TRUE" } */
+ /* { dg-warning "TRUE" "" { xfail *-*-* } } */
+ /* { dg-warning "UNKNOWN" "" { target *-*-* } .-2 } */
+ /* TODO(xfail^^^): ideally we ought to figure out i >= 0 for all iterations. */
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 exploded nodes" } */
+ }
+
+ __analyzer_eval (i >= 256); /* { dg-warning "TRUE" } */
+
+ __analyzer_eval (i == 256); /* { dg-warning "TRUE" "" { xfail *-*-* } } */
+ /* { dg-warning "UNKNOWN" "" { target *-*-* } .-1 } */
+ /* TODO(xfail^^^): it only figures out i >= 256, rather than i == 256. */
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 exploded node" } */
+}
--- /dev/null
+#include <stdlib.h>
+#include <string.h>
+
+extern int foo (void);
+extern int bar (void);
+extern void could_free (void *);
+extern void cant_free (const void *); /* since it's a const void *. */
+
+void test_1 (void)
+{
+ void *ptr = malloc (1024);
+ free (ptr);
+ free (ptr); /* { dg-warning "double-'free' of 'ptr'" } */
+}
+
+void test_2 (void *ptr)
+{
+ free (ptr);
+ free (ptr); /* { dg-warning "double-'free' of 'ptr'" } */
+}
+
+void test_2a (void *ptr)
+{
+ __builtin_free (ptr);
+ __builtin_free (ptr); /* { dg-warning "double-'free' of 'ptr'" } */
+}
+
+int *test_3 (void)
+{
+ int *ptr = (int *)malloc (sizeof (int));
+ *ptr = 42; /* { dg-warning "dereference of possibly-NULL 'ptr'" } */
+ return ptr;
+}
+
+int *test_3a (void)
+{
+ int *ptr = (int *)__builtin_malloc (sizeof (int));
+ *ptr = 42; /* { dg-warning "dereference of possibly-NULL 'ptr'" } */
+ return ptr;
+}
+
+int *test_4 (void)
+{
+ int *ptr = (int *)malloc (sizeof (int));
+ if (ptr)
+ *ptr = 42;
+ else
+ *ptr = 43; /* { dg-warning "dereference of NULL 'ptr'" } */
+ return ptr;
+}
+
+int test_5 (int *ptr)
+{
+ free (ptr);
+ return *ptr; /* { dg-warning "use after 'free' of 'ptr'" } */
+}
+
+void test_6 (void *ptr)
+{
+ void *q;
+ q = ptr;
+ free (ptr);
+ free (q); /* { dg-warning "double-'free' of 'q'" } */
+ /* The above case requires us to handle equivalence classes in
+ state transitions. */
+}
+
+void test_7 (void)
+{
+ void *ptr = malloc(4096);
+ if (!ptr)
+ return;
+ memset(ptr, 0, 4096);
+ free(ptr);
+}
+
+void *test_8 (void)
+{
+ void *ptr = malloc(4096);
+ if (!ptr)
+ return NULL;
+ memset(ptr, 0, 4096);
+ return ptr;
+ /* This needs phi nodes to affect equivalence classes, or we get a false report
+ of a leak. */
+}
+
+void test_9 (void)
+{
+ void *ptr = malloc (1024);
+
+ int i;
+ for (i = 0; i < 1024; i++)
+ free (ptr); /* { dg-warning "double-'free' of 'ptr'" } */
+}
+
+void test_10 (void)
+{
+ void *ptr = malloc (1024);
+
+ int i;
+ for (i = 0; i < 1024; i++)
+ foo ();
+
+ free (ptr);
+ free (ptr); /* { dg-warning "double-'free' of 'ptr'" } */
+}
+
+void test_11 (void)
+{
+ void *ptr = malloc (1024);
+
+ while (foo ())
+ bar ();
+
+ free (ptr);
+ free (ptr); /* { dg-warning "double-'free' of 'ptr'" } */
+}
+
+void test_12 (void)
+{
+ void *ptr = malloc (1024);
+
+ while (1)
+ {
+ free (ptr);
+ free (ptr); /* { dg-warning "double-'free' of 'ptr'" } */
+ }
+}
+
+void test_13 (void)
+{
+ void *p = malloc (1024); /* { dg-message "allocated here" } */
+ void *q = malloc (1024);
+
+ foo ();
+ if (!q)
+ {
+ free (q);
+ return; /* { dg-warning "leak of 'p'" } */
+ }
+ bar ();
+ free (q);
+ free (p);
+}
+
+void test_14 (void)
+{
+ void *p, *q;
+ p = malloc (1024);
+ if (!p)
+ return;
+
+ q = malloc (1024);
+ if (!q)
+ {
+ free (p);
+ free (q);
+ /* oops: missing "return". */
+ }
+ bar ();
+ free (q); /* Although this looks like a double-'free' of q,
+ it's known to be NULL for the case where free is
+ called twice on it. */
+ free (p); /* { dg-warning "double-'free' of 'p'" } */
+}
+
+void test_15 (void)
+{
+ void *p = NULL, *q = NULL;
+
+ p = malloc (1024);
+ if (!p)
+ goto fail;
+
+ foo ();
+
+ q = malloc (1024);
+ if (!q)
+ goto fail;
+
+ bar ();
+
+ fail:
+ free (q);
+ free (p);
+}
+
+void test_16 (void)
+{
+ void *p, *q;
+
+ p = malloc (1024);
+ if (!p)
+ goto fail;
+
+ foo ();
+
+ q = malloc (1024);
+ if (!q)
+ goto fail;
+
+ bar ();
+
+ fail:
+ free (q); /* { dg-warning "free of uninitialized 'q'" "" { xfail *-*-* } } */
+ /* TODO(xfail): implement uninitialized detection. */
+ free (p);
+}
+
+void test_17 (void)
+{
+ void *ptr = malloc (1024); /* { dg-message "allocated here" } */
+} /* { dg-warning "leak of 'ptr'" } */
+
+void test_18 (void)
+{
+ void *ptr = malloc (64); /* { dg-message "allocated here" } */
+ ptr = NULL; /* { dg-warning "leak of 'ptr'" } */
+}
+
+void test_19 (void)
+{
+ void *ptr = malloc (64);
+ free (ptr);
+ ptr = NULL;
+ free (ptr);
+}
+
+void *global_ptr_20;
+
+void test_20 (void)
+{
+ global_ptr_20 = malloc (1024);
+}
+
+int *test_21 (int i)
+{
+ int *ptr = malloc (sizeof (int));
+ if (!ptr)
+ abort ();
+ *ptr = i;
+ return ptr;
+}
+
+void test_22 (void)
+{
+ void *ptr = malloc (1024);
+
+ int i;
+ for (i = 5; i < 10; i++)
+ foo ();
+
+ free (ptr);
+ free (ptr); /* { dg-warning "double-'free' of 'ptr'" } */
+}
+
+int *test_23 (int n)
+{
+ int *ptr = (int *)calloc (n, sizeof (int));
+ ptr[0] = 42; /* { dg-warning "dereference of possibly-NULL 'ptr'" } */
+ return ptr;
+}
+
+int *test_23a (int n)
+{
+ int *ptr = (int *)__builtin_calloc (n, sizeof (int));
+ ptr[0] = 42; /* { dg-warning "dereference of possibly-NULL 'ptr'" } */
+ return ptr;
+}
+
+int test_24 (void)
+{
+ void *ptr = alloca (sizeof (int)); /* { dg-message "memory is allocated on the stack here" } */
+ free (ptr); /* { dg-warning "'free' of memory allocated on the stack by 'alloca' \\('ptr'\\) will corrupt the heap \\\[CWE-590\\\]" } */
+}
+
+int test_25 (void)
+{
+ char tmp[100];
+ void *p = tmp; /* { dg-message "pointer is from here" } */
+ free (p); /* { dg-warning "'free' of 'p' which points to memory not on the heap \\\[CWE-590\\\]" } */
+ /* TODO: more precise messages here. */
+}
+
+char global_buffer[100];
+
+int test_26 (void)
+{
+ void *p = global_buffer; /* { dg-message "pointer is from here" } */
+ free (p); /* { dg-warning "'free' of 'p' which points to memory not on the heap \\\[CWE-590\\\]" } */
+ /* TODO: more precise messages here. */
+}
+
+struct coord {
+ float x;
+ float y;
+};
+
+struct coord *test_27 (void)
+{
+ struct coord *p = (struct coord *) malloc (sizeof (struct coord)); /* { dg-message "this call could return NULL" } */
+ p->x = 0.f; /* { dg-warning "dereference of possibly-NULL 'p'" } */
+
+ /* Only the first such usage should be reported: */
+ p->y = 0.f;
+
+ return p;
+}
+
+struct coord *test_28 (void)
+{
+ struct coord *p = NULL;
+ p->x = 0.f; /* { dg-warning "dereference of NULL 'p'" } */
+
+ /* Only the first such usage should be reported: */
+ p->y = 0.f;
+
+ return p;
+}
+
+struct link
+{
+ struct link *m_ptr;
+};
+
+struct link *test_29 (void)
+{
+ struct link *res = (struct link *)malloc (sizeof (struct link));
+ if (!res)
+ return NULL;
+ res->m_ptr = (struct link *)malloc (sizeof (struct link));
+ return res;
+}
+
+struct link *test_29a (void)
+{
+ struct link *res = (struct link *)malloc (sizeof (struct link));
+ if (!res)
+ return NULL;
+ res->m_ptr = (struct link *)malloc (sizeof (struct link));
+ if (!res->m_ptr)
+ {
+ free (res);
+ return NULL;
+ }
+ res->m_ptr->m_ptr = (struct link *)malloc (sizeof (struct link));
+ return res;
+}
+
+/* Without consolidation by equivalence class, this one shows two leaks:
+ warning: leak of '<unknown>'
+ warning: leak of 'tmp.m_ptr'
+ We should only show the latter (favoring the most user-readable
+ expression in the equivalence class). */
+void test_30 (void)
+{
+ struct link tmp;
+ tmp.m_ptr = (struct link *)malloc (sizeof (struct link)); /* { dg-message "allocated here" } */
+} /* { dg-warning "leak of 'tmp.m_ptr'" } */
+/* { dg-bogus "leak of '<unknown>'" "" { xfail *-*-* } .-1 } */
+
+void test_31 (void)
+{
+ struct link tmp;
+ void *ptr = malloc (sizeof (struct link)); /* { dg-message "allocated here" } */
+ tmp.m_ptr = (struct link *)ptr;
+} /* { dg-warning "leak of 'ptr'" } */
+/* { dg-bogus "leak of 'tmp.m_ptr'" "" { xfail *-*-* } .-1 } */
+
+void test_32 (void)
+{
+ void *ptr = malloc (1024);
+ could_free (ptr);
+} /* { dg-bogus "leak" } */
+
+void test_33 (void)
+{
+ void *ptr = malloc (1024); /* { dg-message "allocated here" } */
+ cant_free (ptr);
+} /* { dg-warning "leak of 'ptr'" } */
+
+void test_34 (void)
+{
+ float *q;
+ struct coord *p = malloc (sizeof (struct coord));
+ if (!p)
+ return;
+ p->x = 0.0f;
+ q = &p->x;
+ free (p);
+ *q = 1.0f; /* { dg-warning "use after 'free' of 'q'" } */
+};
+
+int test_35 (void)
+{
+ void *ptr = malloc(4096);
+ if (!ptr)
+ return -1;
+ memset(ptr, 0, 4096);
+ free(ptr);
+ return 0;
+}
+
+void test_36 (void)
+{
+ void *ptr = malloc(4096);
+ if (!ptr)
+ return;
+ memset(ptr, 0, 4096);
+ free(ptr);
+}
+
+void *test_37a (void)
+{
+ void *ptr = malloc(4096); /* { dg-message "this call could return NULL" } */
+ memset(ptr, 0, 4096); /* { dg-warning "use of possibly-NULL 'ptr' where non-null expected" } */
+ return ptr;
+}
+
+int test_37b (void)
+{
+ void *p = malloc(4096);
+ void *q = malloc(4096); /* { dg-message "this call could return NULL" } */
+ if (p) {
+ memset(p, 0, 4096); /* Not a bug: checked */
+ } else {
+ memset(q, 0, 4096); /* { dg-warning "use of possibly-NULL 'q' where non-null expected" } */
+ }
+ free(p);
+ free(q);
+ return 0;
+}
+
+extern void might_use_ptr (void *ptr);
+
+void test_38(int i)
+{
+ void *p;
+
+ p = malloc(1024);
+ if (p) {
+ free(p);
+ might_use_ptr(p); /* { dg-warning "use after 'free' of 'p'" "" { xfail *-*-* } } */
+ // TODO: xfail
+ }
+}
+
+int *
+test_39 (int i)
+{
+ int *p = (int*)malloc(sizeof(int*)); /* { dg-message "this call could return NULL" } */
+ *p = i; /* { dg-warning "dereference of possibly-NULL 'p'" } */
+ return p;
+}
+
+int *
+test_40 (int i)
+{
+ int *p = (int*)malloc(sizeof(int*));
+ i = *p; /* { dg-warning "dereference of possibly-NULL 'p'" } */
+ /* TODO: (it's also uninitialized) */
+ return p;
+}
+
+char *
+test_41 (int flag)
+{
+ char *buffer;
+
+ if (flag) {
+ buffer = (char*)malloc(4096);
+ } else {
+ buffer = NULL;
+ }
+
+ buffer[0] = 'a'; /* { dg-warning "dereference of possibly-NULL 'buffer'" } */
+ /* { dg-warning "dereference of NULL 'buffer'" "" { target *-*-* } .-1 } */
+
+ return buffer;
+}
+
+void test_42a (void)
+{
+ void *p = malloc (1024); /* { dg-message "allocated here" } */
+ free (p + 64); /* this could well corrupt the heap. */
+ /* TODO: ^^^ we should warn about this. */
+} /* { dg-warning "leak of 'p'" } */
+/* TODO: presumably we should complain about the bogus free, but then
+ maybe not complain about the leak. */
+// CWE-761: Free of Pointer not at Start of Buffer
+
+void test_42b (void)
+{
+ void *p = malloc (1024); /* { dg-message "allocated here" } */
+ free (p - 64); /* this could well corrupt the heap. */
+ /* TODO: ^^^ we should warn about this. */
+} /* { dg-warning "leak of 'p'" } */
+/* TODO: presumably we should complain about the bogus free, but then
+ maybe not complain about the leak. */
+// CWE-761: Free of Pointer not at Start of Buffer
+
+void test_42c (void)
+{
+ void *p = malloc (1024);
+ void *q = p + 64;
+ free (q - 64); /* this is probably OK. */
+} /* { dg-bogus "leak of 'p'" "" { xfail *-*-* } } */
+// TODO(xfail)
+
+#if 0
+void test_31 (void *p)
+{
+ void *q = realloc (p, 1024);
+ free (p); /* FIXME: this is a double-'free'. */
+ free (q);
+}
+
+void test_32 (void)
+{
+ void *p = malloc (64);
+ p = realloc (p, 1024); /* FIXME: this leaks if it fails. */
+ free (p);
+}
+#endif
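+
+/* For reference (an illustrative sketch only, not tied to any dg
+   directive): the non-leaking realloc idiom that the FIXME in test_32
+   above alludes to assigns to a temporary first, so that on failure
+   the original block can still be released (or kept):
+
+     void *q = realloc (p, 1024);
+     if (!q)
+       {
+         free (p);
+         return;
+       }
+     p = q;
+*/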
+
+struct link global_link;
+
+void test_43 (void)
+{
+ global_link.m_ptr = malloc (sizeof (struct link)); /* { dg-message "allocated here" } */
+ global_link.m_ptr = NULL;
+} /* { dg-warning "leak of '<unknown>'" } */
+/* TODO: should be more precise than just '<unknown>', and
+ ideally would be at the assignment to NULL. */
+
+struct link *global_ptr;
+
+void test_44 (void)
+{
+ global_ptr = malloc (sizeof (struct link));
+ if (!global_ptr)
+ return;
+ global_ptr->m_ptr = malloc (sizeof (struct link)); /* { dg-message "allocated here" } */
+ free (global_ptr); /* { dg-warning "leak of '<unknown>'" } */
+ /* TODO: should be more precise than just '<unknown>'. */
+}
+
+extern void might_take_ownership (void *ptr);
+
+void test_45 (void)
+{
+ void *p = malloc (1024);
+ might_take_ownership (p);
+}
+
+void test_46 (void)
+{
+ struct link *p = (struct link *)malloc (sizeof (struct link));
+ if (!p)
+ return;
+ struct link *q = (struct link *)malloc (sizeof (struct link));
+ p->m_ptr = q;
+ might_take_ownership (p);
+}
+
+extern int maybe_alloc (char **);
+
+int test_47 (void)
+{
+ char *p = ((void *)0);
+ int p_size = 0;
+
+ p = malloc (16);
+ if (p) {
+ free (p);
+ } else {
+ int retval = maybe_alloc (&p); /* this might write to "p". */
+ if (retval)
+ return (retval);
+ p_size = strlen(p); /* { dg-bogus "non-null expected" } */
+ free (p);
+ }
+ return p_size;
+}
--- /dev/null
+/* Tests for precision-of-wording within malloc warnings. */
+
+typedef __SIZE_TYPE__ size_t;
+extern void *malloc(size_t);
+extern void free(void *);
+extern char *strcpy(char *__restrict __dest, const char *__restrict __src)
+ __attribute__((__nothrow__, __leaf__)) __attribute__((__nonnull__(1, 2)));
+
+void test_1 (void)
+{
+ void *p = malloc (1024); /* { dg-message "\\(1\\) this call could return NULL" } */
+ strcpy ((char *)p, "hello world"); /* { dg-warning "use of possibly-NULL 'p' where non-null expected" } */
+ /* { dg-message "\\(2\\) argument 1 \\('p'\\) from \\(1\\) could be NULL where non-null expected" "" { target *-*-* } .-1 } */
+ free (p);
+}
+
+int *test_2 (void)
+{
+ int *i = malloc (sizeof (int)); /* { dg-message "\\(1\\) this call could return NULL" } */
+ *i = 42; /* { dg-warning "dereference of possibly-NULL 'i'" } */
+ /* { dg-message "\\(2\\) 'i' could be NULL: unchecked value from \\(1\\)" "" { target *-*-* } .-1 } */
+ return i;
+}
--- /dev/null
+#include <stdlib.h>
+
+/* Don't complain about leaks due to exiting from "main". */
+
+void main (void)
+{
+ void *p = malloc (1024);
+}
--- /dev/null
+#include <stdlib.h>
+
+typedef void *(*allocator_t) (size_t);
+typedef void (*deallocator_t) (void *);
+
+static allocator_t __attribute__((noinline))
+get_malloc (void)
+{
+ return malloc;
+}
+
+static allocator_t __attribute__((noinline))
+get_alloca (void)
+{
+ return alloca;
+}
+
+static deallocator_t __attribute__((noinline))
+get_free (void)
+{
+ return free;
+}
+
+void test_1 (void *ptr)
+{
+ deallocator_t dealloc_fn = free;
+ dealloc_fn (ptr); /* { dg-message "first 'free' here" } */
+ dealloc_fn (ptr); /* { dg-warning "double-'free'" } */
+}
+
+void test_2 (void *ptr)
+{
+ deallocator_t dealloc_fn = get_free ();
+ dealloc_fn (ptr); /* { dg-message "first 'free' here" } */
+ dealloc_fn (ptr); /* { dg-warning "double-'free'" } */
+}
+
+static void __attribute__((noinline))
+called_by_test_3 (void *ptr, deallocator_t dealloc_fn)
+{
+ dealloc_fn (ptr); /* { dg-warning "double-'free'" } */
+}
+
+void test_3 (void *ptr)
+{
+ called_by_test_3 (ptr, free);
+ called_by_test_3 (ptr, free);
+}
+
+int *test_4 (void)
+{
+ allocator_t alloc_fn = get_malloc ();
+ int *ptr = alloc_fn (sizeof (int)); /* { dg-message "this call could return NULL" } */
+ *ptr = 42; /* { dg-warning "dereference of possibly-NULL 'ptr'" } */
+ return ptr;
+}
+
+int *test_5 (void)
+{
+ allocator_t alloc_fn = get_alloca ();
+ deallocator_t dealloc_fn = get_free ();
+ int *ptr = alloc_fn (sizeof (int)); /* { dg-message "pointer is from here" } */
+ /* TODO: message should read "memory is allocated on the stack here". */
+ dealloc_fn (ptr); /* { dg-warning "'free' of 'ptr' which points to memory not on the heap" } */
+}
+
+static void __attribute__((noinline))
+called_by_test_6a (void *ptr)
+{
+ free (ptr); /* { dg-warning "double-'free'" "" { xfail *-*-* } } */
+}
+
+static deallocator_t __attribute__((noinline))
+called_by_test_6b (void)
+{
+ return called_by_test_6a;
+}
+
+void test_6 (void *ptr)
+{
+ deallocator_t dealloc_fn = called_by_test_6b ();
+ dealloc_fn (ptr);
+ dealloc_fn (ptr);
+}
--- /dev/null
+/* { dg-additional-options "-O2" } */
+
+#include <stdlib.h>
+
+void test(void)
+{
+ void *ptr = malloc(512);
+ free(ptr);
+ free(ptr); /* { dg-warning "double-'free'" "" { xfail *-*-* } } */
+}
+/* With optimization, the whole of test() goes away in the "cddce" pass
+ before the analysis pass sees it, and hence we get no error message. */
--- /dev/null
+#include <stdlib.h>
+
+extern void foo (void);
+extern void bar (void);
+
+void test_1 (int flag, int n);
+
+void caller_1_of_test_1 (int n)
+{
+ test_1 (1, n); /* { dg-bogus "test_1" } */
+ test_1 (0, n); /* { dg-bogus "test_1" } */
+}
+
+void __attribute__((noinline))
+test_1 (int flag, int n)
+{
+ int *ptr = (int *)malloc (sizeof (int));
+
+ if (flag)
+ {
+ int i;
+ for (i = 0; i < n; i++)
+ foo ();
+ }
+ else
+ bar ();
+
+ free (ptr);
+ free (ptr); /* { dg-warning "double-'free'" } */
+ /* FIXME: we get duplicates intraprocedurally, as there are two paths
+ through the function.
+ The calls from the two callers of test_1 also generate additional duplicates.
+ How to verify lack of duplicates?
+ Putting a dg-bogus on the interprocedural one detects that, at least. */
+
+ if (flag)
+ foo ();
+ else
+ bar ();
+}
+
+void caller_2_of_test_1 (int n)
+{
+ test_1 (1, n); /* { dg-bogus "test_1" } */
+ test_1 (0, n); /* { dg-bogus "test_1" } */
+}
--- /dev/null
+#include <stdlib.h>
+
+void *
+calls_malloc (void)
+{
+ void *result = malloc (1024);
+ return result;
+}
+
+int *test_1 (int i)
+{
+ int *ptr = (int *)calls_malloc ();
+ *ptr = i; /* { dg-warning "dereference of possibly-NULL 'ptr'" } */
+ return ptr;
+}
+
+/* Same as test_1, to exercise the caches. */
+
+int *test_2 (int i)
+{
+ int *ptr = (int *)calls_malloc ();
+ *ptr = i; /* { dg-warning "dereference of possibly-NULL 'ptr'" } */
+ return ptr;
+}
--- /dev/null
+#include <stdlib.h>
+
+void
+calls_free (void *victim) /* { dg-message "\\(3\\) entry to 'calls_free'" } */
+/* { dg-message "\\(7\\) entry to 'calls_free'" "" { target *-*-* } .-1 } */
+{
+ free (victim); /* { dg-warning "double-'free' of 'victim'" } */
+ /* { dg-message "\\(4\\) first 'free' here" "" { target *-*-* } .-1 } */
+ /* { dg-message "\\(8\\) second 'free' here; first 'free' was at \\(4\\)" "" { target *-*-* } .-2 } */
+
+ /* TODO: would this be better emitted at the callsite,
+ for such a simple wrapper? */
+}
+
+void do_stuff (void)
+{
+ /* Empty. Irrelevant, and thus the diagnostic paths should not descend into it. */
+}
+
+void test (void *ptr) /* { dg-message "\\(1\\) entry to 'test'" } */
+{
+ do_stuff ();
+
+ calls_free (ptr); /* { dg-message "\\(2\\) calling 'calls_free' from 'test'" } */
+ /* { dg-message "\\(5\\) returning to 'test' from 'calls_free'" "" { target *-*-* } .-1 } */
+
+ do_stuff ();
+
+ calls_free (ptr); /* { dg-message "\\(6\\) passing freed pointer 'ptr' in call to 'calls_free' from 'test'" } */
+
+ do_stuff ();
+}
--- /dev/null
+/* { dg-additional-options "-fdiagnostics-show-line-numbers -fdiagnostics-path-format=inline-events -fdiagnostics-show-caret" } */
+/* { dg-enable-nn-line-numbers "" } */
+
+#include <stdlib.h>
+
+extern int some_condition ();
+extern void do_stuff (int);
+
+void
+may_call_free (void *victim)
+{
+ if (some_condition ())
+ return;
+
+ free (victim); /* { dg-warning "double-'free' of 'victim'" } */
+}
+
+void test (void *ptr)
+{
+ do_stuff (1);
+
+ may_call_free (ptr);
+
+ do_stuff (2);
+
+ may_call_free (ptr);
+
+ do_stuff (3);
+}
+
+/* { dg-begin-multiline-output "" }
+ NN | free (victim);
+ | ^~~~~~~~~~~~~
+ 'test': events 1-2
+ |
+ | NN | void test (void *ptr)
+ | | ^~~~
+ | | |
+ | | (1) entry to 'test'
+ |......
+ | NN | may_call_free (ptr);
+ | | ~~~~~~~~~~~~~~~~~~~
+ | | |
+ | | (2) calling 'may_call_free' from 'test'
+ |
+ +--> 'may_call_free': events 3-6
+ |
+ | NN | may_call_free (void *victim)
+ | | ^~~~~~~~~~~~~
+ | | |
+ | | (3) entry to 'may_call_free'
+ | NN | {
+ | NN | if (some_condition ())
+ | | ~
+ | | |
+ | | (4) following 'false' branch...
+ |......
+ | NN | free (victim);
+ | | ~~~~~~~~~~~~~
+ | | |
+ | | (5) ...to here
+ | | (6) first 'free' here
+ |
+ <------+
+ |
+ 'test': events 7-8
+ |
+ | NN | may_call_free (ptr);
+ | | ^~~~~~~~~~~~~~~~~~~
+ | | |
+ | | (7) returning to 'test' from 'may_call_free'
+ |......
+ | NN | may_call_free (ptr);
+ | | ~~~~~~~~~~~~~~~~~~~
+ | | |
+ | | (8) passing freed pointer 'ptr' in call to 'may_call_free' from 'test'
+ |
+ +--> 'may_call_free': events 9-12
+ |
+ | NN | may_call_free (void *victim)
+ | | ^~~~~~~~~~~~~
+ | | |
+ | | (9) entry to 'may_call_free'
+ | NN | {
+ | NN | if (some_condition ())
+ | | ~
+ | | |
+ | | (10) following 'false' branch...
+ |......
+ | NN | free (victim);
+ | | ~~~~~~~~~~~~~
+ | | |
+ | | (11) ...to here
+ | | (12) second 'free' here; first 'free' was at (6)
+ |
+ { dg-end-multiline-output "" } */
--- /dev/null
+#include <stdlib.h>
+
+void recursive_free (void *ptr)
+{
+ free (ptr); /* { dg-warning "double-'free' of 'ptr'" } */
+ recursive_free (ptr);
+}
--- /dev/null
+/* { dg-additional-options "-fanalyzer-verbosity=1" } */
+
+#include <stdlib.h>
+
+void
+calls_free (void *victim)
+{
+ free (victim); /* { dg-warning "double-'free' of 'victim'" } */
+}
+
+extern void do_stuff (void);
+
+struct foo
+{
+ void *m_p;
+};
+
+void test (struct foo f)
+{
+ do_stuff ();
+
+ calls_free (f.m_p);
+
+ do_stuff ();
+
+ calls_free (f.m_p); /* { dg-message "passing freed pointer '<unknown>' in call to 'calls_free' from 'test'" } */
+ // TODO: something better than '<unknown>'
+
+ do_stuff ();
+}
--- /dev/null
+/* { dg-additional-options "-fanalyzer-verbosity=1" } */
+
+#include <stdlib.h>
+
+void
+calls_free (void *victim) /* { dg-message "\\(3\\) entry to 'calls_free'" } */
+/* { dg-message "\\(7\\) entry to 'calls_free'" "" { target *-*-* } .-1 } */
+{
+ free (victim); /* { dg-warning "double-'free' of 'victim'" } */
+ /* { dg-message "\\(4\\) first 'free' here" "" { target *-*-* } .-1 } */
+ /* { dg-message "\\(8\\) second 'free' here; first 'free' was at \\(4\\)" "" { target *-*-* } .-2 } */
+
+ /* TODO: would this be better emitted at the callsite,
+ for such a simple wrapper? */
+}
+
+extern void do_stuff (void);
+
+void test (void *ptr) /* { dg-message "\\(1\\) entry to 'test'" } */
+{
+ do_stuff ();
+
+ calls_free (ptr); /* { dg-message "\\(2\\) calling 'calls_free' from 'test'" } */
+ /* { dg-message "\\(5\\) returning to 'test' from 'calls_free'" "" { target *-*-* } .-1 } */
+
+ do_stuff ();
+
+ calls_free (ptr); /* { dg-message "\\(6\\) passing freed pointer 'ptr' in call to 'calls_free' from 'test'" } */
+
+ do_stuff ();
+}
+
+#include <stdlib.h>
+
+void *
+calls_malloc (void)
+{
+ void *result = malloc (1024);
+ return result;
+}
+
+void
+calls_free (void *victim)
+{
+ free (victim); /* { dg-warning "double-'free' of 'victim'" } */
+ /* TODO: this would be better emitted at the callsite,
+ for such a simple wrapper. */
+}
+
+void test (void)
+{
+ void *ptr = calls_malloc ();
+ calls_free (ptr);
+ calls_free (ptr); /* BUG: double-'free'. */
+}
--- /dev/null
+#include <stdlib.h>
+
+static void calls_free(int *q)
+{
+ free(q);
+}
+
+void test(void *p)
+{
+ calls_free(p);
+
+ free(p); /* { dg-warning "double-'free' of 'p'" } */
+}
--- /dev/null
+#include <stdlib.h>
+
+static int *calls_malloc(void)
+{
+ return malloc(sizeof(int));
+}
+
+int *test(void)
+{
+ int *p = calls_malloc(); /* { dg-message "possible return of NULL to 'test' from 'calls_malloc'" } */
+ *p = 42; /* { dg-warning "dereference of possibly-NULL 'p'" } */
+ return p;
+}
--- /dev/null
+#include <stdlib.h>
+
+void *
+calls_malloc (void)
+{
+ void *result = malloc (1024); /* { dg-message "allocated here" } */
+ return result; /* { dg-warning "leak of 'result'" } */
+}
+
+void test_1 ()
+{
+ calls_malloc (); /* { dg-message "calling 'calls_malloc' from 'test_1'" } */
+}
+
+static void callee (int i)
+{
+}
+
+void test_2 (int i)
+{
+ callee (i);
+}
--- /dev/null
+#include <stdlib.h>
+
+/**************************************************************************/
+
+static void maybe_calls_free_1(int *q, int flag)
+{
+ if (flag)
+ free(q); /* { dg-warning "double-'free' of 'q'" } */
+}
+
+void test_1(void *p)
+{
+ maybe_calls_free_1(p, 1);
+ maybe_calls_free_1(p, 1);
+}
+
+/**************************************************************************/
+
+static void maybe_calls_free_2(int *q, int flag)
+{
+ if (flag)
+ free(q); /* { dg-bogus "double-'free'" } */
+}
+
+void test_2(void *p)
+{
+ maybe_calls_free_2(p, 0);
+ maybe_calls_free_2(p, 0);
+}
--- /dev/null
+/* Example of a multilevel wrapper around malloc/free, with a double-'free'. */
+
+/* { dg-additional-options "-fdiagnostics-show-line-numbers -fdiagnostics-path-format=inline-events -fanalyzer-checker=malloc -fanalyzer-verbose-state-changes -fdiagnostics-show-caret" } */
+/* { dg-enable-nn-line-numbers "" } */
+
+#include <stdlib.h>
+
+void *wrapped_malloc (size_t size)
+{
+ return malloc (size);
+}
+
+void wrapped_free (void *ptr)
+{
+ free (ptr); /* { dg-warning "double-'free' of 'ptr' \\\[CWE-415\\\]" } */
+}
+
+typedef struct boxed_int
+{
+ int i;
+} boxed_int;
+
+boxed_int *
+make_boxed_int (int i)
+{
+ boxed_int *result = (boxed_int *)wrapped_malloc (sizeof (boxed_int));
+ if (!result)
+ abort ();
+ result->i = i;
+ return result;
+}
+
+void
+free_boxed_int (boxed_int *bi)
+{
+ wrapped_free (bi);
+}
+
+void test (int i)
+{
+ boxed_int *obj = make_boxed_int (i);
+
+ free_boxed_int (obj);
+
+ free_boxed_int (obj);
+}
+
+/* double-'free'. */
+/* { dg-begin-multiline-output "" }
+ NN | free (ptr);
+ | ^~~~~~~~~~
+ 'test': events 1-2
+ |
+ | NN | void test (int i)
+ | | ^~~~
+ | | |
+ | | (1) entry to 'test'
+ | NN | {
+ | NN | boxed_int *obj = make_boxed_int (i);
+ | | ~~~~~~~~~~~~~~~~~~
+ | | |
+ | | (2) calling 'make_boxed_int' from 'test'
+ |
+ +--> 'make_boxed_int': events 3-6
+ |
+ | NN | make_boxed_int (int i)
+ | | ^~~~~~~~~~~~~~
+ | | |
+ | | (3) entry to 'make_boxed_int'
+ |......
+ | NN | if (!result)
+ | | ~
+ | | |
+ | | (4) following 'false' branch (when 'result' is non-NULL)...
+ | NN | abort ();
+ | NN | result->i = i;
+ | | ~~~~~~~~~~~~~
+ | | |
+ | | (5) ...to here
+ | NN | return result;
+ | | ~~~~~~
+ | | |
+ | | (6) state of '<return-value>': 'start' -> 'nonnull' (origin: NULL)
+ |
+ <------+
+ |
+ 'test': events 7-8
+ |
+ | NN | boxed_int *obj = make_boxed_int (i);
+ | | ^~~~~~~~~~~~~~~~~~
+ | | |
+ | | (7) returning to 'test' from 'make_boxed_int'
+ | NN |
+ | NN | free_boxed_int (obj);
+ | | ~~~~~~~~~~~~~~~~~~~~
+ | | |
+ | | (8) calling 'free_boxed_int' from 'test'
+ |
+ +--> 'free_boxed_int': events 9-10
+ |
+ | NN | free_boxed_int (boxed_int *bi)
+ | | ^~~~~~~~~~~~~~
+ | | |
+ | | (9) entry to 'free_boxed_int'
+ | NN | {
+ | NN | wrapped_free (bi);
+ | | ~~~~~~~~~~~~~~~~~
+ | | |
+ | | (10) calling 'wrapped_free' from 'free_boxed_int'
+ |
+ +--> 'wrapped_free': events 11-12
+ |
+ | NN | void wrapped_free (void *ptr)
+ | | ^~~~~~~~~~~~
+ | | |
+ | | (11) entry to 'wrapped_free'
+ | NN | {
+ | NN | free (ptr);
+ | | ~~~~~~~~~~
+ | | |
+ | | (12) first 'free' here (state of 'ptr': 'nonnull' -> 'freed', origin: NULL)
+ |
+ <------+
+ |
+ 'free_boxed_int': event 13
+ |
+ | NN | wrapped_free (bi);
+ | | ^~~~~~~~~~~~~~~~~
+ | | |
+ | | (13) returning to 'free_boxed_int' from 'wrapped_free'
+ |
+ <------+
+ |
+ 'test': events 14-15
+ |
+ | NN | free_boxed_int (obj);
+ | | ^~~~~~~~~~~~~~~~~~~~
+ | | |
+ | | (14) returning to 'test' from 'free_boxed_int'
+ | NN |
+ | NN | free_boxed_int (obj);
+ | | ~~~~~~~~~~~~~~~~~~~~
+ | | |
+ | | (15) passing freed pointer 'obj' in call to 'free_boxed_int' from 'test'
+ |
+ +--> 'free_boxed_int': events 16-17
+ |
+ | NN | free_boxed_int (boxed_int *bi)
+ | | ^~~~~~~~~~~~~~
+ | | |
+ | | (16) entry to 'free_boxed_int'
+ | NN | {
+ | NN | wrapped_free (bi);
+ | | ~~~~~~~~~~~~~~~~~
+ | | |
+ | | (17) passing freed pointer 'bi' in call to 'wrapped_free' from 'free_boxed_int'
+ |
+ +--> 'wrapped_free': events 18-19
+ |
+ | NN | void wrapped_free (void *ptr)
+ | | ^~~~~~~~~~~~
+ | | |
+ | | (18) entry to 'wrapped_free'
+ | NN | {
+ | NN | free (ptr);
+ | | ~~~~~~~~~~
+ | | |
+ | | (19) second 'free' here; first 'free' was at (12) ('ptr' is in state 'freed')
+ |
+ { dg-end-multiline-output "" } */
+
+/* TODO: the event describing the allocation is uninteresting and probably
+ should be purged. */
--- /dev/null
+#include <stdlib.h>
+#include "malloc-ipa-8-lto.h"
+
+void *wrapped_malloc (size_t size)
+{
+ return malloc (size);
+}
+
+void wrapped_free (void *ptr)
+{
+ free (ptr);
+}
--- /dev/null
+#include <stdlib.h>
+#include "malloc-ipa-8-lto.h"
+
+boxed_int *
+make_boxed_int (int i)
+{
+ boxed_int *result = (boxed_int *)wrapped_malloc (sizeof (boxed_int));
+ if (!result)
+ abort ();
+ result->i = i;
+ return result;
+}
+
+void
+free_boxed_int (boxed_int *bi)
+{
+ wrapped_free (bi);
+}
--- /dev/null
+/* { dg-do link } */
+/* { dg-require-effective-target lto } */
+/* { dg-additional-options "-flto" } */
+/* { dg-additional-sources "malloc-ipa-8-lto-a.c malloc-ipa-8-lto-b.c" } */
+
+#include <stdlib.h>
+#include "malloc-ipa-8-lto.h"
+
+void test (int i)
+{
+ boxed_int *obj = make_boxed_int (i);
+
+ free_boxed_int (obj);
+ free (obj); /* { dg-warning "double-'free'" } */
+}
+
+int main() { return 0; }
--- /dev/null
+#include <stddef.h>
+
+extern void *wrapped_malloc (size_t size);
+extern void wrapped_free (void *ptr);
+
+typedef struct boxed_int
+{
+ int i;
+} boxed_int;
+
+extern boxed_int *make_boxed_int (int i);
+extern void free_boxed_int (boxed_int *bi);
--- /dev/null
+/* Example of a multilevel wrapper around malloc, with an unchecked write. */
+
+/* { dg-additional-options "-fdiagnostics-show-line-numbers -fdiagnostics-path-format=inline-events -fanalyzer-checker=malloc -fdiagnostics-show-caret -fanalyzer-verbose-state-changes" } */
+/* { dg-enable-nn-line-numbers "" } */
+
+#include <stdlib.h>
+
+void *wrapped_malloc (size_t size)
+{
+ return malloc (size);
+}
+
+typedef struct boxed_int
+{
+ int i;
+} boxed_int;
+
+boxed_int *
+make_boxed_int (int i)
+{
+ boxed_int *result = (boxed_int *)wrapped_malloc (sizeof (boxed_int));
+ result->i = i; /* { dg-warning "dereference of possibly-NULL 'result'" } */
+ return result;
+}
+
+/* "dereference of possibly-NULL 'result' [CWE-690]". */
+/* { dg-begin-multiline-output "" }
+ NN | result->i = i;
+ | ~~~~~~~~~~^~~
+ 'make_boxed_int': events 1-2
+ |
+ | NN | make_boxed_int (int i)
+ | | ^~~~~~~~~~~~~~
+ | | |
+ | | (1) entry to 'make_boxed_int'
+ | NN | {
+ | NN | boxed_int *result = (boxed_int *)wrapped_malloc (sizeof (boxed_int));
+ | | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ | | |
+ | | (2) calling 'wrapped_malloc' from 'make_boxed_int'
+ |
+ +--> 'wrapped_malloc': events 3-4
+ |
+ | NN | void *wrapped_malloc (size_t size)
+ | | ^~~~~~~~~~~~~~
+ | | |
+ | | (3) entry to 'wrapped_malloc'
+ | NN | {
+ | NN | return malloc (size);
+ | | ~~~~~~~~~~~~~
+ | | |
+ | | (4) this call could return NULL (state of '<return-value>': 'start' -> 'unchecked', origin: NULL)
+ |
+ <------+
+ |
+ 'make_boxed_int': events 5-6
+ |
+ | NN | boxed_int *result = (boxed_int *)wrapped_malloc (sizeof (boxed_int));
+ | | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ | | |
+ | | (5) possible return of NULL to 'make_boxed_int' from 'wrapped_malloc'
+ | NN | result->i = i;
+ | | ~~~~~~~~~~~~~
+ | | |
+ | | (6) 'result' could be NULL: unchecked value from (4) ('result' is in state 'unchecked')
+ |
+ { dg-end-multiline-output "" } */
--- /dev/null
+/* { dg-additional-options "-fdiagnostics-path-format=none -fanalyzer-verbosity=1" } */
+
+#include <stdlib.h>
+
+void
+two_frees (void *p, void *q)
+{
+ free (p);
+ free (q); /* { dg-warning "double-'free' of 'q'" } */
+ /* TODO: could be useful to identify that p == q when called from 'test'. */
+}
+
+extern void do_stuff (void);
+
+void test (void *ptr)
+{
+ two_frees (ptr, ptr);
+}
--- /dev/null
+/* Test path-printing in the face of macros. */
+
+/* { dg-additional-options "-fdiagnostics-show-line-numbers -fdiagnostics-path-format=inline-events -fdiagnostics-show-caret" } */
+/* { dg-enable-nn-line-numbers "" } */
+
+#include "malloc-macro.h"
+
+/* { dg-warning "double-'free' of 'ptr'" "" { target *-*-* } 2 } */
+
+int test (void *ptr)
+{
+ WRAPPED_FREE (ptr); /* { dg-message "in expansion of macro 'WRAPPED_FREE'" } */
+ WRAPPED_FREE (ptr); /* { dg-message "in expansion of macro 'WRAPPED_FREE'" } */
+
+ /* Erase the spans indicating the header file
+ (to avoid embedding path assumptions). */
+ /* { dg-regexp "\[^|\]+/malloc-macro.h:\[0-9\]+:\[0-9\]+:" } */
+ /* { dg-regexp "\[^|\]+/malloc-macro.h:\[0-9\]+:\[0-9\]+:" } */
+
+ /* { dg-begin-multiline-output "" }
+ NN | #define WRAPPED_FREE(PTR) free(PTR)
+ | ^~~~~~~~~
+ NN | WRAPPED_FREE (ptr);
+ | ^~~~~~~~~~~~
+ 'test': event 1
+ |
+ |
+ | NN | #define WRAPPED_FREE(PTR) free(PTR)
+ | | ^~~~~~~~~
+ | | |
+ | | (1) first 'free' here
+ | NN | WRAPPED_FREE (ptr);
+ | | ^~~~~~~~~~~~
+ |
+ 'test': event 2
+ |
+ |
+ | NN | #define WRAPPED_FREE(PTR) free(PTR)
+ | | ^~~~~~~~~
+ | | |
+ | | (2) second 'free' here; first 'free' was at (1)
+ | NN | WRAPPED_FREE (ptr);
+ | | ^~~~~~~~~~~~
+ |
+ { dg-end-multiline-output "" } */
+}
--- /dev/null
+/* Test path-printing in the face of macros. */
+
+/* { dg-additional-options "-fdiagnostics-path-format=separate-events" } */
+
+#include "malloc-macro.h"
+
+/* { dg-warning "double-'free' of 'ptr'" "" { target *-*-* } 2 } */
+/* { dg-message "first 'free' here" "" { target *-*-* } 2 } */
+/* { dg-message "second 'free' here" "" { target *-*-* } 2 } */
+
+int test (void *ptr)
+{
+ WRAPPED_FREE (ptr); /* { dg-message "in expansion of macro 'WRAPPED_FREE'" } */
+ WRAPPED_FREE (ptr); /* { dg-message "in expansion of macro 'WRAPPED_FREE'" } */
+}
--- /dev/null
+#include <stdlib.h>
+#define WRAPPED_FREE(PTR) free(PTR)
--- /dev/null
+#include <stdlib.h>
+
+/* Ensure that we don't need to laboriously walk every path to get
+ to the end of the function. */
+
+int test_1 (int n)
+{
+ int i, j, k;
+ k = 0;
+ for (int i = 0; i < n; i++)
+ for (int j = 0; j < 1000; j++)
+ k++;
+ return k;
+}
--- /dev/null
+#include <stdlib.h>
+
+/* Ensure that we don't get an exponential growth in paths due to
+ repeated diamonds in the CFG. */
+
+typedef struct obj {
+ int ob_refcnt;
+} PyObject;
+
+extern void Py_Dealloc (PyObject *op);
+
+#define Py_DECREF(op) \
+ do { \
+ if (--((PyObject*)(op))->ob_refcnt == 0) \
+ Py_Dealloc((PyObject *)(op)); \
+ } while (0)
+
+int test (PyObject *obj_01, PyObject *obj_02, PyObject *obj_03,
+ PyObject *obj_04, PyObject *obj_05, PyObject *obj_06,
+ PyObject *obj_07, PyObject *obj_08, PyObject *obj_09,
+ PyObject *obj_10, PyObject *obj_11, PyObject *obj_12,
+ PyObject *obj_13, PyObject *obj_14, PyObject *obj_15
+)
+{
+ Py_DECREF (obj_01); Py_DECREF (obj_02); Py_DECREF (obj_03);
+ Py_DECREF (obj_04); Py_DECREF (obj_05); Py_DECREF (obj_06);
+ Py_DECREF (obj_07); Py_DECREF (obj_08); Py_DECREF (obj_09);
+ Py_DECREF (obj_10); Py_DECREF (obj_11); Py_DECREF (obj_12);
+ Py_DECREF (obj_13); Py_DECREF (obj_14); Py_DECREF (obj_15);
+}
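+
+/* A worked note on why the above is a stress test: each Py_DECREF (op)
+   expands to an if/else "diamond",
+
+     if (--((PyObject *)(op))->ob_refcnt == 0)
+       Py_Dealloc ((PyObject *)(op));
+
+   so N successive uses give 2^N combinations of "deallocated or not";
+   the fifteen uses in 'test' would mean 2^15 = 32768 enumerated paths
+   unless the analyzer merges state at the join points.  */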
--- /dev/null
+#include <stdlib.h>
+
+extern int foo (void);
+
+int successes;
+int failures;
+
+#define ONE_DIAMOND \
+ do { \
+ void *ptr = malloc (128); \
+ if (foo ()) \
+ successes++; \
+ else \
+ failures++; \
+ free (ptr); \
+ } while (0)
+
+#define TEN_DIAMONDS \
+ do { \
+ ONE_DIAMOND; ONE_DIAMOND; ONE_DIAMOND; ONE_DIAMOND; ONE_DIAMOND; \
+ ONE_DIAMOND; ONE_DIAMOND; ONE_DIAMOND; ONE_DIAMOND; ONE_DIAMOND; \
+ } while (0)
+
+void test_3 (void *ptr)
+{
+ free (ptr);
+#if 1
+ ONE_DIAMOND;
+#else
+ /* TODO: enabling this leads to numerous duplicated reports,
+ all of them detailing all the extraneous info about the malloc/free
+ within the diamonds. */
+ TEN_DIAMONDS;
+#endif
+ free (ptr); /* { dg-warning "double-'free' of 'ptr'" } */
+}
--- /dev/null
+/* Verify that we emit sane paths for state machine errors. */
+
+#include <stdlib.h>
+
+void test_1 (void)
+{
+ void *ptr = malloc (1024); /* { dg-line malloc } */
+ free (ptr); /* { dg-line first_free } */
+ free (ptr); /* { dg-line second_free } */
+
+ /* { dg-warning "double-'free' of 'ptr'" "" { target *-*-* } second_free } */
+ /* { dg-message "\\(1\\) allocated here" "" { target *-*-* } malloc } */
+ /* { dg-message "\\(2\\) first 'free' here" "" { target *-*-* } first_free } */
+ /* { dg-message "\\(3\\) second 'free' here; first 'free' was at \\(2\\)" "" { target *-*-* } second_free } */
+}
--- /dev/null
+#include <stdlib.h>
+#include "analyzer-decls.h"
+
+int test (int flag)
+{
+ int other_flag;
+ if (flag)
+ other_flag = 1;
+ else
+ other_flag = 0;
+
+ /* With state-merging, we lose the relationship between 'flag' and 'other_flag'. */
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 exploded nodes" } */
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 exploded node" } */
+
+ if (other_flag)
+ __analyzer_eval (flag); /* { dg-warning "UNKNOWN" } */
+ else
+ __analyzer_eval (flag); /* { dg-warning "UNKNOWN" } */
+}
--- /dev/null
+/* Verify that we emit sane paths for state machine errors. */
+
+#include <stdlib.h>
+
+void test_2 (void *ptr)
+{
+ free (ptr); /* { dg-line first_free } */
+ free (ptr); /* { dg-line second_free } */
+
+ /* { dg-warning "double-'free' of 'ptr'" "" { target *-*-* } second_free } */
+ /* { dg-message "\\(1\\) first 'free' here" "" { target *-*-* } first_free } */
+ /* { dg-message "\\(2\\) second 'free' here; first 'free' was at \\(1\\)" "" { target *-*-* } second_free } */
+}
--- /dev/null
+/* Verify that we emit sane paths for state machine errors. */
+
+#include <stdlib.h>
+
+int *test_3 (void)
+{
+ int *ptr = (int *)malloc (sizeof (int)); /* { dg-line malloc } */
+ *ptr = 42; /* { dg-line unchecked_deref } */
+ return ptr;
+
+ /* { dg-warning "dereference of possibly-NULL 'ptr'" "" { target *-*-* } unchecked_deref } */
+ /* { dg-message "\\(1\\) this call could return NULL" "" { target *-*-* } malloc } */
+ /* { dg-message "\\(2\\) 'ptr' could be NULL" "" { target *-*-* } unchecked_deref } */
+}
--- /dev/null
+/* Verify that we emit sane paths for state machine errors. */
+
+#include <stdlib.h>
+
+int *test_4 (void)
+{
+ int *ptr = (int *)malloc (sizeof (int)); /* { dg-line malloc } */
+ if (ptr) /* { dg-line cond } */
+ *ptr = 42;
+ else
+ *ptr = 43; /* { dg-line on_null_ptr } */
+ return ptr;
+
+ /* { dg-warning "dereference of NULL 'ptr'" "" { target *-*-* } on_null_ptr } */
+ /* { dg-message "\\(1\\) allocated here" "" { target *-*-* } malloc } */
+ /* { dg-message "\\(2\\) assuming 'ptr' is NULL" "" { target *-*-* } cond } */
+ /* { dg-message "\\(3\\) following 'false' branch \\(when 'ptr' is NULL\\)\\.\\.\\." "" { target *-*-* } cond } */
+ /* { dg-message "\\(4\\) \\.\\.\\.to here" "" { target *-*-* } on_null_ptr } */
+ /* { dg-message "\\(5\\) dereference of NULL 'ptr'" "" { target *-*-* } on_null_ptr } */
+}
--- /dev/null
+#include <stdio.h>
+#include <stdlib.h>
+
+extern void do_stuff (void);
+
+int test (const char *filename, int flag)
+{
+ FILE *f;
+ int *p, *q;
+ int i;
+
+ p = (int *)malloc (sizeof (int)); /* { dg-line malloc_of_p } */
+ if (!p) /* { dg-line test_of_p } */
+ {
+ free (p);
+ return -1;
+ }
+
+ q = (int *)malloc (sizeof (int)); /* { dg-line malloc_of_q } */
+ if (!q) /* { dg-line test_of_q } */
+ {
+ free (p); /* { dg-line first_free_of_p } */
+ /* oops: forgot the "return" here, so it falls through. */
+ }
+
+ do_stuff ();
+
+ free (p); /* { dg-line second_free_of_p } */
+ free (q);
+ return 0;
+
+ /* { dg-warning "double-'free' of 'p'" "" { target *-*-* } second_free_of_p } */
+ /* { dg-message "\\(1\\) allocated here" "" { target *-*-* } malloc_of_p } */
+ /* { dg-message "\\(2\\) assuming 'p' is non-NULL" "" { target *-*-* } test_of_p } */
+ /* { dg-message "\\(3\\) following 'false' branch \\(when 'p' is non-NULL\\)\\.\\.\\." "" { target *-*-* } test_of_p } */
+ /* { dg-message "\\(4\\) \\.\\.\\.to here" "" { target *-*-* } malloc_of_q } */
+ /* { dg-message "\\(5\\) following 'true' branch \\(when 'q' is NULL\\)\\.\\.\\." "" { target *-*-* } test_of_q } */
+ /* { dg-message "\\(6\\) \\.\\.\\.to here" "" { target *-*-* } first_free_of_p } */
+ /* { dg-message "\\(7\\) first 'free' here" "" { target *-*-* } first_free_of_p } */
+ /* { dg-message "\\(8\\) second 'free' here; first 'free' was at \\(7\\)" "" { target *-*-* } second_free_of_p } */
+
+ /* We don't care about the state changes to q. */
+}
--- /dev/null
+#include <stdlib.h>
+
+void test (void *ptr)
+{
+ void *q;
+ q = ptr;
+ free (ptr);
+ free (q); /* { dg-warning "double-'free' of 'q'" } */
+ /* The above case requires us to handle equivalence classes in
+ state transitions. */
+}
--- /dev/null
+#include <stdlib.h>
+
+extern int foo (void);
+extern int bar (void);
+
+void test (void)
+{
+ void *p = malloc (1024); /* { dg-message "\\(1\\) allocated here" } */
+ void *q = malloc (1024);
+
+ foo ();
+ if (!q) /* { dg-message "\\(2\\) following 'true' branch \\(when 'q' is NULL\\)\\.\\.\\." } */
+ {
+ free (q); /* { dg-message "\\(3\\) \\.\\.\\.to here" } */
+ return; /* { dg-warning "leak of 'p'" } */
+ /* { dg-message "\\(4\\) 'p' leaks here; was allocated at \\(1\\)" "" { target *-*-* } .-1 } */
+ }
+ bar ();
+ free (q);
+ free (p);
+}
--- /dev/null
+/* { dg-additional-options "-fanalyzer-transitivity" } */
+
+#include <stddef.h>
+#include <stdlib.h>
+
+extern void do_stuff (const void *);
+
+#define LIMIT 1024
+
+void test_1 (size_t sz)
+{
+ void *ptr;
+ if (sz >= LIMIT)
+ ptr = malloc (sz);
+ else
+ ptr = alloca (sz);
+
+ do_stuff (ptr);
+
+ if (sz >= LIMIT)
+ free (ptr);
+}
+
+void test_2 (size_t sz)
+{
+ void *ptr;
+ if (sz < LIMIT)
+ ptr = alloca (sz);
+ else
+ ptr = malloc (sz);
+
+ do_stuff (ptr);
+
+ if (sz >= LIMIT)
+ free (ptr);
+}
+
+void test_3 (size_t sz)
+{
+ void *ptr;
+ if (sz <= LIMIT)
+ ptr = alloca (sz); /* { dg-message "memory is allocated on the stack here" } */
+ else
+ ptr = malloc (sz);
+
+ do_stuff (ptr);
+
+ /* Bug: the "sz <= LIMIT" above should have been "sz < LIMIT",
+ so there's a free-of-alloca when sz == LIMIT. */
+ if (sz >= LIMIT)
+ free (ptr); /* { dg-warning "'free' of memory allocated on the stack by 'alloca'" } */
+}
+/* { dg-bogus "leak of 'ptr'" } */
+/* This can't happen, as "sz > 1024" && "sz <= 1023" is impossible. */
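+
+/* For reference (illustrative only, not tied to any dg directive): the
+   fix for the deliberate bug in test_3 is the strict comparison already
+   used by test_2, so that exactly one branch matches when sz == LIMIT:
+
+     if (sz < LIMIT)
+       ptr = alloca (sz);
+     else
+       ptr = malloc (sz);
+*/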
--- /dev/null
+/* { dg-additional-options "-fdiagnostics-show-line-numbers -fdiagnostics-path-format=inline-events -fdiagnostics-show-caret" } */
+/* { dg-enable-nn-line-numbers "" } */
+
+#include <stdlib.h>
+
+void test_1 (void)
+{
+ void *ptr = malloc (1024);
+ free (ptr);
+ free (ptr); /* { dg-warning "double-'free' of 'ptr'" } */
+}
+/* { dg-begin-multiline-output "" }
+ NN | free (ptr);
+ | ^~~~~~~~~~
+ 'test_1': events 1-3
+ |
+ | NN | void *ptr = malloc (1024);
+ | | ^~~~~~~~~~~~~
+ | | |
+ | | (1) allocated here
+ | NN | free (ptr);
+ | | ~~~~~~~~~~
+ | | |
+ | | (2) first 'free' here
+ | NN | free (ptr);
+ | | ~~~~~~~~~~
+ | | |
+ | | (3) second 'free' here; first 'free' was at (2)
+ |
+ { dg-end-multiline-output "" } */
+
+void test_2 (int x, int y)
+{
+ void *ptr = malloc (1024);
+ if (x)
+ free (ptr);
+ if (y)
+ free (ptr); /* { dg-warning "double-'free' of 'ptr'" } */
+} /* { dg-warning "leak of 'ptr'" } */
+
+/* "double-'free' of 'ptr'". */
+/* { dg-begin-multiline-output "" }
+ NN | free (ptr);
+ | ^~~~~~~~~~
+ 'test_2': events 1-7
+ |
+ | NN | void *ptr = malloc (1024);
+ | | ^~~~~~~~~~~~~
+ | | |
+ | | (1) allocated here
+ | NN | if (x)
+ | | ~
+ | | |
+ | | (2) following 'true' branch (when 'x != 0')...
+ | NN | free (ptr);
+ | | ~~~~~~~~~~
+ | | |
+ | | (3) ...to here
+ | | (4) first 'free' here
+ | NN | if (y)
+ | | ~
+ | | |
+ | | (5) following 'true' branch (when 'y != 0')...
+ | NN | free (ptr);
+ | | ~~~~~~~~~~
+ | | |
+ | | (6) ...to here
+ | | (7) second 'free' here; first 'free' was at (4)
+ |
+ { dg-end-multiline-output "" } */
+
+/* "leak of 'ptr'". */
+/* { dg-begin-multiline-output "" }
+ NN | }
+ | ^
+ 'test_2': events 1-6
+ |
+ | NN | void *ptr = malloc (1024);
+ | | ^~~~~~~~~~~~~
+ | | |
+ | | (1) allocated here
+ | NN | if (x)
+ | | ~
+ | | |
+ | | (2) following 'false' branch (when 'x == 0')...
+ | NN | free (ptr);
+ | NN | if (y)
+ | | ~
+ | | |
+ | | (3) ...to here
+ | | (4) following 'false' branch (when 'y == 0')...
+ | NN | free (ptr);
+ | NN | }
+ | | ~
+ | | |
+ | | (5) ...to here
+ | | (6) 'ptr' leaks here; was allocated at (1)
+ |
+ { dg-end-multiline-output "" } */
+
+int test_3 (int x, int y)
+{
+ int *ptr = (int *)malloc (sizeof (int));
+ *ptr = 42; /* { dg-warning "dereference of possibly-NULL 'ptr'" } */
+ if (x)
+ free (ptr);
+
+ *ptr = 19; /* { dg-warning "use after 'free' of 'ptr'" } */
+ // TODO: two warnings here: one is from sm-malloc, the other from region model
+
+ if (y)
+ free (ptr); /* No double-'free' warning: we've already attempted
+ to dereference it above. */
+ return *ptr; /* { dg-warning "use after 'free' of 'ptr'" } */
+ // TODO: two warnings here: one is from sm-malloc, the other from region model
+ /* { dg-warning "leak of 'ptr'" "" { target *-*-* } .-2 } */
+}
+
+/* "dereference of possibly-NULL 'ptr'". */
+/* { dg-begin-multiline-output "" }
+ NN | *ptr = 42;
+ | ~~~~~^~~~
+ 'test_3': events 1-2
+ |
+ | NN | int *ptr = (int *)malloc (sizeof (int));
+ | | ^~~~~~~~~~~~~~~~~~~~~
+ | | |
+ | | (1) this call could return NULL
+ | NN | *ptr = 42;
+ | | ~~~~~~~~~
+ | | |
+ | | (2) 'ptr' could be NULL: unchecked value from (1)
+ |
+ { dg-end-multiline-output "" } */
+
+/* "use after 'free' of 'ptr'". */
+/* { dg-begin-multiline-output "" }
+ NN | *ptr = 19;
+ | ~~~~~^~~~
+ 'test_3': events 1-6
+ |
+ | NN | int *ptr = (int *)malloc (sizeof (int));
+ | | ^~~~~~~~~~~~~~~~~~~~~
+ | | |
+ | | (1) allocated here
+ | NN | *ptr = 42;
+ | | ~~~~~~~~~
+ | | |
+ | | (2) assuming 'ptr' is non-NULL
+ | NN | if (x)
+ | | ~
+ | | |
+ | | (3) following 'true' branch (when 'x != 0')...
+ | NN | free (ptr);
+ | | ~~~~~~~~~~
+ | | |
+ | | (4) ...to here
+ | | (5) freed here
+ | NN |
+ | NN | *ptr = 19;
+ | | ~~~~~~~~~
+ | | |
+ | | (6) use after 'free' of 'ptr'; freed at (5)
+ |
+ { dg-end-multiline-output "" } */
+
+/* "use after 'free' of 'ptr'". */
+/* { dg-begin-multiline-output "" }
+ NN | return *ptr;
+ | ^~~~
+ 'test_3': events 1-8
+ |
+ | NN | int *ptr = (int *)malloc (sizeof (int));
+ | | ^~~~~~~~~~~~~~~~~~~~~
+ | | |
+ | | (1) allocated here
+ | NN | *ptr = 42;
+ | | ~~~~~~~~~
+ | | |
+ | | (2) assuming 'ptr' is non-NULL
+ | NN | if (x)
+ | | ~
+ | | |
+ | | (3) following 'false' branch (when 'x == 0')...
+ |......
+ | NN | *ptr = 19;
+ | | ~~~~~~~~~
+ | | |
+ | | (4) ...to here
+ |......
+ | NN | if (y)
+ | | ~
+ | | |
+ | | (5) following 'true' branch (when 'y != 0')...
+ | NN | free (ptr);
+ | | ~~~~~~~~~~
+ | | |
+ | | (6) ...to here
+ | | (7) freed here
+ | NN |
+ | NN | return *ptr;
+ | | ~~~~
+ | | |
+ | | (8) use after 'free' of 'ptr'; freed at (7)
+ |
+ { dg-end-multiline-output "" } */
+
+/* "leak of 'ptr'". */
+/* { dg-begin-multiline-output "" }
+ NN | return *ptr;
+ | ^~~~
+ 'test_3': events 1-7
+ |
+ | NN | int *ptr = (int *)malloc (sizeof (int));
+ | | ^~~~~~~~~~~~~~~~~~~~~
+ | | |
+ | | (1) allocated here
+ | NN | *ptr = 42;
+ | | ~~~~~~~~~
+ | | |
+ | | (2) assuming 'ptr' is non-NULL
+ | NN | if (x)
+ | | ~
+ | | |
+ | | (3) following 'false' branch (when 'x == 0')...
+ |......
+ | NN | *ptr = 19;
+ | | ~~~~~~~~~
+ | | |
+ | | (4) ...to here
+ |......
+ | NN | if (y)
+ | | ~
+ | | |
+ | | (5) following 'false' branch (when 'y == 0')...
+ |......
+ | NN | return *ptr;
+ | | ~~~~
+ | | |
+ | | (6) ...to here
+ | | (7) 'ptr' leaks here; was allocated at (1)
+ |
+ { dg-end-multiline-output "" } */
+
+/* "use after 'free' of 'ptr'". */
+/* { dg-begin-multiline-output "" }
+ NN | *ptr = 19;
+ | ~~~~~^~~~
+ 'test_3': events 1-3
+ |
+ | NN | if (x)
+ | | ^
+ | | |
+ | | (1) following 'true' branch (when 'x != 0')...
+ | NN | free (ptr);
+ | | ~~~~~~~~~~
+ | | |
+ | | (2) ...to here
+ | NN |
+ | NN | *ptr = 19;
+ | | ~~~~~~~~~
+ | | |
+ | | (3) use after 'free' of 'ptr' here
+ |
+ { dg-end-multiline-output "" } */
+
+/* "use after 'free' of 'ptr'". */
+/* { dg-begin-multiline-output "" }
+ NN | return *ptr;
+ | ^~~~
+ 'test_3': events 1-5
+ |
+ | NN | if (x)
+ | | ^
+ | | |
+ | | (1) following 'false' branch (when 'x == 0')...
+ |......
+ | NN | *ptr = 19;
+ | | ~~~~~~~~~
+ | | |
+ | | (2) ...to here
+ |......
+ | NN | if (y)
+ | | ~
+ | | |
+ | | (3) following 'true' branch (when 'y != 0')...
+ | NN | free (ptr);
+ | | ~~~~~~~~~~
+ | | |
+ | | (4) ...to here
+ | NN | to dereference it above
+ | NN | return *ptr;
+ | | ~~~~
+ | | |
+ | | (5) use after 'free' of 'ptr' here
+ |
+ { dg-end-multiline-output "" } */
+/* TODO: this is really a duplicate; can we either eliminate it, or
+ improve the path? */
--- /dev/null
+/* { dg-additional-options "-fno-analyzer-call-summaries -fanalyzer-transitivity" } */
+
+#include <stdlib.h>
+#include "analyzer-decls.h"
+
+extern int foo (int);
+
+static int __attribute__((noinline))
+do_stuff (int *p, int n)
+{
+ int sum = 0;
+ int i;
+ for (i = 0; i < n; i++)
+ p[i] = i;
+ for (i = 0; i < n; i++)
+ sum += foo (p[i]); /* { dg-bogus "uninitialized" } */
+ return sum;
+}
+
+static int __attribute__((noinline))
+do_stuff_2 (int *p, int n)
+{
+ return 0;
+}
+
+/* Various examples of functions that use either a malloc buffer
+ or a local buffer, do something, then conditionally free the
+ buffer, tracking whether "free" is necessary in various
+ ways.
+
+ In each case, there ought to be only two paths through the function,
+ not four. */
+
+/* Repeated (n > 10) predicate. */
+
+int test_repeated_predicate_1 (int n)
+{
+ int buf[10];
+ int *ptr;
+ int result;
+
+ if (n > 10)
+ ptr = (int *)malloc (sizeof (int) * n);
+ else
+ ptr = buf;
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 exploded nodes" } */
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 exploded nodes" } */
+
+ result = do_stuff (ptr, n);
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "3 exploded nodes" } */
+ // FIXME: why 3 here?
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "3 exploded nodes" } */
+ // FIXME: why 3 here?
+
+ if (n > 10)
+ free (ptr); /* { dg-bogus "not on the heap" } */
+
+ return result; /* { dg-bogus "leak" } */
+}
+
+/* A simpler version of the above. */
+
+int test_repeated_predicate_2 (int n)
+{
+ int buf[10];
+ int *ptr;
+ int result;
+
+ if (n > 10)
+ ptr = (int *)malloc (sizeof (int) * n);
+ else
+ ptr = buf;
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 exploded nodes" } */
+
+ result = do_stuff_2 (ptr, n);
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 exploded nodes" } */
+
+ if (n > 10)
+ free (ptr); /* { dg-bogus "not on the heap" } */
+
+ return result; /* { dg-bogus "leak" } */
+}
+
+/* A predicate that sets a flag for the 2nd test. */
+
+int test_explicit_flag (int n)
+{
+ int buf[10];
+ int *ptr;
+ int result;
+ int need_to_free = 0;
+
+ if (n > 10)
+ {
+ ptr = (int *)malloc (sizeof (int) * n);
+ need_to_free = 1;
+ }
+ else
+ ptr = buf;
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 exploded nodes" } */
+
+ result = do_stuff (ptr, n);
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "3 exploded nodes" } */
+ // FIXME: why 3 here?
+
+ if (need_to_free)
+ free (ptr); /* { dg-bogus "not on the heap" } */
+
+ return result; /* { dg-bogus "leak" } */
+}
+
+/* Pointer comparison. */
+
+int test_pointer_comparison (int n)
+{
+ int buf[10];
+ int *ptr;
+ int result;
+
+ if (n > 10)
+ ptr = (int *)malloc (sizeof (int) * n);
+ else
+ ptr = buf;
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 exploded nodes" } */
+
+ result = do_stuff (ptr, n);
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "3 exploded nodes" } */
+ // FIXME: why 3 here?
+
+ if (ptr != buf)
+ free (ptr); /* { dg-bogus "not on the heap" } */
+
+ return result; /* { dg-bogus "leak" } */
+}
+
+/* Set a flag based on a conditional, then use it, then reuse the
+ conditional. */
+
+int test_initial_flag (int n)
+{
+ int buf[10];
+ int *ptr;
+ int result;
+ int on_heap = 0;
+
+ if (n > 10)
+ on_heap = 1;
+ else
+ on_heap = 0;
+
+ /* Due to state-merging, we lose the relationship between 'n > 10'
+ and 'on_heap' here; we have to rely on feasibility-checking
+ in the diagnostic_manager to reject the false warnings. */
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 exploded nodes" } */
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 exploded node" } */
+
+ if (on_heap)
+ ptr = (int *)malloc (sizeof (int) * n);
+ else
+ ptr = buf;
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 exploded nodes" } */
+
+ result = do_stuff (ptr, n);
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "5 exploded nodes" } */
+ // FIXME: why 5 here?
+
+ if (n > 10)
+ free (ptr); /* { dg-bogus "not on the heap" } */
+
+ return result; /* { dg-bogus "leak" } */
+}
--- /dev/null
+/* { dg-additional-options "-fanalyzer-call-summaries" } */
+
+#include <stdlib.h>
+#include "analyzer-decls.h"
+
+extern int foo (int);
+
+static int __attribute__((noinline))
+do_stuff (int *p, int n)
+{
+ int sum = 0;
+ int i;
+ for (i = 0; i < n; i++)
+ p[i] = i;
+ for (i = 0; i < n; i++)
+ sum += foo (p[i]); /* { dg-bogus "uninitialized" } */
+ return sum;
+}
+
+static int __attribute__((noinline))
+do_stuff_2 (int *p, int n)
+{
+ return 0;
+}
+
+/* Various examples of functions that use either a malloc buffer
+ or a local buffer, do something, then conditionally free the
+ buffer, tracking whether "free" is necessary in various
+ ways.
+
+ In each case, there ought to be only two paths through the function,
+ not four. */
+
+/* Repeated (n > 10) predicate. */
+
+int test_repeated_predicate_1 (int n)
+{
+ int buf[10];
+ int *ptr;
+ int result;
+
+ if (n > 10)
+ ptr = (int *)malloc (sizeof (int) * n);
+ else
+ ptr = buf;
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 exploded nodes" } */
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 exploded nodes" } */
+
+ result = do_stuff (ptr, n);
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 exploded nodes" } */
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 exploded nodes" } */
+
+ if (n > 10)
+ free (ptr); /* { dg-bogus "not on the heap" } */
+
+ return result; /* { dg-bogus "leak" } */
+}
+
+/* A simpler version of the above. */
+
+int test_repeated_predicate_2 (int n)
+{
+ int buf[10];
+ int *ptr;
+ int result;
+
+ if (n > 10)
+ ptr = (int *)malloc (sizeof (int) * n);
+ else
+ ptr = buf;
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 exploded nodes" } */
+
+ result = do_stuff_2 (ptr, n);
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 exploded nodes" } */
+
+ if (n > 10)
+ free (ptr); /* { dg-bogus "not on the heap" } */
+
+ return result; /* { dg-bogus "leak" } */
+}
+
+/* A predicate that sets a flag for the 2nd test. */
+
+int test_explicit_flag (int n)
+{
+ int buf[10];
+ int *ptr;
+ int result;
+ int need_to_free = 0;
+
+ if (n > 10)
+ {
+ ptr = (int *)malloc (sizeof (int) * n);
+ need_to_free = 1;
+ }
+ else
+ ptr = buf;
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 exploded nodes" } */
+
+ result = do_stuff (ptr, n);
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 exploded nodes" } */
+
+ if (need_to_free)
+ free (ptr); /* { dg-bogus "not on the heap" } */
+
+ return result; /* { dg-bogus "leak" } */
+}
+
+/* Pointer comparison. */
+
+int test_pointer_comparison (int n)
+{
+ int buf[10];
+ int *ptr;
+ int result;
+
+ if (n > 10)
+ ptr = (int *)malloc (sizeof (int) * n);
+ else
+ ptr = buf;
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 exploded nodes" } */
+
+ result = do_stuff (ptr, n);
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 exploded nodes" } */
+
+ if (ptr != buf)
+ free (ptr); /* { dg-bogus "not on the heap" } */
+
+ return result; /* { dg-bogus "leak" } */
+}
+
+/* Set a flag based on a conditional, then use it, then reuse the
+ conditional. */
+
+int test_initial_flag (int n)
+{
+ int buf[10];
+ int *ptr;
+ int result;
+ int on_heap = 0;
+
+ if (n > 10)
+ on_heap = 1;
+ else
+ on_heap = 0;
+
+ /* Due to state-merging, we lose the relationship between 'n > 10'
+ and 'on_heap' here; we have to rely on feasibility-checking
+ in the diagnostic_manager to reject the false warnings. */
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 exploded nodes" } */
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 exploded node" } */
+
+ if (on_heap)
+ ptr = (int *)malloc (sizeof (int) * n);
+ else
+ ptr = buf;
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 exploded nodes" } */
+
+ result = do_stuff (ptr, n);
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 exploded nodes" } */
+
+ if (n > 10)
+ free (ptr); /* { dg-bogus "not on the heap" } */
+
+ return result; /* { dg-bogus "leak" } */
+}
--- /dev/null
+#include <stdlib.h>
+#include "analyzer-decls.h"
+
+extern int foo (int);
+
+static int __attribute__((noinline))
+do_stuff_2 (int *p, int n)
+{
+ return 0;
+}
+
+/* As malloc-vs-local.c, but hand-inlining the logic. */
+
+/* Repeated (n > 10) predicate. */
+
+int test_repeated_predicate_1 (int n)
+{
+ int buf[10];
+ int *ptr;
+ int result;
+
+ if (n > 10)
+ ptr = (int *)malloc (sizeof (int) * n);
+ else
+ ptr = buf;
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 exploded nodes" } */
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 exploded nodes" } */
+
+ {
+ int *p = ptr;
+ int sum = 0;
+ int i;
+ for (i = 0; i < n; i++)
+ p[i] = i;
+ for (i = 0; i < n; i++)
+ sum += foo (p[i]); /* { dg-bogus "uninitialized" } */
+ result = sum;
+ }
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "3 exploded nodes" } */
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "3 exploded nodes" } */
+
+ if (n > 10)
+ free (ptr); /* { dg-bogus "not on the heap" } */
+
+ return result; /* { dg-bogus "leak" } */
+}
+
+/* As above, but with just one loop. */
+
+int test_repeated_predicate_1a (int n)
+{
+ int buf[10];
+ int *ptr;
+ int result;
+
+ if (n > 10)
+ ptr = (int *)malloc (sizeof (int) * n);
+ else
+ ptr = buf;
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 exploded nodes" } */
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 exploded nodes" } */
+
+ {
+ int *p = ptr;
+ int sum = 0;
+ int i;
+ for (i = 0; i < n; i++)
+ p[i] = i;
+ result = sum;
+ }
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "3 exploded nodes" } */
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "3 exploded nodes" } */
+
+ if (n > 10)
+ free (ptr); /* { dg-bogus "not on the heap" } */
+
+ return result; /* { dg-bogus "leak" } */
+}
+
+/* A simpler version of the above. */
+
+int test_repeated_predicate_2 (int n)
+{
+ int buf[10];
+ int *ptr;
+ int result;
+
+ if (n > 10)
+ ptr = (int *)malloc (sizeof (int) * n);
+ else
+ ptr = buf;
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 exploded nodes" } */
+
+ result = do_stuff_2 (ptr, n);
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 exploded nodes" } */
+
+ if (n > 10)
+ free (ptr); /* { dg-bogus "not on the heap" } */
+
+ return result; /* { dg-bogus "leak" } */
+}
+
+/* A predicate that sets a flag for the 2nd test. */
+
+int test_explicit_flag (int n)
+{
+ int buf[10];
+ int *ptr;
+ int result;
+ int need_to_free = 0;
+
+ if (n > 10)
+ {
+ ptr = (int *)malloc (sizeof (int) * n);
+ need_to_free = 1;
+ }
+ else
+ ptr = buf;
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 exploded nodes" } */
+
+ {
+ int *p = ptr;
+ int sum = 0;
+ int i;
+ for (i = 0; i < n; i++)
+ p[i] = i;
+ for (i = 0; i < n; i++)
+ sum += foo (p[i]); /* { dg-bogus "uninitialized" } */
+ result = sum;
+ }
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "3 exploded nodes" } */
+
+ if (need_to_free)
+ free (ptr); /* { dg-bogus "not on the heap" } */
+
+ return result; /* { dg-bogus "leak" } */
+}
+
+/* Pointer comparison. */
+
+int test_pointer_comparison (int n)
+{
+ int buf[10];
+ int *ptr;
+ int result;
+
+ if (n > 10)
+ ptr = (int *)malloc (sizeof (int) * n);
+ else
+ ptr = buf;
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 exploded nodes" } */
+
+ {
+ int *p = ptr;
+ int sum = 0;
+ int i;
+ for (i = 0; i < n; i++)
+ p[i] = i;
+ for (i = 0; i < n; i++)
+ sum += foo (p[i]); /* { dg-bogus "uninitialized" } */
+ result = sum;
+ }
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "3 exploded nodes" } */
+
+ if (ptr != buf)
+ free (ptr); /* { dg-bogus "not on the heap" } */
+
+ return result; /* { dg-bogus "leak" } */
+}
--- /dev/null
+#include <stdlib.h>
+#include "analyzer-decls.h"
+
+extern int foo (int);
+
+static int __attribute__((noinline))
+do_stuff_2 (int *p, int n)
+{
+ return 0;
+}
+
+/* As malloc-vs-local-2.c, but with a memory leak for the "on the heap" case,
+   by not attempting to free at the end.  */
+
+int test_1 (int n)
+{
+ int buf[10];
+ int *ptr;
+ int result;
+
+ if (n > 10)
+ ptr = (int *)malloc (sizeof (int) * n);
+ else
+ ptr = buf;
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 exploded nodes" } */
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 exploded nodes" } */
+
+ {
+ int *p = ptr;
+ int sum = 0;
+ int i;
+ for (i = 0; i < n; i++)
+ p[i] = i;
+ for (i = 0; i < n; i++)
+ sum += foo (p[i]); /* { dg-bogus "uninitialized" } */
+ result = sum;
+ }
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "3 exploded nodes" } */
+
+ return result; /* { dg-message "leak of 'p'" } */
+ /* FIXME: should this be 'ptr'? */
+}
+
+/* A simpler version of the above. */
+
+int test_2 (int n)
+{
+ int buf[10];
+ int *ptr;
+ int result;
+
+ if (n > 10)
+ ptr = (int *)malloc (sizeof (int) * n);
+ else
+ ptr = buf;
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 exploded nodes" } */
+
+ result = do_stuff_2 (ptr, n);
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 exploded nodes" } */
+
+ return result; /* { dg-message "leak of 'ptr'" } */
+}
--- /dev/null
+#include <stdlib.h>
+
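+/* Interprocedural tests: test_1 passes a pointer that may be a possibly-NULL
+   heap allocation to a callee that dereferences it; test_2 mixes calls to a
+   callee with a double-'free' inside a flag-guarded block.  */
+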
+void __attribute__((noinline)) callee_1 (int *ptr)
+{
+ *ptr = 42; /* { dg-warning "dereference of possibly-NULL 'ptr'" } */
+}
+
+int test_1 (int i, int flag)
+{
+ /* Double diamond CFG; either use &i, or a malloc-ed buffer. */
+ int *ptr = &i;
+ if (flag)
+ ptr = (int *)malloc (sizeof (int));
+ callee_1 (ptr);
+ if (flag)
+ free (ptr);
+ return i;
+}
+
+void __attribute__((noinline)) callee_2 (int *ptr)
+{
+ *ptr = 42;
+}
+
+int test_2 (int flag)
+{
+ int i;
+
+ if (flag)
+ callee_2 (&i);
+
+ callee_2 (&i);
+
+ if (!flag)
+ {
+ void *ptr = malloc (16);
+ free (ptr);
+ free (ptr); /* { dg-warning "double-'free' of 'ptr'" } */
+ }
+}
--- /dev/null
+#include "analyzer-decls.h"
+
+void test (int i, int j)
+{
+ int k, m;
+
+ if (i > 42) {
+ __analyzer_eval (i > 42); /* { dg-warning "TRUE" } */
+
+ i += 3;
+
+ __analyzer_eval (i > 45); /* { dg-warning "TRUE" "" { xfail *-*-* } } */
+ /* { dg-warning "UNKNOWN" "" { target *-*-* } .-1 } */
+ /* TODO(xfail): do we really know this? what about overflow? */
+
+ i -= 1;
+
+ __analyzer_eval (i > 44); /* { dg-warning "TRUE" "" { xfail *-*-* } } */
+ /* { dg-warning "UNKNOWN" "" { target *-*-* } .-1 } */
+ /* TODO(xfail): do we really know this? what about overflow? */
+
+ i = 3 * i;
+
+ __analyzer_eval (i > 132); /* { dg-warning "TRUE" "" { xfail *-*-* } } */
+ /* { dg-warning "UNKNOWN" "" { target *-*-* } .-1 } */
+ /* TODO(xfail): do we really know this? what about overflow? */
+
+ i /= 2;
+
+ __analyzer_eval (i > 66); /* { dg-warning "TRUE" "" { xfail *-*-* } } */
+ /* { dg-warning "UNKNOWN" "" { target *-*-* } .-1 } */
+ /* TODO(xfail): do we really know this? what about overflow? */
+
+ /* We don't know anything about j, so we don't know anything about k: */
+ k = i + j;
+ __analyzer_eval (k == 0); /* { dg-warning "UNKNOWN" } */
+
+ /* However, we should now know that m > 67: */
+ m = i + 1;
+ __analyzer_eval (m > 67); /* { dg-warning "TRUE" "" { xfail *-*-* } } */
+ /* { dg-warning "UNKNOWN" "" { target *-*-* } .-1 } */
+ /* TODO(xfail): do we really know this? what about overflow? */
+ }
+}
--- /dev/null
+#include <stdlib.h>
+#include "analyzer-decls.h"
+
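+/* Verify that a constraint established inside a callee (which aborts unless
+   its arguments are equal) is known to the caller afterwards.  */
+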
+static void ensure_equal (int a, int b)
+{
+ if (a != b)
+ abort ();
+}
+
+void test(int i, int j)
+{
+ __analyzer_eval (i == j); /* { dg-warning "UNKNOWN" } */
+
+ ensure_equal (i, j);
+
+ __analyzer_eval (i == j); /* { dg-warning "TRUE" } */
+}
--- /dev/null
+#include "analyzer-decls.h"
+
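+/* Verify that constraints on an argument are visible inside a called
+   function; the xfails below track losing the constraint through the
+   assignment and through the returned value.  */
+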
+static int called_function(int j)
+{
+ int k;
+
+ __analyzer_eval (j > 4); /* { dg-warning "TRUE" } */
+
+ k = j - 1;
+
+ __analyzer_eval (k > 3); /* { dg-warning "TRUE" "" { xfail *-*-* } } */
+ /* { dg-warning "UNKNOWN" "" { target *-*-* } .-1 } */
+ /* TODO(xfail): we're not then updating based on the assignment. */
+
+ return k;
+}
+
+void test(int i)
+{
+ __analyzer_eval (i > 4); /* { dg-warning "UNKNOWN" } */
+
+ if (i > 4) {
+
+ __analyzer_eval (i > 4); /* { dg-warning "TRUE" } */
+
+ i = called_function(i);
+
+ __analyzer_eval (i > 3); /* { dg-warning "TRUE" "" { xfail *-*-* } } */
+ /* { dg-warning "UNKNOWN" "" { target *-*-* } .-1 } */
+ /* TODO(xfail): we're not updating from the returned value. */
+ }
+
+ __analyzer_eval (i > 3); /* { dg-warning "UNKNOWN" } */
+}
--- /dev/null
+#include "analyzer-decls.h"
+
+struct foo
+{
+ int m_flag;
+};
+
+extern void bar (int);
+
+void test (struct foo *pf)
+{
+ if (pf->m_flag)
+ bar (0);
+ else
+ bar (1);
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "3 exploded nodes" } */
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 exploded node" } */
+}
--- /dev/null
+#include "analyzer-decls.h"
+
+union foo
+{
+ int m_flag;
+};
+
+extern void bar (int);
+
+void test (union foo *pf)
+{
+ if (pf->m_flag)
+ bar (0);
+ else
+ bar (1);
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "3 exploded nodes" } */
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 exploded node" } */
+}
--- /dev/null
+#include "analyzer-decls.h"
+
+int test (int a)
+{
+ if (a != 42 && a != 113) {
+ return (-2);
+ }
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "3 exploded nodes" } */
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 exploded node" } */
+
+ return 0;
+}
+
+int test_2 (int a)
+{
+ if (a != 42 && a != 113 && a != 666) {
+ return (-2);
+ }
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "4 exploded nodes" } */
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 exploded node" } */
+
+ return 0;
+}
--- /dev/null
+/* { dg-additional-options "-fanalyzer-transitivity" } */
+
+#include <stdlib.h>
+#include "analyzer-decls.h"
+
+int test_1 (int a, int b)
+{
+ void *p;
+
+ if (a > 5)
+ if (b)
+ p = malloc (16);
+ else
+ p = malloc (32);
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "4 exploded nodes" } */
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 exploded nodes" } */
+
+ if (a > 5)
+ {
+ free (p);
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 exploded node" } */
+ }
+
+ return 0; /* { dg-bogus "leak" } */
+}
+
+int test_2 (int a, int b)
+{
+ void *p;
+
+ if (a > 5)
+ if (b)
+ p = malloc (16);
+ else
+ p = malloc (32);
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "4 exploded nodes" } */
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 exploded nodes" } */
+
+ if (a > 6) /* different condition */
+ {
+ free (p);
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 exploded node" } */
+ }
+
+ return 0; /* { dg-warning "leak of 'p'" } */
+  /* leaks when a == 6. */
+}
--- /dev/null
+#include "analyzer-decls.h"
+
+struct state
+{
+ int mode;
+ int data;
+};
+
+extern void do_stuff (struct state *, int);
+
+int test_1 (struct state *s)
+{
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 exploded node" } */
+ while (1)
+ {
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 exploded nodes" } */
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 exploded node" } */
+ do_stuff (s, s->mode);
+ }
+}
+
+int test_2 (struct state *s)
+{
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 exploded node" } */
+ while (1)
+ {
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "3 exploded nodes" } */
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 exploded node" } */
+ switch (s->mode)
+ {
+ case 0:
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 exploded node" } */
+ do_stuff (s, 0);
+ break;
+ case 1:
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 exploded node" } */
+ do_stuff (s, 17);
+ break;
+ case 2:
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 exploded node" } */
+ do_stuff (s, 5);
+ break;
+ case 3:
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 exploded node" } */
+ return 42;
+ case 4:
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 exploded node" } */
+ return -3;
+ }
+ }
+}
--- /dev/null
+#include "analyzer-decls.h"
+
+void test (int *p, int n)
+{
+ int i;
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 exploded node" } */
+ for (i = 0; i < n; i++)
+ {
+ p[i] = i; /* { dg-bogus "uninitialized" } */
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 exploded nodes" } */
+ }
+}
--- /dev/null
+#include <stdlib.h>
+#include "analyzer-decls.h"
+
+/* Verify that ordering of writes doesn't matter when merging states. */
+
+/* Test with locals. */
+
+void test_1 (int flag)
+{
+ int a, b;
+ if (flag)
+ {
+ a = 3;
+ b = 4;
+ }
+ else
+ {
+ b = 4;
+ a = 3;
+ }
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 exploded nodes" } */
+ // FIXME: the above can vary between 2 and 3 exploded nodes
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 exploded node" } */
+ __analyzer_eval (a == 3); /* { dg-warning "TRUE" } */
+ __analyzer_eval (b == 4); /* { dg-warning "TRUE" } */
+}
+
+/* Test with globals. */
+
+int f, g, h;
+void test_2 (int flag)
+{
+ if (flag)
+ {
+ f = 3;
+ g = 4;
+ }
+ else
+ {
+ g = 4;
+ f = 3;
+ }
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 exploded nodes" } */
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 exploded node" } */
+ __analyzer_eval (f == 3); /* { dg-warning "TRUE" } */
+ __analyzer_eval (g == 4); /* { dg-warning "TRUE" } */
+}
+
+/* All 6 orderings of writes to 3 globals. */
+
+void test_3 (int i)
+{
+ switch (i)
+ {
+ default:
+ case 0:
+ f = 3;
+ g = 4;
+ h = 5;
+ break;
+
+ case 1:
+ f = 3;
+ h = 5;
+ g = 4;
+ break;
+
+ case 2:
+ g = 4;
+ f = 3;
+ h = 5;
+ break;
+
+ case 3:
+ g = 4;
+ h = 5;
+ f = 3;
+ break;
+
+ case 4:
+ h = 5;
+ f = 3;
+ g = 4;
+ break;
+
+ case 5:
+ h = 5;
+ g = 4;
+ f = 3;
+ break;
+ }
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "6 exploded nodes" } */
+  __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 exploded nodes" } */
+ __analyzer_eval (f == 3); /* { dg-warning "TRUE" } */
+ __analyzer_eval (g == 4); /* { dg-warning "TRUE" } */
+ __analyzer_eval (h == 5); /* { dg-warning "TRUE" } */
+}
+
+void test_4 (int flag)
+{
+ void *p, *q;
+ if (flag)
+ {
+ p = malloc (256);
+ q = malloc (256);
+ }
+ else
+ {
+ q = malloc (256);
+ p = malloc (256);
+ }
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 exploded nodes" } */
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 exploded node" } */
+ free (p);
+ free (q);
+}
--- /dev/null
+#include <stdlib.h>
+#include "analyzer-decls.h"
+
+extern int foo (int);
+
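+/* Verify that the correlation between the two 'if (flag)' blocks survives
+   the intervening loops, so that the double-'free' of 'ptr' along the
+   'flag' path is reported.  */
+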
+int test (int flag, void *ptr, int *p, int n)
+{
+ int result;
+ int sum = 0;
+ int i;
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 exploded node" } */
+
+ if (flag)
+ free (ptr);
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 exploded nodes" } */
+
+ for (i = 0; i < n; i++)
+ p[i] = i;
+ for (i = 0; i < n; i++)
+ sum += foo (p[i]); /* { dg-bogus "uninitialized" } */
+ result = sum;
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 exploded nodes" } */
+
+ if (flag)
+ free (ptr); /* { dg-warning "double-'free' of 'ptr'" } */
+ return result;
+}
+
+int test_2 (int flag, int *p, int n)
+{
+ int result;
+ int sum = 0;
+ int i;
+
+ void *ptr = malloc (16);
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 exploded node" } */
+
+ if (flag)
+ free (ptr);
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 exploded nodes" } */
+
+ for (i = 0; i < n; i++)
+ p[i] = i;
+ for (i = 0; i < n; i++)
+ sum += foo (p[i]); /* { dg-bogus "uninitialized" } */
+ result = sum;
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "5 exploded nodes" } */
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "5 exploded nodes" } */
+ // FIXME: why 5 here?
+
+ free (ptr); /* { dg-warning "double-'free' of 'ptr'" } */
+ return result;
+}
--- /dev/null
+/* { dg-additional-options "-fanalyzer-checker=pattern-test" } */
+
+#include <stdlib.h>
+
+extern void foo(void *);
+extern void bar(void *);
+
+void test1(void *ptr)
+{
+ if (ptr) { /* { dg-warning "pattern match on 'ptr != 0'" } */
+ /* { dg-warning "pattern match on 'ptr == 0'" "" { target *-*-* } .-1 } */
+ foo(ptr);
+ } else {
+ bar(ptr);
+ }
+}
+
+void test_2 (void *p, void *q)
+{
+ if (p == NULL || q == NULL) /* { dg-line cond_2 } */
+ return;
+ foo(p);
+
+ /* { dg-warning "pattern match on 'p == 0'" "" { target *-*-* } cond_2 } */
+ /* { dg-warning "pattern match on 'q == 0'" "" { target *-*-* } cond_2 } */
+ /* { dg-warning "pattern match on 'p != 0'" "" { target *-*-* } cond_2 } */
+ /* { dg-warning "pattern match on 'q != 0'" "" { target *-*-* } cond_2 } */
+}
--- /dev/null
+/* { dg-additional-options "-fanalyzer-checker=pattern-test -O2" } */
+// TODO: run this at every optimization level
+
+#include <stdlib.h>
+
+extern void foo(void *);
+extern void bar(void *);
+
+void test1(void *ptr)
+{
+ if (ptr) { /* { dg-warning "pattern match on 'ptr != 0'" } */
+ /* { dg-warning "pattern match on 'ptr == 0'" "" { target *-*-* } .-1 } */
+ foo(ptr);
+ } else {
+ bar(ptr);
+ }
+}
+
+void test_2 (void *p, void *q)
+{
+ if (p == NULL || q == NULL) /* { dg-line cond_2 } */
+ return;
+ foo(p);
+
+ /* { dg-warning "pattern match on '<unknown> == 0'" "" { target *-*-* } cond_2 } */
+ /* { dg-warning "pattern match on '<unknown> != 0'" "" { target *-*-* } cond_2 } */
+ /* { dg-warning "pattern match on 'p != 0'" "" { target *-*-* } cond_2 } */
+ /* { dg-warning "pattern match on 'q != 0'" "" { target *-*-* } cond_2 } */
+}
--- /dev/null
+static char * __attribute__((noinline))
+test_1_callee (int flag, char *a, char *b)
+{
+ char *p;
+ if (flag)
+ p = a;
+ else
+ p = b;
+ return p;
+}
+
+char test_1_caller(int flag) {
+ char a = 42;
+ char b = 43;
+ return *test_1_callee(flag, &a, &b);
+}
--- /dev/null
+/* { dg-additional-options "-Wno-int-conversion" } */
+#include "../pr61861.c"
--- /dev/null
+/* Verify that we can disable analyzer warnings via pragmas. */
+
+#include <stdlib.h>
+
+void test_1 (void *ptr)
+{
+ free (ptr);
+ free (ptr); /* { dg-warning "double-'free'" } */
+}
+
+void test_2 (void *ptr)
+{
+ _Pragma("GCC diagnostic push")
+ _Pragma("GCC diagnostic ignored \"-Wanalyzer-double-free\"")
+
+ free (ptr);
+ free (ptr); /* { dg-bogus "double-'free'" } */
+
+ _Pragma("GCC diagnostic pop")
+}
+
+void test_3 (void *ptr)
+{
+ free (ptr);
+ free (ptr); /* { dg-warning "double-'free'" } */
+}
--- /dev/null
+#include <stdlib.h>
+
+int test_1 (void)
+{
+ {
+ int *q = malloc (1024);
+ }
+
+ return 42; /* { dg-warning "leak of 'q'" } */
+ // FIXME: would be better to report it at the close-of-scope
+}
+
+int test_2 (void)
+{
+ {
+ void *q = malloc (1024);
+ }
+
+ int q = 42;
+
+ return q; /* { dg-warning "leak of 'q'" } */
+ // FIXME: would be better to report it at the close-of-scope
+}
--- /dev/null
+#include <stdio.h>
+#include <unistd.h>
+#include <string.h>
+
+char test_1 (FILE *logfile)
+{
+ char *password = getpass (">"); /* { dg-message "\\(1\\) sensitive value acquired here" } */
+ fprintf (logfile, "got password %s\n", password); /* { dg-warning "sensitive value 'password' written to output file \\\[CWE-532\\\]" } */
+ /* { dg-message "\\(2\\) sensitive value 'password' written to output file; acquired at \\(1\\)" "" { target *-*-* } .-1 } */
+}
+
+char test_2 (FILE *logfile, int i)
+{
+ char *password = getpass (">"); /* { dg-message "\\(1\\) sensitive value acquired here" } */
+ fprintf (logfile, "got password[%i]: %s\n", i, password); /* { dg-warning "sensitive value 'password' written to output file \\\[CWE-532\\\]" } */
+ /* { dg-message "\\(2\\) sensitive value 'password' written to output file; acquired at \\(1\\)" "" { target *-*-* } .-1 } */
+}
+
+char test_3 (FILE *logfile)
+{
+ char *password = getpass (">"); /* { dg-message "\\(1\\) sensitive value acquired here" } */
+ printf ("got password %s\n", password); /* { dg-warning "sensitive value 'password' written to output file \\\[CWE-532\\\]" } */
+ /* { dg-message "\\(2\\) sensitive value 'password' written to output file; acquired at \\(1\\)" "" { target *-*-* } .-1 } */
+}
+
+char test_4 (FILE *logfile)
+{
+ char *password = getpass (">"); /* { dg-message "\\(1\\) sensitive value acquired here" } */
+ fwrite (password, strlen (password), 1, logfile); /* { dg-warning "sensitive value 'password' written to output file \\\[CWE-532\\\]" } */
+ /* { dg-message "\\(2\\) sensitive value 'password' written to output file; acquired at \\(1\\)" "" { target *-*-* } .-1 } */
+}
+
+static void called_by_test_5 (const char *value)
+{
+ printf ("%s", value); /* { dg-warning "sensitive value 'value' written to output file \\\[CWE-532\\\]" } */
+}
+
+char test_5 (FILE *logfile)
+{
+ char *password = getpass (">");
+ called_by_test_5 (password); /* { dg-message "passing sensitive value 'password' in call to 'called_by_test_5' from 'test_5'" } */
+}
+
+static char *called_by_test_6 (void)
+{
+ return getpass (">"); /* { dg-message "sensitive value acquired here" } */
+}
+
+char test_6 (FILE *logfile)
+{
+ char *password = called_by_test_6 (); /* { dg-message "returning sensitive value to 'test_6' from 'called_by_test_6'" } */
+ printf ("%s", password); /* { dg-warning "sensitive value 'password' written to output file \\\[CWE-532\\\]" } */
+}
+
+/* TODO: strdup etc, strcpy, memcpy, etc. */
--- /dev/null
+#include "../pr26983.c"
--- /dev/null
+/* { dg-additional-options "-fdiagnostics-show-line-numbers -fdiagnostics-path-format=inline-events -fdiagnostics-show-caret" } */
+/* { dg-enable-nn-line-numbers "" } */
+
+#include <setjmp.h>
+#include <stddef.h>
+#include "analyzer-decls.h"
+
+extern void foo (int);
+
+void test_1 (void)
+{
+ setjmp (NULL);
+}
+
+void test_2 (void)
+{
+ jmp_buf env;
+ int i;
+
+ foo (0);
+
+ i = setjmp(env);
+
+ foo (1);
+
+ if (i != 0)
+ {
+ foo (2);
+ __analyzer_dump_path (); /* { dg-message "path" } */
+ }
+ else
+ longjmp (env, 1);
+
+ foo (3);
+}
+
+/* { dg-begin-multiline-output "" }
+ NN | __analyzer_dump_path ();
+ | ^~~~~~~~~~~~~~~~~~~~~~~
+ 'test_2': event 1
+ |
+ | NN | i = setjmp(env);
+ | | ^~~~~~
+ | | |
+ | | (1) 'setjmp' called here
+ |
+ 'test_2': events 2-4
+ |
+ | NN | if (i != 0)
+ | | ^
+ | | |
+ | | (2) following 'false' branch (when 'i == 0')...
+ |......
+ | NN | longjmp (env, 1);
+ | | ~~~~~~~~~~~~~~~~
+ | | |
+ | | (3) ...to here
+ | | (4) rewinding within 'test_2' from 'longjmp'...
+ |
+ 'test_2': event 5
+ |
+ | NN | i = setjmp(env);
+ | | ^~~~~~
+ | | |
+ | | (5) ...to 'setjmp' (saved at (1))
+ |
+ 'test_2': events 6-8
+ |
+ | NN | if (i != 0)
+ | | ^
+ | | |
+ | | (6) following 'true' branch (when 'i != 0')...
+ | NN | {
+ | NN | foo (2);
+ | | ~~~~~~~
+ | | |
+ | | (7) ...to here
+ | NN | __analyzer_dump_path ();
+ | | ~~~~~~~~~~~~~~~~~~~~~~~
+ | | |
+ | | (8) here
+ |
+ { dg-end-multiline-output "" } */
+
+void test_3 (void)
+{
+ longjmp (NULL, 0);
+}
+
+void test_4 (void)
+{
+ longjmp (NULL, 1);
+}
+
+void test_5 (void)
+{
+ jmp_buf env;
+ longjmp (env, 1);
+}
--- /dev/null
+/* { dg-additional-options "-fdiagnostics-show-line-numbers -fdiagnostics-path-format=inline-events -fdiagnostics-show-caret" } */
+/* { dg-enable-nn-line-numbers "" } */
+
+#include <setjmp.h>
+#include <stddef.h>
+#include "analyzer-decls.h"
+
+extern void foo (int);
+
+static jmp_buf env;
+
+static void inner (void)
+{
+ longjmp (env, 1);
+}
+
+void outer (void)
+{
+ int i;
+
+ foo (0);
+
+ i = setjmp(env);
+
+ if (i != 0)
+ {
+ foo (2);
+ __analyzer_dump_path (); /* { dg-message "path" } */
+ }
+ else
+ {
+ foo (1);
+ inner ();
+ }
+ foo (3);
+}
+
+/* { dg-begin-multiline-output "" }
+ NN | __analyzer_dump_path ();
+ | ^~~~~~~~~~~~~~~~~~~~~~~
+ 'outer': event 1
+ |
+ | NN | void outer (void)
+ | | ^~~~~
+ | | |
+ | | (1) entry to 'outer'
+ |
+ 'outer': event 2
+ |
+ | NN | i = setjmp(env);
+ | | ^~~~~~
+ | | |
+ | | (2) 'setjmp' called here
+ |
+ 'outer': events 3-5
+ |
+ | NN | if (i != 0)
+ | | ^
+ | | |
+ | | (3) following 'false' branch (when 'i == 0')...
+ |......
+ | NN | foo (1);
+ | | ~~~~~~~
+ | | |
+ | | (4) ...to here
+ | NN | inner ();
+ | | ~~~~~~~~
+ | | |
+ | | (5) calling 'inner' from 'outer'
+ |
+ +--> 'inner': events 6-7
+ |
+ | NN | static void inner (void)
+ | | ^~~~~
+ | | |
+ | | (6) entry to 'inner'
+ | NN | {
+ | NN | longjmp (env, 1);
+ | | ~~~~~~~~~~~~~~~~
+ | | |
+ | | (7) rewinding from 'longjmp' in 'inner'...
+ |
+ <------+
+ |
+ 'outer': event 8
+ |
+ | NN | i = setjmp(env);
+ | | ^~~~~~
+ | | |
+ | | (8) ...to 'setjmp' in 'outer' (saved at (2))
+ |
+ 'outer': events 9-11
+ |
+ | NN | if (i != 0)
+ | | ^
+ | | |
+ | | (9) following 'true' branch (when 'i != 0')...
+ | NN | {
+ | NN | foo (2);
+ | | ~~~~~~~
+ | | |
+ | | (10) ...to here
+ | NN | __analyzer_dump_path ();
+ | | ~~~~~~~~~~~~~~~~~~~~~~~
+ | | |
+ | | (11) here
+ |
+ { dg-end-multiline-output "" } */
--- /dev/null
+/* { dg-additional-options "-fdiagnostics-show-line-numbers -fdiagnostics-path-format=inline-events -fdiagnostics-show-caret" } */
+/* { dg-enable-nn-line-numbers "" } */
+
+#include <setjmp.h>
+#include "analyzer-decls.h"
+
+extern int foo (int);
+static jmp_buf buf;
+
+void inner (int x)
+{
+ foo (x);
+ longjmp (buf, 1);
+ foo (x);
+}
+
+void outer (int y)
+{
+ foo (y);
+ inner (y);
+ foo (y);
+}
+
+int main (void)
+{
+ if (!setjmp(buf))
+ outer (42);
+ else
+ __analyzer_dump_path (); /* { dg-message "path" } */
+ return 0;
+}
+
+/* { dg-begin-multiline-output "" }
+ NN | __analyzer_dump_path ();
+ | ^~~~~~~~~~~~~~~~~~~~~~~
+ 'main': event 1
+ |
+ | NN | int main (void)
+ | | ^~~~
+ | | |
+ | | (1) entry to 'main'
+ |
+ 'main': event 2
+ |
+ | NN | if (!setjmp(buf))
+ | | ^~~~~~
+ | | |
+ | | (2) 'setjmp' called here
+ |
+ 'main': events 3-5
+ |
+ | NN | if (!setjmp(buf))
+ | | ^
+ | | |
+ | | (3) following 'true' branch...
+ | NN | outer (42);
+ | | ~~~~~~~~~~
+ | | |
+ | | (4) ...to here
+ | | (5) calling 'outer' from 'main'
+ |
+ +--> 'outer': events 6-7
+ |
+ | NN | void outer (int y)
+ | | ^~~~~
+ | | |
+ | | (6) entry to 'outer'
+ |......
+ | NN | inner (y);
+ | | ~~~~~~~~~
+ | | |
+ | | (7) calling 'inner' from 'outer'
+ |
+ +--> 'inner': events 8-9
+ |
+ | NN | void inner (int x)
+ | | ^~~~~
+ | | |
+ | | (8) entry to 'inner'
+ |......
+ | NN | longjmp (buf, 1);
+ | | ~~~~~~~~~~~~~~~~
+ | | |
+ | | (9) rewinding from 'longjmp' in 'inner'...
+ |
+ <-------------+
+ |
+ 'main': event 10
+ |
+ | NN | if (!setjmp(buf))
+ | | ^~~~~~
+ | | |
+ | | (10) ...to 'setjmp' in 'main' (saved at (2))
+ |
+ 'main': events 11-13
+ |
+ | NN | if (!setjmp(buf))
+ | | ^
+ | | |
+ | | (11) following 'false' branch...
+ |......
+ | NN | __analyzer_dump_path ();
+ | | ~~~~~~~~~~~~~~~~~~~~~~~
+ | | |
+ | | (12) ...to here
+ | | (13) here
+ |
+ { dg-end-multiline-output "" } */
+
--- /dev/null
+/* { dg-additional-options "-fdiagnostics-show-line-numbers -fdiagnostics-path-format=inline-events -fdiagnostics-show-caret" } */
+/* { dg-enable-nn-line-numbers "" } */
+
+#include <setjmp.h>
+#include <stddef.h>
+#include "analyzer-decls.h"
+
+static jmp_buf env;
+
+static void inner (void)
+{
+ setjmp (env);
+}
+
+void outer (void)
+{
+ int i;
+
+ inner ();
+
+ longjmp (env, 42); /* { dg-warning "'longjmp' called after enclosing function of 'setjmp' has returned" } */
+}
+
+/* { dg-begin-multiline-output "" }
+ NN | longjmp (env, 42);
+ | ^~~~~~~~~~~~~~~~~
+ 'outer': events 1-2
+ |
+ | NN | void outer (void)
+ | | ^~~~~
+ | | |
+ | | (1) entry to 'outer'
+ |......
+ | NN | inner ();
+ | | ~~~~~~~~
+ | | |
+ | | (2) calling 'inner' from 'outer'
+ |
+ +--> 'inner': event 3
+ |
+ | NN | static void inner (void)
+ | | ^~~~~
+ | | |
+ | | (3) entry to 'inner'
+ |
+ 'inner': event 4
+ |
+ | NN | setjmp (env);
+ | | ^~~~~~
+ | | |
+ | | (4) 'setjmp' called here
+ |
+ <------+
+ |
+ 'outer': events 5-6
+ |
+ | NN | inner ();
+ | | ^~~~~~~~
+ | | |
+ | | (5) returning to 'outer' from 'inner'
+ | NN |
+ | NN | longjmp (env, 42);
+ | | ~~~~~~~~~~~~~~~~~
+ | | |
+ | | (6) here
+ |
+ { dg-end-multiline-output "" } */
--- /dev/null
+#include <setjmp.h>
+#include <stddef.h>
+#include <stdlib.h>
+
+extern void foo (int);
+
+static jmp_buf env;
+
+static void inner (void)
+{
+ void *ptr = malloc (1024); /* { dg-message "allocated here" } */
+ longjmp (env, 1); /* { dg-warning "leak of 'ptr'" } */
+ free (ptr);
+}
+
+void outer (void)
+{
+ int i;
+
+ foo (0);
+
+ i = setjmp(env);
+
+ if (i == 0)
+ {
+ foo (1);
+ inner ();
+ }
+
+ foo (3);
+}
--- /dev/null
+#include <setjmp.h>
+#include <stddef.h>
+#include <stdlib.h>
+
+extern void foo (int);
+
+static jmp_buf env;
+
+static void inner (void)
+{
+ longjmp (env, 1); /* { dg-warning "leak of 'ptr'" } */
+}
+
+static void middle (void)
+{
+ void *ptr = malloc (1024); /* { dg-message "allocated here" } */
+ inner ();
+ free (ptr);
+}
+
+void outer (void)
+{
+ int i;
+
+ foo (0);
+
+ i = setjmp(env);
+
+ if (i == 0)
+ {
+ foo (1);
+ middle ();
+ }
+
+ foo (3);
+}
--- /dev/null
+/* { dg-additional-options "-fdiagnostics-show-line-numbers -fdiagnostics-path-format=inline-events -fdiagnostics-show-caret" } */
+/* { dg-enable-nn-line-numbers "" } */
+
+#include <setjmp.h>
+#include <stdlib.h>
+
+extern void foo (int);
+
+static jmp_buf env;
+
+static void inner (void)
+{
+ longjmp (env, 1); /* { dg-warning "leak of 'ptr'" } */
+}
+
+static void middle (void)
+{
+ void *ptr = malloc (1024);
+ inner ();
+ free (ptr);
+}
+
+void outer (void)
+{
+ int i;
+
+ foo (0);
+
+ i = setjmp(env);
+
+ if (i == 0)
+ {
+ foo (1);
+ middle ();
+ }
+
+ foo (3);
+}
+
+/* { dg-begin-multiline-output "" }
+ NN | longjmp (env, 1);
+ | ^~~~~~~~~~~~~~~~
+ 'outer': event 1
+ |
+ | NN | void outer (void)
+ | | ^~~~~
+ | | |
+ | | (1) entry to 'outer'
+ |
+ 'outer': event 2
+ |
+ | NN | i = setjmp(env);
+ | | ^~~~~~
+ | | |
+ | | (2) 'setjmp' called here
+ |
+ 'outer': events 3-5
+ |
+ | NN | if (i == 0)
+ | | ^
+ | | |
+ | | (3) following 'true' branch (when 'i == 0')...
+ | NN | {
+ | NN | foo (1);
+ | | ~~~~~~~
+ | | |
+ | | (4) ...to here
+ | NN | middle ();
+ | | ~~~~~~~~~
+ | | |
+ | | (5) calling 'middle' from 'outer'
+ |
+ +--> 'middle': events 6-8
+ |
+ | NN | static void middle (void)
+ | | ^~~~~~
+ | | |
+ | | (6) entry to 'middle'
+ | NN | {
+ | NN | void *ptr = malloc (1024);
+ | | ~~~~~~~~~~~~~
+ | | |
+ | | (7) allocated here
+ | NN | inner ();
+ | | ~~~~~~~~
+ | | |
+ | | (8) calling 'inner' from 'middle'
+ |
+ +--> 'inner': events 9-11
+ |
+ | NN | static void inner (void)
+ | | ^~~~~
+ | | |
+ | | (9) entry to 'inner'
+ | NN | {
+ | NN | longjmp (env, 1);
+ | | ~~~~~~~~~~~~~~~~
+ | | |
+ | | (10) 'ptr' leaks here; was allocated at (7)
+ | | (11) rewinding from 'longjmp' in 'inner'...
+ |
+ <-------------+
+ |
+ 'outer': event 12
+ |
+ | NN | i = setjmp(env);
+ | | ^~~~~~
+ | | |
+ | | (12) ...to 'setjmp' in 'outer' (saved at (2))
+ |
+ { dg-end-multiline-output "" } */
--- /dev/null
+/* { dg-additional-options "-fdiagnostics-show-line-numbers -fdiagnostics-path-format=inline-events -fdiagnostics-show-caret" } */
+/* { dg-enable-nn-line-numbers "" } */
+
+#include <setjmp.h>
+#include <stddef.h>
+#include "analyzer-decls.h"
+
+extern void foo (int);
+
+static jmp_buf env;
+
+static void inner (void)
+{
+ /* Pass known 0 to longjmp. */
+ longjmp (env, 0);
+}
+
+void outer (void)
+{
+ int i;
+
+ foo (0);
+
+ i = setjmp(env);
+
+ if (i != 0)
+ {
+ foo (2);
+ __analyzer_dump_path (); /* { dg-message "path" } */
+ }
+ else
+ {
+ foo (1);
+ inner ();
+ }
+ foo (3);
+}
+
+/* { dg-begin-multiline-output "" }
+ NN | __analyzer_dump_path ();
+ | ^~~~~~~~~~~~~~~~~~~~~~~
+ 'outer': event 1
+ |
+ | NN | void outer (void)
+ | | ^~~~~
+ | | |
+ | | (1) entry to 'outer'
+ |
+ 'outer': event 2
+ |
+ | NN | i = setjmp(env);
+ | | ^~~~~~
+ | | |
+ | | (2) 'setjmp' called here
+ |
+ 'outer': events 3-5
+ |
+ | NN | if (i != 0)
+ | | ^
+ | | |
+ | | (3) following 'false' branch (when 'i == 0')...
+ |......
+ | NN | foo (1);
+ | | ~~~~~~~
+ | | |
+ | | (4) ...to here
+ | NN | inner ();
+ | | ~~~~~~~~
+ | | |
+ | | (5) calling 'inner' from 'outer'
+ |
+ +--> 'inner': events 6-7
+ |
+ | NN | static void inner (void)
+ | | ^~~~~
+ | | |
+ | | (6) entry to 'inner'
+ |......
+ | NN | longjmp (env, 0);
+ | | ~~~~~~~~~~~~~~~~
+ | | |
+ | | (7) rewinding from 'longjmp' in 'inner'...
+ |
+ <------+
+ |
+ 'outer': event 8
+ |
+ | NN | i = setjmp(env);
+ | | ^~~~~~
+ | | |
+ | | (8) ...to 'setjmp' in 'outer' (saved at (2))
+ |
+ 'outer': events 9-11
+ |
+ | NN | if (i != 0)
+ | | ^
+ | | |
+ | | (9) following 'true' branch (when 'i != 0')...
+ | NN | {
+ | NN | foo (2);
+ | | ~~~~~~~
+ | | |
+ | | (10) ...to here
+ | NN | __analyzer_dump_path ();
+ | | ~~~~~~~~~~~~~~~~~~~~~~~
+ | | |
+ | | (11) here
+ |
+ { dg-end-multiline-output "" } */
--- /dev/null
+/* { dg-additional-options "-fdiagnostics-show-line-numbers -fdiagnostics-path-format=inline-events -fdiagnostics-show-caret" } */
+/* { dg-enable-nn-line-numbers "" } */
+
+#include <setjmp.h>
+#include <stddef.h>
+#include "analyzer-decls.h"
+
+extern void foo (int);
+
+static jmp_buf env;
+
+extern int unknown_val;
+
+static void inner (void)
+{
+ /* Pass value that might be 0 to longjmp. */
+ longjmp (env, unknown_val);
+}
+
+void outer (void)
+{
+ int i;
+
+ foo (0);
+
+ i = setjmp(env);
+
+ if (i != 0)
+ {
+ foo (2);
+ __analyzer_dump_path (); /* { dg-message "path" } */
+ }
+ else
+ {
+ foo (1);
+ inner ();
+ }
+ foo (3);
+}
+
+/* { dg-begin-multiline-output "" }
+ NN | __analyzer_dump_path ();
+ | ^~~~~~~~~~~~~~~~~~~~~~~
+ 'outer': event 1
+ |
+ | NN | void outer (void)
+ | | ^~~~~
+ | | |
+ | | (1) entry to 'outer'
+ |
+ 'outer': event 2
+ |
+ | NN | i = setjmp(env);
+ | | ^~~~~~
+ | | |
+ | | (2) 'setjmp' called here
+ |
+ 'outer': events 3-5
+ |
+ | NN | if (i != 0)
+ | | ^
+ | | |
+ | | (3) following 'false' branch (when 'i == 0')...
+ |......
+ | NN | foo (1);
+ | | ~~~~~~~
+ | | |
+ | | (4) ...to here
+ | NN | inner ();
+ | | ~~~~~~~~
+ | | |
+ | | (5) calling 'inner' from 'outer'
+ |
+ +--> 'inner': events 6-7
+ |
+ | NN | static void inner (void)
+ | | ^~~~~
+ | | |
+ | | (6) entry to 'inner'
+ |......
+ | NN | longjmp (env, unknown_val);
+ | | ~~~~~~~~~~~~~~~~~~~~~~~~~~
+ | | |
+ | | (7) rewinding from 'longjmp' in 'inner'...
+ |
+ <------+
+ |
+ 'outer': event 8
+ |
+ | NN | i = setjmp(env);
+ | | ^~~~~~
+ | | |
+ | | (8) ...to 'setjmp' in 'outer' (saved at (2))
+ |
+ 'outer': events 9-11
+ |
+ | NN | if (i != 0)
+ | | ^
+ | | |
+ | | (9) following 'true' branch (when 'i != 0')...
+ | NN | {
+ | NN | foo (2);
+ | | ~~~~~~~
+ | | |
+ | | (10) ...to here
+ | NN | __analyzer_dump_path ();
+ | | ~~~~~~~~~~~~~~~~~~~~~~~
+ | | |
+ | | (11) here
+ |
+ { dg-end-multiline-output "" } */
--- /dev/null
+/* Example of a bad call within a signal handler.
+ 'handler' calls 'custom_logger' which calls 'fprintf', and 'fprintf' is
+ not allowed from a signal handler. */
+
+#include <stdio.h>
+#include <signal.h>
+
+extern void body_of_program(void);
+
+void custom_logger(const char *msg)
+{
+ fprintf(stderr, "LOG: %s", msg); /* { dg-warning "call to 'fprintf' from within signal handler" } */
+}
+
+static void handler(int signum)
+{
+ custom_logger("got signal");
+}
+
+int main(int argc, const char *argv[])
+{
+ custom_logger("started");
+
+ signal(SIGINT, handler); /* { dg-message "registering 'handler' as signal handler" } */
+
+ body_of_program();
+
+ custom_logger("stopped");
+
+ return 0;
+}
--- /dev/null
+/* Example of a bad call within a signal handler.
+ 'handler' calls 'custom_logger' which calls 'fprintf', and 'fprintf' is
+ not allowed from a signal handler. */
+
+#include <stdio.h>
+#include <signal.h>
+
+extern void body_of_program(void);
+
+int logging = 1;
+
+void custom_logger(const char *msg)
+{
+ if (logging)
+ fprintf(stderr, "LOG: %s", msg); /* { dg-warning "call to 'fprintf' from within signal handler" } */
+}
+
+static void handler(int signum)
+{
+ custom_logger("got signal");
+}
+
+int main(int argc, const char *argv[])
+{
+ custom_logger("started");
+
+ signal(SIGINT, handler); /* { dg-message "registering 'handler' as signal handler" } */
+
+ body_of_program();
+
+ custom_logger("stopped");
+
+ return 0;
+}
--- /dev/null
+#include <stdio.h>
+#include <signal.h>
+#include <stdlib.h>
+
+extern void body_of_program(void);
+
+void custom_logger(const char *msg)
+{
+ fprintf(stderr, "LOG: %s", msg); /* { dg-warning "call to 'fprintf' from within signal handler" } */
+}
+
+static void handler(int signum)
+{
+ custom_logger("got signal");
+}
+
+void test (void)
+{
+ void *ptr = malloc (1024);
+ signal(SIGINT, handler); /* { dg-message "registering 'handler' as signal handler" } */
+ body_of_program();
+ free (ptr);
+}
--- /dev/null
+/* Verify how paths are printed for signal-handler diagnostics. */
+
+/* { dg-options "-fanalyzer -fdiagnostics-show-line-numbers -fdiagnostics-path-format=inline-events -fdiagnostics-show-caret" } */
+/* { dg-enable-nn-line-numbers "" } */
+
+#include <stdio.h>
+#include <signal.h>
+#include <stdlib.h>
+
+extern void body_of_program(void);
+
+void custom_logger(const char *msg)
+{
+ fprintf(stderr, "LOG: %s", msg); /* { dg-warning "call to 'fprintf' from within signal handler" } */
+}
+
+static void int_handler(int signum)
+{
+ custom_logger("got signal");
+}
+
+void test (void)
+{
+ void *ptr = malloc (1024);
+ signal(SIGINT, int_handler);
+ body_of_program();
+ free (ptr);
+}
+
+/* "call to 'fprintf' from within signal handler [CWE-479]". */
+/* { dg-begin-multiline-output "" }
+ NN | fprintf(stderr, "LOG: %s", msg);
+ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ 'test': events 1-2
+ |
+ | NN | void test (void)
+ | | ^~~~
+ | | |
+ | | (1) entry to 'test'
+ |......
+ | NN | signal(SIGINT, int_handler);
+ | | ~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ | | |
+ | | (2) registering 'int_handler' as signal handler
+ |
+ event 3
+ |
+ |cc1:
+ | (3): later on, when the signal is delivered to the process
+ |
+ +--> 'int_handler': events 4-5
+ |
+ | NN | static void int_handler(int signum)
+ | | ^~~~~~~~~~~
+ | | |
+ | | (4) entry to 'int_handler'
+ | NN | {
+ | NN | custom_logger("got signal");
+ | | ~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ | | |
+ | | (5) calling 'custom_logger' from 'int_handler'
+ |
+ +--> 'custom_logger': events 6-7
+ |
+ | NN | void custom_logger(const char *msg)
+ | | ^~~~~~~~~~~~~
+ | | |
+ | | (6) entry to 'custom_logger'
+ | NN | {
+ | NN | fprintf(stderr, "LOG: %s", msg);
+ | | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ | | |
+ | | (7) call to 'fprintf' from within signal handler
+ |
+ { dg-end-multiline-output "" } */
--- /dev/null
+/* Verify how paths are printed for signal-handler diagnostics. */
+
+/* { dg-options "-fanalyzer -fdiagnostics-show-line-numbers -fdiagnostics-path-format=inline-events -fdiagnostics-show-caret" } */
+/* { dg-enable-nn-line-numbers "" } */
+
+#include <stdio.h>
+#include <signal.h>
+#include <stdlib.h>
+
+extern void body_of_program(void);
+
+void custom_logger(const char *msg)
+{
+ fprintf(stderr, "LOG: %s", msg); /* { dg-warning "call to 'fprintf' from within signal handler" } */
+}
+
+static void int_handler(int signum)
+{
+ custom_logger("got signal");
+}
+
+static void register_handler ()
+{
+ signal(SIGINT, int_handler);
+}
+
+void test (void)
+{
+ register_handler ();
+ body_of_program();
+}
+
+/* "call to 'fprintf' from within signal handler [CWE-479]". */
+/* { dg-begin-multiline-output "" }
+ NN | fprintf(stderr, "LOG: %s", msg);
+ | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ 'test': events 1-2
+ |
+ | NN | void test (void)
+ | | ^~~~
+ | | |
+ | | (1) entry to 'test'
+ | NN | {
+ | NN | register_handler ();
+ | | ~~~~~~~~~~~~~~~~~~~
+ | | |
+ | | (2) calling 'register_handler' from 'test'
+ |
+ +--> 'register_handler': events 3-4
+ |
+ | NN | static void register_handler ()
+ | | ^~~~~~~~~~~~~~~~
+ | | |
+ | | (3) entry to 'register_handler'
+ | NN | {
+ | NN | signal(SIGINT, int_handler);
+ | | ~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ | | |
+ | | (4) registering 'int_handler' as signal handler
+ |
+ event 5
+ |
+ |cc1:
+ | (5): later on, when the signal is delivered to the process
+ |
+ +--> 'int_handler': events 6-7
+ |
+ | NN | static void int_handler(int signum)
+ | | ^~~~~~~~~~~
+ | | |
+ | | (6) entry to 'int_handler'
+ | NN | {
+ | NN | custom_logger("got signal");
+ | | ~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ | | |
+ | | (7) calling 'custom_logger' from 'int_handler'
+ |
+ +--> 'custom_logger': events 8-9
+ |
+ | NN | void custom_logger(const char *msg)
+ | | ^~~~~~~~~~~~~
+ | | |
+ | | (8) entry to 'custom_logger'
+ | NN | {
+ | NN | fprintf(stderr, "LOG: %s", msg);
+ | | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ | | |
+ | | (9) call to 'fprintf' from within signal handler
+ |
+ { dg-end-multiline-output "" } */
--- /dev/null
+/* Verify that we clarify the sense of paths involving strcmp. */
+
+#include <string.h>
+#include <stdlib.h>
+
+int test_1 (const char *str, char *ptr)
+{
+ if (strcmp (str, "VALUE")) /* { dg-message "following 'true' branch \\(when the strings are non-equal\\)\\.\\.\\." } */
+ free (ptr);
+ free (ptr); /* { dg-warning "double-'free' of 'ptr'" } */
+}
+
+int test_2 (const char *str, char *ptr)
+{
+ if (strcmp (str, "VALUE") == 0) /* { dg-message "following 'true' branch \\(when the strings are equal\\)\\.\\.\\." } */
+ free (ptr);
+ free (ptr); /* { dg-warning "double-'free' of 'ptr'" } */
+}
+
+int test_3 (const char *str, char *ptr)
+{
+ if (!strcmp (str, "VALUE")) /* { dg-message "following 'true' branch \\(when the strings are equal\\)\\.\\.\\." } */
+ free (ptr);
+ free (ptr); /* { dg-warning "double-'free' of 'ptr'" } */
+}
+
+int test_4 (const char *str, char *ptr)
+{
+ if (strcmp (str, "VALUE")) /* { dg-message "following 'false' branch \\(when the strings are equal\\)\\.\\.\\." } */
+ {
+ }
+ else
+ free (ptr);
+ free (ptr); /* { dg-warning "double-'free' of 'ptr'" } */
+}
--- /dev/null
+/* { dg-additional-options "-fanalyzer-transitivity" } */
+
+#include "analyzer-decls.h"
+
+void test (int i)
+{
+ switch (i)
+ {
+ case 0:
+ __analyzer_eval (i == 0); /* { dg-warning "TRUE" } */
+ break;
+
+ case 3 ... 5:
+ __analyzer_eval (i >= 3); /* { dg-warning "TRUE" } */
+ __analyzer_eval (i <= 5); /* { dg-warning "TRUE" } */
+ break;
+
+ default:
+ __analyzer_eval (i == 0); /* { dg-warning "FALSE" } */
+ __analyzer_eval (i == 2); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (i == 3); /* { dg-warning "FALSE" } */
+ __analyzer_eval (i == 4); /* { dg-warning "FALSE" "" { xfail *-*-* } } */
+ /* { dg-warning "UNKNOWN" "" { target *-*-* } .-1 } */
+ /* TODO(xfail^^^): we're only checking against endpoints of case
+ ranges, not the insides. */
+ __analyzer_eval (i == 5); /* { dg-warning "FALSE" } */
+ __analyzer_eval (i == 6); /* { dg-warning "UNKNOWN" } */
+ break;
+ }
+}
--- /dev/null
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+struct foo
+{
+ signed int i;
+ char buf[256];
+};
+
+char test_1(FILE *f)
+{
+ struct foo tmp;
+
+ if (1 == fread(&tmp, sizeof(tmp), 1, f)) { /* { dg-message "\\(1\\) 'tmp' gets an unchecked value here" } */
+ /* { dg-message "\\(2\\) following 'true' branch\\.\\.\\." "" { target *-*-* } .-1 } */
+ /* BUG: the following array lookup trusts that the input data's index is
+ in the range 0 <= i < 256; otherwise it's accessing the stack */
+    return tmp.buf[tmp.i]; /* { dg-warning "use of tainted value 'tmp.i' in array lookup without bounds checking" } */
+ /* { dg-message "23: \\(3\\) \\.\\.\\.to here" "" { target *-*-* } .-1 } */
+ /* { dg-message "23: \\(4\\) 'tmp.i' has an unchecked value here \\(from 'tmp'\\)" "" { target *-*-* } .-2 } */
+ /* { dg-message "\\(5\\) use of tainted value 'tmp.i' in array lookup without bounds checking" "" { target *-*-* } .-3 } */
+
+    // TODO: better messages for state changes
+ }
+ return 0;
+}
+
+char test_2(struct foo *f, int i)
+{
+ /* not a bug: the data is not known to be tainted: */
+ return f->buf[f->i];
+}
+
+char test_3(FILE *f)
+{
+ struct foo tmp;
+
+ if (1 == fread(&tmp, sizeof(tmp), 1, f)) {
+ if (tmp.i >= 0 && tmp.i < 256) {
+ /* not a bug: the access is guarded by upper and lower bounds: */
+ return tmp.buf[tmp.i];
+ }
+ }
+ return 0;
+}
+
+char test_4(FILE *f)
+{
+ struct foo tmp;
+
+ if (1 == fread(&tmp, sizeof(tmp), 1, f)) {
+ if (tmp.i >= 0) { /* { dg-message "'tmp.i' has an unchecked value here \\(from 'tmp'\\)" } */
+ /* { dg-message "'tmp.i' has its lower bound checked here" "" { target *-*-* } .-1 } */
+ return tmp.buf[tmp.i]; /* { dg-warning "use of tainted value 'tmp.i' in array lookup without upper-bounds checking" } */
+ }
+ }
+ return 0;
+}
+
+char test_5(FILE *f)
+{
+ struct foo tmp;
+
+ if (1 == fread(&tmp, sizeof(tmp), 1, f)) {
+ if (tmp.i < 256) { /* { dg-message "'tmp.i' has an unchecked value here \\(from 'tmp'\\)" } */
+ /* { dg-message "'tmp.i' has its upper bound checked here" "" { target *-*-* } .-1 } */
+ return tmp.buf[tmp.i]; /* { dg-warning "use of tainted value 'tmp.i' in array lookup without lower-bounds checking" } */
+ }
+ }
+ return 0;
+}
+
+/* Unsigned types have a natural lower bound of 0.  */
+struct bar
+{
+ unsigned int i;
+ char buf[256];
+};
+
+char test_6(FILE *f)
+{
+ struct bar tmp;
+
+ if (1 == fread(&tmp, sizeof(tmp), 1, f)) {
+ return tmp.buf[tmp.i]; /* { dg-warning "use of tainted value 'tmp.i' in array lookup without upper-bounds checking" } */
+ }
+ return 0;
+}
+
+char test_7(FILE *f)
+{
+ struct bar tmp;
+
+ if (1 == fread(&tmp, sizeof(tmp), 1, f)) {
+ if (tmp.i >= 0) {
+ return tmp.buf[tmp.i]; /* { dg-warning "use of tainted value 'tmp.i' in array lookup without upper-bounds checking" } */
+ }
+ }
+ return 0;
+}
+
+char test_8(FILE *f)
+{
+ struct bar tmp;
+
+ if (1 == fread(&tmp, sizeof(tmp), 1, f)) {
+ if (tmp.i < 256) {
+ /* not a bug: has an upper bound, and an implicit lower bound: */
+ return tmp.buf[tmp.i];
+ }
+ }
+ return 0;
+}
+
+char test_9(FILE *f)
+{
+ struct foo tmp;
+
+ if (1 == fread(&tmp, sizeof(tmp), 1, f)) {
+ if (tmp.i == 42) {
+ /* not a bug: tmp.i compared against a specific value: */
+ return tmp.buf[tmp.i]; /* { dg-bogus "tainted" "" { xfail *-*-* } } */
+ // TODO: xfail
+ }
+ }
+ return 0;
+}
--- /dev/null
+#include "analyzer-decls.h"
+
+typedef void (*free_func)(void *opaque, void *address);
+
+typedef struct z_stream_s {
+ struct internal_state *state;
+ free_func zfree;
+ void *opaque;
+} z_stream;
+
+struct internal_state {
+ z_stream *strm;
+ int status;
+ unsigned char *pending_buf;
+ unsigned char *window;
+ unsigned short *prev;
+ unsigned short *head;
+};
+
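+/* Check the number of exploded nodes at each stage of a cleanup sequence
+   with several independent conditionals (modelled on zlib's deflateEnd).  */
+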
+int deflateEnd(z_stream *strm)
+{
+ int status;
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 exploded node" } */
+
+ if (strm == 0 || strm->state == 0)
+ return (-2);
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 exploded node" } */
+
+ status = strm->state->status;
+ if (status != 42 && status != 113 && status != 666) {
+ return (-2);
+ }
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "4 exploded nodes" } */
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 exploded node" } */
+
+ if (strm->state->pending_buf)
+ (*(strm->zfree))(strm->opaque, (void *)(strm->state->pending_buf));
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 exploded nodes" } */
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 exploded node" } */
+
+ if (strm->state->head)
+ (*(strm->zfree))(strm->opaque, (void *)(strm->state->head));
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 exploded nodes" } */
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 exploded node" } */
+
+ if (strm->state->prev)
+ (*(strm->zfree))(strm->opaque, (void *)(strm->state->prev));
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 exploded nodes" } */
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 exploded node" } */
+
+ if (strm->state->window)
+ (*(strm->zfree))(strm->opaque, (void *)(strm->state->window));
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 exploded nodes" } */
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 exploded node" } */
+
+ (*(strm->zfree))(strm->opaque, (void *)(strm->state));
+ strm->state = 0;
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 exploded node" } */
+
+ return status == 113 ? (-3) : 0;
+}
--- /dev/null
+/* { dg-additional-options "-Wno-analyzer-too-complex" } */
+
+typedef void * (*alloc_func)(void * opaque, unsigned items, unsigned size);
+typedef void (*free_func)(void * opaque, void * address);
+
+typedef struct z_stream_s {
+ char *msg;
+ alloc_func zalloc;
+ free_func zfree;
+ void * opaque;
+} z_stream;
+
+void * zcalloc(void * opaque, unsigned items, unsigned size);
+void zcfree(void * opaque, void * ptr);
+
+int deflateInit2_(z_stream *strm, int level, int method, int windowBits,
+ int memLevel, int strategy, const char *version,
+ int stream_size) {
+ int noheader = 0;
+ static const char *my_version = "1.1.3";
+
+ if (version == 0 || version[0] != my_version[0] ||
+ stream_size != sizeof(z_stream)) {
+ return (-6);
+ }
+ if (strm == 0)
+ return (-2);
+
+ strm->msg = 0;
+ if (strm->zalloc == 0) {
+ strm->zalloc = zcalloc;
+ strm->opaque = (void *)0;
+ }
+ if (strm->zfree == 0)
+ strm->zfree = zcfree;
+
+ if (level == (-1))
+ level = 6;
+
+ if (windowBits < 0) {
+ noheader = 1;
+ windowBits = -windowBits;
+ }
+ if (memLevel < 1 || memLevel > 9 || method != 8 || windowBits < 8 ||
+ windowBits > 15 || level < 0 || level > 9 || strategy < 0 ||
+ strategy > 2) {
+ return (-2);
+ }
+ (*((strm)->zalloc))((strm)->opaque, (1), 112);
+ return 0;
+}
--- /dev/null
+/* { dg-additional-options "-O3 -Wno-analyzer-too-complex" } */
+/* TODO: reduce this; was triggering this assert:
+ gcc_assert (pruned_state != existing_state);
+*/
+
+typedef unsigned char Byte;
+typedef unsigned int uInt;
+
+typedef void *voidp;
+
+typedef voidp (*alloc_func)(voidp opaque, uInt items, uInt size);
+
+typedef struct z_stream_s {
+ alloc_func zalloc;
+ voidp opaque;
+} z_stream;
+
+typedef z_stream *z_streamp;
+
+typedef struct inflate_huft_s inflate_huft;
+
+struct inflate_huft_s {
+ struct {
+ Byte Exop;
+ Byte Bits;
+ } what;
+ uInt base;
+};
+
+static int huft_build(uInt *, uInt, uInt, const uInt *, const uInt *,
+ inflate_huft **, uInt *, inflate_huft *, uInt *, uInt *);
+
+static int huft_build(uInt *b, uInt n, uInt s, const uInt *d, const uInt *e,
+ inflate_huft **t, uInt *m, inflate_huft *hp, uInt *hn,
+ uInt *v) {
+
+ uInt a;
+ uInt c[15 + 1];
+ uInt f;
+ int g;
+ int h;
+ register uInt i;
+ register uInt j;
+ register int k;
+ int l;
+ uInt mask;
+ register uInt *p;
+ inflate_huft *q;
+ struct inflate_huft_s r;
+ inflate_huft *u[15];
+ register int w;
+ uInt x[15 + 1];
+ uInt *xp;
+ int y;
+ uInt z;
+
+ p = c;
+
+ *p++ = 0;
+ *p++ = 0;
+ *p++ = 0;
+ *p++ = 0;
+ *p++ = 0;
+ *p++ = 0;
+ *p++ = 0;
+ *p++ = 0;
+ *p++ = 0;
+ *p++ = 0;
+ *p++ = 0;
+ *p++ = 0;
+ *p++ = 0;
+ *p++ = 0;
+ *p++ = 0;
+ *p++ = 0;
+ p = b;
+ i = n;
+ do {
+ c[*p++]++;
+ } while (--i);
+ if (c[0] == n) {
+ *t = (inflate_huft *)0;
+ *m = 0;
+ return 0;
+ }
+
+ l = *m;
+ for (j = 1; j <= 15; j++)
+ if (c[j])
+ break;
+ k = j;
+ if ((uInt)l < j)
+ l = j;
+ for (i = 15; i; i--)
+ if (c[i])
+ break;
+ g = i;
+ if ((uInt)l > i)
+ l = i;
+ *m = l;
+
+ for (y = 1 << j; j < i; j++, y <<= 1)
+ if ((y -= c[j]) < 0)
+ return (-3);
+ if ((y -= c[i]) < 0)
+ return (-3);
+ c[i] += y;
+
+ x[1] = j = 0;
+ p = c + 1;
+ xp = x + 2;
+ while (--i) {
+ *xp++ = (j += *p++);
+ }
+
+ p = b;
+ i = 0;
+ do {
+ if ((j = *p++) != 0)
+ v[x[j]++] = i;
+ } while (++i < n);
+ n = x[g];
+
+ x[0] = i = 0;
+ p = v;
+ h = -1;
+ w = -l;
+ u[0] = (inflate_huft *)0;
+ q = (inflate_huft *)0;
+ z = 0;
+
+ for (; k <= g; k++) {
+ a = c[k];
+ while (a--) {
+
+ while (k > w + l) {
+ h++;
+ w += l;
+
+ z = g - w;
+ z = z > (uInt)l ? l : z;
+ if ((f = 1 << (j = k - w)) > a + 1) {
+ f -= a + 1;
+ xp = c + k;
+ if (j < z)
+ while (++j < z) {
+ if ((f <<= 1) <= *++xp)
+ break;
+ f -= *xp;
+ }
+ }
+ z = 1 << j;
+
+ if (*hn + z > 1440)
+ return (-4);
+ u[h] = q = hp + *hn;
+ *hn += z;
+
+ if (h) {
+ x[h] = i;
+ r.what.Bits = (Byte)l;
+ r.what.Exop = (Byte)j;
+ j = i >> (w - l);
+ r.base = (uInt)(q - u[h - 1] - j);
+ u[h - 1][j] = r;
+ } else
+ *t = q;
+ }
+
+ r.what.Bits = (Byte)(k - w);
+ if (p >= v + n)
+ r.what.Exop = 128 + 64;
+ else if (*p < s) {
+ r.what.Exop = (Byte)(*p < 256 ? 0 : 32 + 64);
+ r.base = *p++;
+ } else {
+ r.what.Exop = (Byte)(e[*p - s] + 16 + 64);
+ r.base = d[*p++ - s];
+ }
+
+ f = 1 << (k - w);
+ for (j = i >> w; j < z; j += f)
+ q[j] = r;
+
+ mask = (1 << w) - 1;
+ while ((i & mask) != x[h]) {
+ h--;
+ w -= l;
+ mask = (1 << w) - 1;
+ }
+ }
+ }
+
+ return y != 0 && g != 1 ? (-5) : 0;
+}
+
+extern const uInt cplens[31];
+extern const uInt cplext[31];
+extern const uInt cpdist[30];
+extern const uInt cpdext[30];
+
+int inflate_trees_dynamic(uInt nl, uInt nd, uInt *c, uInt *bl, uInt *bd,
+ inflate_huft **tl, inflate_huft **td,
+ inflate_huft *hp, z_streamp z) {
+ int r;
+ uInt hn = 0;
+ uInt *v;
+
+ if ((v = (uInt *)(*((z)->zalloc))((z)->opaque, (288), (sizeof(uInt)))) == 0)
+ return (-4);
+
+ r = huft_build(c, nl, 257, cplens, cplext, tl, bl, hp, &hn, v);
+ r = huft_build(c + nl, nd, 0, cpdist, cpdext, td, bd, hp, &hn, v);
+ return 0;
+}
--- /dev/null
+#include <stdlib.h>
+#include <string.h>
+
+typedef unsigned char Byte;
+typedef unsigned int uInt;
+typedef unsigned long uLong;
+
+#define Z_NULL 0
+
+void test ()
+{
+ uLong comprLen = 10000*sizeof(int);
+ uLong uncomprLen = comprLen;
+ Byte *compr = (Byte*)calloc((uInt)comprLen, 1);
+ Byte *uncompr = (Byte*)calloc((uInt)uncomprLen, 1);
+ if (compr == Z_NULL || uncompr == Z_NULL)
+ exit (1);
+ strcpy((char*)uncompr, "garbage");
+ exit (0);
+}
--- /dev/null
+/* { dg-additional-options "-O3" } */
+
+#include "analyzer-decls.h"
+
+typedef long unsigned int size_t;
+typedef unsigned char Byte;
+typedef unsigned int uInt;
+typedef unsigned long uLong;
+
+extern size_t strlen(const char *__s) __attribute__((__nothrow__, __leaf__))
+ __attribute__((__pure__)) __attribute__((__nonnull__(1)));
+extern void exit(int __status) __attribute__((__nothrow__, __leaf__))
+ __attribute__((__noreturn__));
+extern char *strcpy(char *__restrict __dest, const char *__restrict __src)
+ __attribute__((__nothrow__, __leaf__)) __attribute__((__nonnull__(1, 2)));
+extern void *calloc(size_t __nmemb, size_t __size)
+ __attribute__((__nothrow__, __leaf__)) __attribute__((__malloc__));
+
+extern int compress(Byte *dest, uLong *destLen, const Byte *source,
+ uLong sourceLen);
+
+const char hello[] = "hello, hello!";
+
+void test_compress(Byte *compr, uLong comprLen, Byte *uncompr,
+ uLong uncomprLen) {
+ int err;
+ uLong len = strlen(hello) + 1;
+
+ err = compress(compr, &comprLen, (const Byte *)hello, len);
+ if (err != 0)
+ exit(1);
+ strcpy((char *)uncompr, "garbage"); /* { dg-bogus "NULL" } */
+}
+
+int main(int argc, char *argv[]) {
+ Byte *compr, *uncompr;
+ uLong comprLen = 10000 * sizeof(int);
+ uLong uncomprLen = comprLen;
+
+ compr = (Byte *)calloc((uInt)comprLen, 1);
+ uncompr = (Byte *)calloc((uInt)uncomprLen, 1);
+ if (compr == 0 || uncompr == 0)
+ exit(1);
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 exploded node" } */
+
+ test_compress(compr, comprLen, uncompr, uncomprLen);
+
+ exit(0);
+ return 0;
+}
--- /dev/null
+typedef unsigned char Byte;
+typedef unsigned int uInt;
+typedef unsigned long uLong;
+
+typedef struct z_stream_s {
+ Byte *next_in;
+ uInt avail_in;
+ uLong total_in;
+} z_stream;
+
+typedef struct inflate_blocks_state {
+ uInt bitk;
+ uLong bitb;
+ Byte *write;
+} inflate_blocks_statef;
+
+extern int inflate_flush(inflate_blocks_statef *, z_stream *, int);
+
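+/* The locals here are never initialized, so the read in
+   'b |= ((uLong)(n--, *p++)) << k' is expected to be flagged as a use of
+   an uninitialized value (see the dg-warning below).  */
+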
+int inflate_blocks(inflate_blocks_statef *s, z_stream *z, int r) {
+ uInt t;
+ uLong b;
+ uInt k;
+ Byte *p;
+ uInt n;
+ Byte *q;
+ uInt m;
+
+ while (k < (3)) {
+ {
+ if (n)
+ r = 0;
+ else {
+ {
+ s->bitb = b;
+ s->bitk = k;
+ z->avail_in = n;
+ z->total_in += p - z->next_in;
+ z->next_in = p;
+ s->write = q;
+ }
+ return inflate_flush(s, z, r);
+ }
+ };
+ b |= ((uLong)(n--, *p++)) << k; /* { dg-warning "use of uninitialized value" } */
+ k += 8;
+ }
+}
return $text
}
+
+# Verify that the initial arg is a valid .dot file
+# (by running dot -Tpng on it, and verifying the exit code is 0).
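+#
+# Example use from a testcase (the dump file name here is only illustrative):
+#   { dg-final { dg-check-dot "example.c.dot" } }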
+
+proc dg-check-dot { args } {
+ verbose "dg-check-dot: args: $args" 2
+
+ set testcase [testname-for-summary]
+
+ set dotfile [lindex $args 0]
+ verbose " dotfile: $dotfile" 2
+
+ set status [remote_exec host "dot" "-O -Tpng $dotfile"]
+ verbose " status: $status" 2
+ if { [lindex $status 0] != 0 } {
+ fail "$testcase dg-check-dot $dotfile"
+ return 0
+ }
+
+ pass "$testcase dg-check-dot $dotfile"
+}
}
}
+# If this host does not have "dot", skip this test.
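+# Typically invoked from a testcase as:
+#   { dg-require-dot "" }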
+
+proc dg-require-dot { args } {
+ verbose "dg-require-dot" 2
+ if { ![ check_dot_available ] } {
+ upvar dg-do-what dg-do-what
+ set dg-do-what [list [lindex ${dg-do-what} 0] "N" "P"]
+ }
+}
+
# If this target does not have sufficient stack size, skip this test.
proc dg-require-stack-size { args } {
}]
}
+# Return 1 if "dot" is supported on the host.
+
+proc check_dot_available { } {
+ verbose "check_dot_available" 2
+
+ set status [remote_exec host "dot" "-V"]
+ verbose " status: $status" 2
+ if { [lindex $status 0] != 0 } {
+ return 0
+ }
+ return 1
+}
+
# Return 1 if according to target_info struct and explicit target list
# target is supposed to support trampolines.
} "-flto -r -nostdlib"]
}
+# Return 1 if the compiler has been configured with analyzer support.
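+# Testcases can depend on this via "dg-require-effective-target analyzer".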
+
+proc check_effective_target_analyzer { } {
+ return [check_no_compiler_messages analyzer object {
+ void foo (void) { }
+ } "-fanalyzer"]
+}
+
# Return 1 if -mx32 -maddress-mode=short can compile, 0 otherwise.
proc check_effective_target_maybe_x32 { } {
DEFTIMEVAR (TV_LOAD , "load JIT result")
DEFTIMEVAR (TV_JIT_ACQUIRING_MUTEX , "acquiring JIT mutex")
DEFTIMEVAR (TV_JIT_CLIENT_CODE , "JIT client code")
+
+/* Analyzer timevars. */
+DEFTIMEVAR (TV_ANALYZER , "analyzer")
+DEFTIMEVAR (TV_ANALYZER_SUPERGRAPH , "analyzer: supergraph")
+DEFTIMEVAR (TV_ANALYZER_STATE_PURGE , "analyzer: state purge")
+DEFTIMEVAR (TV_ANALYZER_PLAN , "analyzer: planning")
+DEFTIMEVAR (TV_ANALYZER_SCC , "analyzer: scc")
+DEFTIMEVAR (TV_ANALYZER_WORKLIST , "analyzer: processing worklist")
+DEFTIMEVAR (TV_ANALYZER_DUMP , "analyzer: dump")
+DEFTIMEVAR (TV_ANALYZER_DIAGNOSTICS , "analyzer: emitting diagnostics")
+DEFTIMEVAR (TV_ANALYZER_SHORTEST_PATHS, "analyzer: shortest paths")
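
These timevars only show up in -ftime-report once analyzer code brackets its
phases with them. A minimal sketch using the long-standing timevar_push /
timevar_pop entry points from timevar.h; the wrapper function is hypothetical
and not part of this patch:

  #include "config.h"
  #include "system.h"
  #include "coretypes.h"
  #include "timevar.h"

  /* Hypothetical phase wrapper: charge the work to the "analyzer" timevar.  */
  static void
  run_analysis_sketch (void)
  {
    timevar_push (TV_ANALYZER);
    /* ... build the supergraph, walk the worklist, emit diagnostics ...  */
    timevar_pop (TV_ANALYZER);
  }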
extern simple_ipa_opt_pass *make_pass_local_optimization_passes (gcc::context *ctxt);
extern simple_ipa_opt_pass *make_pass_ipa_remove_symbols (gcc::context *ctxt);
+extern ipa_opt_pass_d *make_pass_analyzer (gcc::context *ctxt);
extern ipa_opt_pass_d *make_pass_ipa_whole_program_visibility (gcc::context
*ctxt);
extern simple_ipa_opt_pass *make_pass_ipa_increase_alignment (gcc::context
--- /dev/null
+/* "True" vs "False" vs "Unknown".
+ Copyright (C) 2019-2020 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tristate.h"
+#include "selftest.h"
+
+const char *
+tristate::as_string () const
+{
+ switch (m_value)
+ {
+ default:
+ gcc_unreachable ();
+ case TS_UNKNOWN:
+ return "UNKNOWN";
+ case TS_TRUE:
+ return "TRUE";
+ case TS_FALSE:
+ return "FALSE";
+ }
+}
+
+tristate
+tristate::not_ () const
+{
+ switch (m_value)
+ {
+ default:
+ gcc_unreachable ();
+ case TS_UNKNOWN:
+ return tristate (TS_UNKNOWN);
+ case TS_TRUE:
+ return tristate (TS_FALSE);
+ case TS_FALSE:
+ return tristate (TS_TRUE);
+ }
+}
+
+tristate
+tristate::or_ (tristate other) const
+{
+ switch (m_value)
+ {
+ default:
+ gcc_unreachable ();
+ case TS_UNKNOWN:
+ if (other.is_true ())
+ return tristate (TS_TRUE);
+ else
+ return tristate (TS_UNKNOWN);
+ case TS_FALSE:
+ return other;
+ case TS_TRUE:
+ return tristate (TS_TRUE);
+ }
+}
+
+tristate
+tristate::and_ (tristate other) const
+{
+ switch (m_value)
+ {
+ default:
+ gcc_unreachable ();
+ case TS_UNKNOWN:
+ if (other.is_false ())
+ return tristate (TS_FALSE);
+ else
+ return tristate (TS_UNKNOWN);
+ case TS_TRUE:
+ return other;
+ case TS_FALSE:
+ return tristate (TS_FALSE);
+ }
+}
+
+#if CHECKING_P
+
+namespace selftest {
+
+#define ASSERT_TRISTATE_TRUE(TRISTATE) \
+ SELFTEST_BEGIN_STMT \
+ ASSERT_EQ (TRISTATE, tristate (tristate::TS_TRUE)); \
+ SELFTEST_END_STMT
+
+#define ASSERT_TRISTATE_FALSE(TRISTATE) \
+ SELFTEST_BEGIN_STMT \
+ ASSERT_EQ (TRISTATE, tristate (tristate::TS_FALSE)); \
+ SELFTEST_END_STMT
+
+#define ASSERT_TRISTATE_UNKNOWN(TRISTATE) \
+ SELFTEST_BEGIN_STMT \
+ ASSERT_EQ (TRISTATE, tristate (tristate::TS_UNKNOWN)); \
+ SELFTEST_END_STMT
+
+/* Test tristate's ctors, along with is_*, as_string, operator==, and
+ operator!=. */
+
+static void
+test_ctors ()
+{
+ tristate u (tristate::TS_UNKNOWN);
+ ASSERT_FALSE (u.is_known ());
+ ASSERT_FALSE (u.is_true ());
+ ASSERT_FALSE (u.is_false ());
+ ASSERT_STREQ (u.as_string (), "UNKNOWN");
+
+ tristate t (tristate::TS_TRUE);
+ ASSERT_TRUE (t.is_known ());
+ ASSERT_TRUE (t.is_true ());
+ ASSERT_FALSE (t.is_false ());
+ ASSERT_STREQ (t.as_string (), "TRUE");
+
+ tristate f (tristate::TS_FALSE);
+ ASSERT_TRUE (f.is_known ());
+ ASSERT_FALSE (f.is_true ());
+ ASSERT_TRUE (f.is_false ());
+ ASSERT_STREQ (f.as_string (), "FALSE");
+
+ ASSERT_EQ (u, u);
+ ASSERT_EQ (t, t);
+ ASSERT_EQ (f, f);
+ ASSERT_NE (u, t);
+ ASSERT_NE (u, f);
+ ASSERT_NE (t, f);
+
+ tristate t2 (true);
+ ASSERT_TRUE (t2.is_true ());
+ ASSERT_EQ (t, t2);
+
+ tristate f2 (false);
+ ASSERT_TRUE (f2.is_false ());
+ ASSERT_EQ (f, f2);
+
+ tristate u2 (tristate::unknown ());
+ ASSERT_TRUE (!u2.is_known ());
+ ASSERT_EQ (u, u2);
+}
+
+/* Test && on tristate instances. */
+
+static void
+test_and ()
+{
+ ASSERT_TRISTATE_UNKNOWN (tristate::unknown () && tristate::unknown ());
+
+ ASSERT_TRISTATE_FALSE (tristate (false) && tristate (false));
+ ASSERT_TRISTATE_FALSE (tristate (false) && tristate (true));
+ ASSERT_TRISTATE_FALSE (tristate (true) && tristate (false));
+ ASSERT_TRISTATE_TRUE (tristate (true) && tristate (true));
+
+ ASSERT_TRISTATE_UNKNOWN (tristate::unknown () && tristate (true));
+ ASSERT_TRISTATE_UNKNOWN (tristate (true) && tristate::unknown ());
+
+ ASSERT_TRISTATE_FALSE (tristate::unknown () && tristate (false));
+ ASSERT_TRISTATE_FALSE (tristate (false) && tristate::unknown ());
+}
+
+/* Test || on tristate instances. */
+
+static void
+test_or ()
+{
+ ASSERT_TRISTATE_UNKNOWN (tristate::unknown () || tristate::unknown ());
+
+ ASSERT_TRISTATE_FALSE (tristate (false) || tristate (false));
+ ASSERT_TRISTATE_TRUE (tristate (false) || tristate (true));
+ ASSERT_TRISTATE_TRUE (tristate (true) || tristate (false));
+ ASSERT_TRISTATE_TRUE (tristate (true) || tristate (true));
+
+ ASSERT_TRISTATE_TRUE (tristate::unknown () || tristate (true));
+ ASSERT_TRISTATE_TRUE (tristate (true) || tristate::unknown ());
+
+ ASSERT_TRISTATE_UNKNOWN (tristate::unknown () || tristate (false));
+ ASSERT_TRISTATE_UNKNOWN (tristate (false) || tristate::unknown ());
+}
+
+/* Test ! on tristate instances. */
+
+static void
+test_not ()
+{
+ ASSERT_TRISTATE_UNKNOWN (!tristate::unknown ());
+ ASSERT_TRISTATE_FALSE (!tristate (true));
+ ASSERT_TRISTATE_TRUE (!tristate (false));
+}
+
+/* Run all of the selftests within this file. */
+
+void
+tristate_cc_tests ()
+{
+ test_ctors ();
+ test_and ();
+ test_or ();
+ test_not ();
+}
+
+} // namespace selftest
+
+#endif /* CHECKING_P */
--- /dev/null
+/* "True" vs "False" vs "Unknown".
+ Copyright (C) 2019-2020 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_TRISTATE_H
+#define GCC_TRISTATE_H
+
+/* "True" vs "False" vs "Unknown". */
+
+class tristate {
+ public:
+ enum value {
+ TS_UNKNOWN,
+ TS_TRUE,
+ TS_FALSE
+ };
+
+ tristate (enum value val) : m_value (val) {}
+ tristate (bool val) : m_value (val ? TS_TRUE : TS_FALSE) {}
+ static tristate unknown () { return tristate (TS_UNKNOWN); }
+
+ const char *as_string () const;
+
+ bool is_known () const { return m_value != TS_UNKNOWN; }
+ bool is_true () const { return m_value == TS_TRUE; }
+ bool is_false () const { return m_value == TS_FALSE; }
+
+ tristate not_ () const;
+ tristate or_ (tristate other) const;
+ tristate and_ (tristate other) const;
+
+ bool operator== (const tristate &other) const
+ {
+ return m_value == other.m_value;
+ }
+
+ bool operator!= (const tristate &other) const
+ {
+ return m_value != other.m_value;
+ }
+
+ private:
+ enum value m_value;
+};
+
+/* Overloaded boolean operators on tristates. */
+
+inline tristate
+operator ! (tristate t)
+{
+ return t.not_ ();
+}
+
+inline tristate
+operator || (tristate a, tristate b)
+{
+ return a.or_ (b);
+}
+
+inline tristate
+operator && (tristate a, tristate b)
+{
+ return a.and_ (b);
+}
+
+#endif /* GCC_TRISTATE_H */
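
A short usage sketch for the API above (the condition names are hypothetical;
only the tristate interface itself comes from this patch). The overloaded
operators implement Kleene-style three-valued logic, so partial knowledge can
be combined and acted on only when the result is known:

  #include "config.h"
  #include "system.h"
  #include "coretypes.h"
  #include "tristate.h"

  /* Hypothetical example: combine two partially-known conditions.  */
  static const char *
  describe (tristate lhs_nonnull, tristate idx_in_bounds)
  {
    tristate ok = lhs_nonnull && idx_in_bounds;  /* tristate::and_  */
    if (!ok.is_known ())
      return "can't tell";
    return ok.is_true () ? "definitely ok" : "definitely broken";
  }

Note that, unlike the built-in operators, the overloaded && and || cannot
short-circuit: both operands are always evaluated, the usual trade-off when
overloading these operators.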