analyzer/pending-diagnostic.o \
analyzer/program-point.o \
analyzer/program-state.o \
+ analyzer/region.o \
analyzer/region-model.o \
+ analyzer/region-model-impl-calls.o \
+ analyzer/region-model-manager.o \
+ analyzer/region-model-reachability.o \
analyzer/sm.o \
analyzer/sm-file.o \
analyzer/sm-malloc.o \
analyzer/sm-signal.o \
analyzer/sm-taint.o \
analyzer/state-purge.o \
- analyzer/supergraph.o
+ analyzer/store.o \
+ analyzer/supergraph.o \
+ analyzer/svalue.o
# Language-independent object files.
# We put the *-match.o and insn-*.o files first so that a parallel make
#if ENABLE_ANALYZER
+#pragma GCC diagnostic ignored "-Wformat-diag"
+
namespace ana {
/* Implementation of class logger. */
logger::enter_scope (const char *scope_name)
{
log ("entering: %s", scope_name);
- m_indent_level += 1;
+ inc_indent ();
}
void
log_va_partial (fmt, ap);
end_log_line ();
- m_indent_level += 1;
+ inc_indent ();
}
logger::exit_scope (const char *scope_name)
{
if (m_indent_level)
- m_indent_level -= 1;
+ dec_indent ();
else
log ("(mismatching indentation)");
log ("exiting: %s", scope_name);
void enter_scope (const char *scope_name, const char *fmt, va_list *ap)
ATTRIBUTE_GCC_DIAG(3, 0);
void exit_scope (const char *scope_name);
+ void inc_indent () { m_indent_level++; }
+ void dec_indent () { m_indent_level--; }
pretty_printer *get_printer () const { return m_pp; }
FILE *get_file () const { return m_f_out; }
analyzer_region_model_cc_tests ();
analyzer_sm_file_cc_tests ();
analyzer_sm_signal_cc_tests ();
+ analyzer_store_cc_tests ();
#endif /* #if ENABLE_ANALYZER */
}
extern void analyzer_region_model_cc_tests ();
extern void analyzer_sm_file_cc_tests ();
extern void analyzer_sm_signal_cc_tests ();
+extern void analyzer_store_cc_tests ();
} /* end of namespace ana::selftest. */
#if ENABLE_ANALYZER
+namespace ana {
+
+/* Workaround for missing location information for some stmts,
+   which ultimately should be solved by fixing the frontends
+   to provide the locations (TODO). */
+
+location_t
+get_stmt_location (const gimple *stmt, function *fun)
+{
+  if (get_pure_location (stmt->location) == UNKNOWN_LOCATION)
+    {
+      /* Workaround for missing location information for clobber
+	 stmts, which seem to lack location information in the C frontend
+	 at least.  Created by gimplify_bind_expr, which uses the
+	   BLOCK_SOURCE_END_LOCATION (BIND_EXPR_BLOCK (bind_expr))
+	 but this is never set up when the block is created in
+	 c_end_compound_stmt's pop_scope.
+	 TODO: fix this missing location information.
+
+	 For now, as a hackish workaround, use the location of the end of
+	 the function.  */
+      if (gimple_clobber_p (stmt) && fun)
+	return fun->function_end_locus;
+    }
+
+  /* Otherwise use the location recorded on the stmt itself; note that
+     for non-clobber stmts this may still be UNKNOWN_LOCATION.  */
+  return stmt->location;
+}
+
+} // namespace ana
+
/* Helper function for checkers. Is the CALL to the given function name,
and with the given number of arguments?
class callgraph_superedge;
class call_superedge;
class return_superedge;
+
class svalue;
class region_svalue;
class constant_svalue;
- class poisoned_svalue;
class unknown_svalue;
+ class poisoned_svalue;
class setjmp_svalue;
+ class initial_svalue;
+ class unaryop_svalue;
+ class binop_svalue;
+ class sub_svalue;
+ class unmergeable_svalue;
+ class placeholder_svalue;
+ class widening_svalue;
+ class compound_svalue;
+ class conjured_svalue;
+typedef hash_set<const svalue *> svalue_set;
class region;
- class map_region;
- class array_region;
+ class frame_region;
+ class function_region;
+ class label_region;
+ class decl_region;
class symbolic_region;
+ class element_region;
+ class offset_region;
+ class cast_region;
+ class field_region;
+ class string_region;
+class region_model_manager;
+struct model_merger;
+class store_manager;
+class store;
class region_model;
class region_model_context;
class impl_region_model_context;
+class call_details;
class constraint_manager;
class equiv_class;
-struct model_merger;
-struct svalue_id_merger_mapping;
-struct canonicalization;
+
class pending_diagnostic;
class state_change_event;
class checker_path;
class sm_state_map;
class stmt_finder;
class program_point;
+class function_point;
class program_state;
class exploded_graph;
class exploded_node;
class state_change;
class rewind_info_t;
+class engine;
+
/* Forward decls of functions. */
+extern void dump_tree (pretty_printer *pp, tree t);
extern void dump_quoted_tree (pretty_printer *pp, tree t);
+extern void print_quoted_type (pretty_printer *pp, tree t);
+extern int readability_comparator (const void *p1, const void *p2);
+extern int tree_cmp (const void *p1, const void *p2);
+
+/* A tree, extended with stack frame information for locals, so that
+   we can distinguish between different values of locals within a potentially
+   recursive callstack. */
+
+class path_var
+{
+public:
+  path_var (tree t, int stack_depth)
+  : m_tree (t), m_stack_depth (stack_depth)
+  {
+    // TODO: ignore stack depth for globals and constants
+  }
+
+  /* Equality requires both the underlying tree and the stack depth
+     to match.  */
+  bool operator== (const path_var &other) const
+  {
+    return (m_tree == other.m_tree
+	    && m_stack_depth == other.m_stack_depth);
+  }
+
+  /* Truth-test: a path_var is "true" iff it wraps a tree.  */
+  operator bool () const
+  {
+    return m_tree != NULL_TREE;
+  }
+
+  void dump (pretty_printer *pp) const;
+
+  tree m_tree;
+  int m_stack_depth; // or -1 for globals?
+};
+
+typedef offset_int bit_offset_t;
+typedef offset_int bit_size_t;
+typedef offset_int byte_size_t;
+
+/* The location of a region expressed as an offset relative to a
+   base region. */
+
+class region_offset
+{
+public:
+  /* Named constructor for an offset known at analysis time, as a bit
+     offset from BASE_REGION.  */
+  static region_offset make_concrete (const region *base_region,
+				      bit_offset_t offset)
+  {
+    return region_offset (base_region, offset, false);
+  }
+  /* Named constructor for an offset from BASE_REGION that is not
+     known at analysis time.  */
+  static region_offset make_symbolic (const region *base_region)
+  {
+    return region_offset (base_region, 0, true);
+  }
+
+  const region *get_base_region () const { return m_base_region; }
+
+  bool symbolic_p () const { return m_is_symbolic; }
+
+  /* Get the concrete bit offset; only valid for non-symbolic offsets.  */
+  bit_offset_t get_bit_offset () const
+  {
+    gcc_assert (!symbolic_p ());
+    return m_offset;
+  }
+
+  /* Equality: all three fields must match.  Declared const so that
+     const instances can be compared.  */
+  bool operator== (const region_offset &other) const
+  {
+    return (m_base_region == other.m_base_region
+	    && m_offset == other.m_offset
+	    && m_is_symbolic == other.m_is_symbolic);
+  }
+
+private:
+  region_offset (const region *base_region, bit_offset_t offset,
+		 bool is_symbolic)
+  : m_base_region (base_region), m_offset (offset), m_is_symbolic (is_symbolic)
+  {}
+
+  const region *m_base_region;
+  bit_offset_t m_offset;
+  bool m_is_symbolic;
+};
+
+extern location_t get_stmt_location (const gimple *stmt, function *fun);
} // namespace ana
static inline bool is_empty (Type);
};
+/* A hash traits class that uses member functions to implement
+   the various required ops. */
+
+template <typename Type>
+struct member_function_hash_traits : public typed_noop_remove<Type>
+{
+  typedef Type value_type;
+  typedef Type compare_type;
+  /* Hashing and equality are delegated to Type's own hash () and
+     operator==.  */
+  static inline hashval_t hash (value_type v) { return v.hash (); }
+  static inline bool equal (const value_type &existing,
+			    const value_type &candidate)
+  {
+    return existing == candidate;
+  }
+  /* Deleted/empty slot bookkeeping is likewise delegated to Type's
+     mark_deleted/mark_empty/is_deleted/is_empty member functions.  */
+  static inline void mark_deleted (Type &t) { t.mark_deleted (); }
+  static inline void mark_empty (Type &t) { t.mark_empty (); }
+  static inline bool is_deleted (Type t) { return t.is_deleted (); }
+  static inline bool is_empty (Type t) { return t.is_empty (); }
+};
+
+/* A map from T::key_t to T* for use in consolidating instances of T.
+   Owns all instances of T.
+   T::key_t should have operator== and be hashable. */
+
+template <typename T>
+class consolidation_map
+{
+public:
+  typedef typename T::key_t key_t;
+  typedef T instance_t;
+  typedef hash_map<key_t, instance_t *> inner_map_t;
+  typedef typename inner_map_t::iterator iterator;
+
+  /* Delete all instances of T. */
+
+  ~consolidation_map ()
+  {
+    for (typename inner_map_t::iterator iter = m_inner_map.begin ();
+	 iter != m_inner_map.end (); ++iter)
+      delete (*iter).second;
+  }
+
+  /* Get the instance of T for K if one exists, or NULL. */
+
+  T *get (const key_t &k) const
+  {
+    /* The const_cast is needed as the inner map's lookup isn't
+       const-qualified; this is a pure lookup.  */
+    if (instance_t **slot = const_cast<inner_map_t &> (m_inner_map).get (k))
+      return *slot;
+    return NULL;
+  }
+
+  /* Take ownership of INSTANCE.
+     NOTE(review): if K is already present, the previous instance is
+     overwritten without being deleted, which would leak it — presumably
+     callers only call this after get () returns NULL; confirm.  */
+
+  void put (const key_t &k, T *instance)
+  {
+    m_inner_map.put (k, instance);
+  }
+
+  size_t elements () const { return m_inner_map.elements (); }
+
+  iterator begin () const { return m_inner_map.begin (); }
+  iterator end () const { return m_inner_map.end (); }
+
+private:
+  inner_map_t m_inner_map;
+};
+
+/* Disable -Wformat-diag; we want to be able to use pp_printf
+ for logging/dumping without complying with the rules for diagnostics. */
+
+#pragma GCC diagnostic ignored "-Wformat-diag"
+
#endif /* GCC_ANALYZER_ANALYZER_H */
Common Joined UInteger Var(param_analyzer_max_recursion_depth) Init(2) Param
The maximum number of times a callsite can appear in a call stack within the analyzer, before terminating analysis of a call that would recurse deeper.
+-param=analyzer-max-svalue-depth=
+Common Joined UInteger Var(param_analyzer_max_svalue_depth) Init(13) Param
+The maximum depth of a symbolic value, before approximating the value as unknown.
+
-param=analyzer-min-snodes-for-call-summary=
Common Joined UInteger Var(param_analyzer_min_snodes_for_call_summary) Init(10) Param
The minimum number of supernodes within a function for the analyzer to consider summarizing its effects at call sites.
+-param=analyzer-max-enodes-for-full-dump=
+Common Joined UInteger Var(param_analyzer_max_enodes_for_full_dump) Init(200) Param
+The maximum depth of exploded nodes that should appear in a dot dump before switching to a less verbose format.
+
Wanalyzer-double-fclose
Common Var(warn_analyzer_double_fclose) Init(1) Warning
Warn about code paths in which a stdio FILE can be closed more than once.
#if ENABLE_ANALYZER
+#pragma GCC diagnostic ignored "-Wformat-diag"
+
/* class call_string. */
/* call_string's copy ctor. */
#include "tristate.h"
#include "ordered-hash-map.h"
#include "selftest.h"
+#include "analyzer/call-string.h"
+#include "analyzer/program-point.h"
+#include "analyzer/store.h"
#include "analyzer/region-model.h"
#include "analyzer/program-state.h"
#include "analyzer/checker-path.h"
#include "analyzer/constraint-manager.h"
#include "analyzer/diagnostic-manager.h"
#include "analyzer/checker-path.h"
-#include "analyzer/call-string.h"
-#include "analyzer/program-point.h"
#include "analyzer/exploded-graph.h"
#if ENABLE_ANALYZER
const gimple *stmt,
int stack_depth,
const state_machine &sm,
- tree var,
+ const svalue *sval,
state_machine::state_t from,
state_machine::state_t to,
- tree origin,
+ const svalue *origin,
const program_state &dst_state)
: checker_event (EK_STATE_CHANGE,
stmt->location, node->m_fun->decl,
stack_depth),
m_node (node), m_stmt (stmt), m_sm (sm),
- m_var (var), m_from (from), m_to (to),
+ m_sval (sval), m_from (from), m_to (to),
m_origin (origin),
m_dst_state (dst_state)
{
{
if (m_pending_diagnostic)
{
+ region_model *model = m_dst_state.m_region_model;
+ tree var = model->get_representative_tree (m_sval);
+ tree origin = model->get_representative_tree (m_origin);
label_text custom_desc
= m_pending_diagnostic->describe_state_change
- (evdesc::state_change (can_colorize, m_var, m_origin,
+ (evdesc::state_change (can_colorize, var, origin,
m_from, m_to, m_emission_id, *this));
if (custom_desc.m_buffer)
{
(can_colorize,
"%s (state of %qE: %qs -> %qs, origin: %qE)",
custom_desc.m_buffer,
- m_var,
+ var,
m_sm.get_state_name (m_from),
m_sm.get_state_name (m_to),
- m_origin);
+ origin);
else
result = make_label_text
(can_colorize,
- "%s (state of %qE: %qs -> %qs, origin: NULL)",
+ "%s (state of %qE: %qs -> %qs, NULL origin)",
custom_desc.m_buffer,
- m_var,
+ var,
m_sm.get_state_name (m_from),
m_sm.get_state_name (m_to));
custom_desc.maybe_free ();
}
/* Fallback description. */
- if (m_var)
+ if (m_sval)
{
+ label_text sval_desc = m_sval->get_desc ();
if (m_origin)
- return make_label_text
- (can_colorize,
- "state of %qE: %qs -> %qs (origin: %qE)",
- m_var,
- m_sm.get_state_name (m_from),
- m_sm.get_state_name (m_to),
- m_origin);
+ {
+ label_text origin_desc = m_origin->get_desc ();
+ return make_label_text
+ (can_colorize,
+ "state of %qs: %qs -> %qs (origin: %qs)",
+ sval_desc.m_buffer,
+ m_sm.get_state_name (m_from),
+ m_sm.get_state_name (m_to),
+ origin_desc.m_buffer);
+ }
else
return make_label_text
(can_colorize,
- "state of %qE: %qs -> %qs (origin: NULL)",
- m_var,
+ "state of %qs: %qs -> %qs (NULL origin)",
+ sval_desc.m_buffer,
m_sm.get_state_name (m_from),
m_sm.get_state_name (m_to));
}
else
{
- gcc_assert (m_origin == NULL_TREE);
+ gcc_assert (m_origin == NULL);
return make_label_text
(can_colorize,
"global state: %qs -> %qs",
tree var, state_machine::state_t state)
{
checker_event *end_of_path
- = new warning_event (stmt->location,
+ = new warning_event (get_stmt_location (stmt, enode->get_function ()),
enode->get_function ()->decl,
enode->get_stack_depth (),
sm, var, state);
state_change_event (const supernode *node, const gimple *stmt,
int stack_depth,
const state_machine &sm,
- tree var,
+ const svalue *sval,
state_machine::state_t from,
state_machine::state_t to,
- tree origin,
+ const svalue *origin,
const program_state &dst_state);
label_text get_desc (bool can_colorize) const FINAL OVERRIDE;
- region_id get_lvalue (tree expr, region_model_context *ctxt) const
+ function *get_dest_function () const
{
- return m_dst_state.m_region_model->get_lvalue (expr, ctxt);
+ return m_dst_state.get_current_function ();
}
const supernode *m_node;
const gimple *m_stmt;
const state_machine &m_sm;
- tree m_var;
+ const svalue *m_sval;
state_machine::state_t m_from;
state_machine::state_t m_to;
- tree m_origin;
+ const svalue *m_origin;
program_state m_dst_state;
};
#include "sbitmap.h"
#include "bitmap.h"
#include "tristate.h"
+#include "analyzer/call-string.h"
+#include "analyzer/program-point.h"
+#include "analyzer/store.h"
#include "analyzer/region-model.h"
#include "analyzer/constraint-manager.h"
#include "analyzer/analyzer-selftests.h"
namespace ana {
-/* One of the end-points of a range. */
-
-struct bound
-{
- bound () : m_constant (NULL_TREE), m_closed (false) {}
- bound (tree constant, bool closed)
- : m_constant (constant), m_closed (closed) {}
-
- void ensure_closed (bool is_upper);
-
- const char * get_relation_as_str () const;
-
- tree m_constant;
- bool m_closed;
-};
-
-/* A range of values, used for determining if a value has been
- constrained to just one possible constant value. */
-
-struct range
+/* Compare the constants LHS_CONST and RHS_CONST under OP by folding
+   "LHS_CONST OP RHS_CONST"; return unknown if the comparison doesn't
+   fold to a boolean constant.  */
+
+static tristate
+compare_constants (tree lhs_const, enum tree_code op, tree rhs_const)
{
-  range () : m_lower_bound (), m_upper_bound () {}
-  range (const bound &lower, const bound &upper)
-  : m_lower_bound (lower), m_upper_bound (upper) {}
-
-  void dump (pretty_printer *pp) const;
-
-  bool constrained_to_single_element (tree *out);
-
-  bound m_lower_bound;
-  bound m_upper_bound;
-};
+  tree comparison
+    = fold_binary (op, boolean_type_node, lhs_const, rhs_const);
+  if (comparison == boolean_true_node)
+    return tristate (tristate::TS_TRUE);
+  if (comparison == boolean_false_node)
+    return tristate (tristate::TS_FALSE);
+  return tristate (tristate::TS_UNKNOWN);
+}
/* struct bound. */
/* Dump this range to PP, which must support %E for tree. */
void
-range::dump (pretty_printer *pp) const
+range::dump_to_pp (pretty_printer *pp) const
+{
+ if (m_lower_bound.m_constant)
+ {
+ if (m_upper_bound.m_constant)
+ pp_printf (pp, "%qE %s x %s %qE",
+ m_lower_bound.m_constant,
+ m_lower_bound.get_relation_as_str (),
+ m_upper_bound.get_relation_as_str (),
+ m_upper_bound.m_constant);
+ else
+ pp_printf (pp, "%qE %s x",
+ m_lower_bound.m_constant,
+ m_lower_bound.get_relation_as_str ());
+ }
+ else
+ {
+ if (m_upper_bound.m_constant)
+ pp_printf (pp, "x %s %qE",
+ m_upper_bound.get_relation_as_str (),
+ m_upper_bound.m_constant);
+ else
+ pp_string (pp, "x");
+ }
+}
+
+/* Dump this range to stderr. */
+
+DEBUG_FUNCTION void
+range::dump () const
{
-  pp_printf (pp, "%qE %s x %s %qE",
-	     m_lower_bound.m_constant,
-	     m_lower_bound.get_relation_as_str (),
-	     m_upper_bound.get_relation_as_str (),
-	     m_upper_bound.m_constant);
+  /* Build a throwaway pretty_printer wired to stderr, installing the
+     tree decoder so that dump_to_pp's %E/%qE directives work.  */
+  pretty_printer pp;
+  pp_format_decoder (&pp) = default_tree_printer;
+  pp_show_color (&pp) = pp_show_color (global_dc->printer);
+  pp.buffer->stream = stderr;
+  dump_to_pp (&pp);
+  pp_newline (&pp);
+  pp_flush (&pp);
}
/* Determine if there is only one possible value for this range.
- If so, return true and write the constant to *OUT.
- Otherwise, return false. */
+ If so, return the constant; otherwise, return NULL_TREE. */
-bool
-range::constrained_to_single_element (tree *out)
+tree
+range::constrained_to_single_element ()
{
+ if (m_lower_bound.m_constant == NULL_TREE
+ || m_upper_bound.m_constant == NULL_TREE)
+ return NULL_TREE;
+
if (!INTEGRAL_TYPE_P (TREE_TYPE (m_lower_bound.m_constant)))
- return false;
+ return NULL_TREE;
if (!INTEGRAL_TYPE_P (TREE_TYPE (m_upper_bound.m_constant)))
- return false;
+ return NULL_TREE;
/* Convert any open bounds to closed bounds. */
m_lower_bound.ensure_closed (false);
m_lower_bound.m_constant,
m_upper_bound.m_constant);
if (comparison == boolean_true_node)
+ return m_lower_bound.m_constant;
+ else
+ return NULL_TREE;
+}
+
+/* Eval the condition "X OP RHS_CONST" for X within the range. */
+
+tristate
+range::eval_condition (enum tree_code op, tree rhs_const) const
+{
+  /* Work on a copy: constrained_to_single_element () closes any open
+     bounds in place, and so is non-const.  */
+  range copy (*this);
+  if (tree single_element = copy.constrained_to_single_element ())
+    return compare_constants (single_element, op, rhs_const);
+
+  switch (op)
    {
-      *out = m_lower_bound.m_constant;
-      return true;
+    case EQ_EXPR:
+      /* Qn: "X == RHS_CONST". */
+      /* If RHS_CONST is outside the range, then it's false.
+	 Otherwise unknown. */
+      if (below_lower_bound (rhs_const))
+	return tristate (tristate::TS_FALSE);
+      if (above_upper_bound (rhs_const))
+	return tristate (tristate::TS_FALSE);
+      break;
+
+    case LT_EXPR:
+    case LE_EXPR:
+      /* Qn: "X </<= RHS_CONST". */
+      /* If RHS_CONST > upper bound, then it's true.
+	 If RHS_CONST < lower bound, then it's false.
+	 Otherwise unknown. */
+      if (above_upper_bound (rhs_const))
+	return tristate (tristate::TS_TRUE);
+      if (below_lower_bound (rhs_const))
+	return tristate (tristate::TS_FALSE);
+      break;
+
+    case NE_EXPR:
+      /* Qn: "X != RHS_CONST". */
+      /* If RHS_CONST < lower bound, then it's true.
+	 If RHS_CONST > upper bound, then it's also true (X can't
+	 equal a value outside the range).
+	 Otherwise unknown. */
+      if (below_lower_bound (rhs_const))
+	return tristate (tristate::TS_TRUE);
+      if (above_upper_bound (rhs_const))
+	return tristate (tristate::TS_TRUE);
+      break;
+
+    case GE_EXPR:
+    case GT_EXPR:
+      /* Qn: "X >=/> RHS_CONST". */
+      /* If RHS_CONST > upper bound, then it's false.
+	 If RHS_CONST < lower bound, then it's true.
+	 Otherwise unknown. */
+      if (above_upper_bound (rhs_const))
+	return tristate (tristate::TS_FALSE);
+      if (below_lower_bound (rhs_const))
+	return tristate (tristate::TS_TRUE);
+      break;
+
+    default:
+      gcc_unreachable ();
+      break;
    }
-  else
+  return tristate (tristate::TS_UNKNOWN);
+}
+
+/* Return true if RHS_CONST is below the lower bound of this range. */
+
+bool
+range::below_lower_bound (tree rhs_const) const
+{
+  /* An absent lower bound means nothing is "below" the range.  */
+  if (!m_lower_bound.m_constant)
    return false;
+
+  /* For a closed bound the bound value itself is inside the range, so
+     "below" means strictly less than it; for an open bound the bound
+     value is also outside, hence less-or-equal.  */
+  return compare_constants (rhs_const,
+			    m_lower_bound.m_closed ? LT_EXPR : LE_EXPR,
+			    m_lower_bound.m_constant).is_true ();
+}
+
+/* Return true if RHS_CONST is above the upper bound of this range. */
+
+bool
+range::above_upper_bound (tree rhs_const) const
+{
+  /* An absent upper bound means nothing is "above" the range.  */
+  if (!m_upper_bound.m_constant)
+    return false;
+
+  /* Mirror of below_lower_bound: strictly greater for a closed bound,
+     greater-or-equal for an open one.  */
+  return compare_constants (rhs_const,
+			    m_upper_bound.m_closed ? GT_EXPR : GE_EXPR,
+			    m_upper_bound.m_constant).is_true ();
}
/* class equiv_class. */
/* equiv_class's default ctor. */
equiv_class::equiv_class ()
-: m_constant (NULL_TREE), m_cst_sid (svalue_id::null ()),
- m_vars ()
+: m_constant (NULL_TREE), m_cst_sval (NULL), m_vars ()
{
}
/* equiv_class's copy ctor. */
equiv_class::equiv_class (const equiv_class &other)
-: m_constant (other.m_constant), m_cst_sid (other.m_cst_sid),
+: m_constant (other.m_constant), m_cst_sval (other.m_cst_sval),
m_vars (other.m_vars.length ())
{
int i;
- svalue_id *sid;
- FOR_EACH_VEC_ELT (other.m_vars, i, sid)
- m_vars.quick_push (*sid);
+ const svalue *sval;
+ FOR_EACH_VEC_ELT (other.m_vars, i, sval)
+ m_vars.quick_push (sval);
}
/* Print an all-on-one-line representation of this equiv_class to PP,
{
pp_character (pp, '{');
int i;
- svalue_id *sid;
- FOR_EACH_VEC_ELT (m_vars, i, sid)
+ const svalue *sval;
+ FOR_EACH_VEC_ELT (m_vars, i, sval)
{
if (i > 0)
pp_string (pp, " == ");
- sid->print (pp);
+ sval->dump_to_pp (pp, true);
}
if (m_constant)
{
if (i > 0)
pp_string (pp, " == ");
- pp_printf (pp, "%qE", m_constant);
+ pp_printf (pp, "[m_constant]%qE", m_constant);
}
pp_character (pp, '}');
}
-/* Generate a hash value for this equiv_class. */
+/* Generate a hash value for this equiv_class.
+ This relies on the ordering of m_vars, and so this object needs to
+ have been canonicalized for this to be meaningful. */
hashval_t
equiv_class::hash () const
{
inchash::hash hstate;
- int i;
- svalue_id *sid;
inchash::add_expr (m_constant, hstate);
- FOR_EACH_VEC_ELT (m_vars, i, sid)
- inchash::add (*sid, hstate);
+ int i;
+ const svalue *sval;
+ FOR_EACH_VEC_ELT (m_vars, i, sval)
+ hstate.add_ptr (sval);
return hstate.end ();
}
-/* Equality operator for equiv_class. */
+/* Equality operator for equiv_class.
+ This relies on the ordering of m_vars, and so this object
+ and OTHER need to have been canonicalized for this to be
+ meaningful. */
bool
equiv_class::operator== (const equiv_class &other)
if (m_constant != other.m_constant)
return false; // TODO: use tree equality here?
- /* FIXME: should we compare m_cst_sid? */
+ /* FIXME: should we compare m_cst_sval? */
if (m_vars.length () != other.m_vars.length ())
return false;
int i;
- svalue_id *sid;
- FOR_EACH_VEC_ELT (m_vars, i, sid)
- if (! (*sid == other.m_vars[i]))
+ const svalue *sval;
+ FOR_EACH_VEC_ELT (m_vars, i, sval)
+ if (sval != other.m_vars[i])
return false;
return true;
-/* Add SID to this equiv_class, using CM to check if it's a constant. */
+/* Add SVAL to this equiv_class, noting it as the class's constant if
+   it is one. */
void
-equiv_class::add (svalue_id sid, const constraint_manager &cm)
+equiv_class::add (const svalue *sval)
{
- gcc_assert (!sid.null_p ());
- if (tree cst = cm.maybe_get_constant (sid))
+ gcc_assert (sval);
+ if (tree cst = sval->maybe_get_constant ())
{
gcc_assert (CONSTANT_CLASS_P (cst));
/* FIXME: should we canonicalize which svalue is the constant
when there are multiple equal constants? */
m_constant = cst;
- m_cst_sid = sid;
+ m_cst_sval = sval;
}
- m_vars.safe_push (sid);
+ m_vars.safe_push (sval);
}
-/* Remove SID from this equivalence class.
+/* Remove SVAL from this equivalence class.
a possible leak). */
bool
-equiv_class::del (svalue_id sid)
+equiv_class::del (const svalue *sval)
{
- gcc_assert (!sid.null_p ());
- gcc_assert (sid != m_cst_sid);
+ gcc_assert (sval);
+ gcc_assert (sval != m_cst_sval);
int i;
- svalue_id *iv;
+ const svalue *iv;
FOR_EACH_VEC_ELT (m_vars, i, iv)
{
- if (*iv == sid)
+ if (iv == sval)
{
m_vars[i] = m_vars[m_vars.length () - 1];
m_vars.pop ();
}
}
- /* SID must be in the class. */
+ /* SVAL must be in the class. */
gcc_unreachable ();
return false;
}
/* Get a representative member of this class, for handling cases
where the IDs can change mid-traversal. */
-svalue_id
+const svalue *
equiv_class::get_representative () const
{
- if (!m_cst_sid.null_p ())
- return m_cst_sid;
- else
- {
- gcc_assert (m_vars.length () > 0);
- return m_vars[0];
- }
-}
-
-/* Remap all svalue_ids within this equiv_class using MAP. */
-
-void
-equiv_class::remap_svalue_ids (const svalue_id_map &map)
-{
- int i;
- svalue_id *iv;
- FOR_EACH_VEC_ELT (m_vars, i, iv)
- map.update (iv);
- map.update (&m_cst_sid);
+ gcc_assert (m_vars.length () > 0);
+ return m_vars[0];
}
/* Comparator for use by equiv_class::canonicalize. */
static int
-svalue_id_cmp_by_id (const void *p1, const void *p2)
+svalue_cmp_by_ptr (const void *p1, const void *p2)
{
-  const svalue_id *sid1 = (const svalue_id *)p1;
-  const svalue_id *sid2 = (const svalue_id *)p2;
-  return sid1->as_int () - sid2->as_int ();
+  const svalue *sval1 = *(const svalue * const *)p1;
+  const svalue *sval2 = *(const svalue * const *)p2;
+  /* Note: this sorts in *descending* pointer order (positive result
+     when sval1 < sval2).
+     NOTE(review): ordering by pointer value makes the canonical order
+     depend on allocation addresses — presumably acceptable within a
+     single run since svalues are consolidated; confirm.  */
+  if (sval1 < sval2)
+    return 1;
+  if (sval1 > sval2)
+    return -1;
+  return 0;
}
-/* Sort the svalues_ids within this equiv_class. */
+/* Sort the svalues within this equiv_class. */
void
equiv_class::canonicalize ()
{
- m_vars.qsort (svalue_id_cmp_by_id);
+ m_vars.qsort (svalue_cmp_by_ptr);
}
/* Get a debug string for C_OP. */
return true;
}
+/* Return true if this constraint is implied by OTHER. */
+
+bool
+constraint::implied_by (const constraint &other,
+			const constraint_manager &cm) const
+{
+  /* Only handle the case where both constraints are on the same LHS
+     equivalence class, that EC is not itself a constant, both RHSes
+     are constants, and the ops match.  */
+  if (m_lhs == other.m_lhs)
+    if (tree rhs_const = m_rhs.get_obj (cm).get_any_constant ())
+      if (tree other_rhs_const = other.m_rhs.get_obj (cm).get_any_constant ())
+	if (m_lhs.get_obj (cm).get_any_constant () == NULL_TREE)
+	  if (m_op == other.m_op)
+	    switch (m_op)
+	      {
+	      default:
+		break;
+	      case CONSTRAINT_LE:
+	      case CONSTRAINT_LT:
+		/* "X </<= A" is implied by "X </<= B" when A >= B.  */
+		if (compare_constants (rhs_const,
+				       GE_EXPR,
+				       other_rhs_const).is_true ())
+		  return true;
+		break;
+	      }
+  return false;
+}
+
/* class equiv_class_id. */
/* Get the underlying equiv_class for this ID from CM. */
constraint_manager::constraint_manager (const constraint_manager &other)
: m_equiv_classes (other.m_equiv_classes.length ()),
- m_constraints (other.m_constraints.length ())
+ m_constraints (other.m_constraints.length ()),
+ m_mgr (other.m_mgr)
{
int i;
equiv_class *ec;
pp_printf (pp, "}");
}
-/* Dump a multiline representation of this constraint_manager to PP
+/* Dump a representation of this constraint_manager to PP
(which must support %E for trees). */
void
-constraint_manager::dump_to_pp (pretty_printer *pp) const
+constraint_manager::dump_to_pp (pretty_printer *pp, bool multiline) const
{
- // TODO
- pp_string (pp, " equiv classes:");
- pp_newline (pp);
+ if (multiline)
+ pp_string (pp, " ");
+ pp_string (pp, "equiv classes:");
+ if (multiline)
+ pp_newline (pp);
+ else
+ pp_string (pp, " {");
int i;
equiv_class *ec;
FOR_EACH_VEC_ELT (m_equiv_classes, i, ec)
{
- pp_string (pp, " ");
+ if (multiline)
+ pp_string (pp, " ");
+ else if (i > 0)
+ pp_string (pp, ", ");
equiv_class_id (i).print (pp);
pp_string (pp, ": ");
ec->print (pp);
- pp_newline (pp);
+ if (multiline)
+ pp_newline (pp);
}
- pp_string (pp, " constraints:");
- pp_newline (pp);
+ if (multiline)
+ pp_string (pp, " ");
+ else
+ pp_string (pp, "}");
+ pp_string (pp, "constraints:");
+ if (multiline)
+ pp_newline (pp);
+ else
+ pp_string (pp, "{");
constraint *c;
FOR_EACH_VEC_ELT (m_constraints, i, c)
{
- pp_printf (pp, " %i: ", i);
+ if (multiline)
+ pp_string (pp, " ");
+ pp_printf (pp, "%i: ", i);
c->print (pp, *this);
- pp_newline (pp);
+ if (multiline)
+ pp_newline (pp);
}
+ if (!multiline)
+ pp_string (pp, "}");
}
/* Dump a multiline representation of this constraint_manager to FP. */
pp_format_decoder (&pp) = default_tree_printer;
pp_show_color (&pp) = pp_show_color (global_dc->printer);
pp.buffer->stream = fp;
- dump_to_pp (&pp);
+ dump_to_pp (&pp, true);
pp_flush (&pp);
}
Return false if the constraint contradicts existing knowledge. */
bool
-constraint_manager::add_constraint (svalue_id lhs,
- enum tree_code op,
- svalue_id rhs)
+constraint_manager::add_constraint (const svalue *lhs,
+ enum tree_code op,
+ const svalue *rhs)
{
+ lhs = lhs->unwrap_any_unmergeable ();
+ rhs = rhs->unwrap_any_unmergeable ();
+
+ /* Nothing can be known about unknown values. */
+ if (lhs->get_kind () == SK_UNKNOWN
+ || rhs->get_kind () == SK_UNKNOWN)
+ /* Not a contradiction. */
+ return true;
+
+ /* Check the conditions on svalues. */
+ {
+ tristate t_cond = eval_condition (lhs, op, rhs);
+
+ /* If we already have the condition, do nothing. */
+ if (t_cond.is_true ())
+ return true;
+
+ /* Reject a constraint that would contradict existing knowledge, as
+ unsatisfiable. */
+ if (t_cond.is_false ())
+ return false;
+ }
+
equiv_class_id lhs_ec_id = get_or_add_equiv_class (lhs);
equiv_class_id rhs_ec_id = get_or_add_equiv_class (rhs);
- return add_constraint (lhs_ec_id, op,rhs_ec_id);
+
+ /* Check the stronger conditions on ECs. */
+ {
+ tristate t = eval_condition (lhs_ec_id, op, rhs_ec_id);
+
+ /* Discard constraints that are already known. */
+ if (t.is_true ())
+ return true;
+
+ /* Reject unsatisfiable constraints. */
+ if (t.is_false ())
+ return false;
+ }
+
+ add_unknown_constraint (lhs_ec_id, op, rhs_ec_id);
+ return true;
}
/* Attempt to add the constraint LHS_EC_ID OP RHS_EC_ID to this
bool
constraint_manager::add_constraint (equiv_class_id lhs_ec_id,
- enum tree_code op,
- equiv_class_id rhs_ec_id)
+ enum tree_code op,
+ equiv_class_id rhs_ec_id)
{
tristate t = eval_condition (lhs_ec_id, op, rhs_ec_id);
if (t.is_false ())
return false;
+ add_unknown_constraint (lhs_ec_id, op, rhs_ec_id);
+ return true;
+}
+
+/* Add the constraint LHS_EC_ID OP RHS_EC_ID to this constraint_manager,
+ where the constraint has already been checked for being "unknown". */
+
+void
+constraint_manager::add_unknown_constraint (equiv_class_id lhs_ec_id,
+ enum tree_code op,
+ equiv_class_id rhs_ec_id)
+{
gcc_assert (lhs_ec_id != rhs_ec_id);
/* For now, simply accumulate constraints, without attempting any further
const equiv_class &rhs_ec_obj = rhs_ec_id.get_obj (*this);
int i;
- svalue_id *sid;
- FOR_EACH_VEC_ELT (rhs_ec_obj.m_vars, i, sid)
- lhs_ec_obj.add (*sid, *this);
+ const svalue *sval;
+ FOR_EACH_VEC_ELT (rhs_ec_obj.m_vars, i, sval)
+ lhs_ec_obj.add (sval);
if (rhs_ec_obj.m_constant)
{
lhs_ec_obj.m_constant = rhs_ec_obj.m_constant;
- lhs_ec_obj.m_cst_sid = rhs_ec_obj.m_cst_sid;
+ lhs_ec_obj.m_cst_sval = rhs_ec_obj.m_cst_sval;
}
/* Drop rhs equivalence class, overwriting it with the
if (c->m_rhs == final_ec_id)
c->m_rhs = rhs_ec_id;
}
+
+ /* We may now have self-comparisons due to the merger; these
+ constraints should be removed. */
+ unsigned read_index, write_index;
+ VEC_ORDERED_REMOVE_IF (m_constraints, read_index, write_index, c,
+ (c->m_lhs == c->m_rhs));
}
break;
case GE_EXPR:
break;
}
validate ();
- return true;
}
/* Subroutine of constraint_manager::add_constraint, for handling all
void
constraint_manager::add_constraint_internal (equiv_class_id lhs_id,
- enum constraint_op c_op,
- equiv_class_id rhs_id)
+ enum constraint_op c_op,
+ equiv_class_id rhs_id)
{
+ constraint new_c (lhs_id, c_op, rhs_id);
+
+ /* Remove existing constraints that would be implied by the
+ new constraint. */
+ unsigned read_index, write_index;
+ constraint *c;
+ VEC_ORDERED_REMOVE_IF (m_constraints, read_index, write_index, c,
+ (c->implied_by (new_c, *this)));
+
/* Add the constraint. */
- m_constraints.safe_push (constraint (lhs_id, c_op, rhs_id));
+ m_constraints.safe_push (new_c);
if (!flag_analyzer_transitivity)
return;
/* The following can potentially add EQ_EXPR facts, which could lead
to ECs being merged, which would change the meaning of the EC IDs.
Hence we need to do this via representatives. */
- svalue_id lhs = lhs_id.get_obj (*this).get_representative ();
- svalue_id rhs = rhs_id.get_obj (*this).get_representative ();
+ const svalue *lhs = lhs_id.get_obj (*this).get_representative ();
+ const svalue *rhs = rhs_id.get_obj (*this).get_representative ();
/* We have LHS </<= RHS */
range r (bound (lhs_const, c_op == CONSTRAINT_LE),
bound (other_rhs_const,
other->m_op == CONSTRAINT_LE));
- tree constant;
- if (r.constrained_to_single_element (&constant))
+ if (tree constant = r.constrained_to_single_element ())
{
- svalue_id cst_sid = get_sid_for_constant (constant);
+ const svalue *cst_sval
+ = m_mgr->get_or_create_constant_svalue (constant);
add_constraint
(rhs_id, EQ_EXPR,
- get_or_add_equiv_class (cst_sid));
+ get_or_add_equiv_class (cst_sval));
return;
}
}
other->m_op == CONSTRAINT_LE),
bound (rhs_const,
c_op == CONSTRAINT_LE));
- tree constant;
- if (r.constrained_to_single_element (&constant))
+ if (tree constant = r.constrained_to_single_element ())
{
- svalue_id cst_sid = get_sid_for_constant (constant);
+ const svalue *cst_sval
+ = m_mgr->get_or_create_constant_svalue (constant);
add_constraint
(lhs_id, EQ_EXPR,
- get_or_add_equiv_class (cst_sid));
+ get_or_add_equiv_class (cst_sval));
return;
}
}
}
}
-/* Look for SID within the equivalence classes of this constraint_manager;
- if found, write the id to *OUT and return true, otherwise return false. */
+/* Look for SVAL within the equivalence classes of this constraint_manager;
+ if found, return true, writing the id to *OUT if OUT is non-NULL,
+ otherwise return false. */
bool
-constraint_manager::get_equiv_class_by_sid (svalue_id sid, equiv_class_id *out) const
+constraint_manager::get_equiv_class_by_svalue (const svalue *sval,
+ equiv_class_id *out) const
{
/* TODO: should we have a map, rather than these searches? */
int i;
FOR_EACH_VEC_ELT (m_equiv_classes, i, ec)
{
int j;
- svalue_id *iv;
+ const svalue *iv;
FOR_EACH_VEC_ELT (ec->m_vars, j, iv)
- if (*iv == sid)
+ if (iv == sval)
{
- *out = equiv_class_id (i);
+ if (out)
+ *out = equiv_class_id (i);
return true;
}
}
return false;
}
-/* Ensure that SID has an equivalence class within this constraint_manager;
+/* Ensure that SVAL has an equivalence class within this constraint_manager;
return the ID of the class. */
equiv_class_id
-constraint_manager::get_or_add_equiv_class (svalue_id sid)
+constraint_manager::get_or_add_equiv_class (const svalue *sval)
{
equiv_class_id result (-1);
- /* Try svalue_id match. */
- if (get_equiv_class_by_sid (sid, &result))
+ gcc_assert (sval->get_kind () != SK_UNKNOWN);
+
+ /* Convert all NULL pointers to (void *) to avoid state explosions
+ involving all of the various (foo *)NULL vs (bar *)NULL. */
+ if (POINTER_TYPE_P (sval->get_type ()))
+ if (tree cst = sval->maybe_get_constant ())
+ if (zerop (cst))
+ sval = m_mgr->get_or_create_constant_svalue (null_pointer_node);
+
+ /* Try svalue match. */
+ if (get_equiv_class_by_svalue (sval, &result))
return result;
/* Try equality of constants. */
- if (tree cst = maybe_get_constant (sid))
+ if (tree cst = sval->maybe_get_constant ())
{
int i;
equiv_class *ec;
cst, ec->m_constant);
if (eq == boolean_true_node)
{
- ec->add (sid, *this);
+ ec->add (sval);
return equiv_class_id (i);
}
}
/* Not found. */
equiv_class *new_ec = new equiv_class ();
- new_ec->add (sid, *this);
+ new_ec->add (sval);
m_equiv_classes.safe_push (new_ec);
equiv_class_id new_id (m_equiv_classes.length () - 1);
- if (maybe_get_constant (sid))
+ if (sval->maybe_get_constant ())
{
/* If we have a new EC for a constant, add constraints comparing this
to other constants we may have (so that we accumulate the transitive
add_constraint_internal (new_id, CONSTRAINT_LT, other_id);
else if (lt == boolean_false_node)
add_constraint_internal (other_id, CONSTRAINT_LT, new_id);
- /* Refresh new_id, in case ECs were merged. SID should always
+ /* Refresh new_id, in case ECs were merged. SVAL should always
be present by now, so this should never lead to a
recursion. */
- new_id = get_or_add_equiv_class (sid);
+ new_id = get_or_add_equiv_class (sval);
}
}
}
tristate
constraint_manager::eval_condition (equiv_class_id lhs_ec,
enum tree_code op,
- equiv_class_id rhs_ec)
+ equiv_class_id rhs_ec) const
{
if (lhs_ec == rhs_ec)
{
tree rhs_const = rhs_ec.get_obj (*this).get_any_constant ();
if (lhs_const && rhs_const)
{
- tree comparison
- = fold_binary (op, boolean_type_node, lhs_const, rhs_const);
- if (comparison == boolean_true_node)
- return tristate (tristate::TS_TRUE);
- if (comparison == boolean_false_node)
- return tristate (tristate::TS_FALSE);
+ tristate result_for_constants
+ = compare_constants (lhs_const, op, rhs_const);
+ if (result_for_constants.is_known ())
+ return result_for_constants;
}
enum tree_code swapped_op = swap_tree_comparison (op);
return tristate (tristate::TS_UNKNOWN);
}
-/* Evaluate the condition LHS OP RHS, creating equiv_class instances for
- LHS and RHS if they aren't already in equiv_classes. */
+/* Return the range of possible values of EC_ID, based on those
+ constraints that compare it against constants. */
+
+range
+constraint_manager::get_ec_bounds (equiv_class_id ec_id) const
+{
+ range result;
+
+ int i;
+ constraint *c;
+ FOR_EACH_VEC_ELT (m_constraints, i, c)
+ {
+ if (c->m_lhs == ec_id)
+ {
+ if (tree other_cst = c->m_rhs.get_obj (*this).get_any_constant ())
+ switch (c->m_op)
+ {
+ default:
+ gcc_unreachable ();
+ case CONSTRAINT_NE:
+ continue;
+
+ case CONSTRAINT_LT:
+ /* We have "EC_ID < OTHER_CST". */
+ result.m_upper_bound = bound (other_cst, false);
+ break;
+
+ case CONSTRAINT_LE:
+ /* We have "EC_ID <= OTHER_CST". */
+ result.m_upper_bound = bound (other_cst, true);
+ break;
+ }
+ }
+ if (c->m_rhs == ec_id)
+ {
+ if (tree other_cst = c->m_lhs.get_obj (*this).get_any_constant ())
+ switch (c->m_op)
+ {
+ default:
+ gcc_unreachable ();
+ case CONSTRAINT_NE:
+ continue;
+
+ case CONSTRAINT_LT:
+ /* We have "OTHER_CST < EC_ID"
+ i.e. "EC_ID > OTHER_CST". */
+ result.m_lower_bound = bound (other_cst, false);
+ break;
+
+ case CONSTRAINT_LE:
+ /* We have "OTHER_CST <= EC_ID"
+ i.e. "EC_ID >= OTHER_CST". */
+ result.m_lower_bound = bound (other_cst, true);
+ break;
+ }
+ }
+ }
+
+ return result;
+}
+
+/* Evaluate the condition LHS_EC OP RHS_CONST, avoiding the creation
+ of equiv_class instances. */
tristate
-constraint_manager::eval_condition (svalue_id lhs,
+constraint_manager::eval_condition (equiv_class_id lhs_ec,
enum tree_code op,
- svalue_id rhs)
+ tree rhs_const) const
{
- return eval_condition (get_or_add_equiv_class (lhs),
- op,
- get_or_add_equiv_class (rhs));
+ gcc_assert (!lhs_ec.null_p ());
+ gcc_assert (CONSTANT_CLASS_P (rhs_const));
+
+ if (tree lhs_const = lhs_ec.get_obj (*this).get_any_constant ())
+ return compare_constants (lhs_const, op, rhs_const);
+
+ /* Check for known inequalities of the form
+ (LHS_EC != OTHER_CST) or (OTHER_CST != LHS_EC).
+ If RHS_CONST == OTHER_CST, then we also know that LHS_EC != RHS_CONST.
+ For example, we might have the constraint
+ ptr != (void *)0
+ so we want the condition
+ ptr == (foo *)0
+ to be false. */
+ int i;
+ constraint *c;
+ FOR_EACH_VEC_ELT (m_constraints, i, c)
+ {
+ if (c->m_op == CONSTRAINT_NE)
+ {
+ if (c->m_lhs == lhs_ec)
+ {
+ if (tree other_cst = c->m_rhs.get_obj (*this).get_any_constant ())
+ if (compare_constants
+ (rhs_const, EQ_EXPR, other_cst).is_true ())
+ {
+ switch (op)
+ {
+ case EQ_EXPR:
+ return tristate (tristate::TS_FALSE);
+ case NE_EXPR:
+ return tristate (tristate::TS_TRUE);
+ default:
+ break;
+ }
+ }
+ }
+ if (c->m_rhs == lhs_ec)
+ {
+ if (tree other_cst = c->m_lhs.get_obj (*this).get_any_constant ())
+ if (compare_constants
+ (rhs_const, EQ_EXPR, other_cst).is_true ())
+ {
+ switch (op)
+ {
+ case EQ_EXPR:
+ return tristate (tristate::TS_FALSE);
+ case NE_EXPR:
+ return tristate (tristate::TS_TRUE);
+ default:
+ break;
+ }
+ }
+ }
+ }
+ }
+ /* Look at existing bounds on LHS_EC. */
+ range lhs_bounds = get_ec_bounds (lhs_ec);
+ return lhs_bounds.eval_condition (op, rhs_const);
}
-/* Delete any information about svalue_id instances identified by P.
+/* Evaluate the condition LHS OP RHS, without modifying this
+ constraint_manager (avoiding the creation of equiv_class instances). */
+
+tristate
+constraint_manager::eval_condition (const svalue *lhs,
+ enum tree_code op,
+ const svalue *rhs) const
+{
+ lhs = lhs->unwrap_any_unmergeable ();
+ rhs = rhs->unwrap_any_unmergeable ();
+
+ /* Nothing can be known about unknown or poisoned values. */
+ if (lhs->get_kind () == SK_UNKNOWN
+ || lhs->get_kind () == SK_POISONED
+ || rhs->get_kind () == SK_UNKNOWN
+ || rhs->get_kind () == SK_POISONED)
+ return tristate (tristate::TS_UNKNOWN);
+
+ if (lhs == rhs
+ && !(FLOAT_TYPE_P (lhs->get_type ())
+ || FLOAT_TYPE_P (rhs->get_type ())))
+ {
+ switch (op)
+ {
+ case EQ_EXPR:
+ case GE_EXPR:
+ case LE_EXPR:
+ return tristate (tristate::TS_TRUE);
+
+ case NE_EXPR:
+ case GT_EXPR:
+ case LT_EXPR:
+ return tristate (tristate::TS_FALSE);
+ default:
+ break;
+ }
+ }
+
+ equiv_class_id lhs_ec (-1);
+ equiv_class_id rhs_ec (-1);
+ get_equiv_class_by_svalue (lhs, &lhs_ec);
+ get_equiv_class_by_svalue (rhs, &rhs_ec);
+ if (!lhs_ec.null_p () && !rhs_ec.null_p ())
+ {
+ tristate result_for_ecs
+ = eval_condition (lhs_ec, op, rhs_ec);
+ if (result_for_ecs.is_known ())
+ return result_for_ecs;
+ }
+
+ /* If at least one is not in an EC, we have no constraints
+ comparing LHS and RHS yet.
+ They might still be comparable if one (or both) is a constant.
+
+ Alternatively, we can also get here if we had ECs but they weren't
+ comparable. Again, constant comparisons might give an answer. */
+ tree lhs_const = lhs->maybe_get_constant ();
+ tree rhs_const = rhs->maybe_get_constant ();
+ if (lhs_const && rhs_const)
+ {
+ tristate result_for_constants
+ = compare_constants (lhs_const, op, rhs_const);
+ if (result_for_constants.is_known ())
+ return result_for_constants;
+ }
+
+ if (!lhs_ec.null_p ())
+ {
+ if (rhs_const)
+ return eval_condition (lhs_ec, op, rhs_const);
+ }
+ if (!rhs_ec.null_p ())
+ {
+ if (lhs_const)
+ {
+ enum tree_code swapped_op = swap_tree_comparison (op);
+ return eval_condition (rhs_ec, swapped_op, lhs_const);
+ }
+ }
+
+ return tristate (tristate::TS_UNKNOWN);
+}
+
+/* Delete any information about svalues identified by P.
Such instances are removed from equivalence classes, and any
redundant ECs and constraints are also removed.
Accumulate stats into STATS. */
+template <typename PurgeCriteria>
void
-constraint_manager::purge (const purge_criteria &p, purge_stats *stats)
+constraint_manager::purge (const PurgeCriteria &p, purge_stats *stats)
{
- /* Delete any svalue_ids identified by P within the various equivalence
+ /* Delete any svalues identified by P within the various equivalence
classes. */
for (unsigned ec_idx = 0; ec_idx < m_equiv_classes.length (); )
{
equiv_class *ec = m_equiv_classes[ec_idx];
int i;
- svalue_id *pv;
+ const svalue *sval;
bool delete_ec = false;
- FOR_EACH_VEC_ELT (ec->m_vars, i, pv)
+ FOR_EACH_VEC_ELT (ec->m_vars, i, sval)
{
- if (*pv == ec->m_cst_sid)
+ if (sval == ec->m_cst_sval)
continue;
- if (p.should_purge_p (*pv))
+ if (p.should_purge_p (sval))
{
- if (ec->del (*pv))
+ if (ec->del (sval))
if (!ec->m_constant)
delete_ec = true;
}
validate ();
}
-/* Remap all svalue_ids within this constraint_manager using MAP. */
+/* Implementation of PurgeCriteria: purge svalues that are not live
+ with respect to LIVE_SVALUES and MODEL. */
+
+class dead_svalue_purger
+{
+public:
+ dead_svalue_purger (const svalue_set &live_svalues,
+ const region_model *model)
+ : m_live_svalues (live_svalues), m_model (model)
+ {
+ }
+
+ bool should_purge_p (const svalue *sval) const
+ {
+ return !sval->live_p (m_live_svalues, m_model);
+ }
+
+private:
+ const svalue_set &m_live_svalues;
+ const region_model *m_model;
+};
+
+/* Purge dead svalues from equivalence classes and update constraints
+ accordingly. */
void
-constraint_manager::remap_svalue_ids (const svalue_id_map &map)
+constraint_manager::
+on_liveness_change (const svalue_set &live_svalues,
+ const region_model *model)
{
- int i;
- equiv_class *ec;
- FOR_EACH_VEC_ELT (m_equiv_classes, i, ec)
- ec->remap_svalue_ids (map);
+ dead_svalue_purger p (live_svalues, model);
+ purge (p, NULL);
}
/* Comparator for use by constraint_manager::canonicalize.
Sort a pair of equiv_class instances, using the representative
- svalue_id as a sort key. */
+ svalue as a sort key.
+ NOTE(review): this compares raw svalue pointers, so the resulting
+ "canonical" order is not stable from run to run — confirm that no
+ dump/comparison output depends on this ordering. */
static int
equiv_class_cmp (const void *p1, const void *p2)
const equiv_class *ec1 = *(const equiv_class * const *)p1;
const equiv_class *ec2 = *(const equiv_class * const *)p2;
- svalue_id rep1 = ec1->get_representative ();
- svalue_id rep2 = ec2->get_representative ();
+ const svalue *rep1 = ec1->get_representative ();
+ const svalue *rep2 = ec2->get_representative ();
+
+ gcc_assert (rep1);
+ gcc_assert (rep2);
- return rep1.as_int () - rep2.as_int ();
+ if (rep1 < rep2)
+ return 1;
+ if (rep1 > rep2)
+ return -1;
+ return 0;
}
/* Comparator for use by constraint_manager::canonicalize.
return c1->m_op - c2->m_op;
}
-/* Reorder the equivalence classes and constraints within this
- constraint_manager into a canonical order, to increase the
+/* Purge redundant equivalence classes and constraints, and reorder them
+ within this constraint_manager into a canonical order, to increase the
chances of finding equality with another instance. */
void
-constraint_manager::canonicalize (unsigned num_svalue_ids)
+constraint_manager::canonicalize ()
{
- /* First, sort svalue_ids within the ECs. */
+ /* First, sort svalues within the ECs. */
unsigned i;
equiv_class *ec;
FOR_EACH_VEC_ELT (m_equiv_classes, i, ec)
ec->canonicalize ();
- /* Next, sort the ECs into a canonical order. */
+ /* TODO: remove constraints where both sides have a constant, and are
+ thus implicit. But does this break transitivity? */
- /* We will need to remap the equiv_class_ids in the constraints,
+ /* We will be purging and reordering ECs.
+ We will need to remap the equiv_class_ids in the constraints,
so we need to store the original index of each EC.
- Build a lookup table, mapping from representative svalue_id
- to the original equiv_class_id of that svalue_id. */
- auto_vec<equiv_class_id> original_ec_id (num_svalue_ids);
- for (i = 0; i < num_svalue_ids; i++)
- original_ec_id.quick_push (equiv_class_id::null ());
+ Build a lookup table, mapping from the representative svalue
+ to the original equiv_class_id of that svalue. */
+ hash_map<const svalue *, equiv_class_id> original_ec_id;
+ const unsigned orig_num_equiv_classes = m_equiv_classes.length ();
FOR_EACH_VEC_ELT (m_equiv_classes, i, ec)
{
- svalue_id rep = ec->get_representative ();
- gcc_assert (!rep.null_p ());
- original_ec_id[rep.as_int ()] = i;
+ const svalue *rep = ec->get_representative ();
+ gcc_assert (rep);
+ original_ec_id.put (rep, i);
}
- /* Sort the equivalence classes. */
+ /* Find ECs used by constraints. */
+ hash_set<const equiv_class *> used_ecs;
+ constraint *c;
+ FOR_EACH_VEC_ELT (m_constraints, i, c)
+ {
+ used_ecs.add (m_equiv_classes[c->m_lhs.as_int ()]);
+ used_ecs.add (m_equiv_classes[c->m_rhs.as_int ()]);
+ }
+
+ /* Purge unused ECs: those that aren't used by constraints and
+ that effectively have only one svalue (either in m_constant
+ or in m_vars). */
+ {
+ /* "unordered remove if" from a vec. */
+ unsigned i = 0;
+ while (i < m_equiv_classes.length ())
+ {
+ equiv_class *ec = m_equiv_classes[i];
+ if (!used_ecs.contains (ec)
+ && ((ec->m_vars.length () < 2 && ec->m_constant == NULL_TREE)
+ || (ec->m_vars.length () == 0)))
+ {
+ m_equiv_classes.unordered_remove (i);
+ delete ec;
+ }
+ else
+ i++;
+ }
+ }
+
+ /* Next, sort the surviving ECs into a canonical order. */
m_equiv_classes.qsort (equiv_class_cmp);
/* Populate ec_id_map based on the old vs new EC ids. */
- one_way_id_map<equiv_class_id> ec_id_map (m_equiv_classes.length ());
+ one_way_id_map<equiv_class_id> ec_id_map (orig_num_equiv_classes);
FOR_EACH_VEC_ELT (m_equiv_classes, i, ec)
{
- svalue_id rep = ec->get_representative ();
- ec_id_map.put (original_ec_id[rep.as_int ()], i);
+ const svalue *rep = ec->get_representative ();
+ gcc_assert (rep);
+ ec_id_map.put (*original_ec_id.get (rep), i);
}
- /* Update the EC ids within the constraints. */
- constraint *c;
+ /* Use ec_id_map to update the EC ids within the constraints. */
FOR_EACH_VEC_ELT (m_constraints, i, c)
{
ec_id_map.update (&c->m_lhs);
m_constraints.qsort (constraint_cmp);
}
-/* A concrete subclass of constraint_manager for use when
- merging two constraint_manager into a third constraint_manager,
- each of which has its own region_model.
- Calls are delegated to the constraint_manager for the merged model,
- and thus affect its region_model. */
-
-class cleaned_constraint_manager : public constraint_manager
-{
-public:
- cleaned_constraint_manager (constraint_manager *merged) : m_merged (merged) {}
-
- constraint_manager *clone (region_model *) const FINAL OVERRIDE
- {
- gcc_unreachable ();
- }
- tree maybe_get_constant (svalue_id sid) const FINAL OVERRIDE
- {
- return m_merged->maybe_get_constant (sid);
- }
- svalue_id get_sid_for_constant (tree cst) const FINAL OVERRIDE
- {
- return m_merged->get_sid_for_constant (cst);
- }
- virtual int get_num_svalues () const FINAL OVERRIDE
- {
- return m_merged->get_num_svalues ();
- }
-private:
- constraint_manager *m_merged;
-};
-
/* Concrete subclass of fact_visitor for use by constraint_manager::merge.
For every fact in CM_A, see if it is also true in *CM_B. Add such
facts to *OUT. */
class merger_fact_visitor : public fact_visitor
{
public:
- merger_fact_visitor (constraint_manager *cm_b,
- constraint_manager *out)
- : m_cm_b (cm_b), m_out (out)
+ merger_fact_visitor (const constraint_manager *cm_b,
+ constraint_manager *out,
+ const model_merger &merger)
+ : m_cm_b (cm_b), m_out (out), m_merger (merger)
{}
- void on_fact (svalue_id lhs, enum tree_code code, svalue_id rhs)
+ void on_fact (const svalue *lhs, enum tree_code code, const svalue *rhs)
FINAL OVERRIDE
{
+ /* Special-case for widening. */
+ if (lhs->get_kind () == SK_WIDENING)
+ if (!m_cm_b->get_equiv_class_by_svalue (lhs, NULL))
+ {
+ /* LHS isn't constrained within m_cm_b. */
+ bool sat = m_out->add_constraint (lhs, code, rhs);
+ gcc_assert (sat);
+ return;
+ }
+
if (m_cm_b->eval_condition (lhs, code, rhs).is_true ())
{
bool sat = m_out->add_constraint (lhs, code, rhs);
}
private:
- constraint_manager *m_cm_b;
+ const constraint_manager *m_cm_b;
constraint_manager *m_out;
+ const model_merger &m_merger;
};
/* Use MERGER to merge CM_A and CM_B into *OUT.
constraint_manager *out,
const model_merger &merger)
{
- gcc_assert (merger.m_sid_mapping);
-
- /* Map svalue_ids in each equiv class from both sources
- to the merged region_model, dropping ids that don't survive merger,
- and potentially creating svalues in *OUT for constants. */
- cleaned_constraint_manager cleaned_cm_a (out);
- const one_way_svalue_id_map &map_a_to_m
- = merger.m_sid_mapping->m_map_from_a_to_m;
- clean_merger_input (cm_a, map_a_to_m, &cleaned_cm_a);
-
- cleaned_constraint_manager cleaned_cm_b (out);
- const one_way_svalue_id_map &map_b_to_m
- = merger.m_sid_mapping->m_map_from_b_to_m;
- clean_merger_input (cm_b, map_b_to_m, &cleaned_cm_b);
-
- /* At this point, the two cleaned CMs have ECs and constraints referring
- to svalues in the merged region model, but both of them have separate
- ECs. */
-
/* Merge the equivalence classes and constraints.
The easiest way to do this seems to be to enumerate all of the facts
- in cleaned_cm_a, see which are also true in cleaned_cm_b,
+ in cm_a, see which are also true in cm_b,
and add those to *OUT. */
- merger_fact_visitor v (&cleaned_cm_b, out);
- cleaned_cm_a.for_each_fact (&v);
-}
-
-/* A subroutine of constraint_manager::merge.
- Use MAP_SID_TO_M to map equivalence classes and constraints from
- SM_IN to *OUT. Purge any non-constant svalue_id that don't appear
- in the result of MAP_SID_TO_M, purging any ECs and their constraints
- that become empty as a result. Potentially create svalues in
- the merged region_model for constants that weren't already in use there. */
-
-void
-constraint_manager::
-clean_merger_input (const constraint_manager &cm_in,
- const one_way_svalue_id_map &map_sid_to_m,
- constraint_manager *out)
-{
- one_way_id_map<equiv_class_id> map_ec_to_m
- (cm_in.m_equiv_classes.length ());
- unsigned ec_idx;
- equiv_class *ec;
- FOR_EACH_VEC_ELT (cm_in.m_equiv_classes, ec_idx, ec)
- {
- equiv_class cleaned_ec;
- if (tree cst = ec->get_any_constant ())
- {
- cleaned_ec.m_constant = cst;
- /* Lazily create the constant in the out region_model. */
- cleaned_ec.m_cst_sid = out->get_sid_for_constant (cst);
- }
- unsigned var_idx;
- svalue_id *var_in_sid;
- FOR_EACH_VEC_ELT (ec->m_vars, var_idx, var_in_sid)
- {
- svalue_id var_m_sid = map_sid_to_m.get_dst_for_src (*var_in_sid);
- if (!var_m_sid.null_p ())
- cleaned_ec.m_vars.safe_push (var_m_sid);
- }
- if (cleaned_ec.get_any_constant () || !cleaned_ec.m_vars.is_empty ())
- {
- map_ec_to_m.put (ec_idx, out->m_equiv_classes.length ());
- out->m_equiv_classes.safe_push (new equiv_class (cleaned_ec));
- }
- }
-
- /* Write out to *OUT any constraints for which both sides survived
- cleaning, using the new EC IDs. */
- unsigned con_idx;
- constraint *c;
- FOR_EACH_VEC_ELT (cm_in.m_constraints, con_idx, c)
- {
- equiv_class_id new_lhs = map_ec_to_m.get_dst_for_src (c->m_lhs);
- if (new_lhs.null_p ())
- continue;
- equiv_class_id new_rhs = map_ec_to_m.get_dst_for_src (c->m_rhs);
- if (new_rhs.null_p ())
- continue;
- out->m_constraints.safe_push (constraint (new_lhs,
- c->m_op,
- new_rhs));
- }
+ merger_fact_visitor v (&cm_b, out, merger);
+ cm_a.for_each_fact (&v);
}
/* Call VISITOR's on_fact vfunc repeatedly to express the various
equiv_class *ec;
FOR_EACH_VEC_ELT (m_equiv_classes, ec_idx, ec)
{
- if (!ec->m_cst_sid.null_p ())
+ if (ec->m_cst_sval)
{
unsigned i;
- svalue_id *sid;
- FOR_EACH_VEC_ELT (ec->m_vars, i, sid)
- visitor->on_fact (ec->m_cst_sid, EQ_EXPR, *sid);
+ const svalue *sval;
+ FOR_EACH_VEC_ELT (ec->m_vars, i, sval)
+ visitor->on_fact (ec->m_cst_sval, EQ_EXPR, sval);
}
for (unsigned i = 0; i < ec->m_vars.length (); i++)
for (unsigned j = i + 1; j < ec->m_vars.length (); j++)
const equiv_class &ec_rhs = c->m_rhs.get_obj (*this);
enum tree_code code = constraint_tree_code (c->m_op);
- if (!ec_lhs.m_cst_sid.null_p ())
+ if (ec_lhs.m_cst_sval)
{
for (unsigned j = 0; j < ec_rhs.m_vars.length (); j++)
{
- visitor->on_fact (ec_lhs.m_cst_sid, code, ec_rhs.m_vars[j]);
+ visitor->on_fact (ec_lhs.m_cst_sval, code, ec_rhs.m_vars[j]);
}
}
for (unsigned i = 0; i < ec_lhs.m_vars.length (); i++)
{
- if (!ec_rhs.m_cst_sid.null_p ())
- visitor->on_fact (ec_lhs.m_vars[i], code, ec_rhs.m_cst_sid);
+ if (ec_rhs.m_cst_sval)
+ visitor->on_fact (ec_lhs.m_vars[i], code, ec_rhs.m_cst_sval);
for (unsigned j = 0; j < ec_rhs.m_vars.length (); j++)
visitor->on_fact (ec_lhs.m_vars[i], code, ec_rhs.m_vars[j]);
}
gcc_assert (ec);
int j;
- svalue_id *sid;
- FOR_EACH_VEC_ELT (ec->m_vars, j, sid)
- {
- gcc_assert (!sid->null_p ());
- gcc_assert (sid->as_int () < get_num_svalues ());
- }
+ const svalue *sval;
+ FOR_EACH_VEC_ELT (ec->m_vars, j, sval)
+ gcc_assert (sval);
if (ec->m_constant)
{
gcc_assert (CONSTANT_CLASS_P (ec->m_constant));
- gcc_assert (!ec->m_cst_sid.null_p ());
- gcc_assert (ec->m_cst_sid.as_int () < get_num_svalues ());
+ gcc_assert (ec->m_cst_sval);
}
#if 0
else
/* Various constraint_manager selftests.
These have to be written in terms of a region_model, since
- the latter is responsible for managing svalue and svalue_id
- instances. */
+ the latter is responsible for managing svalue instances. */
/* Verify that setting and getting simple conditions within a region_model
work (thus exercising the underlying constraint_manager). */
/* Self-comparisons. */
{
- region_model model;
+ region_model_manager mgr;
+ region_model model (&mgr);
ASSERT_CONDITION_TRUE (model, x, EQ_EXPR, x);
ASSERT_CONDITION_TRUE (model, x, LE_EXPR, x);
ASSERT_CONDITION_TRUE (model, x, GE_EXPR, x);
ASSERT_CONDITION_FALSE (model, x, GT_EXPR, x);
}
+ /* Adding self-equality shouldn't add equiv classes. */
+ {
+ region_model_manager mgr;
+ region_model model (&mgr);
+ ADD_SAT_CONSTRAINT (model, x, EQ_EXPR, x);
+ ADD_SAT_CONSTRAINT (model, int_42, EQ_EXPR, int_42);
+ /* ...even when done directly via svalues: */
+ const svalue *sval_int_42 = model.get_rvalue (int_42, NULL);
+ bool sat = model.get_constraints ()->add_constraint (sval_int_42,
+ EQ_EXPR,
+ sval_int_42);
+ ASSERT_TRUE (sat);
+ ASSERT_EQ (model.get_constraints ()->m_equiv_classes.length (), 0);
+ }
+
/* x == y. */
{
- region_model model;
+ region_model_manager mgr;
+ region_model model (&mgr);
ASSERT_CONDITION_UNKNOWN (model, x, EQ_EXPR, y);
ADD_SAT_CONSTRAINT (model, x, EQ_EXPR, y);
/* x == y, then y == z */
{
- region_model model;
+ region_model_manager mgr;
+ region_model model (&mgr);
ASSERT_CONDITION_UNKNOWN (model, x, EQ_EXPR, y);
ADD_SAT_CONSTRAINT (model, x, EQ_EXPR, y);
/* x != y. */
{
- region_model model;
+ region_model_manager mgr;
+ region_model model (&mgr);
ADD_SAT_CONSTRAINT (model, x, NE_EXPR, y);
/* x < y. */
{
- region_model model;
+ region_model_manager mgr;
+ region_model model (&mgr);
ADD_SAT_CONSTRAINT (model, x, LT_EXPR, y);
/* x <= y. */
{
- region_model model;
+ region_model_manager mgr;
+ region_model model (&mgr);
ADD_SAT_CONSTRAINT (model, x, LE_EXPR, y);
/* x > y. */
{
- region_model model;
+ region_model_manager mgr;
+ region_model model (&mgr);
ADD_SAT_CONSTRAINT (model, x, GT_EXPR, y);
/* x >= y. */
{
- region_model model;
+ region_model_manager mgr;
+ region_model model (&mgr);
ADD_SAT_CONSTRAINT (model, x, GE_EXPR, y);
/* Constants. */
{
- region_model model;
+ region_model_manager mgr;
+ region_model model (&mgr);
ASSERT_CONDITION_FALSE (model, int_0, EQ_EXPR, int_42);
ASSERT_CONDITION_TRUE (model, int_0, NE_EXPR, int_42);
ASSERT_CONDITION_TRUE (model, int_0, LT_EXPR, int_42);
/* x == 0, y == 42. */
{
- region_model model;
+ region_model_manager mgr;
+ region_model model (&mgr);
ADD_SAT_CONSTRAINT (model, x, EQ_EXPR, int_0);
ADD_SAT_CONSTRAINT (model, y, EQ_EXPR, int_42);
/* x == y && x != y. */
{
- region_model model;
+ region_model_manager mgr;
+ region_model model (&mgr);
ADD_SAT_CONSTRAINT (model, x, EQ_EXPR, y);
ADD_UNSAT_CONSTRAINT (model, x, NE_EXPR, y);
}
/* x == 0 then x == 42. */
{
- region_model model;
+ region_model_manager mgr;
+ region_model model (&mgr);
ADD_SAT_CONSTRAINT (model, x, EQ_EXPR, int_0);
ADD_UNSAT_CONSTRAINT (model, x, EQ_EXPR, int_42);
}
/* x == 0 then x != 0. */
{
- region_model model;
+ region_model_manager mgr;
+ region_model model (&mgr);
ADD_SAT_CONSTRAINT (model, x, EQ_EXPR, int_0);
ADD_UNSAT_CONSTRAINT (model, x, NE_EXPR, int_0);
}
/* x == 0 then x > 0. */
{
- region_model model;
+ region_model_manager mgr;
+ region_model model (&mgr);
ADD_SAT_CONSTRAINT (model, x, EQ_EXPR, int_0);
ADD_UNSAT_CONSTRAINT (model, x, GT_EXPR, int_0);
}
/* x != y && x == y. */
{
- region_model model;
+ region_model_manager mgr;
+ region_model model (&mgr);
ADD_SAT_CONSTRAINT (model, x, NE_EXPR, y);
ADD_UNSAT_CONSTRAINT (model, x, EQ_EXPR, y);
}
/* x <= y && x > y. */
{
- region_model model;
+ region_model_manager mgr;
+ region_model model (&mgr);
ADD_SAT_CONSTRAINT (model, x, LE_EXPR, y);
ADD_UNSAT_CONSTRAINT (model, x, GT_EXPR, y);
}
/* a == b, then c == d, then c == b. */
{
- region_model model;
+ region_model_manager mgr;
+ region_model model (&mgr);
ASSERT_CONDITION_UNKNOWN (model, a, EQ_EXPR, b);
ASSERT_CONDITION_UNKNOWN (model, b, EQ_EXPR, c);
ASSERT_CONDITION_UNKNOWN (model, c, EQ_EXPR, d);
/* Transitivity: "a < b", "b < c" should imply "a < c". */
{
- region_model model;
+ region_model_manager mgr;
+ region_model model (&mgr);
ADD_SAT_CONSTRAINT (model, a, LT_EXPR, b);
ADD_SAT_CONSTRAINT (model, b, LT_EXPR, c);
/* Transitivity: "a <= b", "b < c" should imply "a < c". */
{
- region_model model;
+ region_model_manager mgr;
+ region_model model (&mgr);
ADD_SAT_CONSTRAINT (model, a, LE_EXPR, b);
ADD_SAT_CONSTRAINT (model, b, LT_EXPR, c);
/* Transitivity: "a <= b", "b <= c" should imply "a <= c". */
{
- region_model model;
+ region_model_manager mgr;
+ region_model model (&mgr);
ADD_SAT_CONSTRAINT (model, a, LE_EXPR, b);
ADD_SAT_CONSTRAINT (model, b, LE_EXPR, c);
/* Transitivity: "a > b", "b > c" should imply "a > c". */
{
- region_model model;
+ region_model_manager mgr;
+ region_model model (&mgr);
ADD_SAT_CONSTRAINT (model, a, GT_EXPR, b);
ADD_SAT_CONSTRAINT (model, b, GT_EXPR, c);
/* Transitivity: "a >= b", "b > c" should imply " a > c". */
{
- region_model model;
+ region_model_manager mgr;
+ region_model model (&mgr);
ADD_SAT_CONSTRAINT (model, a, GE_EXPR, b);
ADD_SAT_CONSTRAINT (model, b, GT_EXPR, c);
/* Transitivity: "a >= b", "b >= c" should imply "a >= c". */
{
- region_model model;
+ region_model_manager mgr;
+ region_model model (&mgr);
ADD_SAT_CONSTRAINT (model, a, GE_EXPR, b);
ADD_SAT_CONSTRAINT (model, b, GE_EXPR, c);
but also that:
(a < d). */
{
- region_model model;
+ region_model_manager mgr;
+ region_model model (&mgr);
ADD_SAT_CONSTRAINT (model, a, LT_EXPR, b);
ADD_SAT_CONSTRAINT (model, c, LT_EXPR, d);
ADD_SAT_CONSTRAINT (model, b, LT_EXPR, c);
/* Transitivity: "a >= b", "b >= a" should imply that a == b. */
{
- region_model model;
+ region_model_manager mgr;
+ region_model model (&mgr);
ADD_SAT_CONSTRAINT (model, a, GE_EXPR, b);
ADD_SAT_CONSTRAINT (model, b, GE_EXPR, a);
// TODO:
ASSERT_CONDITION_TRUE (model, a, EQ_EXPR, b);
+
+ /* The ECs for a and b should have merged, and any constraints removed. */
+ ASSERT_EQ (model.get_constraints ()->m_equiv_classes.length (), 1);
+ ASSERT_EQ (model.get_constraints ()->m_constraints.length (), 0);
}
/* Transitivity: "a >= b", "b > a" should be impossible. */
{
- region_model model;
+ region_model_manager mgr;
+ region_model model (&mgr);
ADD_SAT_CONSTRAINT (model, a, GE_EXPR, b);
ADD_UNSAT_CONSTRAINT (model, b, GT_EXPR, a);
}
/* Transitivity: "a >= b", "b >= c", "c >= a" should imply
that a == b == c. */
{
- region_model model;
+ region_model_manager mgr;
+ region_model model (&mgr);
ADD_SAT_CONSTRAINT (model, a, GE_EXPR, b);
ADD_SAT_CONSTRAINT (model, b, GE_EXPR, c);
ADD_SAT_CONSTRAINT (model, c, GE_EXPR, a);
/* Transitivity: "a > b", "b > c", "c > a"
should be impossible. */
{
- region_model model;
+ region_model_manager mgr;
+ region_model model (&mgr);
ADD_SAT_CONSTRAINT (model, a, GT_EXPR, b);
ADD_SAT_CONSTRAINT (model, b, GT_EXPR, c);
ADD_UNSAT_CONSTRAINT (model, c, GT_EXPR, a);
/* Given a >= 1024, then a <= 1023 should be impossible. */
{
- region_model model;
+ region_model_manager mgr;
+ region_model model (&mgr);
ADD_SAT_CONSTRAINT (model, a, GE_EXPR, int_1024);
ADD_UNSAT_CONSTRAINT (model, a, LE_EXPR, int_1023);
}
/* a > 4. */
{
- region_model model;
+ region_model_manager mgr;
+ region_model model (&mgr);
ADD_SAT_CONSTRAINT (model, a, GT_EXPR, int_4);
ASSERT_CONDITION_TRUE (model, a, GT_EXPR, int_4);
ASSERT_CONDITION_TRUE (model, a, NE_EXPR, int_3);
/* a <= 4. */
{
- region_model model;
+ region_model_manager mgr;
+ region_model model (&mgr);
ADD_SAT_CONSTRAINT (model, a, LE_EXPR, int_4);
ASSERT_CONDITION_FALSE (model, a, GT_EXPR, int_4);
ASSERT_CONDITION_FALSE (model, a, GT_EXPR, int_5);
/* If "a > b" and "a == 3", then "b == 4" ought to be unsatisfiable. */
{
- region_model model;
+ region_model_manager mgr;
+ region_model model (&mgr);
ADD_SAT_CONSTRAINT (model, a, GT_EXPR, b);
ADD_SAT_CONSTRAINT (model, a, EQ_EXPR, int_3);
ADD_UNSAT_CONSTRAINT (model, b, EQ_EXPR, int_4);
/* If "a <= 4" && "a > 3", then "a == 4",
assuming a is of integral type. */
{
- region_model model;
+ region_model_manager mgr;
+ region_model model (&mgr);
ADD_SAT_CONSTRAINT (model, a, LE_EXPR, int_4);
ADD_SAT_CONSTRAINT (model, a, GT_EXPR, int_3);
ASSERT_CONDITION_TRUE (model, a, EQ_EXPR, int_4);
/* If "a > 3" && "a <= 4", then "a == 4",
assuming a is of integral type. */
{
- region_model model;
+ region_model_manager mgr;
+ region_model model (&mgr);
ADD_SAT_CONSTRAINT (model, a, GT_EXPR, int_3);
ADD_SAT_CONSTRAINT (model, a, LE_EXPR, int_4);
ASSERT_CONDITION_TRUE (model, a, EQ_EXPR, int_4);
/* If "a > 3" && "a < 5", then "a == 4",
assuming a is of integral type. */
{
- region_model model;
+ region_model_manager mgr;
+ region_model model (&mgr);
ADD_SAT_CONSTRAINT (model, a, GT_EXPR, int_3);
ADD_SAT_CONSTRAINT (model, a, LT_EXPR, int_5);
ASSERT_CONDITION_TRUE (model, a, EQ_EXPR, int_4);
/* If "a >= 4" && "a < 5", then "a == 4",
assuming a is of integral type. */
{
- region_model model;
+ region_model_manager mgr;
+ region_model model (&mgr);
ADD_SAT_CONSTRAINT (model, a, GE_EXPR, int_4);
ADD_SAT_CONSTRAINT (model, a, LT_EXPR, int_5);
ASSERT_CONDITION_TRUE (model, a, EQ_EXPR, int_4);
}
/* If "a >= 4" && "a <= 4", then "a == 4". */
{
- region_model model;
+ region_model_manager mgr;
+ region_model model (&mgr);
ADD_SAT_CONSTRAINT (model, a, GE_EXPR, int_4);
ADD_SAT_CONSTRAINT (model, a, LE_EXPR, int_4);
ASSERT_CONDITION_TRUE (model, a, EQ_EXPR, int_4);
tree float_3 = build_real_from_int_cst (double_type_node, int_3);
tree float_4 = build_real_from_int_cst (double_type_node, int_4);
- region_model model;
+ region_model_manager mgr;
+ region_model model (&mgr);
ADD_SAT_CONSTRAINT (model, f, GT_EXPR, float_3);
ADD_SAT_CONSTRAINT (model, f, LE_EXPR, float_4);
ASSERT_CONDITION_UNKNOWN (model, f, EQ_EXPR, float_4);
/* x == y. */
{
- region_model model;
+ region_model_manager mgr;
+ region_model model (&mgr);
ADD_SAT_CONSTRAINT (model, x, EQ_EXPR, y);
/* y <= z; x == y. */
{
- region_model model;
+ region_model_manager mgr;
+ region_model model (&mgr);
ASSERT_CONDITION_UNKNOWN (model, x, EQ_EXPR, y);
ASSERT_CONDITION_UNKNOWN (model, x, GE_EXPR, z);
/* y <= z; y == x. */
{
- region_model model;
+ region_model_manager mgr;
+ region_model model (&mgr);
ASSERT_CONDITION_UNKNOWN (model, x, EQ_EXPR, y);
ASSERT_CONDITION_UNKNOWN (model, x, GE_EXPR, z);
/* x == 0, then x != 42. */
{
- region_model model;
+ region_model_manager mgr;
+ region_model model (&mgr);
ADD_SAT_CONSTRAINT (model, x, EQ_EXPR, int_0);
ADD_SAT_CONSTRAINT (model, x, NE_EXPR, int_42);
/* Assert various things about the insides of model. */
constraint_manager *cm = model.get_constraints ();
- ASSERT_EQ (cm->m_constraints.length (), 1);
- ASSERT_EQ (cm->m_equiv_classes.length (), 2);
- ASSERT_EQ (cm->m_constraints[0].m_lhs,
- cm->get_or_add_equiv_class (model.get_rvalue (int_0, NULL)));
- ASSERT_EQ (cm->m_constraints[0].m_rhs,
- cm->get_or_add_equiv_class (model.get_rvalue (int_42, NULL)));
- ASSERT_EQ (cm->m_constraints[0].m_op, CONSTRAINT_LT);
+ ASSERT_EQ (cm->m_constraints.length (), 0);
+ ASSERT_EQ (cm->m_equiv_classes.length (), 1);
}
// TODO: selftest for merging ecs "in the middle"
tree y = build_global_decl ("y", integer_type_node);
{
- region_model model0;
- region_model model1;
+ region_model_manager mgr;
+ region_model model0 (&mgr);
+ region_model model1 (&mgr);
constraint_manager *cm0 = model0.get_constraints ();
constraint_manager *cm1 = model1.get_constraints ();
ASSERT_NE (model0.hash (), model1.hash ());
ASSERT_NE (model0, model1);
- region_model model2;
+ region_model model2 (&mgr);
constraint_manager *cm2 = model2.get_constraints ();
/* Make the same change to cm2. */
ADD_SAT_CONSTRAINT (model2, x, EQ_EXPR, y);
static void
test_many_constants ()
{
+ program_point point (program_point::origin ());
tree a = build_global_decl ("a", integer_type_node);
- region_model model;
+ region_model_manager mgr;
+ region_model model (&mgr);
auto_vec<tree> constants;
for (int i = 0; i < 20; i++)
{
/* Merge, and check the result. */
region_model other (model);
- region_model merged;
- ASSERT_TRUE (model.can_merge_with_p (other, &merged));
- model.canonicalize (NULL);
- merged.canonicalize (NULL);
+ region_model merged (&mgr);
+ ASSERT_TRUE (model.can_merge_with_p (other, point, &merged));
+ model.canonicalize ();
+ merged.canonicalize ();
ASSERT_EQ (model, merged);
for (int j = 0; j <= i; j++)
{
/* These selftests assume transitivity. */
test_transitivity ();
- test_constant_comparisons ();
}
+ test_constant_comparisons ();
test_constraint_impl ();
test_equality ();
test_many_constants ();
class constraint_manager;
-/* Abstract base class for specifying how state should be purged. */
+/* One of the end-points of a range. */
-class purge_criteria
+struct bound
{
-public:
- virtual ~purge_criteria () {}
- virtual bool should_purge_p (svalue_id sid) const = 0;
+ bound () : m_constant (NULL_TREE), m_closed (false) {}
+ bound (tree constant, bool closed)
+ : m_constant (constant), m_closed (closed) {}
+
+ void ensure_closed (bool is_upper);
+
+ const char * get_relation_as_str () const;
+
+ tree m_constant;
+ bool m_closed;
+};
+
+/* A range of values, used for determining if a value has been
+ constrained to just one possible constant value. */
+
+struct range
+{
+ range () : m_lower_bound (), m_upper_bound () {}
+ range (const bound &lower, const bound &upper)
+ : m_lower_bound (lower), m_upper_bound (upper) {}
+
+ void dump_to_pp (pretty_printer *pp) const;
+ void dump () const;
+
+ tree constrained_to_single_element ();
+
+ tristate eval_condition (enum tree_code op,
+ tree rhs_const) const;
+ bool below_lower_bound (tree rhs_const) const;
+ bool above_upper_bound (tree rhs_const) const;
+
+ bound m_lower_bound;
+ bound m_upper_bound;
};
/* An equivalence class within a constraint manager: a set of
- svalue_ids that are known to all be equal to each other,
+ svalues that are known to all be equal to each other,
together with an optional tree constant that they are equal to. */
class equiv_class
hashval_t hash () const;
bool operator== (const equiv_class &other);
- void add (svalue_id sid, const constraint_manager &cm);
- bool del (svalue_id sid);
+ void add (const svalue *sval);
+ bool del (const svalue *sval);
tree get_any_constant () const { return m_constant; }
- svalue_id get_representative () const;
-
- void remap_svalue_ids (const svalue_id_map &map);
+ const svalue *get_representative () const;
void canonicalize ();
different zeroes, for different types); these are just for the last
constant added. */
tree m_constant;
- svalue_id m_cst_sid;
+ const svalue *m_cst_sval;
// TODO: should this be a set rather than a vec?
- auto_vec<svalue_id> m_vars;
+ auto_vec<const svalue *> m_vars;
};
/* The various kinds of constraint. */
return m_op != CONSTRAINT_NE;
}
+ bool implied_by (const constraint &other,
+ const constraint_manager &cm) const;
+
equiv_class_id m_lhs;
enum constraint_op m_op;
equiv_class_id m_rhs;
{
public:
virtual ~fact_visitor () {}
- virtual void on_fact (svalue_id lhs, enum tree_code, svalue_id rhs) = 0;
+ virtual void on_fact (const svalue *lhs,
+ enum tree_code,
+ const svalue *rhs) = 0;
};
/* A collection of equivalence classes and constraints on them.
class constraint_manager
{
public:
- constraint_manager () {}
+ constraint_manager (region_model_manager *mgr) : m_mgr (mgr) {}
constraint_manager (const constraint_manager &other);
virtual ~constraint_manager () {}
- virtual constraint_manager *clone (region_model *) const = 0;
- virtual tree maybe_get_constant (svalue_id sid) const = 0;
- virtual svalue_id get_sid_for_constant (tree cst) const = 0;
- virtual int get_num_svalues () const = 0;
-
constraint_manager& operator= (const constraint_manager &other);
hashval_t hash () const;
}
void print (pretty_printer *pp) const;
- void dump_to_pp (pretty_printer *pp) const;
+ void dump_to_pp (pretty_printer *pp, bool multiline) const;
void dump (FILE *fp) const;
void dump () const;
return *m_equiv_classes[idx];
}
- equiv_class &get_equiv_class (svalue_id sid)
+ equiv_class &get_equiv_class (const svalue *sval)
{
- equiv_class_id ec_id = get_or_add_equiv_class (sid);
+ equiv_class_id ec_id = get_or_add_equiv_class (sval);
return ec_id.get_obj (*this);
}
- bool add_constraint (svalue_id lhs, enum tree_code op, svalue_id rhs);
+ bool add_constraint (const svalue *lhs,
+ enum tree_code op,
+ const svalue *rhs);
bool add_constraint (equiv_class_id lhs_ec_id,
enum tree_code op,
equiv_class_id rhs_ec_id);
- bool get_equiv_class_by_sid (svalue_id sid, equiv_class_id *out) const;
- equiv_class_id get_or_add_equiv_class (svalue_id sid);
+ void add_unknown_constraint (equiv_class_id lhs_ec_id,
+ enum tree_code op,
+ equiv_class_id rhs_ec_id);
+
+ bool get_equiv_class_by_svalue (const svalue *sval,
+ equiv_class_id *out) const;
+ equiv_class_id get_or_add_equiv_class (const svalue *sval);
tristate eval_condition (equiv_class_id lhs,
enum tree_code op,
- equiv_class_id rhs);
- tristate eval_condition (svalue_id lhs,
+ equiv_class_id rhs) const;
+ tristate eval_condition (equiv_class_id lhs_ec,
+ enum tree_code op,
+ tree rhs_const) const;
+ tristate eval_condition (const svalue *lhs,
enum tree_code op,
- svalue_id rhs);
+ const svalue *rhs) const;
+ range get_ec_bounds (equiv_class_id ec_id) const;
- void purge (const purge_criteria &p, purge_stats *stats);
+ /* PurgeCriteria should have:
+ bool should_purge_p (const svalue *sval) const. */
+ template <typename PurgeCriteria>
+ void purge (const PurgeCriteria &p, purge_stats *stats);
- void remap_svalue_ids (const svalue_id_map &map);
+ void on_liveness_change (const svalue_set &live_svalues,
+ const region_model *model);
- void canonicalize (unsigned num_svalue_ids);
+ void canonicalize ();
static void merge (const constraint_manager &cm_a,
const constraint_manager &cm_b,
auto_vec<constraint> m_constraints;
private:
- static void clean_merger_input (const constraint_manager &cm_in,
- const one_way_svalue_id_map &map_sid_to_m,
- constraint_manager *out);
-
void add_constraint_internal (equiv_class_id lhs_id,
enum constraint_op c_op,
equiv_class_id rhs_id);
+
+ region_model_manager *m_mgr;
};
} // namespace ana
#include "analyzer/sm.h"
#include "analyzer/pending-diagnostic.h"
#include "analyzer/diagnostic-manager.h"
+#include "analyzer/call-string.h"
+#include "analyzer/program-point.h"
+#include "analyzer/store.h"
#include "analyzer/region-model.h"
#include "analyzer/constraint-manager.h"
#include "cfg.h"
#include "cgraph.h"
#include "digraph.h"
#include "analyzer/supergraph.h"
-#include "analyzer/call-string.h"
-#include "analyzer/program-point.h"
#include "analyzer/program-state.h"
#include "analyzer/exploded-graph.h"
#include "analyzer/checker-path.h"
const exploded_node *enode,
const supernode *snode, const gimple *stmt,
stmt_finder *stmt_finder,
- tree var, state_machine::state_t state,
+ tree var,
+ const svalue *sval,
+ state_machine::state_t state,
pending_diagnostic *d)
: m_sm (sm), m_enode (enode), m_snode (snode), m_stmt (stmt),
/* stmt_finder could be on-stack; we want our own copy that can
outlive that. */
m_stmt_finder (stmt_finder ? stmt_finder->clone () : NULL),
- m_var (var), m_state (state),
+ m_var (var), m_sval (sval), m_state (state),
m_d (d), m_trailing_eedge (NULL),
m_status (STATUS_NEW), m_epath_length (0), m_problem (NULL)
{
/* diagnostic_manager's ctor. */
-diagnostic_manager::diagnostic_manager (logger *logger, int verbosity)
-: log_user (logger), m_verbosity (verbosity)
+diagnostic_manager::diagnostic_manager (logger *logger, engine *eng,
+ int verbosity)
+: log_user (logger), m_eng (eng), m_verbosity (verbosity)
{
}
const exploded_node *enode,
const supernode *snode, const gimple *stmt,
stmt_finder *finder,
- tree var, state_machine::state_t state,
+ tree var,
+ const svalue *sval,
+ state_machine::state_t state,
pending_diagnostic *d)
{
LOG_FUNC (get_logger ());
gcc_assert (enode);
saved_diagnostic *sd
- = new saved_diagnostic (sm, enode, snode, stmt, finder, var, state, d);
+ = new saved_diagnostic (sm, enode, snode, stmt, finder, var, sval,
+ state, d);
m_saved_diagnostics.safe_push (sd);
if (get_logger ())
log ("adding saved diagnostic %i at SN %i: %qs",
pending_diagnostic *d)
{
gcc_assert (enode);
- add_diagnostic (NULL, enode, snode, stmt, finder, NULL_TREE, 0, d);
+ add_diagnostic (NULL, enode, snode, stmt, finder, NULL_TREE, NULL, 0, d);
}
/* A class for identifying sets of duplicated pending_diagnostic.
class dedupe_winners
{
public:
+ dedupe_winners (engine *eng) : m_engine (eng) {}
+
~dedupe_winners ()
{
/* Delete all keys and candidates. */
void add (logger *logger,
const shortest_exploded_paths &sp,
+ const exploded_graph *eg,
saved_diagnostic *sd)
{
/* Build a dedupe_candidate for SD.
sd->m_snode->m_index);
feasibility_problem *p = NULL;
- if (!dc->get_path ().feasible_p (logger, &p))
+ if (!dc->get_path ().feasible_p (logger, &p, m_engine, eg))
{
if (logger)
logger->log ("rejecting %qs at EN: %i, SN: %i"
}
private:
+ engine *m_engine;
/* This maps from each dedupe_key to a current best dedupe_candidate. */
instance. This partitions the saved diagnostics by dedupe_key,
generating exploded_paths for them, and retaining the best one in each
partition. */
- dedupe_winners best_candidates;
+ dedupe_winners best_candidates (eg.get_engine ());
int i;
saved_diagnostic *sd;
FOR_EACH_VEC_ELT (m_saved_diagnostics, i, sd)
- best_candidates.add (get_logger (), sp, sd);
+ best_candidates.add (get_logger (), sp, &eg, sd);
/* For each dedupe-key, call emit_saved_diagnostic on the "best"
saved_diagnostic. */
build_emission_path (pb, epath, &emission_path);
/* Now prune it to just cover the most pertinent events. */
- prune_path (&emission_path, sd.m_sm, sd.m_var, sd.m_state);
+ prune_path (&emission_path, sd.m_sm, sd.m_sval, sd.m_state);
/* Add a final event to the path, covering the diagnostic itself.
We use the final enode from the epath, which might be different from
emission_path.prepare_for_emission (sd.m_d);
- gcc_rich_location rich_loc (stmt->location);
+ gcc_rich_location rich_loc (get_stmt_location (stmt, sd.m_snode->m_fun));
rich_loc.set_path (&emission_path);
auto_diagnostic_group d;
delete pp;
}
-/* Given a state change to DST_REP, determine a tree that gives the origin
- of that state at STMT, using DST_STATE's region model, so that state
- changes based on assignments can be tracked back to their origins.
-
- For example, if we have
-
- (S1) _1 = malloc (64);
- (S2) EXPR = _1;
-
- then at stmt S2 we can get the origin of EXPR's state as being _1,
- and thus track the allocation back to S1. */
-
-static tree
-get_any_origin (const gimple *stmt,
- tree dst_rep,
- const program_state &dst_state)
-{
- if (!stmt)
- return NULL_TREE;
-
- gcc_assert (dst_rep);
-
- if (const gassign *assign = dyn_cast <const gassign *> (stmt))
- {
- tree lhs = gimple_assign_lhs (assign);
- /* Use region IDs to compare lhs with DST_REP, bulletproofing against
- cases where they can't have lvalues by using
- tentative_region_model_context. */
- tentative_region_model_context ctxt;
- region_id lhs_rid = dst_state.m_region_model->get_lvalue (lhs, &ctxt);
- region_id dst_rep_rid
- = dst_state.m_region_model->get_lvalue (dst_rep, &ctxt);
- if (lhs_rid == dst_rep_rid && !ctxt.had_errors_p ())
- {
- tree rhs1 = gimple_assign_rhs1 (assign);
- enum tree_code op = gimple_assign_rhs_code (assign);
- switch (op)
- {
- default:
- //gcc_unreachable (); // TODO
- break;
- case COMPONENT_REF:
- case SSA_NAME:
- return rhs1;
- }
- }
- }
- return NULL_TREE;
-}
-
/* Emit a "path" of events to EMISSION_PATH describing the exploded path
EPATH within EG. */
stmt,
stack_depth,
sm,
- NULL_TREE,
+ NULL,
src_sm_val,
dst_sm_val,
- NULL_TREE,
+ NULL,
dst_state));
return false;
}
bool on_state_change (const state_machine &sm,
state_machine::state_t src_sm_val,
state_machine::state_t dst_sm_val,
- tree dst_rep,
- svalue_id dst_origin_sid) FINAL OVERRIDE
+ const svalue *sval,
+ const svalue *dst_origin_sval) FINAL OVERRIDE
{
const exploded_node *src_node = m_eedge.m_src;
const program_point &src_point = src_node->get_point ();
if (!stmt)
return false;
- tree origin_rep
- = dst_state.get_representative_tree (dst_origin_sid);
-
- if (origin_rep == NULL_TREE)
- origin_rep = get_any_origin (stmt, dst_rep, dst_state);
m_emission_path->add_event (new state_change_event (supernode,
stmt,
stack_depth,
sm,
- dst_rep,
+ sval,
src_sm_val,
dst_sm_val,
- origin_rep,
+ dst_origin_sval,
dst_state));
return false;
}
iter != dst_smap.end ();
++iter)
{
- /* Ideally we'd directly compare the SM state between src state
- and dst state, but there's no guarantee that the IDs can
- be meaningfully compared. */
- svalue_id dst_sid = (*iter).first;
+ const svalue *sval = (*iter).first;
state_machine::state_t dst_sm_val = (*iter).second.m_state;
-
- auto_vec<path_var> dst_pvs;
- dst_state.m_region_model->get_path_vars_for_svalue (dst_sid,
- &dst_pvs);
-
- unsigned j;
- path_var *dst_pv;
- FOR_EACH_VEC_ELT (dst_pvs, j, dst_pv)
+ state_machine::state_t src_sm_val
+ = src_smap.get_state (sval, ext_state);
+ if (dst_sm_val != src_sm_val)
{
- tree dst_rep = dst_pv->m_tree;
- gcc_assert (dst_rep);
- if (dst_pv->m_stack_depth
- >= src_state.m_region_model->get_stack_depth ())
- continue;
- tentative_region_model_context ctxt;
- svalue_id src_sid
- = src_state.m_region_model->get_rvalue (*dst_pv, &ctxt);
- if (src_sid.null_p () || ctxt.had_errors_p ())
- continue;
- state_machine::state_t src_sm_val = src_smap.get_state (src_sid);
- if (dst_sm_val != src_sm_val)
- {
- svalue_id dst_origin_sid = (*iter).second.m_origin;
- if (visitor->on_state_change (sm, src_sm_val, dst_sm_val,
- dst_rep, dst_origin_sid))
- return true;
- }
+ const svalue *origin_sval = (*iter).second.m_origin;
+ if (visitor->on_state_change (sm, src_sm_val, dst_sm_val,
+ sval, origin_sval))
+ return true;
}
}
}
return false;
}
+/* An sm_context for adding state_change_event on assignments to NULL,
+   where the default state isn't m_start.  Storing such state in the
+   sm_state_map would lead to bloat of the exploded_graph, so we want
+   to leave it as a default state, and inject state change events here
+   when we have a diagnostic.
+   Find transitions of constants, for handling on_zero_assignment.  */
+
+struct null_assignment_sm_context : public sm_context
+{
+  null_assignment_sm_context (int sm_idx,
+			      const state_machine &sm,
+			      const program_state *new_state,
+			      const gimple *stmt,
+			      const program_point *point,
+			      checker_path *emission_path)
+  : sm_context (sm_idx, sm), m_new_state (new_state),
+    m_stmt (stmt), m_point (point), m_emission_path (emission_path)
+  {
+  }
+
+  tree get_fndecl_for_call (const gcall */*call*/) FINAL OVERRIDE
+  {
+    return NULL_TREE;
+  }
+
+  /* Synthesize a state_change_event for transitions out of the start
+     state, using the new state's region model to get the svalue for
+     VAR.  Transitions from other states are already in the smap and
+     thus already have events.  */
+  void on_transition (const supernode *node ATTRIBUTE_UNUSED,
+		      const gimple *stmt ATTRIBUTE_UNUSED,
+		      tree var,
+		      state_machine::state_t from,
+		      state_machine::state_t to,
+		      tree origin ATTRIBUTE_UNUSED) FINAL OVERRIDE
+  {
+    if (from != 0)
+      return;
+
+    const svalue *var_new_sval
+      = m_new_state->m_region_model->get_rvalue (var, NULL);
+    const supernode *supernode = m_point->get_supernode ();
+    int stack_depth = m_point->get_stack_depth ();
+
+    m_emission_path->add_event (new state_change_event (supernode,
+							m_stmt,
+							stack_depth,
+							m_sm,
+							var_new_sval,
+							from, to,
+							NULL,
+							*m_new_state));
+  }
+
+  /* This context exists only to inject events, not to emit diagnostics;
+     take ownership of D and discard it.  */
+  void warn_for_state (const supernode *, const gimple *,
+		       tree, state_machine::state_t,
+		       pending_diagnostic *d) FINAL OVERRIDE
+  {
+    delete d;
+  }
+
+  tree get_diagnostic_tree (tree expr) FINAL OVERRIDE
+  {
+    return expr;
+  }
+
+  state_machine::state_t get_global_state () const FINAL OVERRIDE
+  {
+    return 0;
+  }
+
+  void set_global_state (state_machine::state_t) FINAL OVERRIDE
+  {
+    /* No-op.  */
+  }
+
+  void on_custom_transition (custom_transition *) FINAL OVERRIDE
+  {
+  }
+
+  /* Return the LHS of STMT if it assigns a zero constant; otherwise
+     return NULL_TREE.  */
+  tree is_zero_assignment (const gimple *stmt) FINAL OVERRIDE
+  {
+    const gassign *assign_stmt = dyn_cast <const gassign *> (stmt);
+    if (!assign_stmt)
+      return NULL_TREE;
+    if (const svalue *sval
+	  = m_new_state->m_region_model->get_gassign_result (assign_stmt, NULL))
+      if (tree cst = sval->maybe_get_constant ())
+	if (::zerop (cst))
+	  return gimple_assign_lhs (assign_stmt);
+    return NULL_TREE;
+  }
+
+  const program_state *m_new_state;
+  const gimple *m_stmt;
+  const program_point *m_point;
+  checker_path *m_emission_path;
+};
+
/* Subroutine of diagnostic_manager::build_emission_path.
Add any events for EEDGE to EMISSION_PATH. */
(new statement_event (stmt,
dst_point.get_fndecl (),
dst_stack_depth, dst_state));
+
+ /* Create state change events for assignment to NULL.
+ Iterate through the stmts in dst_enode, adding state change
+ events for them. */
+ if (dst_state.m_region_model)
+ {
+ program_state iter_state (dst_state);
+ program_point iter_point (dst_point);
+ while (1)
+ {
+ const gimple *stmt = iter_point.get_stmt ();
+ if (const gassign *assign = dyn_cast<const gassign *> (stmt))
+ {
+ const extrinsic_state &ext_state = pb.get_ext_state ();
+ iter_state.m_region_model->on_assignment (assign, NULL);
+ for (unsigned i = 0; i < ext_state.get_num_checkers (); i++)
+ {
+ const state_machine &sm = ext_state.get_sm (i);
+ null_assignment_sm_context sm_ctxt (i, sm,
+ &iter_state,
+ stmt,
+ &iter_point,
+ emission_path);
+ sm.on_stmt (&sm_ctxt, dst_point.get_supernode (), stmt);
+ // TODO: what about phi nodes?
+ }
+ }
+ iter_point.next_stmt ();
+ if (iter_point.get_kind () == PK_AFTER_SUPERNODE
+ || (dst_node->m_succs.length () > 1
+ && (iter_point
+ == dst_node->m_succs[0]->m_dest->get_point ())))
+ break;
+ }
+ }
}
break;
}
void
diagnostic_manager::prune_path (checker_path *path,
const state_machine *sm,
- tree var,
+ const svalue *sval,
state_machine::state_t state) const
{
LOG_FUNC (get_logger ());
path->maybe_log (get_logger (), "path");
- prune_for_sm_diagnostic (path, sm, var, state);
+ prune_for_sm_diagnostic (path, sm, sval, state);
prune_interproc_events (path);
finish_pruning (path);
path->maybe_log (get_logger (), "pruned");
void
diagnostic_manager::prune_for_sm_diagnostic (checker_path *path,
const state_machine *sm,
- tree var,
+ const svalue *sval,
state_machine::state_t state) const
{
- update_for_unsuitable_sm_exprs (&var);
-
int idx = path->num_events () - 1;
while (idx >= 0 && idx < (signed)path->num_events ())
{
{
if (sm)
{
- if (var)
- log ("considering event %i, with var: %qE, state: %qs",
- idx, var, sm->get_state_name (state));
+ if (sval)
+ {
+ label_text sval_desc = sval->get_desc ();
+ log ("considering event %i (%s), with sval: %qs, state: %qs",
+ idx, event_kind_to_string (base_event->m_kind),
+ sval_desc.m_buffer, sm->get_state_name (state));
+ }
else
- log ("considering event %i, with global state: %qs",
- idx, sm->get_state_name (state));
+ log ("considering event %i (%s), with global state: %qs",
+ idx, event_kind_to_string (base_event->m_kind),
+ sm->get_state_name (state));
}
else
log ("considering event %i", idx);
}
- gcc_assert (var == NULL || can_be_expr_of_interest_p (var));
+
switch (base_event->m_kind)
{
default:
case EK_STMT:
{
- /* If this stmt is the origin of "var", update var. */
- if (var)
- {
- statement_event *stmt_event = (statement_event *)base_event;
- tree new_var = get_any_origin (stmt_event->m_stmt, var,
- stmt_event->m_dst_state);
- if (new_var)
- {
- log ("event %i: switching var of interest from %qE to %qE",
- idx, var, new_var);
- var = new_var;
- }
- }
if (m_verbosity < 4)
{
log ("filtering event %i: statement event", idx);
case EK_STATE_CHANGE:
{
state_change_event *state_change = (state_change_event *)base_event;
- /* Use region IDs to compare var with the state_change's m_var,
- bulletproofing against cases where they can't have lvalues by
- using tentative_region_model_context. */
- tentative_region_model_context ctxt;
- region_id state_var_rid
- = state_change->get_lvalue (state_change->m_var, &ctxt);
- region_id var_rid = state_change->get_lvalue (var, &ctxt);
- if (state_var_rid == var_rid && !ctxt.had_errors_p ())
+ gcc_assert (state_change->m_dst_state.m_region_model);
+
+ if (state_change->m_sval == sval)
{
if (state_change->m_origin)
{
- log ("event %i: switching var of interest from %qE to %qE",
- idx, var, state_change->m_origin);
- var = state_change->m_origin;
- update_for_unsuitable_sm_exprs (&var);
+ if (get_logger ())
+ {
+ label_text sval_desc = sval->get_desc ();
+ label_text origin_sval_desc
+ = state_change->m_origin->get_desc ();
+ log ("event %i:"
+ " switching var of interest from %qs to %qs",
+ idx, sval_desc.m_buffer,
+ origin_sval_desc.m_buffer);
+ }
+ sval = state_change->m_origin;
}
log ("event %i: switching state of interest from %qs to %qs",
idx, sm->get_state_name (state_change->m_to),
}
else if (m_verbosity < 4)
{
- if (var)
- log ("filtering event %i:"
- " state change to %qE unrelated to %qE",
- idx, state_change->m_var, var);
- else
- log ("filtering event %i: state change to %qE",
- idx, state_change->m_var);
- if (ctxt.had_errors_p ())
- log ("context had errors");
+ if (get_logger ())
+ {
+ if (state_change->m_sval)
+ {
+ label_text change_sval_desc
+ = state_change->m_sval->get_desc ();
+ if (sval)
+ {
+ label_text sval_desc = sval->get_desc ();
+ log ("filtering event %i:"
+ " state change to %qs unrelated to %qs",
+ idx, change_sval_desc.m_buffer,
+ sval_desc.m_buffer);
+ }
+ else
+ log ("filtering event %i: state change to %qs",
+ idx, change_sval_desc.m_buffer);
+ }
+ else
+ log ("filtering event %i: global state change", idx);
+ }
path->delete_event (idx);
}
}
case EK_START_CFG_EDGE:
{
cfg_edge_event *event = (cfg_edge_event *)base_event;
- const cfg_superedge& cfg_superedge
- = event->get_cfg_superedge ();
- const supernode *dest = event->m_sedge->m_dest;
- /* Do we have an SSA_NAME defined via a phi node in
- the dest CFG node? */
- if (var && TREE_CODE (var) == SSA_NAME)
- if (SSA_NAME_DEF_STMT (var)->bb == dest->m_bb)
- {
- if (gphi *phi
- = dyn_cast <gphi *> (SSA_NAME_DEF_STMT (var)))
- {
- /* Update var based on its phi node. */
- tree old_var = var;
- var = cfg_superedge.get_phi_arg (phi);
- log ("updating from %qE to %qE based on phi node",
- old_var, var);
- if (get_logger ())
- {
- pretty_printer pp;
- pp_gimple_stmt_1 (&pp, phi, 0, (dump_flags_t)0);
- log (" phi: %s", pp_formatted_text (&pp));
- }
- /* If we've chosen a bad exploded_path, then the
- phi arg might be a constant. Fail gracefully for
- this case. */
- update_for_unsuitable_sm_exprs (&var);
- }
- }
/* TODO: is this edge significant to var?
See if var can be in other states in the dest, but not
if (event->should_filter_p (m_verbosity))
{
- log ("filtering event %i: CFG edge", idx);
+ log ("filtering events %i and %i: CFG edge", idx, idx + 1);
path->delete_event (idx);
/* Also delete the corresponding EK_END_CFG_EDGE. */
gcc_assert (path->get_checker_event (idx)->m_kind
call_event *event = (call_event *)base_event;
const callgraph_superedge& cg_superedge
= event->get_callgraph_superedge ();
+ const region_model *callee_model
+ = event->m_eedge.m_dest->get_state ().m_region_model;
+ tree callee_var = callee_model->get_representative_tree (sval);
+ /* We could just use caller_model->get_representative_tree (sval);
+ to get the caller_var, but for now use
+ map_expr_from_callee_to_caller so as to only record critical
+ state for parms and the like. */
callsite_expr expr;
tree caller_var
- = cg_superedge.map_expr_from_callee_to_caller (var, &expr);
+ = cg_superedge.map_expr_from_callee_to_caller (callee_var, &expr);
if (caller_var)
{
- log ("event %i:"
- " switching var of interest"
- " from %qE in callee to %qE in caller",
- idx, var, caller_var);
- var = caller_var;
+ if (get_logger ())
+ {
+ label_text sval_desc = sval->get_desc ();
+ log ("event %i:"
+ " recording critical state for %qs at call"
+ " from %qE in callee to %qE in caller",
+ idx, sval_desc.m_buffer, callee_var, caller_var);
+ }
if (expr.param_p ())
- event->record_critical_state (var, state);
- update_for_unsuitable_sm_exprs (&var);
+ event->record_critical_state (caller_var, state);
}
}
break;
case EK_RETURN_EDGE:
- // TODO: potentially update var/state based on return value,
- // args etc
{
- if (var)
+ if (sval)
{
return_event *event = (return_event *)base_event;
const callgraph_superedge& cg_superedge
= event->get_callgraph_superedge ();
+ /* Get a representative tree for SVAL in the caller frame's
+ model, so it can be mapped to the callee. */
+ const region_model *caller_model
+ = event->m_eedge.m_dest->get_state ().m_region_model;
+ tree caller_var = caller_model->get_representative_tree (sval);
callsite_expr expr;
tree callee_var
- = cg_superedge.map_expr_from_caller_to_callee (var, &expr);
+ = cg_superedge.map_expr_from_caller_to_callee (caller_var,
+ &expr);
if (callee_var)
{
- log ("event %i:"
- " switching var of interest"
- " from %qE in caller to %qE in callee",
- idx, var, callee_var);
- var = callee_var;
+ if (get_logger ())
+ {
+ label_text sval_desc = sval->get_desc ();
+ /* Format string order: caller's var first, then the
+ callee's var it maps to. */
+ log ("event %i:"
+ " recording critical state for %qs at return"
+ " from %qE in caller to %qE in callee",
+ idx, sval_desc.m_buffer, caller_var, callee_var);
+ }
if (expr.return_value_p ())
- event->record_critical_state (var, state);
- update_for_unsuitable_sm_exprs (&var);
+ event->record_critical_state (callee_var, state);
}
}
}
const exploded_node *enode,
const supernode *snode, const gimple *stmt,
stmt_finder *stmt_finder,
- tree var, state_machine::state_t state,
+ tree var, const svalue *sval,
+ state_machine::state_t state,
pending_diagnostic *d);
~saved_diagnostic ();
const gimple *m_stmt;
stmt_finder *m_stmt_finder;
tree m_var;
+ const svalue *m_sval;
state_machine::state_t m_state;
pending_diagnostic *m_d;
exploded_edge *m_trailing_eedge;
class diagnostic_manager : public log_user
{
public:
- diagnostic_manager (logger *logger, int verbosity);
+ diagnostic_manager (logger *logger, engine *eng, int verbosity);
+
+ engine *get_engine () const { return m_eng; }
void add_diagnostic (const state_machine *sm,
const exploded_node *enode,
const supernode *snode, const gimple *stmt,
stmt_finder *finder,
- tree var, state_machine::state_t state,
+ tree var,
+ const svalue *sval,
+ state_machine::state_t state,
pending_diagnostic *d);
void add_diagnostic (const exploded_node *enode,
void prune_path (checker_path *path,
const state_machine *sm,
- tree var, state_machine::state_t state) const;
+ const svalue *sval,
+ state_machine::state_t state) const;
void prune_for_sm_diagnostic (checker_path *path,
const state_machine *sm,
tree var,
state_machine::state_t state) const;
+ void prune_for_sm_diagnostic (checker_path *path,
+ const state_machine *sm,
+ const svalue *sval,
+ state_machine::state_t state) const;
void update_for_unsuitable_sm_exprs (tree *expr) const;
void prune_interproc_events (checker_path *path) const;
void finish_pruning (checker_path *path) const;
+ engine *m_eng;
auto_delete_vec<saved_diagnostic> m_saved_diagnostics;
const int m_verbosity;
};
#include "selftest.h"
#include "analyzer/analyzer.h"
#include "analyzer/analyzer-logging.h"
+#include "analyzer/call-string.h"
+#include "analyzer/program-point.h"
+#include "analyzer/store.h"
#include "analyzer/region-model.h"
#include "analyzer/constraint-manager.h"
#include "analyzer/sm.h"
#include "cgraph.h"
#include "digraph.h"
#include "analyzer/supergraph.h"
-#include "analyzer/call-string.h"
-#include "analyzer/program-point.h"
#include "analyzer/program-state.h"
#include "analyzer/exploded-graph.h"
#include "analyzer/analysis-plan.h"
namespace ana {
-static int readability_comparator (const void *p1, const void *p2);
-
/* class impl_region_model_context : public region_model_context. */
impl_region_model_context::
const exploded_node *enode_for_diag,
const program_state *old_state,
program_state *new_state,
- state_change *change,
const gimple *stmt,
stmt_finder *stmt_finder)
: m_eg (&eg), m_logger (eg.get_logger ()),
m_enode_for_diag (enode_for_diag),
m_old_state (old_state),
m_new_state (new_state),
- m_change (change),
m_stmt (stmt),
m_stmt_finder (stmt_finder),
m_ext_state (eg.get_ext_state ())
impl_region_model_context::
impl_region_model_context (program_state *state,
- state_change *change,
const extrinsic_state &ext_state,
logger *logger)
: m_eg (NULL), m_logger (logger), m_enode_for_diag (NULL),
m_old_state (NULL),
m_new_state (state),
- m_change (change),
m_stmt (NULL),
m_stmt_finder (NULL),
m_ext_state (ext_state)
}
void
-impl_region_model_context::remap_svalue_ids (const svalue_id_map &map)
+impl_region_model_context::on_svalue_leak (const svalue *sval)
+
{
- m_new_state->remap_svalue_ids (map);
- if (m_change)
- m_change->remap_svalue_ids (map);
+ int sm_idx;
+ sm_state_map *smap;
+ FOR_EACH_VEC_ELT (m_new_state->m_checker_states, sm_idx, smap)
+ smap->on_svalue_leak (sval, this);
}
-int
-impl_region_model_context::on_svalue_purge (svalue_id first_unused_sid,
- const svalue_id_map &map)
+void
+impl_region_model_context::
+on_liveness_change (const svalue_set &live_svalues,
+ const region_model *model)
{
- int total = 0;
int sm_idx;
sm_state_map *smap;
FOR_EACH_VEC_ELT (m_new_state->m_checker_states, sm_idx, smap)
- {
- const state_machine &sm = m_ext_state.get_sm (sm_idx);
- total += smap->on_svalue_purge (sm, sm_idx, first_unused_sid,
- map, this);
- }
- if (m_change)
- total += m_change->on_svalue_purge (first_unused_sid);
- return total;
+ smap->on_liveness_change (live_svalues, model, this);
}
void
-impl_region_model_context::on_unknown_change (svalue_id sid)
+impl_region_model_context::on_unknown_change (const svalue *sval,
+ bool is_mutable)
{
int sm_idx;
sm_state_map *smap;
FOR_EACH_VEC_ELT (m_new_state->m_checker_states, sm_idx, smap)
- smap->on_unknown_change (sid);
+ smap->on_unknown_change (sval, is_mutable, m_ext_state);
}
/* class setjmp_svalue : public svalue. */
-/* Compare the fields of this setjmp_svalue with OTHER, returning true
- if they are equal.
- For use by svalue::operator==. */
+/* Implementation of svalue::accept vfunc for setjmp_svalue. */
-bool
-setjmp_svalue::compare_fields (const setjmp_svalue &other) const
+void
+setjmp_svalue::accept (visitor *v) const
{
- return m_setjmp_record == other.m_setjmp_record;
+ v->visit_setjmp_svalue (this);
}
-/* Implementation of svalue::add_to_hash vfunc for setjmp_svalue. */
+/* Implementation of svalue::dump_to_pp vfunc for setjmp_svalue. */
void
-setjmp_svalue::add_to_hash (inchash::hash &hstate) const
+setjmp_svalue::dump_to_pp (pretty_printer *pp, bool simple) const
{
- hstate.add_int (m_setjmp_record.m_enode->m_index);
+ if (simple)
+ pp_printf (pp, "SETJMP(EN: %i)", get_enode_index ());
+ else
+ pp_printf (pp, "setjmp_svalue(EN%i)", get_enode_index ());
}
/* Get the index of the stored exploded_node. */
return m_setjmp_record.m_enode->m_index;
}
-/* Implementation of svalue::print_details vfunc for setjmp_svalue. */
-
-void
-setjmp_svalue::print_details (const region_model &model ATTRIBUTE_UNUSED,
- svalue_id this_sid ATTRIBUTE_UNUSED,
- pretty_printer *pp) const
-{
- pp_printf (pp, "setjmp: EN: %i", get_enode_index ());
-}
-
/* Concrete implementation of sm_context, wiring it up to the rest of this
file. */
const exploded_node *enode_for_diag,
const program_state *old_state,
program_state *new_state,
- state_change *change,
const sm_state_map *old_smap,
sm_state_map *new_smap,
stmt_finder *stmt_finder = NULL)
m_logger (eg.get_logger ()),
m_eg (eg), m_enode_for_diag (enode_for_diag),
m_old_state (old_state), m_new_state (new_state),
- m_change (change),
m_old_smap (old_smap), m_new_smap (new_smap),
m_stmt_finder (stmt_finder)
{
{
impl_region_model_context old_ctxt
(m_eg, m_enode_for_diag, NULL, NULL/*m_enode->get_state ()*/,
- m_change, call);
+ call);
region_model *model = m_new_state->m_region_model;
return model->get_fndecl_for_call (call, &old_ctxt);
}
void on_transition (const supernode *node ATTRIBUTE_UNUSED,
- const gimple *stmt ATTRIBUTE_UNUSED,
+ const gimple *stmt,
tree var,
state_machine::state_t from,
state_machine::state_t to,
LOG_FUNC (logger);
impl_region_model_context old_ctxt
(m_eg, m_enode_for_diag, NULL, NULL/*m_enode->get_state ()*/,
- m_change, stmt);
- svalue_id var_old_sid
+ stmt);
+ const svalue *var_old_sval
= m_old_state->m_region_model->get_rvalue (var, &old_ctxt);
impl_region_model_context new_ctxt (m_eg, m_enode_for_diag,
m_old_state, m_new_state,
- m_change, NULL);
- svalue_id var_new_sid
+ stmt);
+ const svalue *var_new_sval
= m_new_state->m_region_model->get_rvalue (var, &new_ctxt);
- svalue_id origin_new_sid
+ const svalue *origin_new_sval
= m_new_state->m_region_model->get_rvalue (origin, &new_ctxt);
- state_machine::state_t current = m_old_smap->get_state (var_old_sid);
+ state_machine::state_t current
+ = m_old_smap->get_state (var_old_sval, m_eg.get_ext_state ());
if (current == from)
{
if (logger)
var,
m_sm.get_state_name (from),
m_sm.get_state_name (to));
- m_new_smap->set_state (m_new_state->m_region_model, var_new_sid,
- to, origin_new_sid);
- if (m_change)
- m_change->add_sm_change (m_sm_idx, var_new_sid, from, to);
+ m_new_smap->set_state (m_new_state->m_region_model, var_new_sval,
+ to, origin_new_sval, m_eg.get_ext_state ());
}
}
gcc_assert (d); // take ownership
impl_region_model_context old_ctxt
- (m_eg, m_enode_for_diag, m_old_state, m_new_state, m_change, NULL);
+ (m_eg, m_enode_for_diag, m_old_state, m_new_state, NULL);
state_machine::state_t current;
if (var)
{
- svalue_id var_old_sid
+ const svalue *var_old_sval
= m_old_state->m_region_model->get_rvalue (var, &old_ctxt);
- current = m_old_smap->get_state (var_old_sid);
+ current = m_old_smap->get_state (var_old_sval, m_eg.get_ext_state ());
}
else
current = m_old_smap->get_global_state ();
if (state == current)
{
+ const svalue *var_old_sval
+ = m_old_state->m_region_model->get_rvalue (var, &old_ctxt);
m_eg.get_diagnostic_manager ().add_diagnostic
(&m_sm, m_enode_for_diag, snode, stmt, m_stmt_finder,
- var, state, d);
+ var, var_old_sval, state, d);
}
else
delete d;
we can print:
"double-free of 'inbuf.data'". */
- tree get_readable_tree (tree expr) FINAL OVERRIDE
+ tree get_diagnostic_tree (tree expr) FINAL OVERRIDE
{
/* Only for SSA_NAMEs of temporaries; otherwise, return EXPR, as it's
likely to be the least surprising tree to report. */
return expr;
gcc_assert (m_new_state);
- svalue_id sid = m_new_state->m_region_model->get_rvalue (expr, NULL);
+ const svalue *sval = m_new_state->m_region_model->get_rvalue (expr, NULL);
/* Find trees for all regions storing the value. */
- auto_vec<path_var> pvs;
- m_new_state->m_region_model->get_path_vars_for_svalue (sid, &pvs);
- if (pvs.length () < 1)
+ if (tree t = m_new_state->m_region_model->get_representative_tree (sval))
+ return t;
+ else
return expr;
- /* Pick the "best" such tree. */
- // TODO: should we also consider (and consolidate) equiv classes?
- pvs.qsort (readability_comparator);
- return pvs[0].m_tree;
}
state_machine::state_t get_global_state () const FINAL OVERRIDE
m_sm_idx);
}
+ tree is_zero_assignment (const gimple *stmt) FINAL OVERRIDE
+ {
+ const gassign *assign_stmt = dyn_cast <const gassign *> (stmt);
+ if (!assign_stmt)
+ return NULL_TREE;
+ impl_region_model_context old_ctxt
+ (m_eg, m_enode_for_diag, m_old_state, m_new_state, stmt);
+ if (const svalue *sval
+ = m_new_state->m_region_model->get_gassign_result (assign_stmt,
+ &old_ctxt))
+ if (tree cst = sval->maybe_get_constant ())
+ if (::zerop (cst))
+ return gimple_assign_lhs (assign_stmt);
+ return NULL_TREE;
+ }
+
log_user m_logger;
exploded_graph &m_eg;
const exploded_node *m_enode_for_diag;
const program_state *m_old_state;
program_state *m_new_state;
- state_change *m_change;
const sm_state_map *m_old_smap;
sm_state_map *m_new_smap;
stmt_finder *m_stmt_finder;
logger * const logger = m_eg.get_logger ();
LOG_FUNC (logger);
- if (TREE_CODE (m_var) == SSA_NAME)
+ if (m_var && TREE_CODE (m_var) == SSA_NAME)
{
/* Locate the final write to this SSA name in the path. */
const gimple *def_stmt = SSA_NAME_DEF_STMT (m_var);
case MEM_REF:
/* Impose a slight readability penalty relative to that of
operand 0. */
- return readability (TREE_OPERAND (expr, 0)) - 1;
+ return readability (TREE_OPERAND (expr, 0)) - 16;
case SSA_NAME:
{
if (tree var = SSA_NAME_VAR (expr))
- return readability (var);
+ /* Slightly favor the underlying var over the SSA name to
+ avoid having them compare equal. */
+ return readability (var) - 1;
/* Avoid printing '<unknown>' for SSA names for temporaries. */
return -1;
}
break;
+ case PARM_DECL:
case VAR_DECL:
- /* Arbitrarily-chosen "high readability" value. */
- return 256;
+ if (DECL_NAME (expr))
+ /* Arbitrarily-chosen "high readability" value. */
+ return 65536;
+ else
+ /* We don't want to print temporaries. For example, the C FE
+ prints them as e.g. "<Uxxxx>" where "xxxx" is the low 16 bits
+ of the tree pointer (see pp_c_tree_decl_identifier). */
+ return -1;
+
+ case RESULT_DECL:
+ /* Printing "<return-value>" isn't ideal, but is less awful than
+ trying to print a temporary. */
+ return 32768;
default:
return 0;
/* A qsort comparator for trees to sort them into most user-readable to
least user-readable. */
-static int
+int
readability_comparator (const void *p1, const void *p2)
{
path_var pv1 = *(path_var const *)p1;
path_var pv2 = *(path_var const *)p2;
- /* TODO: should we consider stack depths? */
int r1 = readability (pv1.m_tree);
int r2 = readability (pv2.m_tree);
+ if (int cmp = r2 - r1)
+ return cmp;
+
+ /* Favor items that are deeper on the stack and hence more recent;
+ this also favors locals over globals. */
+ if (int cmp = pv2.m_stack_depth - pv1.m_stack_depth)
+ return cmp;
- return r2 - r1;
+ /* TODO: We ought to find ways of sorting such cases. */
+ return 0;
}
-/* Create an sm_context and use it to call SM's on_leak vfunc, so that
- it can potentially complain about a leak of DST_SID (in a new region_model)
- in the given STATE, where MAP can be used to map SID back to an "old"
- region_model. */
+/* Find the best tree for SVAL and call SM's on_leak vfunc with it.
+ If on_leak returns a pending_diagnostic, queue it up to be reported,
+ so that we potentially complain about a leak of SVAL in the given STATE. */
void
impl_region_model_context::on_state_leak (const state_machine &sm,
- int sm_idx,
- svalue_id dst_sid,
- svalue_id first_unused_sid,
- const svalue_id_map &map,
+ const svalue *sval,
state_machine::state_t state)
{
logger * const logger = get_logger ();
LOG_SCOPE (logger);
if (logger)
- logger->log ("considering leak of sv%i", dst_sid.as_int ());
+ {
+ logger->start_log_line ();
+ logger->log_partial ("considering leak of ");
+ sval->dump_to_pp (logger->get_printer (), true);
+ logger->end_log_line ();
+ }
if (!m_eg)
return;
/* m_old_state also needs to be non-NULL so that the sm_ctxt can look
- up the old state of the sid. */
+ up the old state of SVAL. */
gcc_assert (m_old_state);
- /* Don't report on sid leaking if it's equal to one of the used sids.
- For example, given:
- some_non_trivial_expression = malloc (sizeof (struct foo));
- we have:
- _1 = malloc; (void *)
- some_non_trivial_expression = _1; (struct foo *)
- and at leak-detection time we may have:
- sv5: {type: 'struct foo *', &r3} (used)
- sv6: {type: 'void *', &r3} (unused)
- where both point to the same region. We don't want to report a
- leak of sv6, so we reject the report due to its equality with sv5. */
- gcc_assert (m_new_state);
- gcc_assert (!first_unused_sid.null_p ());
- for (int i = 0; i < first_unused_sid.as_int (); i++)
+ /* SVAL has leaked within the new state: it is not used by any reachable
+ regions.
+ We need to convert it back to a tree, but since it's likely no regions
+ use it, we have to find the "best" tree for it in the old_state. */
+ svalue_set visited;
+ path_var leaked_pv
+ = m_old_state->m_region_model->get_representative_path_var (sval,
+ &visited);
+
+ /* This might be NULL; the pending_diagnostic subclasses need to cope
+ with this. */
+ tree leaked_tree = leaked_pv.m_tree;
+ if (logger)
{
- svalue_id used_sid = svalue_id::from_int (i);
-
- /* Use the "_without_cm" form of eval_condition, since
- we're half-way through purging - we don't want to introduce new
- equivalence classes into the constraint_manager for "sid" and
- for each of the used_sids. */
- const region_model &rm = *m_new_state->m_region_model;
- tristate eq = rm.eval_condition_without_cm (dst_sid, EQ_EXPR, used_sid);
- if (eq.is_true ())
- {
- if (logger)
- logger->log ("rejecting leak of sv%i due to equality with sv%i",
- dst_sid.as_int (), used_sid.as_int ());
- return;
- }
+ if (leaked_tree)
+ logger->log ("best leaked_tree: %qE", leaked_tree);
+ else
+ logger->log ("best leaked_tree: NULL");
}
- /* SID has leaked within the new state: no regions use it.
- We need to convert it back to a tree, but since no regions use it, we
- have to use MAP to convert it back to an svalue_id within the old state.
- We can then look that svalue_id up to locate regions and thus tree(s)
- that use it. */
-
- svalue_id old_sid = map.get_src_for_dst (dst_sid);
-
- auto_vec<path_var> leaked_pvs;
- m_old_state->m_region_model->get_path_vars_for_svalue (old_sid, &leaked_pvs);
-
- if (leaked_pvs.length () < 1)
- return;
-
- /* Find "best" leaked tree.
- Sort the leaks into most human-readable first, through
- to least user-readable. Given that we only emit one
- leak per EC, this ought to ensure that we pick the most
- user-readable description of each leaking EC.
- This assumes that all vars in the EC have the same state. */
- leaked_pvs.qsort (readability_comparator);
-
- tree leaked_tree = leaked_pvs[0].m_tree;
- if (logger)
- logger->log ("best leaked_tree: %qE", leaked_tree);
-
leak_stmt_finder stmt_finder (*m_eg, leaked_tree);
- impl_sm_context sm_ctxt (*m_eg, sm_idx, sm, m_enode_for_diag,
- m_old_state, m_new_state,
- m_change,
- m_old_state->m_checker_states[sm_idx],
- m_new_state->m_checker_states[sm_idx],
- &stmt_finder);
gcc_assert (m_enode_for_diag);
/* Don't complain about leaks when returning from "main". */
&& m_enode_for_diag->get_supernode ()->return_p ())
{
tree fndecl = m_enode_for_diag->get_function ()->decl;
- if (0 == strcmp (IDENTIFIER_POINTER (DECL_NAME (fndecl)), "main"))
+ if (id_equal (DECL_NAME (fndecl), "main"))
{
if (logger)
logger->log ("not reporting leak from main");
m_eg->get_diagnostic_manager ().add_diagnostic
(&sm, m_enode_for_diag, m_enode_for_diag->get_supernode (),
m_stmt, &stmt_finder,
- leaked_tree, state, pd);
-}
-
-/* Implementation of region_model_context::on_inherited_svalue vfunc
- for impl_region_model_context.
- Notify all checkers that CHILD_SID has been created from PARENT_SID,
- so that those state machines that inherit state can propagate the state
- from parent to child. */
-
-void
-impl_region_model_context::on_inherited_svalue (svalue_id parent_sid,
- svalue_id child_sid)
-{
- if (!m_new_state)
- return;
-
- int sm_idx;
- sm_state_map *smap;
- FOR_EACH_VEC_ELT (m_new_state->m_checker_states, sm_idx, smap)
- {
- const state_machine &sm = m_ext_state.get_sm (sm_idx);
- if (sm.inherited_state_p ())
- smap->on_inherited_svalue (parent_sid, child_sid);
- }
-}
-
-/* Implementation of region_model_context::on_cast vfunc
- for impl_region_model_context.
- Notify all checkers that DST_SID is a cast of SRC_SID, so that sm-state
- can be propagated from src to dst. */
-
-void
-impl_region_model_context::on_cast (svalue_id src_sid,
- svalue_id dst_sid)
-{
- if (!m_new_state)
- return;
-
- int sm_idx;
- sm_state_map *smap;
- FOR_EACH_VEC_ELT (m_new_state->m_checker_states, sm_idx, smap)
- smap->on_cast (src_sid, dst_sid);
+ leaked_tree, sval, state, pd);
}
/* Implementation of region_model_context::on_condition vfunc.
const state_machine &sm = m_ext_state.get_sm (sm_idx);
impl_sm_context sm_ctxt (*m_eg, sm_idx, sm, m_enode_for_diag,
m_old_state, m_new_state,
- m_change,
m_old_state->m_checker_states[sm_idx],
m_new_state->m_checker_states[sm_idx]);
sm.on_condition (&sm_ctxt,
const state_machine &sm = m_ext_state.get_sm (sm_idx);
impl_sm_context sm_ctxt (*m_eg, sm_idx, sm, m_enode_for_diag,
m_old_state, m_new_state,
- m_change,
m_old_state->m_checker_states[sm_idx],
m_new_state->m_checker_states[sm_idx]);
sm.on_phi (&sm_ctxt, m_enode_for_diag->get_supernode (), phi, rhs);
logger * const logger = get_logger ();
if (logger)
logger->log ("unhandled tree code: %qs in %qs at %s:%i",
- t ? get_tree_code_name (TREE_CODE (t)) : "(null)",
+ get_tree_code_name (TREE_CODE (t)),
loc.get_impl_location ().m_function,
loc.get_impl_location ().m_file,
loc.get_impl_location ().m_line);
== m_state.m_region_model->get_stack_depth ());
/* Check the functions in the callstring vs those in the frames
at each depth. */
- for (int depth = 0; depth < m_point.get_stack_depth (); ++depth)
+ for (const frame_region *iter_frame
+ = m_state.m_region_model->get_current_frame ();
+ iter_frame; iter_frame = iter_frame->get_calling_frame ())
{
- gcc_assert (m_point.get_function_at_depth (depth)
- == m_state.m_region_model->get_function_at_depth (depth));
+ int index = iter_frame->get_index ();
+ gcc_assert (m_point.get_function_at_depth (index)
+ == iter_frame->get_function ());
}
}
}
}
+/* struct eg_traits::dump_args_t. */
+
+/* The <FILENAME>.eg.dot output can quickly become unwieldy if we show
+ full details for all enodes (both in terms of CPU time to render it,
+ and in terms of being meaningful to a human viewing it).
+
+ If we show just the IDs then the resulting graph is usually viewable,
+ but then we have to keep switching back and forth between the .dot
+ view and other dumps.
+
+ This function implements a heuristic for showing detail at the enodes
+ that (we hope) matter, and just the ID at other enodes, fixing the CPU
+ usage of the .dot viewer, and drawing the attention of the viewer
+ to these enodes.
+
+ Return true if ENODE should be shown in detail in .dot output.
+ Return false if no detail should be shown for ENODE. */
+
+bool
+eg_traits::dump_args_t::show_enode_details_p (const exploded_node &enode) const
+{
+ /* If the number of exploded nodes isn't too large, we may as well show
+ all enodes in full detail in the .dot output. */
+ if (m_eg.m_nodes.length ()
+ <= (unsigned) param_analyzer_max_enodes_for_full_dump)
+ return true;
+
+ /* Otherwise, assume that what's most interesting are state explosions,
+ and thus the places where this happened.
+ Expand enodes at program points where we hit the per-enode limit, so we
+ can investigate what exploded. */
+ const per_program_point_data *per_point_data
+ = m_eg.get_per_program_point_data (enode.get_point ());
+ return per_point_data->m_excess_enodes > 0;
+}
+
/* class exploded_node : public dnode<eg_traits>. */
/* exploded_node's ctor. */
exploded_node::exploded_node (const point_and_state &ps,
int index)
-: m_ps (ps), m_status (STATUS_WORKLIST), m_index (index)
+: m_ps (ps), m_status (STATUS_WORKLIST), m_index (index),
+ m_num_processed_stmts (0)
{
gcc_checking_assert (ps.get_state ().m_region_model->canonicalized_p ());
}
+/* Get the stmt that was processed in this enode at index IDX.
+ IDX is an index within the stmts processed at this enode, rather
+ than within those of the supernode. */
+
+const gimple *
+exploded_node::get_processed_stmt (unsigned idx) const
+{
+ gcc_assert (idx < m_num_processed_stmts);
+ const program_point &point = get_point ();
+ gcc_assert (point.get_kind () == PK_BEFORE_STMT);
+ const supernode *snode = get_supernode ();
+ const unsigned int point_stmt_idx = point.get_stmt_idx ();
+ const unsigned int idx_within_snode = point_stmt_idx + idx;
+ const gimple *stmt = snode->m_stmts[idx_within_snode];
+ return stmt;
+}
+
/* For use by dump_dot, get a value for the .dot "fillcolor" attribute.
Colorize by sm-state, to make it easier to see how sm-state propagates
through the exploded_graph. */
FOR_EACH_VEC_ELT (state.m_checker_states, i, smap)
{
for (sm_state_map::iterator_t iter = smap->begin ();
- iter != smap->end ();
+ iter != smap->end ();
++iter)
total_sm_state += (*iter).second.m_state;
total_sm_state += smap->get_global_state ();
{
/* An arbitrarily-picked collection of light colors. */
const char * const colors[]
- = {"azure", "coral", "cornsilk", "lightblue", "yellow"};
+ = {"azure", "coral", "cornsilk", "lightblue", "yellow",
+ "honeydew", "lightpink", "lightsalmon", "palegreen1",
+ "wheat", "seashell"};
const int num_colors = sizeof (colors) / sizeof (colors[0]);
return colors[total_sm_state % num_colors];
}
pp_string (pp, " (merger)");
pp_newline (pp);
- format f (true);
- m_ps.get_point ().print (pp, f);
- pp_newline (pp);
+ if (args.show_enode_details_p (*this))
+ {
+ format f (true);
+ m_ps.get_point ().print (pp, f);
+ pp_newline (pp);
- const extrinsic_state &ext_state = args.m_eg.get_ext_state ();
- const program_state &state = m_ps.get_state ();
- state.dump_to_pp (ext_state, true, pp);
- pp_newline (pp);
+ const extrinsic_state &ext_state = args.m_eg.get_ext_state ();
+ const program_state &state = m_ps.get_state ();
+ state.dump_to_pp (ext_state, false, true, pp);
+ pp_newline (pp);
- {
- int i;
- sm_state_map *smap;
- FOR_EACH_VEC_ELT (state.m_checker_states, i, smap)
- {
- if (!smap->is_empty_p ())
- {
- pp_printf (pp, "%s: ", ext_state.get_name (i));
- smap->print (ext_state.get_sm (i), state.m_region_model, pp);
- pp_newline (pp);
- }
- }
- }
+ /* Show any stmts that were processed within this enode,
+ and their index within the supernode. */
+ if (m_num_processed_stmts > 0)
+ {
+ const program_point &point = get_point ();
+ gcc_assert (point.get_kind () == PK_BEFORE_STMT);
+ const supernode *snode = get_supernode ();
+ const unsigned int point_stmt_idx = point.get_stmt_idx ();
+
+ pp_printf (pp, "stmts: %i", m_num_processed_stmts);
+ pp_newline (pp);
+ for (unsigned i = 0; i < m_num_processed_stmts; i++)
+ {
+ const unsigned int idx_within_snode = point_stmt_idx + i;
+ const gimple *stmt = snode->m_stmts[idx_within_snode];
+ pp_printf (pp, " %i: ", idx_within_snode);
+ pp_gimple_stmt_1 (pp, stmt, 0, (dump_flags_t)0);
+ pp_newline (pp);
+ }
+ }
+ }
/* Dump any saved_diagnostics at this enode. */
{
m_ps.get_point ().print (pp, f);
pp_newline (pp);
- m_ps.get_state ().dump_to_pp (ext_state, false, pp);
+ m_ps.get_state ().dump_to_pp (ext_state, false, true, pp);
pp_newline (pp);
}
exploded_node::on_stmt (exploded_graph &eg,
const supernode *snode,
const gimple *stmt,
- program_state *state,
- state_change *change) const
+ program_state *state) const
{
+ logger *logger = eg.get_logger ();
+ LOG_SCOPE (logger);
+ if (logger)
+ {
+ logger->start_log_line ();
+ pp_gimple_stmt_1 (logger->get_printer (), stmt, 0, (dump_flags_t)0);
+ logger->end_log_line ();
+ }
+
+ /* Update input_location in case of ICE: make it easier to track down which
+ source construct we're failing to handle. */
+ input_location = stmt->location;
+
+ gcc_assert (state->m_region_model);
+
/* Preserve the old state. It is used here for looking
up old checker states, for determining state transitions, and
also within impl_region_model_context and impl_sm_context for
const program_state old_state (*state);
impl_region_model_context ctxt (eg, this,
- &old_state, state, change,
+ &old_state, state,
stmt);
if (const gassign *assign = dyn_cast <const gassign *> (stmt))
if (const gcall *call = dyn_cast <const gcall *> (stmt))
{
/* Debugging/test support. */
- if (is_special_named_call_p (call, "__analyzer_dump", 0))
+ if (is_special_named_call_p (call, "__analyzer_describe", 2))
+ state->m_region_model->impl_call_analyzer_describe (call, &ctxt);
+ else if (is_special_named_call_p (call, "__analyzer_dump", 0))
{
/* Handle the builtin "__analyzer_dump" by dumping state
to stderr. */
- dump (eg.get_ext_state ());
+ state->dump (eg.get_ext_state (), true);
}
else if (is_special_named_call_p (call, "__analyzer_dump_path", 0))
{
state->m_region_model->dump (false);
}
else if (is_special_named_call_p (call, "__analyzer_eval", 1))
- {
- /* Handle the builtin "__analyzer_eval" by evaluating the input
- and dumping as a dummy warning, so that test cases can use
- dg-warning to validate the result (and so unexpected warnings will
- lead to DejaGnu failures). */
- tree t_arg = gimple_call_arg (call, 0);
- tristate t
- = state->m_region_model->eval_condition (t_arg,
- NE_EXPR,
- integer_zero_node,
- &ctxt);
- warning_at (call->location, 0, "%s", t.as_string ());
- }
+ state->m_region_model->impl_call_analyzer_eval (call, &ctxt);
else if (is_special_named_call_p (call, "__analyzer_break", 0))
{
/* Handle the builtin "__analyzer_break" by triggering a
= old_state.m_checker_states[sm_idx];
sm_state_map *new_smap = state->m_checker_states[sm_idx];
impl_sm_context sm_ctxt (eg, sm_idx, sm, this, &old_state, state,
- change,
old_smap, new_smap);
/* Allow the state_machine to handle the stmt. */
if (sm.on_stmt (&sm_ctxt, snode, stmt))
unknown_side_effects = false;
- else
- {
- /* For those stmts that were not handled by the state machine. */
- if (const gcall *call = dyn_cast <const gcall *> (stmt))
- {
- tree callee_fndecl
- = state->m_region_model->get_fndecl_for_call (call, &ctxt);
-
- if (!fndecl_has_gimple_body_p (callee_fndecl))
- new_smap->purge_for_unknown_fncall (eg, sm, call, callee_fndecl,
- state->m_region_model,
- &ctxt);
- }
- }
if (*old_smap != *new_smap)
any_sm_changes = true;
}
exploded_node::on_edge (exploded_graph &eg,
const superedge *succ,
program_point *next_point,
- program_state *next_state,
- state_change *change) const
+ program_state *next_state) const
{
LOG_FUNC (eg.get_logger ());
if (!next_point->on_edge (eg, succ))
return false;
- if (!next_state->on_edge (eg, *this, succ, change))
+ if (!next_state->on_edge (eg, *this, succ))
return false;
return true;
tree buf_ptr = gimple_call_arg (longjmp_call, 0);
region_model *new_region_model = new_state->m_region_model;
- region_id buf_rid = new_region_model->deref_rvalue (buf_ptr, ctxt);
- region *buf = new_region_model->get_region (buf_rid);
- if (!buf)
- return;
+ const svalue *buf_ptr_sval = new_region_model->get_rvalue (buf_ptr, ctxt);
+ const region *buf = new_region_model->deref_rvalue (buf_ptr_sval, buf_ptr,
+ ctxt);
- svalue_id buf_content_sid
- = buf->get_value (*new_region_model, false, ctxt);
- svalue *buf_content_sval = new_region_model->get_svalue (buf_content_sid);
- if (!buf_content_sval)
- return;
- setjmp_svalue *setjmp_sval = buf_content_sval->dyn_cast_setjmp_svalue ();
+ const svalue *buf_content_sval = new_region_model->get_store_value (buf);
+ const setjmp_svalue *setjmp_sval
+ = buf_content_sval->dyn_cast_setjmp_svalue ();
if (!setjmp_sval)
return;
new_region_model->on_longjmp (longjmp_call, setjmp_call,
setjmp_point.get_stack_depth (), ctxt);
+ /* Detect leaks in the new state relative to the old state. */
+ program_state::detect_leaks (get_state (), *new_state, NULL,
+ eg.get_ext_state (), ctxt);
+
program_point next_point
= program_point::after_supernode (setjmp_point.get_supernode (),
setjmp_point.get_call_string ());
- state_change change;
- exploded_node *next = eg.get_or_create_node (next_point, *new_state, &change);
+ exploded_node *next
+ = eg.get_or_create_node (next_point, *new_state, this);
/* Create custom exploded_edge for a longjmp. */
if (next)
{
exploded_edge *eedge
= eg.add_edge (const_cast<exploded_node *> (this), next, NULL,
- change,
new rewind_info_t (tmp_setjmp_record, longjmp_call));
/* For any diagnostics that were queued here (such as leaks) we want
gcc_assert (new_state.m_region_model);
- purge_stats stats;
impl_region_model_context ctxt (eg, this,
&old_state, &new_state,
- NULL,
get_stmt ());
- new_state.m_region_model->pop_frame (region_id::null (),
- true, &stats, &ctxt);
+ const svalue *result = NULL;
+ new_state.m_region_model->pop_frame (NULL, &result, &ctxt);
+ program_state::detect_leaks (old_state, new_state, result,
+ eg.get_ext_state (), &ctxt);
}
/* Dump the successors and predecessors of this enode to OUTF. */
/* exploded_edge's ctor. */
exploded_edge::exploded_edge (exploded_node *src, exploded_node *dest,
- const extrinsic_state &ext_state,
const superedge *sedge,
- const state_change &change,
custom_info_t *custom_info)
-: dedge<eg_traits> (src, dest), m_sedge (sedge), m_change (change),
+: dedge<eg_traits> (src, dest), m_sedge (sedge),
m_custom_info (custom_info)
{
- change.validate (dest->get_state (), ext_state);
}
/* exploded_edge's dtor. */
Use the label of the underlying superedge, if any. */
void
-exploded_edge::dump_dot (graphviz_out *gv, const dump_args_t &args) const
+exploded_edge::dump_dot (graphviz_out *gv, const dump_args_t &) const
{
pretty_printer *pp = gv->get_pp ();
else if (m_custom_info)
m_custom_info->print (pp);
- m_change.dump (pp, args.m_eg.get_ext_state ());
//pp_write_text_as_dot_label_to_stream (pp, /*for_record=*/false);
pp_printf (pp, "\"];\n");
m_ext_state (ext_state),
m_purge_map (purge_map),
m_plan (plan),
- m_diagnostic_manager (logger, verbosity),
+ m_diagnostic_manager (logger, ext_state.get_engine (), verbosity),
m_global_stats (m_sg.num_nodes ()),
m_functionless_stats (m_sg.num_nodes ()),
m_PK_AFTER_SUPERNODE_per_snode (m_sg.num_nodes ())
{
- m_origin = get_or_create_node (program_point (function_point (NULL, NULL,
- 0, PK_ORIGIN),
- call_string ()),
+ m_origin = get_or_create_node (program_point::origin (),
program_state (ext_state), NULL);
for (int i = 0; i < m_sg.num_nodes (); i++)
m_PK_AFTER_SUPERNODE_per_snode.quick_push (i);
{
program_point point = program_point::from_function_entry (m_sg, fun);
program_state state (m_ext_state);
- impl_region_model_context ctxt (&state, NULL, m_ext_state, get_logger ());
- state.m_region_model->push_frame (fun, NULL, &ctxt);
+ state.push_frame (m_ext_state, fun);
if (!state.m_valid)
return NULL;
exploded_node *enode = get_or_create_node (point, state, NULL);
/* We should never fail to add such a node. */
gcc_assert (enode);
- state_change change;
- add_edge (m_origin, enode, NULL, change);
+ add_edge (m_origin, enode, NULL);
return enode;
}
/* Get or create an exploded_node for (POINT, STATE).
If a new node is created, it is added to the worklist.
- If CHANGE is non-NULL, use it to suppress some purging of state,
- to make generation of state_change_event instances easier. */
+
+ Use ENODE_FOR_DIAG, a pre-existing enode, for any diagnostics
+ that need to be emitted (e.g. when purging state *before* we have
+ a new enode). */
exploded_node *
exploded_graph::get_or_create_node (const program_point &point,
const program_state &state,
- state_change *change)
+ const exploded_node *enode_for_diag)
{
logger * const logger = get_logger ();
LOG_FUNC (logger);
logger->end_log_line ();
logger->start_log_line ();
pp_string (pp, "state: ");
- state.dump_to_pp (m_ext_state, true, pp);
+ state.dump_to_pp (m_ext_state, true, false, pp);
logger->end_log_line ();
}
/* Prune state to try to improve the chances of a cache hit,
avoiding generating redundant nodes. */
- program_state pruned_state = state.prune_for_point (*this, point, change);
+ program_state pruned_state
+ = state.prune_for_point (*this, point, enode_for_diag);
pruned_state.validate (get_ext_state ());
pretty_printer *pp = logger->get_printer ();
logger->start_log_line ();
pp_string (pp, "pruned_state: ");
- pruned_state.dump_to_pp (m_ext_state, true, pp);
+ pruned_state.dump_to_pp (m_ext_state, true, false, pp);
logger->end_log_line ();
- pruned_state.m_region_model->dump_to_pp (logger->get_printer (), true);
+ pruned_state.m_region_model->dump_to_pp (logger->get_printer (), true,
+ false);
}
stats *per_fn_stats = get_or_create_function_stats (point.get_function ());
/* This merges successfully within the loop. */
program_state merged_state (m_ext_state);
- if (pruned_state.can_merge_with_p (existing_state, m_ext_state,
+ if (pruned_state.can_merge_with_p (existing_state, point,
&merged_state))
{
if (logger)
relationship to those of the input state, and thus to those
of CHANGE, so we must purge any svalue_ids from *CHANGE. */
ps.set_state (merged_state);
- if (change)
- change->on_svalue_purge (svalue_id::from_int (0));
if (exploded_node **slot = m_point_and_state_to_node.get (&ps))
{
if ((int)per_point_data->m_enodes.length ()
> param_analyzer_max_enodes_per_program_point)
{
+ pretty_printer pp;
+ print_enode_indices (&pp, per_point_data->m_enodes);
if (logger)
- logger->log ("not creating enode; too many at program point");
+ logger->log ("not creating enode; too many at program point: %s",
+ pp_formatted_text (&pp));
warning_at (point.get_location (), OPT_Wanalyzer_too_complex,
- "terminating analysis for this program point");
+ "terminating analysis for this program point: %s",
+ pp_formatted_text (&pp));
per_point_data->m_excess_enodes++;
return NULL;
}
logger->end_log_line ();
logger->start_log_line ();
pp_string (pp, "pruned_state: ");
- pruned_state.dump_to_pp (m_ext_state, true, pp);
+ pruned_state.dump_to_pp (m_ext_state, true, false, pp);
logger->end_log_line ();
}
exploded_edge *
exploded_graph::add_edge (exploded_node *src, exploded_node *dest,
const superedge *sedge,
- const state_change &change,
exploded_edge::custom_info_t *custom_info)
{
- exploded_edge *e = new exploded_edge (src, dest, m_ext_state,
- sedge, change, custom_info);
+ if (get_logger ())
+ get_logger ()->log ("creating edge EN: %i -> EN: %i",
+ src->m_index, dest->m_index);
+ exploded_edge *e = new exploded_edge (src, dest, sedge, custom_info);
digraph<eg_traits>::add_edge (e);
return e;
}
return per_point_data;
}
+/* Get this graph's per-program-point-data for POINT if there is any,
+ otherwise NULL. */
+
+per_program_point_data *
+exploded_graph::get_per_program_point_data (const program_point &point) const
+{
+ if (per_program_point_data **slot
+ = const_cast <point_map_t &> (m_per_point_data).get (&point))
+ return *slot;
+
+ return NULL;
+}
+
/* Ensure that this graph has per-call_string-data for CS;
borrow a pointer to it. */
if (node->get_point () == node_2->get_point ())
{
+ const program_point &point = node->get_point ();
if (logger)
{
format f (false);
logger->log_partial
("got potential merge EN: %i and EN: %i at ",
node->m_index, node_2->m_index);
- node->get_point ().print (pp, f);
+ point.print (pp, f);
logger->end_log_line ();
}
-
const program_state &state = node->get_state ();
const program_state &state_2 = node_2->get_state ();
gcc_assert (state != state_2);
program_state merged_state (m_ext_state);
- state_change change;
- if (state.can_merge_with_p (state_2, m_ext_state,
- &merged_state))
+ if (state.can_merge_with_p (state_2, point, &merged_state))
{
if (logger)
logger->log ("merging EN: %i and EN: %i",
if (merged_state == state)
{
/* Then merge node_2 into node by adding an edge. */
- add_edge (node_2, node, NULL, change);
+ add_edge (node_2, node, NULL);
/* Remove node_2 from the worklist. */
m_worklist.take_next ();
/* Then merge node into node_2, and leave node_2
in the worklist, to be processed on the next
iteration. */
- add_edge (node, node_2, NULL, change);
+ add_edge (node, node_2, NULL);
node->set_status (exploded_node::STATUS_MERGER);
continue;
}
states, adding to the worklist. */
exploded_node *merged_enode
= get_or_create_node (node->get_point (),
- merged_state, &change);
+ merged_state, node);
if (merged_enode == NULL)
continue;
m_worklist.add_node (merged_enode);
else
{
- add_edge (node, merged_enode, NULL, change);
+ add_edge (node, merged_enode, NULL);
node->set_status (exploded_node::STATUS_MERGER);
}
m_worklist.add_node (merged_enode);
else
{
- add_edge (node_2, merged_enode, NULL, change);
+ add_edge (node_2, merged_enode, NULL);
node_2->set_status (exploded_node::STATUS_MERGER);
}
pp_string (pp, "point: ");
point.print (pp, format (false));
pp_string (pp, ", state: ");
- state.dump_to_pp (m_ext_state, true, pp);
+ state.dump_to_pp (m_ext_state, true, false, pp);
logger->end_log_line ();
}
case PK_BEFORE_SUPERNODE:
{
program_state next_state (state);
- state_change change;
if (point.get_from_edge ())
{
impl_region_model_context ctxt (*this, node,
- &state, &next_state, &change,
- NULL);
+ &state, &next_state, NULL);
const cfg_superedge *last_cfg_superedge
= point.get_from_edge ()->dyn_cast_cfg_superedge ();
if (last_cfg_superedge)
= program_point::before_stmt (point.get_supernode (), 0,
point.get_call_string ());
exploded_node *next
- = get_or_create_node (next_point, next_state, &change);
+ = get_or_create_node (next_point, next_state, node);
if (next)
- add_edge (node, next, NULL, change);
+ add_edge (node, next, NULL);
}
else
{
= program_point::after_supernode (point.get_supernode (),
point.get_call_string ());
exploded_node *next = get_or_create_node (next_point, next_state,
- &change);
+ node);
if (next)
- add_edge (node, next, NULL, change);
+ add_edge (node, next, NULL);
}
}
break;
their enode (for which stmt_requires_new_enode_p returns true)
Update next_state in-place, to get the result of the one
- or more stmts that are processed. */
+ or more stmts that are processed.
+
+ Split the node in-place if an sm-state-change occurs, so that
+ the sm-state-change occurs on an edge where the src enode has
+ exactly one stmt, the one that caused the change. */
program_state next_state (state);
- state_change change;
const supernode *snode = point.get_supernode ();
unsigned stmt_idx;
const gimple *prev_stmt = NULL;
}
prev_stmt = stmt;
+ program_state old_state (next_state);
+
/* Process the stmt. */
exploded_node::on_stmt_flags flags
- = node->on_stmt (*this, snode, stmt, &next_state, &change);
+ = node->on_stmt (*this, snode, stmt, &next_state);
+ node->m_num_processed_stmts++;
/* If flags.m_terminate_path, stop analyzing; any nodes/edges
will have been added by on_stmt (e.g. for handling longjmp). */
if (flags.m_terminate_path)
return;
+ if (next_state.m_region_model)
+ {
+ impl_region_model_context ctxt (*this, node,
+ &old_state, &next_state, stmt);
+ program_state::detect_leaks (old_state, next_state, NULL,
+ get_ext_state (), &ctxt);
+ }
+
+ unsigned next_idx = stmt_idx + 1;
+ program_point next_point
+ = (next_idx < point.get_supernode ()->m_stmts.length ()
+ ? program_point::before_stmt (point.get_supernode (), next_idx,
+ point.get_call_string ())
+ : program_point::after_supernode (point.get_supernode (),
+ point.get_call_string ()));
+ next_state = next_state.prune_for_point (*this, next_point, node);
+
if (flags.m_sm_changes || flag_analyzer_fine_grained)
- break;
+ {
+ program_point split_point
+ = program_point::before_stmt (point.get_supernode (),
+ stmt_idx,
+ point.get_call_string ());
+ if (split_point != node->get_point ())
+ {
+ /* If we're not at the start of NODE, split the enode at
+ this stmt, so we have:
+ node -> split_enode
+ so that when split_enode is processed the next edge
+ we add will be:
+ split_enode -> next
+ and any state change will effectively occur on that
+ latter edge, and split_enode will contain just stmt. */
+ if (logger)
+ logger->log ("getting split_enode");
+ exploded_node *split_enode
+ = get_or_create_node (split_point, old_state, node);
+ if (!split_enode)
+ return;
+ /* "stmt" will be reprocessed when split_enode is
+ processed. */
+ node->m_num_processed_stmts--;
+ if (logger)
+ logger->log ("creating edge to split_enode");
+ add_edge (node, split_enode, NULL);
+ return;
+ }
+ else
+ /* If we're at the start of NODE, stop iterating,
+ so that an edge will be created from NODE to
+ (next_point, next_state) below. */
+ break;
+ }
}
unsigned next_idx = stmt_idx + 1;
program_point next_point
point.get_call_string ())
: program_point::after_supernode (point.get_supernode (),
point.get_call_string ()));
- exploded_node *next = get_or_create_node (next_point,
- next_state, &change);
+ exploded_node *next = get_or_create_node (next_point, next_state, node);
if (next)
- add_edge (node, next, NULL, change);
+ add_edge (node, next, NULL);
}
break;
case PK_AFTER_SUPERNODE:
logger->log_partial
("would create function summary for %qE; state: ",
point.get_fndecl ());
- state.dump_to_pp (m_ext_state, true, pp);
+ state.dump_to_pp (m_ext_state, true, false, pp);
logger->end_log_line ();
}
per_function_data *per_fn_data
logger->log ("considering SN: %i -> SN: %i",
succ->m_src->m_index, succ->m_dest->m_index);
- state_change change;
-
program_point next_point
= program_point::before_supernode (succ->m_dest, succ,
point.get_call_string ());
program_state next_state (state);
- if (!node->on_edge (*this, succ, &next_point, &next_state, &change))
+ if (!node->on_edge (*this, succ, &next_point, &next_state))
{
if (logger)
logger->log ("skipping impossible edge to SN: %i",
}
exploded_node *next = get_or_create_node (next_point, next_state,
- &change);
+ node);
if (next)
- add_edge (node, next, succ, change);
+ add_edge (node, next, succ);
}
}
break;
LOG_SCOPE (logger);
+ m_ext_state.get_engine ()->log_stats (logger);
+
logger->log ("m_sg.num_nodes (): %i", m_sg.num_nodes ());
logger->log ("m_nodes.length (): %i", m_nodes.length ());
logger->log ("m_edges.length (): %i", m_edges.length ());
{
pretty_printer pp;
pp_format_decoder (&pp) = default_tree_printer;
- enode->get_state ().dump_to_pp (m_ext_state, true, &pp);
+ enode->get_state ().dump_to_pp (m_ext_state, true, false, &pp);
fprintf (out, "state %i: EN: %i\n %s\n",
state_idx++, enode->m_index,
pp_formatted_text (&pp));
feasibility_problem to *OUT. */
bool
-exploded_path::feasible_p (logger *logger, feasibility_problem **out) const
+exploded_path::feasible_p (logger *logger, feasibility_problem **out,
+ engine *eng, const exploded_graph *eg) const
{
LOG_SCOPE (logger);
+ auto_sbitmap snodes_visited (eg->get_supergraph ().m_nodes.length ());
+
/* Traverse the path, updating this model. */
- region_model model;
- for (unsigned i = 0; i < m_edges.length (); i++)
+ region_model model (eng->get_model_manager ());
+ for (unsigned edge_idx = 0; edge_idx < m_edges.length (); edge_idx++)
{
- const exploded_edge *eedge = m_edges[i];
+ const exploded_edge *eedge = m_edges[edge_idx];
if (logger)
logger->log ("considering edge %i: EN:%i -> EN:%i",
- i,
+ edge_idx,
eedge->m_src->m_index,
eedge->m_dest->m_index);
const exploded_node &src_enode = *eedge->m_src;
logger->end_log_line ();
}
- if (const gimple *stmt = src_point.get_stmt ())
+ /* Update state for the stmts that were processed in each enode. */
+ for (unsigned stmt_idx = 0; stmt_idx < src_enode.m_num_processed_stmts;
+ stmt_idx++)
{
+ const gimple *stmt = src_enode.get_processed_stmt (stmt_idx);
+
/* Update cfun and input_location in case of ICE: make it easier to
track down which source construct we're failing to handle. */
auto_cfun sentinel (src_point.get_function ());
if (logger)
{
logger->log ("rejecting due to region model");
- model.dump_to_pp (logger->get_printer (), false);
+ model.dump_to_pp (logger->get_printer (), true, false);
}
if (out)
- *out = new feasibility_problem (i, model, *eedge, last_stmt);
+ *out = new feasibility_problem (edge_idx, model, *eedge,
+ last_stmt);
return false;
}
}
{
/* Special-case the initial eedge from the origin node to the
initial function by pushing a frame for it. */
- if (i == 0)
+ if (edge_idx == 0)
{
gcc_assert (eedge->m_src->m_index == 0);
gcc_assert (src_point.get_kind () == PK_ORIGIN);
logger->log (" pushing frame for %qD", fun->decl);
}
else if (eedge->m_custom_info)
- eedge->m_custom_info->update_model (&model, *eedge);
+ {
+ eedge->m_custom_info->update_model (&model, *eedge);
+ }
}
/* Handle phi nodes on an edge leaving a PK_BEFORE_SUPERNODE (to
{
const cfg_superedge *last_cfg_superedge
= src_point.get_from_edge ()->dyn_cast_cfg_superedge ();
+ const exploded_node &dst_enode = *eedge->m_dest;
+ const unsigned dst_snode_idx = dst_enode.get_supernode ()->m_index;
if (last_cfg_superedge)
{
if (logger)
model.update_for_phis (src_enode.get_supernode (),
last_cfg_superedge,
NULL);
+	      /* If we're entering an snode that we've already visited on this
+		 epath, then we need to fix things up for loops; see the
+		 comment for store::loop_replay_fixup.
+		 Perhaps we should also verify the callstring,
+		 and track program_points, but hopefully doing it by supernode
+		 is good enough.  */
+ if (bitmap_bit_p (snodes_visited, dst_snode_idx))
+ model.loop_replay_fixup (dst_enode.get_state ().m_region_model);
}
+ bitmap_set_bit (snodes_visited, dst_snode_idx);
}
if (logger)
{
logger->log ("state after edge %i: EN:%i -> EN:%i",
- i,
+ edge_idx,
eedge->m_src->m_index,
eedge->m_dest->m_index);
logger->start_log_line ();
- model.dump_to_pp (logger->get_printer (), true);
+ model.dump_to_pp (logger->get_printer (), true, false);
logger->end_log_line ();
}
}
{
pretty_printer pp;
pp_format_decoder (&pp) = default_tree_printer;
- m_enode->get_state ().dump_to_pp (m_ext_state, true, &pp);
+ m_enode->get_state ().dump_to_pp (m_ext_state, true, false, &pp);
return make_label_text (false, "EN: %i: %s",
m_enode->m_index, pp_formatted_text (&pp));
}
pretty_printer pp;
enode->get_point ().print (&pp, format (true));
fprintf (outf, "%s\n", pp_formatted_text (&pp));
- enode->get_state ().dump_to_file (m_ext_state, false, outf);
+ enode->get_state ().dump_to_file (m_ext_state, false, true, outf);
}
fclose (outf);
pretty_printer pp;
enode->get_point ().print (&pp, format (true));
fprintf (outf, "%s\n", pp_formatted_text (&pp));
- enode->get_state ().dump_to_file (m_ext_state, false, outf);
+ enode->get_state ().dump_to_file (m_ext_state, false, true, outf);
fclose (outf);
}
}
}
+DEBUG_FUNCTION exploded_node *
+exploded_graph::get_node_by_index (int idx) const
+{
+ exploded_node *enode = m_nodes[idx];
+ gcc_assert (enode->m_index == idx);
+ return enode;
+}
+
/* A collection of classes for visualizing the callgraph in .dot form
(as represented in the supergraph). */
FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (node)
node->get_untransformed_body ();
+ engine eng;
+
/* Create the supergraph. */
supergraph sg (logger);
}
/* Extrinsic state shared by nodes in the graph. */
- const extrinsic_state ext_state (checkers);
+ const extrinsic_state ext_state (checkers, logger, &eng);
const analysis_plan plan (sg, logger);
old state, rather than the new? */
const program_state *old_state,
program_state *new_state,
- state_change *change,
const gimple *stmt,
stmt_finder *stmt_finder = NULL);
impl_region_model_context (program_state *state,
- state_change *change,
const extrinsic_state &ext_state,
logger *logger = NULL);
void warn (pending_diagnostic *d) FINAL OVERRIDE;
-
- void remap_svalue_ids (const svalue_id_map &map) FINAL OVERRIDE;
-
- int on_svalue_purge (svalue_id first_unused_sid,
- const svalue_id_map &map) FINAL OVERRIDE;
-
+ void on_svalue_leak (const svalue *) OVERRIDE;
+ void on_liveness_change (const svalue_set &live_svalues,
+ const region_model *model) FINAL OVERRIDE;
logger *get_logger () FINAL OVERRIDE
{
return m_logger.get_logger ();
}
void on_state_leak (const state_machine &sm,
- int sm_idx,
- svalue_id sid,
- svalue_id first_unused_sid,
- const svalue_id_map &map,
+ const svalue *sval,
state_machine::state_t state);
- void on_inherited_svalue (svalue_id parent_sid,
- svalue_id child_sid) FINAL OVERRIDE;
-
- void on_cast (svalue_id src_sid,
- svalue_id dst_sid) FINAL OVERRIDE;
-
void on_condition (tree lhs, enum tree_code op, tree rhs) FINAL OVERRIDE;
- void on_unknown_change (svalue_id sid ATTRIBUTE_UNUSED) FINAL OVERRIDE;
+ void on_unknown_change (const svalue *sval, bool is_mutable) FINAL OVERRIDE;
void on_phi (const gphi *phi, tree rhs) FINAL OVERRIDE;
const exploded_node *m_enode_for_diag;
const program_state *m_old_state;
program_state *m_new_state;
- state_change *m_change;
const gimple *m_stmt;
stmt_finder *m_stmt_finder;
const extrinsic_state &m_ext_state;
struct dump_args_t
{
dump_args_t (const exploded_graph &eg) : m_eg (eg) {}
+
+ bool show_enode_details_p (const exploded_node &enode) const;
+
const exploded_graph &m_eg;
};
typedef exploded_cluster cluster_t;
/* Node was left unprocessed due to merger; it won't have had
exploded_graph::process_node called on it. */
- STATUS_MERGER
+ STATUS_MERGER,
};
exploded_node (const point_and_state &ps, int index);
on_stmt_flags on_stmt (exploded_graph &eg,
const supernode *snode,
const gimple *stmt,
- program_state *state,
- state_change *change) const;
+ program_state *state) const;
bool on_edge (exploded_graph &eg,
const superedge *succ,
program_point *next_point,
- program_state *next_state,
- state_change *change) const;
+ program_state *next_state) const;
void on_longjmp (exploded_graph &eg,
const gcall *call,
program_state *new_state,
return get_point ().get_stack_depth ();
}
const gimple *get_stmt () const { return get_point ().get_stmt (); }
+ const gimple *get_processed_stmt (unsigned idx) const;
const program_state &get_state () const { return m_ps.get_state (); }
public:
/* The index of this exploded_node. */
const int m_index;
+
+ /* The number of stmts that were processed when process_node was
+ called on this enode. */
+ unsigned m_num_processed_stmts;
};
/* An edge within the exploded graph.
};
exploded_edge (exploded_node *src, exploded_node *dest,
- const extrinsic_state &ext_state,
const superedge *sedge,
- const state_change &change,
custom_info_t *custom_info);
~exploded_edge ();
void dump_dot (graphviz_out *gv, const dump_args_t &args)
//private:
const superedge *const m_sedge;
- const state_change m_change;
-
/* NULL for most edges; will be non-NULL for special cases
such as an unwind from a longjmp to a setjmp, or when
a signal is delivered to a signal-handler.
const supergraph &get_supergraph () const { return m_sg; }
const extrinsic_state &get_ext_state () const { return m_ext_state; }
+ engine *get_engine () const { return m_ext_state.get_engine (); }
const state_purge_map *get_purge_map () const { return m_purge_map; }
const analysis_plan &get_analysis_plan () const { return m_plan; }
exploded_node *get_or_create_node (const program_point &point,
const program_state &state,
- state_change *change);
+ const exploded_node *enode_for_diag);
exploded_edge *add_edge (exploded_node *src, exploded_node *dest,
const superedge *sedge,
- const state_change &change,
exploded_edge::custom_info_t *custom = NULL);
per_program_point_data *
get_or_create_per_program_point_data (const program_point &);
+ per_program_point_data *
+ get_per_program_point_data (const program_point &) const;
per_call_string_data *
get_or_create_per_call_string_data (const call_string &);
void dump_states_for_supernode (FILE *, const supernode *snode) const;
void dump_exploded_nodes () const;
+ exploded_node *get_node_by_index (int idx) const;
+
const call_string_data_map_t *get_per_call_string_data () const
{ return &m_per_call_string_data; }
void dump () const;
bool feasible_p (logger *logger, feasibility_problem **out) const;
+ bool feasible_p (logger *logger, feasibility_problem **out,
+ engine *eng, const exploded_graph *eg) const;
auto_vec<const exploded_edge *> m_edges;
};
#include "bitmap.h"
#include "tristate.h"
#include "selftest.h"
+#include "analyzer/store.h"
#include "analyzer/region-model.h"
#include "analyzer/sm.h"
#include "analyzer/program-state.h"
/* class function_point. */
+function_point::function_point (const supernode *supernode,
+ const superedge *from_edge,
+ unsigned stmt_idx,
+ enum point_kind kind)
+: m_supernode (supernode), m_from_edge (from_edge),
+ m_stmt_idx (stmt_idx), m_kind (kind)
+{
+ if (from_edge)
+ {
+ gcc_checking_assert (m_kind == PK_BEFORE_SUPERNODE);
+ gcc_checking_assert (from_edge->get_kind () == SUPEREDGE_CFG_EDGE);
+ }
+ if (stmt_idx)
+ gcc_checking_assert (m_kind == PK_BEFORE_STMT);
+}
+
/* Print this function_point to PP. */
void
return hstate.end ();
}
+/* Get the function at this point, if any. */
+
+function *
+function_point::get_function () const
+{
+ if (m_supernode)
+ return m_supernode->m_fun;
+ else
+ return NULL;
+}
+
/* Get the gimple stmt for this function_point, if any. */
const gimple *
return UNKNOWN_LOCATION;
}
+/* Create a function_point representing the entrypoint of function FUN. */
+
+function_point
+function_point::from_function_entry (const supergraph &sg, function *fun)
+{
+ return before_supernode (sg.get_node_for_function_entry (fun), NULL);
+}
+
+/* Create a function_point representing entering supernode SUPERNODE,
+ having reached it via FROM_EDGE (which could be NULL). */
+
+function_point
+function_point::before_supernode (const supernode *supernode,
+ const superedge *from_edge)
+{
+ if (from_edge && from_edge->get_kind () != SUPEREDGE_CFG_EDGE)
+ from_edge = NULL;
+ return function_point (supernode, from_edge, 0, PK_BEFORE_SUPERNODE);
+}
+
/* A subclass of diagnostic_context for use by
program_point::print_source_line. */
return result;
}
+/* For PK_BEFORE_STMT, go to next stmt (or to PK_AFTER_SUPERNODE). */
+
+void
+function_point::next_stmt ()
+{
+ gcc_assert (m_kind == PK_BEFORE_STMT);
+ if (++m_stmt_idx == m_supernode->m_stmts.length ())
+ {
+ m_kind = PK_AFTER_SUPERNODE;
+ m_stmt_idx = 0;
+ }
+}
+
#if CHECKING_P
namespace selftest {
function_point (const supernode *supernode,
const superedge *from_edge,
unsigned stmt_idx,
- enum point_kind kind)
- : m_supernode (supernode), m_from_edge (from_edge),
- m_stmt_idx (stmt_idx), m_kind (kind)
- {
- if (from_edge)
- {
- gcc_checking_assert (m_kind == PK_BEFORE_SUPERNODE);
- gcc_checking_assert (from_edge->get_kind () == SUPEREDGE_CFG_EDGE);
- }
- if (stmt_idx)
- gcc_checking_assert (m_kind == PK_BEFORE_STMT);
- }
+ enum point_kind kind);
void print (pretty_printer *pp, const format &f) const;
void print_source_line (pretty_printer *pp) const;
/* Accessors. */
const supernode *get_supernode () const { return m_supernode; }
- function *get_function () const
- {
- if (m_supernode)
- return m_supernode->m_fun;
- else
- return NULL;
- }
+ function *get_function () const;
const gimple *get_stmt () const;
location_t get_location () const;
enum point_kind get_kind () const { return m_kind; }
/* Factory functions for making various kinds of program_point. */
static function_point from_function_entry (const supergraph &sg,
- function *fun)
- {
- return before_supernode (sg.get_node_for_function_entry (fun),
- NULL);
- }
+ function *fun);
static function_point before_supernode (const supernode *supernode,
- const superedge *from_edge)
- {
- if (from_edge && from_edge->get_kind () != SUPEREDGE_CFG_EDGE)
- from_edge = NULL;
- return function_point (supernode, from_edge, 0, PK_BEFORE_SUPERNODE);
- }
+ const superedge *from_edge);
static function_point before_stmt (const supernode *supernode,
unsigned stmt_idx)
static int cmp_within_supernode (const function_point &point_a,
const function_point &point_b);
+ /* For before_stmt, go to next stmt. */
+ void next_stmt ();
+
private:
const supernode *m_supernode;
return (m_function_point == other.m_function_point
&& m_call_string == other.m_call_string);
}
+ bool operator!= (const program_point &other) const
+ {
+ return !(*this == other);
+ }
/* Accessors. */
}
/* Factory functions for making various kinds of program_point. */
+ static program_point origin ()
+ {
+ return program_point (function_point (NULL, NULL,
+ 0, PK_ORIGIN),
+ call_string ());
+ }
static program_point from_function_entry (const supergraph &sg,
function *fun)
void validate () const;
+ /* For before_stmt, go to next stmt. */
+ void next_stmt () { m_function_point.next_stmt (); }
+
private:
- const function_point m_function_point;
+ function_point m_function_point;
call_string m_call_string;
};
#include "tristate.h"
#include "ordered-hash-map.h"
#include "selftest.h"
+#include "analyzer/call-string.h"
+#include "analyzer/program-point.h"
+#include "analyzer/store.h"
#include "analyzer/region-model.h"
#include "analyzer/program-state.h"
#include "analyzer/constraint-manager.h"
#include "alloc-pool.h"
#include "fibonacci_heap.h"
#include "shortest-paths.h"
-#include "analyzer/constraint-manager.h"
#include "diagnostic-event-id.h"
#include "analyzer/pending-diagnostic.h"
#include "analyzer/diagnostic-manager.h"
#include "cgraph.h"
#include "digraph.h"
#include "analyzer/supergraph.h"
-#include "analyzer/call-string.h"
-#include "analyzer/program-point.h"
#include "analyzer/program-state.h"
#include "analyzer/exploded-graph.h"
#include "analyzer/state-purge.h"
dump_to_file (stderr);
}
+/* Get the region_model_manager for this extrinsic_state. */
+
+region_model_manager *
+extrinsic_state::get_model_manager () const
+{
+ if (m_engine)
+ return m_engine->get_model_manager ();
+ else
+ return NULL; /* for selftests. */
+}
+
/* class sm_state_map. */
/* sm_state_map's ctor. */
-sm_state_map::sm_state_map ()
-: m_map (), m_global_state (0)
+sm_state_map::sm_state_map (const state_machine &sm, int sm_idx)
+: m_sm (sm), m_sm_idx (sm_idx), m_map (), m_global_state (0)
{
}
return new sm_state_map (*this);
}
-/* Clone this sm_state_map, remapping all svalue_ids within it with ID_MAP.
-
- Return NULL if there are any svalue_ids that have sm-state for which
- ID_MAP maps them to svalue_id::null (and thus the clone would have lost
- the sm-state information). */
-
-sm_state_map *
-sm_state_map::clone_with_remapping (const one_way_svalue_id_map &id_map) const
-{
- sm_state_map *result = new sm_state_map ();
- result->m_global_state = m_global_state;
- for (map_t::iterator iter = m_map.begin ();
- iter != m_map.end ();
- ++iter)
- {
- svalue_id sid = (*iter).first;
- gcc_assert (!sid.null_p ());
- entry_t e = (*iter).second;
- /* TODO: what should we do if the origin maps from non-null to null?
- Is that loss of information acceptable? */
- id_map.update (&e.m_origin);
-
- svalue_id new_sid = id_map.get_dst_for_src (sid);
- if (new_sid.null_p ())
- {
- delete result;
- return NULL;
- }
- result->m_map.put (new_sid, e);
- }
- return result;
-}
-
-/* Print this sm_state_map (for SM) to PP.
+/* Print this sm_state_map to PP.
If MODEL is non-NULL, print representative tree values where
available. */
void
-sm_state_map::print (const state_machine &sm, const region_model *model,
- pretty_printer *pp) const
+sm_state_map::print (const region_model *model,
+ bool simple, bool multiline,
+ pretty_printer *pp) const
{
bool first = true;
- pp_string (pp, "{");
+ if (!multiline)
+ pp_string (pp, "{");
if (m_global_state != 0)
{
- pp_printf (pp, "global: %s", sm.get_state_name (m_global_state));
+ if (multiline)
+ pp_string (pp, " ");
+ pp_printf (pp, "global: %s", m_sm.get_state_name (m_global_state));
+ if (multiline)
+ pp_newline (pp);
first = false;
}
for (map_t::iterator iter = m_map.begin ();
iter != m_map.end ();
++iter)
{
- if (!first)
+ if (multiline)
+ pp_string (pp, " ");
+ else if (!first)
pp_string (pp, ", ");
first = false;
- svalue_id sid = (*iter).first;
- sid.print (pp);
+ const svalue *sval = (*iter).first;
+ pp_pointer (pp, sval);
+ pp_string (pp, ": ");
+ sval->dump_to_pp (pp, simple);
entry_t e = (*iter).second;
- pp_printf (pp, ": %s", sm.get_state_name (e.m_state));
+ pp_printf (pp, ": %s", m_sm.get_state_name (e.m_state));
if (model)
- if (tree rep = model->get_representative_tree (sid))
+ if (tree rep = model->get_representative_tree (sval))
{
pp_string (pp, " (");
dump_quoted_tree (pp, rep);
pp_character (pp, ')');
}
- if (!e.m_origin.null_p ())
+ if (e.m_origin)
{
pp_string (pp, " (origin: ");
- e.m_origin.print (pp);
+ pp_pointer (pp, e.m_origin);
+ pp_string (pp, ": ");
+ e.m_origin->dump_to_pp (pp, simple);
if (model)
if (tree rep = model->get_representative_tree (e.m_origin))
{
}
pp_string (pp, ")");
}
+ if (multiline)
+ pp_newline (pp);
}
- pp_string (pp, "}");
+ if (!multiline)
+ pp_string (pp, "}");
}
-/* Dump this object (for SM) to stderr. */
+/* Dump this object to stderr. */
DEBUG_FUNCTION void
-sm_state_map::dump (const state_machine &sm) const
+sm_state_map::dump (bool simple) const
{
pretty_printer pp;
pp_show_color (&pp) = pp_show_color (global_dc->printer);
pp.buffer->stream = stderr;
- print (sm, NULL, &pp);
+ print (NULL, simple, true, &pp);
pp_newline (&pp);
pp_flush (&pp);
}
++iter)
{
inchash::hash hstate;
- inchash::add ((*iter).first, hstate);
+ hstate.add_ptr ((*iter).first);
entry_t e = (*iter).second;
hstate.add_int (e.m_state);
- inchash::add (e.m_origin, hstate);
+ hstate.add_ptr (e.m_origin);
result ^= hstate.end ();
}
result ^= m_global_state;
iter != m_map.end ();
++iter)
{
- svalue_id sid = (*iter).first;
+ const svalue *sval = (*iter).first;
entry_t e = (*iter).second;
- entry_t *other_slot = const_cast <map_t &> (other.m_map).get (sid);
+ entry_t *other_slot = const_cast <map_t &> (other.m_map).get (sval);
if (other_slot == NULL)
return false;
if (e != *other_slot)
return true;
}
-/* Get the state of SID within this object.
+/* Get the state of SVAL within this object.
States default to the start state. */
state_machine::state_t
-sm_state_map::get_state (svalue_id sid) const
+sm_state_map::get_state (const svalue *sval,
+ const extrinsic_state &ext_state) const
{
- gcc_assert (!sid.null_p ());
+ gcc_assert (sval);
+
+ sval = canonicalize_svalue (sval, ext_state);
if (entry_t *slot
- = const_cast <map_t &> (m_map).get (sid))
+ = const_cast <map_t &> (m_map).get (sval))
return slot->m_state;
- else
- return 0;
+
+ /* SVAL has no explicit sm-state.
+ If this sm allows for state inheritance, then SVAL might have implicit
+ sm-state inherited via a parent.
+ For example INIT_VAL(foo.field) might inherit taintedness state from
+ INIT_VAL(foo). */
+ if (m_sm.inherited_state_p ())
+ if (region_model_manager *mgr = ext_state.get_model_manager ())
+ if (const initial_svalue *init_sval = sval->dyn_cast_initial_svalue ())
+ {
+ const region *reg = init_sval->get_region ();
+ /* Try recursing upwards (up to the base region for the cluster). */
+ if (!reg->base_region_p ())
+ if (const region *parent_reg = reg->get_parent_region ())
+ {
+ const svalue *parent_init_sval
+ = mgr->get_or_create_initial_value (parent_reg);
+ state_machine::state_t parent_state
+ = get_state (parent_init_sval, ext_state);
+ if (parent_state)
+ return parent_state;
+ }
+ }
+
+ return m_sm.get_default_state (sval);
}
-/* Get the "origin" svalue_id for any state of SID. */
+/* Get the "origin" svalue for any state of SVAL. */
-svalue_id
-sm_state_map::get_origin (svalue_id sid) const
+const svalue *
+sm_state_map::get_origin (const svalue *sval,
+ const extrinsic_state &ext_state) const
{
- gcc_assert (!sid.null_p ());
+ gcc_assert (sval);
+
+ sval = canonicalize_svalue (sval, ext_state);
entry_t *slot
- = const_cast <map_t &> (m_map).get (sid);
+ = const_cast <map_t &> (m_map).get (sval);
if (slot)
return slot->m_origin;
else
- return svalue_id::null ();
+ return NULL;
}
/* Set the state of SID within MODEL to STATE, recording that
void
sm_state_map::set_state (region_model *model,
- svalue_id sid,
+ const svalue *sval,
state_machine::state_t state,
- svalue_id origin)
+ const svalue *origin,
+ const extrinsic_state &ext_state)
{
if (model == NULL)
return;
- equiv_class &ec = model->get_constraints ()->get_equiv_class (sid);
- if (!set_state (ec, state, origin))
- return;
- /* Also do it for all svalues that are equal via non-cm, so that
- e.g. (void *)&r and (foo *)&r transition together. */
- for (unsigned i = 0; i < model->get_num_svalues (); i++)
- {
- svalue_id other_sid = svalue_id::from_int (i);
- if (other_sid == sid)
- continue;
+ /* Reject attempts to set state on UNKNOWN. */
+ if (sval->get_kind () == SK_UNKNOWN)
+ return;
- tristate eq = model->eval_condition_without_cm (sid, EQ_EXPR, other_sid);
- if (eq.is_true ())
- impl_set_state (other_sid, state, origin);
- }
+ equiv_class &ec = model->get_constraints ()->get_equiv_class (sval);
+ if (!set_state (ec, state, origin, ext_state))
+ return;
}
/* Set the state of EC to STATE, recording that the state came from
bool
sm_state_map::set_state (const equiv_class &ec,
state_machine::state_t state,
- svalue_id origin)
+ const svalue *origin,
+ const extrinsic_state &ext_state)
{
int i;
- svalue_id *sid;
+ const svalue *sval;
bool any_changed = false;
- FOR_EACH_VEC_ELT (ec.m_vars, i, sid)
- any_changed |= impl_set_state (*sid, state, origin);
+ FOR_EACH_VEC_ELT (ec.m_vars, i, sval)
+ any_changed |= impl_set_state (sval, state, origin, ext_state);
return any_changed;
}
-/* Set state of SID to STATE, bypassing equivalence classes.
+/* Set state of SVAL to STATE, bypassing equivalence classes.
Return true if the state changed. */
bool
-sm_state_map::impl_set_state (svalue_id sid, state_machine::state_t state,
- svalue_id origin)
+sm_state_map::impl_set_state (const svalue *sval,
+ state_machine::state_t state,
+ const svalue *origin,
+ const extrinsic_state &ext_state)
{
- if (get_state (sid) == state)
+ sval = canonicalize_svalue (sval, ext_state);
+
+ if (get_state (sval, ext_state) == state)
return false;
/* Special-case state 0 as the default value. */
if (state == 0)
{
- if (m_map.get (sid))
- m_map.remove (sid);
+ if (m_map.get (sval))
+ m_map.remove (sval);
return true;
}
- gcc_assert (!sid.null_p ());
- m_map.put (sid, entry_t (state, origin));
+ gcc_assert (sval);
+ m_map.put (sval, entry_t (state, origin));
return true;
}
return m_global_state;
}
-/* Handle CALL to unknown FNDECL with an unknown function body, which
- could do anything to the states passed to it.
- Clear any state for SM for the params and any LHS.
- Note that the function might be known to other state machines, but
- not to this one. */
+/* Remove any mapping for SVAL within this map.
+   If the state machine's can_purge_p returns false for SVAL's current
+   state, first report it as a leak via CTXT->on_state_leak.  */
void
-sm_state_map::purge_for_unknown_fncall (const exploded_graph &eg,
- const state_machine &sm,
- const gcall *call,
- tree fndecl,
- region_model *new_model,
- region_model_context *ctxt)
+sm_state_map::on_svalue_leak (const svalue *sval,
+ impl_region_model_context *ctxt)
{
- logger * const logger = eg.get_logger ();
- if (logger)
+ if (state_machine::state_t state = get_state (sval, ctxt->m_ext_state))
{
- if (fndecl)
- logger->log ("function %qE is unknown to checker %qs",
- fndecl, sm.get_name ());
- else
- logger->log ("unknown function pointer for checker %qs",
- sm.get_name ());
- }
-
- /* Purge any state for parms. */
- tree iter_param_types = NULL_TREE;
- if (fndecl)
- iter_param_types = TYPE_ARG_TYPES (TREE_TYPE (fndecl));
- for (unsigned arg_idx = 0; arg_idx < gimple_call_num_args (call); arg_idx++)
- {
- /* Track expected param type, where available. */
- if (iter_param_types)
- {
- tree param_type = TREE_VALUE (iter_param_types);
- gcc_assert (param_type);
- iter_param_types = TREE_CHAIN (iter_param_types);
-
- /* Don't purge state if it was passed as a const pointer
- e.g. for things like strlen (PTR). */
- if (TREE_CODE (param_type) == POINTER_TYPE)
- if (TYPE_READONLY (TREE_TYPE (param_type)))
- continue;
- }
- tree parm = gimple_call_arg (call, arg_idx);
- svalue_id parm_sid = new_model->get_rvalue (parm, ctxt);
- set_state (new_model, parm_sid, 0, svalue_id::null ());
-
- /* Also clear sm-state from svalue_ids that are passed via a
- pointer. */
- if (TREE_CODE (parm) == ADDR_EXPR)
- {
- tree pointee = TREE_OPERAND (parm, 0);
- svalue_id parm_sid = new_model->get_rvalue (pointee, ctxt);
- set_state (new_model, parm_sid, 0, svalue_id::null ());
- }
- }
-
- /* Purge any state for any LHS. */
- if (tree lhs = gimple_call_lhs (call))
- {
- svalue_id lhs_sid = new_model->get_rvalue (lhs, ctxt);
- set_state (new_model, lhs_sid, 0, svalue_id::null ());
+ if (!m_sm.can_purge_p (state))
+ ctxt->on_state_leak (m_sm, sval, state);
+ m_map.remove (sval);
}
}
-/* Update this map based on MAP. */
+/* Purge any state for svalues that aren't live with respect to LIVE_SVALUES
+ and MODEL. */
void
-sm_state_map::remap_svalue_ids (const svalue_id_map &map)
+sm_state_map::on_liveness_change (const svalue_set &live_svalues,
+ const region_model *model,
+ impl_region_model_context *ctxt)
{
- map_t tmp_map;
+ svalue_set svals_to_unset;
- /* Build an intermediate map, using the new sids. */
for (map_t::iterator iter = m_map.begin ();
iter != m_map.end ();
++iter)
{
- svalue_id sid = (*iter).first;
- entry_t e = (*iter).second;
-
- map.update (&sid);
- map.update (&e.m_origin);
- tmp_map.put (sid, e);
- }
-
- /* Clear the existing values. */
- m_map.empty ();
-
- /* Copy over from intermediate map. */
- for (map_t::iterator iter = tmp_map.begin ();
- iter != tmp_map.end ();
- ++iter)
- {
- svalue_id sid = (*iter).first;
- entry_t e = (*iter).second;
-
- impl_set_state (sid, e.m_state, e.m_origin);
- }
-}
-
-/* Purge any state for svalue_ids >= FIRST_UNUSED_SID.
- If !SM::can_purge_p, then report the state as leaking,
- using SM_IDX, CTXT, and MAP.
- Return the number of states that were purged. */
-
-int
-sm_state_map::on_svalue_purge (const state_machine &sm,
- int sm_idx,
- svalue_id first_unused_sid,
- const svalue_id_map &map,
- impl_region_model_context *ctxt)
-{
- /* TODO: ideally remove the slot directly; for now
- do it in two stages. */
- auto_vec<svalue_id> to_remove;
- for (map_t::iterator iter = m_map.begin ();
- iter != m_map.end ();
- ++iter)
- {
- svalue_id dst_sid ((*iter).first);
- if (dst_sid.as_int () >= first_unused_sid.as_int ())
+ const svalue *iter_sval = (*iter).first;
+ if (!iter_sval->live_p (live_svalues, model))
{
- /* Complain about leaks here. */
+ svals_to_unset.add (iter_sval);
entry_t e = (*iter).second;
-
- if (!sm.can_purge_p (e.m_state))
- ctxt->on_state_leak (sm, sm_idx, dst_sid, first_unused_sid,
- map, e.m_state);
-
- to_remove.safe_push (dst_sid);
- }
- else if ((*iter).second.m_origin.as_int () >= first_unused_sid.as_int ())
- {
- /* If the origin svalue is being purged, then reset it to null. */
- (*iter).second.m_origin = svalue_id::null ();
+ if (!m_sm.can_purge_p (e.m_state))
+ ctxt->on_state_leak (m_sm, iter_sval, e.m_state);
}
}
- int i;
- svalue_id *dst_sid;
- FOR_EACH_VEC_ELT (to_remove, i, dst_sid)
- m_map.remove (*dst_sid);
-
- return to_remove.length ();
+ for (svalue_set::iterator iter = svals_to_unset.begin ();
+ iter != svals_to_unset.end (); ++iter)
+ m_map.remove (*iter);
}
-/* Set the state of CHILD_SID to that of PARENT_SID. */
+/* Purge state from SVAL (in response to a call to an unknown function),
+   along with state for any other initial svalue whose region lies within
+   SVAL's base region.  States for which
+   reset_when_passed_to_unknown_fn_p is false (given IS_MUTABLE) are
+   retained.  */
void
-sm_state_map::on_inherited_svalue (svalue_id parent_sid,
- svalue_id child_sid)
+sm_state_map::on_unknown_change (const svalue *sval,
+ bool is_mutable,
+ const extrinsic_state &ext_state)
{
- state_machine::state_t state = get_state (parent_sid);
- impl_set_state (child_sid, state, parent_sid);
-}
-
-/* Set the state of DST_SID to that of SRC_SID. */
-
-void
-sm_state_map::on_cast (svalue_id src_sid,
- svalue_id dst_sid)
-{
- state_machine::state_t state = get_state (src_sid);
- impl_set_state (dst_sid, state, get_origin (src_sid));
-}
-
-/* Purge state from SID (in response to a call to an unknown function). */
-
-void
-sm_state_map::on_unknown_change (svalue_id sid)
-{
- impl_set_state (sid, (state_machine::state_t)0, svalue_id::null ());
-}
-
-/* Assert that this object is sane. */
-
-void
-sm_state_map::validate (const state_machine &sm,
- int num_svalues) const
-{
- /* Skip this in a release build. */
-#if !CHECKING_P
- return;
-#endif
+ svalue_set svals_to_unset;
for (map_t::iterator iter = m_map.begin ();
iter != m_map.end ();
++iter)
{
- svalue_id sid = (*iter).first;
+ const svalue *key = (*iter).first;
entry_t e = (*iter).second;
-
- gcc_assert (sid.as_int () < num_svalues);
- sm.validate (e.m_state);
- gcc_assert (e.m_origin.as_int () < num_svalues);
+ /* We only want to purge state for some states when things
+ are mutable. For example, in sm-malloc.cc, an on-stack ptr
+ doesn't stop being stack-allocated when passed to an unknown fn. */
+ if (!m_sm.reset_when_passed_to_unknown_fn_p (e.m_state, is_mutable))
+ continue;
+ if (key == sval)
+ svals_to_unset.add (key);
+ /* If we have INIT_VAL(BASE_REG), then unset any INIT_VAL(REG)
+ for REG within BASE_REG. */
+ if (const initial_svalue *init_sval = sval->dyn_cast_initial_svalue ())
+ if (const initial_svalue *init_key = key->dyn_cast_initial_svalue ())
+ {
+ const region *changed_reg = init_sval->get_region ();
+ const region *changed_key = init_key->get_region ();
+ if (changed_key->get_base_region () == changed_reg)
+ svals_to_unset.add (key);
+ }
}
+
+ for (svalue_set::iterator iter = svals_to_unset.begin ();
+ iter != svals_to_unset.end (); ++iter)
+ impl_set_state (*iter, (state_machine::state_t)0, NULL, ext_state);
+}
+
+/* Canonicalize SVAL before getting/setting it within the map.
+ Convert all NULL pointers to (void *) to avoid state explosions
+ involving all of the various (foo *)NULL vs (bar *)NULL. */
+
+const svalue *
+sm_state_map::canonicalize_svalue (const svalue *sval,
+ const extrinsic_state &ext_state)
+{
+ region_model_manager *mgr = ext_state.get_model_manager ();
+ if (mgr && sval->get_type () && POINTER_TYPE_P (sval->get_type ()))
+ if (tree cst = sval->maybe_get_constant ())
+ if (zerop (cst))
+ return mgr->get_or_create_constant_svalue (null_pointer_node);
+
+ return sval;
}
/* class program_state. */
/* program_state's ctor. */
program_state::program_state (const extrinsic_state &ext_state)
-: m_region_model (new region_model ()),
+: m_region_model (NULL),
m_checker_states (ext_state.get_num_checkers ()),
m_valid (true)
{
- int num_states = ext_state.get_num_checkers ();
+ engine *eng = ext_state.get_engine ();
+ region_model_manager *mgr = eng->get_model_manager ();
+ m_region_model = new region_model (mgr);
+ const int num_states = ext_state.get_num_checkers ();
for (int i = 0; i < num_states; i++)
- m_checker_states.quick_push (new sm_state_map ());
+ {
+ sm_state_map *sm = new sm_state_map (ext_state.get_sm (i), i);
+ m_checker_states.quick_push (sm);
+ }
}
/* program_state's copy ctor. */
pretty_printer *pp) const
{
pp_printf (pp, "rmodel: ");
- m_region_model->print (pp);
+ m_region_model->dump_to_pp (pp, true, false);
pp_newline (pp);
int i;
if (!smap->is_empty_p ())
{
pp_printf (pp, "%s: ", ext_state.get_name (i));
- smap->print (ext_state.get_sm (i), m_region_model, pp);
+ smap->print (m_region_model, true, false, pp);
pp_newline (pp);
}
}
}
}
-/* Dump a representation of this state to PP.
- If SUMMARIZE is true, print a one-line summary;
- if false, print a detailed multiline representation. */
+/* Dump a representation of this state to PP. */
void
program_state::dump_to_pp (const extrinsic_state &ext_state,
- bool summarize,
+ bool /*summarize*/, bool multiline,
pretty_printer *pp) const
{
- pp_printf (pp, "rmodel: ");
- m_region_model->dump_to_pp (pp, summarize);
+ if (!multiline)
+ pp_string (pp, "{");
+ {
+ pp_printf (pp, "rmodel:");
+ if (multiline)
+ pp_newline (pp);
+ else
+ pp_string (pp, " {");
+ m_region_model->dump_to_pp (pp, true, multiline);
+ if (!multiline)
+ pp_string (pp, "}");
+ }
int i;
sm_state_map *smap;
{
if (!smap->is_empty_p ())
{
- if (summarize)
- pp_space (pp);
+ if (!multiline)
+ pp_string (pp, " {");
pp_printf (pp, "%s: ", ext_state.get_name (i));
- smap->print (ext_state.get_sm (i), m_region_model, pp);
- if (!summarize)
+ if (multiline)
pp_newline (pp);
+ smap->print (m_region_model, true, multiline, pp);
+ if (!multiline)
+ pp_string (pp, "}");
}
}
if (!m_valid)
{
- if (summarize)
+ if (!multiline)
pp_space (pp);
pp_printf (pp, "invalid state");
- if (!summarize)
+ if (multiline)
pp_newline (pp);
}
+ if (!multiline)
+ pp_string (pp, "}");
}
-/* Dump a multiline representation of this state to OUTF. */
+/* Dump a representation of this state to OUTF. */
void
program_state::dump_to_file (const extrinsic_state &ext_state,
- bool summarize,
+ bool summarize, bool multiline,
FILE *outf) const
{
pretty_printer pp;
if (outf == stderr)
pp_show_color (&pp) = pp_show_color (global_dc->printer);
pp.buffer->stream = outf;
- dump_to_pp (ext_state, summarize, &pp);
+ dump_to_pp (ext_state, summarize, multiline, &pp);
pp_flush (&pp);
}
program_state::dump (const extrinsic_state &ext_state,
bool summarize) const
{
- dump_to_file (ext_state, summarize, stderr);
+ dump_to_file (ext_state, summarize, true, stderr);
+}
+
+/* Update this program_state to reflect a top-level call to FUN.
+   The params will be given their initial_svalue values, as for an
+   unknown caller.  */
+
+void
+program_state::push_frame (const extrinsic_state &ext_state ATTRIBUTE_UNUSED,
+ function *fun)
+{
+ m_region_model->push_frame (fun, NULL, NULL);
+}
+
+/* Get the current function of this state. */
+
+function *
+program_state::get_current_function () const
+{
+ return m_region_model->get_current_function ();
}
/* Determine if following edge SUCC from ENODE is valid within the graph EG
bool
program_state::on_edge (exploded_graph &eg,
const exploded_node &enode,
- const superedge *succ,
- state_change *change)
+ const superedge *succ)
{
/* Update state. */
const program_point &point = enode.get_point ();
impl_region_model_context ctxt (eg, &enode,
&enode.get_state (),
- this, change,
+ this,
last_stmt);
if (!m_region_model->maybe_update_for_edge (*succ,
last_stmt,
return false;
}
+ program_state::detect_leaks (enode.get_state (), *this,
+ NULL, eg.get_ext_state (),
+ &ctxt);
+
return true;
}
relevant at POINT.
The idea is that we're more likely to be able to consolidate
multiple (point, state) into single exploded_nodes if we discard
- irrelevant state (e.g. at the end of functions).
-
- Retain state affected by CHANGE, to make it easier to generate
- state_change_events. */
+ irrelevant state (e.g. at the end of functions). */
program_state
program_state::prune_for_point (exploded_graph &eg,
const program_point &point,
- state_change *change) const
+ const exploded_node *enode_for_diag) const
{
logger * const logger = eg.get_logger ();
LOG_SCOPE (logger);
program_state new_state (*this);
- purge_stats stats;
-
const state_purge_map *pm = eg.get_purge_map ();
if (pm)
{
- region_id_set purgeable_ssa_regions (new_state.m_region_model);
- region_id frame_rid
- = new_state.m_region_model->get_current_frame_id ();
- frame_region *frame
- = new_state.m_region_model->get_region <frame_region>(frame_rid);
-
- /* TODO: maybe move to a member of region_model? */
-
- auto_vec<tree> ssa_names_to_purge;
- for (frame_region::map_t::iterator iter = frame->begin ();
- iter != frame->end ();
- ++iter)
+ unsigned num_ssas_purged = 0;
+ auto_vec<const decl_region *> ssa_name_regs;
+ new_state.m_region_model->get_ssa_name_regions_for_current_frame
+ (&ssa_name_regs);
+ unsigned i;
+ const decl_region *reg;
+ FOR_EACH_VEC_ELT (ssa_name_regs, i, reg)
{
- tree var = (*iter).first;
- region_id rid = (*iter).second;
- if (TREE_CODE (var) == SSA_NAME)
+ tree ssa_name = reg->get_decl ();
+ const state_purge_per_ssa_name &per_ssa
+ = pm->get_data_for_ssa_name (ssa_name);
+ if (!per_ssa.needed_at_point_p (point.get_function_point ()))
{
- const state_purge_per_ssa_name &per_ssa
- = pm->get_data_for_ssa_name (var);
- if (!per_ssa.needed_at_point_p (point.get_function_point ()))
+ /* Don't purge bindings of SSA names to svalues
+ that have unpurgable sm-state, so that leaks are
+ reported at the end of the function, rather than
+ at the last place that such an SSA name is referred to.
+
+ But do purge them for temporaries (when SSA_NAME_VAR is
+ NULL), so that we report for cases where a leak happens when
+ a variable is overwritten with another value, so that the leak
+ is reported at the point of overwrite, rather than having
+ temporaries keep the value reachable until the frame is
+ popped. */
+ const svalue *sval
+ = new_state.m_region_model->get_store_value (reg);
+ if (!new_state.can_purge_p (eg.get_ext_state (), sval)
+ && SSA_NAME_VAR (ssa_name))
{
- region *region
- = new_state.m_region_model->get_region (rid);
- svalue_id sid = region->get_value_direct ();
- if (!sid.null_p ())
- {
- if (!new_state.can_purge_p (eg.get_ext_state (), sid))
- {
- /* (currently only state maps can keep things
- alive). */
- if (logger)
- logger->log ("not purging RID: %i for %qE"
- " (used by state map)",
- rid.as_int (), var);
- continue;
- }
-
- /* Don't purge regions containing svalues that
- have a change of sm-state, to make it easier to
- generate state_change_event messages. */
- if (change)
- if (change->affects_p (sid))
- {
- if (logger)
- logger->log ("not purging RID: %i for %qE"
- " (affected by change)",
- rid.as_int (), var);
- continue;
- }
- }
- purgeable_ssa_regions.add_region (rid);
- ssa_names_to_purge.safe_push (var);
+ /* (currently only state maps can keep things
+ alive). */
if (logger)
- logger->log ("purging RID: %i for %qE", rid.as_int (), var);
- /* We also need to remove the region from the map.
- We're in mid-traversal, so the removal is done in
- unbind below. */
+ logger->log ("not purging binding for %qE"
+ " (used by state map)", ssa_name);
+ continue;
}
+
+ new_state.m_region_model->purge_region (reg);
+ num_ssas_purged++;
}
}
- /* Unbind the regions from the frame's map of vars-to-regions. */
- unsigned i;
- tree var;
- FOR_EACH_VEC_ELT (ssa_names_to_purge, i, var)
- frame->unbind (var);
-
- /* Purge the regions. Nothing should point to them, and they
- should have no children, as they are for SSA names. */
- new_state.m_region_model->purge_regions (purgeable_ssa_regions,
- &stats,
- eg.get_logger ());
- }
-
- /* Purge unused svalues. */
- // TODO: which enode to use, if any?
- impl_region_model_context ctxt (eg, NULL,
- this,
- &new_state,
- change,
- NULL);
- new_state.m_region_model->purge_unused_svalues (&stats, &ctxt);
- if (logger)
- {
- logger->log ("num svalues purged: %i", stats.m_num_svalues);
- logger->log ("num regions purged: %i", stats.m_num_regions);
- logger->log ("num equiv_classes purged: %i", stats.m_num_equiv_classes);
- logger->log ("num constraints purged: %i", stats.m_num_constraints);
- logger->log ("num sm map items purged: %i", stats.m_num_client_items);
+ if (num_ssas_purged > 0)
+ {
+ if (logger)
+ logger->log ("num_ssas_purged: %i", num_ssas_purged);
+ impl_region_model_context ctxt (eg, enode_for_diag,
+ this,
+ &new_state,
+ point.get_stmt ());
+ detect_leaks (*this, new_state, NULL, eg.get_ext_state (), &ctxt);
+ }
}
- new_state.m_region_model->canonicalize (&ctxt);
+ new_state.m_region_model->canonicalize ();
return new_state;
}
-/* Remap all svalue_ids in this state's m_checker_states according to MAP.
- The svalues_ids in the region_model are assumed to already have been
- remapped. */
-
-void
-program_state::remap_svalue_ids (const svalue_id_map &map)
-{
- int i;
- sm_state_map *smap;
- FOR_EACH_VEC_ELT (m_checker_states, i, smap)
- smap->remap_svalue_ids (map);
-}
-
-/* Attempt to return a tree that represents SID, or return NULL_TREE.
- Find the first region that stores the value (e.g. a local) and
- generate a representative tree for it. */
+/* Get a representative tree to use for describing SVAL. */
tree
-program_state::get_representative_tree (svalue_id sid) const
+program_state::get_representative_tree (const svalue *sval) const
{
- return m_region_model->get_representative_tree (sid);
+ gcc_assert (m_region_model);
+ return m_region_model->get_representative_tree (sval);
}
-/* Attempt to merge this state with OTHER, both using EXT_STATE.
+/* Attempt to merge this state with OTHER, both at POINT.
Write the result to *OUT.
If the states were merged successfully, return true. */
bool
program_state::can_merge_with_p (const program_state &other,
- const extrinsic_state &ext_state,
+ const program_point &point,
program_state *out) const
{
gcc_assert (out);
+ gcc_assert (m_region_model);
- /* TODO: initially I had an early reject here if there
- are sm-differences between the states. However, this was
- falsely rejecting merger opportunities for states where the
- only difference was in svalue_id ordering. */
+ /* Early reject if there are sm-differences between the states. */
+ int i;
+ sm_state_map *smap;
+ FOR_EACH_VEC_ELT (out->m_checker_states, i, smap)
+ if (*m_checker_states[i] != *other.m_checker_states[i])
+ return false;
/* Attempt to merge the region_models. */
-
- svalue_id_merger_mapping sid_mapping (*m_region_model,
- *other.m_region_model);
if (!m_region_model->can_merge_with_p (*other.m_region_model,
- out->m_region_model,
- &sid_mapping))
+ point,
+ out->m_region_model))
return false;
- /* Copy m_checker_states to result, remapping svalue_ids using
- sid_mapping. */
- int i;
- sm_state_map *smap;
+ /* Copy m_checker_states to OUT. */
FOR_EACH_VEC_ELT (out->m_checker_states, i, smap)
- delete smap;
- out->m_checker_states.truncate (0);
-
- /* Remap this and other's m_checker_states using sid_mapping.
- Only merge states that have equality between the two end-results:
- sm-state differences are likely to be interesting to end-users, and
- hence are worth exploring as separate paths in the exploded graph. */
- FOR_EACH_VEC_ELT (m_checker_states, i, smap)
{
- sm_state_map *other_smap = other.m_checker_states[i];
-
- /* If clone_with_remapping returns NULL for one of the input smaps,
- then it has sm-state for an svalue_id where the svalue_id is
- being mapped to svalue_id::null in its sid_mapping, meaning that
- the svalue is to be dropped during the merger. We don't want
- to lose sm-state during a state merger, so return false for these
- cases. */
- sm_state_map *remapped_a_smap
- = smap->clone_with_remapping (sid_mapping.m_map_from_a_to_m);
- if (!remapped_a_smap)
- return false;
- sm_state_map *remapped_b_smap
- = other_smap->clone_with_remapping (sid_mapping.m_map_from_b_to_m);
- if (!remapped_b_smap)
- {
- delete remapped_a_smap;
- return false;
- }
-
- /* Both states have sm-state for the same values; now ensure that the
- states are equal. */
- if (*remapped_a_smap == *remapped_b_smap)
- {
- out->m_checker_states.safe_push (remapped_a_smap);
- delete remapped_b_smap;
- }
- else
- {
- /* Don't merge if there are sm-state differences. */
- delete remapped_a_smap;
- delete remapped_b_smap;
- return false;
- }
+ delete smap;
+ out->m_checker_states[i] = m_checker_states[i]->clone ();
}
- impl_region_model_context ctxt (out, NULL, ext_state);
- out->m_region_model->canonicalize (&ctxt);
+ out->m_region_model->canonicalize ();
return true;
}
return;
#endif
- m_region_model->validate ();
gcc_assert (m_checker_states.length () == ext_state.get_num_checkers ());
- int sm_idx;
- sm_state_map *smap;
- FOR_EACH_VEC_ELT (m_checker_states, sm_idx, smap)
- {
- const state_machine &sm = ext_state.get_sm (sm_idx);
- smap->validate (sm, m_region_model->get_num_svalues ());
- }
}
-/* Dump this sm_change to PP. */
-
-void
-state_change::sm_change::dump (pretty_printer *pp,
- const extrinsic_state &ext_state) const
-{
- const state_machine &sm = get_sm (ext_state);
- pp_string (pp, "(");
- m_new_sid.print (pp);
- pp_printf (pp, ": %s: %qs -> %qs)",
- sm.get_name (),
- sm.get_state_name (m_old_state),
- sm.get_state_name (m_new_state));
-}
-
-/* Remap all svalue_ids in this change according to MAP. */
-
-void
-state_change::sm_change::remap_svalue_ids (const svalue_id_map &map)
-{
- map.update (&m_new_sid);
-}
-
-/* Purge any svalue_ids >= FIRST_UNUSED_SID.
- Return the number of states that were purged. */
-
-int
-state_change::sm_change::on_svalue_purge (svalue_id first_unused_sid)
+static void
+log_set_of_svalues (logger *logger, const char *name,
+ const svalue_set &set)
{
- if (m_new_sid.as_int () >= first_unused_sid.as_int ())
+ logger->log (name);
+ logger->inc_indent ();
+ for (svalue_set::iterator iter = set.begin ();
+ iter != set.end (); ++iter)
{
- m_new_sid = svalue_id::null ();
- return 1;
+ logger->start_log_line ();
+ pretty_printer *pp = logger->get_printer ();
+ const svalue *sval = (*iter);
+ pp_pointer (pp, sval);
+ pp_string (pp, ": ");
+ sval->dump_to_pp (pp, false);
+ logger->end_log_line ();
}
-
- return 0;
-}
-
-/* Assert that this object is sane. */
-
-void
-state_change::sm_change::validate (const program_state &new_state,
- const extrinsic_state &ext_state) const
-{
- gcc_assert ((unsigned)m_sm_idx < ext_state.get_num_checkers ());
- const state_machine &sm = ext_state.get_sm (m_sm_idx);
- sm.validate (m_old_state);
- sm.validate (m_new_state);
- m_new_sid.validate (*new_state.m_region_model);
-}
-
-/* state_change's ctor. */
-
-state_change::state_change ()
-{
+ logger->dec_indent ();
}
-/* state_change's copy ctor. */
+/* Compare the sets of svalues reachable from each of SRC_STATE and DEST_STATE.
+ For all svalues that are reachable in SRC_STATE and are not live in
+ DEST_STATE (whether explicitly reachable in DEST_STATE, or implicitly live
+ based on the former set), call CTXT->on_svalue_leak for them.
-state_change::state_change (const state_change &other)
-: m_sm_changes (other.m_sm_changes.length ())
-{
- unsigned i;
- sm_change *change;
- FOR_EACH_VEC_ELT (other.m_sm_changes, i, change)
- m_sm_changes.quick_push (*change);
-}
+ Call on_liveness_change on both the CTXT and on the DEST_STATE's
+ constraint_manager, purging dead svalues from sm-state and from
+ constraints, respectively.
-/* Record a state-machine state change. */
+ This function should be called at each fine-grained state change, not
+ just at exploded edges. */
void
-state_change::add_sm_change (int sm_idx,
- svalue_id new_sid,
- state_machine::state_t old_state,
- state_machine::state_t new_state)
-{
- m_sm_changes.safe_push (sm_change (sm_idx,
- new_sid,
- old_state, new_state));
-}
-
-/* Return true if SID (in the new state) was affected by any
- sm-state changes. */
-
-bool
-state_change::affects_p (svalue_id sid) const
+program_state::detect_leaks (const program_state &src_state,
+ const program_state &dest_state,
+ const svalue *extra_sval,
+ const extrinsic_state &ext_state,
+ region_model_context *ctxt)
{
- unsigned i;
- sm_change *change;
- FOR_EACH_VEC_ELT (m_sm_changes, i, change)
+ logger *logger = ext_state.get_logger ();
+ LOG_SCOPE (logger);
+ if (logger)
{
- if (sid == change->m_new_sid)
- return true;
+ pretty_printer *pp = logger->get_printer ();
+ logger->start_log_line ();
+ pp_string (pp, "src_state: ");
+ src_state.dump_to_pp (ext_state, true, false, pp);
+ logger->end_log_line ();
+ logger->start_log_line ();
+ pp_string (pp, "dest_state: ");
+ dest_state.dump_to_pp (ext_state, true, false, pp);
+ logger->end_log_line ();
+ if (extra_sval)
+ {
+ logger->start_log_line ();
+ pp_string (pp, "extra_sval: ");
+ extra_sval->dump_to_pp (pp, true);
+ logger->end_log_line ();
+ }
}
- return false;
-}
-/* Dump this state_change to PP. */
+ /* Get svalues reachable from each of src_state and dst_state. */
+ svalue_set src_svalues;
+ svalue_set dest_svalues;
+ src_state.m_region_model->get_reachable_svalues (&src_svalues, NULL);
+ dest_state.m_region_model->get_reachable_svalues (&dest_svalues, extra_sval);
-void
-state_change::dump (pretty_printer *pp,
- const extrinsic_state &ext_state) const
-{
- unsigned i;
- sm_change *change;
- FOR_EACH_VEC_ELT (m_sm_changes, i, change)
+ if (logger)
{
- if (i > 0)
- pp_string (pp, ", ");
- change->dump (pp, ext_state);
+ log_set_of_svalues (logger, "src_state reachable svalues:", src_svalues);
+ log_set_of_svalues (logger, "dest_state reachable svalues:",
+ dest_svalues);
}
-}
-
-/* Dump this state_change to stderr. */
-void
-state_change::dump (const extrinsic_state &ext_state) const
-{
- pretty_printer pp;
- pp_show_color (&pp) = pp_show_color (global_dc->printer);
- pp.buffer->stream = stderr;
- dump (&pp, ext_state);
- pp_newline (&pp);
- pp_flush (&pp);
-}
-
-/* Remap all svalue_ids in this state_change according to MAP. */
-
-void
-state_change::remap_svalue_ids (const svalue_id_map &map)
-{
- unsigned i;
- sm_change *change;
- FOR_EACH_VEC_ELT (m_sm_changes, i, change)
- change->remap_svalue_ids (map);
-}
-
-/* Purge any svalue_ids >= FIRST_UNUSED_SID.
- Return the number of states that were purged. */
-
-int
-state_change::on_svalue_purge (svalue_id first_unused_sid)
-{
- int result = 0;
- unsigned i;
- sm_change *change;
- FOR_EACH_VEC_ELT (m_sm_changes, i, change)
- result += change->on_svalue_purge (first_unused_sid);
- return result;
-}
+ for (svalue_set::iterator iter = src_svalues.begin ();
+ iter != src_svalues.end (); ++iter)
+ {
+ const svalue *sval = (*iter);
+ /* For each sval reachable from SRC_STATE, determine if it is
+ live in DEST_STATE: either explicitly reachable, or implicitly
+ live based on the set of explicitly reachable svalues.
+ Call CTXT->on_svalue_leak on those that have ceased to be live. */
+ if (!sval->live_p (dest_svalues, dest_state.m_region_model))
+ ctxt->on_svalue_leak (sval);
+ }
-/* Assert that this object is sane. */
+ /* Purge dead svals from sm-state. */
+ ctxt->on_liveness_change (dest_svalues, dest_state.m_region_model);
-void
-state_change::validate (const program_state &new_state,
- const extrinsic_state &ext_state) const
-{
- /* Skip this in a release build. */
-#if !CHECKING_P
- return;
-#endif
- unsigned i;
- sm_change *change;
- FOR_EACH_VEC_ELT (m_sm_changes, i, change)
- change->validate (new_state, ext_state);
+ /* Purge dead svals from constraints. */
+ dest_state.m_region_model->get_constraints ()->on_liveness_change
+ (dest_svalues, dest_state.m_region_model);
}
#if CHECKING_P
namespace selftest {
-/* Implementation detail of ASSERT_DUMP_EQ. */
-
-static void
-assert_dump_eq (const location &loc,
- const program_state &state,
- const extrinsic_state &ext_state,
- bool summarize,
- const char *expected)
-{
- auto_fix_quotes sentinel;
- pretty_printer pp;
- pp_format_decoder (&pp) = default_tree_printer;
- state.dump_to_pp (ext_state, summarize, &pp);
- ASSERT_STREQ_AT (loc, pp_formatted_text (&pp), expected);
-}
-
-/* Assert that STATE.dump_to_pp (SUMMARIZE) is EXPECTED. */
-
-#define ASSERT_DUMP_EQ(STATE, EXT_STATE, SUMMARIZE, EXPECTED) \
- SELFTEST_BEGIN_STMT \
- assert_dump_eq ((SELFTEST_LOCATION), (STATE), (EXT_STATE), (SUMMARIZE), \
- (EXPECTED)); \
- SELFTEST_END_STMT
-
/* Tests for sm_state_map. */
static void
tree y = build_global_decl ("y", integer_type_node);
tree z = build_global_decl ("z", integer_type_node);
+ state_machine *sm = make_malloc_state_machine (NULL);
+ auto_delete_vec <state_machine> checkers;
+ checkers.safe_push (sm);
+ extrinsic_state ext_state (checkers);
+
/* Test setting states on svalue_id instances directly. */
{
- region_model model;
- svalue_id sid_x = model.get_rvalue (x, NULL);
- svalue_id sid_y = model.get_rvalue (y, NULL);
- svalue_id sid_z = model.get_rvalue (z, NULL);
+ region_model_manager mgr;
+ region_model model (&mgr);
+ const svalue *x_sval = model.get_rvalue (x, NULL);
+ const svalue *y_sval = model.get_rvalue (y, NULL);
+ const svalue *z_sval = model.get_rvalue (z, NULL);
- sm_state_map map;
+ sm_state_map map (*sm, 0);
ASSERT_TRUE (map.is_empty_p ());
- ASSERT_EQ (map.get_state (sid_x), 0);
+ ASSERT_EQ (map.get_state (x_sval, ext_state), 0);
- map.impl_set_state (sid_x, 42, sid_z);
- ASSERT_EQ (map.get_state (sid_x), 42);
- ASSERT_EQ (map.get_origin (sid_x), sid_z);
- ASSERT_EQ (map.get_state (sid_y), 0);
+ map.impl_set_state (x_sval, 42, z_sval, ext_state);
+ ASSERT_EQ (map.get_state (x_sval, ext_state), 42);
+ ASSERT_EQ (map.get_origin (x_sval, ext_state), z_sval);
+ ASSERT_EQ (map.get_state (y_sval, ext_state), 0);
ASSERT_FALSE (map.is_empty_p ());
- map.impl_set_state (sid_y, 0, sid_z);
- ASSERT_EQ (map.get_state (sid_y), 0);
+ map.impl_set_state (y_sval, 0, z_sval, ext_state);
+ ASSERT_EQ (map.get_state (y_sval, ext_state), 0);
- map.impl_set_state (sid_x, 0, sid_z);
- ASSERT_EQ (map.get_state (sid_x), 0);
+ map.impl_set_state (x_sval, 0, z_sval, ext_state);
+ ASSERT_EQ (map.get_state (x_sval, ext_state), 0);
ASSERT_TRUE (map.is_empty_p ());
}
/* Test setting states via equivalence classes. */
{
- region_model model;
- svalue_id sid_x = model.get_rvalue (x, NULL);
- svalue_id sid_y = model.get_rvalue (y, NULL);
- svalue_id sid_z = model.get_rvalue (z, NULL);
+ region_model_manager mgr;
+ region_model model (&mgr);
+ const svalue *x_sval = model.get_rvalue (x, NULL);
+ const svalue *y_sval = model.get_rvalue (y, NULL);
+ const svalue *z_sval = model.get_rvalue (z, NULL);
- sm_state_map map;
+ sm_state_map map (*sm, 0);
ASSERT_TRUE (map.is_empty_p ());
- ASSERT_EQ (map.get_state (sid_x), 0);
- ASSERT_EQ (map.get_state (sid_y), 0);
+ ASSERT_EQ (map.get_state (x_sval, ext_state), 0);
+ ASSERT_EQ (map.get_state (y_sval, ext_state), 0);
model.add_constraint (x, EQ_EXPR, y, NULL);
/* Setting x to a state should also update y, as they
are in the same equivalence class. */
- map.set_state (&model, sid_x, 5, sid_z);
- ASSERT_EQ (map.get_state (sid_x), 5);
- ASSERT_EQ (map.get_state (sid_y), 5);
- ASSERT_EQ (map.get_origin (sid_x), sid_z);
- ASSERT_EQ (map.get_origin (sid_y), sid_z);
+ map.set_state (&model, x_sval, 5, z_sval, ext_state);
+ ASSERT_EQ (map.get_state (x_sval, ext_state), 5);
+ ASSERT_EQ (map.get_state (y_sval, ext_state), 5);
+ ASSERT_EQ (map.get_origin (x_sval, ext_state), z_sval);
+ ASSERT_EQ (map.get_origin (y_sval, ext_state), z_sval);
}
/* Test equality and hashing. */
{
- region_model model;
- svalue_id sid_y = model.get_rvalue (y, NULL);
- svalue_id sid_z = model.get_rvalue (z, NULL);
+ region_model_manager mgr;
+ region_model model (&mgr);
+ const svalue *y_sval = model.get_rvalue (y, NULL);
+ const svalue *z_sval = model.get_rvalue (z, NULL);
- sm_state_map map0;
- sm_state_map map1;
- sm_state_map map2;
+ sm_state_map map0 (*sm, 0);
+ sm_state_map map1 (*sm, 0);
+ sm_state_map map2 (*sm, 0);
ASSERT_EQ (map0.hash (), map1.hash ());
ASSERT_EQ (map0, map1);
- map1.impl_set_state (sid_y, 5, sid_z);
+ map1.impl_set_state (y_sval, 5, z_sval, ext_state);
ASSERT_NE (map0.hash (), map1.hash ());
ASSERT_NE (map0, map1);
/* Make the same change to map2. */
- map2.impl_set_state (sid_y, 5, sid_z);
+ map2.impl_set_state (y_sval, 5, z_sval, ext_state);
ASSERT_EQ (map1.hash (), map2.hash ());
ASSERT_EQ (map1, map2);
}
/* Equality and hashing shouldn't depend on ordering. */
{
- sm_state_map map0;
- sm_state_map map1;
- sm_state_map map2;
+ sm_state_map map0 (*sm, 0);
+ sm_state_map map1 (*sm, 0);
+ sm_state_map map2 (*sm, 0);
ASSERT_EQ (map0.hash (), map1.hash ());
ASSERT_EQ (map0, map1);
- map1.impl_set_state (svalue_id::from_int (14), 2, svalue_id::null ());
- map1.impl_set_state (svalue_id::from_int (16), 3, svalue_id::null ());
- map1.impl_set_state (svalue_id::from_int (1), 2, svalue_id::null ());
- map1.impl_set_state (svalue_id::from_int (9), 2, svalue_id::null ());
+ region_model_manager mgr;
+ region_model model (&mgr);
+ const svalue *x_sval = model.get_rvalue (x, NULL);
+ const svalue *y_sval = model.get_rvalue (y, NULL);
+ const svalue *z_sval = model.get_rvalue (z, NULL);
- map2.impl_set_state (svalue_id::from_int (1), 2, svalue_id::null ());
- map2.impl_set_state (svalue_id::from_int (16), 3, svalue_id::null ());
- map2.impl_set_state (svalue_id::from_int (14), 2, svalue_id::null ());
- map2.impl_set_state (svalue_id::from_int (9), 2, svalue_id::null ());
+ map1.impl_set_state (x_sval, 2, NULL, ext_state);
+ map1.impl_set_state (y_sval, 3, NULL, ext_state);
+ map1.impl_set_state (z_sval, 2, NULL, ext_state);
+
+ map2.impl_set_state (z_sval, 2, NULL, ext_state);
+ map2.impl_set_state (y_sval, 3, NULL, ext_state);
+ map2.impl_set_state (x_sval, 2, NULL, ext_state);
ASSERT_EQ (map1.hash (), map2.hash ());
ASSERT_EQ (map1, map2);
}
- /* Test sm_state_map::remap_svalue_ids. */
- {
- sm_state_map map;
- svalue_id sid_0 = svalue_id::from_int (0);
- svalue_id sid_1 = svalue_id::from_int (1);
- svalue_id sid_2 = svalue_id::from_int (2);
-
- map.impl_set_state (sid_0, 42, sid_2);
- ASSERT_EQ (map.get_state (sid_0), 42);
- ASSERT_EQ (map.get_origin (sid_0), sid_2);
- ASSERT_EQ (map.get_state (sid_1), 0);
- ASSERT_EQ (map.get_state (sid_2), 0);
-
- /* Apply a remapping to the IDs. */
- svalue_id_map remapping (3);
- remapping.put (sid_0, sid_1);
- remapping.put (sid_1, sid_2);
- remapping.put (sid_2, sid_0);
- map.remap_svalue_ids (remapping);
-
- /* Verify that the IDs have been remapped. */
- ASSERT_EQ (map.get_state (sid_1), 42);
- ASSERT_EQ (map.get_origin (sid_1), sid_0);
- ASSERT_EQ (map.get_state (sid_2), 0);
- ASSERT_EQ (map.get_state (sid_0), 0);
- }
-
// TODO: coverage for purging
}
-/* Verify that program_state::dump_to_pp works as expected. */
+/* Check program_state works as expected. */
static void
-test_program_state_dumping ()
+test_program_state_1 ()
{
/* Create a program_state for a global ptr "p" that has
malloc sm-state, pointing to a region on the heap. */
= sm->get_state_by_name ("unchecked");
auto_delete_vec <state_machine> checkers;
checkers.safe_push (sm);
- extrinsic_state ext_state (checkers);
+ engine eng;
+ extrinsic_state ext_state (checkers, NULL, &eng);
+ region_model_manager *mgr = eng.get_model_manager ();
program_state s (ext_state);
region_model *model = s.m_region_model;
- region_id new_rid = model->add_new_malloc_region ();
- svalue_id ptr_sid
- = model->get_or_create_ptr_svalue (ptr_type_node, new_rid);
+ const svalue *size_in_bytes
+ = mgr->get_or_create_unknown_svalue (integer_type_node);
+ const region *new_reg = model->create_region_for_heap_alloc (size_in_bytes);
+ const svalue *ptr_sval = mgr->get_ptr_svalue (ptr_type_node, new_reg);
model->set_value (model->get_lvalue (p, NULL),
- ptr_sid, NULL);
+ ptr_sval, NULL);
sm_state_map *smap = s.m_checker_states[0];
- smap->impl_set_state (ptr_sid, UNCHECKED_STATE, svalue_id::null ());
- ASSERT_EQ (smap->get_state (ptr_sid), UNCHECKED_STATE);
-
- ASSERT_DUMP_EQ
- (s, ext_state, false,
- "rmodel: r0: {kind: `root', parent: null, sval: null}\n"
- "|-heap: r1: {kind: `heap', parent: r0, sval: null}\n"
- "| `-r2: {kind: `symbolic', parent: r1, sval: null, possibly_null: true}\n"
- "`-globals: r3: {kind: `globals', parent: r0, sval: null, map: {`p': r4}}\n"
- " `-`p': r4: {kind: `primitive', parent: r3, sval: sv0, type: `void *'}\n"
- " |: sval: sv0: {type: `void *', &r2}\n"
- " |: type: `void *'\n"
- "svalues:\n"
- " sv0: {type: `void *', &r2}\n"
- "constraint manager:\n"
- " equiv classes:\n"
- " constraints:\n"
- "malloc: {sv0: unchecked (`p')}\n");
-
- ASSERT_DUMP_EQ (s, ext_state, true,
- "rmodel: p: &r2 malloc: {sv0: unchecked (`p')}");
+ smap->impl_set_state (ptr_sval, UNCHECKED_STATE, NULL, ext_state);
+ ASSERT_EQ (smap->get_state (ptr_sval, ext_state), UNCHECKED_STATE);
}
-/* Verify that program_state::dump_to_pp works for string literals. */
+/* Check that program_state works for string literals. */
static void
-test_program_state_dumping_2 ()
+test_program_state_2 ()
{
- /* Create a program_state for a global ptr "p" that points to
- a string constant. */
+ /* Create a program_state for a global ptr "p" that points to
+ a string constant. */
tree p = build_global_decl ("p", ptr_type_node);
tree string_cst_ptr = build_string_literal (4, "foo");
auto_delete_vec <state_machine> checkers;
- extrinsic_state ext_state (checkers);
+ engine eng;
+ extrinsic_state ext_state (checkers, NULL, &eng);
program_state s (ext_state);
region_model *model = s.m_region_model;
- region_id p_rid = model->get_lvalue (p, NULL);
- svalue_id str_sid = model->get_rvalue (string_cst_ptr, NULL);
- model->set_value (p_rid, str_sid, NULL);
-
- ASSERT_DUMP_EQ
- (s, ext_state, false,
- "rmodel: r0: {kind: `root', parent: null, sval: null}\n"
- "|-globals: r1: {kind: `globals', parent: r0, sval: null, map: {`p': r2}}\n"
- "| `-`p': r2: {kind: `primitive', parent: r1, sval: sv3, type: `void *'}\n"
- "| |: sval: sv3: {type: `void *', &r4}\n"
- "| |: type: `void *'\n"
- "`-r3: {kind: `array', parent: r0, sval: sv0, type: `const char[4]', array: {[0]: r4}}\n"
- " |: sval: sv0: {type: `const char[4]', `\"foo\"'}\n"
- " |: type: `const char[4]'\n"
- " `-[0]: r4: {kind: `primitive', parent: r3, sval: null, type: `const char'}\n"
- " |: type: `const char'\n"
- "svalues:\n"
- " sv0: {type: `const char[4]', `\"foo\"'}\n"
- " sv1: {type: `int', `0'}\n"
- " sv2: {type: `const char *', &r4}\n"
- " sv3: {type: `void *', &r4}\n"
- "constraint manager:\n"
- " equiv classes:\n"
- " constraints:\n");
-
- ASSERT_DUMP_EQ (s, ext_state, true,
- "rmodel: p: &\"foo\"[0]");
+ const region *p_reg = model->get_lvalue (p, NULL);
+ const svalue *str_sval = model->get_rvalue (string_cst_ptr, NULL);
+ model->set_value (p_reg, str_sval, NULL);
}
/* Verify that program_states with identical sm-state can be merged,
malloc sm-state, pointing to a region on the heap. */
tree p = build_global_decl ("p", ptr_type_node);
+ program_point point (program_point::origin ());
auto_delete_vec <state_machine> checkers;
checkers.safe_push (make_malloc_state_machine (NULL));
- extrinsic_state ext_state (checkers);
+ engine eng;
+ extrinsic_state ext_state (checkers, NULL, &eng);
+ region_model_manager *mgr = eng.get_model_manager ();
program_state s0 (ext_state);
- impl_region_model_context ctxt (&s0, NULL, ext_state);
+ impl_region_model_context ctxt (&s0, ext_state);
region_model *model0 = s0.m_region_model;
- region_id new_rid = model0->add_new_malloc_region ();
- svalue_id ptr_sid
- = model0->get_or_create_ptr_svalue (ptr_type_node, new_rid);
+ const svalue *size_in_bytes
+ = mgr->get_or_create_unknown_svalue (integer_type_node);
+ const region *new_reg = model0->create_region_for_heap_alloc (size_in_bytes);
+ const svalue *ptr_sval = mgr->get_ptr_svalue (ptr_type_node, new_reg);
model0->set_value (model0->get_lvalue (p, &ctxt),
- ptr_sid, &ctxt);
+ ptr_sval, &ctxt);
sm_state_map *smap = s0.m_checker_states[0];
const state_machine::state_t TEST_STATE = 3;
- smap->impl_set_state (ptr_sid, TEST_STATE, svalue_id::null ());
- ASSERT_EQ (smap->get_state (ptr_sid), TEST_STATE);
+ smap->impl_set_state (ptr_sval, TEST_STATE, NULL, ext_state);
+ ASSERT_EQ (smap->get_state (ptr_sval, ext_state), TEST_STATE);
- model0->canonicalize (&ctxt);
+ model0->canonicalize ();
/* Verify that canonicalization preserves sm-state. */
- ASSERT_EQ (smap->get_state (model0->get_rvalue (p, NULL)), TEST_STATE);
+ ASSERT_EQ (smap->get_state (model0->get_rvalue (p, NULL), ext_state),
+ TEST_STATE);
/* Make a copy of the program_state. */
program_state s1 (s0);
with the given sm-state.
They ought to be mergeable, preserving the sm-state. */
program_state merged (ext_state);
- ASSERT_TRUE (s0.can_merge_with_p (s1, ext_state, &merged));
+ ASSERT_TRUE (s0.can_merge_with_p (s1, point, &merged));
merged.validate (ext_state);
/* Verify that the merged state has the sm-state for "p". */
region_model *merged_model = merged.m_region_model;
sm_state_map *merged_smap = merged.m_checker_states[0];
- ASSERT_EQ (merged_smap->get_state (merged_model->get_rvalue (p, NULL)),
+ ASSERT_EQ (merged_smap->get_state (merged_model->get_rvalue (p, NULL),
+ ext_state),
TEST_STATE);
/* Try canonicalizing. */
- impl_region_model_context merged_ctxt (&merged, NULL, ext_state);
- merged.m_region_model->canonicalize (&merged_ctxt);
+ merged.m_region_model->canonicalize ();
merged.validate (ext_state);
/* Verify that the merged state still has the sm-state for "p". */
- ASSERT_EQ (merged_smap->get_state (merged_model->get_rvalue (p, NULL)),
+ ASSERT_EQ (merged_smap->get_state (merged_model->get_rvalue (p, NULL),
+ ext_state),
TEST_STATE);
/* After canonicalization, we ought to have equality with the inputs. */
static void
test_program_state_merging_2 ()
{
+ program_point point (program_point::origin ());
auto_delete_vec <state_machine> checkers;
checkers.safe_push (make_signal_state_machine (NULL));
extrinsic_state ext_state (checkers);
/* They ought to not be mergeable. */
program_state merged (ext_state);
- ASSERT_FALSE (s0.can_merge_with_p (s1, ext_state, &merged));
+ ASSERT_FALSE (s0.can_merge_with_p (s1, point, &merged));
}
/* Run all of the selftests within this file. */
analyzer_program_state_cc_tests ()
{
test_sm_state_map ();
- test_program_state_dumping ();
- test_program_state_dumping_2 ();
+ test_program_state_1 ();
+ test_program_state_2 ();
test_program_state_merging ();
test_program_state_merging_2 ();
}
class extrinsic_state
{
public:
- extrinsic_state (auto_delete_vec <state_machine> &checkers)
- : m_checkers (checkers)
+ extrinsic_state (auto_delete_vec <state_machine> &checkers,
+ logger *logger = NULL,
+ engine *eng = NULL)
+ : m_checkers (checkers), m_logger (logger), m_engine (eng)
{
}
unsigned get_num_checkers () const { return m_checkers.length (); }
+ logger *get_logger () const { return m_logger; }
+
void dump_to_pp (pretty_printer *pp) const;
void dump_to_file (FILE *outf) const;
void dump () const;
+ engine *get_engine () const { return m_engine; }
+ region_model_manager *get_model_manager () const;
+
private:
/* The state machines. */
auto_delete_vec <state_machine> &m_checkers;
-};
-} // namespace ana
-
-template <> struct default_hash_traits<svalue_id>
-: public pod_hash_traits<svalue_id>
-{
- static const bool empty_zero_p = false;
+ logger *m_logger;
+ engine *m_engine;
};
-template <>
-inline hashval_t
-pod_hash_traits<svalue_id>::hash (value_type v)
-{
- return v.as_int ();
-}
-
-template <>
-inline bool
-pod_hash_traits<svalue_id>::equal (const value_type &existing,
- const value_type &candidate)
-{
- return existing == candidate;
-}
-template <>
-inline void
-pod_hash_traits<svalue_id>::mark_deleted (value_type &v)
-{
- v = svalue_id::from_int (-2);
-}
-template <>
-inline void
-pod_hash_traits<svalue_id>::mark_empty (value_type &v)
-{
- v = svalue_id::null ();
-}
-template <>
-inline bool
-pod_hash_traits<svalue_id>::is_deleted (value_type v)
-{
- return v.as_int () == -2;
-}
-template <>
-inline bool
-pod_hash_traits<svalue_id>::is_empty (value_type v)
-{
- return v.null_p ();
-}
-
-namespace ana {
-
-/* Map from svalue_id to state machine state, also capturing the origin of
+/* Map from svalue * to state machine state, also capturing the origin of
each state. */
class sm_state_map
{
/* Default ctor needed by hash_map::empty. */
entry_t ()
- : m_state (0), m_origin (svalue_id::null ())
+ : m_state (0), m_origin (NULL)
{
}
entry_t (state_machine::state_t state,
- svalue_id origin)
+ const svalue *origin)
: m_state (state), m_origin (origin)
{}
}
state_machine::state_t m_state;
- svalue_id m_origin;
+ const svalue *m_origin;
};
- typedef hash_map <svalue_id, entry_t> map_t;
+ typedef hash_map <const svalue *, entry_t> map_t;
typedef map_t::iterator iterator_t;
- sm_state_map ();
+ sm_state_map (const state_machine &sm, int m_sm_idx);
sm_state_map *clone () const;
- sm_state_map *
- clone_with_remapping (const one_way_svalue_id_map &id_map) const;
-
- void print (const state_machine &sm, const region_model *model,
+ void print (const region_model *model,
+ bool simple, bool multiline,
pretty_printer *pp) const;
- void dump (const state_machine &sm) const;
+ void dump (bool simple) const;
bool is_empty_p () const;
return !(*this == other);
}
- state_machine::state_t get_state (svalue_id sid) const;
- svalue_id get_origin (svalue_id sid) const;
+ state_machine::state_t get_state (const svalue *sval,
+ const extrinsic_state &ext_state) const;
+ const svalue *get_origin (const svalue *sval,
+ const extrinsic_state &ext_state) const;
void set_state (region_model *model,
- svalue_id sid,
+ const svalue *sval,
state_machine::state_t state,
- svalue_id origin);
+ const svalue *origin,
+ const extrinsic_state &ext_state);
bool set_state (const equiv_class &ec,
state_machine::state_t state,
- svalue_id origin);
- bool impl_set_state (svalue_id sid,
+ const svalue *origin,
+ const extrinsic_state &ext_state);
+ bool impl_set_state (const svalue *sval,
state_machine::state_t state,
- svalue_id origin);
+ const svalue *origin,
+ const extrinsic_state &ext_state);
void set_global_state (state_machine::state_t state);
state_machine::state_t get_global_state () const;
- void purge_for_unknown_fncall (const exploded_graph &eg,
- const state_machine &sm,
- const gcall *call, tree fndecl,
- region_model *new_model,
- region_model_context *ctxt);
-
- void remap_svalue_ids (const svalue_id_map &map);
-
- int on_svalue_purge (const state_machine &sm,
- int sm_idx,
- svalue_id first_unused_sid,
- const svalue_id_map &map,
+ void on_svalue_leak (const svalue *sval,
impl_region_model_context *ctxt);
+ void on_liveness_change (const svalue_set &live_svalues,
+ const region_model *model,
+ impl_region_model_context *ctxt);
- void on_inherited_svalue (svalue_id parent_sid,
- svalue_id child_sid);
-
- void on_cast (svalue_id src_sid,
- svalue_id dst_sid);
-
- void on_unknown_change (svalue_id sid);
-
- void validate (const state_machine &sm, int num_svalues) const;
+ void on_unknown_change (const svalue *sval,
+ bool is_mutable,
+ const extrinsic_state &ext_state);
iterator_t begin () const { return m_map.begin (); }
iterator_t end () const { return m_map.end (); }
+ static const svalue *
+ canonicalize_svalue (const svalue *sval, const extrinsic_state &ext_state);
+
private:
+ const state_machine &m_sm;
+ int m_sm_idx;
map_t m_map;
state_machine::state_t m_global_state;
};
#if __cplusplus >= 201103
program_state (program_state &&other);
- program_state& operator= (program_state &&other); // doesn't seem to be used
#endif
~program_state ();
void print (const extrinsic_state &ext_state,
pretty_printer *pp) const;
- void dump_to_pp (const extrinsic_state &ext_state, bool summarize,
- pretty_printer *pp) const;
- void dump_to_file (const extrinsic_state &ext_state, bool summarize,
- FILE *outf) const;
- void dump (const extrinsic_state &ext_state, bool summarize) const;
+ void dump_to_pp (const extrinsic_state &ext_state, bool simple,
+ bool multiline, pretty_printer *pp) const;
+ void dump_to_file (const extrinsic_state &ext_state, bool simple,
+ bool multiline, FILE *outf) const;
+ void dump (const extrinsic_state &ext_state, bool simple) const;
+
+ void push_frame (const extrinsic_state &ext_state, function *fun);
+ function * get_current_function () const;
bool on_edge (exploded_graph &eg,
const exploded_node &enode,
- const superedge *succ,
- state_change *change);
+ const superedge *succ);
program_state prune_for_point (exploded_graph &eg,
const program_point &point,
- state_change *change) const;
+ const exploded_node *enode_for_diag) const;
- void remap_svalue_ids (const svalue_id_map &map);
-
- tree get_representative_tree (svalue_id sid) const;
+ tree get_representative_tree (const svalue *sval) const;
bool can_purge_p (const extrinsic_state &ext_state,
- svalue_id sid)
+ const svalue *sval)
{
/* Don't purge vars that have non-purgeable sm state, to avoid
generating false "leak" complaints. */
FOR_EACH_VEC_ELT (m_checker_states, i, smap)
{
const state_machine &sm = ext_state.get_sm (i);
- if (!sm.can_purge_p (smap->get_state (sid)))
+ if (!sm.can_purge_p (smap->get_state (sval, ext_state)))
return false;
}
return true;
}
bool can_merge_with_p (const program_state &other,
- const extrinsic_state &ext_state,
+ const program_point &point,
program_state *out) const;
void validate (const extrinsic_state &ext_state) const;
+ static void detect_leaks (const program_state &src_state,
+ const program_state &dest_state,
+ const svalue *extra_sval,
+ const extrinsic_state &ext_state,
+ region_model_context *ctxt);
+
/* TODO: lose the pointer here (const-correctness issues?). */
region_model *m_region_model;
auto_delete_vec<sm_state_map> m_checker_states;
virtual bool on_state_change (const state_machine &sm,
state_machine::state_t src_sm_val,
state_machine::state_t dst_sm_val,
- tree dst_rep,
- svalue_id dst_origin_sid) = 0;
+ const svalue *dst_sval,
+ const svalue *dst_origin_sval) = 0;
};
extern bool for_each_state_change (const program_state &src_state,
- const program_state &dst_state,
- const extrinsic_state &ext_state,
- state_change_visitor *visitor);
-
-/* A class for recording "interesting" state changes.
- This is used for annotating edges in the GraphViz output of the
- exploded_graph, and for recording sm-state-changes, so that
- values that change aren't purged (to make it easier to generate
- state_change_event instances in the diagnostic_path). */
-
-class state_change
-{
- public:
- struct sm_change
- {
- sm_change (int sm_idx,
- svalue_id new_sid,
- state_machine::state_t old_state,
- state_machine::state_t new_state)
- : m_sm_idx (sm_idx),
- m_new_sid (new_sid),
- m_old_state (old_state), m_new_state (new_state)
- {}
-
- const state_machine &get_sm (const extrinsic_state &ext_state) const
- {
- return ext_state.get_sm (m_sm_idx);
- }
-
- void dump (pretty_printer *pp, const extrinsic_state &ext_state) const;
-
- void remap_svalue_ids (const svalue_id_map &map);
- int on_svalue_purge (svalue_id first_unused_sid);
-
- void validate (const program_state &new_state,
- const extrinsic_state &ext_state) const;
-
- int m_sm_idx;
- svalue_id m_new_sid;
- state_machine::state_t m_old_state;
- state_machine::state_t m_new_state;
- };
-
- state_change ();
- state_change (const state_change &other);
-
- void add_sm_change (int sm_idx,
- svalue_id new_sid,
- state_machine::state_t old_state,
- state_machine::state_t new_state);
-
- bool affects_p (svalue_id sid) const;
-
- void dump (pretty_printer *pp, const extrinsic_state &ext_state) const;
- void dump (const extrinsic_state &ext_state) const;
-
- void remap_svalue_ids (const svalue_id_map &map);
- int on_svalue_purge (svalue_id first_unused_sid);
-
- void validate (const program_state &new_state,
- const extrinsic_state &ext_state) const;
-
- private:
- auto_vec<sm_change> m_sm_changes;
-};
+ const program_state &dst_state,
+ const extrinsic_state &ext_state,
+ state_change_visitor *visitor);
} // namespace ana
--- /dev/null
+/* Handling for the known behavior of various specific functions.
+ Copyright (C) 2020 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tree.h"
+#include "function.h"
+#include "basic-block.h"
+#include "gimple.h"
+#include "gimple-iterator.h"
+#include "diagnostic-core.h"
+#include "graphviz.h"
+#include "options.h"
+#include "cgraph.h"
+#include "tree-dfa.h"
+#include "stringpool.h"
+#include "convert.h"
+#include "target.h"
+#include "fold-const.h"
+#include "tree-pretty-print.h"
+#include "diagnostic-color.h"
+#include "diagnostic-metadata.h"
+#include "tristate.h"
+#include "bitmap.h"
+#include "selftest.h"
+#include "function.h"
+#include "analyzer/analyzer.h"
+#include "analyzer/analyzer-logging.h"
+#include "ordered-hash-map.h"
+#include "options.h"
+#include "cgraph.h"
+#include "cfg.h"
+#include "digraph.h"
+#include "analyzer/supergraph.h"
+#include "sbitmap.h"
+#include "analyzer/call-string.h"
+#include "analyzer/program-point.h"
+#include "analyzer/store.h"
+#include "analyzer/region-model.h"
+#include "gimple-pretty-print.h"
+
+#if ENABLE_ANALYZER
+
+namespace ana {
+
+/* class call_details. */
+
+/* call_details's ctor. */
+
+call_details::call_details (const gcall *call, region_model *model,
+ region_model_context *ctxt)
+: m_call (call), m_model (model), m_ctxt (ctxt),
+ m_lhs_type (NULL_TREE), m_lhs_region (NULL)
+{
+ m_lhs_type = NULL_TREE;
+ if (tree lhs = gimple_call_lhs (call))
+ {
+ m_lhs_region = model->get_lvalue (lhs, ctxt);
+ m_lhs_type = TREE_TYPE (lhs);
+ }
+}
+
+/* If the callsite has a left-hand-side region, set it to RESULT
+ and return true.
+ Otherwise do nothing and return false. */
+
+bool
+call_details::maybe_set_lhs (const svalue *result) const
+{
+ gcc_assert (result);
+ if (m_lhs_region)
+ {
+ m_model->set_value (m_lhs_region, result, m_ctxt);
+ return true;
+ }
+ else
+ return false;
+}
+
+/* Get argument IDX at the callsite as a tree. */
+
+tree
+call_details::get_arg_tree (unsigned idx) const
+{
+ return gimple_call_arg (m_call, idx);
+}
+
+/* Get argument IDX at the callsite as an svalue. */
+
+const svalue *
+call_details::get_arg_svalue (unsigned idx) const
+{
+ tree arg = get_arg_tree (idx);
+ return m_model->get_rvalue (arg, m_ctxt);
+}
+
+/* Dump a multiline representation of this call to PP. */
+
+void
+call_details::dump_to_pp (pretty_printer *pp, bool simple) const
+{
+ pp_string (pp, "gcall: ");
+ pp_gimple_stmt_1 (pp, m_call, 0 /* spc */, TDF_NONE /* flags */);
+ pp_newline (pp);
+ pp_string (pp, "return region: ");
+ if (m_lhs_region)
+ m_lhs_region->dump_to_pp (pp, simple);
+ else
+ pp_string (pp, "NULL");
+ pp_newline (pp);
+ for (unsigned i = 0; i < gimple_call_num_args (m_call); i++)
+ {
+ const svalue *arg_sval = get_arg_svalue (i);
+ pp_printf (pp, "arg %i: ", i);
+ arg_sval->dump_to_pp (pp, simple);
+ pp_newline (pp);
+ }
+}
+
+/* Dump a multiline representation of this call to stderr. */
+
+DEBUG_FUNCTION void
+call_details::dump (bool simple) const
+{
+ pretty_printer pp;
+ pp_format_decoder (&pp) = default_tree_printer;
+ pp_show_color (&pp) = pp_show_color (global_dc->printer);
+ pp.buffer->stream = stderr;
+ dump_to_pp (&pp, simple);
+ pp_flush (&pp);
+}
+
+/* Implementations of specific functions. */
+
+/* Handle the on_call_pre part of "alloca". */
+
+bool
+region_model::impl_call_alloca (const call_details &cd)
+{
+ const svalue *size_sval = cd.get_arg_svalue (0);
+ const region *new_reg = create_region_for_alloca (size_sval);
+ const svalue *ptr_sval
+ = m_mgr->get_ptr_svalue (cd.get_lhs_type (), new_reg);
+ cd.maybe_set_lhs (ptr_sval);
+ return true;
+}
+
+/* Handle a call to "__analyzer_describe".
+
+ Emit a warning describing the 2nd argument (which can be of any
+ type), at the given verbosity level. This is for use when
+ debugging, and may be of use in DejaGnu tests. */
+
+void
+region_model::impl_call_analyzer_describe (const gcall *call,
+ region_model_context *ctxt)
+{
+ tree t_verbosity = gimple_call_arg (call, 0);
+ tree t_val = gimple_call_arg (call, 1);
+ const svalue *sval = get_rvalue (t_val, ctxt);
+ bool simple = zerop (t_verbosity);
+ label_text desc = sval->get_desc (simple);
+ warning_at (call->location, 0, "svalue: %qs", desc.m_buffer);
+}
+
+/* Handle a call to "__analyzer_eval" by evaluating the input
+ and dumping as a dummy warning, so that test cases can use
+ dg-warning to validate the result (and so unexpected warnings will
+ lead to DejaGnu failures).
+ Broken out as a subroutine to make it easier to put a breakpoint on it
+ - though typically this doesn't help, as we have an SSA name as the arg,
+ and what's more interesting is usually the def stmt for that name. */
+
+void
+region_model::impl_call_analyzer_eval (const gcall *call,
+ region_model_context *ctxt)
+{
+ tree t_arg = gimple_call_arg (call, 0);
+ tristate t = eval_condition (t_arg, NE_EXPR, integer_zero_node, ctxt);
+ warning_at (call->location, 0, "%s", t.as_string ());
+}
+
+/* Handle the on_call_pre part of "__builtin_expect" etc. */
+
+bool
+region_model::impl_call_builtin_expect (const call_details &cd)
+{
+ /* __builtin_expect's return value is its initial argument. */
+ const svalue *sval = cd.get_arg_svalue (0);
+ cd.maybe_set_lhs (sval);
+ return false;
+}
+
+/* Handle the on_call_pre part of "calloc". */
+
+bool
+region_model::impl_call_calloc (const call_details &cd)
+{
+ const svalue *nmemb_sval = cd.get_arg_svalue (0);
+ const svalue *size_sval = cd.get_arg_svalue (1);
+ /* TODO: check for overflow here? */
+ const svalue *prod_sval
+ = m_mgr->get_or_create_binop (size_type_node, MULT_EXPR,
+ nmemb_sval, size_sval);
+ const region *new_reg = create_region_for_heap_alloc (prod_sval);
+ zero_fill_region (new_reg);
+ if (cd.get_lhs_type ())
+ {
+ const svalue *ptr_sval
+ = m_mgr->get_ptr_svalue (cd.get_lhs_type (), new_reg);
+ cd.maybe_set_lhs (ptr_sval);
+ }
+ return true;
+}
+
+/* Handle the on_call_post part of "free", after sm-handling.
+
+ If the ptr points to an underlying heap region, delete the region,
+ poisoning pointers to it and regions within it.
+
+ We delay this until after sm-state has been updated so that the
+ sm-handling can transition all of the various casts of the pointer
+ to a "freed" state *before* we delete the related region here.
+
+ This has to be done here so that the sm-handling can use the fact
+ that they point to the same region to establish that they are equal
+ (in region_model::eval_condition_without_cm), and thus transition
+ all pointers to the region to the "freed" state together, regardless
+ of casts. */
+
+void
+region_model::impl_call_free (const call_details &cd)
+{
+ const svalue *ptr_sval = cd.get_arg_svalue (0);
+ if (const region_svalue *ptr_to_region_sval
+ = ptr_sval->dyn_cast_region_svalue ())
+ {
+ /* If the ptr points to an underlying heap region, delete it,
+ poisoning pointers. */
+ const region *freed_reg = ptr_to_region_sval->get_pointee ();
+ unbind_region_and_descendents (freed_reg, POISON_KIND_FREED);
+ }
+}
+
+/* Handle the on_call_pre part of "malloc". */
+
+bool
+region_model::impl_call_malloc (const call_details &cd)
+{
+ const svalue *size_sval = cd.get_arg_svalue (0);
+ const region *new_reg = create_region_for_heap_alloc (size_sval);
+ if (cd.get_lhs_type ())
+ {
+ const svalue *ptr_sval
+ = m_mgr->get_ptr_svalue (cd.get_lhs_type (), new_reg);
+ cd.maybe_set_lhs (ptr_sval);
+ }
+ return true;
+}
+
+/* Handle the on_call_pre part of "memset" and "__builtin_memset".
+
+ Model the cases we can handle precisely (zero-size no-op, and an
+ exact-sized zero-fill of the destination region), returning true;
+ otherwise conservatively mark the destination's contents as unknown
+ and return false. */
+
+bool
+region_model::impl_call_memset (const call_details &cd)
+{
+ const svalue *dest_sval = cd.get_arg_svalue (0);
+ const svalue *fill_value_sval = cd.get_arg_svalue (1);
+ const svalue *num_bytes_sval = cd.get_arg_svalue (2);
+
+ const region *dest_reg = deref_rvalue (dest_sval, cd.get_arg_tree (0),
+ cd.get_ctxt ());
+
+ if (tree num_bytes = num_bytes_sval->maybe_get_constant ())
+ {
+ /* "memset" of zero size is a no-op. */
+ if (zerop (num_bytes))
+ return true;
+
+ /* Set with known amount. */
+ byte_size_t reg_size;
+ if (dest_reg->get_byte_size (&reg_size))
+ {
+ /* Check for an exact size match. */
+ if (reg_size == wi::to_offset (num_bytes))
+ {
+ if (tree cst = fill_value_sval->maybe_get_constant ())
+ {
+ if (zerop (cst))
+ {
+ zero_fill_region (dest_reg);
+ return true;
+ }
+ }
+ }
+ }
+ }
+
+ /* Otherwise, mark region's contents as unknown. */
+ mark_region_as_unknown (dest_reg);
+ return false;
+}
+
+/* Handle the on_call_pre part of "strlen".
+ Return true if the LHS is updated.
+
+ Only the case where the argument is a string literal is modeled
+ precisely; any other buffer yields an unknown result. */
+
+bool
+region_model::impl_call_strlen (const call_details &cd)
+{
+ region_model_context *ctxt = cd.get_ctxt ();
+ const svalue *arg_sval = cd.get_arg_svalue (0);
+ const region *buf_reg = deref_rvalue (arg_sval, cd.get_arg_tree (0), ctxt);
+ if (const string_region *str_reg
+ = buf_reg->dyn_cast_string_region ())
+ {
+ tree str_cst = str_reg->get_string_cst ();
+ /* TREE_STRING_LENGTH is sizeof, not strlen. */
+ int sizeof_cst = TREE_STRING_LENGTH (str_cst);
+ int strlen_cst = sizeof_cst - 1;
+ if (cd.get_lhs_type ())
+ {
+ tree t_cst = build_int_cst (cd.get_lhs_type (), strlen_cst);
+ const svalue *result_sval
+ = m_mgr->get_or_create_constant_svalue (t_cst);
+ cd.maybe_set_lhs (result_sval);
+ return true;
+ }
+ /* If the result is unused (no LHS), fall through: there is
+ nothing to bind. */
+ }
+ /* Otherwise an unknown value. */
+ return true;
+}
+
+} // namespace ana
+
+#endif /* #if ENABLE_ANALYZER */
--- /dev/null
+/* Consolidation of svalues and regions.
+ Copyright (C) 2020 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tree.h"
+#include "diagnostic-core.h"
+#include "gimple-pretty-print.h"
+#include "function.h"
+#include "basic-block.h"
+#include "gimple.h"
+#include "gimple-iterator.h"
+#include "diagnostic-core.h"
+#include "graphviz.h"
+#include "options.h"
+#include "cgraph.h"
+#include "tree-dfa.h"
+#include "stringpool.h"
+#include "convert.h"
+#include "target.h"
+#include "fold-const.h"
+#include "tree-pretty-print.h"
+#include "tristate.h"
+#include "bitmap.h"
+#include "selftest.h"
+#include "function.h"
+#include "analyzer/analyzer.h"
+#include "analyzer/analyzer-logging.h"
+#include "ordered-hash-map.h"
+#include "options.h"
+#include "cgraph.h"
+#include "cfg.h"
+#include "digraph.h"
+#include "analyzer/supergraph.h"
+#include "sbitmap.h"
+#include "analyzer/call-string.h"
+#include "analyzer/program-point.h"
+#include "analyzer/store.h"
+#include "analyzer/region-model.h"
+
+#if ENABLE_ANALYZER
+
+namespace ana {
+
+/* class region_model_manager. */
+
+/* region_model_manager's ctor.
+ Set up the tree of "special" regions: root, with stack, heap, code
+ and globals regions beneath it, each getting a unique id.
+ NOTE(review): C++ runs member initializers in declaration order, not
+ the order written here — confirm this list matches the declaration
+ order in the class so the region ids come out as intended. */
+
+region_model_manager::region_model_manager ()
+: m_next_region_id (0),
+ m_root_region (alloc_region_id ()),
+ m_stack_region (alloc_region_id (), &m_root_region),
+ m_heap_region (alloc_region_id (), &m_root_region),
+ m_unknown_NULL (NULL),
+ m_max_complexity (0, 0),
+ m_code_region (alloc_region_id (), &m_root_region),
+ m_fndecls_map (), m_labels_map (),
+ m_globals_region (alloc_region_id (), &m_root_region),
+ m_globals_map (),
+ m_store_mgr (this)
+{
+}
+
+/* region_model_manager's dtor. Delete all of the managed svalues
+ and regions.
+ Each consolidation map owns its values (the keys are trees or
+ svalue/region pointers owned elsewhere), so only the mapped values
+ are deleted here. */
+
+region_model_manager::~region_model_manager ()
+{
+ /* Delete consolidated svalues. */
+ for (constants_map_t::iterator iter = m_constants_map.begin ();
+ iter != m_constants_map.end (); ++iter)
+ delete (*iter).second;
+ for (unknowns_map_t::iterator iter = m_unknowns_map.begin ();
+ iter != m_unknowns_map.end (); ++iter)
+ delete (*iter).second;
+ delete m_unknown_NULL;
+ for (setjmp_values_map_t::iterator iter = m_setjmp_values_map.begin ();
+ iter != m_setjmp_values_map.end (); ++iter)
+ delete (*iter).second;
+ for (poisoned_values_map_t::iterator iter = m_poisoned_values_map.begin ();
+ iter != m_poisoned_values_map.end (); ++iter)
+ delete (*iter).second;
+ for (initial_values_map_t::iterator iter = m_initial_values_map.begin ();
+ iter != m_initial_values_map.end (); ++iter)
+ delete (*iter).second;
+ for (pointer_values_map_t::iterator iter = m_pointer_values_map.begin ();
+ iter != m_pointer_values_map.end (); ++iter)
+ delete (*iter).second;
+ for (unaryop_values_map_t::iterator iter = m_unaryop_values_map.begin ();
+ iter != m_unaryop_values_map.end (); ++iter)
+ delete (*iter).second;
+ for (binop_values_map_t::iterator iter = m_binop_values_map.begin ();
+ iter != m_binop_values_map.end (); ++iter)
+ delete (*iter).second;
+ for (sub_values_map_t::iterator iter = m_sub_values_map.begin ();
+ iter != m_sub_values_map.end (); ++iter)
+ delete (*iter).second;
+ for (unmergeable_values_map_t::iterator iter
+ = m_unmergeable_values_map.begin ();
+ iter != m_unmergeable_values_map.end (); ++iter)
+ delete (*iter).second;
+ for (widening_values_map_t::iterator iter = m_widening_values_map.begin ();
+ iter != m_widening_values_map.end (); ++iter)
+ delete (*iter).second;
+ for (compound_values_map_t::iterator iter = m_compound_values_map.begin ();
+ iter != m_compound_values_map.end (); ++iter)
+ delete (*iter).second;
+ for (conjured_values_map_t::iterator iter = m_conjured_values_map.begin ();
+ iter != m_conjured_values_map.end (); ++iter)
+ delete (*iter).second;
+
+ /* Delete consolidated regions. */
+ for (fndecls_map_t::iterator iter = m_fndecls_map.begin ();
+ iter != m_fndecls_map.end (); ++iter)
+ delete (*iter).second;
+ for (labels_map_t::iterator iter = m_labels_map.begin ();
+ iter != m_labels_map.end (); ++iter)
+ delete (*iter).second;
+ for (globals_map_t::iterator iter = m_globals_map.begin ();
+ iter != m_globals_map.end (); ++iter)
+ delete (*iter).second;
+ for (string_map_t::iterator iter = m_string_map.begin ();
+ iter != m_string_map.end (); ++iter)
+ delete (*iter).second;
+}
+
+/* Return true if C exceeds the complexity limit for svalues.
+ The limit is the user-tunable --param analyzer-max-svalue-depth. */
+
+bool
+region_model_manager::too_complex_p (const complexity &c) const
+{
+ if (c.m_max_depth > (unsigned)param_analyzer_max_svalue_depth)
+ return true;
+ return false;
+}
+
+/* If SVAL exceeds the complexity limit for svalues, delete it
+ and return true.
+ Otherwise update m_max_complexity and return false.
+ Takes ownership of SVAL: on rejection it is deleted; otherwise the
+ caller is expected to record it in a consolidation map. */
+
+bool
+region_model_manager::reject_if_too_complex (svalue *sval)
+{
+ const complexity &c = sval->get_complexity ();
+ if (!too_complex_p (c))
+ {
+ /* Track the high-water marks of accepted svalues, for logging. */
+ if (m_max_complexity.m_num_nodes < c.m_num_nodes)
+ m_max_complexity.m_num_nodes = c.m_num_nodes;
+ if (m_max_complexity.m_max_depth < c.m_max_depth)
+ m_max_complexity.m_max_depth = c.m_max_depth;
+ return false;
+ }
+
+ delete sval;
+ return true;
+}
+
+/* Macro for imposing a complexity limit on svalues, for use within
+ region_model_manager member functions.
+
+ If SVAL exceeds the complexity limit, delete it and return an UNKNOWN
+ value of the same type.
+ Otherwise update m_max_complexity and carry on.
+ Note that this returns from the *enclosing member function*, so it
+ must only be used where such an early return is acceptable. */
+
+#define RETURN_UNKNOWN_IF_TOO_COMPLEX(SVAL) \
+ do { \
+ svalue *sval_ = (SVAL); \
+ tree type_ = sval_->get_type (); \
+ if (reject_if_too_complex (sval_)) \
+ return get_or_create_unknown_svalue (type_); \
+ } while (0)
+
+/* svalue consolidation. */
+
+/* Return the svalue * for a constant_svalue for CST_EXPR,
+ creating it if necessary.
+ The constant_svalue instances are reused, based on pointer equality
+ of trees */
+
+const svalue *
+region_model_manager::get_or_create_constant_svalue (tree cst_expr)
+{
+ gcc_assert (cst_expr);
+
+ constant_svalue **slot = m_constants_map.get (cst_expr);
+ if (slot)
+ return *slot;
+ constant_svalue *cst_sval = new constant_svalue (cst_expr);
+ RETURN_UNKNOWN_IF_TOO_COMPLEX (cst_sval);
+ m_constants_map.put (cst_expr, cst_sval);
+ return cst_sval;
+}
+
+/* Return the svalue * for an unknown_svalue for TYPE (which can be NULL),
+ creating it if necessary.
+ The unknown_svalue instances are reused, based on pointer equality
+ of the types.
+ No complexity check is done here: unknown_svalue is itself the
+ fallback that RETURN_UNKNOWN_IF_TOO_COMPLEX produces. */
+
+const svalue *
+region_model_manager::get_or_create_unknown_svalue (tree type)
+{
+ /* Special-case NULL, so that the hash_map can use NULL as the
+ "empty" value. */
+ if (type == NULL_TREE)
+ {
+ if (!m_unknown_NULL)
+ m_unknown_NULL = new unknown_svalue (type);
+ return m_unknown_NULL;
+ }
+
+ unknown_svalue **slot = m_unknowns_map.get (type);
+ if (slot)
+ return *slot;
+ unknown_svalue *sval = new unknown_svalue (type);
+ m_unknowns_map.put (type, sval);
+ return sval;
+}
+
+/* Return the svalue * for the initial value of REG, creating it if
+ necessary. */
+
+const svalue *
+region_model_manager::get_or_create_initial_value (const region *reg)
+{
+ /* The initial value of a cast is a cast of the initial value
+ (recursing on the original region). */
+ if (const cast_region *cast_reg = reg->dyn_cast_cast_region ())
+ {
+ const region *original_reg = cast_reg->get_original_region ();
+ return get_or_create_cast (cast_reg->get_type (),
+ get_or_create_initial_value (original_reg));
+ }
+
+ if (initial_svalue **slot = m_initial_values_map.get (reg))
+ return *slot;
+ initial_svalue *initial_sval = new initial_svalue (reg->get_type (), reg);
+ RETURN_UNKNOWN_IF_TOO_COMPLEX (initial_sval);
+ m_initial_values_map.put (reg, initial_sval);
+ return initial_sval;
+}
+
+/* Return the svalue * for R using type TYPE, creating it if
+ necessary. Consolidated on the (setjmp_record, type) pair. */
+
+const svalue *
+region_model_manager::get_or_create_setjmp_svalue (const setjmp_record &r,
+ tree type)
+{
+ setjmp_svalue::key_t key (r, type);
+ if (setjmp_svalue **slot = m_setjmp_values_map.get (key))
+ return *slot;
+ setjmp_svalue *setjmp_sval = new setjmp_svalue (r, type);
+ RETURN_UNKNOWN_IF_TOO_COMPLEX (setjmp_sval);
+ m_setjmp_values_map.put (key, setjmp_sval);
+ return setjmp_sval;
+}
+
+/* Return the svalue * for a poisoned value of KIND and TYPE, creating it if
+ necessary. Consolidated on the (kind, type) pair. */
+
+const svalue *
+region_model_manager::get_or_create_poisoned_svalue (enum poison_kind kind,
+ tree type)
+{
+ poisoned_svalue::key_t key (kind, type);
+ if (poisoned_svalue **slot = m_poisoned_values_map.get (key))
+ return *slot;
+ poisoned_svalue *poisoned_sval = new poisoned_svalue (kind, type);
+ RETURN_UNKNOWN_IF_TOO_COMPLEX (poisoned_sval);
+ m_poisoned_values_map.put (key, poisoned_sval);
+ return poisoned_sval;
+}
+
+/* Return the svalue * for a pointer to POINTEE of type PTR_TYPE,
+ creating it if necessary. */
+
+const svalue *
+region_model_manager::get_ptr_svalue (tree ptr_type, const region *pointee)
+{
+ /* If this is a symbolic region from dereferencing a pointer, and the types
+ match, then return the original pointer (i.e. fold "&*PTR" to "PTR"). */
+ if (const symbolic_region *sym_reg = pointee->dyn_cast_symbolic_region ())
+ if (ptr_type == sym_reg->get_pointer ()->get_type ())
+ return sym_reg->get_pointer ();
+
+ /* Otherwise, consolidate on the (type, pointee) pair. */
+ region_svalue::key_t key (ptr_type, pointee);
+ if (region_svalue **slot = m_pointer_values_map.get (key))
+ return *slot;
+ region_svalue *sval = new region_svalue (ptr_type, pointee);
+ RETURN_UNKNOWN_IF_TOO_COMPLEX (sval);
+ m_pointer_values_map.put (key, sval);
+ return sval;
+}
+
+/* Subroutine of region_model_manager::get_or_create_unaryop.
+ Attempt to fold the inputs and return a simpler svalue *.
+ Otherwise, return NULL.
+ Only casts (NOP_EXPR), logical negation (TRUTH_NOT_EXPR), and
+ constant operands are currently folded. */
+
+const svalue *
+region_model_manager::maybe_fold_unaryop (tree type, enum tree_code op,
+ const svalue *arg)
+{
+ /* Ops on "unknown" are also unknown. */
+ if (arg->get_kind () == SK_UNKNOWN)
+ return get_or_create_unknown_svalue (type);
+
+ switch (op)
+ {
+ default: break;
+ case NOP_EXPR:
+ {
+ /* Handle redundant casts. */
+ if (arg->get_type ()
+ && useless_type_conversion_p (arg->get_type (), type))
+ return arg;
+
+ /* Fold "cast<TYPE> (cast <INNER_TYPE> (innermost_arg))
+ => "cast<TYPE> (innermost_arg)",
+ unless INNER_TYPE is narrower than TYPE. */
+ if (const svalue *innermost_arg = arg->maybe_undo_cast ())
+ {
+ tree inner_type = arg->get_type ();
+ if (TYPE_SIZE (type)
+ && TYPE_SIZE (inner_type)
+ && (fold_binary (LE_EXPR, boolean_type_node,
+ TYPE_SIZE (type), TYPE_SIZE (inner_type))
+ == boolean_true_node))
+ return maybe_fold_unaryop (type, op, innermost_arg);
+ }
+ }
+ break;
+ case TRUTH_NOT_EXPR:
+ {
+ /* Invert comparisons e.g. "!(x == y)" => "x != y". */
+ if (const binop_svalue *binop = arg->dyn_cast_binop_svalue ())
+ if (TREE_CODE_CLASS (binop->get_op ()) == tcc_comparison)
+ {
+ enum tree_code inv_op
+ = invert_tree_comparison (binop->get_op (),
+ HONOR_NANS (binop->get_type ()));
+ if (inv_op != ERROR_MARK)
+ return get_or_create_binop (binop->get_type (), inv_op,
+ binop->get_arg0 (),
+ binop->get_arg1 ());
+ }
+ }
+ break;
+ }
+
+ /* Constants: delegate to the tree-level folder. */
+ if (tree cst = arg->maybe_get_constant ())
+ if (tree result = fold_unary (op, type, cst))
+ return get_or_create_constant_svalue (result);
+
+ return NULL;
+}
+
+/* Return the svalue * for a unary operation OP on ARG with a result of
+ type TYPE, creating it if necessary. Folds to a simpler svalue
+ where possible (see maybe_fold_unaryop). */
+
+const svalue *
+region_model_manager::get_or_create_unaryop (tree type, enum tree_code op,
+ const svalue *arg)
+{
+ if (const svalue *folded = maybe_fold_unaryop (type, op, arg))
+ return folded;
+ unaryop_svalue::key_t key (type, op, arg);
+ if (unaryop_svalue **slot = m_unaryop_values_map.get (key))
+ return *slot;
+ unaryop_svalue *unaryop_sval = new unaryop_svalue (type, op, arg);
+ RETURN_UNKNOWN_IF_TOO_COMPLEX (unaryop_sval);
+ m_unaryop_values_map.put (key, unaryop_sval);
+ return unaryop_sval;
+}
+
+/* Return the svalue * for a cast of ARG to type TYPE, creating it
+ if necessary. All casts are modelled as NOP_EXPR unaryops. */
+
+const svalue *
+region_model_manager::get_or_create_cast (tree type, const svalue *arg)
+{
+ return get_or_create_unaryop (type, NOP_EXPR, arg);
+}
+
+/* Subroutine of region_model_manager::get_or_create_binop.
+ Attempt to fold the inputs and return a simpler svalue *.
+ Otherwise, return NULL. */
+
+const svalue *
+region_model_manager::maybe_fold_binop (tree type, enum tree_code op,
+ const svalue *arg0,
+ const svalue *arg1)
+{
+ tree cst0 = arg0->maybe_get_constant ();
+ tree cst1 = arg1->maybe_get_constant ();
+ /* (CST OP CST). */
+ if (cst0 && cst1)
+ {
+ if (tree result = fold_binary (op, type, cst0, cst1))
+ if (CONSTANT_CLASS_P (result))
+ return get_or_create_constant_svalue (result);
+ }
+
+ /* Don't apply the identities below to floating-point types, where
+ they don't generally hold (e.g. NaNs and signed zeros for x + 0). */
+ if (FLOAT_TYPE_P (type)
+ || (arg0->get_type () && FLOAT_TYPE_P (arg0->get_type ()))
+ || (arg1->get_type () && FLOAT_TYPE_P (arg1->get_type ())))
+ return NULL;
+
+ switch (op)
+ {
+ default:
+ break;
+ case POINTER_PLUS_EXPR:
+ case PLUS_EXPR:
+ /* (VAL + 0) -> VAL. */
+ if (cst1 && zerop (cst1) && type == arg0->get_type ())
+ return arg0;
+ break;
+ case MINUS_EXPR:
+ /* (VAL - 0) -> VAL. */
+ if (cst1 && zerop (cst1) && type == arg0->get_type ())
+ return arg0;
+ break;
+ case MULT_EXPR:
+ /* (VAL * 0). */
+ if (cst1 && zerop (cst1))
+ return get_or_create_constant_svalue (build_int_cst (type, 0));
+ /* (VAL * 1) -> VAL. */
+ if (cst1 && integer_onep (cst1))
+ return arg0;
+ break;
+ case TRUTH_ANDIF_EXPR:
+ case TRUTH_AND_EXPR:
+ if (cst1)
+ {
+ if (zerop (cst1))
+ /* "(ARG0 && 0)" -> "0". */
+ return get_or_create_constant_svalue (build_int_cst (type, 0));
+ else
+ /* "(ARG0 && nonzero-cst)" -> "ARG0". */
+ return get_or_create_cast (type, arg0);
+ }
+ break;
+ case TRUTH_ORIF_EXPR:
+ case TRUTH_OR_EXPR:
+ if (cst1)
+ {
+ if (zerop (cst1))
+ /* "(ARG0 || 0)" -> "ARG0". */
+ return get_or_create_cast (type, arg0);
+ else
+ /* "(ARG0 || nonzero-cst)" -> "nonzero-cst". */
+ return get_or_create_cast (type, arg1);
+ }
+ break;
+ }
+
+ /* For associative ops, fold "(X op CST_A) op CST_B)" to
+ "X op (CST_A op CST_B)". */
+ if (cst1 && associative_tree_code (op))
+ if (const binop_svalue *binop = arg0->dyn_cast_binop_svalue ())
+ if (binop->get_op () == op
+ && binop->get_arg1 ()->maybe_get_constant ()
+ && type == binop->get_type ()
+ && type == binop->get_arg0 ()->get_type ()
+ && type == binop->get_arg1 ()->get_type ())
+ return get_or_create_binop
+ (type, op, binop->get_arg0 (),
+ get_or_create_binop (type, op,
+ binop->get_arg1 (), arg1));
+
+ /* associative_tree_code is false for POINTER_PLUS_EXPR, but we
+ can fold:
+ "(PTR ptr+ CST_A) ptr+ CST_B)" to "PTR ptr+ (CST_A ptr+ CST_B)"
+ e.g. in data-model-1.c: test_4c. */
+ if (cst1 && op == POINTER_PLUS_EXPR)
+ if (const binop_svalue *binop = arg0->dyn_cast_binop_svalue ())
+ if (binop->get_op () == POINTER_PLUS_EXPR)
+ if (binop->get_arg1 ()->maybe_get_constant ())
+ return get_or_create_binop
+ (type, op, binop->get_arg0 (),
+ get_or_create_binop (size_type_node, op,
+ binop->get_arg1 (), arg1));
+
+ /* Ops on "unknown" are also unknown (unless we can use one of the
+ identities above). */
+ if (arg0->get_kind () == SK_UNKNOWN
+ || arg1->get_kind () == SK_UNKNOWN)
+ return get_or_create_unknown_svalue (type);
+
+ /* etc. */
+
+ return NULL;
+}
+
+/* Return the svalue * for a binary operation OP on ARG0 and ARG1
+ with a result of type TYPE, creating it if necessary.
+ Canonicalizes commutative ops (constant on the RHS) and folds to a
+ simpler svalue where possible (see maybe_fold_binop). */
+
+const svalue *
+region_model_manager::get_or_create_binop (tree type, enum tree_code op,
+ const svalue *arg0,
+ const svalue *arg1)
+{
+ /* For commutative ops, put any constant on the RHS. */
+ if (arg0->maybe_get_constant () && commutative_tree_code (op))
+ std::swap (arg0, arg1);
+
+ if (const svalue *folded = maybe_fold_binop (type, op, arg0, arg1))
+ return folded;
+
+ binop_svalue::key_t key (type, op, arg0, arg1);
+ if (binop_svalue **slot = m_binop_values_map.get (key))
+ return *slot;
+ binop_svalue *binop_sval = new binop_svalue (type, op, arg0, arg1);
+ RETURN_UNKNOWN_IF_TOO_COMPLEX (binop_sval);
+ m_binop_values_map.put (key, binop_sval);
+ return binop_sval;
+}
+
+/* Subroutine of region_model_manager::get_or_create_sub_svalue.
+ Return a folded svalue, or NULL. */
+
+const svalue *
+region_model_manager::maybe_fold_sub_svalue (tree type,
+ const svalue *parent_svalue,
+ const region *subregion)
+{
+ /* Subvalues of "unknown" are unknown. */
+ if (parent_svalue->get_kind () == SK_UNKNOWN)
+ return get_or_create_unknown_svalue (type);
+
+ /* If we have a subregion of a zero-fill, it's zero.
+ (A cast of constant zero is presumably how a zero-filled region is
+ represented here — TODO confirm against zero_fill_region.) */
+ if (const unaryop_svalue *unary
+ = parent_svalue->dyn_cast_unaryop_svalue ())
+ {
+ if (unary->get_op () == NOP_EXPR)
+ if (tree cst = unary->get_arg ()->maybe_get_constant ())
+ if (zerop (cst))
+ {
+ const svalue *cst_sval
+ = get_or_create_constant_svalue (cst);
+ return get_or_create_cast (type, cst_sval);
+ }
+ }
+
+ /* Handle getting individual chars from a STRING_CST. */
+ if (tree cst = parent_svalue->maybe_get_constant ())
+ if (TREE_CODE (cst) == STRING_CST)
+ if (const element_region *element_reg
+ = subregion->dyn_cast_element_region ())
+ {
+ const svalue *idx_sval = element_reg->get_index ();
+ if (tree cst_idx = idx_sval->maybe_get_constant ())
+ if (const svalue *char_sval
+ = maybe_get_char_from_string_cst (cst, cst_idx))
+ return get_or_create_cast (type, char_sval);
+ }
+
+ /* SUB(INIT(r)).FIELD -> INIT(r.FIELD)
+ i.e.
+ Subvalue(InitialValue(R1), FieldRegion(R2, F))
+ -> InitialValue(FieldRegion(R1, F)). */
+ if (const initial_svalue *init_sval
+ = parent_svalue->dyn_cast_initial_svalue ())
+ {
+ if (const field_region *field_reg = subregion->dyn_cast_field_region ())
+ {
+ const region *field_reg_new
+ = get_field_region (init_sval->get_region (),
+ field_reg->get_field ());
+ return get_or_create_initial_value (field_reg_new);
+ }
+ }
+
+ return NULL;
+}
+
+/* Return the svalue * for extracting a subvalue of type TYPE from
+ PARENT_SVALUE based on SUBREGION, creating it if necessary.
+ Folds to a simpler svalue where possible (see maybe_fold_sub_svalue). */
+
+const svalue *
+region_model_manager::get_or_create_sub_svalue (tree type,
+ const svalue *parent_svalue,
+ const region *subregion)
+{
+ if (const svalue *folded
+ = maybe_fold_sub_svalue (type, parent_svalue, subregion))
+ return folded;
+
+ sub_svalue::key_t key (type, parent_svalue, subregion);
+ if (sub_svalue **slot = m_sub_values_map.get (key))
+ return *slot;
+ sub_svalue *sub_sval
+ = new sub_svalue (type, parent_svalue, subregion);
+ RETURN_UNKNOWN_IF_TOO_COMPLEX (sub_sval);
+ m_sub_values_map.put (key, sub_sval);
+ return sub_sval;
+}
+
+/* Return the svalue * that decorates ARG as being unmergeable,
+ creating it if necessary.
+ Idempotent: an already-unmergeable ARG is returned as-is. */
+
+const svalue *
+region_model_manager::get_or_create_unmergeable (const svalue *arg)
+{
+ if (arg->get_kind () == SK_UNMERGEABLE)
+ return arg;
+
+ if (unmergeable_svalue **slot = m_unmergeable_values_map.get (arg))
+ return *slot;
+ unmergeable_svalue *unmergeable_sval = new unmergeable_svalue (arg);
+ RETURN_UNKNOWN_IF_TOO_COMPLEX (unmergeable_sval);
+ m_unmergeable_values_map.put (arg, unmergeable_sval);
+ return unmergeable_sval;
+}
+
+/* Return the svalue * of type TYPE for the merger of value BASE_SVAL
+ and ITER_SVAL at POINT, creating it if necessary.
+ Consolidated on the (type, point, base, iter) tuple. */
+
+const svalue *
+region_model_manager::get_or_create_widening_svalue (tree type,
+ const program_point &point,
+ const svalue *base_sval,
+ const svalue *iter_sval)
+{
+ widening_svalue::key_t key (type, point, base_sval, iter_sval);
+ if (widening_svalue **slot = m_widening_values_map.get (key))
+ return *slot;
+ widening_svalue *widening_sval
+ = new widening_svalue (type, point, base_sval, iter_sval);
+ RETURN_UNKNOWN_IF_TOO_COMPLEX (widening_sval);
+ m_widening_values_map.put (key, widening_sval);
+ return widening_sval;
+}
+
+/* Return the svalue * of type TYPE for the compound values in MAP,
+ creating it if necessary. The new compound_svalue copies MAP, so
+ the caller retains ownership of the argument. */
+
+const svalue *
+region_model_manager::get_or_create_compound_svalue (tree type,
+ const binding_map &map)
+{
+ compound_svalue::key_t tmp_key (type, &map);
+ if (compound_svalue **slot = m_compound_values_map.get (tmp_key))
+ return *slot;
+ compound_svalue *compound_sval
+ = new compound_svalue (type, map);
+ RETURN_UNKNOWN_IF_TOO_COMPLEX (compound_sval);
+ /* Use make_key rather than reusing the key, so that we use a
+ ptr to compound_sval's binding_map, rather than the MAP param. */
+ m_compound_values_map.put (compound_sval->make_key (), compound_sval);
+ return compound_sval;
+}
+
+/* Return the svalue * of type TYPE for the value conjured for ID_REG
+ at STMT, creating it if necessary.
+ Consolidated on the (type, stmt, region) tuple. */
+
+const svalue *
+region_model_manager::get_or_create_conjured_svalue (tree type,
+ const gimple *stmt,
+ const region *id_reg)
+{
+ conjured_svalue::key_t key (type, stmt, id_reg);
+ if (conjured_svalue **slot = m_conjured_values_map.get (key))
+ return *slot;
+ conjured_svalue *conjured_sval
+ = new conjured_svalue (type, stmt, id_reg);
+ RETURN_UNKNOWN_IF_TOO_COMPLEX (conjured_sval);
+ m_conjured_values_map.put (key, conjured_sval);
+ return conjured_sval;
+}
+
+/* Given a STRING_CST and a constant BYTE_OFFSET_CST,
+ attempt to get the character at that offset, returning either
+ the svalue for the character constant, or NULL if unsuccessful
+ (e.g. offset out of range, or non-byte-sized element type). */
+
+const svalue *
+region_model_manager::maybe_get_char_from_string_cst (tree string_cst,
+ tree byte_offset_cst)
+{
+ gcc_assert (TREE_CODE (string_cst) == STRING_CST);
+
+ /* Adapted from fold_read_from_constant_string. */
+ scalar_int_mode char_mode;
+ if (TREE_CODE (byte_offset_cst) == INTEGER_CST
+ && compare_tree_int (byte_offset_cst,
+ TREE_STRING_LENGTH (string_cst)) < 0
+ && is_int_mode (TYPE_MODE (TREE_TYPE (TREE_TYPE (string_cst))),
+ &char_mode)
+ && GET_MODE_SIZE (char_mode) == 1)
+ {
+ tree char_cst
+ = build_int_cst_type (TREE_TYPE (TREE_TYPE (string_cst)),
+ (TREE_STRING_POINTER (string_cst)
+ [TREE_INT_CST_LOW (byte_offset_cst)]));
+ return get_or_create_constant_svalue (char_cst);
+ }
+ return NULL;
+}
+
+/* region consolidation. */
+
+/* Return the region for FNDECL, creating it if necessary.
+ Function regions live within the "code" region. */
+
+const function_region *
+region_model_manager::get_region_for_fndecl (tree fndecl)
+{
+ gcc_assert (TREE_CODE (fndecl) == FUNCTION_DECL);
+
+ function_region **slot = m_fndecls_map.get (fndecl);
+ if (slot)
+ return *slot;
+ function_region *reg
+ = new function_region (alloc_region_id (), &m_code_region, fndecl);
+ m_fndecls_map.put (fndecl, reg);
+ return reg;
+}
+
+/* Return the region for LABEL, creating it if necessary.
+ Label regions live within the region of their containing function. */
+
+const label_region *
+region_model_manager::get_region_for_label (tree label)
+{
+ gcc_assert (TREE_CODE (label) == LABEL_DECL);
+
+ label_region **slot = m_labels_map.get (label);
+ if (slot)
+ return *slot;
+
+ tree fndecl = DECL_CONTEXT (label);
+ gcc_assert (fndecl && TREE_CODE (fndecl) == FUNCTION_DECL);
+
+ const function_region *func_reg = get_region_for_fndecl (fndecl);
+ label_region *reg
+ = new label_region (alloc_region_id (), func_reg, label);
+ m_labels_map.put (label, reg);
+ return reg;
+}
+
+/* Return the region for global variable EXPR, creating it if necessary.
+ Global regions live within the "globals" region. */
+
+const decl_region *
+region_model_manager::get_region_for_global (tree expr)
+{
+ gcc_assert (TREE_CODE (expr) == VAR_DECL);
+
+ decl_region **slot = m_globals_map.get (expr);
+ if (slot)
+ return *slot;
+ decl_region *reg
+ = new decl_region (alloc_region_id (), &m_globals_region, expr);
+ m_globals_map.put (expr, reg);
+ return reg;
+}
+
+/* Return the region that describes accessing field FIELD of PARENT,
+ creating it if necessary. Consolidated on the (parent, field) pair. */
+
+const region *
+region_model_manager::get_field_region (const region *parent, tree field)
+{
+ field_region::key_t key (parent, field);
+ if (field_region *reg = m_field_regions.get (key))
+ return reg;
+
+ field_region *field_reg
+ = new field_region (alloc_region_id (), parent, field);
+ m_field_regions.put (key, field_reg);
+ return field_reg;
+}
+
+/* Return the region that describes accessing the element of type
+ ELEMENT_TYPE at index INDEX of PARENT, creating it if necessary.
+ INDEX is symbolic (an svalue), not necessarily a constant. */
+
+const region *
+region_model_manager::get_element_region (const region *parent,
+ tree element_type,
+ const svalue *index)
+{
+ element_region::key_t key (parent, element_type, index);
+ if (element_region *reg = m_element_regions.get (key))
+ return reg;
+
+ element_region *element_reg
+ = new element_region (alloc_region_id (), parent, element_type, index);
+ m_element_regions.put (key, element_reg);
+ return element_reg;
+}
+
+/* Return the region that describes accessing the subregion of type
+ TYPE at offset BYTE_OFFSET within PARENT, creating it if
+ necessary. */
+
+const region *
+region_model_manager::get_offset_region (const region *parent,
+ tree type,
+ const svalue *byte_offset)
+{
+ /* If BYTE_OFFSET is zero, return PARENT (viewed as TYPE via a
+ cast_region rather than an offset_region). */
+ if (tree cst_offset = byte_offset->maybe_get_constant ())
+ if (zerop (cst_offset))
+ return get_cast_region (parent, type);
+
+ /* Fold OFFSET_REGION(OFFSET_REGION(REG, X), Y)
+ to OFFSET_REGION(REG, (X + Y)). */
+ if (const offset_region *parent_offset_reg
+ = parent->dyn_cast_offset_region ())
+ {
+ const svalue *sval_x = parent_offset_reg->get_byte_offset ();
+ const svalue *sval_sum
+ = get_or_create_binop (byte_offset->get_type (),
+ PLUS_EXPR, sval_x, byte_offset);
+ return get_offset_region (parent->get_parent_region (), type, sval_sum);
+ }
+
+ offset_region::key_t key (parent, type, byte_offset);
+ if (offset_region *reg = m_offset_regions.get (key))
+ return reg;
+
+ offset_region *offset_reg
+ = new offset_region (alloc_region_id (), parent, type, byte_offset);
+ m_offset_regions.put (key, offset_reg);
+ return offset_reg;
+}
+
+/* Return the region that describes accessing ORIGINAL_REGION as if
+ it were of type TYPE, creating it if necessary. */
+
+const region *
+region_model_manager::get_cast_region (const region *original_region,
+ tree type)
+{
+ /* If types match, return ORIGINAL_REGION. */
+ if (type == original_region->get_type ())
+ return original_region;
+
+ cast_region::key_t key (original_region, type);
+ if (cast_region *reg = m_cast_regions.get (key))
+ return reg;
+
+ cast_region *cast_reg
+ = new cast_region (alloc_region_id (), original_region, type);
+ m_cast_regions.put (key, cast_reg);
+ return cast_reg;
+}
+
+/* Return the frame_region for call to FUN from CALLING_FRAME, creating it
+ if necessary. CALLING_FRAME may be NULL (for the outermost frame,
+ which gets index 0; deeper frames get successive indices). */
+
+const frame_region *
+region_model_manager::get_frame_region (const frame_region *calling_frame,
+ function *fun)
+{
+ int index = calling_frame ? calling_frame->get_index () + 1 : 0;
+
+ frame_region::key_t key (calling_frame, fun);
+ if (frame_region *reg = m_frame_regions.get (key))
+ return reg;
+
+ frame_region *frame_reg
+ = new frame_region (alloc_region_id (), &m_stack_region, calling_frame,
+ fun, index);
+ m_frame_regions.put (key, frame_reg);
+ return frame_reg;
+}
+
+/* Return the region that describes dereferencing SVAL, creating it
+ if necessary. Symbolic regions are parented on the root region. */
+
+const region *
+region_model_manager::get_symbolic_region (const svalue *sval)
+{
+ symbolic_region::key_t key (&m_root_region, sval);
+ if (symbolic_region *reg = m_symbolic_regions.get (key))
+ return reg;
+
+ symbolic_region *symbolic_reg
+ = new symbolic_region (alloc_region_id (), &m_root_region, sval);
+ m_symbolic_regions.put (key, symbolic_reg);
+ return symbolic_reg;
+}
+
+/* Return the region that describes accessing STRING_CST, creating it
+ if necessary. Consolidated on pointer equality of the tree. */
+
+const string_region *
+region_model_manager::get_region_for_string (tree string_cst)
+{
+ gcc_assert (TREE_CODE (string_cst) == STRING_CST);
+
+ string_region **slot = m_string_map.get (string_cst);
+ if (slot)
+ return *slot;
+ string_region *reg
+ = new string_region (alloc_region_id (), &m_root_region, string_cst);
+ m_string_map.put (string_cst, reg);
+ return reg;
+}
+
+/* If we see a tree code we don't know how to handle, rather than
+ ICE or generate bogus results, create a dummy region, and notify
+ CTXT so that it can mark the new state as being not properly
+ modelled. The exploded graph can then stop exploring that path,
+ since any diagnostics we might issue will have questionable
+ validity.
+
+ NOTE(review): unlike create_region_for_heap_alloc/alloca, NEW_REG
+ is not recorded in m_managed_dynamic_regions, so it appears never
+ to be freed — confirm intended ownership. */
+
+const region *
+region_model_manager::
+get_region_for_unexpected_tree_code (region_model_context *ctxt,
+ tree t,
+ const dump_location_t &loc)
+{
+ gcc_assert (ctxt);
+ tree type = TYPE_P (t) ? t : TREE_TYPE (t);
+ region *new_reg
+ = new unknown_region (alloc_region_id (), &m_root_region, type);
+ ctxt->on_unexpected_tree_code (t, loc);
+ return new_reg;
+}
+
+/* Return a new region describing a heap-allocated block of memory.
+ Unlike the consolidated regions above, a fresh region is created on
+ every call; the manager retains ownership via
+ m_managed_dynamic_regions. */
+
+const region *
+region_model_manager::create_region_for_heap_alloc ()
+{
+ region *reg
+ = new heap_allocated_region (alloc_region_id (), &m_heap_region);
+ m_managed_dynamic_regions.safe_push (reg);
+ return reg;
+}
+
+/* Return a new region describing a block of memory allocated within FRAME.
+ As with heap allocations, a fresh region is created per call and
+ ownership is retained via m_managed_dynamic_regions. */
+
+const region *
+region_model_manager::create_region_for_alloca (const frame_region *frame)
+{
+ gcc_assert (frame);
+ region *reg = new alloca_region (alloc_region_id (), frame);
+ m_managed_dynamic_regions.safe_push (reg);
+ return reg;
+}
+
+/* Log OBJ to LOGGER as a single indented line, using OBJ's
+ dump_to_pp with the "simple" flag. */
+
+template <typename T>
+static void
+log_managed_object (logger *logger, const T *obj)
+{
+ logger->start_log_line ();
+ pretty_printer *pp = logger->get_printer ();
+ pp_string (pp, " ");
+ obj->dump_to_pp (pp, true);
+ logger->end_log_line ();
+}
+
+/* Specialization for frame_region, which also logs the count of locals
+ managed by the frame_region. */
+
+template <>
+void
+log_managed_object (logger *logger, const frame_region *obj)
+{
+ logger->start_log_line ();
+ pretty_printer *pp = logger->get_printer ();
+ pp_string (pp, " ");
+ obj->dump_to_pp (pp, true);
+ pp_printf (pp, " [with %i region(s) for locals]", obj->get_num_locals ());
+ logger->end_log_line ();
+}
+
+/* Dump the number of objects that were managed by UNIQ_MAP to LOGGER.
+ If SHOW_OBJS is true, also dump the objects themselves.
+ This overload handles hash_map-based consolidation maps. */
+
+template <typename K, typename T>
+static void
+log_uniq_map (logger *logger, bool show_objs, const char *title,
+ const hash_map<K, T*> &uniq_map)
+{
+ logger->log (" # %s: %li", title, uniq_map.elements ());
+ if (show_objs)
+ for (typename hash_map<K, T*>::iterator iter = uniq_map.begin ();
+ iter != uniq_map.end (); ++iter)
+ {
+ T *managed_obj = (*iter).second;
+ log_managed_object<T> (logger, managed_obj);
+ }
+}
+
+/* Dump the number of objects that were managed by MAP to LOGGER.
+   If SHOW_OBJS is true, also dump the objects themselves.
+   Overload of the above for consolidation_map rather than hash_map. */
+
+template <typename T>
+static void
+log_uniq_map (logger *logger, bool show_objs, const char *title,
+ const consolidation_map<T> &map)
+{
+ logger->log (" # %s: %li", title, map.elements ());
+ if (show_objs)
+ for (typename consolidation_map<T>::iterator iter = map.begin ();
+ iter != map.end (); ++iter)
+ {
+ T *managed_obj = (*iter).second;
+ log_managed_object<T> (logger, managed_obj);
+ }
+}
+
+/* Dump the number of objects of each class that were managed by this
+ manager to LOGGER.
+ If SHOW_OBJS is true, also dump the objects themselves. */
+
+void
+region_model_manager::log_stats (logger *logger, bool show_objs) const
+{
+ LOG_SCOPE (logger);
+ logger->log ("svalue consolidation");
+ log_uniq_map (logger, show_objs, "constant_svalue", m_constants_map);
+ log_uniq_map (logger, show_objs, "unknown_svalue", m_unknowns_map);
+ /* Presumably the typeless unknown svalue, kept outside m_unknowns_map,
+ hence logged separately - confirm against the manager's decl. */
+ if (m_unknown_NULL)
+ log_managed_object (logger, m_unknown_NULL);
+ log_uniq_map (logger, show_objs, "poisoned_svalue", m_poisoned_values_map);
+ log_uniq_map (logger, show_objs, "setjmp_svalue", m_setjmp_values_map);
+ log_uniq_map (logger, show_objs, "initial_svalue", m_initial_values_map);
+ log_uniq_map (logger, show_objs, "region_svalue", m_pointer_values_map);
+ log_uniq_map (logger, show_objs, "unaryop_svalue", m_unaryop_values_map);
+ log_uniq_map (logger, show_objs, "binop_svalue", m_binop_values_map);
+ log_uniq_map (logger, show_objs, "sub_svalue", m_sub_values_map);
+ log_uniq_map (logger, show_objs, "unmergeable_svalue",
+ m_unmergeable_values_map);
+ log_uniq_map (logger, show_objs, "widening_svalue", m_widening_values_map);
+ log_uniq_map (logger, show_objs, "compound_svalue", m_compound_values_map);
+ log_uniq_map (logger, show_objs, "conjured_svalue", m_conjured_values_map);
+ logger->log ("max accepted svalue num_nodes: %i",
+ m_max_complexity.m_num_nodes);
+ logger->log ("max accepted svalue max_depth: %i",
+ m_max_complexity.m_max_depth);
+
+ logger->log ("region consolidation");
+ logger->log (" next region id: %i", m_next_region_id);
+ log_uniq_map (logger, show_objs, "function_region", m_fndecls_map);
+ log_uniq_map (logger, show_objs, "label_region", m_labels_map);
+ log_uniq_map (logger, show_objs, "decl_region for globals", m_globals_map);
+ log_uniq_map (logger, show_objs, "field_region", m_field_regions);
+ log_uniq_map (logger, show_objs, "element_region", m_element_regions);
+ log_uniq_map (logger, show_objs, "offset_region", m_offset_regions);
+ log_uniq_map (logger, show_objs, "cast_region", m_cast_regions);
+ log_uniq_map (logger, show_objs, "frame_region", m_frame_regions);
+ log_uniq_map (logger, show_objs, "symbolic_region", m_symbolic_regions);
+ log_uniq_map (logger, show_objs, "string_region", m_string_map);
+ logger->log (" # managed dynamic regions: %i",
+ m_managed_dynamic_regions.length ());
+ /* Delegate to the store manager for its binding-key stats. */
+ m_store_mgr.log_stats (logger, show_objs);
+}
+
+/* Dump the number of objects of each class that were managed by this
+ manager to LOGGER.
+ If SHOW_OBJS is true, also dump the objects themselves.
+ This is defined here (rather than in store.cc) so it can use the
+ file-local log_uniq_map templates above. */
+
+void
+store_manager::log_stats (logger *logger, bool show_objs) const
+{
+ LOG_SCOPE (logger);
+ log_uniq_map (logger, show_objs, "concrete_binding",
+ m_concrete_binding_key_mgr);
+ log_uniq_map (logger, show_objs, "symbolic_binding",
+ m_symbolic_binding_key_mgr);
+}
+
+} // namespace ana
+
+#endif /* #if ENABLE_ANALYZER */
--- /dev/null
+/* Finding reachable regions and values.
+ Copyright (C) 2020 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tree.h"
+#include "function.h"
+#include "basic-block.h"
+#include "gimple.h"
+#include "gimple-iterator.h"
+#include "diagnostic-core.h"
+#include "graphviz.h"
+#include "options.h"
+#include "cgraph.h"
+#include "tree-dfa.h"
+#include "stringpool.h"
+#include "convert.h"
+#include "target.h"
+#include "fold-const.h"
+#include "tree-pretty-print.h"
+#include "tristate.h"
+#include "bitmap.h"
+#include "selftest.h"
+#include "function.h"
+#include "analyzer/analyzer.h"
+#include "analyzer/analyzer-logging.h"
+#include "ordered-hash-map.h"
+#include "options.h"
+#include "cgraph.h"
+#include "cfg.h"
+#include "digraph.h"
+#include "analyzer/call-string.h"
+#include "analyzer/program-point.h"
+#include "analyzer/store.h"
+#include "analyzer/region-model.h"
+#include "analyzer/region-model-reachability.h"
+
+#if ENABLE_ANALYZER
+
+namespace ana {
+
+/* reachable_regions's ctor.  The reachable/mutable sets start out empty;
+   regions are added lazily via "add" and the init_cluster callbacks. */
+
+reachable_regions::reachable_regions (store *store,
+ region_model_manager *mgr)
+: m_store (store), m_mgr (mgr),
+ m_reachable_base_regs (), m_mutable_base_regs ()
+{
+}
+
+/* Callback called for each cluster when initializing this object.
+   A static trampoline: THIS_PTR is the instance passed as user-data;
+   simply forwards to the init_cluster member function. */
+
+void
+reachable_regions::init_cluster_cb (const region *base_reg,
+ reachable_regions *this_ptr)
+{
+ this_ptr->init_cluster (base_reg);
+}
+
+/* Called for each cluster when initializing this object.
+   Determine whether BASE_REG is a reachability root: globals, clusters
+   that escaped in previous unknown calls, and symbolic regions whose
+   underlying pointer is still in its initial untouched state.
+   A base region can match more than one case; "add" tolerates repeated
+   additions. */
+void
+reachable_regions::init_cluster (const region *base_reg)
+{
+ /* Mark any globals as mutable (and traverse what they point to). */
+ const region *parent = base_reg->get_parent_region ();
+ gcc_assert (parent);
+ if (parent->get_kind () == RK_GLOBALS)
+ add (base_reg, true);
+
+ /* Mark any clusters that already escaped in previous unknown calls
+ as mutable (and traverse what they currently point to). */
+ if (m_store->escaped_p (base_reg))
+ add (base_reg, true);
+
+ /* If BASE_REG is *INIT_VAL(REG) for some other REG, see if REG is
+ unbound and untouched. If so, then add BASE_REG as a root. */
+ if (const symbolic_region *sym_reg = base_reg->dyn_cast_symbolic_region ())
+ {
+ const svalue *ptr = sym_reg->get_pointer ();
+ if (const initial_svalue *init_sval = ptr->dyn_cast_initial_svalue ())
+ {
+ const region *init_sval_reg = init_sval->get_region ();
+ const region *other_base_reg = init_sval_reg->get_base_region ();
+ /* "No cluster yet" or "cluster never touched" both mean REG is
+ still in its initial state. */
+ const binding_cluster *other_cluster
+ = m_store->get_cluster (other_base_reg);
+ if (other_cluster == NULL
+ || !other_cluster->touched_p ())
+ add (base_reg, true);
+ }
+ }
+}
+
+ /* Lazily mark the cluster containing REG as being reachable, recursively
+ adding clusters reachable from REG's cluster. */
+void
+reachable_regions::add (const region *reg, bool is_mutable)
+{
+ gcc_assert (reg);
+
+ /* No const_cast needed: get_base_region's result is only ever used
+ through a const pointer here. */
+ const region *base_reg = reg->get_base_region ();
+ gcc_assert (base_reg);
+
+ /* Bail out if this cluster is already in the sets at the IS_MUTABLE
+ level of mutability. */
+ if (!is_mutable && m_reachable_base_regs.contains (base_reg))
+ return;
+ m_reachable_base_regs.add (base_reg);
+
+ if (is_mutable)
+ {
+ if (m_mutable_base_regs.contains (base_reg))
+ return;
+ else
+ m_mutable_base_regs.add (base_reg);
+ }
+
+ /* Add values within the cluster. If any are pointers, add the pointee. */
+ if (binding_cluster *bind_cluster = m_store->get_cluster (base_reg))
+ bind_cluster->for_each_value (handle_sval_cb, this);
+ else
+ /* No cluster is bound: traverse the region's initial value instead. */
+ handle_sval (m_mgr->get_or_create_initial_value (base_reg));
+}
+
+/* Callback wrapper for handle_sval, for use when walking the values
+   within a binding_cluster (THIS_PTR is the user-data). */
+
+void
+reachable_regions::handle_sval_cb (const svalue *sval,
+ reachable_regions *this_ptr)
+{
+ this_ptr->handle_sval (sval);
+}
+
+/* Add SVAL. If it is a pointer, add the pointed-to region. */
+
+void
+reachable_regions::handle_sval (const svalue *sval)
+{
+ m_reachable_svals.add (sval);
+ if (const region_svalue *ptr = sval->dyn_cast_region_svalue ())
+ {
+ const region *pointee = ptr->get_pointee ();
+ /* Use const-ness of pointer type to affect mutability. */
+ bool ptr_is_mutable = true;
+ if (ptr->get_type ()
+ && TREE_CODE (ptr->get_type ()) == POINTER_TYPE
+ && TYPE_READONLY (TREE_TYPE (ptr->get_type ())))
+ {
+ ptr_is_mutable = false;
+ }
+ else
+ {
+ m_mutable_svals.add (sval);
+ }
+ add (pointee, ptr_is_mutable);
+ }
+ /* Treat all svalues within a compound_svalue as reachable. */
+ if (const compound_svalue *compound_sval
+ = sval->dyn_cast_compound_svalue ())
+ {
+ for (compound_svalue::iterator_t iter = compound_sval->begin ();
+ iter != compound_sval->end (); ++iter)
+ {
+ const svalue *iter_sval = (*iter).second;
+ handle_sval (iter_sval);
+ }
+ }
+ /* If SVAL is a cast of some other svalue, the underlying svalue is
+ also reachable. */
+ if (const svalue *cast = sval->maybe_undo_cast ())
+ handle_sval (cast);
+}
+
+/* Add SVAL. If it is a pointer, add the pointed-to region.
+ Use PARAM_TYPE for determining mutability: a pointer-to-const param
+ makes both the svalue and its pointee non-mutable. */
+
+void
+reachable_regions::handle_parm (const svalue *sval, tree param_type)
+{
+ bool is_mutable = true;
+ if (param_type
+ && TREE_CODE (param_type) == POINTER_TYPE
+ && TYPE_READONLY (TREE_TYPE (param_type)))
+ is_mutable = false;
+ /* NOTE(review): in the mutable case SVAL is added only to
+ m_mutable_svals, not m_reachable_svals, unlike handle_sval which
+ adds to both - confirm this asymmetry is intended. */
+ if (is_mutable)
+ m_mutable_svals.add (sval);
+ else
+ m_reachable_svals.add (sval);
+ if (const region_svalue *parm_ptr
+ = sval->dyn_cast_region_svalue ())
+ {
+ const region *pointee_reg = parm_ptr->get_pointee ();
+ add (pointee_reg, is_mutable);
+ }
+}
+
+/* Update m_store to mark the clusters that were found to be mutable
+ as having escaped (see also store::escaped_p, consulted by
+ init_cluster on later walks). */
+
+void
+reachable_regions::mark_escaped_clusters ()
+{
+ for (hash_set<const region *>::iterator iter = m_mutable_base_regs.begin ();
+ iter != m_mutable_base_regs.end (); ++iter)
+ {
+ const region *base_reg = *iter;
+ m_store->mark_as_escaped (base_reg);
+ }
+}
+
+/* Dump a multiline representation of this object to PP, as four
+ sections: reachable clusters, mutable clusters, reachable svals,
+ mutable svals; one indented entry per line. */
+
+void
+reachable_regions::dump_to_pp (pretty_printer *pp) const
+{
+ pp_string (pp, "reachable clusters: ");
+ pp_newline (pp);
+ for (hash_set<const region *>::iterator iter = m_reachable_base_regs.begin ();
+ iter != m_reachable_base_regs.end (); ++iter)
+ {
+ pp_string (pp, " ");
+ (*iter)->dump_to_pp (pp, true);
+ pp_newline (pp);
+ }
+ pp_string (pp, "mutable clusters: ");
+ pp_newline (pp);
+ for (hash_set<const region *>::iterator iter = m_mutable_base_regs.begin ();
+ iter != m_mutable_base_regs.end (); ++iter)
+ {
+ pp_string (pp, " ");
+ (*iter)->dump_to_pp (pp, true);
+ pp_newline (pp);
+ }
+ pp_string (pp, "reachable svals: ");
+ pp_newline (pp);
+ for (svalue_set::iterator iter = m_reachable_svals.begin ();
+ iter != m_reachable_svals.end (); ++iter)
+ {
+ pp_string (pp, " ");
+ (*iter)->dump_to_pp (pp, true);
+ pp_newline (pp);
+ }
+ pp_string (pp, "mutable svals: ");
+ pp_newline (pp);
+ for (svalue_set::iterator iter = m_mutable_svals.begin ();
+ iter != m_mutable_svals.end (); ++iter)
+ {
+ pp_string (pp, " ");
+ (*iter)->dump_to_pp (pp, true);
+ pp_newline (pp);
+ }
+}
+
+/* Dump a multiline representation of this object to stderr. */
+
+DEBUG_FUNCTION void
+reachable_regions::dump () const
+{
+ pretty_printer pp;
+ /* Support %E/%T codes for trees in the dump. */
+ pp_format_decoder (&pp) = default_tree_printer;
+ pp_show_color (&pp) = pp_show_color (global_dc->printer);
+ /* Emit directly to stderr. */
+ pp.buffer->stream = stderr;
+ dump_to_pp (&pp);
+ pp_flush (&pp);
+}
+
+} // namespace ana
+
+#endif /* #if ENABLE_ANALYZER */
--- /dev/null
+/* Finding reachable regions and values.
+ Copyright (C) 2020 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_ANALYZER_REGION_MODEL_REACHABILITY_H
+#define GCC_ANALYZER_REGION_MODEL_REACHABILITY_H
+
+namespace ana {
+
+/* A class for determining which regions and svalues are reachable.
+
+ Used by region_model::handle_unrecognized_call for keeping
+ track of all regions that are reachable, and, of those, which are
+ mutable.
+
+ Used by program_state::detect_leaks
+ (via region_model::get_reachable_svalues) for detecting leaks. */
+
+class reachable_regions
+{
+public:
+ reachable_regions (store *store, region_model_manager *mgr);
+
+ /* Callback called for each cluster when initializing this object. */
+ static void init_cluster_cb (const region *base_reg,
+ reachable_regions *this_ptr);
+
+ /* Called for each cluster when initializing this object. */
+ void init_cluster (const region *base_reg);
+
+ /* Lazily mark the cluster containing REG as being reachable, recursively
+ adding clusters reachable from REG's cluster. */
+ void add (const region *reg, bool is_mutable);
+
+ /* Callback wrapper for handle_sval (THIS_PTR is the user-data). */
+ static void handle_sval_cb (const svalue *sval,
+ reachable_regions *this_ptr);
+
+ /* Add SVAL. If it is a pointer, add the pointed-to region. */
+ void handle_sval (const svalue *sval);
+
+ /* Add SVAL. If it is a pointer, add the pointed-to region.
+ Use PARAM_TYPE for determining mutability. */
+ void handle_parm (const svalue *sval, tree param_type);
+
+ /* Update the store to mark the clusters that were found to be mutable
+ as having escaped. */
+ void mark_escaped_clusters ();
+
+ /* Iteration over reachable base regions. */
+ hash_set<const region *>::iterator begin ()
+ {
+ return m_reachable_base_regs.begin ();
+ }
+ hash_set<const region *>::iterator end ()
+ {
+ return m_reachable_base_regs.end ();
+ }
+
+ /* Iteration over reachable and mutable svalues. */
+ svalue_set::iterator begin_reachable_svals ()
+ {
+ return m_reachable_svals.begin ();
+ }
+ svalue_set::iterator end_reachable_svals ()
+ {
+ return m_reachable_svals.end ();
+ }
+ svalue_set::iterator begin_mutable_svals ()
+ {
+ return m_mutable_svals.begin ();
+ }
+ svalue_set::iterator end_mutable_svals ()
+ {
+ return m_mutable_svals.end ();
+ }
+
+ void dump_to_pp (pretty_printer *pp) const;
+
+ DEBUG_FUNCTION void dump () const;
+
+private:
+ /* Borrowed pointers (not owned by this object). */
+ store *m_store;
+ region_model_manager *m_mgr;
+
+ /* The base regions already seen. */
+ hash_set<const region *> m_reachable_base_regs;
+
+ /* The base regions that can be changed (accessed via non-const pointers). */
+ hash_set<const region *> m_mutable_base_regs;
+
+ /* svalues that were passed as const pointers, so e.g. couldn't have
+ been freed (but could have e.g. had "close" called on them if an
+ int file-descriptor). */
+ svalue_set m_reachable_svals;
+ /* svalues that were passed as non-const pointers, so e.g. could have
+ been freed. */
+ svalue_set m_mutable_svals;
+};
+
+} // namespace ana
+
+#endif /* GCC_ANALYZER_REGION_MODEL_REACHABILITY_H */
#include "digraph.h"
#include "analyzer/supergraph.h"
#include "sbitmap.h"
+#include "analyzer/call-string.h"
+#include "analyzer/program-point.h"
+#include "analyzer/store.h"
#include "analyzer/region-model.h"
#include "analyzer/constraint-manager.h"
#include "diagnostic-event-id.h"
#include "diagnostic-event-id.h"
#include "analyzer/sm.h"
#include "analyzer/pending-diagnostic.h"
+#include "analyzer/region-model-reachability.h"
#include "analyzer/analyzer-selftests.h"
#include "stor-layout.h"
/* Dump T to PP in language-independent form, for debugging/logging/dumping
purposes. */
-static void
-dump_tree (pretty_printer *pp, tree t)
-{
- dump_generic_node (pp, t, 0, TDF_SLIM, 0);
-}
-
-/* Dump T to PP in language-independent form in quotes, for
- debugging/logging/dumping purposes. */
-
-void
-dump_quoted_tree (pretty_printer *pp, tree t)
-{
- pp_begin_quote (pp, pp_show_color (pp));
- dump_tree (pp, t);
- pp_end_quote (pp, pp_show_color (pp));
-}
-
-/* Equivalent to pp_printf (pp, "%qT", t), to avoid nesting pp_printf
- calls within other pp_printf calls.
-
- default_tree_printer handles 'T' and some other codes by calling
- dump_generic_node (pp, t, 0, TDF_SLIM, 0);
- dump_generic_node calls pp_printf in various places, leading to
- garbled output.
-
- Ideally pp_printf could be made to be reentrant, but in the meantime
- this function provides a workaround. */
-
-static void
-print_quoted_type (pretty_printer *pp, tree t)
-{
- pp_begin_quote (pp, pp_show_color (pp));
- dump_generic_node (pp, t, 0, TDF_SLIM, 0);
- pp_end_quote (pp, pp_show_color (pp));
-}
-
-/* Dump this path_var to PP (which must support %E for trees).
-
- Express the stack depth using an "@DEPTH" suffix, so e.g. given
- void foo (int j);
- void bar (int i)
- {
- foo (i);
- }
- then:
- - the "i" in "bar" would be "(i @ 0)"
- - the "j" in "foo" would be "(j @ 1)". */
-
-void
-path_var::dump (pretty_printer *pp) const
-{
- if (m_tree == NULL_TREE)
- pp_string (pp, "NULL");
- if (CONSTANT_CLASS_P (m_tree))
- pp_printf (pp, "%qE", m_tree);
- else
- pp_printf (pp, "(%qE @ %i)", m_tree, m_stack_depth);
-}
-
-/* For use in printing a comma-separated list. */
-
-static void
-dump_separator (pretty_printer *pp, bool *is_first)
-{
- if (!*is_first)
- pp_string (pp, ", ");
- *is_first = false;
-}
-
-/* Concrete subclass of constraint_manager that wires it up to a region_model
- (whilst allowing the constraint_manager and region_model to be somewhat
- at arms length).
- TODO: revisit this; maybe put the region_model * into the constraint_manager
- base class. */
-
-class impl_constraint_manager : public constraint_manager
-{
- public:
- impl_constraint_manager (region_model *model)
- : constraint_manager (),
- m_model (model)
- {}
-
- impl_constraint_manager (const impl_constraint_manager &other,
- region_model *model)
- : constraint_manager (other),
- m_model (model)
- {}
-
- constraint_manager *clone (region_model *model) const
- {
- return new impl_constraint_manager (*this, model);
- }
-
- tree maybe_get_constant (svalue_id sid) const FINAL OVERRIDE
- {
- svalue *svalue = m_model->get_svalue (sid);
- return svalue->maybe_get_constant ();
- }
-
- svalue_id get_sid_for_constant (tree cst) const FINAL OVERRIDE
- {
- gcc_assert (CONSTANT_CLASS_P (cst));
- return m_model->get_rvalue (cst, NULL);
- }
-
- int get_num_svalues () const FINAL OVERRIDE
- {
- return m_model->get_num_svalues ();
- }
-
- private:
- region_model *m_model;
-};
-
-/* class svalue_id. */
-
-/* Print this svalue_id to PP. */
-
-void
-svalue_id::print (pretty_printer *pp) const
-{
- if (null_p ())
- pp_printf (pp, "null");
- else
- pp_printf (pp, "sv%i", m_idx);
-}
-
-/* Print this svalue_id in .dot format to PP. */
-
-void
-svalue_id::dump_node_name_to_pp (pretty_printer *pp) const
-{
- gcc_assert (!null_p ());
- pp_printf (pp, "svalue_%i", m_idx);
-}
-
-/* Assert that this object is valid (w.r.t. MODEL). */
-
-void
-svalue_id::validate (const region_model &model) const
-{
- gcc_assert (null_p () || m_idx < (int)model.get_num_svalues ());
-}
-
-/* class region_id. */
-
-/* Print this region_id to PP. */
-
-void
-region_id::print (pretty_printer *pp) const
-{
- if (null_p ())
- pp_printf (pp, "null");
- else
- pp_printf (pp, "r%i", m_idx);
-}
-
-/* Print this region_id in .dot format to PP. */
-
-void
-region_id::dump_node_name_to_pp (pretty_printer *pp) const
-{
- gcc_assert (!null_p ());
- pp_printf (pp, "region_%i", m_idx);
-}
-
-/* Assert that this object is valid (w.r.t. MODEL). */
-
-void
-region_id::validate (const region_model &model) const
-{
- gcc_assert (null_p () || m_idx < (int)model.get_num_regions ());
-}
-
-/* class region_id_set. */
-
-region_id_set::region_id_set (const region_model *model)
-: m_bitmap (model->get_num_regions ())
-{
- bitmap_clear (m_bitmap);
-}
-
-/* class svalue_id_set. */
-
-svalue_id_set::svalue_id_set ()
-: m_bitmap (NULL)
-{
- bitmap_clear (m_bitmap);
-}
-
-/* class svalue and its various subclasses. */
-
-/* class svalue. */
-
-/* svalue's equality operator. Most of the work is done by the
- a "compare_fields" implementation on each subclass. */
-
-bool
-svalue::operator== (const svalue &other) const
-{
- enum svalue_kind this_kind = get_kind ();
- enum svalue_kind other_kind = other.get_kind ();
- if (this_kind != other_kind)
- return false;
-
- if (m_type != other.m_type)
- return false;
-
- switch (this_kind)
- {
- default:
- gcc_unreachable ();
- case SK_REGION:
- {
- const region_svalue &this_sub
- = (const region_svalue &)*this;
- const region_svalue &other_sub
- = (const region_svalue &)other;
- return this_sub.compare_fields (other_sub);
- }
- break;
- case SK_CONSTANT:
- {
- const constant_svalue &this_sub
- = (const constant_svalue &)*this;
- const constant_svalue &other_sub
- = (const constant_svalue &)other;
- return this_sub.compare_fields (other_sub);
- }
- break;
- case SK_UNKNOWN:
- {
- const unknown_svalue &this_sub
- = (const unknown_svalue &)*this;
- const unknown_svalue &other_sub
- = (const unknown_svalue &)other;
- return this_sub.compare_fields (other_sub);
- }
- break;
- case SK_POISONED:
- {
- const poisoned_svalue &this_sub
- = (const poisoned_svalue &)*this;
- const poisoned_svalue &other_sub
- = (const poisoned_svalue &)other;
- return this_sub.compare_fields (other_sub);
- }
- break;
- case SK_SETJMP:
- {
- const setjmp_svalue &this_sub
- = (const setjmp_svalue &)*this;
- const setjmp_svalue &other_sub
- = (const setjmp_svalue &)other;
- return this_sub.compare_fields (other_sub);
- }
- break;
- }
-}
-
-/* Generate a hash value for this svalue. Most of the work is done by the
- add_to_hash vfunc. */
-
-hashval_t
-svalue::hash () const
-{
- inchash::hash hstate;
- if (m_type)
- hstate.add_int (TYPE_UID (m_type));
- add_to_hash (hstate);
- return hstate.end ();
-}
-
-/* Print this svalue and its ID to PP. */
-
-void
-svalue::print (const region_model &model,
- svalue_id this_sid,
- pretty_printer *pp) const
-{
- this_sid.print (pp);
- pp_string (pp, ": {");
-
- if (m_type)
- {
- gcc_assert (TYPE_P (m_type));
- pp_string (pp, "type: ");
- print_quoted_type (pp, m_type);
- pp_string (pp, ", ");
- }
-
- /* vfunc. */
- print_details (model, this_sid, pp);
-
- pp_string (pp, "}");
-}
-
-/* Dump this svalue in the form of a .dot record to PP. */
-
-void
-svalue::dump_dot_to_pp (const region_model &model,
- svalue_id this_sid,
- pretty_printer *pp) const
-{
- this_sid.dump_node_name_to_pp (pp);
- pp_printf (pp, " [label=\"");
- pp_write_text_to_stream (pp);
- this_sid.print (pp);
- pp_string (pp, ": {");
- print (model, this_sid, pp);
- pp_write_text_as_dot_label_to_stream (pp, /*for_record=*/false);
- pp_string (pp, "}\"];");
- pp_newline (pp);
-}
-
-/* Base implementation of svalue::remap_region_ids vfunc. */
-
-void
-svalue::remap_region_ids (const region_id_map &)
-{
- /* Empty. */
-}
-
-/* Base implementation of svalue::walk_for_canonicalization vfunc. */
-
-void
-svalue::walk_for_canonicalization (canonicalization *) const
-{
- /* Empty. */
-}
-
-/* Base implementation of svalue::get_child_sid vfunc. */
-
-svalue_id
-svalue::get_child_sid (region *parent ATTRIBUTE_UNUSED,
- region *child,
- region_model &model,
- region_model_context *ctxt ATTRIBUTE_UNUSED)
-{
- svalue *new_child_value = clone ();
- if (child->get_type ())
- new_child_value->m_type = child->get_type ();
- svalue_id new_child_sid = model.add_svalue (new_child_value);
- return new_child_sid;
-}
-
-/* If this svalue is a constant_svalue, return the underlying tree constant.
- Otherwise return NULL_TREE. */
-
-tree
-svalue::maybe_get_constant () const
-{
- if (const constant_svalue *cst_sval = dyn_cast_constant_svalue ())
- return cst_sval->get_constant ();
- else
- return NULL_TREE;
-}
-
-/* class region_svalue : public svalue. */
-
-/* Compare the fields of this region_svalue with OTHER, returning true
- if they are equal.
- For use by svalue::operator==. */
-
-bool
-region_svalue::compare_fields (const region_svalue &other) const
-{
- return m_rid == other.m_rid;
-}
-
-/* Implementation of svalue::add_to_hash vfunc for region_svalue. */
-
-void
-region_svalue::add_to_hash (inchash::hash &hstate) const
-{
- inchash::add (m_rid, hstate);
-}
-
-/* Implementation of svalue::print_details vfunc for region_svalue. */
-
-void
-region_svalue::print_details (const region_model &model ATTRIBUTE_UNUSED,
- svalue_id this_sid ATTRIBUTE_UNUSED,
- pretty_printer *pp) const
-{
- if (m_rid.null_p ())
- pp_string (pp, "NULL");
- else
- {
- pp_string (pp, "&");
- m_rid.print (pp);
- }
-}
-
-/* Implementation of svalue::dump_dot_to_pp for region_svalue. */
-
-void
-region_svalue::dump_dot_to_pp (const region_model &model,
- svalue_id this_sid,
- pretty_printer *pp) const
-{
- svalue::dump_dot_to_pp (model, this_sid, pp);
-
- /* If non-NULL, add an edge to the pointed-to region. */
- if (!m_rid.null_p ())
- {
- this_sid.dump_node_name_to_pp (pp);
- pp_string (pp, " -> ");
- m_rid.dump_node_name_to_pp (pp);
- pp_string (pp, ";");
- pp_newline (pp);
- }
-}
-
-/* Implementation of svalue::remap_region_ids vfunc for region_svalue. */
-
-void
-region_svalue::remap_region_ids (const region_id_map &map)
-{
- map.update (&m_rid);
-}
-
-/* Merge REGION_SVAL_A and REGION_SVAL_B using MERGER, writing the result
- into *MERGED_SID. */
-
-void
-region_svalue::merge_values (const region_svalue ®ion_sval_a,
- const region_svalue ®ion_sval_b,
- svalue_id *merged_sid,
- tree type,
- model_merger *merger)
-{
- region_id a_rid = region_sval_a.get_pointee ();
- region_id b_rid = region_sval_b.get_pointee ();
-
- /* Both are non-NULL. */
- gcc_assert (!a_rid.null_p () && !b_rid.null_p ());
-
- /* Have these ptr-values already been merged? */
-
- region_id a_rid_in_m
- = merger->m_map_regions_from_a_to_m.get_dst_for_src (a_rid);
- region_id b_rid_in_m
- = merger->m_map_regions_from_b_to_m.get_dst_for_src (b_rid);
-
- /* "null_p" here means "we haven't seen this ptr-value before".
- If we've seen one but not the other, or we have different
- regions, then the merged ptr has to be "unknown". */
- if (a_rid_in_m != b_rid_in_m)
- {
- svalue *merged_sval = new unknown_svalue (type);
- *merged_sid = merger->m_merged_model->add_svalue (merged_sval);
- return;
- }
-
- /* Have we seen this yet? If so, reuse the value. */
- if (!a_rid_in_m.null_p ())
- {
- *merged_sid
- = merger->m_merged_model->get_or_create_ptr_svalue (type, a_rid_in_m);
- return;
- }
-
- /* Otherwise we have A/B regions that haven't been referenced yet. */
-
- /* Are the regions the "same", when seen from the tree point-of-view.
- If so, create a merged pointer to it. */
- path_var pv_a = merger->m_model_a->get_representative_path_var (a_rid);
- path_var pv_b = merger->m_model_b->get_representative_path_var (b_rid);
- if (pv_a.m_tree
- && pv_a == pv_b)
- {
- region_id merged_pointee_rid
- = merger->m_merged_model->get_lvalue (pv_a, NULL);
- *merged_sid
- = merger->m_merged_model->get_or_create_ptr_svalue (type,
- merged_pointee_rid);
- merger->record_regions (a_rid, b_rid, merged_pointee_rid);
- return;
- }
-
- /* Handle an A/B pair of ptrs that both point at heap regions.
- If they both have a heap region in the merger model, merge them. */
- region *region_a = merger->m_model_a->get_region (a_rid);
- region *region_b = merger->m_model_b->get_region (b_rid);
- region_id a_parent_rid = region_a->get_parent ();
- region_id b_parent_rid = region_b->get_parent ();
- region *parent_region_a = merger->m_model_a->get_region (a_parent_rid);
- region *parent_region_b = merger->m_model_b->get_region (b_parent_rid);
- if (parent_region_a
- && parent_region_b
- && parent_region_a->get_kind () == RK_HEAP
- && parent_region_b->get_kind () == RK_HEAP)
- {
- /* We have an A/B pair of ptrs that both point at heap regions. */
- /* presumably we want to see if each A/B heap region already
- has a merged region, and, if so, is it the same one. */
- // This check is above
-
- region_id merged_pointee_rid
- = merger->m_merged_model->add_new_malloc_region ();
- *merged_sid
- = merger->m_merged_model->get_or_create_ptr_svalue
- (type, merged_pointee_rid);
- merger->record_regions (a_rid, b_rid, merged_pointee_rid);
- return;
- }
-
- /* Two different non-NULL pointers? Merge to unknown. */
- svalue *merged_sval = new unknown_svalue (type);
- *merged_sid = merger->m_merged_model->add_svalue (merged_sval);
- return;
-}
-
-/* Implementation of svalue::walk_for_canonicalization vfunc for
- region_svalue. */
-
-void
-region_svalue::walk_for_canonicalization (canonicalization *c) const
-{
- c->walk_rid (m_rid);
-}
-
-/* Evaluate the condition LHS OP RHS.
- Subroutine of region_model::eval_condition for when we have a pair of
- pointers. */
-
-tristate
-region_svalue::eval_condition (region_svalue *lhs,
- enum tree_code op,
- region_svalue *rhs)
-{
- /* See if they point to the same region. */
- /* TODO: what about child regions where the child is the first child
- (or descendent)? */
- region_id lhs_rid = lhs->get_pointee ();
- region_id rhs_rid = rhs->get_pointee ();
- switch (op)
- {
- default:
- gcc_unreachable ();
-
- case EQ_EXPR:
- if (lhs_rid == rhs_rid)
- return tristate::TS_TRUE;
- else
- return tristate::TS_FALSE;
- break;
-
- case NE_EXPR:
- if (lhs_rid != rhs_rid)
- return tristate::TS_TRUE;
- else
- return tristate::TS_FALSE;
- break;
-
- case GE_EXPR:
- case LE_EXPR:
- if (lhs_rid == rhs_rid)
- return tristate::TS_TRUE;
- break;
-
- case GT_EXPR:
- case LT_EXPR:
- if (lhs_rid == rhs_rid)
- return tristate::TS_FALSE;
- break;
- }
-
- return tristate::TS_UNKNOWN;
-}
-
-/* class constant_svalue : public svalue. */
-
-/* Compare the fields of this constant_svalue with OTHER, returning true
- if they are equal.
- For use by svalue::operator==. */
-
-bool
-constant_svalue::compare_fields (const constant_svalue &other) const
-{
- return m_cst_expr == other.m_cst_expr;
-}
-
-/* Implementation of svalue::add_to_hash vfunc for constant_svalue. */
-
-void
-constant_svalue::add_to_hash (inchash::hash &hstate) const
-{
- inchash::add_expr (m_cst_expr, hstate);
-}
-
-/* Merge the CST_SVAL_A and CST_SVAL_B using MERGER, writing the id of
- the resulting svalue into *MERGED_SID. */
-
-void
-constant_svalue::merge_values (const constant_svalue &cst_sval_a,
- const constant_svalue &cst_sval_b,
- svalue_id *merged_sid,
- model_merger *merger)
-{
- tree cst_a = cst_sval_a.get_constant ();
- tree cst_b = cst_sval_b.get_constant ();
- svalue *merged_sval;
- if (cst_a == cst_b)
- {
- /* If they are the same constant, merge as that constant value. */
- merged_sval = new constant_svalue (cst_a);
- }
- else
- {
- /* Otherwise, we have two different constant values.
- Merge as an unknown value.
- TODO: impose constraints on the value?
- (maybe just based on A, to avoid infinite chains) */
- merged_sval = new unknown_svalue (TREE_TYPE (cst_a));
- }
- *merged_sid = merger->m_merged_model->add_svalue (merged_sval);
-}
-
-/* Evaluate the condition LHS OP RHS.
- Subroutine of region_model::eval_condition for when we have a pair of
- constants. */
-
-tristate
-constant_svalue::eval_condition (constant_svalue *lhs,
- enum tree_code op,
- constant_svalue *rhs)
-{
- tree lhs_const = lhs->get_constant ();
- tree rhs_const = rhs->get_constant ();
-
- gcc_assert (CONSTANT_CLASS_P (lhs_const));
- gcc_assert (CONSTANT_CLASS_P (rhs_const));
-
- /* Check for comparable types. */
- if (types_compatible_p (TREE_TYPE (lhs_const), TREE_TYPE (rhs_const)))
- {
- tree comparison
- = fold_binary (op, boolean_type_node, lhs_const, rhs_const);
- if (comparison == boolean_true_node)
- return tristate (tristate::TS_TRUE);
- if (comparison == boolean_false_node)
- return tristate (tristate::TS_FALSE);
- }
- return tristate::TS_UNKNOWN;
-}
-
-/* Implementation of svalue::print_details vfunc for constant_svalue. */
-
-void
-constant_svalue::print_details (const region_model &model ATTRIBUTE_UNUSED,
- svalue_id this_sid ATTRIBUTE_UNUSED,
- pretty_printer *pp) const
-{
- pp_printf (pp, "%qE", m_cst_expr);
-}
-
-/* Implementation of svalue::get_child_sid vfunc for constant_svalue. */
-
-svalue_id
-constant_svalue::get_child_sid (region *parent ATTRIBUTE_UNUSED,
- region *child,
- region_model &model,
- region_model_context *ctxt ATTRIBUTE_UNUSED)
-{
- /* TODO: handle the all-zeroes case by returning an all-zeroes of the
- child type. */
-
- /* Otherwise, we don't have a good way to get a child value out of a
- constant.
-
- Handle this case by using an unknown value. */
- svalue *unknown_sval = new unknown_svalue (child->get_type ());
- return model.add_svalue (unknown_sval);
-}
-
-/* class unknown_svalue : public svalue. */
-
-/* Compare the fields of this unknown_svalue with OTHER, returning true
- if they are equal.
- For use by svalue::operator==. */
-
-bool
-unknown_svalue::compare_fields (const unknown_svalue &) const
-{
- /* I *think* we want to return true here, in that when comparing
- two region models, we want two peer unknown_svalue instances
- to be the "same". */
- return true;
-}
-
-/* Implementation of svalue::add_to_hash vfunc for unknown_svalue. */
-
-void
-unknown_svalue::add_to_hash (inchash::hash &) const
-{
- /* Empty. */
-}
-
-/* Implementation of svalue::print_details vfunc for unknown_svalue. */
-
-void
-unknown_svalue::print_details (const region_model &model ATTRIBUTE_UNUSED,
- svalue_id this_sid ATTRIBUTE_UNUSED,
- pretty_printer *pp) const
-{
- pp_string (pp, "unknown");
-}
-
-/* Get a string for KIND for use in debug dumps. */
-
-const char *
-poison_kind_to_str (enum poison_kind kind)
-{
- switch (kind)
- {
- default:
- gcc_unreachable ();
- case POISON_KIND_FREED:
- return "freed";
- case POISON_KIND_POPPED_STACK:
- return "popped stack";
- }
-}
-
-/* class poisoned_svalue : public svalue. */
-
-/* Compare the fields of this poisoned_svalue with OTHER, returning true
- if they are equal.
- For use by svalue::operator==. */
-
-bool
-poisoned_svalue::compare_fields (const poisoned_svalue &other) const
-{
- return m_kind == other.m_kind;
-}
-
-/* Implementation of svalue::add_to_hash vfunc for poisoned_svalue. */
-
-void
-poisoned_svalue::add_to_hash (inchash::hash &hstate) const
-{
- hstate.add_int (m_kind);
-}
-
-/* Implementation of svalue::print_details vfunc for poisoned_svalue. */
-
-void
-poisoned_svalue::print_details (const region_model &model ATTRIBUTE_UNUSED,
- svalue_id this_sid ATTRIBUTE_UNUSED,
- pretty_printer *pp) const
-{
- pp_printf (pp, "poisoned: %s", poison_kind_to_str (m_kind));
-}
-
-/* class setjmp_svalue's implementation is in engine.cc, so that it can use
- the declaration of exploded_node. */
-
-/* class region and its various subclasses. */
-
-/* Get a string for KIND for use in debug dumps. */
-
-const char *
-region_kind_to_str (enum region_kind kind)
-{
- switch (kind)
- {
- default:
- gcc_unreachable ();
- case RK_PRIMITIVE:
- return "primitive";
- case RK_STRUCT:
- return "struct";
- case RK_UNION:
- return "union";
- case RK_ARRAY:
- return "array";
- case RK_FRAME:
- return "frame";
- case RK_GLOBALS:
- return "globals";
- case RK_CODE:
- return "code";
- case RK_FUNCTION:
- return "function";
- case RK_STACK:
- return "stack";
- case RK_HEAP:
- return "heap";
- case RK_ROOT:
- return "root";
- case RK_SYMBOLIC:
- return "symbolic";
- }
-}
-
-/* class region. */
-
-/* Equality operator for region.
- After comparing base class fields and kind, the rest of the
- comparison is handled off to a "compare_fields" member function
- specific to the appropriate subclass. */
-
-bool
-region::operator== (const region &other) const
-{
- if (m_parent_rid != other.m_parent_rid)
- return false;
- if (m_sval_id != other.m_sval_id)
- return false;
- if (m_type != other.m_type)
- return false;
-
- enum region_kind this_kind = get_kind ();
- enum region_kind other_kind = other.get_kind ();
- if (this_kind != other_kind)
- return false;
-
- /* Compare views. */
- if (m_view_rids.length () != other.m_view_rids.length ())
- return false;
- int i;
- region_id *rid;
- FOR_EACH_VEC_ELT (m_view_rids, i, rid)
- if (! (*rid == other.m_view_rids[i]))
- return false;
-
- switch (this_kind)
- {
- default:
- gcc_unreachable ();
- case RK_PRIMITIVE:
- {
-#if 1
- return true;
-#else
- const primitive_region &this_sub
- = (const primitive_region &)*this;
- const primitive_region &other_sub
- = (const primitive_region &)other;
- return this_sub.compare_fields (other_sub);
-#endif
- }
- case RK_STRUCT:
- {
- const struct_region &this_sub
- = (const struct_region &)*this;
- const struct_region &other_sub
- = (const struct_region &)other;
- return this_sub.compare_fields (other_sub);
- }
- case RK_UNION:
- {
- const union_region &this_sub
- = (const union_region &)*this;
- const union_region &other_sub
- = (const union_region &)other;
- return this_sub.compare_fields (other_sub);
- }
- case RK_ARRAY:
- {
- const array_region &this_sub
- = (const array_region &)*this;
- const array_region &other_sub
- = (const array_region &)other;
- return this_sub.compare_fields (other_sub);
- }
- case RK_FRAME:
- {
- const frame_region &this_sub
- = (const frame_region &)*this;
- const frame_region &other_sub
- = (const frame_region &)other;
- return this_sub.compare_fields (other_sub);
- }
- case RK_GLOBALS:
- {
- const globals_region &this_sub
- = (const globals_region &)*this;
- const globals_region &other_sub
- = (const globals_region &)other;
- return this_sub.compare_fields (other_sub);
- }
- case RK_CODE:
- {
- const code_region &this_sub
- = (const code_region &)*this;
- const code_region &other_sub
- = (const code_region &)other;
- return this_sub.compare_fields (other_sub);
- }
- case RK_FUNCTION:
- {
- const function_region &this_sub
- = (const function_region &)*this;
- const function_region &other_sub
- = (const function_region &)other;
- return this_sub.compare_fields (other_sub);
- }
- case RK_STACK:
- {
- const stack_region &this_sub
- = (const stack_region &)*this;
- const stack_region &other_sub
- = (const stack_region &)other;
- return this_sub.compare_fields (other_sub);
- }
- case RK_ROOT:
- {
- const root_region &this_sub
- = (const root_region &)*this;
- const root_region &other_sub
- = (const root_region &)other;
- return this_sub.compare_fields (other_sub);
- }
- case RK_SYMBOLIC:
- {
- const symbolic_region &this_sub
- = (const symbolic_region &)*this;
- const symbolic_region &other_sub
- = (const symbolic_region &)other;
- return this_sub.compare_fields (other_sub);
- }
- case RK_HEAP:
- {
- const heap_region &this_sub
- = (const heap_region &)*this;
- const heap_region &other_sub
- = (const heap_region &)other;
- return this_sub.compare_fields (other_sub);
- }
- }
-}
-
-/* Get the parent region of this region. */
-
-region *
-region::get_parent_region (const region_model &model) const
-{
- return model.get_region (m_parent_rid);
-}
-
-/* Set this region's value to RHS_SID (or potentially a variant of it,
- for some kinds of casts). */
-
-void
-region::set_value (region_model &model, region_id this_rid, svalue_id rhs_sid,
- region_model_context *ctxt)
-{
- /* Handle some kinds of casting. */
- if (m_type)
- {
- svalue *sval = model.get_svalue (rhs_sid);
- if (sval->get_type ())
- rhs_sid = model.maybe_cast (m_type, rhs_sid, ctxt);
-
- sval = model.get_svalue (rhs_sid);
- if (sval->get_type ())
- gcc_assert (m_type == sval->get_type ());
- }
-
- m_sval_id = rhs_sid;
-
- /* Update views.
- If this is a view, it becomes its parent's active view.
- If there was already an active views, invalidate its value; otherwise
- if the parent itself had a value, invalidate it.
- If it's not a view, then deactivate any view that is active on this
- region. */
- {
- if (m_is_view)
- become_active_view (model, this_rid);
- else
- {
- deactivate_any_active_view (model);
- gcc_assert (m_active_view_rid.null_p ());
- }
- }
-}
-
-/* Make this region (with id THIS_RID) the "active" view of its parent.
- Any other active view has its value set to "unknown" and descendent values
- cleared.
- If there wasn't an active view, then set the parent's value to unknown, and
- clear its descendent values (apart from this view). */
-
-void
-region::become_active_view (region_model &model, region_id this_rid)
-{
- gcc_assert (m_is_view);
-
- region *parent_reg = model.get_region (m_parent_rid);
- gcc_assert (parent_reg);
-
- region_id old_active_view_rid = parent_reg->m_active_view_rid;
-
- if (old_active_view_rid == this_rid)
- {
- /* Already the active view: do nothing. */
- return;
- }
-
- /* We have a change of active view. */
- parent_reg->m_active_view_rid = this_rid;
-
- if (old_active_view_rid.null_p ())
- {
- /* No previous active view, but the parent and its other children
- might have values.
- If so, invalidate those values - but not that of the new view. */
- region_id_set below_region (&model);
- model.get_descendents (m_parent_rid, &below_region, this_rid);
- for (unsigned i = 0; i < model.get_num_regions (); i++)
- {
- region_id rid (region_id::from_int (i));
- if (below_region.region_p (rid))
- {
- region *other_reg = model.get_region (rid);
- other_reg->m_sval_id = svalue_id::null ();
- }
- }
- region *parent = model.get_region (m_parent_rid);
- parent->m_sval_id
- = model.add_svalue (new unknown_svalue (parent->get_type ()));
- }
- else
- {
- /* If there was an active view, invalidate it. */
- region *old_active_view = model.get_region (old_active_view_rid);
- old_active_view->deactivate_view (model, old_active_view_rid);
- }
-}
-
-/* If this region (with id THIS_RID) has an active view, deactivate it,
- clearing m_active_view_rid. */
-
-void
-region::deactivate_any_active_view (region_model &model)
-{
- if (m_active_view_rid.null_p ())
- return;
- region *view = model.get_region (m_active_view_rid);
- view->deactivate_view (model, m_active_view_rid);
- m_active_view_rid = region_id::null ();
-}
-
-/* Clear any values for regions below THIS_RID.
- Set the view's value to unknown. */
-
-void
-region::deactivate_view (region_model &model, region_id this_view_rid)
-{
- gcc_assert (is_view_p ());
-
- /* Purge values from old_active_this_view_rid and all its
- descendents. Potentially we could use a poison value
- for this, but let's use unknown for now. */
- region_id_set below_view (&model);
- model.get_descendents (this_view_rid, &below_view, region_id::null ());
-
- for (unsigned i = 0; i < model.get_num_regions (); i++)
- {
- region_id rid (region_id::from_int (i));
- if (below_view.region_p (rid))
- {
- region *other_reg = model.get_region (rid);
- other_reg->m_sval_id = svalue_id::null ();
- }
- }
-
- m_sval_id = model.add_svalue (new unknown_svalue (get_type ()));
-}
-
-/* Get a value for this region, either its value if it has one,
- or, failing that, "inherit" a value from first ancestor with a
- non-null value.
-
- For example, when getting the value for a local variable within
- a stack frame that doesn't have one, the frame doesn't have a value
- either, but the stack as a whole will have an "uninitialized" poison
- value, so inherit that. */
-
-svalue_id
-region::get_value (region_model &model, bool non_null,
- region_model_context *ctxt)
-{
- /* If this region has a value, use it. */
- if (!m_sval_id.null_p ())
- return m_sval_id;
-
- /* Otherwise, "inherit" value from first ancestor with a
- non-null value. */
-
- region *parent = model.get_region (m_parent_rid);
- if (parent)
- {
- svalue_id inherited_sid
- = parent->get_inherited_child_sid (this, model, ctxt);
- if (!inherited_sid.null_p ())
- return inherited_sid;
- }
-
- /* If a non-null value has been requested, then generate
- a new unknown value. Store it, so that repeated reads from this
- region will yield the same unknown value. */
- if (non_null)
- {
- svalue_id unknown_sid = model.add_svalue (new unknown_svalue (m_type));
- m_sval_id = unknown_sid;
- return unknown_sid;
- }
-
- return svalue_id::null ();
-}
-
-/* Get a value for CHILD, inheriting from this region.
-
- Recurse, so this region will inherit a value if it doesn't already
- have one. */
-
-svalue_id
-region::get_inherited_child_sid (region *child,
- region_model &model,
- region_model_context *ctxt)
-{
- if (m_sval_id.null_p ())
- {
- /* Recurse. */
- if (!m_parent_rid.null_p ())
- {
- region *parent = model.get_region (m_parent_rid);
- m_sval_id = parent->get_inherited_child_sid (this, model, ctxt);
- }
- }
-
- if (!m_sval_id.null_p ())
- {
- /* Clone the parent's value, so that attempts to update it
- (e.g giving a specific value to an inherited "uninitialized"
- value) touch the child, and not the parent. */
- svalue *this_value = model.get_svalue (m_sval_id);
- svalue_id new_child_sid
- = this_value->get_child_sid (this, child, model, ctxt);
- if (ctxt)
- ctxt->on_inherited_svalue (m_sval_id, new_child_sid);
- child->m_sval_id = new_child_sid;
- return new_child_sid;
- }
-
- return svalue_id::null ();
-}
-
-/* Copy from SRC_RID to DST_RID, using CTXT for any issues that occur.
- Copy across any value for the region, and handle structs, unions
- and arrays recursively. */
-
-void
-region_model::copy_region (region_id dst_rid, region_id src_rid,
- region_model_context *ctxt)
-{
- gcc_assert (!dst_rid.null_p ());
- gcc_assert (!src_rid.null_p ());
- if (dst_rid == src_rid)
- return;
- region *dst_reg = get_region (dst_rid);
- region *src_reg = get_region (src_rid);
-
- /* Copy across any value for the src region itself. */
- svalue_id sid = src_reg->get_value (*this, true, ctxt);
- set_value (dst_rid, sid, ctxt);
-
- if (dst_reg->get_kind () != src_reg->get_kind ())
- return;
-
- /* Copy across child regions for structs, unions, and arrays. */
- switch (dst_reg->get_kind ())
- {
- case RK_PRIMITIVE:
- return;
- case RK_STRUCT:
- {
- struct_region *dst_sub = as_a <struct_region *> (dst_reg);
- struct_region *src_sub = as_a <struct_region *> (src_reg);
- copy_struct_region (dst_rid, dst_sub, src_sub, ctxt);
- }
- return;
- case RK_UNION:
- {
- union_region *src_sub = as_a <union_region *> (src_reg);
- copy_union_region (dst_rid, src_sub, ctxt);
- }
- return;
- case RK_FRAME:
- case RK_GLOBALS:
- case RK_CODE:
- case RK_FUNCTION:
- return;
- case RK_ARRAY:
- {
- array_region *dst_sub = as_a <array_region *> (dst_reg);
- array_region *src_sub = as_a <array_region *> (src_reg);
- copy_array_region (dst_rid, dst_sub, src_sub, ctxt);
- }
- return;
- case RK_STACK:
- case RK_HEAP:
- case RK_ROOT:
- case RK_SYMBOLIC:
- return;
- }
-}
-
-/* Subroutine of region_model::copy_region for copying the child
- regions for a struct. */
-
-void
-region_model::copy_struct_region (region_id dst_rid,
- struct_region *dst_reg,
- struct_region *src_reg,
- region_model_context *ctxt)
-{
- for (map_region::iterator_t iter = src_reg->begin ();
- iter != src_reg->end (); ++iter)
- {
- tree src_key = (*iter).first;
- region_id src_field_rid = (*iter).second;
- region *src_field_reg = get_region (src_field_rid);
- region_id dst_field_rid
- = dst_reg->get_or_create (this, dst_rid, src_key,
- src_field_reg->get_type (), ctxt);
- copy_region (dst_field_rid, src_field_rid, ctxt);
- }
-}
-
-/* Subroutine of region_model::copy_region for copying the active
- child region for a union. */
-
-void
-region_model::copy_union_region (region_id dst_rid,
- union_region *src_reg,
- region_model_context *ctxt)
-{
- region_id src_active_view_rid = src_reg->get_active_view ();
- if (src_active_view_rid.null_p ())
- return;
- region *src_active_view = get_region (src_active_view_rid);
- tree type = src_active_view->get_type ();
- region_id dst_active_view_rid = get_or_create_view (dst_rid, type, ctxt);
- copy_region (dst_active_view_rid, src_active_view_rid, ctxt);
-}
-
-/* Subroutine of region_model::copy_region for copying the child
- regions for an array. */
-
-void
-region_model::copy_array_region (region_id dst_rid,
- array_region *dst_reg,
- array_region *src_reg,
- region_model_context *ctxt)
-{
- for (array_region::iterator_t iter = src_reg->begin ();
- iter != src_reg->end (); ++iter)
- {
- array_region::key_t src_key = (*iter).first;
- region_id src_field_rid = (*iter).second;
- region *src_field_reg = get_region (src_field_rid);
- region_id dst_field_rid
- = dst_reg->get_or_create (this, dst_rid, src_key,
- src_field_reg->get_type (), ctxt);
- copy_region (dst_field_rid, src_field_rid, ctxt);
- }
-}
-
-/* Generate a hash value for this region. The work is done by the
- add_to_hash vfunc. */
-
-hashval_t
-region::hash () const
-{
- inchash::hash hstate;
- add_to_hash (hstate);
- return hstate.end ();
-}
-
-/* Print a one-liner representation of this region to PP, assuming
- that this region is within MODEL and its id is THIS_RID. */
-
-void
-region::print (const region_model &model,
- region_id this_rid,
- pretty_printer *pp) const
-{
- this_rid.print (pp);
- pp_string (pp, ": {");
-
- /* vfunc. */
- print_fields (model, this_rid, pp);
-
- pp_string (pp, "}");
-}
-
-/* Base class implementation of region::dump_dot_to_pp vfunc. */
-
-void
-region::dump_dot_to_pp (const region_model &model,
- region_id this_rid,
- pretty_printer *pp) const
-{
- this_rid.dump_node_name_to_pp (pp);
- pp_printf (pp, " [shape=none,margin=0,style=filled,fillcolor=%s,label=\"",
- "lightgrey");
- pp_write_text_to_stream (pp);
- print (model, this_rid, pp);
- pp_write_text_as_dot_label_to_stream (pp, /*for_record=*/false);
- pp_string (pp, "\"];");
- pp_newline (pp);
-
- /* Add edge to svalue. */
- if (!m_sval_id.null_p ())
- {
- this_rid.dump_node_name_to_pp (pp);
- pp_string (pp, " -> ");
- m_sval_id.dump_node_name_to_pp (pp);
- pp_string (pp, ";");
- pp_newline (pp);
- }
-
- /* Add edge to parent. */
- if (!m_parent_rid.null_p ())
- {
- this_rid.dump_node_name_to_pp (pp);
- pp_string (pp, " -> ");
- m_parent_rid.dump_node_name_to_pp (pp);
- pp_string (pp, ";");
- pp_newline (pp);
- }
-}
-
-/* Dump a tree-like ASCII-art representation of this region to PP. */
-
-void
-region::dump_to_pp (const region_model &model,
- region_id this_rid,
- pretty_printer *pp,
- const char *prefix,
- bool is_last_child) const
-{
- print (model, this_rid, pp);
- pp_newline (pp);
-
- const char *new_prefix;
- if (!m_parent_rid.null_p ())
- new_prefix = ACONCAT ((prefix, is_last_child ? " " : "| ", NULL));
- else
- new_prefix = prefix;
-
- const char *begin_color = colorize_start (pp_show_color (pp), "note");
- const char *end_color = colorize_stop (pp_show_color (pp));
- char *field_prefix
- = ACONCAT ((begin_color, new_prefix, "|:", end_color, NULL));
-
- if (!m_sval_id.null_p ())
- {
- pp_printf (pp, "%s sval: ", field_prefix);
- model.get_svalue (m_sval_id)->print (model, m_sval_id, pp);
- pp_newline (pp);
- }
- if (m_type)
- {
- pp_printf (pp, "%s type: ", field_prefix);
- print_quoted_type (pp, m_type);
- pp_newline (pp);
- }
-
- /* Find the children. */
-
- auto_vec<region_id> child_rids;
- unsigned i;
- for (unsigned i = 0; i < model.get_num_regions (); ++i)
- {
- region_id rid = region_id::from_int (i);
- region *child = model.get_region (rid);
- if (child->m_parent_rid == this_rid)
- child_rids.safe_push (rid);
- }
-
- /* Print the children, using dump_child_label to label them. */
-
- region_id *child_rid;
- FOR_EACH_VEC_ELT (child_rids, i, child_rid)
- {
- is_last_child = (i == child_rids.length () - 1);
- if (!this_rid.null_p ())
- {
- const char *tail = is_last_child ? "`-" : "|-";
- pp_printf (pp, "%r%s%s%R", "note", new_prefix, tail);
- }
- dump_child_label (model, this_rid, *child_rid, pp);
- model.get_region (*child_rid)->dump_to_pp (model, *child_rid, pp,
- new_prefix,
- is_last_child);
- }
-}
-
-/* Base implementation of region::dump_child_label vfunc. */
-
-void
-region::dump_child_label (const region_model &model,
- region_id this_rid ATTRIBUTE_UNUSED,
- region_id child_rid,
- pretty_printer *pp) const
-{
- region *child = model.get_region (child_rid);
- if (child->m_is_view)
- {
- gcc_assert (TYPE_P (child->get_type ()));
- if (m_active_view_rid == child_rid)
- pp_string (pp, "active ");
- else
- pp_string (pp, "inactive ");
- pp_string (pp, "view as ");
- print_quoted_type (pp, child->get_type ());
- pp_string (pp, ": ");
- }
-}
-
-/* Base implementation of region::validate vfunc.
- Assert that the fields of "region" are valid; subclasses should
- chain up their implementation to this one. */
-
-void
-region::validate (const region_model &model) const
-{
- m_parent_rid.validate (model);
- m_sval_id.validate (model);
- unsigned i;
- region_id *view_rid;
- FOR_EACH_VEC_ELT (m_view_rids, i, view_rid)
- {
- gcc_assert (!view_rid->null_p ());
- view_rid->validate (model);
- }
- m_active_view_rid.validate (model);
-}
-
-/* Apply MAP to svalue_ids to this region. This updates the value
- for the region (if any). */
-
-void
-region::remap_svalue_ids (const svalue_id_map &map)
-{
- map.update (&m_sval_id);
-}
-
-/* Base implementation of region::remap_region_ids vfunc; subclasses should
- chain up to this, updating any region_id data. */
-
-void
-region::remap_region_ids (const region_id_map &map)
-{
- map.update (&m_parent_rid);
- unsigned i;
- region_id *view_rid;
- FOR_EACH_VEC_ELT (m_view_rids, i, view_rid)
- map.update (view_rid);
- map.update (&m_active_view_rid);
-}
-
-/* Add a new region with id VIEW_RID as a view of this region. */
-
-void
-region::add_view (region_id view_rid, region_model *model)
-{
- gcc_assert (!view_rid.null_p ());
- region *new_view = model->get_region (view_rid);
- new_view->m_is_view = true;
- gcc_assert (!new_view->m_parent_rid.null_p ());
- gcc_assert (new_view->m_sval_id.null_p ());
-
- //gcc_assert (new_view->get_type () != NULL_TREE);
- // TODO: this can sometimes be NULL, when viewing through a (void *)
-
- // TODO: the type ought to not be present yet
-
- m_view_rids.safe_push (view_rid);
-}
-
-/* Look for a view of type TYPE of this region, returning its id if found,
- or null otherwise. */
-
-region_id
-region::get_view (tree type, region_model *model) const
-{
- unsigned i;
- region_id *view_rid;
- FOR_EACH_VEC_ELT (m_view_rids, i, view_rid)
- {
- region *view = model->get_region (*view_rid);
- gcc_assert (view->m_is_view);
- if (view->get_type () == type)
- return *view_rid;
- }
- return region_id::null ();
-}
-
-/* region's ctor. */
-
-region::region (region_id parent_rid, svalue_id sval_id, tree type)
-: m_parent_rid (parent_rid), m_sval_id (sval_id), m_type (type),
- m_view_rids (), m_is_view (false), m_active_view_rid (region_id::null ())
-{
- gcc_assert (type == NULL_TREE || TYPE_P (type));
-}
-
-/* region's copy ctor. */
-
-region::region (const region &other)
-: m_parent_rid (other.m_parent_rid), m_sval_id (other.m_sval_id),
- m_type (other.m_type), m_view_rids (other.m_view_rids.length ()),
- m_is_view (other.m_is_view), m_active_view_rid (other.m_active_view_rid)
-{
- int i;
- region_id *rid;
- FOR_EACH_VEC_ELT (other.m_view_rids, i, rid)
- m_view_rids.quick_push (*rid);
-}
-
-/* Base implementation of region::add_to_hash vfunc; subclasses should
- chain up to this. */
-
-void
-region::add_to_hash (inchash::hash &hstate) const
-{
- inchash::add (m_parent_rid, hstate);
- inchash::add (m_sval_id, hstate);
- hstate.add_ptr (m_type);
- // TODO: views
-}
-
-/* Base implementation of region::print_fields vfunc. */
-
-void
-region::print_fields (const region_model &model ATTRIBUTE_UNUSED,
- region_id this_rid ATTRIBUTE_UNUSED,
- pretty_printer *pp) const
-{
- pp_printf (pp, "kind: %qs", region_kind_to_str (get_kind ()));
-
- pp_string (pp, ", parent: ");
- m_parent_rid.print (pp);
-
- pp_printf (pp, ", sval: ");
- m_sval_id.print (pp);
-
- if (m_type)
- {
- pp_printf (pp, ", type: ");
- print_quoted_type (pp, m_type);
- }
-}
-
-/* Determine if a pointer to this region must be non-NULL.
-
- Generally, pointers to regions must be non-NULL, but pointers
- to symbolic_regions might, in fact, be NULL.
-
- This allows us to simulate functions like malloc and calloc with:
- - only one "outcome" from each statement,
- - the idea that the pointer is on the heap if non-NULL
- - the possibility that the pointer could be NULL
- - the idea that successive values returned from malloc are non-equal
- - to be able to zero-fill for calloc. */
-
-bool
-region::non_null_p (const region_model &model) const
-{
- /* Look through views to get at the underlying region. */
- if (is_view_p ())
- return model.get_region (m_parent_rid)->non_null_p (model);
-
- /* Are we within a symbolic_region? If so, it could be NULL. */
- if (const symbolic_region *sym_reg = dyn_cast_symbolic_region ())
- {
- if (sym_reg->m_possibly_null)
- return false;
- }
-
- return true;
-}
-
-/* class primitive_region : public region. */
-
-/* Implementation of region::clone vfunc for primitive_region. */
-
-region *
-primitive_region::clone () const
-{
- return new primitive_region (*this);
-}
-
-/* Implementation of region::walk_for_canonicalization vfunc for
- primitive_region. */
-
-void
-primitive_region::walk_for_canonicalization (canonicalization *) const
-{
- /* Empty. */
-}
-
-/* class map_region : public region. */
-
-/* map_region's copy ctor. */
-
-map_region::map_region (const map_region &other)
-: region (other),
- m_map (other.m_map)
-{
-}
-
-/* Compare the fields of this map_region with OTHER, returning true
- if they are equal.
- For use by region::operator==. */
-
-bool
-map_region::compare_fields (const map_region &other) const
-{
- if (m_map.elements () != other.m_map.elements ())
- return false;
-
- for (map_t::iterator iter = m_map.begin ();
- iter != m_map.end ();
- ++iter)
- {
- tree key = (*iter).first;
- region_id e = (*iter).second;
- region_id *other_slot = const_cast <map_t &> (other.m_map).get (key);
- if (other_slot == NULL)
- return false;
- if (e != *other_slot)
- return false;
- }
- return true;
-}
-
-/* Implementation of region::print_fields vfunc for map_region. */
-
-void
-map_region::print_fields (const region_model &model,
- region_id this_rid,
- pretty_printer *pp) const
-{
- region::print_fields (model, this_rid, pp);
- pp_string (pp, ", map: {");
- for (map_t::iterator iter = m_map.begin ();
- iter != m_map.end ();
- ++iter)
- {
- if (iter != m_map.begin ())
- pp_string (pp, ", ");
- tree expr = (*iter).first;
- region_id child_rid = (*iter).second;
- dump_quoted_tree (pp, expr);
- pp_string (pp, ": ");
- child_rid.print (pp);
- }
- pp_string (pp, "}");
-}
-
-/* Implementation of region::validate vfunc for map_region. */
-
-void
-map_region::validate (const region_model &model) const
-{
- region::validate (model);
- for (map_t::iterator iter = m_map.begin ();
- iter != m_map.end ();
- ++iter)
- {
- region_id child_rid = (*iter).second;
- child_rid.validate (model);
- }
-}
-
-/* Implementation of region::dump_dot_to_pp vfunc for map_region. */
-
-void
-map_region::dump_dot_to_pp (const region_model &model,
- region_id this_rid,
- pretty_printer *pp) const
-{
- region::dump_dot_to_pp (model, this_rid, pp);
- for (map_t::iterator iter = m_map.begin ();
- iter != m_map.end ();
- ++iter)
- {
- // TODO: add nodes/edges to label things
-
- tree expr = (*iter).first;
- region_id child_rid = (*iter).second;
-
- pp_printf (pp, "rid_label_%i [label=\"", child_rid.as_int ());
- pp_write_text_to_stream (pp);
- pp_printf (pp, "%qE", expr);
- pp_write_text_as_dot_label_to_stream (pp, /*for_record=*/false);
- pp_string (pp, "\"];");
- pp_newline (pp);
-
- pp_printf (pp, "rid_label_%i", child_rid.as_int ());
- pp_string (pp, " -> ");
- child_rid.dump_node_name_to_pp (pp);
- pp_string (pp, ";");
- pp_newline (pp);
- }
-}
-
-/* Implementation of region::dump_child_label vfunc for map_region. */
-
-void
-map_region::dump_child_label (const region_model &model,
- region_id this_rid,
- region_id child_rid,
- pretty_printer *pp) const
-{
- region::dump_child_label (model, this_rid, child_rid, pp);
-
- for (map_t::iterator iter = m_map.begin ();
- iter != m_map.end ();
- ++iter)
- {
- if (child_rid == (*iter).second)
- {
- tree key = (*iter).first;
- dump_quoted_tree (pp, key);
- pp_string (pp, ": ");
- }
- }
-}
-
-/* Look for a child region for KEY within this map_region.
- If it doesn't already exist, create a child map_region, using TYPE for
- its type.
- Return the region_id of the child (whether pre-existing, or
- newly-created).
- Notify CTXT if we don't know how to handle TYPE. */
-
-region_id
-map_region::get_or_create (region_model *model,
- region_id this_rid,
- tree key,
- tree type,
- region_model_context *ctxt)
-{
- gcc_assert (key);
- gcc_assert (valid_key_p (key));
- region_id *slot = m_map.get (key);
- if (slot)
- return *slot;
- region_id child_rid = model->add_region_for_type (this_rid, type, ctxt);
- m_map.put (key, child_rid);
- return child_rid;
-}
-
-/* Get the region_id for the child region for KEY within this
- MAP_REGION, or NULL if there is no such child region. */
-
-region_id *
-map_region::get (tree key)
-{
- gcc_assert (key);
- gcc_assert (valid_key_p (key));
- region_id *slot = m_map.get (key);
- return slot;
-}
-
-/* Implementation of region::add_to_hash vfunc for map_region. */
-
-void
-map_region::add_to_hash (inchash::hash &hstate) const
-{
- region::add_to_hash (hstate);
- // TODO
-}
-
-/* Implementation of region::remap_region_ids vfunc for map_region. */
-
-void
-map_region::remap_region_ids (const region_id_map &map)
-{
- region::remap_region_ids (map);
-
- /* Remap the region ids within the map entries. */
- for (map_t::iterator iter = m_map.begin ();
- iter != m_map.end (); ++iter)
- map.update (&(*iter).second);
-}
-
-/* Remove the binding of KEY to its child region (but not the
- child region itself).
- For use when purging unneeded SSA names. */
-
-void
-map_region::unbind (tree key)
-{
- gcc_assert (key);
- gcc_assert (valid_key_p (key));
- m_map.remove (key);
-}
-
-/* Look for a child region with id CHILD_RID within this map_region.
- If one is found, return its tree key, otherwise return NULL_TREE. */
-
-tree
-map_region::get_tree_for_child_region (region_id child_rid) const
-{
- // TODO: do we want to store an inverse map?
- for (map_t::iterator iter = m_map.begin ();
- iter != m_map.end ();
- ++iter)
- {
- tree key = (*iter).first;
- region_id r = (*iter).second;
- if (r == child_rid)
- return key;
- }
-
- return NULL_TREE;
-}
-
-/* Look for a child region CHILD within this map_region.
- If one is found, return its tree key, otherwise return NULL_TREE. */
-
-tree
-map_region::get_tree_for_child_region (region *child,
- const region_model &model) const
-{
- // TODO: do we want to store an inverse map?
- for (map_t::iterator iter = m_map.begin ();
- iter != m_map.end ();
- ++iter)
- {
- tree key = (*iter).first;
- region_id r = (*iter).second;
- if (model.get_region (r) == child)
- return key;
- }
-
- return NULL_TREE;
-}
-
-/* Comparator for trees to impose a deterministic ordering on
- T1 and T2. */
-
-static int
-tree_cmp (const_tree t1, const_tree t2)
-{
- gcc_assert (t1);
- gcc_assert (t2);
-
- /* Test tree codes first. */
- if (TREE_CODE (t1) != TREE_CODE (t2))
- return TREE_CODE (t1) - TREE_CODE (t2);
-
- /* From this point on, we know T1 and T2 have the same tree code. */
-
- if (DECL_P (t1))
- {
- if (DECL_NAME (t1) && DECL_NAME (t2))
- return strcmp (IDENTIFIER_POINTER (DECL_NAME (t1)),
- IDENTIFIER_POINTER (DECL_NAME (t2)));
- else
- {
- if (DECL_NAME (t1))
- return -1;
- else if (DECL_NAME (t2))
- return 1;
- else
- return DECL_UID (t1) - DECL_UID (t2);
- }
- }
-
- switch (TREE_CODE (t1))
- {
- case SSA_NAME:
- {
- if (SSA_NAME_VAR (t1) && SSA_NAME_VAR (t2))
- {
- int var_cmp = tree_cmp (SSA_NAME_VAR (t1), SSA_NAME_VAR (t2));
- if (var_cmp)
- return var_cmp;
- return SSA_NAME_VERSION (t1) - SSA_NAME_VERSION (t2);
- }
- else
- {
- if (SSA_NAME_VAR (t1))
- return -1;
- else if (SSA_NAME_VAR (t2))
- return 1;
- else
- return SSA_NAME_VERSION (t1) - SSA_NAME_VERSION (t2);
- }
- }
- break;
-
- case INTEGER_CST:
- return tree_int_cst_compare (t1, t2);
-
- case REAL_CST:
- {
- const real_value *rv1 = TREE_REAL_CST_PTR (t1);
- const real_value *rv2 = TREE_REAL_CST_PTR (t2);
- if (real_compare (UNORDERED_EXPR, rv1, rv2))
- {
- /* Impose an arbitrary order on NaNs relative to other NaNs
- and to non-NaNs. */
- if (int cmp_isnan = real_isnan (rv1) - real_isnan (rv2))
- return cmp_isnan;
- if (int cmp_issignaling_nan
- = real_issignaling_nan (rv1) - real_issignaling_nan (rv2))
- return cmp_issignaling_nan;
- return real_isneg (rv1) - real_isneg (rv2);
- }
- if (real_compare (LT_EXPR, rv1, rv2))
- return -1;
- if (real_compare (GT_EXPR, rv1, rv2))
- return 1;
- return 0;
- }
-
- case STRING_CST:
- return strcmp (TREE_STRING_POINTER (t1),
- TREE_STRING_POINTER (t2));
-
- default:
- gcc_unreachable ();
- break;
- }
-
- gcc_unreachable ();
-
- return 0;
-}
-
-/* qsort comparator for trees to impose a deterministic ordering on
- P1 and P2. */
-
-static int
-tree_cmp (const void *p1, const void *p2)
-{
- const_tree t1 = *(const_tree const *)p1;
- const_tree t2 = *(const_tree const *)p2;
-
- return tree_cmp (t1, t2);
-}
-
-/* Attempt to merge MAP_REGION_A and MAP_REGION_B into MERGED_MAP_REGION,
- which has region_id MERGED_RID, using MERGER.
- Return true if the merger is possible, false otherwise. */
-
-bool
-map_region::can_merge_p (const map_region *map_region_a,
- const map_region *map_region_b,
- map_region *merged_map_region,
- region_id merged_rid,
- model_merger *merger)
-{
- for (map_t::iterator iter = map_region_a->m_map.begin ();
- iter != map_region_a->m_map.end ();
- ++iter)
- {
- tree key_a = (*iter).first;
- region_id rid_a = (*iter).second;
-
- if (const region_id *slot_b
- = const_cast<map_region *>(map_region_b)->m_map.get (key_a))
- {
- region_id rid_b = *slot_b;
-
- region *child_region_a = merger->get_region_a <region> (rid_a);
- region *child_region_b = merger->get_region_b <region> (rid_b);
-
- gcc_assert (child_region_a->get_type ()
- == child_region_b->get_type ());
-
- gcc_assert (child_region_a->get_kind ()
- == child_region_b->get_kind ());
-
- region_id child_merged_rid
- = merged_map_region->get_or_create (merger->m_merged_model,
- merged_rid,
- key_a,
- child_region_a->get_type (),
- NULL);
-
- region *child_merged_region
- = merger->m_merged_model->get_region (child_merged_rid);
-
- /* Consider values. */
- svalue_id child_a_sid = child_region_a->get_value_direct ();
- svalue_id child_b_sid = child_region_b->get_value_direct ();
- svalue_id child_merged_sid;
- if (!merger->can_merge_values_p (child_a_sid, child_b_sid,
- &child_merged_sid))
- return false;
- if (!child_merged_sid.null_p ())
- child_merged_region->set_value (*merger->m_merged_model,
- child_merged_rid,
- child_merged_sid,
- NULL);
-
- if (map_region *map_region_a = child_region_a->dyn_cast_map_region ())
- {
- /* Recurse. */
- if (!can_merge_p (map_region_a,
- as_a <map_region *> (child_region_b),
- as_a <map_region *> (child_merged_region),
- child_merged_rid,
- merger))
- return false;
- }
-
- }
- else
- {
- /* TODO: region is present in A, but absent in B. */
- }
- }
-
- /* TODO: check for keys in B that aren't in A. */
-
- return true;
-}
-
-
-/* Implementation of region::walk_for_canonicalization vfunc for
- map_region. */
-
-void
-map_region::walk_for_canonicalization (canonicalization *c) const
-{
- auto_vec<tree> keys (m_map.elements ());
- for (map_t::iterator iter = m_map.begin ();
- iter != m_map.end ();
- ++iter)
- {
- tree key_a = (*iter).first;
- keys.quick_push (key_a);
- }
- keys.qsort (tree_cmp);
-
- unsigned i;
- tree key;
- FOR_EACH_VEC_ELT (keys, i, key)
- {
- region_id rid = *const_cast<map_region *>(this)->m_map.get (key);
- c->walk_rid (rid);
- }
-}
-
-/* For debugging purposes: look for a child region for a decl named
- IDENTIFIER (or an SSA_NAME for such a decl), returning its value,
- or svalue_id::null if none are found. */
-
-svalue_id
-map_region::get_value_by_name (tree identifier,
- const region_model &model) const
-{
- for (map_t::iterator iter = m_map.begin ();
- iter != m_map.end ();
- ++iter)
- {
- tree key = (*iter).first;
- if (TREE_CODE (key) == SSA_NAME)
- if (SSA_NAME_VAR (key))
- key = SSA_NAME_VAR (key);
- if (DECL_P (key))
- if (DECL_NAME (key) == identifier)
- {
- region_id rid = (*iter).second;
- region *region = model.get_region (rid);
- return region->get_value (const_cast<region_model &>(model),
- false, NULL);
- }
- }
- return svalue_id::null ();
-}
-
-/* class struct_or_union_region : public map_region. */
-
-/* Implementation of map_region::valid_key_p vfunc for
- struct_or_union_region. */
-
-bool
-struct_or_union_region::valid_key_p (tree key) const
-{
- return TREE_CODE (key) == FIELD_DECL;
-}
-
-/* Compare the fields of this struct_or_union_region with OTHER, returning
- true if they are equal.
- For use by region::operator==. */
-
-bool
-struct_or_union_region::compare_fields (const struct_or_union_region &other)
- const
-{
- return map_region::compare_fields (other);
-}
-
-/* class struct_region : public struct_or_union_region. */
-
-/* Implementation of region::clone vfunc for struct_region. */
-
-region *
-struct_region::clone () const
-{
- return new struct_region (*this);
-}
-
-/* Compare the fields of this struct_region with OTHER, returning true
- if they are equal.
- For use by region::operator==. */
-
-bool
-struct_region::compare_fields (const struct_region &other) const
-{
- return struct_or_union_region::compare_fields (other);
-}
-
-/* class union_region : public struct_or_union_region. */
-
-/* Implementation of region::clone vfunc for union_region. */
-
-region *
-union_region::clone () const
-{
- return new union_region (*this);
-}
-
-/* Compare the fields of this union_region with OTHER, returning true
- if they are equal.
- For use by region::operator==. */
-
-bool
-union_region::compare_fields (const union_region &other) const
-{
- return struct_or_union_region::compare_fields (other);
-}
-
-/* class frame_region : public map_region. */
-
-/* Compare the fields of this frame_region with OTHER, returning true
- if they are equal.
- For use by region::operator==. */
-
-bool
-frame_region::compare_fields (const frame_region &other) const
-{
- if (!map_region::compare_fields (other))
- return false;
- if (m_fun != other.m_fun)
- return false;
- if (m_depth != other.m_depth)
- return false;
- return true;
-}
-
-/* Implementation of region::clone vfunc for frame_region. */
-
-region *
-frame_region::clone () const
-{
- return new frame_region (*this);
-}
-
-/* Implementation of map_region::valid_key_p vfunc for frame_region. */
-
-bool
-frame_region::valid_key_p (tree key) const
-{
- // TODO: could also check that VAR_DECLs are locals
- return (TREE_CODE (key) == PARM_DECL
- || TREE_CODE (key) == VAR_DECL
- || TREE_CODE (key) == SSA_NAME
- || TREE_CODE (key) == RESULT_DECL);
-}
-
-/* Implementation of region::print_fields vfunc for frame_region. */
-
-void
-frame_region::print_fields (const region_model &model,
- region_id this_rid,
- pretty_printer *pp) const
-{
- map_region::print_fields (model, this_rid, pp);
- pp_printf (pp, ", function: %qs, depth: %i", function_name (m_fun), m_depth);
-}
-
-/* Implementation of region::add_to_hash vfunc for frame_region. */
-
-void
-frame_region::add_to_hash (inchash::hash &hstate) const
-{
- map_region::add_to_hash (hstate);
- hstate.add_ptr (m_fun);
- hstate.add_int (m_depth);
-}
-
-/* class globals_region : public scope_region. */
-
-/* Compare the fields of this globals_region with OTHER, returning true
- if they are equal.
- For use by region::operator==. */
-
-bool
-globals_region::compare_fields (const globals_region &other) const
-{
- return map_region::compare_fields (other);
-}
-
-/* Implementation of region::clone vfunc for globals_region. */
-
-region *
-globals_region::clone () const
-{
- return new globals_region (*this);
-}
-
-/* Implementation of map_region::valid_key_p vfunc for globals_region. */
-
-bool
-globals_region::valid_key_p (tree key) const
-{
- return TREE_CODE (key) == VAR_DECL;
-}
-
-/* class code_region : public map_region. */
-
-/* Compare the fields of this code_region with OTHER, returning true
- if they are equal.
- For use by region::operator==. */
-
-bool
-code_region::compare_fields (const code_region &other) const
-{
- return map_region::compare_fields (other);
-}
-
-/* Implementation of region::clone vfunc for code_region. */
-
-region *
-code_region::clone () const
-{
- return new code_region (*this);
-}
-
-/* Implementation of map_region::valid_key_p vfunc for code_region. */
-
-bool
-code_region::valid_key_p (tree key) const
-{
- return TREE_CODE (key) == FUNCTION_DECL;
-}
-
-/* class array_region : public region. */
-
-/* array_region's copy ctor. */
-
-array_region::array_region (const array_region &other)
-: region (other),
- m_map (other.m_map)
-{
-}
-
-/* Get a child region for the element with index INDEX_SID. */
-
-region_id
-array_region::get_element (region_model *model,
- region_id this_rid,
- svalue_id index_sid,
- region_model_context *ctxt)
-{
- tree element_type = TREE_TYPE (get_type ());
- svalue *index_sval = model->get_svalue (index_sid);
- if (tree cst_index = index_sval->maybe_get_constant ())
- {
- key_t key = key_from_constant (cst_index);
- region_id element_rid
- = get_or_create (model, this_rid, key, element_type, ctxt);
- return element_rid;
- }
-
- return model->get_or_create_view (this_rid, element_type, ctxt);
-}
-
-/* Implementation of region::clone vfunc for array_region. */
-
-region *
-array_region::clone () const
-{
- return new array_region (*this);
-}
-
-/* Compare the fields of this array_region with OTHER, returning true
- if they are equal.
- For use by region::operator==. */
-
-bool
-array_region::compare_fields (const array_region &other) const
-{
- if (m_map.elements () != other.m_map.elements ())
- return false;
-
- for (map_t::iterator iter = m_map.begin ();
- iter != m_map.end ();
- ++iter)
- {
- int key = (*iter).first;
- region_id e = (*iter).second;
- region_id *other_slot = const_cast <map_t &> (other.m_map).get (key);
- if (other_slot == NULL)
- return false;
- if (e != *other_slot)
- return false;
- }
- return true;
-}
-
-/* Implementation of region::print_fields vfunc for array_region. */
-
-void
-array_region::print_fields (const region_model &model,
- region_id this_rid,
- pretty_printer *pp) const
-{
- region::print_fields (model, this_rid, pp);
- pp_string (pp, ", array: {");
- for (map_t::iterator iter = m_map.begin ();
- iter != m_map.end ();
- ++iter)
- {
- if (iter != m_map.begin ())
- pp_string (pp, ", ");
- int key = (*iter).first;
- region_id child_rid = (*iter).second;
- pp_printf (pp, "[%i]: ", key);
- child_rid.print (pp);
- }
- pp_string (pp, "}");
-}
-
-/* Implementation of region::validate vfunc for array_region. */
-
-void
-array_region::validate (const region_model &model) const
-{
- region::validate (model);
- for (map_t::iterator iter = m_map.begin ();
- iter != m_map.end ();
- ++iter)
- {
- region_id child_rid = (*iter).second;
- child_rid.validate (model);
- }
-}
-
-/* Implementation of region::dump_dot_to_pp vfunc for array_region. */
-
-void
-array_region::dump_dot_to_pp (const region_model &model,
- region_id this_rid,
- pretty_printer *pp) const
-{
- region::dump_dot_to_pp (model, this_rid, pp);
- for (map_t::iterator iter = m_map.begin ();
- iter != m_map.end ();
- ++iter)
- {
- // TODO: add nodes/edges to label things
-
- int key = (*iter).first;
- region_id child_rid = (*iter).second;
-
- pp_printf (pp, "rid_label_%i [label=\"", child_rid.as_int ());
- pp_write_text_to_stream (pp);
- pp_printf (pp, "%qi", key);
- pp_write_text_as_dot_label_to_stream (pp, /*for_record=*/false);
- pp_string (pp, "\"];");
- pp_newline (pp);
-
- pp_printf (pp, "rid_label_%i", child_rid.as_int ());
- pp_string (pp, " -> ");
- child_rid.dump_node_name_to_pp (pp);
- pp_string (pp, ";");
- pp_newline (pp);
- }
-}
-
-/* Implementation of region::dump_child_label vfunc for array_region. */
-
-void
-array_region::dump_child_label (const region_model &model,
- region_id this_rid,
- region_id child_rid,
- pretty_printer *pp) const
-{
- region::dump_child_label (model, this_rid, child_rid, pp);
-
- for (map_t::iterator iter = m_map.begin ();
- iter != m_map.end ();
- ++iter)
- {
- if (child_rid == (*iter).second)
- {
- int key = (*iter).first;
- pp_printf (pp, "[%i]: ", key);
- }
- }
-}
-
-/* Look for a child region for KEY within this array_region.
- If it doesn't already exist, create a child array_region, using TYPE for
- its type.
- Return the region_id of the child (whether pre-existing, or
- newly-created).
- Notify CTXT if we don't know how to handle TYPE. */
-
-region_id
-array_region::get_or_create (region_model *model,
- region_id this_rid,
- key_t key,
- tree type,
- region_model_context *ctxt)
-{
- region_id *slot = m_map.get (key);
- if (slot)
- return *slot;
- region_id child_rid = model->add_region_for_type (this_rid, type, ctxt);
- m_map.put (key, child_rid);
- return child_rid;
-}
-
-/* Get the region_id for the child region for KEY within this
- ARRAY_REGION, or NULL if there is no such child region. */
-
-region_id *
-array_region::get (key_t key)
-{
- region_id *slot = m_map.get (key);
- return slot;
-}
-
-/* Implementation of region::add_to_hash vfunc for array_region. */
-
-void
-array_region::add_to_hash (inchash::hash &hstate) const
-{
- region::add_to_hash (hstate);
- // TODO
-}
-
-/* Implementation of region::remap_region_ids vfunc for array_region. */
-
-void
-array_region::remap_region_ids (const region_id_map &map)
-{
- region::remap_region_ids (map);
-
- /* Remap the region ids within the map entries. */
- for (map_t::iterator iter = m_map.begin ();
- iter != m_map.end (); ++iter)
- map.update (&(*iter).second);
-}
-
-/* Look for a child region with id CHILD_RID within this array_region.
- If one is found, write its key to *OUT and return true,
- otherwise return false. */
-
-bool
-array_region::get_key_for_child_region (region_id child_rid, key_t *out) const
-{
- // TODO: do we want to store an inverse map?
- for (map_t::iterator iter = m_map.begin ();
- iter != m_map.end ();
- ++iter)
- {
- key_t key = (*iter).first;
- region_id r = (*iter).second;
- if (r == child_rid)
- {
- *out = key;
- return true;
- }
- }
-
- return false;
-}
-
-/* qsort comparator for array_region's keys. */
-
-int
-array_region::key_cmp (const void *p1, const void *p2)
-{
- key_t i1 = *(const key_t *)p1;
- key_t i2 = *(const key_t *)p2;
-
- if (i1 > i2)
- return 1;
- else if (i1 < i2)
- return -1;
- else
- return 0;
-}
-
-/* Implementation of region::walk_for_canonicalization vfunc for
- array_region. */
-
-void
-array_region::walk_for_canonicalization (canonicalization *c) const
-{
- auto_vec<int> keys (m_map.elements ());
- for (map_t::iterator iter = m_map.begin ();
- iter != m_map.end ();
- ++iter)
- {
- int key_a = (*iter).first;
- keys.quick_push (key_a);
- }
- keys.qsort (key_cmp);
-
- unsigned i;
- int key;
- FOR_EACH_VEC_ELT (keys, i, key)
- {
- region_id rid = *const_cast<array_region *>(this)->m_map.get (key);
- c->walk_rid (rid);
- }
-}
-
-/* Convert constant CST into an array_region::key_t. */
-
-array_region::key_t
-array_region::key_from_constant (tree cst)
-{
- gcc_assert (CONSTANT_CLASS_P (cst));
- wide_int w = wi::to_wide (cst);
- key_t result = w.to_shwi ();
- return result;
-}
-
-/* Convert array_region::key_t KEY into a tree constant. */
-
-tree
-array_region::constant_from_key (key_t key)
-{
- tree array_type = get_type ();
- tree index_type = TYPE_DOMAIN (array_type);
- return build_int_cst (index_type, key);
-}
-
-/* class function_region : public map_region. */
-
-/* Compare the fields of this function_region with OTHER, returning true
- if they are equal.
- For use by region::operator==. */
-
-bool
-function_region::compare_fields (const function_region &other) const
-{
- return map_region::compare_fields (other);
-}
-
-/* Implementation of region::clone vfunc for function_region. */
-
-region *
-function_region::clone () const
-{
- return new function_region (*this);
-}
-
-/* Implementation of map_region::valid_key_p vfunc for function_region. */
-
-bool
-function_region::valid_key_p (tree key) const
-{
- return TREE_CODE (key) == LABEL_DECL;
-}
-
-/* class stack_region : public region. */
-
-/* stack_region's copy ctor. */
-
-stack_region::stack_region (const stack_region &other)
-: region (other),
- m_frame_rids (other.m_frame_rids.length ())
-{
- int i;
- region_id *frame_rid;
- FOR_EACH_VEC_ELT (other.m_frame_rids, i, frame_rid)
- m_frame_rids.quick_push (*frame_rid);
-}
-
-/* Compare the fields of this stack_region with OTHER, returning true
- if they are equal.
- For use by region::operator==. */
-
-bool
-stack_region::compare_fields (const stack_region &other) const
-{
- if (m_frame_rids.length () != other.m_frame_rids.length ())
- return false;
-
- int i;
- region_id *frame_rid;
- FOR_EACH_VEC_ELT (m_frame_rids, i, frame_rid)
- if (m_frame_rids[i] != other.m_frame_rids[i])
- return false;
-
- return true;
-}
-
-/* Implementation of region::clone vfunc for stack_region. */
-
-region *
-stack_region::clone () const
-{
- return new stack_region (*this);
-}
-
-/* Implementation of region::print_fields vfunc for stack_region. */
-
-void
-stack_region::print_fields (const region_model &model,
- region_id this_rid,
- pretty_printer *pp) const
-{
- region::print_fields (model, this_rid, pp);
- // TODO
-}
-
-/* Implementation of region::dump_child_label vfunc for stack_region. */
-
-void
-stack_region::dump_child_label (const region_model &model,
- region_id this_rid ATTRIBUTE_UNUSED,
- region_id child_rid,
- pretty_printer *pp) const
-{
- function *fun = model.get_region<frame_region> (child_rid)->get_function ();
- pp_printf (pp, "frame for %qs: ", function_name (fun));
-}
-
-/* Implementation of region::validate vfunc for stack_region. */
-
-void
-stack_region::validate (const region_model &model) const
-{
- region::validate (model);
- int i;
- region_id *frame_rid;
- FOR_EACH_VEC_ELT (m_frame_rids, i, frame_rid)
- m_frame_rids[i].validate (model);
-}
-
-/* Push FRAME_RID (for a frame_region) onto this stack. */
-
-void
-stack_region::push_frame (region_id frame_rid)
-{
- m_frame_rids.safe_push (frame_rid);
-}
-
-/* Get the region_id of the top-most frame in this stack, if any. */
-
-region_id
-stack_region::get_current_frame_id () const
-{
- if (m_frame_rids.length () > 0)
- return m_frame_rids[m_frame_rids.length () - 1];
- else
- return region_id::null ();
-}
-
-/* Pop the topmost frame_region from this stack.
-
- If RESULT_DST_RID is non-null, copy any return value from the frame
- into RESULT_DST_RID's region.
-
- Purge the frame region and all its descendent regions.
- Convert any pointers that point into such regions into
- POISON_KIND_POPPED_STACK svalues.
-
- If PURGE, then purge all unused svalues, with the exception of any
- returned values.
-
- Accumulate stats on purged entities into STATS. */
-
-void
-stack_region::pop_frame (region_model *model, region_id result_dst_rid,
- bool purge, purge_stats *stats,
- region_model_context *ctxt)
-{
- gcc_assert (m_frame_rids.length () > 0);
-
- region_id frame_rid = get_current_frame_id ();
- frame_region *frame = model->get_region<frame_region> (frame_rid);
-
- /* Evaluate the result, within the callee frame. */
- svalue_id_set returned_sids;
- tree fndecl = frame->get_function ()->decl;
- tree result = DECL_RESULT (fndecl);
- if (result && TREE_TYPE (result) != void_type_node)
- {
- if (!result_dst_rid.null_p ())
- {
- /* Copy the result to RESULT_DST_RID. */
- model->copy_region (result_dst_rid, model->get_lvalue (result, ctxt),
- ctxt);
- }
- if (purge)
- {
- /* Populate returned_sids, to avoid purging them. */
- region_id return_rid = model->get_lvalue (result, NULL);
- region_id_set returned_rids (model);
- model->get_descendents (return_rid, &returned_rids,
- region_id::null ());
- for (unsigned i = 0; i < model->get_num_regions (); i++)
- {
- region_id rid = region_id::from_int (i);
- if (returned_rids.region_p (rid))
- {
- svalue_id sid = model->get_region (rid)->get_value_direct ();
- returned_sids.add_svalue (sid);
- }
- }
- }
- }
-
- /* Pop the frame RID. */
- m_frame_rids.pop ();
-
- model->delete_region_and_descendents (frame_rid,
- POISON_KIND_POPPED_STACK,
- stats,
- ctxt ? ctxt->get_logger () : NULL);
-
- /* Delete unused svalues, but don't delete the return value(s). */
- if (purge)
- model->purge_unused_svalues (stats, ctxt, &returned_sids);
-
- model->validate ();
-}
-
-/* Implementation of region::add_to_hash vfunc for stack_region. */
-
-void
-stack_region::add_to_hash (inchash::hash &hstate) const
-{
- region::add_to_hash (hstate);
-
- int i;
- region_id *frame_rid;
- FOR_EACH_VEC_ELT (m_frame_rids, i, frame_rid)
- inchash::add (*frame_rid, hstate);
-}
-
-/* Implementation of region::remap_region_ids vfunc for stack_region. */
-
-void
-stack_region::remap_region_ids (const region_id_map &map)
-{
- region::remap_region_ids (map);
- int i;
- region_id *frame_rid;
- FOR_EACH_VEC_ELT (m_frame_rids, i, frame_rid)
- map.update (&m_frame_rids[i]);
-}
-
-/* Attempt to merge STACK_REGION_A and STACK_REGION_B using MERGER.
- Return true if the merger is possible, false otherwise. */
-
-bool
-stack_region::can_merge_p (const stack_region *stack_region_a,
- const stack_region *stack_region_b,
- model_merger *merger)
-{
- if (stack_region_a->get_num_frames ()
- != stack_region_b->get_num_frames ())
- return false;
-
- region_model *merged_model = merger->m_merged_model;
-
- region_id rid_merged_stack
- = merged_model->get_root_region ()->ensure_stack_region (merged_model);
-
- stack_region *merged_stack
- = merged_model->get_region <stack_region> (rid_merged_stack);
-
- /* First, create all frames in the merged model, without populating them.
- The merging code assumes that all frames in the merged model already exist,
- so we have to do this first to handle the case in which a local in an
- older frame points at a local in a more recent frame. */
- for (unsigned i = 0; i < stack_region_a->get_num_frames (); i++)
- {
- region_id rid_a = stack_region_a->get_frame_rid (i);
- frame_region *frame_a = merger->get_region_a <frame_region> (rid_a);
-
- region_id rid_b = stack_region_b->get_frame_rid (i);
- frame_region *frame_b = merger->get_region_b <frame_region> (rid_b);
-
- if (frame_a->get_function () != frame_b->get_function ())
- return false;
-
- frame_region *merged_frame = new frame_region (rid_merged_stack,
- frame_a->get_function (),
- frame_a->get_depth ());
- region_id rid_merged_frame = merged_model->add_region (merged_frame);
- merged_stack->push_frame (rid_merged_frame);
- }
-
- /* Now populate the frames we created. */
- for (unsigned i = 0; i < stack_region_a->get_num_frames (); i++)
- {
- region_id rid_a = stack_region_a->get_frame_rid (i);
- frame_region *frame_a = merger->get_region_a <frame_region> (rid_a);
-
- region_id rid_b = stack_region_b->get_frame_rid (i);
- frame_region *frame_b = merger->get_region_b <frame_region> (rid_b);
-
- region_id rid_merged_frame = merged_stack->get_frame_rid (i);
- frame_region *merged_frame
- = merged_model->get_region <frame_region> (rid_merged_frame);
- if (!map_region::can_merge_p (frame_a, frame_b,
- merged_frame, rid_merged_frame,
- merger))
- return false;
- }
-
- return true;
-}
-
-/* Implementation of region::walk_for_canonicalization vfunc for
- stack_region. */
-
-void
-stack_region::walk_for_canonicalization (canonicalization *c) const
-{
- int i;
- region_id *frame_rid;
- FOR_EACH_VEC_ELT (m_frame_rids, i, frame_rid)
- c->walk_rid (*frame_rid);
-}
-
-/* For debugging purposes: look for a grandchild region within one of
- the child frame regions, where the grandchild is for a decl named
- IDENTIFIER (or an SSA_NAME for such a decl):
-
- stack_region
- `-frame_region
- `-region for decl named IDENTIFIER
-
- returning its value, or svalue_id::null if none are found. */
-
-svalue_id
-stack_region::get_value_by_name (tree identifier,
- const region_model &model) const
-{
- int i;
- region_id *frame_rid;
- FOR_EACH_VEC_ELT (m_frame_rids, i, frame_rid)
- {
- frame_region *frame = model.get_region<frame_region> (*frame_rid);
- svalue_id sid = frame->get_value_by_name (identifier, model);
- if (!sid.null_p ())
- return sid;
- }
-
- return svalue_id::null ();
-}
-
-/* class heap_region : public region. */
-
-/* heap_region's copy ctor. */
-
-heap_region::heap_region (const heap_region &other)
-: region (other)
-{
-}
-
-/* Compare the fields of this heap_region with OTHER, returning true
- if they are equal.
- For use by region::operator==. */
-
-bool
-heap_region::compare_fields (const heap_region &) const
-{
- /* Empty. */
- return true;
-}
-
-/* Implementation of region::clone vfunc for heap_region. */
-
-region *
-heap_region::clone () const
-{
- return new heap_region (*this);
-}
-
-/* Implementation of region::walk_for_canonicalization vfunc for
- heap_region. */
-
-void
-heap_region::walk_for_canonicalization (canonicalization *) const
-{
- /* Empty. */
-}
-
-/* class root_region : public region. */
-
-/* root_region's default ctor. */
-
-root_region::root_region ()
-: region (region_id::null (),
- svalue_id::null (),
- NULL_TREE)
-{
-}
-
-/* root_region's copy ctor. */
-
-root_region::root_region (const root_region &other)
-: region (other),
- m_stack_rid (other.m_stack_rid),
- m_globals_rid (other.m_globals_rid),
- m_code_rid (other.m_code_rid),
- m_heap_rid (other.m_heap_rid)
-{
-}
-
-/* Compare the fields of this root_region with OTHER, returning true
- if they are equal.
- For use by region::operator==. */
-
-bool
-root_region::compare_fields (const root_region &other) const
-{
- if (m_stack_rid != other.m_stack_rid)
- return false;
- if (m_globals_rid != other.m_globals_rid)
- return false;
- if (m_code_rid != other.m_code_rid)
- return false;
- if (m_heap_rid != other.m_heap_rid)
- return false;
- return true;
-}
-
-/* Implementation of region::clone vfunc for root_region. */
-
-region *
-root_region::clone () const
-{
- return new root_region (*this);
-}
-
-/* Implementation of region::print_fields vfunc for root_region. */
-
-void
-root_region::print_fields (const region_model &model,
- region_id this_rid,
- pretty_printer *pp) const
-{
- region::print_fields (model, this_rid, pp);
- // TODO
-}
-
-/* Implementation of region::validate vfunc for root_region. */
-
-void
-root_region::validate (const region_model &model) const
-{
- region::validate (model);
- m_stack_rid.validate (model);
- m_globals_rid.validate (model);
- m_code_rid.validate (model);
- m_heap_rid.validate (model);
-}
-
-/* Implementation of region::dump_child_label vfunc for root_region. */
-
-void
-root_region::dump_child_label (const region_model &model ATTRIBUTE_UNUSED,
- region_id this_rid ATTRIBUTE_UNUSED,
- region_id child_rid,
- pretty_printer *pp) const
-{
- if (child_rid == m_stack_rid)
- pp_printf (pp, "stack: ");
- else if (child_rid == m_globals_rid)
- pp_printf (pp, "globals: ");
- else if (child_rid == m_code_rid)
- pp_printf (pp, "code: ");
- else if (child_rid == m_heap_rid)
- pp_printf (pp, "heap: ");
-}
-
-/* Create a new frame_region for a call to FUN and push it onto
- the stack.
-
- If ARG_SIDS is non-NULL, use it to populate the parameters
- in the new frame.
- Otherwise, populate them with unknown values.
-
- Return the region_id of the new frame. */
-
-region_id
-root_region::push_frame (region_model *model, function *fun,
- vec<svalue_id> *arg_sids,
- region_model_context *ctxt)
-{
- gcc_assert (fun);
- /* arg_sids can be NULL. */
-
- ensure_stack_region (model);
- stack_region *stack = model->get_region <stack_region> (m_stack_rid);
-
- frame_region *region = new frame_region (m_stack_rid, fun,
- stack->get_num_frames ());
- region_id frame_rid = model->add_region (region);
-
- // TODO: unify these cases by building a vec of unknown?
-
- if (arg_sids)
- {
- /* Arguments supplied from a caller frame. */
-
- tree fndecl = fun->decl;
- unsigned idx = 0;
- for (tree iter_parm = DECL_ARGUMENTS (fndecl); iter_parm;
- iter_parm = DECL_CHAIN (iter_parm), ++idx)
- {
- /* If there's a mismatching declaration, the call stmt might
- not have enough args. Handle this case by leaving the
- rest of the params as uninitialized. */
- if (idx >= arg_sids->length ())
- break;
- svalue_id arg_sid = (*arg_sids)[idx];
- region_id parm_rid
- = region->get_or_create (model, frame_rid, iter_parm,
- TREE_TYPE (iter_parm), ctxt);
- model->set_value (parm_rid, arg_sid, ctxt);
-
- /* Also do it for default SSA name (sharing the same unknown
- value). */
- tree parm_default_ssa = ssa_default_def (fun, iter_parm);
- if (parm_default_ssa)
- {
- region_id defssa_rid
- = region->get_or_create (model, frame_rid, parm_default_ssa,
- TREE_TYPE (iter_parm), ctxt);
- model->set_value (defssa_rid, arg_sid, ctxt);
- }
- }
- }
- else
- {
- /* No known arguments (a top-level call within the analysis). */
-
- /* Params have a defined, unknown value; they should not inherit
- from the poisoned uninit value. */
- tree fndecl = fun->decl;
- for (tree iter_parm = DECL_ARGUMENTS (fndecl); iter_parm;
- iter_parm = DECL_CHAIN (iter_parm))
- {
- region_id parm_rid
- = region->get_or_create (model, frame_rid, iter_parm,
- TREE_TYPE (iter_parm), ctxt);
- svalue_id parm_sid
- = model->set_to_new_unknown_value (parm_rid, TREE_TYPE (iter_parm),
- ctxt);
-
- /* Also do it for default SSA name (sharing the same unknown
- value). */
- tree parm_default_ssa = ssa_default_def (fun, iter_parm);
- if (parm_default_ssa)
- {
- region_id defssa_rid
- = region->get_or_create (model, frame_rid, parm_default_ssa,
- TREE_TYPE (iter_parm), ctxt);
- model->get_region (defssa_rid)->set_value (*model, defssa_rid,
- parm_sid, ctxt);
- }
- }
- }
-
- stack->push_frame (frame_rid);
-
- return frame_rid;
-}
-
-/* Get the region_id of the top-most frame in this root_region's stack,
- if any. */
-
-region_id
-root_region::get_current_frame_id (const region_model &model) const
-{
- stack_region *stack = model.get_region <stack_region> (m_stack_rid);
- if (stack)
- return stack->get_current_frame_id ();
- else
- return region_id::null ();
-}
-
-/* Pop the topmost frame_region from this root_region's stack;
- see the comment for stack_region::pop_frame. */
-
-void
-root_region::pop_frame (region_model *model, region_id result_dst_rid,
- bool purge, purge_stats *out,
- region_model_context *ctxt)
-{
- stack_region *stack = model->get_region <stack_region> (m_stack_rid);
- stack->pop_frame (model, result_dst_rid, purge, out, ctxt);
-}
-
-/* Return the region_id of the stack region, creating it if doesn't
- already exist. */
-
-region_id
-root_region::ensure_stack_region (region_model *model)
-{
- if (m_stack_rid.null_p ())
- {
- m_stack_rid
- = model->add_region (new stack_region (model->get_root_rid (),
- svalue_id::null ()));
- }
- return m_stack_rid;
-}
-
-/* Return the stack region (which could be NULL). */
-
-stack_region *
-root_region::get_stack_region (const region_model *model) const
-{
- return model->get_region <stack_region> (m_stack_rid);
-}
-
-/* Return the region_id of the globals region, creating it if doesn't
- already exist. */
-
-region_id
-root_region::ensure_globals_region (region_model *model)
-{
- if (m_globals_rid.null_p ())
- m_globals_rid
- = model->add_region (new globals_region (model->get_root_rid ()));
- return m_globals_rid;
-}
-
-/* Return the code region (which could be NULL). */
-
-code_region *
-root_region::get_code_region (const region_model *model) const
-{
- return model->get_region <code_region> (m_code_rid);
-}
-
-/* Return the region_id of the code region, creating it if doesn't
- already exist. */
-
-region_id
-root_region::ensure_code_region (region_model *model)
-{
- if (m_code_rid.null_p ())
- m_code_rid
- = model->add_region (new code_region (model->get_root_rid ()));
- return m_code_rid;
-}
-
-/* Return the globals region (which could be NULL). */
-
-globals_region *
-root_region::get_globals_region (const region_model *model) const
-{
- return model->get_region <globals_region> (m_globals_rid);
-}
-
-/* Return the region_id of the heap region, creating it if doesn't
- already exist. */
-
-region_id
-root_region::ensure_heap_region (region_model *model)
-{
- if (m_heap_rid.null_p ())
- {
- m_heap_rid
- = model->add_region (new heap_region (model->get_root_rid (),
- svalue_id::null ()));
- }
- return m_heap_rid;
-}
-
-/* Return the heap region (which could be NULL). */
-
-heap_region *
-root_region::get_heap_region (const region_model *model) const
-{
- return model->get_region <heap_region> (m_heap_rid);
-}
-
-/* Implementation of region::remap_region_ids vfunc for root_region. */
-
-void
-root_region::remap_region_ids (const region_id_map &map)
-{
- map.update (&m_stack_rid);
- map.update (&m_globals_rid);
- map.update (&m_code_rid);
- map.update (&m_heap_rid);
-}
-
-/* Attempt to merge ROOT_REGION_A and ROOT_REGION_B into
- MERGED_ROOT_REGION using MERGER.
- Return true if the merger is possible, false otherwise. */
-
-bool
-root_region::can_merge_p (const root_region *root_region_a,
- const root_region *root_region_b,
- root_region *merged_root_region,
- model_merger *merger)
-{
- /* We can only merge if the stacks are sufficiently similar. */
- stack_region *stack_a = root_region_a->get_stack_region (merger->m_model_a);
- stack_region *stack_b = root_region_b->get_stack_region (merger->m_model_b);
- if (stack_a && stack_b)
- {
- /* If the two models both have a stack, attempt to merge them. */
- merged_root_region->ensure_stack_region (merger->m_merged_model);
- if (!stack_region::can_merge_p (stack_a, stack_b, merger))
- return false;
- }
- else if (stack_a || stack_b)
- /* Don't attempt to merge if one model has a stack and the other
- doesn't. */
- return false;
-
- map_region *globals_a = root_region_a->get_globals_region (merger->m_model_a);
- map_region *globals_b = root_region_b->get_globals_region (merger->m_model_b);
- if (globals_a && globals_b)
- {
- /* If both models have globals regions, attempt to merge them. */
- region_id merged_globals_rid
- = merged_root_region->ensure_globals_region (merger->m_merged_model);
- map_region *merged_globals
- = merged_root_region->get_globals_region (merger->m_merged_model);
- if (!map_region::can_merge_p (globals_a, globals_b,
- merged_globals, merged_globals_rid,
- merger))
- return false;
- }
- /* otherwise, merge as "no globals". */
-
- map_region *code_a = root_region_a->get_code_region (merger->m_model_a);
- map_region *code_b = root_region_b->get_code_region (merger->m_model_b);
- if (code_a && code_b)
- {
- /* If both models have code regions, attempt to merge them. */
- region_id merged_code_rid
- = merged_root_region->ensure_code_region (merger->m_merged_model);
- map_region *merged_code
- = merged_root_region->get_code_region (merger->m_merged_model);
- if (!map_region::can_merge_p (code_a, code_b,
- merged_code, merged_code_rid,
- merger))
- return false;
- }
- /* otherwise, merge as "no code". */
-
- heap_region *heap_a = root_region_a->get_heap_region (merger->m_model_a);
- heap_region *heap_b = root_region_b->get_heap_region (merger->m_model_b);
- if (heap_a && heap_b)
- {
- /* If both have a heap, create a "merged" heap.
- Actually merging the heap contents happens via the region_svalue
- instances, as needed, when seeing pairs of region_svalue instances. */
- merged_root_region->ensure_heap_region (merger->m_merged_model);
- }
- /* otherwise, merge as "no heap". */
-
- return true;
-}
-
-/* Implementation of region::add_to_hash vfunc for root_region. */
-
void
-root_region::add_to_hash (inchash::hash &hstate) const
+dump_tree (pretty_printer *pp, tree t)
{
- region::add_to_hash (hstate);
- inchash::add (m_stack_rid, hstate);
- inchash::add (m_globals_rid, hstate);
- inchash::add (m_code_rid, hstate);
- inchash::add (m_heap_rid, hstate);
+ dump_generic_node (pp, t, 0, TDF_SLIM, 0);
}
-/* Implementation of region::walk_for_canonicalization vfunc for
- root_region. */
+/* Dump T to PP in language-independent form in quotes, for
+ debugging/logging/dumping purposes. */
void
-root_region::walk_for_canonicalization (canonicalization *c) const
-{
- c->walk_rid (m_stack_rid);
- c->walk_rid (m_globals_rid);
- c->walk_rid (m_code_rid);
- c->walk_rid (m_heap_rid);
-}
-
-/* For debugging purposes: look for a descendant region for a local
- or global decl named IDENTIFIER (or an SSA_NAME for such a decl),
- returning its value, or svalue_id::null if none are found. */
-
-svalue_id
-root_region::get_value_by_name (tree identifier,
- const region_model &model) const
-{
- if (stack_region *stack = get_stack_region (&model))
- {
- svalue_id sid = stack->get_value_by_name (identifier, model);
- if (!sid.null_p ())
- return sid;
- }
- if (map_region *globals = get_globals_region (&model))
- {
- svalue_id sid = globals->get_value_by_name (identifier, model);
- if (!sid.null_p ())
- return sid;
- }
- return svalue_id::null ();
-}
-
-/* class symbolic_region : public map_region. */
-
-/* symbolic_region's copy ctor. */
-
-symbolic_region::symbolic_region (const symbolic_region &other)
-: region (other),
- m_possibly_null (other.m_possibly_null)
-{
-}
-
-/* Compare the fields of this symbolic_region with OTHER, returning true
- if they are equal.
- For use by region::operator==. */
-
-bool
-symbolic_region::compare_fields (const symbolic_region &other) const
-{
- return m_possibly_null == other.m_possibly_null;
-}
-
-/* Implementation of region::clone vfunc for symbolic_region. */
-
-region *
-symbolic_region::clone () const
+dump_quoted_tree (pretty_printer *pp, tree t)
{
- return new symbolic_region (*this);
+ pp_begin_quote (pp, pp_show_color (pp));
+ dump_tree (pp, t);
+ pp_end_quote (pp, pp_show_color (pp));
}
-/* Implementation of region::walk_for_canonicalization vfunc for
- symbolic_region. */
+/* Equivalent to pp_printf (pp, "%qT", t), to avoid nesting pp_printf
+ calls within other pp_printf calls.
-void
-symbolic_region::walk_for_canonicalization (canonicalization *) const
-{
- /* Empty. */
-}
+ default_tree_printer handles 'T' and some other codes by calling
+ dump_generic_node (pp, t, 0, TDF_SLIM, 0);
+ dump_generic_node calls pp_printf in various places, leading to
+ garbled output.
-/* Implementation of region::print_fields vfunc for symbolic_region. */
+ Ideally pp_printf could be made to be reentrant, but in the meantime
+ this function provides a workaround. */
void
-symbolic_region::print_fields (const region_model &model,
- region_id this_rid,
- pretty_printer *pp) const
+print_quoted_type (pretty_printer *pp, tree t)
{
- region::print_fields (model, this_rid, pp);
- pp_printf (pp, ", possibly_null: %s", m_possibly_null ? "true" : "false");
+ pp_begin_quote (pp, pp_show_color (pp));
+ dump_generic_node (pp, t, 0, TDF_SLIM, 0);
+ pp_end_quote (pp, pp_show_color (pp));
}
/* class region_model. */
-/* region_model's default ctor. */
+/* Ctor for region_model: construct an "empty" model. */
-region_model::region_model ()
+region_model::region_model (region_model_manager *mgr)
+: m_mgr (mgr), m_store (), m_current_frame (NULL)
{
- m_root_rid = add_region (new root_region ());
- m_constraints = new impl_constraint_manager (this);
- // TODO
+ m_constraints = new constraint_manager (mgr);
}
/* region_model's copy ctor. */
region_model::region_model (const region_model &other)
-: m_svalues (other.m_svalues.length ()),
- m_regions (other.m_regions.length ()),
- m_root_rid (other.m_root_rid)
+: m_mgr (other.m_mgr), m_store (other.m_store),
+ m_constraints (new constraint_manager (*other.m_constraints)),
+ m_current_frame (other.m_current_frame)
{
- /* Clone the svalues and regions. */
- int i;
-
- svalue *svalue;
- FOR_EACH_VEC_ELT (other.m_svalues, i, svalue)
- m_svalues.quick_push (svalue->clone ());
-
- region *region;
- FOR_EACH_VEC_ELT (other.m_regions, i, region)
- m_regions.quick_push (region->clone ());
-
- m_constraints = other.m_constraints->clone (this);
}
/* region_model's dtor. */
region_model &
region_model::operator= (const region_model &other)
{
- unsigned i;
- svalue *svalue;
- region *region;
+ /* m_mgr is const. */
+ gcc_assert (m_mgr == other.m_mgr);
- /* Delete existing content. */
- FOR_EACH_VEC_ELT (m_svalues, i, svalue)
- delete svalue;
- m_svalues.truncate (0);
-
- FOR_EACH_VEC_ELT (m_regions, i, region)
- delete region;
- m_regions.truncate (0);
+ m_store = other.m_store;
delete m_constraints;
+ m_constraints = new constraint_manager (*other.m_constraints);
- /* Clone the svalues and regions. */
- m_svalues.reserve (other.m_svalues.length (), true);
- FOR_EACH_VEC_ELT (other.m_svalues, i, svalue)
- m_svalues.quick_push (svalue->clone ());
-
- m_regions.reserve (other.m_regions.length (), true);
- FOR_EACH_VEC_ELT (other.m_regions, i, region)
- m_regions.quick_push (region->clone ());
-
- m_root_rid = other.m_root_rid;
-
- m_constraints = other.m_constraints->clone (this);
+ m_current_frame = other.m_current_frame;
return *this;
}
/* Equality operator for region_model.
- Amongst other things this directly compares the svalue and region
- vectors and so for this to be meaningful both this and OTHER should
+ Amongst other things this directly compares the stores and the constraint
+ managers, so for this to be meaningful both this and OTHER should
have been canonicalized. */
bool
region_model::operator== (const region_model &other) const
{
- if (m_root_rid != other.m_root_rid)
- return false;
-
- if (m_svalues.length () != other.m_svalues.length ())
- return false;
+ /* We can only compare instances that use the same manager. */
+ gcc_assert (m_mgr == other.m_mgr);
- if (m_regions.length () != other.m_regions.length ())
+ if (m_store != other.m_store)
return false;
if (*m_constraints != *other.m_constraints)
return false;
- unsigned i;
- svalue *svalue;
- FOR_EACH_VEC_ELT (other.m_svalues, i, svalue)
- if (!(*m_svalues[i] == *other.m_svalues[i]))
- return false;
-
- region *region;
- FOR_EACH_VEC_ELT (other.m_regions, i, region)
- if (!(*m_regions[i] == *other.m_regions[i]))
- return false;
+ if (m_current_frame != other.m_current_frame)
+ return false;
gcc_checking_assert (hash () == other.hash ());
/* Generate a hash value for this region_model. */
hashval_t
-region_model::hash () const
-{
- hashval_t result = 0;
- int i;
-
- svalue *svalue;
- FOR_EACH_VEC_ELT (m_svalues, i, svalue)
- result ^= svalue->hash ();
-
- region *region;
- FOR_EACH_VEC_ELT (m_regions, i, region)
- result ^= region->hash ();
-
- result ^= m_constraints->hash ();
-
- return result;
-}
-
-/* Print an all-on-one-line representation of this region_model to PP,
- which must support %E for trees. */
-
-void
-region_model::print (pretty_printer *pp) const
-{
- int i;
-
- pp_string (pp, "svalues: [");
- svalue *svalue;
- FOR_EACH_VEC_ELT (m_svalues, i, svalue)
- {
- if (i > 0)
- pp_string (pp, ", ");
- print_svalue (svalue_id::from_int (i), pp);
- }
-
- pp_string (pp, "], regions: [");
-
- region *region;
- FOR_EACH_VEC_ELT (m_regions, i, region)
- {
- if (i > 0)
- pp_string (pp, ", ");
- region->print (*this, region_id::from_int (i), pp);
- }
-
- pp_string (pp, "], constraints: ");
-
- m_constraints->print (pp);
-}
-
-/* Print the svalue with id SID to PP. */
-
-void
-region_model::print_svalue (svalue_id sid, pretty_printer *pp) const
-{
- get_svalue (sid)->print (*this, sid, pp);
-}
-
-/* Dump a .dot representation of this region_model to PP, showing
- the values and the hierarchy of regions. */
-
-void
-region_model::dump_dot_to_pp (pretty_printer *pp) const
-{
- graphviz_out gv (pp);
-
- pp_string (pp, "digraph \"");
- pp_write_text_to_stream (pp);
- pp_write_text_as_dot_label_to_stream (pp, /*for_record=*/false);
- pp_string (pp, "\" {\n");
-
- gv.indent ();
-
- pp_string (pp, "overlap=false;\n");
- pp_string (pp, "compound=true;\n");
-
- int i;
-
- svalue *svalue;
- FOR_EACH_VEC_ELT (m_svalues, i, svalue)
- svalue->dump_dot_to_pp (*this, svalue_id::from_int (i), pp);
-
- region *region;
- FOR_EACH_VEC_ELT (m_regions, i, region)
- region->dump_dot_to_pp (*this, region_id::from_int (i), pp);
-
- /* TODO: constraints. */
-
- /* Terminate "digraph" */
- gv.outdent ();
- pp_string (pp, "}");
- pp_newline (pp);
-}
-
-/* Dump a .dot representation of this region_model to FP. */
-
-void
-region_model::dump_dot_to_file (FILE *fp) const
-{
- pretty_printer pp;
- pp_format_decoder (&pp) = default_tree_printer;
- pp.buffer->stream = fp;
- dump_dot_to_pp (&pp);
- pp_flush (&pp);
-}
-
-/* Dump a .dot representation of this region_model to PATH. */
-
-void
-region_model::dump_dot (const char *path) const
-{
- FILE *fp = fopen (path, "w");
- dump_dot_to_file (fp);
- fclose (fp);
-}
-
-/* Dump a multiline representation of this model to PP, showing the
- region hierarchy, the svalues, and any constraints.
-
- If SUMMARIZE is true, show only the most pertinent information,
- in a form that attempts to be less verbose.
- Otherwise, show all information. */
-
-void
-region_model::dump_to_pp (pretty_printer *pp, bool summarize) const
-{
- if (summarize)
- {
- auto_vec<path_var> rep_path_vars;
-
- unsigned i;
- region *reg;
- FOR_EACH_VEC_ELT (m_regions, i, reg)
- {
- region_id rid = region_id::from_int (i);
- path_var pv = get_representative_path_var (rid);
- if (pv.m_tree)
- rep_path_vars.safe_push (pv);
- }
- bool is_first = true;
-
- /* Work with a copy in case the get_lvalue calls change anything
- (they shouldn't). */
- region_model copy (*this);
- copy.dump_summary_of_rep_path_vars (pp, &rep_path_vars, &is_first);
-
- equiv_class *ec;
- FOR_EACH_VEC_ELT (m_constraints->m_equiv_classes, i, ec)
- {
- for (unsigned j = 0; j < ec->m_vars.length (); j++)
- {
- svalue_id lhs_sid = ec->m_vars[j];
- tree lhs_tree = get_representative_tree (lhs_sid);
- if (lhs_tree == NULL_TREE)
- continue;
- for (unsigned k = j + 1; k < ec->m_vars.length (); k++)
- {
- svalue_id rhs_sid = ec->m_vars[k];
- tree rhs_tree = get_representative_tree (rhs_sid);
- if (rhs_tree
- && !(CONSTANT_CLASS_P (lhs_tree)
- && CONSTANT_CLASS_P (rhs_tree)))
- {
- dump_separator (pp, &is_first);
- dump_tree (pp, lhs_tree);
- pp_string (pp, " == ");
- dump_tree (pp, rhs_tree);
- }
- }
- }
- }
-
- constraint *c;
- FOR_EACH_VEC_ELT (m_constraints->m_constraints, i, c)
- {
- const equiv_class &lhs = c->m_lhs.get_obj (*m_constraints);
- const equiv_class &rhs = c->m_rhs.get_obj (*m_constraints);
- svalue_id lhs_sid = lhs.get_representative ();
- svalue_id rhs_sid = rhs.get_representative ();
- tree lhs_tree = get_representative_tree (lhs_sid);
- tree rhs_tree = get_representative_tree (rhs_sid);
- if (lhs_tree && rhs_tree
- && !(CONSTANT_CLASS_P (lhs_tree) && CONSTANT_CLASS_P (rhs_tree)))
- {
- dump_separator (pp, &is_first);
- dump_tree (pp, lhs_tree);
- pp_printf (pp, " %s ", constraint_op_code (c->m_op));
- dump_tree (pp, rhs_tree);
- }
- }
-
- return;
- }
-
- get_region (m_root_rid)->dump_to_pp (*this, m_root_rid, pp, "", true);
-
- pp_string (pp, "svalues:");
- pp_newline (pp);
- int i;
- svalue *svalue;
- FOR_EACH_VEC_ELT (m_svalues, i, svalue)
- {
- pp_string (pp, " ");
- svalue_id sid = svalue_id::from_int (i);
- print_svalue (sid, pp);
- pp_newline (pp);
- }
-
- pp_string (pp, "constraint manager:");
- pp_newline (pp);
- m_constraints->dump_to_pp (pp);
-}
-
-/* Dump a multiline representation of this model to FILE. */
-
-void
-region_model::dump (FILE *fp, bool summarize) const
-{
- pretty_printer pp;
- pp_format_decoder (&pp) = default_tree_printer;
- pp_show_color (&pp) = pp_show_color (global_dc->printer);
- pp.buffer->stream = fp;
- dump_to_pp (&pp, summarize);
- pp_flush (&pp);
-}
-
-/* Dump a multiline representation of this model to stderr. */
-
-DEBUG_FUNCTION void
-region_model::dump (bool summarize) const
-{
- dump (stderr, summarize);
-}
-
-/* Dump RMODEL fully to stderr (i.e. without summarization). */
-
-DEBUG_FUNCTION void
-region_model::debug () const
-{
- dump (false);
-}
-
-/* Dump VEC to PP, in the form "{VEC elements}: LABEL". */
-
-static void
-dump_vec_of_tree (pretty_printer *pp,
- bool *is_first,
- const auto_vec<tree> &vec,
- const char *label)
-{
- if (vec.length () == 0)
- return;
-
- dump_separator (pp, is_first);
- pp_printf (pp, "{");
- unsigned i;
- tree key;
- FOR_EACH_VEC_ELT (vec, i, key)
- {
- if (i > 0)
- pp_string (pp, ", ");
- dump_tree (pp, key);
- }
- pp_printf (pp, "}: %s", label);
-}
-
-/* Dump all *REP_PATH_VARS to PP in compact form, updating *IS_FIRST.
- Subroutine of region_model::dump_to_pp. */
-
-void
-region_model::dump_summary_of_rep_path_vars (pretty_printer *pp,
- auto_vec<path_var> *rep_path_vars,
- bool *is_first)
-{
- /* Print pointers, constants, and poisoned values that aren't "uninit";
- gather keys for unknown and uninit values. */
- unsigned i;
- path_var *pv;
- auto_vec<tree> unknown_trees;
- FOR_EACH_VEC_ELT (*rep_path_vars, i, pv)
- {
- if (TREE_CODE (pv->m_tree) == STRING_CST)
- continue;
- tentative_region_model_context ctxt;
- region_id child_rid = get_lvalue (*pv, &ctxt);
- if (ctxt.had_errors_p ())
- continue;
- region *child_region = get_region (child_rid);
- if (!child_region)
- continue;
- svalue_id sid = child_region->get_value_direct ();
- if (sid.null_p ())
- continue;
- svalue *sval = get_svalue (sid);
- switch (sval->get_kind ())
- {
- default:
- gcc_unreachable ();
- case SK_REGION:
- {
- region_svalue *region_sval = as_a <region_svalue *> (sval);
- region_id pointee_rid = region_sval->get_pointee ();
- gcc_assert (!pointee_rid.null_p ());
- tree pointee = get_representative_path_var (pointee_rid).m_tree;
- dump_separator (pp, is_first);
- dump_tree (pp, pv->m_tree);
- pp_string (pp, ": ");
- pp_character (pp, '&');
- if (pointee)
- dump_tree (pp, pointee);
- else
- pointee_rid.print (pp);
- }
- break;
- case SK_CONSTANT:
- dump_separator (pp, is_first);
- dump_tree (pp, pv->m_tree);
- pp_string (pp, ": ");
- dump_tree (pp, sval->dyn_cast_constant_svalue ()->get_constant ());
- break;
- case SK_UNKNOWN:
- unknown_trees.safe_push (pv->m_tree);
- break;
- case SK_POISONED:
- {
- poisoned_svalue *poisoned_sval = as_a <poisoned_svalue *> (sval);
- enum poison_kind pkind = poisoned_sval->get_poison_kind ();
- dump_separator (pp, is_first);
- dump_tree (pp, pv->m_tree);
- pp_printf (pp, ": %s", poison_kind_to_str (pkind));
- }
- break;
- case SK_SETJMP:
- dump_separator (pp, is_first);
- pp_printf (pp, "setjmp: EN: %i",
- sval->dyn_cast_setjmp_svalue ()->get_enode_index ());
- break;
- }
- }
-
- /* Print unknown and uninitialized values in consolidated form. */
- dump_vec_of_tree (pp, is_first, unknown_trees, "unknown");
+region_model::hash () const
+{
+ hashval_t result = m_store.hash ();
+ result ^= m_constraints->hash ();
+ return result;
}
-/* Assert that this object is valid. */
+/* Dump a representation of this model to PP, showing the
+ stack, the store, and any constraints.
+   Use SIMPLE to control how svalues and regions are printed; use MULTILINE to choose multiline vs compact output.  */
void
-region_model::validate () const
+region_model::dump_to_pp (pretty_printer *pp, bool simple,
+ bool multiline) const
{
- /* Skip this in a release build. */
-#if !CHECKING_P
- return;
-#endif
-
- m_constraints->validate ();
+ /* Dump stack. */
+ pp_printf (pp, "stack depth: %i", get_stack_depth ());
+ if (multiline)
+ pp_newline (pp);
+ else
+ pp_string (pp, " {");
+ for (const frame_region *iter_frame = m_current_frame; iter_frame;
+ iter_frame = iter_frame->get_calling_frame ())
+ {
+ if (multiline)
+ pp_string (pp, " ");
+ else if (iter_frame != m_current_frame)
+ pp_string (pp, ", ");
+ pp_printf (pp, "frame (index %i): ", iter_frame->get_index ());
+ iter_frame->dump_to_pp (pp, simple);
+ if (multiline)
+ pp_newline (pp);
+ }
+ if (!multiline)
+ pp_string (pp, "}");
+
+ /* Dump store. */
+ if (!multiline)
+ pp_string (pp, ", {");
+ m_store.dump_to_pp (pp, simple, multiline,
+ m_mgr->get_store_manager ());
+ if (!multiline)
+ pp_string (pp, "}");
+
+ /* Dump constraints. */
+ pp_string (pp, "constraint_manager:");
+ if (multiline)
+ pp_newline (pp);
+ else
+ pp_string (pp, " {");
+ m_constraints->dump_to_pp (pp, multiline);
+ if (!multiline)
+ pp_string (pp, "}");
+}
- unsigned i;
- region *r;
- FOR_EACH_VEC_ELT (m_regions, i, r)
- r->validate (*this);
+/* Dump a representation of this model to FILE. */
- // TODO: anything else?
+void
+region_model::dump (FILE *fp, bool simple, bool multiline) const
+{
+ pretty_printer pp;
+ pp_format_decoder (&pp) = default_tree_printer;
+ pp_show_color (&pp) = pp_show_color (global_dc->printer);
+ pp.buffer->stream = fp;
+ dump_to_pp (&pp, simple, multiline);
+ pp_newline (&pp);
+ pp_flush (&pp);
}
-/* Global data for use by svalue_id_cmp_by_constant_svalue. */
+/* Dump a multiline representation of this model to stderr. */
-static region_model *svalue_id_cmp_by_constant_svalue_model = NULL;
+DEBUG_FUNCTION void
+region_model::dump (bool simple) const
+{
+ dump (stderr, simple, true);
+}
-/* Comparator for use by region_model::canonicalize. */
+/* Dump a multiline representation of this model to stderr. */
-static int
-svalue_id_cmp_by_constant_svalue (const void *p1, const void *p2)
+DEBUG_FUNCTION void
+region_model::debug () const
{
- const svalue_id *sid1 = (const svalue_id *)p1;
- const svalue_id *sid2 = (const svalue_id *)p2;
- gcc_assert (!sid1->null_p ());
- gcc_assert (!sid2->null_p ());
- gcc_assert (svalue_id_cmp_by_constant_svalue_model);
- const svalue &sval1
- = *svalue_id_cmp_by_constant_svalue_model->get_svalue (*sid1);
- const svalue &sval2
- = *svalue_id_cmp_by_constant_svalue_model->get_svalue (*sid2);
- gcc_assert (sval1.get_kind () == SK_CONSTANT);
- gcc_assert (sval2.get_kind () == SK_CONSTANT);
-
- tree cst1 = ((const constant_svalue &)sval1).get_constant ();
- tree cst2 = ((const constant_svalue &)sval2).get_constant ();
- return tree_cmp (cst1, cst2);
+ dump (true);
}
-/* Reorder the regions and svalues into a deterministic "canonical" order,
- to maximize the chance of equality.
- If non-NULL, notify CTXT about the svalue id remapping. */
+/* Canonicalize the store and constraints, to maximize the chance of
+ equality between region_model instances. */
void
-region_model::canonicalize (region_model_context *ctxt)
+region_model::canonicalize ()
{
- /* Walk all regions and values in a deterministic order, visiting
- rids and sids, generating a rid and sid map. */
- canonicalization c (*this);
-
- /* (1): Walk all svalues, putting constants first, sorting the constants
- (thus imposing an ordering on any constants that are purely referenced
- by constraints).
- Ignore other svalues for now. */
- {
- unsigned i;
- auto_vec<svalue_id> sids;
- svalue *sval;
- FOR_EACH_VEC_ELT (m_svalues, i, sval)
- {
- if (sval->get_kind () == SK_CONSTANT)
- sids.safe_push (svalue_id::from_int (i));
- }
- svalue_id_cmp_by_constant_svalue_model = this;
- sids.qsort (svalue_id_cmp_by_constant_svalue);
- svalue_id_cmp_by_constant_svalue_model = NULL;
- svalue_id *sid;
- FOR_EACH_VEC_ELT (sids, i, sid)
- c.walk_sid (*sid);
- }
-
- /* (2): Walk all regions (and thus their values) in a deterministic
- order. */
- c.walk_rid (m_root_rid);
-
- /* (3): Ensure we've visited everything, as we don't want to purge
- at this stage. Anything we visit for the first time here has
- arbitrary order. */
- {
- unsigned i;
- region *region;
- FOR_EACH_VEC_ELT (m_regions, i, region)
- c.walk_rid (region_id::from_int (i));
- svalue *sval;
- FOR_EACH_VEC_ELT (m_svalues, i, sval)
- c.walk_sid (svalue_id::from_int (i));
- }
-
- /* (4): We now have a reordering of the regions and values.
- Apply it. */
- remap_svalue_ids (c.m_sid_map);
- remap_region_ids (c.m_rid_map);
- if (ctxt)
- ctxt->remap_svalue_ids (c.m_sid_map);
-
- /* (5): Canonicalize the constraint_manager (it has already had its
- svalue_ids remapped above). This makes use of the new svalue_id
- values, and so must happen last. */
- m_constraints->canonicalize (get_num_svalues ());
-
- validate ();
+ m_store.canonicalize (m_mgr->get_store_manager ());
+ m_constraints->canonicalize ();
}
/* Return true if this region_model is in canonical form. */
region_model::canonicalized_p () const
{
region_model copy (*this);
- copy.canonicalize (NULL);
+ copy.canonicalize ();
return *this == copy;
}
+/* See the comment for store::loop_replay_fixup. */
+
+void
+region_model::loop_replay_fixup (const region_model *dst_state)
+{
+ m_store.loop_replay_fixup (dst_state->get_store (), m_mgr);
+}
+
/* A subclass of pending_diagnostic for complaining about uses of
poisoned values. */
case POISON_KIND_POPPED_STACK:
{
/* TODO: which CWE? */
- return warning_at (rich_loc,
- OPT_Wanalyzer_use_of_pointer_in_stale_stack_frame,
- "use of pointer %qE within stale stack frame",
- m_expr);
+ return warning_at
+ (rich_loc,
+ OPT_Wanalyzer_use_of_pointer_in_stale_stack_frame,
+ "dereferencing pointer %qE to within stale stack frame",
+ m_expr);
}
break;
}
m_expr);
case POISON_KIND_POPPED_STACK:
return ev.formatted_print
- ("use of pointer %qE within stale stack frame here",
+ ("dereferencing pointer %qE to within stale stack frame",
m_expr);
}
}
enum poison_kind m_pkind;
};
-/* Determine if EXPR is poisoned, and if so, queue a diagnostic to CTXT. */
-
-void
-region_model::check_for_poison (tree expr, region_model_context *ctxt)
-{
- if (!ctxt)
- return;
-
- // TODO: this is disabled for now (too many false positives)
- return;
+/* If ASSIGN is a stmt that can be modelled via
+ set_value (lhs_reg, SVALUE, CTXT)
+ for some SVALUE, get the SVALUE.
+ Otherwise return NULL. */
- svalue_id expr_sid = get_rvalue (expr, ctxt);
- gcc_assert (!expr_sid.null_p ());
- svalue *expr_svalue = get_svalue (expr_sid);
- gcc_assert (expr_svalue);
- if (const poisoned_svalue *poisoned_sval
- = expr_svalue->dyn_cast_poisoned_svalue ())
- {
- enum poison_kind pkind = poisoned_sval->get_poison_kind ();
- ctxt->warn (new poisoned_value_diagnostic (expr, pkind));
- }
-}
-
-/* Update this model for the ASSIGN stmt, using CTXT to report any
- diagnostics. */
-
-void
-region_model::on_assignment (const gassign *assign, region_model_context *ctxt)
+const svalue *
+region_model::get_gassign_result (const gassign *assign,
+ region_model_context *ctxt)
{
tree lhs = gimple_assign_lhs (assign);
tree rhs1 = gimple_assign_rhs1 (assign);
-
- region_id lhs_rid = get_lvalue (lhs, ctxt);
-
- /* Check for uses of poisoned values. */
- switch (get_gimple_rhs_class (gimple_expr_code (assign)))
- {
- case GIMPLE_INVALID_RHS:
- gcc_unreachable ();
- break;
- case GIMPLE_TERNARY_RHS:
- check_for_poison (gimple_assign_rhs3 (assign), ctxt);
- /* Fallthru */
- case GIMPLE_BINARY_RHS:
- check_for_poison (gimple_assign_rhs2 (assign), ctxt);
- /* Fallthru */
- case GIMPLE_UNARY_RHS:
- case GIMPLE_SINGLE_RHS:
- check_for_poison (gimple_assign_rhs1 (assign), ctxt);
- }
-
- if (lhs_rid.null_p ())
- return;
- // TODO: issue a warning for this case
-
enum tree_code op = gimple_assign_rhs_code (assign);
switch (op)
{
default:
- {
- if (0)
- sorry_at (assign->location, "unhandled assignment op: %qs",
- get_tree_code_name (op));
- set_to_new_unknown_value (lhs_rid, TREE_TYPE (lhs), ctxt);
- }
- break;
-
- case BIT_FIELD_REF:
- {
- // TODO
- }
- break;
-
- case CONSTRUCTOR:
- {
- /* e.g. "x ={v} {CLOBBER};" */
- // TODO
- }
- break;
+ return NULL;
case POINTER_PLUS_EXPR:
{
tree ptr = rhs1;
tree offset = gimple_assign_rhs2 (assign);
- svalue_id ptr_sid = get_rvalue (ptr, ctxt);
- svalue_id offset_sid = get_rvalue (offset, ctxt);
- region_id element_rid
- = get_or_create_pointer_plus_expr (TREE_TYPE (TREE_TYPE (ptr)),
- ptr_sid, offset_sid,
- ctxt);
- svalue_id element_ptr_sid
- = get_or_create_ptr_svalue (TREE_TYPE (ptr), element_rid);
- set_value (lhs_rid, element_ptr_sid, ctxt);
+ const svalue *ptr_sval = get_rvalue (ptr, ctxt);
+ const svalue *offset_sval = get_rvalue (offset, ctxt);
+ /* Quoting tree.def, "the second operand [of a POINTER_PLUS_EXPR]
+ is an integer of type sizetype". */
+ offset_sval = m_mgr->get_or_create_cast (size_type_node, offset_sval);
+
+ const svalue *sval_binop
+ = m_mgr->get_or_create_binop (TREE_TYPE (lhs), op,
+ ptr_sval, offset_sval);
+ return sval_binop;
}
break;
case POINTER_DIFF_EXPR:
{
/* e.g. "_1 = p_2(D) - q_3(D);". */
+ tree rhs2 = gimple_assign_rhs2 (assign);
+ const svalue *rhs1_sval = get_rvalue (rhs1, ctxt);
+ const svalue *rhs2_sval = get_rvalue (rhs2, ctxt);
- /* TODO. */
-
- set_to_new_unknown_value (lhs_rid, TREE_TYPE (lhs), ctxt);
- }
- break;
+ // TODO: perhaps fold to zero if they're known to be equal?
- case ADDR_EXPR:
- {
- /* LHS = &RHS; */
- svalue_id ptr_sid = get_rvalue (rhs1, ctxt);
- set_value (lhs_rid, ptr_sid, ctxt);
+ const svalue *sval_binop
+ = m_mgr->get_or_create_binop (TREE_TYPE (lhs), op,
+ rhs1_sval, rhs2_sval);
+ return sval_binop;
}
break;
+ /* Assignments of the form
+ set_value (lvalue (LHS), rvalue (EXPR))
+ for various EXPR.
+	 Here we simply return rvalue (EXPR); the caller sets it on the LHS.  */
+ case ADDR_EXPR: /* LHS = &RHS; */
+ case BIT_FIELD_REF:
+ case COMPONENT_REF: /* LHS = op0.op1; */
case MEM_REF:
- {
- region_id rhs_rid = get_lvalue (rhs1, ctxt);
- svalue_id rhs_sid
- = get_region (rhs_rid)->get_value (*this, true, ctxt);
- set_value (lhs_rid, rhs_sid, ctxt);
- }
- break;
-
case REAL_CST:
+ case COMPLEX_CST:
+ case VECTOR_CST:
case INTEGER_CST:
case ARRAY_REF:
- {
- /* LHS = RHS; */
- svalue_id cst_sid = get_rvalue (rhs1, ctxt);
- set_value (lhs_rid, cst_sid, ctxt);
- }
- break;
-
+ case SSA_NAME: /* LHS = VAR; */
+ case VAR_DECL: /* LHS = VAR; */
+ case PARM_DECL:/* LHS = VAR; */
+ case REALPART_EXPR:
+ case IMAGPART_EXPR:
+ return get_rvalue (rhs1, ctxt);
+
+ case ABS_EXPR:
+ case ABSU_EXPR:
+ case CONJ_EXPR:
+ case BIT_NOT_EXPR:
case FIX_TRUNC_EXPR:
case FLOAT_EXPR:
+ case NEGATE_EXPR:
case NOP_EXPR:
- // cast: TODO
- // fall though for now
- case SSA_NAME:
- case VAR_DECL:
- case PARM_DECL:
+ case VIEW_CONVERT_EXPR:
{
- /* LHS = VAR; */
- region_id rhs_rid = get_lvalue (rhs1, ctxt);
- copy_region (lhs_rid, rhs_rid, ctxt);
+ /* Unary ops. */
+ const svalue *rhs_sval = get_rvalue (rhs1, ctxt);
+ const svalue *sval_unaryop
+ = m_mgr->get_or_create_unaryop (TREE_TYPE (lhs), op, rhs_sval);
+ return sval_unaryop;
}
- break;
case EQ_EXPR:
case GE_EXPR:
case NE_EXPR:
case GT_EXPR:
case LT_EXPR:
+ case UNORDERED_EXPR:
+ case ORDERED_EXPR:
{
tree rhs2 = gimple_assign_rhs2 (assign);
// TODO: constraints between svalues
- svalue_id rhs1_sid = get_rvalue (rhs1, ctxt);
- svalue_id rhs2_sid = get_rvalue (rhs2, ctxt);
+ const svalue *rhs1_sval = get_rvalue (rhs1, ctxt);
+ const svalue *rhs2_sval = get_rvalue (rhs2, ctxt);
- tristate t = eval_condition (rhs1_sid, op, rhs2_sid);
+ tristate t = eval_condition (rhs1_sval, op, rhs2_sval);
if (t.is_known ())
- set_value (lhs_rid,
- get_rvalue (t.is_true ()
- ? boolean_true_node
- : boolean_false_node,
- ctxt),
- ctxt);
+ return get_rvalue (t.is_true ()
+ ? boolean_true_node
+ : boolean_false_node,
+ ctxt);
else
- set_to_new_unknown_value (lhs_rid, TREE_TYPE (lhs), ctxt);
- }
- break;
-
- case NEGATE_EXPR:
- case BIT_NOT_EXPR:
- {
- // TODO: unary ops
-
- // TODO: constant?
-
- set_to_new_unknown_value (lhs_rid, TREE_TYPE (lhs), ctxt);
+ {
+ // TODO: symbolic value for binop
+ const svalue *sval_binop
+ = m_mgr->get_or_create_binop (TREE_TYPE (lhs), op,
+ rhs1_sval, rhs2_sval);
+ return sval_binop;
+ }
}
break;
case PLUS_EXPR:
case MINUS_EXPR:
case MULT_EXPR:
+ case MULT_HIGHPART_EXPR:
case TRUNC_DIV_EXPR:
+ case CEIL_DIV_EXPR:
+ case FLOOR_DIV_EXPR:
+ case ROUND_DIV_EXPR:
case TRUNC_MOD_EXPR:
+ case CEIL_MOD_EXPR:
+ case FLOOR_MOD_EXPR:
+ case ROUND_MOD_EXPR:
+ case RDIV_EXPR:
+ case EXACT_DIV_EXPR:
case LSHIFT_EXPR:
case RSHIFT_EXPR:
+ case LROTATE_EXPR:
+ case RROTATE_EXPR:
case BIT_IOR_EXPR:
case BIT_XOR_EXPR:
case BIT_AND_EXPR:
case MIN_EXPR:
case MAX_EXPR:
+ case COMPLEX_EXPR:
{
/* Binary ops. */
tree rhs2 = gimple_assign_rhs2 (assign);
- svalue_id rhs1_sid = get_rvalue (rhs1, ctxt);
- svalue_id rhs2_sid = get_rvalue (rhs2, ctxt);
+ const svalue *rhs1_sval = get_rvalue (rhs1, ctxt);
+ const svalue *rhs2_sval = get_rvalue (rhs2, ctxt);
- if (tree rhs1_cst = maybe_get_constant (rhs1_sid))
- if (tree rhs2_cst = maybe_get_constant (rhs2_sid))
- {
- tree result = fold_binary (op, TREE_TYPE (lhs),
- rhs1_cst, rhs2_cst);
- if (result && CONSTANT_CLASS_P (result))
- {
- svalue_id result_sid
- = get_or_create_constant_svalue (result);
- set_value (lhs_rid, result_sid, ctxt);
- return;
- }
- }
- set_to_new_unknown_value (lhs_rid, TREE_TYPE (lhs), ctxt);
+ const svalue *sval_binop
+ = m_mgr->get_or_create_binop (TREE_TYPE (lhs), op,
+ rhs1_sval, rhs2_sval);
+ return sval_binop;
+ }
+
+ /* Vector expressions. In theory we could implement these elementwise,
+ but for now, simply return unknown values. */
+ case VEC_DUPLICATE_EXPR:
+ case VEC_SERIES_EXPR:
+ case VEC_COND_EXPR:
+ case VEC_PERM_EXPR:
+ return m_mgr->get_or_create_unknown_svalue (TREE_TYPE (lhs));
+ }
+}
+
+/* Update this model for the ASSIGN stmt, using CTXT to report any
+ diagnostics. */
+
+void
+region_model::on_assignment (const gassign *assign, region_model_context *ctxt)
+{
+ tree lhs = gimple_assign_lhs (assign);
+ tree rhs1 = gimple_assign_rhs1 (assign);
+
+ const region *lhs_reg = get_lvalue (lhs, ctxt);
+
+ /* Most assignments are handled by:
+ set_value (lhs_reg, SVALUE, CTXT)
+ for some SVALUE. */
+ if (const svalue *sval = get_gassign_result (assign, ctxt))
+ {
+ set_value (lhs_reg, sval, ctxt);
+ return;
+ }
+
+ enum tree_code op = gimple_assign_rhs_code (assign);
+ switch (op)
+ {
+ default:
+ {
+ if (1)
+ sorry_at (assign->location, "unhandled assignment op: %qs",
+ get_tree_code_name (op));
+ gcc_unreachable ();
}
break;
- case COMPONENT_REF:
+ case CONSTRUCTOR:
+ {
+ if (TREE_CLOBBER_P (rhs1))
+ {
+ /* e.g. "x ={v} {CLOBBER};" */
+ clobber_region (lhs_reg);
+ }
+ else
+ {
+ /* Any CONSTRUCTOR that survives to this point is either
+ just a zero-init of everything, or a vector. */
+ if (!CONSTRUCTOR_NO_CLEARING (rhs1))
+ zero_fill_region (lhs_reg);
+ unsigned ix;
+ tree index;
+ tree val;
+ FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (rhs1), ix, index, val)
+ {
+ gcc_assert (TREE_CODE (TREE_TYPE (rhs1)) == VECTOR_TYPE);
+ if (!index)
+ index = build_int_cst (integer_type_node, ix);
+ gcc_assert (TREE_CODE (index) == INTEGER_CST);
+ const svalue *index_sval
+ = m_mgr->get_or_create_constant_svalue (index);
+ gcc_assert (index_sval);
+ const region *sub_reg
+ = m_mgr->get_element_region (lhs_reg,
+ TREE_TYPE (val),
+ index_sval);
+ const svalue *val_sval = get_rvalue (val, ctxt);
+ set_value (sub_reg, val_sval, ctxt);
+ }
+ }
+ }
+ break;
+
+ case STRING_CST:
{
- /* LHS = op0.op1; */
- region_id child_rid = get_lvalue (rhs1, ctxt);
- svalue_id child_sid
- = get_region (child_rid)->get_value (*this, true, ctxt);
- set_value (lhs_rid, child_sid, ctxt);
+ /* e.g. "struct s2 x = {{'A', 'B', 'C', 'D'}};". */
+ /* Add a default binding, rather than a direct one, so that array
+ access will "inherit" the individual chars. */
+ const svalue *rhs_sval = get_rvalue (rhs1, ctxt);
+ m_store.set_value (m_mgr->get_store_manager(), lhs_reg, rhs_sval,
+ BK_default);
}
break;
}
bool
region_model::on_call_pre (const gcall *call, region_model_context *ctxt)
{
- region_id lhs_rid;
- tree lhs_type = NULL_TREE;
- if (tree lhs = gimple_call_lhs (call))
- {
- lhs_rid = get_lvalue (lhs, ctxt);
- lhs_type = TREE_TYPE (lhs);
- }
-
- /* Check for uses of poisoned values.
- For now, special-case "free", to avoid warning about "use-after-free"
- when "double free" would be more precise. */
- if (!is_special_named_call_p (call, "free", 1))
- for (unsigned i = 0; i < gimple_call_num_args (call); i++)
- check_for_poison (gimple_call_arg (call, i), ctxt);
-
bool unknown_side_effects = false;
if (tree callee_fndecl = get_fndecl_for_call (call, ctxt))
{
+ call_details cd (call, this, ctxt);
+
+ /* The various impl_call_* member functions are implemented
+ in region-model-impl-calls.cc.
+ Having them split out into separate functions makes it easier
+ to put breakpoints on the handling of specific functions. */
if (is_named_call_p (callee_fndecl, "malloc", call, 1))
- {
- // TODO: capture size as a svalue?
- region_id new_rid = add_new_malloc_region ();
- if (!lhs_rid.null_p ())
- {
- svalue_id ptr_sid
- = get_or_create_ptr_svalue (lhs_type, new_rid);
- set_value (lhs_rid, ptr_sid, ctxt);
- }
- return false;
- }
+ return impl_call_malloc (cd);
+ else if (is_named_call_p (callee_fndecl, "calloc", call, 2))
+ return impl_call_calloc (cd);
else if (is_named_call_p (callee_fndecl, "__builtin_alloca", call, 1))
- {
- region_id frame_rid = get_current_frame_id ();
- region_id new_rid
- = add_region (new symbolic_region (frame_rid, NULL_TREE, false));
- if (!lhs_rid.null_p ())
- {
- svalue_id ptr_sid
- = get_or_create_ptr_svalue (lhs_type, new_rid);
- set_value (lhs_rid, ptr_sid, ctxt);
- }
- return false;
- }
+ return impl_call_alloca (cd);
else if (gimple_call_builtin_p (call, BUILT_IN_EXPECT)
|| gimple_call_builtin_p (call, BUILT_IN_EXPECT_WITH_PROBABILITY)
|| gimple_call_internal_p (call, IFN_BUILTIN_EXPECT))
+ return impl_call_builtin_expect (cd);
+ else if (is_named_call_p (callee_fndecl, "memset", call, 3)
+ || gimple_call_builtin_p (call, BUILT_IN_MEMSET))
{
- /* __builtin_expect's return value is its initial argument. */
- if (!lhs_rid.null_p ())
- {
- tree initial_arg = gimple_call_arg (call, 0);
- svalue_id sid = get_rvalue (initial_arg, ctxt);
- set_value (lhs_rid, sid, ctxt);
- }
+ impl_call_memset (cd);
return false;
}
else if (is_named_call_p (callee_fndecl, "strlen", call, 1))
{
- region_id buf_rid = deref_rvalue (gimple_call_arg (call, 0), ctxt);
- svalue_id buf_sid
- = get_region (buf_rid)->get_value (*this, true, ctxt);
- if (tree cst_expr = maybe_get_constant (buf_sid))
- {
- if (TREE_CODE (cst_expr) == STRING_CST
- && !lhs_rid.null_p ())
- {
- /* TREE_STRING_LENGTH is sizeof, not strlen. */
- int sizeof_cst = TREE_STRING_LENGTH (cst_expr);
- int strlen_cst = sizeof_cst - 1;
- tree t_cst = build_int_cst (lhs_type, strlen_cst);
- svalue_id result_sid
- = get_or_create_constant_svalue (t_cst);
- set_value (lhs_rid, result_sid, ctxt);
- return false;
- }
- }
- /* Otherwise an unknown value. */
- }
- else if (is_named_call_p (callee_fndecl,
- "__analyzer_dump_num_heap_regions", call, 0))
- {
- /* Handle the builtin "__analyzer_dump_num_heap_regions" by emitting
- a warning (for use in DejaGnu tests). */
- int num_heap_regions = 0;
- region_id heap_rid = get_root_region ()->ensure_heap_region (this);
- unsigned i;
- region *region;
- FOR_EACH_VEC_ELT (m_regions, i, region)
- if (region->get_parent () == heap_rid)
- num_heap_regions++;
- /* Use quotes to ensure the output isn't truncated. */
- warning_at (call->location, 0,
- "num heap regions: %qi", num_heap_regions);
- return false;
+ if (impl_call_strlen (cd))
+ return false;
}
else if (!fndecl_has_gimple_body_p (callee_fndecl)
- && !DECL_PURE_P (callee_fndecl))
+ && !DECL_PURE_P (callee_fndecl)
+ && !fndecl_built_in_p (callee_fndecl))
unknown_side_effects = true;
}
else
unknown_side_effects = true;
- /* Unknown return value. */
- if (!lhs_rid.null_p ())
- set_to_new_unknown_value (lhs_rid, lhs_type, ctxt);
+ /* Some of the above cases update the lhs of the call based on the
+ return value. If we get here, it hasn't been done yet, so do that
+ now. */
+ if (tree lhs = gimple_call_lhs (call))
+ {
+ const region *lhs_region = get_lvalue (lhs, ctxt);
+ if (TREE_CODE (lhs) == SSA_NAME)
+ {
+ const svalue *sval = m_mgr->get_or_create_initial_value (lhs_region);
+ set_value (lhs_region, sval, ctxt);
+ }
+ }
return unknown_side_effects;
}
bool unknown_side_effects,
region_model_context *ctxt)
{
- /* Update for "free" here, after sm-handling.
-
- If the ptr points to an underlying heap region, delete the region,
- poisoning pointers to it and regions within it.
-
- We delay this until after sm-state has been updated so that the
- sm-handling can transition all of the various casts of the pointer
- to a "freed" state *before* we delete the related region here.
-
- This has to be done here so that the sm-handling can use the fact
- that they point to the same region to establish that they are equal
- (in region_model::eval_condition_without_cm), and thus transition
- all pointers to the region to the "freed" state together, regardless
- of casts. */
if (tree callee_fndecl = get_fndecl_for_call (call, ctxt))
if (is_named_call_p (callee_fndecl, "free", call, 1))
{
- tree ptr = gimple_call_arg (call, 0);
- svalue_id ptr_sid = get_rvalue (ptr, ctxt);
- svalue *ptr_sval = get_svalue (ptr_sid);
- if (region_svalue *ptr_to_region_sval
- = ptr_sval->dyn_cast_region_svalue ())
- {
- /* If the ptr points to an underlying heap region, delete it,
- poisoning pointers. */
- region_id pointee_rid = ptr_to_region_sval->get_pointee ();
- region_id heap_rid = get_root_region ()->ensure_heap_region (this);
- if (!pointee_rid.null_p ()
- && get_region (pointee_rid)->get_parent () == heap_rid)
- {
- purge_stats stats;
- delete_region_and_descendents (pointee_rid,
- POISON_KIND_FREED,
- &stats, ctxt->get_logger ());
- purge_unused_svalues (&stats, ctxt);
- validate ();
- // TODO: do anything with stats?
- }
- }
+ call_details cd (call, this, ctxt);
+ impl_call_free (cd);
return;
}
handle_unrecognized_call (call, ctxt);
}
-/* Helper class for region_model::handle_unrecognized_call, for keeping
- track of all regions that are reachable, and, of those, which are
- mutable. */
-
-class reachable_regions
-{
-public:
- reachable_regions (region_model *model)
- : m_model (model), m_reachable_rids (), m_mutable_rids ()
- {}
-
- /* Lazily mark RID as being reachable, recursively adding regions
- reachable from RID. */
- void add (region_id rid, bool is_mutable)
- {
- gcc_assert (!rid.null_p ());
-
- unsigned idx = rid.as_int ();
- /* Bail out if this region is already in the sets at the IS_MUTABLE
- level of mutability. */
- if (!is_mutable && bitmap_bit_p (m_reachable_rids, idx))
- return;
- bitmap_set_bit (m_reachable_rids, idx);
-
- if (is_mutable)
- {
- if (bitmap_bit_p (m_mutable_rids, idx))
- return;
- else
- bitmap_set_bit (m_mutable_rids, idx);
- }
-
- /* If this region's value is a pointer, add the pointee. */
- region *reg = m_model->get_region (rid);
- svalue_id sid = reg->get_value_direct ();
- svalue *sval = m_model->get_svalue (sid);
- if (sval)
- if (region_svalue *ptr = sval->dyn_cast_region_svalue ())
- {
- region_id pointee_rid = ptr->get_pointee ();
- /* Use const-ness of pointer type to affect mutability. */
- bool ptr_is_mutable = true;
- if (ptr->get_type ()
- && TREE_CODE (ptr->get_type ()) == POINTER_TYPE
- && TYPE_READONLY (TREE_TYPE (ptr->get_type ())))
- ptr_is_mutable = false;
- add (pointee_rid, ptr_is_mutable);
- }
-
- /* Add descendents of this region. */
- region_id_set descendents (m_model);
- m_model->get_descendents (rid, &descendents, region_id::null ());
- for (unsigned i = 0; i < m_model->get_num_regions (); i++)
- {
- region_id iter_rid = region_id::from_int (i);
- if (descendents.region_p (iter_rid))
- add (iter_rid, is_mutable);
- }
- }
-
- bool mutable_p (region_id rid)
- {
- gcc_assert (!rid.null_p ());
- return bitmap_bit_p (m_mutable_rids, rid.as_int ());
- }
-
-private:
- region_model *m_model;
-
- /* The region ids already seen. This has to be an auto_bitmap rather than
- an auto_sbitmap as new regions can be created within the model during
- the traversal. */
- auto_bitmap m_reachable_rids;
-
- /* The region_ids that can be changed (accessed via non-const pointers). */
- auto_bitmap m_mutable_rids;
-};
-
/* Handle a call CALL to a function with unknown behavior.
Traverse the regions in this model, determining what regions are
{
tree fndecl = get_fndecl_for_call (call, ctxt);
- reachable_regions reachable_regions (this);
+ reachable_regions reachable_regs (&m_store, m_mgr);
/* Determine the reachable regions and their mutability. */
{
- /* Globals. */
- region_id globals_rid = get_globals_region_id ();
- if (!globals_rid.null_p ())
- reachable_regions.add (globals_rid, true);
+ /* Add globals and regions that already escaped in previous
+ unknown calls. */
+ m_store.for_each_cluster (reachable_regions::init_cluster_cb,
+ &reachable_regs);
/* Params that are pointers. */
tree iter_param_types = NULL_TREE;
}
tree parm = gimple_call_arg (call, arg_idx);
- svalue_id parm_sid = get_rvalue (parm, ctxt);
- svalue *parm_sval = get_svalue (parm_sid);
- if (parm_sval)
- if (region_svalue *parm_ptr = parm_sval->dyn_cast_region_svalue ())
- {
- region_id pointee_rid = parm_ptr->get_pointee ();
- bool is_mutable = true;
- if (param_type
- && TREE_CODE (param_type) == POINTER_TYPE
- && TYPE_READONLY (TREE_TYPE (param_type)))
- is_mutable = false;
- reachable_regions.add (pointee_rid, is_mutable);
- }
- // FIXME: what about compound parms that contain ptrs?
+ const svalue *parm_sval = get_rvalue (parm, ctxt);
+ reachable_regs.handle_parm (parm_sval, param_type);
}
}
- /* OK: we now have all reachable regions.
- Set them all to new unknown values. */
- for (unsigned i = 0; i < get_num_regions (); i++)
+ /* Purge sm-state for the svalues that were reachable,
+ both in non-mutable and mutable form. */
+ for (svalue_set::iterator iter
+ = reachable_regs.begin_reachable_svals ();
+ iter != reachable_regs.end_reachable_svals (); ++iter)
{
- region_id iter_rid = region_id::from_int (i);
- if (reachable_regions.mutable_p (iter_rid))
- {
- region *reg = get_region (iter_rid);
+ const svalue *sval = (*iter);
+ ctxt->on_unknown_change (sval, false);
+ }
+ for (svalue_set::iterator iter
+ = reachable_regs.begin_mutable_svals ();
+ iter != reachable_regs.end_mutable_svals (); ++iter)
+ {
+ const svalue *sval = (*iter);
+ ctxt->on_unknown_change (sval, true);
+ }
- /* Purge any sm-state for any underlying svalue. */
- svalue_id curr_sid = reg->get_value_direct ();
- if (!curr_sid.null_p ())
- ctxt->on_unknown_change (curr_sid);
+ /* Mark any clusters that have escaped. */
+ reachable_regs.mark_escaped_clusters ();
- set_to_new_unknown_value (iter_rid,
- reg->get_type (),
- ctxt);
- }
- }
+ /* Update bindings for all clusters that have escaped, whether above,
+ or previously. */
+ m_store.on_unknown_fncall (call, m_mgr->get_store_manager ());
+}
- /* Purge sm-state for any remaining svalues that point to regions that
- were reachable. This helps suppress leak false-positives.
+/* Traverse the regions in this model, determining what regions are
+ reachable from the store and populating *OUT.
- For example, if we had a malloc call that was cast to a "foo *" type,
- we could have a temporary void * for the result of malloc which has its
- own svalue, not reachable from the function call, but for which the
- "foo *" svalue was reachable. If we don't purge it, the temporary will
- be reported as a leak. */
- int i;
- svalue *svalue;
- FOR_EACH_VEC_ELT (m_svalues, i, svalue)
- if (region_svalue *ptr = svalue->dyn_cast_region_svalue ())
- {
- region_id pointee_rid = ptr->get_pointee ();
- if (reachable_regions.mutable_p (pointee_rid))
- ctxt->on_unknown_change (svalue_id::from_int (i));
- }
+ If EXTRA_SVAL is non-NULL, treat it as an additional "root"
+ for reachability (for handling return values from functions when
+ analyzing return of the only function on the stack).
+
+ Find svalues that haven't leaked. */
+
+void
+region_model::get_reachable_svalues (svalue_set *out,
+ const svalue *extra_sval)
+{
+ reachable_regions reachable_regs (&m_store, m_mgr);
+
+ /* Add globals and regions that already escaped in previous
+ unknown calls. */
+ m_store.for_each_cluster (reachable_regions::init_cluster_cb,
+ &reachable_regs);
+
+ if (extra_sval)
+ reachable_regs.handle_sval (extra_sval);
- validate ();
+ /* Get regions for locals that have explicitly bound values. */
+ for (store::cluster_map_t::iterator iter = m_store.begin ();
+ iter != m_store.end (); ++iter)
+ {
+ const region *base_reg = (*iter).first;
+ if (const region *parent = base_reg->get_parent_region ())
+ if (parent->get_kind () == RK_FRAME)
+ reachable_regs.add (base_reg, false);
+ }
+
+ /* Populate *OUT based on the values that were reachable. */
+ for (svalue_set::iterator iter
+ = reachable_regs.begin_reachable_svals ();
+ iter != reachable_regs.end_reachable_svals (); ++iter)
+ out->add (*iter);
}
/* Update this model for the RETURN_STMT, using CTXT to report any
region_model::on_setjmp (const gcall *call, const exploded_node *enode,
region_model_context *ctxt)
{
- region_id buf_rid = deref_rvalue (gimple_call_arg (call, 0), ctxt);
- region *buf = get_region (buf_rid);
+ const svalue *buf_ptr = get_rvalue (gimple_call_arg (call, 0), ctxt);
+ const region *buf_reg = deref_rvalue (buf_ptr, gimple_call_arg (call, 0),
+ ctxt);
- /* Create a setjmp_svalue for this call and store it in BUF_RID's region. */
- if (buf)
+ /* Create a setjmp_svalue for this call and store it in BUF_REG's
+ region. */
+ if (buf_reg)
{
setjmp_record r (enode, call);
- svalue *sval = new setjmp_svalue (r, buf->get_type ());
- svalue_id new_sid = add_svalue (sval);
- set_value (buf_rid, new_sid, ctxt);
+ const svalue *sval
+ = m_mgr->get_or_create_setjmp_svalue (r, buf_reg->get_type ());
+ set_value (buf_reg, sval, ctxt);
}
/* Direct calls to setjmp return 0. */
if (tree lhs = gimple_call_lhs (call))
{
tree zero = build_int_cst (TREE_TYPE (lhs), 0);
- svalue_id new_sid = get_or_create_constant_svalue (zero);
- region_id lhs_rid = get_lvalue (lhs, ctxt);
- set_value (lhs_rid, new_sid, ctxt);
+ const svalue *new_sval = m_mgr->get_or_create_constant_svalue (zero);
+ const region *lhs_reg = get_lvalue (lhs, ctxt);
+ set_value (lhs_reg, new_sval, ctxt);
}
}
/* Update this region_model for rewinding from a "longjmp" at LONGJMP_CALL
to a "setjmp" at SETJMP_CALL where the final stack depth should be
- SETJMP_STACK_DEPTH. Purge any stack frames, potentially reporting on
- leaks to CTXT. */
+ SETJMP_STACK_DEPTH. Pop any stack frames. Leak detection is *not*
+ done, and should be done by the caller. */
void
region_model::on_longjmp (const gcall *longjmp_call, const gcall *setjmp_call,
- int setjmp_stack_depth,
- region_model_context *ctxt)
+ int setjmp_stack_depth, region_model_context *ctxt)
{
/* Evaluate the val, using the frame of the "longjmp". */
tree fake_retval = gimple_call_arg (longjmp_call, 1);
- svalue_id fake_retval_sid = get_rvalue (fake_retval, ctxt);
+ const svalue *fake_retval_sval = get_rvalue (fake_retval, ctxt);
/* Pop any frames until we reach the stack depth of the function where
setjmp was called. */
gcc_assert (get_stack_depth () >= setjmp_stack_depth);
while (get_stack_depth () > setjmp_stack_depth)
- {
- /* Don't purge unused svalues yet, as we're using fake_retval_sid. */
- pop_frame (region_id::null (), false, NULL, ctxt);
- }
+ pop_frame (NULL, NULL, ctxt);
gcc_assert (get_stack_depth () == setjmp_stack_depth);
{
/* Passing 0 as the val to longjmp leads to setjmp returning 1. */
tree t_zero = build_int_cst (TREE_TYPE (fake_retval), 0);
- svalue_id zero_sid = get_or_create_constant_svalue (t_zero);
- tristate eq_zero = eval_condition (fake_retval_sid, EQ_EXPR, zero_sid);
+ const svalue *zero_sval = m_mgr->get_or_create_constant_svalue (t_zero);
+ tristate eq_zero = eval_condition (fake_retval_sval, EQ_EXPR, zero_sval);
/* If we have 0, use 1. */
if (eq_zero.is_true ())
{
tree t_one = build_int_cst (TREE_TYPE (fake_retval), 1);
- svalue_id one_sid = get_or_create_constant_svalue (t_one);
- fake_retval_sid = one_sid;
+ const svalue *one_sval
+ = m_mgr->get_or_create_constant_svalue (t_one);
+ fake_retval_sval = one_sval;
}
else
{
/* Otherwise note that the value is nonzero. */
- m_constraints->add_constraint (fake_retval_sid, NE_EXPR, zero_sid);
+ m_constraints->add_constraint (fake_retval_sval, NE_EXPR, zero_sval);
}
- region_id lhs_rid = get_lvalue (lhs, ctxt);
- set_value (lhs_rid, fake_retval_sid, ctxt);
- }
+ /* Decorate the return value from setjmp as being unmergeable,
+ so that we don't attempt to merge states with it as zero
+ with states in which it's nonzero, leading to a clean distinction
+ in the exploded_graph between the first return and the second
+ return. */
+ fake_retval_sval = m_mgr->get_or_create_unmergeable (fake_retval_sval);
- /* Now that we've assigned the fake_retval, we can purge the unused
- svalues, which could detect leaks. */
- purge_unused_svalues (NULL, ctxt, NULL);
- validate ();
+ const region *lhs_reg = get_lvalue (lhs, ctxt);
+ set_value (lhs_reg, fake_retval_sval, ctxt);
+ }
}
/* Update this region_model for a phi stmt of the form
void
region_model::handle_phi (const gphi *phi,
- tree lhs, tree rhs, bool is_back_edge,
+ tree lhs, tree rhs,
region_model_context *ctxt)
{
/* For now, don't bother tracking the .MEM SSA names. */
if (VAR_DECL_IS_VIRTUAL_OPERAND (var))
return;
- svalue_id rhs_sid = get_rvalue (rhs, ctxt);
+ const svalue *rhs_sval = get_rvalue (rhs, ctxt);
- if (is_back_edge && get_svalue (rhs_sid)->get_kind () != SK_UNKNOWN)
- {
- /* If we have a back edge, we probably have a loop.
- Use an unknown value, to avoid effectively unrolling the
- loop.
- To terminate, we need to avoid generating a series of
- models with an unbounded monotonically increasing number of
- redundant unknown values; hence we need to purge svalues
- before inserting the state into the exploded graph, to
- collect unused svalues. */
- set_to_new_unknown_value (get_lvalue (lhs, ctxt), TREE_TYPE (lhs), ctxt);
- }
- else
- set_value (get_lvalue (lhs, ctxt), rhs_sid, ctxt);
+ set_value (get_lvalue (lhs, ctxt), rhs_sval, ctxt);
if (ctxt)
ctxt->on_phi (phi, rhs);
Get the id of the region for PV within this region_model,
emitting any diagnostics to CTXT. */
-region_id
+const region *
region_model::get_lvalue_1 (path_var pv, region_model_context *ctxt)
{
tree expr = pv.m_tree;
switch (TREE_CODE (expr))
{
default:
- return make_region_for_unexpected_tree_code (ctxt, expr,
- dump_location_t ());
+ return m_mgr->get_region_for_unexpected_tree_code (ctxt, expr,
+ dump_location_t ());
case ARRAY_REF:
{
tree array = TREE_OPERAND (expr, 0);
tree index = TREE_OPERAND (expr, 1);
-#if 0
- // TODO: operands 2 and 3, if present:
- gcc_assert (TREE_OPERAND (expr, 2) == NULL_TREE);
- gcc_assert (TREE_OPERAND (expr, 3) == NULL_TREE);
-#endif
- region_id array_rid = get_lvalue (array, ctxt);
- svalue_id index_sid = get_rvalue (index, ctxt);
- region *base_array_reg = get_region (array_rid);
- array_region *array_reg = base_array_reg->dyn_cast_array_region ();
- if (!array_reg)
- {
- /* Normally, array_rid ought to refer to an array_region, since
- array's type will be ARRAY_TYPE. However, if we have an
- unexpected tree code for array, we could have a
- symbolic_region here. If so, we're in error-handling. */
- gcc_assert (base_array_reg->get_type () == NULL_TREE);
- return make_region_for_unexpected_tree_code (ctxt, expr,
- dump_location_t ());
- }
- return array_reg->get_element (this, array_rid, index_sid, ctxt);
+ const region *array_reg = get_lvalue (array, ctxt);
+ const svalue *index_sval = get_rvalue (index, ctxt);
+ return m_mgr->get_element_region (array_reg,
+ TREE_TYPE (TREE_TYPE (array)),
+ index_sval);
}
break;
- case BIT_FIELD_REF:
- {
- /* For now, create a view, as if a cast, ignoring the bit positions. */
- tree obj = TREE_OPERAND (expr, 0);
- return get_or_create_view (get_lvalue (obj, ctxt), TREE_TYPE (expr),
- ctxt);
- };
- break;
-
case MEM_REF:
{
tree ptr = TREE_OPERAND (expr, 0);
tree offset = TREE_OPERAND (expr, 1);
- svalue_id ptr_sid = get_rvalue (ptr, ctxt);
- svalue_id offset_sid = get_rvalue (offset, ctxt);
- return get_or_create_mem_ref (TREE_TYPE (expr), ptr_sid,
- offset_sid, ctxt);
+ const svalue *ptr_sval = get_rvalue (ptr, ctxt);
+ const svalue *offset_sval = get_rvalue (offset, ctxt);
+ const region *star_ptr = deref_rvalue (ptr_sval, ptr, ctxt);
+ return m_mgr->get_offset_region (star_ptr,
+ TREE_TYPE (expr),
+ offset_sval);
}
break;
+ case FUNCTION_DECL:
+ return m_mgr->get_region_for_fndecl (expr);
+
+ case LABEL_DECL:
+ return m_mgr->get_region_for_label (expr);
+
case VAR_DECL:
/* Handle globals. */
if (is_global_var (expr))
- {
- region_id globals_rid
- = get_root_region ()->ensure_globals_region (this);
- map_region *globals = get_region<map_region> (globals_rid);
- region_id var_rid = globals->get_or_create (this, globals_rid, expr,
- TREE_TYPE (expr), ctxt);
- return var_rid;
- }
+ return m_mgr->get_region_for_global (expr);
/* Fall through. */
|| TREE_CODE (expr) == VAR_DECL
|| TREE_CODE (expr) == RESULT_DECL);
- int stack_depth = pv.m_stack_depth;
- stack_region *stack = get_root_region ()->get_stack_region (this);
- gcc_assert (stack);
- region_id frame_rid = stack->get_frame_rid (stack_depth);
- frame_region *frame = get_region <frame_region> (frame_rid);
+ int stack_index = pv.m_stack_depth;
+ const frame_region *frame = get_frame_at_index (stack_index);
gcc_assert (frame);
- region_id child_rid = frame->get_or_create (this, frame_rid, expr,
- TREE_TYPE (expr), ctxt);
- return child_rid;
+ return frame->get_region_for_local (m_mgr, expr);
}
case COMPONENT_REF:
/* obj.field */
tree obj = TREE_OPERAND (expr, 0);
tree field = TREE_OPERAND (expr, 1);
- tree obj_type = TREE_TYPE (obj);
- if (TREE_CODE (obj_type) != RECORD_TYPE
- && TREE_CODE (obj_type) != UNION_TYPE)
- return make_region_for_unexpected_tree_code (ctxt, obj_type,
- dump_location_t ());
- region_id obj_rid = get_lvalue (obj, ctxt);
- region_id struct_or_union_rid
- = get_or_create_view (obj_rid, TREE_TYPE (obj), ctxt);
- return get_field_region (struct_or_union_rid, field, ctxt);
- }
- break;
-
- case CONST_DECL:
- {
- tree cst_type = TREE_TYPE (expr);
- region_id cst_rid = add_region_for_type (m_root_rid, cst_type, ctxt);
- if (tree value = DECL_INITIAL (expr))
- {
- svalue_id sid = get_rvalue (value, ctxt);
- get_region (cst_rid)->set_value (*this, cst_rid, sid, ctxt);
- }
- return cst_rid;
+ const region *obj_reg = get_lvalue (obj, ctxt);
+ return m_mgr->get_field_region (obj_reg, field);
}
break;
case STRING_CST:
- {
- tree cst_type = TREE_TYPE (expr);
- array_region *cst_region = new array_region (m_root_rid, cst_type);
- region_id cst_rid = add_region (cst_region);
- svalue_id cst_sid = get_or_create_constant_svalue (expr);
- cst_region->set_value (*this, cst_rid, cst_sid, ctxt);
- return cst_rid;
- }
- break;
-
- case NOP_EXPR:
- case VIEW_CONVERT_EXPR:
- {
- tree obj = TREE_OPERAND (expr, 0);
- return get_or_create_view (get_lvalue (obj, ctxt), TREE_TYPE (expr),
- ctxt);
- };
- break;
+ return m_mgr->get_region_for_string (expr);
}
}
-/* If we see a tree code we don't know how to handle, rather than
- ICE or generate bogus results, create a dummy region, and notify
- CTXT so that it can mark the new state as being not properly
- modelled. The exploded graph can then stop exploring that path,
- since any diagnostics we might issue will have questionable
- validity. */
-
-region_id
-region_model::make_region_for_unexpected_tree_code (region_model_context *ctxt,
- tree t,
- const dump_location_t &loc)
-{
- gcc_assert (ctxt);
- region_id new_rid
- = add_region (new symbolic_region (m_root_rid, NULL_TREE, false));
- ctxt->on_unexpected_tree_code (t, loc);
- return new_rid;
-}
-
/* Assert that SRC_TYPE can be converted to DST_TYPE as a no-op. */
static void
assert_compat_types (tree src_type, tree dst_type)
{
if (src_type && dst_type && !VOID_TYPE_P (dst_type))
- gcc_checking_assert (useless_type_conversion_p (src_type, dst_type));
+ {
+#if CHECKING_P
+ if (!(useless_type_conversion_p (src_type, dst_type)))
+ internal_error ("incompatible types: %qT and %qT", src_type, dst_type);
+#endif
+ }
}
-/* Get the id of the region for PV within this region_model,
+/* Get the region for PV within this region_model,
emitting any diagnostics to CTXT. */
-region_id
+const region *
region_model::get_lvalue (path_var pv, region_model_context *ctxt)
{
if (pv.m_tree == NULL_TREE)
- return region_id::null ();
+ return NULL;
- region_id result_rid = get_lvalue_1 (pv, ctxt);
- assert_compat_types (get_region (result_rid)->get_type (),
- TREE_TYPE (pv.m_tree));
- return result_rid;
+ const region *result_reg = get_lvalue_1 (pv, ctxt);
+ assert_compat_types (result_reg->get_type (), TREE_TYPE (pv.m_tree));
+ return result_reg;
}
-/* Get the region_id for EXPR within this region_model (assuming the most
+/* Get the region for EXPR within this region_model (assuming the most
recent stack frame if it's a local). */
-region_id
+const region *
region_model::get_lvalue (tree expr, region_model_context *ctxt)
{
return get_lvalue (path_var (expr, get_stack_depth () - 1), ctxt);
Get the value of PV within this region_model,
emitting any diagnostics to CTXT. */
-svalue_id
+const svalue *
region_model::get_rvalue_1 (path_var pv, region_model_context *ctxt)
{
gcc_assert (pv.m_tree);
switch (TREE_CODE (pv.m_tree))
{
default:
- {
- svalue *unknown_sval = new unknown_svalue (TREE_TYPE (pv.m_tree));
- return add_svalue (unknown_sval);
- }
- break;
+ gcc_unreachable ();
case ADDR_EXPR:
{
/* "&EXPR". */
tree expr = pv.m_tree;
tree op0 = TREE_OPERAND (expr, 0);
- if (TREE_CODE (op0) == FUNCTION_DECL)
- return get_svalue_for_fndecl (TREE_TYPE (expr), op0, ctxt);
- else if (TREE_CODE (op0) == LABEL_DECL)
- return get_svalue_for_label (TREE_TYPE (expr), op0, ctxt);
- region_id expr_rid = get_lvalue (op0, ctxt);
- return get_or_create_ptr_svalue (TREE_TYPE (expr), expr_rid);
+ const region *expr_reg = get_lvalue (op0, ctxt);
+ return m_mgr->get_ptr_svalue (TREE_TYPE (expr), expr_reg);
}
break;
+ case BIT_FIELD_REF:
+ return m_mgr->get_or_create_unknown_svalue (TREE_TYPE (pv.m_tree));
+
+ case SSA_NAME:
+ case VAR_DECL:
+ case PARM_DECL:
+ case RESULT_DECL:
case ARRAY_REF:
{
- region_id element_rid = get_lvalue (pv, ctxt);
- return get_region (element_rid)->get_value (*this, true, ctxt);
+ const region *element_reg = get_lvalue (pv, ctxt);
+ return get_store_value (element_reg);
}
+ case REALPART_EXPR:
+ case IMAGPART_EXPR:
+ case VIEW_CONVERT_EXPR:
+ {
+ tree expr = pv.m_tree;
+ tree arg = TREE_OPERAND (expr, 0);
+ const svalue *arg_sval = get_rvalue (arg, ctxt);
+ const svalue *sval_unaryop
+ = m_mgr->get_or_create_unaryop (TREE_TYPE (expr), TREE_CODE (expr),
+ arg_sval);
+ return sval_unaryop;
+ };
+
case INTEGER_CST:
case REAL_CST:
+ case COMPLEX_CST:
+ case VECTOR_CST:
case STRING_CST:
- return get_or_create_constant_svalue (pv.m_tree);
+ return m_mgr->get_or_create_constant_svalue (pv.m_tree);
+
+ case POINTER_PLUS_EXPR:
+ {
+ tree expr = pv.m_tree;
+ tree ptr = TREE_OPERAND (expr, 0);
+ tree offset = TREE_OPERAND (expr, 1);
+ const svalue *ptr_sval = get_rvalue (ptr, ctxt);
+ const svalue *offset_sval = get_rvalue (offset, ctxt);
+ const svalue *sval_binop
+ = m_mgr->get_or_create_binop (TREE_TYPE (expr), POINTER_PLUS_EXPR,
+ ptr_sval, offset_sval);
+ return sval_binop;
+ }
+
+ /* Binary ops. */
+ case PLUS_EXPR:
+ case MULT_EXPR:
+ {
+ tree expr = pv.m_tree;
+ tree arg0 = TREE_OPERAND (expr, 0);
+ tree arg1 = TREE_OPERAND (expr, 1);
+ const svalue *arg0_sval = get_rvalue (arg0, ctxt);
+ const svalue *arg1_sval = get_rvalue (arg1, ctxt);
+ const svalue *sval_binop
+ = m_mgr->get_or_create_binop (TREE_TYPE (expr), TREE_CODE (expr),
+ arg0_sval, arg1_sval);
+ return sval_binop;
+ }
case COMPONENT_REF:
case MEM_REF:
- case SSA_NAME:
- case VAR_DECL:
- case PARM_DECL:
- case RESULT_DECL:
{
- region_id var_rid = get_lvalue (pv, ctxt);
- return get_region (var_rid)->get_value (*this, true, ctxt);
+ const region *ref_reg = get_lvalue (pv, ctxt);
+ return get_store_value (ref_reg);
}
}
}
/* Get the value of PV within this region_model,
emitting any diagnostics to CTXT. */
-svalue_id
+const svalue *
region_model::get_rvalue (path_var pv, region_model_context *ctxt)
{
if (pv.m_tree == NULL_TREE)
- return svalue_id::null ();
- svalue_id result_sid = get_rvalue_1 (pv, ctxt);
+ return NULL;
- assert_compat_types (get_svalue (result_sid)->get_type (),
- TREE_TYPE (pv.m_tree));
+ const svalue *result_sval = get_rvalue_1 (pv, ctxt);
- return result_sid;
+ assert_compat_types (result_sval->get_type (), TREE_TYPE (pv.m_tree));
+
+ return result_sval;
}
/* Get the value of EXPR within this region_model (assuming the most
recent stack frame if it's a local). */
-svalue_id
+const svalue *
region_model::get_rvalue (tree expr, region_model_context *ctxt)
{
return get_rvalue (path_var (expr, get_stack_depth () - 1), ctxt);
}
-/* Return an svalue_id for a pointer to RID of type PTR_TYPE, reusing
- existing pointer values if one is available. */
-
-svalue_id
-region_model::get_or_create_ptr_svalue (tree ptr_type, region_id rid)
-{
- /* Reuse existing region_svalue, if one of the right type is
- available. */
- /* In theory we could stash a svalue_id in "region", but differing
- pointer types muddles things.
- For now, just do a linear search through all existing svalues. */
- int i;
- svalue *svalue;
- FOR_EACH_VEC_ELT (m_svalues, i, svalue)
- if (region_svalue *ptr_svalue = svalue->dyn_cast_region_svalue ())
- if (ptr_svalue->get_pointee () == rid
- && ptr_svalue->get_type () == ptr_type)
- return svalue_id::from_int (i);
-
- return add_svalue (new region_svalue (ptr_type, rid));
-}
-
-/* Return an svalue_id for a constant_svalue for CST_EXPR,
- creating the constant_svalue if necessary.
- The constant_svalue instances are reused, based on pointer equality
- of trees */
-
-svalue_id
-region_model::get_or_create_constant_svalue (tree cst_expr)
-{
- gcc_assert (cst_expr);
-
- /* Reuse one if it already exists. */
- // TODO: maybe store a map, rather than do linear search?
- int i;
- svalue *svalue;
- FOR_EACH_VEC_ELT (m_svalues, i, svalue)
- if (svalue->maybe_get_constant () == cst_expr)
- return svalue_id::from_int (i);
-
- svalue_id cst_sid = add_svalue (new constant_svalue (cst_expr));
- return cst_sid;
-}
-
-/* Return an svalue_id for a region_svalue for FNDECL,
- creating the function_region if necessary. */
-
-svalue_id
-region_model::get_svalue_for_fndecl (tree ptr_type, tree fndecl,
- region_model_context *ctxt)
-{
- gcc_assert (TREE_CODE (fndecl) == FUNCTION_DECL);
- region_id function_rid = get_region_for_fndecl (fndecl, ctxt);
- return get_or_create_ptr_svalue (ptr_type, function_rid);
-}
-
-/* Return a region_id for a function_region for FNDECL,
- creating it if necessary. */
-
-region_id
-region_model::get_region_for_fndecl (tree fndecl,
- region_model_context *ctxt)
-{
- gcc_assert (TREE_CODE (fndecl) == FUNCTION_DECL);
-
- region_id code_rid = get_root_region ()->ensure_code_region (this);
- code_region *code = get_root_region ()->get_code_region (this);
-
- return code->get_or_create (this, code_rid, fndecl, TREE_TYPE (fndecl),
- ctxt);
-}
-
-/* Return an svalue_id for a region_svalue for LABEL,
- creating the label_region if necessary. */
-
-svalue_id
-region_model::get_svalue_for_label (tree ptr_type, tree label,
- region_model_context *ctxt)
-{
- gcc_assert (TREE_CODE (label) == LABEL_DECL);
- region_id label_rid = get_region_for_label (label, ctxt);
- return get_or_create_ptr_svalue (ptr_type, label_rid);
-}
-
-/* Return a region_id for a label_region for LABEL,
- creating it if necessary. */
-
-region_id
-region_model::get_region_for_label (tree label,
- region_model_context *ctxt)
-{
- gcc_assert (TREE_CODE (label) == LABEL_DECL);
-
- tree fndecl = DECL_CONTEXT (label);
- gcc_assert (fndecl && TREE_CODE (fndecl) == FUNCTION_DECL);
-
- region_id func_rid = get_region_for_fndecl (fndecl, ctxt);
- function_region *func_reg = get_region <function_region> (func_rid);
- return func_reg->get_or_create (this, func_rid, label, TREE_TYPE (label),
- ctxt);
-}
-
-/* Build a cast of SRC_EXPR to DST_TYPE, or return NULL_TREE.
-
- Adapted from gcc::jit::playback::context::build_cast, which in turn is
- adapted from
- - c/c-typeck.c:build_c_cast
- - c/c-convert.c: convert
- - convert.h
- Only some kinds of cast are currently supported here. */
+/* Get a value for REG, looking it up in the store, or otherwise falling
+ back to "initial" or "unknown" values. */
-static tree
-build_cast (tree dst_type, tree src_expr)
+const svalue *
+region_model::get_store_value (const region *reg) const
{
- tree result = targetm.convert_to_type (dst_type, src_expr);
- if (result)
- return result;
- enum tree_code dst_code = TREE_CODE (dst_type);
- switch (dst_code)
+ const svalue *sval
+ = m_store.get_any_binding (m_mgr->get_store_manager (), reg);
+ if (sval)
{
- case INTEGER_TYPE:
- case ENUMERAL_TYPE:
- result = convert_to_integer (dst_type, src_expr);
- goto maybe_fold;
-
- case BOOLEAN_TYPE:
- /* Compare with c_objc_common_truthvalue_conversion and
- c_common_truthvalue_conversion. */
- /* For now, convert to: (src_expr != 0) */
- result = build2 (NE_EXPR, dst_type,
- src_expr,
- build_int_cst (TREE_TYPE (src_expr), 0));
- goto maybe_fold;
-
- case REAL_TYPE:
- result = convert_to_real (dst_type, src_expr);
- goto maybe_fold;
-
- case POINTER_TYPE:
- result = build1 (NOP_EXPR, dst_type, src_expr);
- goto maybe_fold;
-
- default:
- return NULL_TREE;
-
- maybe_fold:
- if (TREE_CODE (result) != C_MAYBE_CONST_EXPR)
- result = fold (result);
- return result;
+ if (reg->get_type ())
+ sval = m_mgr->get_or_create_cast (reg->get_type (), sval);
+ return sval;
}
-}
-/* If the type of SID's underlying value is DST_TYPE, return SID.
- Otherwise, attempt to create (or reuse) an svalue representing an access
- of SID as a DST_TYPE and return that value's svalue_id. */
-
-svalue_id
-region_model::maybe_cast_1 (tree dst_type, svalue_id sid)
-{
- svalue *sval = get_svalue (sid);
- tree src_type = sval->get_type ();
- if (src_type == dst_type)
- return sid;
-
- if (POINTER_TYPE_P (dst_type)
- || POINTER_TYPE_P (src_type))
- {
- /* Pointer to region. */
- if (region_svalue *ptr_sval = sval->dyn_cast_region_svalue ())
- return get_or_create_ptr_svalue (dst_type, ptr_sval->get_pointee ());
-
- /* Unknown pointer? Get or create a new unknown pointer of the
- correct type, preserving the equality between the pointers. */
- if (sval->dyn_cast_unknown_svalue ())
+ /* Special-case: read at a constant index within a STRING_CST. */
+ if (const offset_region *offset_reg = reg->dyn_cast_offset_region ())
+ if (tree byte_offset_cst
+ = offset_reg->get_byte_offset ()->maybe_get_constant ())
+ if (const string_region *str_reg
+ = reg->get_parent_region ()->dyn_cast_string_region ())
{
- equiv_class &ec = m_constraints->get_equiv_class (sid);
-
- /* Look for an existing pointer of the correct type within the EC. */
- int i;
- svalue_id *equiv_sid;
- FOR_EACH_VEC_ELT (ec.m_vars, i, equiv_sid)
- {
- svalue *equiv_val = get_svalue (*equiv_sid);
- if (equiv_val->get_type () == dst_type)
- return *equiv_sid;
- }
-
- /* Otherwise, create a new unknown pointer of the correct type. */
- svalue *unknown_sval = new unknown_svalue (dst_type);
- svalue_id new_ptr_sid = add_svalue (unknown_sval);
- m_constraints->add_constraint (sid, EQ_EXPR, new_ptr_sid);
- return new_ptr_sid;
+ tree string_cst = str_reg->get_string_cst ();
+ if (const svalue *char_sval
+ = m_mgr->maybe_get_char_from_string_cst (string_cst,
+ byte_offset_cst))
+ return m_mgr->get_or_create_cast (reg->get_type (), char_sval);
}
- }
-
- /* Attempt to cast constants. */
- if (tree src_cst = sval->maybe_get_constant ())
- {
- if (tree dst = build_cast (dst_type, src_cst))
- if (CONSTANT_CLASS_P (dst))
- return get_or_create_constant_svalue (dst);
- }
- /* Otherwise, return a new unknown value. */
- svalue *unknown_sval = new unknown_svalue (dst_type);
- return add_svalue (unknown_sval);
-}
+ /* Special-case: read the initial char of a STRING_CST. */
+ if (const cast_region *cast_reg = reg->dyn_cast_cast_region ())
+ if (const string_region *str_reg
+ = cast_reg->get_original_region ()->dyn_cast_string_region ())
+ {
+ tree string_cst = str_reg->get_string_cst ();
+ tree byte_offset_cst = build_int_cst (integer_type_node, 0);
+ if (const svalue *char_sval
+ = m_mgr->maybe_get_char_from_string_cst (string_cst,
+ byte_offset_cst))
+ return m_mgr->get_or_create_cast (reg->get_type (), char_sval);
+ }
-/* If the type of SID's underlying value is DST_TYPE, return SID.
- Otherwise, attempt to create (or reuse) an svalue representing an access
- of SID as a DST_TYPE and return that value's svalue_id.
+ /* Otherwise we implicitly have the initial value of the region
+ (if the cluster had been touched, binding_cluster::get_any_binding
+ would have returned UNKNOWN, and we would already have returned
+ that above). */
- If the result != SID, then call CTXT's on_cast vfunc (if CTXT is non-NULL),
- so that sm-state can be propagated from SID to the result. */
+ /* Special-case: to avoid having to explicitly update all previously
+ untracked globals when calling an unknown fn, we instead change
+ the default here so we implicitly have an unknown value for such
+ regions. */
+ if (m_store.called_unknown_fn_p ())
+ if (reg->get_base_region ()->get_parent_region ()->get_kind ()
+ == RK_GLOBALS)
+ return m_mgr->get_or_create_unknown_svalue (reg->get_type ());
-svalue_id
-region_model::maybe_cast (tree dst_type, svalue_id sid,
- region_model_context *ctxt)
-{
- svalue_id result = maybe_cast_1 (dst_type, sid);
- if (result != sid)
- if (ctxt)
- {
- /* Notify ctxt about a cast, so any sm-state can be copied. */
- ctxt->on_cast (sid, result);
- }
- return result;
+ return m_mgr->get_or_create_initial_value (reg);
}
-/* Ensure that the region for OBJ_RID has a child region for FIELD;
- return the child region's region_id. */
+/* Return false if REG does not exist, true if it may do.
+ This is for detecting regions within the stack that don't exist anymore
+ after frames are popped. */
-region_id
-region_model::get_field_region (region_id struct_or_union_rid, tree field,
- region_model_context *ctxt)
+bool
+region_model::region_exists_p (const region *reg) const
{
- struct_or_union_region *sou_reg
- = get_region<struct_or_union_region> (struct_or_union_rid);
-
- /* Inherit constness from parent type. */
- const int qual_mask = TYPE_QUAL_CONST;
- int sou_quals = TYPE_QUALS (sou_reg->get_type ()) & qual_mask;
- tree field_type = TREE_TYPE (field);
- tree field_type_with_quals = build_qualified_type (field_type, sou_quals);
-
- // TODO: maybe convert to a vfunc?
- if (sou_reg->get_kind () == RK_UNION)
+ /* If within a stack frame, check that the stack frame is live. */
+ if (const frame_region *enclosing_frame = reg->maybe_get_frame_region ())
{
- /* Union.
- Get a view of the union as a whole, with the type of the field. */
- region_id view_rid
- = get_or_create_view (struct_or_union_rid, field_type_with_quals, ctxt);
- return view_rid;
- }
- else
- {
- /* Struct. */
- region_id child_rid
- = sou_reg->get_or_create (this, struct_or_union_rid, field,
- field_type_with_quals, ctxt);
- return child_rid;
+ /* Check that the current frame is the enclosing frame, or is called
+ by it. */
+ for (const frame_region *iter_frame = get_current_frame (); iter_frame;
+ iter_frame = iter_frame->get_calling_frame ())
+ if (iter_frame == enclosing_frame)
+ return true;
+ return false;
}
+
+ return true;
}
-/* Get a region_id for referencing PTR_SID, creating a region if need be, and
- potentially generating warnings via CTXT. */
+/* Get a region for referencing PTR_SVAL, creating a region if need be, and
+ potentially generating warnings via CTXT.
+ PTR_TREE if non-NULL can be used when emitting diagnostics. */
-region_id
-region_model::deref_rvalue (svalue_id ptr_sid, region_model_context *ctxt)
+const region *
+region_model::deref_rvalue (const svalue *ptr_sval, tree ptr_tree,
+ region_model_context *ctxt)
{
- gcc_assert (!ptr_sid.null_p ());
- svalue *ptr_svalue = get_svalue (ptr_sid);
- gcc_assert (ptr_svalue);
+ gcc_assert (ptr_sval);
- switch (ptr_svalue->get_kind ())
+ switch (ptr_sval->get_kind ())
{
+ default:
+ gcc_unreachable ();
+
case SK_REGION:
{
- region_svalue *region_sval = as_a <region_svalue *> (ptr_svalue);
+ const region_svalue *region_sval
+ = as_a <const region_svalue *> (ptr_sval);
return region_sval->get_pointee ();
}
+ case SK_BINOP:
+ {
+ const binop_svalue *binop_sval
+ = as_a <const binop_svalue *> (ptr_sval);
+ switch (binop_sval->get_op ())
+ {
+ case POINTER_PLUS_EXPR:
+ {
+ /* If we have a symbolic value expressing pointer arithmetic,
+ try to convert it to a suitable region. */
+ const region *parent_region
+ = deref_rvalue (binop_sval->get_arg0 (), NULL_TREE, ctxt);
+ const svalue *offset = binop_sval->get_arg1 ();
+ tree type= TREE_TYPE (ptr_sval->get_type ());
+ return m_mgr->get_offset_region (parent_region, type, offset);
+ }
+ default:
+ goto create_symbolic_region;
+ }
+ }
+
case SK_CONSTANT:
+ case SK_INITIAL:
+ case SK_UNARYOP:
+ case SK_SUB:
+ case SK_WIDENING:
+ case SK_CONJURED:
goto create_symbolic_region;
case SK_POISONED:
{
if (ctxt)
- if (tree ptr = get_representative_tree (ptr_sid))
- {
- poisoned_svalue *poisoned_sval
- = as_a <poisoned_svalue *> (ptr_svalue);
- enum poison_kind pkind = poisoned_sval->get_poison_kind ();
- ctxt->warn (new poisoned_value_diagnostic (ptr, pkind));
- }
+ {
+ tree ptr = get_representative_tree (ptr_sval);
+ /* If we can't get a representative tree for PTR_SVAL
+ (e.g. if it hasn't been bound into the store), then
+ fall back on PTR_TREE, if non-NULL. */
+ if (!ptr)
+ ptr = ptr_tree;
+ if (ptr)
+ {
+ const poisoned_svalue *poisoned_sval
+ = as_a <const poisoned_svalue *> (ptr_sval);
+ enum poison_kind pkind = poisoned_sval->get_poison_kind ();
+ ctxt->warn (new poisoned_value_diagnostic (ptr, pkind));
+ }
+ }
goto create_symbolic_region;
}
case SK_UNKNOWN:
{
create_symbolic_region:
- /* We need a symbolic_region to represent this unknown region.
- We don't know if it on the heap, stack, or a global,
- so use the root region as parent. */
- region_id new_rid
- = add_region (new symbolic_region (m_root_rid, NULL_TREE, false));
-
- /* We need to write the region back into the pointer,
- or we'll get a new, different region each time.
- We do this by changing the meaning of ptr_sid, replacing
- the unknown value with the ptr to the new region.
- We replace the meaning of the ID rather than simply writing
- to PTR's lvalue since there could be several places sharing
- the same unknown ptr value. */
- svalue *ptr_val
- = new region_svalue (ptr_svalue->get_type (), new_rid);
- replace_svalue (ptr_sid, ptr_val);
-
- return new_rid;
+ return m_mgr->get_symbolic_region (ptr_sval);
}
case SK_SETJMP:
gcc_unreachable ();
}
-/* Get a region_id for referencing PTR, creating a region if need be, and
- potentially generating warnings via CTXT. */
+/* Set the value of the region given by LHS_REG to the value given
+ by RHS_SVAL. */
-region_id
-region_model::deref_rvalue (tree ptr, region_model_context *ctxt)
+void
+region_model::set_value (const region *lhs_reg, const svalue *rhs_sval,
+ region_model_context */*ctxt*/)
{
- svalue_id ptr_sid = get_rvalue (ptr, ctxt);
- return deref_rvalue (ptr_sid, ctxt);
+ gcc_assert (lhs_reg);
+ gcc_assert (rhs_sval);
+
+ m_store.set_value (m_mgr->get_store_manager(), lhs_reg, rhs_sval,
+ BK_direct);
}
-/* Set the value of the region given by LHS_RID to the value given
- by RHS_SID. */
+/* Set the value of the region given by LHS to the value given by RHS. */
void
-region_model::set_value (region_id lhs_rid, svalue_id rhs_sid,
- region_model_context *ctxt)
+region_model::set_value (tree lhs, tree rhs, region_model_context *ctxt)
{
- gcc_assert (!lhs_rid.null_p ());
- gcc_assert (!rhs_sid.null_p ());
- get_region (lhs_rid)->set_value (*this, lhs_rid, rhs_sid, ctxt);
+ const region *lhs_reg = get_lvalue (lhs, ctxt);
+ const svalue *rhs_sval = get_rvalue (rhs, ctxt);
+ gcc_assert (lhs_reg);
+ gcc_assert (rhs_sval);
+ set_value (lhs_reg, rhs_sval, ctxt);
}
-/* Set the value of the region given by LHS to the value given
- by RHS. */
+/* Remove all bindings overlapping REG within the store. */
void
-region_model::set_value (tree lhs, tree rhs, region_model_context *ctxt)
+region_model::clobber_region (const region *reg)
+{
+ m_store.clobber_region (m_mgr->get_store_manager(), reg);
+}
+
+/* Remove any bindings for REG within the store. */
+
+void
+region_model::purge_region (const region *reg)
+{
+ m_store.purge_region (m_mgr->get_store_manager(), reg);
+}
+
+/* Zero-fill REG. */
+
+void
+region_model::zero_fill_region (const region *reg)
+{
+ m_store.zero_fill_region (m_mgr->get_store_manager(), reg);
+}
+
+/* Mark REG as having unknown content. */
+
+void
+region_model::mark_region_as_unknown (const region *reg)
{
- region_id lhs_rid = get_lvalue (lhs, ctxt);
- svalue_id rhs_sid = get_rvalue (rhs, ctxt);
- gcc_assert (!lhs_rid.null_p ());
- gcc_assert (!rhs_sid.null_p ());
- set_value (lhs_rid, rhs_sid, ctxt);
+ m_store.mark_region_as_unknown (m_mgr->get_store_manager(), reg);
}
-/* Determine what is known about the condition "LHS_SID OP RHS_SID" within
+/* Determine what is known about the condition "LHS_SVAL OP RHS_SVAL" within
this model. */
tristate
-region_model::eval_condition (svalue_id lhs_sid,
- enum tree_code op,
- svalue_id rhs_sid) const
+region_model::eval_condition (const svalue *lhs,
+ enum tree_code op,
+ const svalue *rhs) const
{
- svalue *lhs = get_svalue (lhs_sid);
- svalue *rhs = get_svalue (rhs_sid);
-
/* For now, make no attempt to capture constraints on floating-point
values. */
if ((lhs->get_type () && FLOAT_TYPE_P (lhs->get_type ()))
|| (rhs->get_type () && FLOAT_TYPE_P (rhs->get_type ())))
return tristate::unknown ();
- tristate ts = eval_condition_without_cm (lhs_sid, op, rhs_sid);
-
+ tristate ts = eval_condition_without_cm (lhs, op, rhs);
if (ts.is_known ())
return ts;
/* Otherwise, try constraints. */
- return m_constraints->eval_condition (lhs_sid, op, rhs_sid);
+ return m_constraints->eval_condition (lhs, op, rhs);
}
-/* Determine what is known about the condition "LHS_SID OP RHS_SID" within
+/* Determine what is known about the condition "LHS_SVAL OP RHS_SVAL" within
this model, without resorting to the constraint_manager.
This is exposed so that impl_region_model_context::on_state_leak can
without risking creating new ECs. */
tristate
-region_model::eval_condition_without_cm (svalue_id lhs_sid,
- enum tree_code op,
- svalue_id rhs_sid) const
+region_model::eval_condition_without_cm (const svalue *lhs,
+ enum tree_code op,
+ const svalue *rhs) const
{
- svalue *lhs = get_svalue (lhs_sid);
- svalue *rhs = get_svalue (rhs_sid);
gcc_assert (lhs);
gcc_assert (rhs);
/* See what we know based on the values. */
- if (lhs && rhs)
+
+ /* For now, make no attempt to capture constraints on floating-point
+ values. */
+ if ((lhs->get_type () && FLOAT_TYPE_P (lhs->get_type ()))
+ || (rhs->get_type () && FLOAT_TYPE_P (rhs->get_type ())))
+ return tristate::unknown ();
+
+ /* Unwrap any unmergeable values. */
+ lhs = lhs->unwrap_any_unmergeable ();
+ rhs = rhs->unwrap_any_unmergeable ();
+
+ if (lhs == rhs)
{
- /* For now, make no attempt to capture constraints on floating-point
- values. */
- if ((lhs->get_type () && FLOAT_TYPE_P (lhs->get_type ()))
- || (rhs->get_type () && FLOAT_TYPE_P (rhs->get_type ())))
- return tristate::unknown ();
+ /* If we have the same svalue, then we have equality
+ (apart from NaN-handling).
+ TODO: should this definitely be the case for poisoned values? */
+ /* Poisoned and unknown values are "unknowable". */
+ if (lhs->get_kind () == SK_POISONED
+ || lhs->get_kind () == SK_UNKNOWN)
+ return tristate::TS_UNKNOWN;
- if (lhs == rhs)
+ switch (op)
{
- /* If we have the same svalue, then we have equality
- (apart from NaN-handling).
- TODO: should this definitely be the case for poisoned values? */
- switch (op)
- {
- case EQ_EXPR:
- case GE_EXPR:
- case LE_EXPR:
- return tristate::TS_TRUE;
-
- case NE_EXPR:
- case GT_EXPR:
- case LT_EXPR:
- return tristate::TS_FALSE;
+ case EQ_EXPR:
+ case GE_EXPR:
+ case LE_EXPR:
+ return tristate::TS_TRUE;
- default:
- /* For other ops, use the logic below. */
- break;
- }
+ case NE_EXPR:
+ case GT_EXPR:
+ case LT_EXPR:
+ return tristate::TS_FALSE;
+
+ default:
+ /* For other ops, use the logic below. */
+ break;
}
+ }
- /* If we have a pair of region_svalues, compare them. */
- if (region_svalue *lhs_ptr = lhs->dyn_cast_region_svalue ())
- if (region_svalue *rhs_ptr = rhs->dyn_cast_region_svalue ())
- {
- tristate res = region_svalue::eval_condition (lhs_ptr, op, rhs_ptr);
- if (res.is_known ())
- return res;
- /* Otherwise, only known through constraints. */
- }
+ /* If we have a pair of region_svalues, compare them. */
+ if (const region_svalue *lhs_ptr = lhs->dyn_cast_region_svalue ())
+ if (const region_svalue *rhs_ptr = rhs->dyn_cast_region_svalue ())
+ {
+ tristate res = region_svalue::eval_condition (lhs_ptr, op, rhs_ptr);
+ if (res.is_known ())
+ return res;
+ /* Otherwise, only known through constraints. */
+ }
- /* If we have a pair of constants, compare them. */
- if (constant_svalue *cst_lhs = lhs->dyn_cast_constant_svalue ())
- if (constant_svalue *cst_rhs = rhs->dyn_cast_constant_svalue ())
- return constant_svalue::eval_condition (cst_lhs, op, cst_rhs);
+ /* If we have a pair of constants, compare them. */
+ if (const constant_svalue *cst_lhs = lhs->dyn_cast_constant_svalue ())
+ if (const constant_svalue *cst_rhs = rhs->dyn_cast_constant_svalue ())
+ return constant_svalue::eval_condition (cst_lhs, op, cst_rhs);
- /* Handle comparison of a region_svalue against zero. */
- if (region_svalue *ptr = lhs->dyn_cast_region_svalue ())
- if (constant_svalue *cst_rhs = rhs->dyn_cast_constant_svalue ())
- if (zerop (cst_rhs->get_constant ()))
+ /* Handle comparison of a region_svalue against zero. */
+
+ if (const region_svalue *ptr = lhs->dyn_cast_region_svalue ())
+ if (const constant_svalue *cst_rhs = rhs->dyn_cast_constant_svalue ())
+ if (zerop (cst_rhs->get_constant ()))
+ {
+ /* A region_svalue is a non-NULL pointer, except in certain
+ special cases (see the comment for region::non_null_p). */
+ const region *pointee = ptr->get_pointee ();
+ if (pointee->non_null_p ())
{
- /* A region_svalue is a non-NULL pointer, except in certain
- special cases (see the comment for region::non_null_p. */
- region *pointee = get_region (ptr->get_pointee ());
- if (pointee->non_null_p (*this))
+ switch (op)
{
- switch (op)
- {
- default:
- gcc_unreachable ();
-
- case EQ_EXPR:
- case GE_EXPR:
- case LE_EXPR:
- return tristate::TS_FALSE;
-
- case NE_EXPR:
- case GT_EXPR:
- case LT_EXPR:
- return tristate::TS_TRUE;
- }
+ default:
+ gcc_unreachable ();
+
+ case EQ_EXPR:
+ case GE_EXPR:
+ case LE_EXPR:
+ return tristate::TS_FALSE;
+
+ case NE_EXPR:
+ case GT_EXPR:
+ case LT_EXPR:
+ return tristate::TS_TRUE;
}
}
+ }
+
+ /* Handle rejection of equality for comparisons of the initial values of
+ "external" values (such as params) with the address of locals. */
+ if (const initial_svalue *init_lhs = lhs->dyn_cast_initial_svalue ())
+ if (const region_svalue *rhs_ptr = rhs->dyn_cast_region_svalue ())
+ {
+ tristate res = compare_initial_and_pointer (init_lhs, rhs_ptr);
+ if (res.is_known ())
+ return res;
+ }
+ if (const initial_svalue *init_rhs = rhs->dyn_cast_initial_svalue ())
+ if (const region_svalue *lhs_ptr = lhs->dyn_cast_region_svalue ())
+ {
+ tristate res = compare_initial_and_pointer (init_rhs, lhs_ptr);
+ if (res.is_known ())
+ return res;
+ }
+
+ if (const widening_svalue *widen_lhs = lhs->dyn_cast_widening_svalue ())
+ if (tree rhs_cst = rhs->maybe_get_constant ())
+ {
+ tristate res = widen_lhs->eval_condition_without_cm (op, rhs_cst);
+ if (res.is_known ())
+ return res;
+ }
+
+ return tristate::TS_UNKNOWN;
+}
+
+/* Subroutine of region_model::eval_condition_without_cm, for rejecting
+ equality of INIT_VAL(PARM) with &LOCAL. */
+
+tristate
+region_model::compare_initial_and_pointer (const initial_svalue *init,
+ const region_svalue *ptr) const
+{
+ const region *pointee = ptr->get_pointee ();
+
+ /* If we have a pointer to something within a stack frame, it can't be the
+ initial value of a param. */
+ if (pointee->maybe_get_frame_region ())
+ {
+ const region *reg = init->get_region ();
+ if (tree reg_decl = reg->maybe_get_decl ())
+ if (TREE_CODE (reg_decl) == SSA_NAME)
+ {
+ tree ssa_name = reg_decl;
+ if (SSA_NAME_IS_DEFAULT_DEF (ssa_name)
+ && SSA_NAME_VAR (ssa_name)
+ && TREE_CODE (SSA_NAME_VAR (ssa_name)) == PARM_DECL)
+ return tristate::TS_FALSE;
+ }
}
return tristate::TS_UNKNOWN;
if (FLOAT_TYPE_P (TREE_TYPE (lhs)) || FLOAT_TYPE_P (TREE_TYPE (rhs)))
return true;
- svalue_id lhs_sid = get_rvalue (lhs, ctxt);
- svalue_id rhs_sid = get_rvalue (rhs, ctxt);
+ const svalue *lhs_sval = get_rvalue (lhs, ctxt);
+ const svalue *rhs_sval = get_rvalue (rhs, ctxt);
- tristate t_cond = eval_condition (lhs_sid, op, rhs_sid);
+ tristate t_cond = eval_condition (lhs_sval, op, rhs_sval);
/* If we already have the condition, do nothing. */
if (t_cond.is_true ())
return false;
/* Store the constraint. */
- m_constraints->add_constraint (lhs_sid, op, rhs_sid);
+ m_constraints->add_constraint (lhs_sval, op, rhs_sval);
add_any_constraints_from_ssa_def_stmt (lhs, op, rhs, ctxt);
- /* If we now know a symbolic_region is non-NULL, clear its
- m_possibly_null. */
- if (zerop (rhs) && op == NE_EXPR)
- if (region_svalue *ptr = get_svalue (lhs_sid)->dyn_cast_region_svalue ())
- {
- region *pointee = get_region (ptr->get_pointee ());
- if (symbolic_region *sym_reg = pointee->dyn_cast_symbolic_region ())
- sym_reg->m_possibly_null = false;
- }
-
/* Notify the context, if any. This exists so that the state machines
in a program_state can be notified about the condition, and so can
set sm-state for e.g. unchecked->checked, both for cfg-edges, and
return eval_condition (get_rvalue (lhs, ctxt), op, get_rvalue (rhs, ctxt));
}
-/* If SID is a constant value, return the underlying tree constant.
- Otherwise, return NULL_TREE. */
+/* Attempt to return a path_var that represents SVAL, or return NULL_TREE.
+ Use VISITED to prevent infinite mutual recursion with the overload for
+ regions. */
-tree
-region_model::maybe_get_constant (svalue_id sid) const
+path_var
+region_model::get_representative_path_var (const svalue *sval,
+ svalue_set *visited) const
{
- gcc_assert (!sid.null_p ());
- svalue *sval = get_svalue (sid);
- return sval->maybe_get_constant ();
-}
-
-/* Create a new child region of the heap (creating the heap region if
- necessary).
- Return the region_id of the new child region. */
+ if (sval == NULL)
+ return path_var (NULL_TREE, 0);
-region_id
-region_model::add_new_malloc_region ()
-{
- region_id heap_rid
- = get_root_region ()->ensure_heap_region (this);
- return add_region (new symbolic_region (heap_rid, NULL_TREE, true));
-}
+ if (const svalue *cast_sval = sval->maybe_undo_cast ())
+ sval = cast_sval;
-/* Attempt to return a tree that represents SID, or return NULL_TREE. */
+ /* Prevent infinite recursion. */
+ if (visited->contains (sval))
+ return path_var (NULL_TREE, 0);
+ visited->add (sval);
-tree
-region_model::get_representative_tree (svalue_id sid) const
-{
- if (sid.null_p ())
- return NULL_TREE;
+ auto_vec<path_var> pvs;
+ m_store.get_representative_path_vars (this, visited, sval, &pvs);
- /* Find the first region that stores the value (e.g. a local) and
- generate a representative tree for it. */
- unsigned i;
- region *region;
- FOR_EACH_VEC_ELT (m_regions, i, region)
- if (sid == region->get_value_direct ())
- {
- path_var pv = get_representative_path_var (region_id::from_int (i));
- if (pv.m_tree)
- return pv.m_tree;
- }
+ if (tree cst = sval->maybe_get_constant ())
+ pvs.safe_push (path_var (cst, 0));
/* Handle string literals and various other pointers. */
- svalue *sval = get_svalue (sid);
- if (region_svalue *ptr_sval = sval->dyn_cast_region_svalue ())
+ if (const region_svalue *ptr_sval = sval->dyn_cast_region_svalue ())
+ {
+ const region *reg = ptr_sval->get_pointee ();
+ if (path_var pv = get_representative_path_var (reg, visited))
+ return path_var (build1 (ADDR_EXPR,
+ TREE_TYPE (sval->get_type ()),
+ pv.m_tree),
+ pv.m_stack_depth);
+ }
+
+ /* If we have a sub_svalue, look for ways to represent the parent. */
+ if (const sub_svalue *sub_sval = sval->dyn_cast_sub_svalue ())
{
- region_id rid = ptr_sval->get_pointee ();
- path_var pv = get_representative_path_var (rid);
- if (pv.m_tree)
- return build1 (ADDR_EXPR,
- TREE_TYPE (sval->get_type ()),
- pv.m_tree);
+ const svalue *parent_sval = sub_sval->get_parent ();
+ const region *subreg = sub_sval->get_subregion ();
+ if (path_var parent_pv
+ = get_representative_path_var (parent_sval, visited))
+ if (const field_region *field_reg = subreg->dyn_cast_field_region ())
+ return path_var (build3 (COMPONENT_REF,
+ sval->get_type (),
+ parent_pv.m_tree,
+ field_reg->get_field (),
+ NULL_TREE),
+ parent_pv.m_stack_depth);
}
- return maybe_get_constant (sid);
+ if (pvs.length () < 1)
+ return path_var (NULL_TREE, 0);
+
+ pvs.qsort (readability_comparator);
+ return pvs[0];
}
-/* Attempt to return a path_var that represents the region, or return
- the NULL path_var.
- For example, a region for a field of a local would be a path_var
- wrapping a COMPONENT_REF. */
+/* Attempt to return a tree that represents SVAL, or return NULL_TREE. */
-path_var
-region_model::get_representative_path_var (region_id rid) const
+tree
+region_model::get_representative_tree (const svalue *sval) const
{
- region *reg = get_region (rid);
- region *parent_reg = get_region (reg->get_parent ());
- region_id stack_rid = get_stack_region_id ();
- if (!stack_rid.null_p ())
- if (parent_reg && parent_reg->get_parent () == stack_rid)
- {
- frame_region *parent_frame = (frame_region *)parent_reg;
- tree t = parent_frame->get_tree_for_child_region (rid);
- return path_var (t, parent_frame->get_depth ());
- }
- if (reg->get_parent () == get_globals_region_id ())
- {
- map_region *globals = get_root_region ()->get_globals_region (this);
- if (globals)
- return path_var (globals->get_tree_for_child_region (rid), -1);
- }
+ svalue_set visited;
+ return get_representative_path_var (sval, &visited).m_tree;
+}
+
+/* Attempt to return a path_var that represents REG, or return
+ the NULL path_var.
+ For example, a region for a field of a local would be a path_var
+ wrapping a COMPONENT_REF.
+ Use VISITED to prevent infinite mutual recursion with the overload for
+ svalues. */
- /* Handle e.g. fields of a local by recursing. */
- region_id parent_rid = reg->get_parent ();
- if (parent_reg)
+path_var
+region_model::get_representative_path_var (const region *reg,
+ svalue_set *visited) const
+{
+ switch (reg->get_kind ())
{
- if (reg->is_view_p ())
- {
- path_var parent_pv = get_representative_path_var (parent_rid);
- if (parent_pv.m_tree && reg->get_type ())
- return path_var (build1 (NOP_EXPR,
- reg->get_type (),
- parent_pv.m_tree),
- parent_pv.m_stack_depth);
- }
+ default:
+ gcc_unreachable ();
- if (parent_reg->get_kind () == RK_STRUCT)
- {
- map_region *parent_map_region = (map_region *)parent_reg;
- /* This can fail if we have a view, rather than a field. */
- if (tree child_key
- = parent_map_region->get_tree_for_child_region (rid))
- {
- path_var parent_pv = get_representative_path_var (parent_rid);
- if (parent_pv.m_tree && TREE_CODE (child_key) == FIELD_DECL)
- return path_var (build3 (COMPONENT_REF,
- TREE_TYPE (child_key),
- parent_pv.m_tree, child_key,
- NULL_TREE),
- parent_pv.m_stack_depth);
- }
- }
+ case RK_FRAME:
+ case RK_GLOBALS:
+ case RK_CODE:
+ case RK_HEAP:
+ case RK_STACK:
+ case RK_ROOT:
+ /* Regions that represent memory spaces are not expressible as trees. */
+ return path_var (NULL_TREE, 0);
- /* Handle elements within an array. */
- if (array_region *array_reg = parent_reg->dyn_cast_array_region ())
+ case RK_FUNCTION:
{
- array_region::key_t key;
- if (array_reg->get_key_for_child_region (rid, &key))
- {
- path_var parent_pv = get_representative_path_var (parent_rid);
- if (parent_pv.m_tree && reg->get_type ())
- {
- tree index = array_reg->constant_from_key (key);
- return path_var (build4 (ARRAY_REF,
- reg->get_type (),
- parent_pv.m_tree, index,
- NULL_TREE, NULL_TREE),
- parent_pv.m_stack_depth);
- }
- }
+ const function_region *function_reg
+ = as_a <const function_region *> (reg);
+ return path_var (function_reg->get_fndecl (), 0);
}
- }
+ case RK_LABEL:
+ gcc_unreachable (); // TODO
- /* Handle string literals. */
- svalue_id sid = reg->get_value_direct ();
- if (svalue *sval = get_svalue (sid))
- if (tree cst = sval->maybe_get_constant ())
- if (TREE_CODE (cst) == STRING_CST)
- return path_var (cst, 0);
-
- return path_var (NULL_TREE, 0);
-}
+ case RK_SYMBOLIC:
+ {
+ const symbolic_region *symbolic_reg
+ = as_a <const symbolic_region *> (reg);
+ const svalue *pointer = symbolic_reg->get_pointer ();
+ path_var pointer_pv = get_representative_path_var (pointer, visited);
+ if (!pointer_pv)
+ return path_var (NULL_TREE, 0);
+ tree offset = build_int_cst (pointer->get_type (), 0);
+ return path_var (build2 (MEM_REF,
+ reg->get_type (),
+ pointer_pv.m_tree,
+ offset),
+ pointer_pv.m_stack_depth);
+ }
+ case RK_DECL:
+ {
+ const decl_region *decl_reg = as_a <const decl_region *> (reg);
+ return path_var (decl_reg->get_decl (), decl_reg->get_stack_depth ());
+ }
+ case RK_FIELD:
+ {
+ const field_region *field_reg = as_a <const field_region *> (reg);
+ path_var parent_pv
+ = get_representative_path_var (reg->get_parent_region (), visited);
+ if (!parent_pv)
+ return path_var (NULL_TREE, 0);
+ return path_var (build3 (COMPONENT_REF,
+ reg->get_type (),
+ parent_pv.m_tree,
+ field_reg->get_field (),
+ NULL_TREE),
+ parent_pv.m_stack_depth);
+ }
-/* Locate all regions that directly have value SID and append representative
- path_var instances for them into *OUT. */
+ case RK_ELEMENT:
+ {
+ const element_region *element_reg
+ = as_a <const element_region *> (reg);
+ path_var parent_pv
+ = get_representative_path_var (reg->get_parent_region (), visited);
+ if (!parent_pv)
+ return path_var (NULL_TREE, 0);
+ path_var index_pv
+ = get_representative_path_var (element_reg->get_index (), visited);
+ if (!index_pv)
+ return path_var (NULL_TREE, 0);
+ return path_var (build4 (ARRAY_REF,
+ reg->get_type (),
+ parent_pv.m_tree, index_pv.m_tree,
+ NULL_TREE, NULL_TREE),
+ parent_pv.m_stack_depth);
+ }
-void
-region_model::get_path_vars_for_svalue (svalue_id sid, vec<path_var> *out) const
-{
- unsigned i;
- region *region;
- FOR_EACH_VEC_ELT (m_regions, i, region)
- if (sid == region->get_value_direct ())
+ case RK_OFFSET:
{
- path_var pv = get_representative_path_var (region_id::from_int (i));
- if (pv.m_tree)
- out->safe_push (pv);
+ const offset_region *offset_reg
+ = as_a <const offset_region *> (reg);
+ path_var parent_pv
+ = get_representative_path_var (reg->get_parent_region (), visited);
+ if (!parent_pv)
+ return path_var (NULL_TREE, 0);
+ path_var offset_pv
+ = get_representative_path_var (offset_reg->get_byte_offset (),
+ visited);
+ if (!offset_pv)
+ return path_var (NULL_TREE, 0);
+ return path_var (build2 (MEM_REF,
+ reg->get_type (),
+ parent_pv.m_tree, offset_pv.m_tree),
+ parent_pv.m_stack_depth);
}
-}
-/* Set DST_RID value to be a new unknown value of type TYPE. */
+ case RK_CAST:
+ {
+ path_var parent_pv
+ = get_representative_path_var (reg->get_parent_region (), visited);
+ if (!parent_pv)
+ return path_var (NULL_TREE, 0);
+ return path_var (build1 (NOP_EXPR,
+ reg->get_type (),
+ parent_pv.m_tree),
+ parent_pv.m_stack_depth);
+ }
-svalue_id
-region_model::set_to_new_unknown_value (region_id dst_rid, tree type,
- region_model_context *ctxt)
-{
- gcc_assert (!dst_rid.null_p ());
- svalue_id new_sid = add_svalue (new unknown_svalue (type));
- set_value (dst_rid, new_sid, ctxt);
+ case RK_HEAP_ALLOCATED:
+ case RK_ALLOCA:
+ /* No good way to express heap-allocated/alloca regions as trees. */
+ return path_var (NULL_TREE, 0);
- // TODO: presumably purge all child regions too (but do this in set_value?)
+ case RK_STRING:
+ {
+ const string_region *string_reg = as_a <const string_region *> (reg);
+ return path_var (string_reg->get_string_cst (), 0);
+ }
- return new_sid;
+ case RK_UNKNOWN:
+ return path_var (NULL_TREE, 0);
+ }
}
/* Update this model for any phis in SNODE, assuming we came from
tree lhs = gimple_phi_result (phi);
/* Update next_state based on phi. */
- bool is_back_edge = last_cfg_superedge->back_edge_p ();
- handle_phi (phi, lhs, src, is_back_edge, ctxt);
+ handle_phi (phi, lhs, src, ctxt);
}
}
region_model::update_for_call_superedge (const call_superedge &call_edge,
region_model_context *ctxt)
{
- /* Build a vec of argument svalue_id, using the current top
+ /* Build a vec of argument svalues, using the current top
frame for resolving tree expressions. */
const gcall *call_stmt = call_edge.get_call_stmt ();
- auto_vec<svalue_id> arg_sids (gimple_call_num_args (call_stmt));
+ auto_vec<const svalue *> arg_svals (gimple_call_num_args (call_stmt));
for (unsigned i = 0; i < gimple_call_num_args (call_stmt); i++)
{
tree arg = gimple_call_arg (call_stmt, i);
- arg_sids.quick_push (get_rvalue (arg, ctxt));
+ arg_svals.quick_push (get_rvalue (arg, ctxt));
}
- push_frame (call_edge.get_callee_function (), &arg_sids, ctxt);
+ push_frame (call_edge.get_callee_function (), &arg_svals, ctxt);
}
/* Pop the top-most frame_region from the stack, and copy the return
region's values (if any) into the region for the lvalue of the LHS of
the call (if any). */
-
void
region_model::update_for_return_superedge (const return_superedge &return_edge,
region_model_context *ctxt)
{
- region_id stack_rid = get_stack_region_id ();
- stack_region *stack = get_region <stack_region> (stack_rid);
-
/* Get the region for the result of the call, within the caller frame. */
- region_id result_dst_rid;
+ const region *result_dst_reg = NULL;
const gcall *call_stmt = return_edge.get_call_stmt ();
tree lhs = gimple_call_lhs (call_stmt);
if (lhs)
{
/* Normally we access the top-level frame, which is:
- path_var (expr, stack->get_num_frames () - 1)
+ path_var (expr, get_stack_depth () - 1)
whereas here we need the caller frame, hence "- 2" here. */
- gcc_assert (stack->get_num_frames () >= 2);
- result_dst_rid = get_lvalue (path_var (lhs, stack->get_num_frames () - 2),
+ gcc_assert (get_stack_depth () >= 2);
+ result_dst_reg = get_lvalue (path_var (lhs, get_stack_depth () - 2),
ctxt);
}
- purge_stats stats;
- stack->pop_frame (this, result_dst_rid, true, &stats, ctxt);
- // TODO: do something with the stats?
-
- if (!lhs)
- {
- /* This could be a leak; try purging again, but this time,
- don't special-case the result sids (as was done in pop_frame). */
- purge_unused_svalues (&stats, ctxt);
- }
+ pop_frame (result_dst_reg, NULL, ctxt);
}
/* Update this region_model with a summary of the effect of calling
const gcall *call_stmt = cg_sedge.get_call_stmt ();
tree lhs = gimple_call_lhs (call_stmt);
if (lhs)
- set_to_new_unknown_value (get_lvalue (lhs, ctxt), TREE_TYPE (lhs), ctxt);
+ mark_region_as_unknown (get_lvalue (lhs, ctxt));
// TODO: actually implement some kind of summary here
}
}
}
-/* Get the root_region within this model (guaranteed to be non-null). */
-
-root_region *
-region_model::get_root_region () const
-{
- return get_region<root_region> (m_root_rid);
-}
-
-/* Get the region_id of this model's stack region (if any). */
-
-region_id
-region_model::get_stack_region_id () const
-{
- return get_root_region ()->get_stack_region_id ();
-}
-
-/* Create a new frame_region for a call to FUN and push it onto
- the stack.
-
- If ARG_SIDS is non-NULL, use it to populate the parameters
- in the new frame.
- Otherwise, populate them with unknown values.
-
- Return the region_id of the new frame_region. */
-
-region_id
-region_model::push_frame (function *fun, vec<svalue_id> *arg_sids,
- region_model_context *ctxt)
-{
- return get_root_region ()->push_frame (this, fun, arg_sids, ctxt);
-}
-
-/* Get the region_id of the top-most frame in this region_model's stack,
- if any. */
-
-region_id
-region_model::get_current_frame_id () const
-{
- return get_root_region ()->get_current_frame_id (*this);
-}
-
-/* Get the function of the top-most frame in this region_model's stack.
- There must be such a frame. */
-
-function *
-region_model::get_current_function () const
-{
- region_id frame_id = get_current_frame_id ();
- frame_region *frame = get_region<frame_region> (frame_id);
- return frame->get_function ();
-}
-
-/* Pop the topmost frame_region from this region_model's stack;
- see the comment for stack_region::pop_frame. */
-
-void
-region_model::pop_frame (region_id result_dst_rid,
- bool purge, purge_stats *out,
- region_model_context *ctxt)
-{
- get_root_region ()->pop_frame (this, result_dst_rid, purge, out, ctxt);
-}
-
-/* Get the number of frames in this region_model's stack. */
-
-int
-region_model::get_stack_depth () const
-{
- stack_region *stack = get_root_region ()->get_stack_region (this);
- if (stack)
- return stack->get_num_frames ();
- else
- return 0;
-}
-
-/* Get the function * at DEPTH within the call stack. */
-
-function *
-region_model::get_function_at_depth (unsigned depth) const
-{
- stack_region *stack = get_root_region ()->get_stack_region (this);
- gcc_assert (stack);
- region_id frame_rid = stack->get_frame_rid (depth);
- frame_region *frame = get_region <frame_region> (frame_rid);
- return frame->get_function ();
-}
-
-/* Get the region_id of this model's globals region (if any). */
-
-region_id
-region_model::get_globals_region_id () const
-{
- return get_root_region ()->get_globals_region_id ();
-}
-
-/* Add SVAL to this model, taking ownership, and returning its new
- svalue_id. */
-
-svalue_id
-region_model::add_svalue (svalue *sval)
-{
- gcc_assert (sval);
- m_svalues.safe_push (sval);
- return svalue_id::from_int (m_svalues.length () - 1);
-}
-
-/* Change the meaning of SID to be NEW_SVAL
- (e.g. when deferencing an unknown pointer, the pointer
- becomes a pointer to a symbolic region, so that all users
- of the former unknown pointer are now effectively pointing
- at the same region). */
+/* For use with push_frame when handling a top-level call within the analysis.
+ PARAM has a defined but unknown initial value.
+ Anything it points to has escaped, since the calling context "knows"
+ the pointer, and thus calls to unknown functions could read/write into
+ the region. */
void
-region_model::replace_svalue (svalue_id sid, svalue *new_sval)
-{
- gcc_assert (!sid.null_p ());
- int idx = sid.as_int ();
-
- gcc_assert (m_svalues[idx]);
- gcc_assert (m_svalues[idx]->get_type () == new_sval->get_type ());
- delete m_svalues[idx];
-
- m_svalues[idx] = new_sval;
-}
-
-/* Add region R to this model, taking ownership, and returning its new
- region_id. */
-
-region_id
-region_model::add_region (region *r)
-{
- gcc_assert (r);
- m_regions.safe_push (r);
- return region_id::from_int (m_regions.length () - 1);
-}
-
-/* Return the svalue with id SVAL_ID, or NULL for a null id. */
-
-svalue *
-region_model::get_svalue (svalue_id sval_id) const
-{
- if (sval_id.null_p ())
- return NULL;
- return m_svalues[sval_id.as_int ()];
-}
-
-/* Return the region with id RID, or NULL for a null id. */
-
-region *
-region_model::get_region (region_id rid) const
-{
- if (rid.null_p ())
- return NULL;
- return m_regions[rid.as_int ()];
-}
-
-/* Make a region of an appropriate subclass for TYPE,
- with parent PARENT_RID, or return NULL for types we don't yet know
- how to handle. */
-
-static region *
-make_region_for_type (region_id parent_rid, tree type)
-{
- gcc_assert (TYPE_P (type));
-
- if (INTEGRAL_TYPE_P (type)
- || SCALAR_FLOAT_TYPE_P (type)
- || POINTER_TYPE_P (type)
- || TREE_CODE (type) == COMPLEX_TYPE
- || TREE_CODE (type) == VECTOR_TYPE)
- return new primitive_region (parent_rid, type);
-
- if (TREE_CODE (type) == RECORD_TYPE)
- return new struct_region (parent_rid, type);
-
- if (TREE_CODE (type) == ARRAY_TYPE)
- return new array_region (parent_rid, type);
-
- if (TREE_CODE (type) == UNION_TYPE)
- return new union_region (parent_rid, type);
-
- if (FUNC_OR_METHOD_TYPE_P (type))
- return new function_region (parent_rid, type);
-
- /* If we have a void *, make a new symbolic region. */
- if (VOID_TYPE_P (type))
- return new symbolic_region (parent_rid, type, false);
-
- return NULL;
-}
-
-/* Add a region with type TYPE and parent PARENT_RID. */
-
-region_id
-region_model::add_region_for_type (region_id parent_rid, tree type,
+region_model::on_top_level_param (tree param,
region_model_context *ctxt)
{
- if (type)
+ if (POINTER_TYPE_P (TREE_TYPE (param)))
{
- gcc_assert (TYPE_P (type));
-
- if (region *new_region = make_region_for_type (parent_rid, type))
- return add_region (new_region);
+ const region *param_reg = get_lvalue (param, ctxt);
+ const svalue *init_ptr_sval
+ = m_mgr->get_or_create_initial_value (param_reg);
+ const region *pointee_reg = m_mgr->get_symbolic_region (init_ptr_sval);
+ m_store.mark_as_escaped (pointee_reg);
}
-
- /* If we can't handle TYPE, return a placeholder region, and stop
- exploring this path. */
- return make_region_for_unexpected_tree_code (ctxt, type,
- dump_location_t ());
}
-/* Helper class for region_model::purge_unused_svalues. */
-
-class restrict_to_used_svalues : public purge_criteria
-{
-public:
- restrict_to_used_svalues (const auto_sbitmap &used) : m_used (used) {}
-
- bool should_purge_p (svalue_id sid) const FINAL OVERRIDE
- {
- gcc_assert (!sid.null_p ());
- return !bitmap_bit_p (m_used, sid.as_int ());
- }
+/* Update this region_model to reflect pushing a frame onto the stack
+ for a call to FUN.
-private:
- const auto_sbitmap &m_used;
-};
+ If ARG_SVALS is non-NULL, use it to populate the parameters
+ in the new frame.
+ Otherwise, the params have their initial_svalues.
-/* Remove unused svalues from this model, accumulating stats into STATS.
- Unused svalues are deleted. Doing so could reorder the svalues, and
- thus change the meaning of svalue_ids.
-
- If CTXT is non-NULL, then it is notified about svalue_id remappings,
- and about svalue_ids that are about to be deleted. This allows e.g.
- for warning about resource leaks, for the case where the svalue
- represents a resource handle in the user code (e.g. a FILE * or a malloc
- buffer).
-
- Amongst other things, removing unused svalues is important for ensuring
- that the analysis of loops terminates. Otherwise, we could generate a
- succession of models with unreferenced "unknown" values, where the
- number of redundant unknown values could grow without bounds, and each
- such model would be treated as distinct.
-
- If KNOWN_USED_SIDS is non-NULL, treat *KNOWN_USED_SIDS as used (this is for
- handling values being returned from functions as their frame is popped,
- since otherwise we'd have to simultaneously determine both the rvalue
- of the return expr in the callee frame and the lvalue for the gcall's
- assignment in the caller frame, and it seems cleaner to express all
- lvalue and rvalue lookups implicitly relative to a "current" frame).
- The svalue_ids in *KNOWN_USED_SIDS are not remapped and hence this
- call makes it invalid. */
+ Return the frame_region for the new frame. */
-void
-region_model::purge_unused_svalues (purge_stats *stats,
- region_model_context *ctxt,
- svalue_id_set *known_used_sids)
+const region *
+region_model::push_frame (function *fun, const vec<const svalue *> *arg_svals,
+ region_model_context *ctxt)
{
- // TODO: might want to avoid a vfunc call just to do logging here:
- logger *logger = ctxt ? ctxt->get_logger () : NULL;
-
- LOG_SCOPE (logger);
-
- auto_sbitmap used (m_svalues.length ());
- bitmap_clear (used);
-
- if (known_used_sids)
- {
- /* We can't use an sbitmap for known_used_sids as the number of
- svalues could have grown since it was created. */
- for (unsigned i = 0; i < get_num_svalues (); i++)
- if (known_used_sids->svalue_p (svalue_id::from_int (i)))
- bitmap_set_bit (used, i);
- }
-
- /* Walk the regions, marking sids that are used. */
- unsigned i;
- region *r;
- FOR_EACH_VEC_ELT (m_regions, i, r)
- {
- svalue_id sid = r->get_value_direct ();
- if (!sid.null_p ())
- bitmap_set_bit (used, sid.as_int ());
- }
-
- /* Now purge any constraints involving svalues we don't care about. */
- restrict_to_used_svalues criterion (used);
- m_constraints->purge (criterion, stats);
-
- /* Mark any sids that are in constraints that survived. */
- {
- equiv_class *ec;
- FOR_EACH_VEC_ELT (m_constraints->m_equiv_classes, i, ec)
- {
- int j;
- svalue_id *sid;
- FOR_EACH_VEC_ELT (ec->m_vars, j, sid)
- {
- gcc_assert (!sid->null_p ());
- bitmap_set_bit (used, sid->as_int ());
- }
- }
- }
-
- /* Build a mapping from old-sid to new-sid so that we can preserve
- order of the used IDs and move all redundant ones to the end.
- Iterate though svalue IDs, adding used ones to the front of
- the new list, and unused ones to the back. */
- svalue_id_map map (m_svalues.length ());
- int next_used_new_sid = 0;
- int after_next_unused_new_sid = m_svalues.length ();
- for (unsigned i = 0; i < m_svalues.length (); i++)
+ m_current_frame = m_mgr->get_frame_region (m_current_frame, fun);
+ if (arg_svals)
{
- svalue_id src (svalue_id::from_int (i));
- if (bitmap_bit_p (used, i))
- {
- if (logger)
- logger->log ("sv%i is used", i);
- map.put (src, svalue_id::from_int (next_used_new_sid++));
- }
- else
+ /* Arguments supplied from a caller frame. */
+ tree fndecl = fun->decl;
+ unsigned idx = 0;
+ for (tree iter_parm = DECL_ARGUMENTS (fndecl); iter_parm;
+ iter_parm = DECL_CHAIN (iter_parm), ++idx)
{
- if (logger)
- logger->log ("sv%i is unused", i);
- map.put (src, svalue_id::from_int (--after_next_unused_new_sid));
- }
- }
- /* The two insertion points should have met. */
- gcc_assert (next_used_new_sid == after_next_unused_new_sid);
-
- /* Now walk the regions and the constraints, remapping sids,
- so that all the redundant svalues are at the end. */
- remap_svalue_ids (map);
-
- if (logger)
- {
- logger->start_log_line ();
- logger->log_partial ("map: ");
- map.dump_to_pp (logger->get_printer ());
- logger->end_log_line ();
- }
-
- /* Notify any client about the remapping and pending deletion.
- Potentially this could trigger leak warnings. */
- if (ctxt)
- {
- ctxt->remap_svalue_ids (map);
- int num_client_items_purged
- = ctxt->on_svalue_purge (svalue_id::from_int (next_used_new_sid), map);
- if (stats)
- stats->m_num_client_items += num_client_items_purged;
- }
+ /* If there's a mismatching declaration, the call stmt might
+ not have enough args. Handle this case by leaving the
+ rest of the params as uninitialized. */
+ if (idx >= arg_svals->length ())
+ break;
+ const svalue *arg_sval = (*arg_svals)[idx];
+ const region *parm_reg = get_lvalue (iter_parm, ctxt);
+ set_value (parm_reg, arg_sval, ctxt);
- /* Drop the redundant svalues from the end of the vector. */
- while ((signed)m_svalues.length () > next_used_new_sid)
- {
- if (logger)
- {
- svalue_id victim = svalue_id::from_int (m_svalues.length () - 1);
- logger->log ("deleting sv%i (was sv%i)",
- victim.as_int (),
- map.get_src_for_dst (victim).as_int ());
+ /* Also do it for default SSA name (sharing the same value). */
+ tree parm_default_ssa = ssa_default_def (fun, iter_parm);
+ if (parm_default_ssa)
+ {
+ const region *defssa_reg = get_lvalue (parm_default_ssa, ctxt);
+ set_value (defssa_reg, arg_sval, ctxt);
+ }
}
- delete m_svalues.pop ();
- if (stats)
- stats->m_num_svalues++;
- }
-
- validate ();
-}
-
-/* Renumber the svalues within this model according to MAP. */
-
-void
-region_model::remap_svalue_ids (const svalue_id_map &map)
-{
- /* Update IDs within regions. */
- unsigned i;
- region *r;
- FOR_EACH_VEC_ELT (m_regions, i, r)
- r->remap_svalue_ids (map);
-
- /* Update IDs within ECs within constraints. */
- m_constraints->remap_svalue_ids (map);
-
- /* Build a reordered svalues vector. */
- auto_vec<svalue *> new_svalues (m_svalues.length ());
- for (unsigned i = 0; i < m_svalues.length (); i++)
- {
- svalue_id dst (svalue_id::from_int (i));
- svalue_id src = map.get_src_for_dst (dst);
- new_svalues.quick_push (get_svalue (src));
- }
-
- /* Copy over the reordered vec to m_svalues. */
- m_svalues.truncate (0);
- gcc_assert (m_svalues.space (new_svalues.length ()));
- svalue *sval;
- FOR_EACH_VEC_ELT (new_svalues, i, sval)
- m_svalues.quick_push (sval);
-}
-
-/* Renumber the regions within this model according to MAP. */
-
-void
-region_model::remap_region_ids (const region_id_map &map)
-{
- /* Update IDs within regions. */
- unsigned i;
- region *r;
- FOR_EACH_VEC_ELT (m_regions, i, r)
- r->remap_region_ids (map);
-
- /* Update IDs within svalues. */
- svalue *sval;
- FOR_EACH_VEC_ELT (m_svalues, i, sval)
- sval->remap_region_ids (map);
-
- /* Build a reordered regions vector. */
- auto_vec<region *> new_regions (m_regions.length ());
- for (unsigned i = 0; i < m_regions.length (); i++)
- {
- region_id dst (region_id::from_int (i));
- region_id src = map.get_src_for_dst (dst);
- new_regions.quick_push (get_region (src));
- }
-
- /* Copy over the reordered vec to m_regions. */
- m_regions.truncate (0);
- gcc_assert (m_regions.space (new_regions.length ()));
- FOR_EACH_VEC_ELT (new_regions, i, r)
- m_regions.quick_push (r);
-}
-
-/* Delete all regions within SET_TO_PURGE, remapping region IDs for
- other regions. It's required that there are no uses of the
- regions within the set (or the region IDs will become invalid).
-
- Accumulate stats to STATS. */
-
-void
-region_model::purge_regions (const region_id_set &set_to_purge,
- purge_stats *stats,
- logger *)
-{
- /* Build a mapping from old-rid to new-rid so that we can preserve
- order of the used IDs and move all redundant ones to the end.
- Iterate though region IDs, adding used ones to the front of
- the new list, and unused ones to the back. */
- region_id_map map (m_regions.length ());
- int next_used_new_rid = 0;
- int after_next_unused_new_rid = m_regions.length ();
- for (unsigned i = 0; i < m_regions.length (); i++)
- {
- region_id src (region_id::from_int (i));
- if (set_to_purge.region_p (src))
- map.put (src, region_id::from_int (--after_next_unused_new_rid));
- else
- map.put (src, region_id::from_int (next_used_new_rid++));
- }
- /* The two insertion points should have met. */
- gcc_assert (next_used_new_rid == after_next_unused_new_rid);
-
- /* Now walk the regions and svalues, remapping rids,
- so that all the redundant regions are at the end. */
- remap_region_ids (map);
-
- /* Drop the redundant regions from the end of the vector. */
- while ((signed)m_regions.length () > next_used_new_rid)
- {
- delete m_regions.pop ();
- if (stats)
- stats->m_num_regions++;
}
-}
-
-/* Populate *OUT with RID and all of its descendents.
- If EXCLUDE_RID is non-null, then don't add it or its descendents. */
-
-void
-region_model::get_descendents (region_id rid, region_id_set *out,
- region_id exclude_rid) const
-{
- out->add_region (rid);
-
- bool changed = true;
- while (changed)
+ else
{
- changed = false;
- unsigned i;
- region *r;
- FOR_EACH_VEC_ELT (m_regions, i, r)
+ /* Otherwise we have a top-level call within the analysis. The params
+ have defined but unknown initial values.
+ Anything they point to has escaped. */
+ tree fndecl = fun->decl;
+ for (tree iter_parm = DECL_ARGUMENTS (fndecl); iter_parm;
+ iter_parm = DECL_CHAIN (iter_parm))
{
- region_id iter_rid = region_id::from_int (i);
- if (iter_rid == exclude_rid)
- continue;
- if (!out->region_p (iter_rid))
- {
- region_id parent_rid = r->get_parent ();
- if (!parent_rid.null_p ())
- if (out->region_p (parent_rid))
- {
- out->add_region (iter_rid);
- changed = true;
- }
- }
+ on_top_level_param (iter_parm, ctxt);
+ tree parm_default_ssa = ssa_default_def (fun, iter_parm);
+ if (parm_default_ssa)
+ on_top_level_param (parm_default_ssa, ctxt);
}
}
-}
-
-/* Delete RID and all descendent regions.
- Find any pointers to such regions; convert them to
- poisoned values of kind PKIND.
- Accumulate stats on purged entities into STATS. */
-
-void
-region_model::delete_region_and_descendents (region_id rid,
- enum poison_kind pkind,
- purge_stats *stats,
- logger *logger)
-{
- /* Find all child and descendent regions. */
- region_id_set descendents (this);
- get_descendents (rid, &descendents, region_id::null ());
-
- /* Find any pointers to such regions; convert to poisoned. */
- poison_any_pointers_to_bad_regions (descendents, pkind);
- /* Delete all such regions. */
- purge_regions (descendents, stats, logger);
+ return m_current_frame;
}
-/* Find any pointers to regions within BAD_REGIONS; convert them to
- poisoned values of kind PKIND. */
+/* Get the function of the top-most frame in this region_model's stack.
+ There must be such a frame. */
-void
-region_model::poison_any_pointers_to_bad_regions (const region_id_set &
- bad_regions,
- enum poison_kind pkind)
+function *
+region_model::get_current_function () const
{
- int i;
- svalue *sval;
- FOR_EACH_VEC_ELT (m_svalues, i, sval)
- if (region_svalue *ptr_sval = sval->dyn_cast_region_svalue ())
- {
- region_id ptr_dst = ptr_sval->get_pointee ();
- if (!ptr_dst.null_p ())
- if (bad_regions.region_p (ptr_dst))
- replace_svalue
- (svalue_id::from_int (i),
- new poisoned_svalue (pkind, sval->get_type ()));
- }
+ const frame_region *frame = get_current_frame ();
+ gcc_assert (frame);
+ return frame->get_function ();
}
-/* Attempt to merge THIS with OTHER_MODEL, writing the result
- to OUT_MODEL, and populating SID_MAPPING. */
-
-bool
-region_model::can_merge_with_p (const region_model &other_model,
- region_model *out_model,
- svalue_id_merger_mapping *sid_mapping) const
-{
- gcc_assert (m_root_rid == other_model.m_root_rid);
- gcc_assert (m_root_rid.as_int () == 0);
- gcc_assert (sid_mapping);
- gcc_assert (out_model);
+/* Pop the topmost frame_region from this region_model's stack;
- model_merger merger (this, &other_model, out_model, sid_mapping);
+ If RESULT_DST_REG is non-null, copy any return value from the frame
+ into RESULT_DST_REG's region.
+ If OUT_RESULT is non-null, copy any return value from the frame
+ into *OUT_RESULT.
- if (!root_region::can_merge_p (get_root_region (),
- other_model.get_root_region (),
- out_model->get_root_region (),
- &merger))
- return false;
+ Purge the frame region and all its descendent regions.
+ Convert any pointers that point into such regions into
+ POISON_KIND_POPPED_STACK svalues. */
- /* Merge constraints. */
- constraint_manager::merge (*m_constraints,
- *other_model.m_constraints,
- out_model->m_constraints,
- merger);
+void
+region_model::pop_frame (const region *result_dst_reg,
+ const svalue **out_result,
+ region_model_context *ctxt)
+{
+ gcc_assert (m_current_frame);
- out_model->validate ();
+ /* Evaluate the result, within the callee frame. */
+ const frame_region *frame_reg = m_current_frame;
+ tree fndecl = m_current_frame->get_function ()->decl;
+ tree result = DECL_RESULT (fndecl);
+ if (result && TREE_TYPE (result) != void_type_node)
+ {
+ if (result_dst_reg)
+ {
+ /* Copy the result to RESULT_DST_REG. */
+ copy_region (result_dst_reg,
+ get_lvalue (result, ctxt),
+ ctxt);
+ }
+ if (out_result)
+ *out_result = get_rvalue (result, ctxt);
+ }
- /* The merged model should be simpler (or as simple) as the inputs. */
-#if 0
- gcc_assert (out_model->m_svalues.length () <= m_svalues.length ());
- gcc_assert (out_model->m_svalues.length ()
- <= other_model.m_svalues.length ());
-#endif
- gcc_assert (out_model->m_regions.length () <= m_regions.length ());
- gcc_assert (out_model->m_regions.length ()
- <= other_model.m_regions.length ());
- // TODO: same, for constraints
+ /* Pop the frame. */
+ m_current_frame = m_current_frame->get_calling_frame ();
- return true;
+ unbind_region_and_descendents (frame_reg, POISON_KIND_POPPED_STACK);
}
-/* As above, but supply a placeholder svalue_id_merger_mapping
- instance to be used and receive output. For use in selftests. */
+/* Get the number of frames in this region_model's stack. */
-bool
-region_model::can_merge_with_p (const region_model &other_model,
- region_model *out_model) const
+int
+region_model::get_stack_depth () const
{
- svalue_id_merger_mapping sid_mapping (*this, other_model);
- return can_merge_with_p (other_model, out_model, &sid_mapping);
+ const frame_region *frame = get_current_frame ();
+ if (frame)
+ return frame->get_stack_depth ();
+ else
+ return 0;
}
-/* For debugging purposes: look for a region within this region_model
- for a decl named NAME (or an SSA_NAME for such a decl),
- returning its value, or svalue_id::null if none are found. */
+/* Get the frame_region with the given index within the stack.
+ The frame_region must exist. */
-svalue_id
-region_model::get_value_by_name (const char *name) const
+const frame_region *
+region_model::get_frame_at_index (int index) const
{
- gcc_assert (name);
- tree identifier = get_identifier (name);
- return get_root_region ()->get_value_by_name (identifier, *this);
+ const frame_region *frame = get_current_frame ();
+ gcc_assert (frame);
+ gcc_assert (index >= 0);
+ gcc_assert (index <= frame->get_index ());
+ while (index != frame->get_index ())
+ {
+ frame = frame->get_calling_frame ();
+ gcc_assert (frame);
+ }
+ return frame;
}
-/* Generate or reuse an svalue_id within this model for an index
- into an array of type PTR_TYPE, based on OFFSET_SID. */
+/* Unbind svalues for any regions in REG and below.
+ Find any pointers to such regions; convert them to
+ poisoned values of kind PKIND. */
-svalue_id
-region_model::convert_byte_offset_to_array_index (tree ptr_type,
- svalue_id offset_sid)
+void
+region_model::unbind_region_and_descendents (const region *reg,
+ enum poison_kind pkind)
{
- gcc_assert (POINTER_TYPE_P (ptr_type));
-
- if (tree offset_cst = maybe_get_constant (offset_sid))
+ /* Gather a set of base regions to be unbound. */
+ hash_set<const region *> base_regs;
+ for (store::cluster_map_t::iterator iter = m_store.begin ();
+ iter != m_store.end (); ++iter)
{
- tree elem_type = TREE_TYPE (ptr_type);
-
- /* Arithmetic on void-pointers is a GNU C extension, treating the size
- of a void as 1.
- https://gcc.gnu.org/onlinedocs/gcc/Pointer-Arith.html */
- if (TREE_CODE (elem_type) == VOID_TYPE)
- return offset_sid;
-
- /* First, use int_size_in_bytes, to reject the case where we have an
- incomplete type, or a non-constant value. */
- HOST_WIDE_INT hwi_byte_size = int_size_in_bytes (elem_type);
- if (hwi_byte_size > 0)
- {
- /* Now call size_in_bytes to get the answer in tree form. */
- tree byte_size = size_in_bytes (elem_type);
- gcc_assert (byte_size);
- /* Try to get a constant by dividing, ensuring that we're in a
- signed representation first. */
- tree index
- = fold_binary (TRUNC_DIV_EXPR, ssizetype,
- fold_convert (ssizetype, offset_cst),
- fold_convert (ssizetype, byte_size));
- if (index && TREE_CODE (index) == INTEGER_CST)
- return get_or_create_constant_svalue (index);
- }
+ const region *iter_base_reg = (*iter).first;
+ if (iter_base_reg->descendent_of_p (reg))
+ base_regs.add (iter_base_reg);
}
+ for (hash_set<const region *>::iterator iter = base_regs.begin ();
+ iter != base_regs.end (); ++iter)
+ m_store.purge_cluster (*iter);
- /* Otherwise, we don't know the array index; generate a new unknown value.
- TODO: do we need to capture the relationship between two unknown
- values (the offset and the index)? */
- return add_svalue (new unknown_svalue (integer_type_node));
+ /* Find any pointers to REG or its descendents; convert to poisoned. */
+ poison_any_pointers_to_descendents (reg, pkind);
}
-/* Get a region of type TYPE for PTR_SID[OFFSET_SID/sizeof (*PTR_SID)].
+/* Implementation of BindingVisitor.
+ Finds bindings whose svalue points to a descendent of a given
+ region, and replaces them with poisoned values. */
- If OFFSET_SID is known to be zero, then dereference PTR_SID.
- Otherwise, impose a view of "typeof(*PTR_SID)[]" on *PTR_SID,
- and then get a view of type TYPE on the relevant array element. */
-
-region_id
-region_model::get_or_create_mem_ref (tree type,
- svalue_id ptr_sid,
- svalue_id offset_sid,
- region_model_context *ctxt)
+struct bad_pointer_finder
{
- svalue *ptr_sval = get_svalue (ptr_sid);
- tree ptr_type = ptr_sval->get_type ();
- gcc_assert (ptr_type);
-
- region_id raw_rid = deref_rvalue (ptr_sid, ctxt);
-
- svalue *offset_sval = get_svalue (offset_sid);
- tree offset_type = offset_sval->get_type ();
- gcc_assert (offset_type);
-
- if (constant_svalue *cst_sval = offset_sval->dyn_cast_constant_svalue ())
- {
- if (zerop (cst_sval->get_constant ()))
- {
- /* Handle the zero offset case. */
- return get_or_create_view (raw_rid, type, ctxt);
- }
-
- /* If we're already within an array of the correct type,
- then we want to reuse that array, rather than starting
- a new view.
- If so, figure out our raw_rid's offset from its parent,
- if we can, and use that to offset OFFSET_SID, and create
- the element within the parent region. */
- region *raw_reg = get_region (raw_rid);
- region_id parent_rid = raw_reg->get_parent ();
- tree parent_type = get_region (parent_rid)->get_type ();
- if (parent_type
- && TREE_CODE (parent_type) == ARRAY_TYPE)
- {
- // TODO: check we have the correct parent type
- array_region *parent_array = get_region <array_region> (parent_rid);
- array_region::key_t key_for_raw_rid;
- if (parent_array->get_key_for_child_region (raw_rid,
- &key_for_raw_rid))
- {
- /* Convert from offset to index. */
- svalue_id index_sid
- = convert_byte_offset_to_array_index (ptr_type, offset_sid);
- if (tree index_cst
- = get_svalue (index_sid)->maybe_get_constant ())
- {
- array_region::key_t index_offset
- = array_region::key_from_constant (index_cst);
- array_region::key_t index_rel_to_parent
- = key_for_raw_rid + index_offset;
- tree index_rel_to_parent_cst
- = wide_int_to_tree (integer_type_node,
- index_rel_to_parent);
- svalue_id index_sid
- = get_or_create_constant_svalue (index_rel_to_parent_cst);
-
- /* Carry on, using the parent region and adjusted index. */
- region_id element_rid
- = parent_array->get_element (this, raw_rid, index_sid,
- ctxt);
- return get_or_create_view (element_rid, type, ctxt);
- }
- }
- }
- }
+ bad_pointer_finder (const region *reg, enum poison_kind pkind,
+ region_model_manager *mgr)
+ : m_reg (reg), m_pkind (pkind), m_mgr (mgr), m_count (0)
+ {}
- tree array_type = build_array_type (TREE_TYPE (ptr_type),
- integer_type_node);
- region_id array_view_rid = get_or_create_view (raw_rid, array_type, ctxt);
- array_region *array_reg = get_region <array_region> (array_view_rid);
+ void on_binding (const binding_key *, const svalue *&sval)
+ {
+ if (const region_svalue *ptr_sval = sval->dyn_cast_region_svalue ())
+ {
+ const region *ptr_dst = ptr_sval->get_pointee ();
+ /* Poison ptrs to descendents of REG, but not to REG itself,
+ otherwise double-free detection doesn't work (since sm-state
+ for "free" is stored on the original ptr svalue). */
+ if (ptr_dst->descendent_of_p (m_reg)
+ && ptr_dst != m_reg)
+ {
+ sval = m_mgr->get_or_create_poisoned_svalue (m_pkind,
+ sval->get_type ());
+ ++m_count;
+ }
+ }
+ }
- svalue_id index_sid
- = convert_byte_offset_to_array_index (ptr_type, offset_sid);
+ const region *m_reg;
+ enum poison_kind m_pkind;
+ region_model_manager *const m_mgr;
+ int m_count;
+};
- region_id element_rid
- = array_reg->get_element (this, array_view_rid, index_sid, ctxt);
+/* Find any pointers to REG or its descendents; convert them to
+ poisoned values of kind PKIND.
+ Return the number of pointers that were poisoned. */
- return get_or_create_view (element_rid, type, ctxt);
+int
+region_model::poison_any_pointers_to_descendents (const region *reg,
+ enum poison_kind pkind)
+{
+ bad_pointer_finder bv (reg, pkind, m_mgr);
+ m_store.for_each_binding (bv);
+ return bv.m_count;
}
-/* Get a region of type TYPE for PTR_SID + OFFSET_SID.
-
- If OFFSET_SID is known to be zero, then dereference PTR_SID.
- Otherwise, impose a view of "typeof(*PTR_SID)[]" on *PTR_SID,
- and then get a view of type TYPE on the relevant array element. */
+/* Attempt to merge THIS with OTHER_MODEL, writing the result
+ to OUT_MODEL. Use POINT to distinguish values created as a
+ result of merging. */
-region_id
-region_model::get_or_create_pointer_plus_expr (tree type,
- svalue_id ptr_sid,
- svalue_id offset_in_bytes_sid,
- region_model_context *ctxt)
+bool
+region_model::can_merge_with_p (const region_model &other_model,
+ const program_point &point,
+ region_model *out_model) const
{
- return get_or_create_mem_ref (type,
- ptr_sid,
- offset_in_bytes_sid,
- ctxt);
-}
+ gcc_assert (out_model);
+ gcc_assert (m_mgr == other_model.m_mgr);
+ gcc_assert (m_mgr == out_model->m_mgr);
-/* Get or create a view of type TYPE of the region with id RAW_ID.
- Return the id of the view (or RAW_ID if it of the same type). */
+ if (m_current_frame != other_model.m_current_frame)
+ return false;
+ out_model->m_current_frame = m_current_frame;
-region_id
-region_model::get_or_create_view (region_id raw_rid, tree type,
- region_model_context *ctxt)
-{
- region *raw_region = get_region (raw_rid);
+ model_merger m (this, &other_model, point, out_model);
- gcc_assert (TYPE_P (type));
- if (type != raw_region->get_type ())
- {
- /* If the region already has a view of the requested type,
- reuse it. */
- region_id existing_view_rid = raw_region->get_view (type, this);
- if (!existing_view_rid.null_p ())
- return existing_view_rid;
-
- /* Otherwise, make one (adding it to the region_model and
- to the viewed region). */
- region_id view_rid = add_region_for_type (raw_rid, type, ctxt);
- raw_region->add_view (view_rid, this);
- // TODO: something to signify that this is a "view"
- return view_rid;
- }
+ if (!store::can_merge_p (&m_store, &other_model.m_store,
+ &out_model->m_store, m_mgr->get_store_manager (),
+ &m))
+ return false;
+
+ /* Merge constraints. */
+ constraint_manager::merge (*m_constraints,
+ *other_model.m_constraints,
+ out_model->m_constraints,
+ m);
- return raw_rid;
+ return true;
}
/* Attempt to get the fndecl used at CALL, if known, or NULL_TREE
tree fn_ptr = gimple_call_fn (call);
if (fn_ptr == NULL_TREE)
return NULL_TREE;
- svalue_id fn_ptr_sid = get_rvalue (fn_ptr, ctxt);
- svalue *fn_ptr_sval = get_svalue (fn_ptr_sid);
- if (region_svalue *fn_ptr_ptr = fn_ptr_sval->dyn_cast_region_svalue ())
+ const svalue *fn_ptr_sval = get_rvalue (fn_ptr, ctxt);
+ if (const region_svalue *fn_ptr_ptr
+ = fn_ptr_sval->dyn_cast_region_svalue ())
{
- region_id fn_rid = fn_ptr_ptr->get_pointee ();
- code_region *code = get_root_region ()->get_code_region (this);
- if (code)
+ const region *reg = fn_ptr_ptr->get_pointee ();
+ if (const function_region *fn_reg = reg->dyn_cast_function_region ())
{
- tree fn_decl = code->get_tree_for_child_region (fn_rid);
- if (!fn_decl)
- return NULL_TREE;
+ tree fn_decl = fn_reg->get_fndecl ();
cgraph_node *node = cgraph_node::get (fn_decl);
if (!node)
return NULL_TREE;
return NULL_TREE;
}
-/* struct model_merger. */
-
-/* Dump a multiline representation of this merger to PP. */
+/* Would be much simpler to use a lambda here, if it were supported. */
-void
-model_merger::dump_to_pp (pretty_printer *pp) const
+struct append_ssa_names_cb_data
{
- pp_string (pp, "model A:");
- pp_newline (pp);
- m_model_a->dump_to_pp (pp, false);
- pp_newline (pp);
-
- pp_string (pp, "model B:");
- pp_newline (pp);
- m_model_b->dump_to_pp (pp, false);
- pp_newline (pp);
-
- pp_string (pp, "merged model:");
- pp_newline (pp);
- m_merged_model->dump_to_pp (pp, false);
- pp_newline (pp);
-
- pp_string (pp, "region map: model A to merged model:");
- pp_newline (pp);
- m_map_regions_from_a_to_m.dump_to_pp (pp);
- pp_newline (pp);
-
- pp_string (pp, "region map: model B to merged model:");
- pp_newline (pp);
- m_map_regions_from_b_to_m.dump_to_pp (pp);
- pp_newline (pp);
-
- m_sid_mapping->dump_to_pp (pp);
-}
+ const region_model *model;
+ auto_vec<const decl_region *> *out;
+};
-/* Dump a multiline representation of this merger to FILE. */
+/* Populate *OUT with all decl_regions for SSA names in the current
+ frame that have clusters within the store. */
void
-model_merger::dump (FILE *fp) const
-{
- pretty_printer pp;
- pp_format_decoder (&pp) = default_tree_printer;
- pp_show_color (&pp) = pp_show_color (global_dc->printer);
- pp.buffer->stream = fp;
- dump_to_pp (&pp);
- pp_flush (&pp);
-}
-
-/* Dump a multiline representation of this merger to stderr. */
-
-DEBUG_FUNCTION void
-model_merger::dump () const
+region_model::
+get_ssa_name_regions_for_current_frame (auto_vec<const decl_region *> *out)
+ const
{
- dump (stderr);
+ append_ssa_names_cb_data data;
+ data.model = this;
+ data.out = out;
+ m_store.for_each_cluster (append_ssa_names_cb, &data);
}
-/* Attempt to merge the svalues of SID_A and SID_B (from their
- respective models), writing the id of the resulting svalue
- into *MERGED_SID.
- Return true if the merger is possible, false otherwise. */
+/* Implementation detail of get_ssa_name_regions_for_current_frame. */
-bool
-model_merger::can_merge_values_p (svalue_id sid_a,
- svalue_id sid_b,
- svalue_id *merged_sid)
+void
+region_model::append_ssa_names_cb (const region *base_reg,
+ append_ssa_names_cb_data *cb_data)
{
- gcc_assert (merged_sid);
- svalue *sval_a = m_model_a->get_svalue (sid_a);
- svalue *sval_b = m_model_b->get_svalue (sid_b);
-
- /* If both are NULL, then the "values" are trivially mergeable. */
- if (!sval_a && !sval_b)
- return true;
-
- /* If one is NULL and the other non-NULL, then the "values"
- are not mergeable. */
- if (!(sval_a && sval_b))
- return false;
-
- /* Have they both already been mapped to the same new svalue_id?
- If so, use it. */
- svalue_id sid_a_in_m
- = m_sid_mapping->m_map_from_a_to_m.get_dst_for_src (sid_a);
- svalue_id sid_b_in_m
- = m_sid_mapping->m_map_from_b_to_m.get_dst_for_src (sid_b);
- if (!sid_a_in_m.null_p ()
- && !sid_b_in_m.null_p ()
- && sid_a_in_m == sid_b_in_m)
- {
- *merged_sid = sid_a_in_m;
- return true;
- }
-
- tree type = sval_a->get_type ();
- if (type == NULL_TREE)
- type = sval_b->get_type ();
-
- /* If the values have different kinds, or are both unknown,
- then merge as "unknown". */
- if (sval_a->get_kind () != sval_b->get_kind ()
- || sval_a->get_kind () == SK_UNKNOWN)
- {
- svalue *merged_sval = new unknown_svalue (type);
- *merged_sid = m_merged_model->add_svalue (merged_sval);
- record_svalues (sid_a, sid_b, *merged_sid);
- return true;
- }
-
- gcc_assert (sval_a->get_kind () == sval_b->get_kind ());
-
- switch (sval_a->get_kind ())
+ if (base_reg->get_parent_region () != cb_data->model->m_current_frame)
+ return;
+ if (const decl_region *decl_reg = base_reg->dyn_cast_decl_region ())
{
- default:
- case SK_UNKNOWN: /* SK_UNKNOWN handled above. */
- gcc_unreachable ();
-
- case SK_REGION:
- {
- /* If we have two region pointers, then we can merge (possibly to
- "unknown"). */
- const region_svalue ®ion_sval_a = *as_a <region_svalue *> (sval_a);
- const region_svalue ®ion_sval_b = *as_a <region_svalue *> (sval_b);
- region_svalue::merge_values (region_sval_a, region_sval_b,
- merged_sid, type,
- this);
- record_svalues (sid_a, sid_b, *merged_sid);
- return true;
- }
- break;
- case SK_CONSTANT:
- {
- /* If we have two constants, then we can merge. */
- const constant_svalue &cst_sval_a = *as_a <constant_svalue *> (sval_a);
- const constant_svalue &cst_sval_b = *as_a <constant_svalue *> (sval_b);
- constant_svalue::merge_values (cst_sval_a, cst_sval_b,
- merged_sid, this);
- record_svalues (sid_a, sid_b, *merged_sid);
- return true;
- }
- break;
-
- case SK_POISONED:
- case SK_SETJMP:
- return false;
+ if (TREE_CODE (decl_reg->get_decl ()) == SSA_NAME)
+ cb_data->out->safe_push (decl_reg);
}
}
-/* Record that A_RID in model A and B_RID in model B
- correspond to MERGED_RID in the merged model, so
- that pointers can be accurately merged. */
-
-void
-model_merger::record_regions (region_id a_rid,
- region_id b_rid,
- region_id merged_rid)
-{
- m_map_regions_from_a_to_m.put (a_rid, merged_rid);
- m_map_regions_from_b_to_m.put (b_rid, merged_rid);
-}
-
-/* Record that A_SID in model A and B_SID in model B
- correspond to MERGED_SID in the merged model. */
-
-void
-model_merger::record_svalues (svalue_id a_sid,
- svalue_id b_sid,
- svalue_id merged_sid)
-{
- gcc_assert (m_sid_mapping);
- m_sid_mapping->m_map_from_a_to_m.put (a_sid, merged_sid);
- m_sid_mapping->m_map_from_b_to_m.put (b_sid, merged_sid);
-}
-
-/* struct svalue_id_merger_mapping. */
+/* Return a new region describing a heap-allocated block of memory. */
-/* svalue_id_merger_mapping's ctor. */
-
-svalue_id_merger_mapping::svalue_id_merger_mapping (const region_model &a,
- const region_model &b)
-: m_map_from_a_to_m (a.get_num_svalues ()),
- m_map_from_b_to_m (b.get_num_svalues ())
+const region *
+region_model::create_region_for_heap_alloc (const svalue *size_in_bytes)
{
+ const region *reg = m_mgr->create_region_for_heap_alloc ();
+ record_dynamic_extents (reg, size_in_bytes);
+ return reg;
}
-/* Dump a multiline representation of this to PP. */
+/* Return a new region describing a block of memory allocated within the
+ current frame. */
-void
-svalue_id_merger_mapping::dump_to_pp (pretty_printer *pp) const
+const region *
+region_model::create_region_for_alloca (const svalue *size_in_bytes)
{
- pp_string (pp, "svalue_id map: model A to merged model:");
- pp_newline (pp);
- m_map_from_a_to_m.dump_to_pp (pp);
- pp_newline (pp);
-
- pp_string (pp, "svalue_id map: model B to merged model:");
- pp_newline (pp);
- m_map_from_b_to_m.dump_to_pp (pp);
- pp_newline (pp);
+ const region *reg = m_mgr->create_region_for_alloca (m_current_frame);
+ record_dynamic_extents (reg, size_in_bytes);
+ return reg;
}
-/* Dump a multiline representation of this to FILE. */
+/* Placeholder hook for recording that the size of REG is SIZE_IN_BYTES.
+ Currently does nothing. */
void
-svalue_id_merger_mapping::dump (FILE *fp) const
-{
- pretty_printer pp;
- pp_format_decoder (&pp) = default_tree_printer;
- pp_show_color (&pp) = pp_show_color (global_dc->printer);
- pp.buffer->stream = fp;
- dump_to_pp (&pp);
- pp_flush (&pp);
-}
-
-/* Dump a multiline representation of this to stderr. */
-
-DEBUG_FUNCTION void
-svalue_id_merger_mapping::dump () const
-{
- dump (stderr);
-}
-
-/* struct canonicalization. */
-
-/* canonicalization's ctor. */
-
-canonicalization::canonicalization (const region_model &model)
-: m_model (model),
- m_rid_map (model.get_num_regions ()),
- m_sid_map (model.get_num_svalues ()),
- m_next_rid_int (0),
- m_next_sid_int (0)
+region_model::
+record_dynamic_extents (const region *reg ATTRIBUTE_UNUSED,
+ const svalue *size_in_bytes ATTRIBUTE_UNUSED)
{
}
-/* If we've not seen RID yet, assign it a canonicalized region_id,
- and walk the region's svalue and then the region. */
-
-void
-canonicalization::walk_rid (region_id rid)
-{
- /* Stop if we've already seen RID. */
- if (!m_rid_map.get_dst_for_src (rid).null_p ())
- return;
-
- region *region = m_model.get_region (rid);
- if (region)
- {
- m_rid_map.put (rid, region_id::from_int (m_next_rid_int++));
- walk_sid (region->get_value_direct ());
- region->walk_for_canonicalization (this);
- }
-}
+/* struct model_merger. */
-/* If we've not seen SID yet, assign it a canonicalized svalue_id,
- and walk the svalue (and potentially regions e.g. for ptr values). */
+/* Dump a multiline representation of this merger to PP. */
void
-canonicalization::walk_sid (svalue_id sid)
+model_merger::dump_to_pp (pretty_printer *pp, bool simple) const
{
- /* Stop if we've already seen SID. */
- if (!m_sid_map.get_dst_for_src (sid).null_p ())
- return;
-
- svalue *sval = m_model.get_svalue (sid);
- if (sval)
- {
- m_sid_map.put (sid, svalue_id::from_int (m_next_sid_int++));
- /* Potentially walk regions e.g. for ptrs. */
- sval->walk_for_canonicalization (this);
- }
-}
-
-/* Dump a multiline representation of this to PP. */
+ pp_string (pp, "model A:");
+ pp_newline (pp);
+ m_model_a->dump_to_pp (pp, simple, true);
+ pp_newline (pp);
-void
-canonicalization::dump_to_pp (pretty_printer *pp) const
-{
- pp_string (pp, "region_id map:");
+ pp_string (pp, "model B:");
pp_newline (pp);
- m_rid_map.dump_to_pp (pp);
+ m_model_b->dump_to_pp (pp, simple, true);
pp_newline (pp);
- pp_string (pp, "svalue_id map:");
+ pp_string (pp, "merged model:");
pp_newline (pp);
- m_sid_map.dump_to_pp (pp);
+ m_merged_model->dump_to_pp (pp, simple, true);
pp_newline (pp);
}
-/* Dump a multiline representation of this to FILE. */
+/* Dump a multiline representation of this merger to FILE. */
void
-canonicalization::dump (FILE *fp) const
+model_merger::dump (FILE *fp, bool simple) const
{
pretty_printer pp;
pp_format_decoder (&pp) = default_tree_printer;
pp_show_color (&pp) = pp_show_color (global_dc->printer);
pp.buffer->stream = fp;
- dump_to_pp (&pp);
+ dump_to_pp (&pp, simple);
pp_flush (&pp);
}
-/* Dump a multiline representation of this to stderr. */
+/* Dump a multiline representation of this merger to stderr. */
DEBUG_FUNCTION void
-canonicalization::dump () const
+model_merger::dump (bool simple) const
{
- dump (stderr);
+ dump (stderr, simple);
}
} // namespace ana
-/* Update HSTATE with a hash of SID. */
+/* Dump RMODEL fully to stderr (i.e. without summarization). */
-void
-inchash::add (svalue_id sid, inchash::hash &hstate)
+DEBUG_FUNCTION void
+debug (const region_model &rmodel)
{
- hstate.add_int (sid.as_int ());
+ rmodel.dump (false);
}
-/* Update HSTATE with a hash of RID. */
-
-void
-inchash::add (region_id rid, inchash::hash &hstate)
-{
- hstate.add_int (rid.as_int ());
-}
+/* class engine. */
-/* Dump RMODEL fully to stderr (i.e. without summarization). */
+/* Dump the managed objects by class to LOGGER, and the per-class totals. */
-DEBUG_FUNCTION void
-debug (const region_model &rmodel)
+void
+engine::log_stats (logger *logger) const
{
- rmodel.dump (false);
+ m_mgr.log_stats (logger, true);
}
namespace ana {
/* Implementation detail of the ASSERT_CONDITION_* macros. */
+void
+assert_condition (const location &loc,
+ region_model &model,
+ const svalue *lhs, tree_code op, const svalue *rhs,
+ tristate expected)
+{
+ tristate actual = model.eval_condition (lhs, op, rhs);
+ ASSERT_EQ_AT (loc, actual, expected);
+}
+
+/* Implementation detail of the ASSERT_CONDITION_* macros. */
+
void
assert_condition (const location &loc,
region_model &model,
auto_fix_quotes sentinel;
pretty_printer pp;
pp_format_decoder (&pp) = default_tree_printer;
- model.dump_to_pp (&pp, summarize);
+
+ model.dump_to_pp (&pp, summarize, true);
ASSERT_STREQ_AT (loc, pp_formatted_text (&pp), expected);
}
static void
test_dump ()
{
- region_model model;
- model.get_root_region ()->ensure_stack_region (&model);
- model.get_root_region ()->ensure_globals_region (&model);
- model.get_root_region ()->ensure_heap_region (&model);
+ region_model_manager mgr;
+ region_model model (&mgr);
ASSERT_DUMP_EQ (model, false,
- "r0: {kind: `root', parent: null, sval: null}\n"
- "|-stack: r1: {kind: `stack', parent: r0, sval: null}\n"
- "|-globals: r2: {kind: `globals', parent: r0, sval: null, map: {}}\n"
- "`-heap: r3: {kind: `heap', parent: r0, sval: null}\n"
- "svalues:\n"
- "constraint manager:\n"
+ "stack depth: 0\n"
+ "m_called_unknown_fn: FALSE\n"
+ "constraint_manager:\n"
+ " equiv classes:\n"
+ " constraints:\n");
+ ASSERT_DUMP_EQ (model, true,
+ "stack depth: 0\n"
+ "m_called_unknown_fn: FALSE\n"
+ "constraint_manager:\n"
" equiv classes:\n"
" constraints:\n");
- ASSERT_DUMP_EQ (model, true, "");
}
/* Helper function for selftests. Create a struct or union type named NAME,
tree m_coord_type;
};
-/* Verify that dumps can show struct fields. */
+/* Verify usage of a struct. */
static void
-test_dump_2 ()
+test_struct ()
{
coord_test ct;
tree int_17 = build_int_cst (integer_type_node, 17);
tree int_m3 = build_int_cst (integer_type_node, -3);
- region_model model;
+ region_model_manager mgr;
+ region_model model (&mgr);
model.set_value (c_x, int_17, NULL);
model.set_value (c_y, int_m3, NULL);
- /* Simplified dump. */
- ASSERT_DUMP_EQ (model, true, "c.x: 17, c.y: -3");
-
- /* Full dump. */
- ASSERT_DUMP_EQ
- (model, false,
- "r0: {kind: `root', parent: null, sval: null}\n"
- "`-globals: r1: {kind: `globals', parent: r0, sval: null, map: {`c': r2}}\n"
- " `-`c': r2: {kind: `struct', parent: r1, sval: null, type: `struct coord', map: {`x': r3, `y': r4}}\n"
- " |: type: `struct coord'\n"
- " |-`x': r3: {kind: `primitive', parent: r2, sval: sv0, type: `int'}\n"
- " | |: sval: sv0: {type: `int', `17'}\n"
- " | |: type: `int'\n"
- " `-`y': r4: {kind: `primitive', parent: r2, sval: sv1, type: `int'}\n"
- " |: sval: sv1: {type: `int', `-3'}\n"
- " |: type: `int'\n"
- "svalues:\n"
- " sv0: {type: `int', `17'}\n"
- " sv1: {type: `int', `-3'}\n"
- "constraint manager:\n"
- " equiv classes:\n"
- " constraints:\n");
+ /* Verify get_offset for "c.x". */
+ {
+ const region *c_x_reg = model.get_lvalue (c_x, NULL);
+ region_offset offset = c_x_reg->get_offset ();
+ ASSERT_EQ (offset.get_base_region (), model.get_lvalue (c, NULL));
+ ASSERT_EQ (offset.get_bit_offset (), 0);
+ }
+
+ /* Verify get_offset for "c.y". */
+ {
+ const region *c_y_reg = model.get_lvalue (c_y, NULL);
+ region_offset offset = c_y_reg->get_offset ();
+ ASSERT_EQ (offset.get_base_region (), model.get_lvalue (c, NULL));
+ ASSERT_EQ (offset.get_bit_offset (), INT_TYPE_SIZE);
+ }
}
-/* Verify that dumps can show array elements. */
+/* Verify usage of an array element. */
static void
-test_dump_3 ()
+test_array_1 ()
{
tree tlen = size_int (10);
tree arr_type = build_array_type (char_type_node, build_index_type (tlen));
tree a = build_global_decl ("a", arr_type);
- region_model model;
+ region_model_manager mgr;
+ region_model model (&mgr);
tree int_0 = build_int_cst (integer_type_node, 0);
tree a_0 = build4 (ARRAY_REF, char_type_node,
a, int_0, NULL_TREE, NULL_TREE);
tree char_A = build_int_cst (char_type_node, 'A');
model.set_value (a_0, char_A, NULL);
-
- /* Simplified dump. */
- ASSERT_DUMP_EQ (model, true, "a[0]: 65");
-
- /* Full dump. */
- ASSERT_DUMP_EQ
- (model, false,
- "r0: {kind: `root', parent: null, sval: null}\n"
- "`-globals: r1: {kind: `globals', parent: r0, sval: null, map: {`a': r2}}\n"
- " `-`a': r2: {kind: `array', parent: r1, sval: null, type: `char[11]', array: {[0]: r3}}\n"
- " |: type: `char[11]'\n"
- " `-[0]: r3: {kind: `primitive', parent: r2, sval: sv1, type: `char'}\n"
- " |: sval: sv1: {type: `char', `65'}\n"
- " |: type: `char'\n"
- "svalues:\n"
- " sv0: {type: `int', `0'}\n"
- " sv1: {type: `char', `65'}\n"
- "constraint manager:\n"
- " equiv classes:\n"
- " constraints:\n");
}
/* Verify that region_model::get_representative_tree works as expected. */
static void
test_get_representative_tree ()
{
+ region_model_manager mgr;
+
/* STRING_CST. */
{
tree string_cst = build_string (4, "foo");
- region_model m;
- svalue_id str_sid = m.get_rvalue (string_cst, NULL);
- tree rep = m.get_representative_tree (str_sid);
+ region_model m (&mgr);
+ const svalue *str_sval = m.get_rvalue (string_cst, NULL);
+ tree rep = m.get_representative_tree (str_sval);
ASSERT_EQ (rep, string_cst);
}
/* String literal. */
{
tree string_cst_ptr = build_string_literal (4, "foo");
- region_model m;
- svalue_id str_sid = m.get_rvalue (string_cst_ptr, NULL);
- tree rep = m.get_representative_tree (str_sid);
+ region_model m (&mgr);
+ const svalue *str_sval = m.get_rvalue (string_cst_ptr, NULL);
+ tree rep = m.get_representative_tree (str_sval);
ASSERT_DUMP_TREE_EQ (rep, "&\"foo\"[0]");
}
+
+ /* Value of an element within an array. */
+ {
+ tree tlen = size_int (10);
+ tree arr_type = build_array_type (char_type_node, build_index_type (tlen));
+ tree a = build_global_decl ("a", arr_type);
+ placeholder_svalue test_sval (char_type_node, "test value");
+
+ /* Value of a[3]. */
+ {
+ test_region_model_context ctxt;
+ region_model model (&mgr);
+ tree int_3 = build_int_cst (integer_type_node, 3);
+ tree a_3 = build4 (ARRAY_REF, char_type_node,
+ a, int_3, NULL_TREE, NULL_TREE);
+ const region *a_3_reg = model.get_lvalue (a_3, &ctxt);
+ model.set_value (a_3_reg, &test_sval, &ctxt);
+ tree rep = model.get_representative_tree (&test_sval);
+ ASSERT_DUMP_TREE_EQ (rep, "a[3]");
+ }
+
+ /* Value of a[0]. */
+ {
+ test_region_model_context ctxt;
+ region_model model (&mgr);
+ tree idx = build_int_cst (integer_type_node, 0);
+ tree a_0 = build4 (ARRAY_REF, char_type_node,
+ a, idx, NULL_TREE, NULL_TREE);
+ const region *a_0_reg = model.get_lvalue (a_0, &ctxt);
+ model.set_value (a_0_reg, &test_sval, &ctxt);
+ tree rep = model.get_representative_tree (&test_sval);
+ ASSERT_DUMP_TREE_EQ (rep, "a[0]");
+ }
+ }
+
+ /* Value of a field within a struct. */
+ {
+ coord_test ct;
+
+ tree c = build_global_decl ("c", ct.m_coord_type);
+ tree c_x = build3 (COMPONENT_REF, TREE_TYPE (ct.m_x_field),
+ c, ct.m_x_field, NULL_TREE);
+ tree c_y = build3 (COMPONENT_REF, TREE_TYPE (ct.m_y_field),
+ c, ct.m_y_field, NULL_TREE);
+
+ test_region_model_context ctxt;
+
+ /* Value of initial field. */
+ {
+ region_model m (&mgr);
+ const region *c_x_reg = m.get_lvalue (c_x, &ctxt);
+ placeholder_svalue test_sval_x (integer_type_node, "test x val");
+ m.set_value (c_x_reg, &test_sval_x, &ctxt);
+ tree rep = m.get_representative_tree (&test_sval_x);
+ ASSERT_DUMP_TREE_EQ (rep, "c.x");
+ }
+
+ /* Value of non-initial field. */
+ {
+ region_model m (&mgr);
+ const region *c_y_reg = m.get_lvalue (c_y, &ctxt);
+ placeholder_svalue test_sval_y (integer_type_node, "test y val");
+ m.set_value (c_y_reg, &test_sval_y, &ctxt);
+ tree rep = m.get_representative_tree (&test_sval_y);
+ ASSERT_DUMP_TREE_EQ (rep, "c.y");
+ }
+ }
}
/* Verify that calling region_model::get_rvalue repeatedly on the same
- tree constant retrieves the same svalue_id. */
+ tree constant retrieves the same svalue *. */
static void
test_unique_constants ()
tree int_42 = build_int_cst (integer_type_node, 42);
test_region_model_context ctxt;
- region_model model;
+ region_model_manager mgr;
+ region_model model (&mgr);
ASSERT_EQ (model.get_rvalue (int_0, &ctxt), model.get_rvalue (int_0, &ctxt));
ASSERT_EQ (model.get_rvalue (int_42, &ctxt),
model.get_rvalue (int_42, &ctxt));
ASSERT_NE (model.get_rvalue (int_0, &ctxt), model.get_rvalue (int_42, &ctxt));
ASSERT_EQ (ctxt.get_num_diagnostics (), 0);
-}
-
-/* Check that operator== and hashing works as expected for the
- various svalue subclasses. */
-
-static void
-test_svalue_equality ()
-{
- tree int_42 = build_int_cst (integer_type_node, 42);
- tree int_0 = build_int_cst (integer_type_node, 0);
- /* Create pairs instances of the various subclasses of svalue,
- testing for hash and equality between (this, this) and
- (this, other of same subclass). */
- svalue *ptr_to_r0
- = new region_svalue (ptr_type_node, region_id::from_int (0));
- svalue *ptr_to_r1
- = new region_svalue (ptr_type_node, region_id::from_int (1));
-
- ASSERT_EQ (ptr_to_r0->hash (), ptr_to_r0->hash ());
- ASSERT_EQ (*ptr_to_r0, *ptr_to_r0);
-
- ASSERT_NE (ptr_to_r0->hash (), ptr_to_r1->hash ());
- ASSERT_NE (*ptr_to_r0, *ptr_to_r1);
-
- svalue *cst_int_42 = new constant_svalue (int_42);
- svalue *cst_int_0 = new constant_svalue (int_0);
-
- ASSERT_EQ (cst_int_42->hash (), cst_int_42->hash ());
- ASSERT_EQ (*cst_int_42, *cst_int_42);
-
- ASSERT_NE (cst_int_42->hash (), cst_int_0->hash ());
- ASSERT_NE (*cst_int_42, *cst_int_0);
-
- svalue *unknown_0 = new unknown_svalue (ptr_type_node);
- svalue *unknown_1 = new unknown_svalue (ptr_type_node);
- ASSERT_EQ (unknown_0->hash (), unknown_0->hash ());
- ASSERT_EQ (*unknown_0, *unknown_0);
- ASSERT_EQ (*unknown_1, *unknown_1);
-
- /* Comparisons between different kinds of svalue. */
- ASSERT_NE (*ptr_to_r0, *cst_int_42);
- ASSERT_NE (*ptr_to_r0, *unknown_0);
- ASSERT_NE (*cst_int_42, *ptr_to_r0);
- ASSERT_NE (*cst_int_42, *unknown_0);
- ASSERT_NE (*unknown_0, *ptr_to_r0);
- ASSERT_NE (*unknown_0, *cst_int_42);
-
- delete ptr_to_r0;
- delete ptr_to_r1;
- delete cst_int_42;
- delete cst_int_0;
- delete unknown_0;
- delete unknown_1;
+ /* A "(const int)42" will be a different tree from "(int)42"... */
+ tree const_int_type_node
+ = build_qualified_type (integer_type_node, TYPE_QUAL_CONST);
+ tree const_int_42 = build_int_cst (const_int_type_node, 42);
+ ASSERT_NE (int_42, const_int_42);
+ /* It should have a different constant_svalue. */
+ const svalue *int_42_sval = model.get_rvalue (int_42, &ctxt);
+ const svalue *const_int_42_sval = model.get_rvalue (const_int_42, &ctxt);
+ ASSERT_NE (int_42_sval, const_int_42_sval);
+ /* But they should compare as equal. */
+ ASSERT_CONDITION_TRUE (model, int_42_sval, EQ_EXPR, const_int_42_sval);
+ ASSERT_CONDITION_FALSE (model, int_42_sval, NE_EXPR, const_int_42_sval);
}
-/* Check that operator== and hashing works as expected for the
- various region subclasses. */
+/* Verify that each type gets its own singleton unknown_svalue within a
+ region_model_manager, and that NULL_TREE gets its own singleton. */
static void
-test_region_equality ()
+test_unique_unknowns ()
{
- region *r0
- = new primitive_region (region_id::from_int (3), integer_type_node);
- region *r1
- = new primitive_region (region_id::from_int (4), integer_type_node);
+ region_model_manager mgr;
+ const svalue *unknown_int
+ = mgr.get_or_create_unknown_svalue (integer_type_node);
+ /* Repeated calls with the same type should get the same "unknown"
+ svalue. */
+ const svalue *unknown_int_2
+ = mgr.get_or_create_unknown_svalue (integer_type_node);
+ ASSERT_EQ (unknown_int, unknown_int_2);
- ASSERT_EQ (*r0, *r0);
- ASSERT_EQ (r0->hash (), r0->hash ());
- ASSERT_NE (*r0, *r1);
- ASSERT_NE (r0->hash (), r1->hash ());
+ /* Different types (or the NULL type) should have different
+ unknown_svalues. */
+ const svalue *unknown_NULL_type = mgr.get_or_create_unknown_svalue (NULL);
+ ASSERT_NE (unknown_NULL_type, unknown_int);
- delete r0;
- delete r1;
-
- // TODO: test coverage for the map within a map_region
+ /* Repeated calls with NULL for the type should get the same "unknown"
+ svalue. */
+ const svalue *unknown_NULL_type_2 = mgr.get_or_create_unknown_svalue (NULL);
+ ASSERT_EQ (unknown_NULL_type, unknown_NULL_type_2);
}
-/* A subclass of purge_criteria for selftests: purge all svalue_id instances. */
-
-class purge_all_svalue_ids : public purge_criteria
-{
-public:
- bool should_purge_p (svalue_id) const FINAL OVERRIDE
- {
- return true;
- }
-};
-
-/* A subclass of purge_criteria: purge a specific svalue_id. */
+/* Verify that initial_svalue are handled as expected. */
-class purge_one_svalue_id : public purge_criteria
+static void
+test_initial_svalue_folding ()
{
-public:
- purge_one_svalue_id (svalue_id victim) : m_victim (victim) {}
-
- purge_one_svalue_id (region_model model, tree expr)
- : m_victim (model.get_rvalue (expr, NULL)) {}
+ region_model_manager mgr;
+ tree x = build_global_decl ("x", integer_type_node);
+ tree y = build_global_decl ("y", integer_type_node);
- bool should_purge_p (svalue_id sid) const FINAL OVERRIDE
- {
- return sid == m_victim;
- }
+ test_region_model_context ctxt;
+ region_model model (&mgr);
+ const svalue *x_init = model.get_rvalue (x, &ctxt);
+ const svalue *y_init = model.get_rvalue (y, &ctxt);
+ ASSERT_NE (x_init, y_init);
+ const region *x_reg = model.get_lvalue (x, &ctxt);
+ ASSERT_EQ (x_init, mgr.get_or_create_initial_value (x_reg));
-private:
- svalue_id m_victim;
-};
+}
-/* Check that constraint_manager::purge works for individual svalue_ids. */
+/* Verify that unary ops are folded as expected. */
static void
-test_purging_by_criteria ()
+test_unaryop_svalue_folding ()
{
- tree int_42 = build_int_cst (integer_type_node, 42);
- tree int_0 = build_int_cst (integer_type_node, 0);
-
+ region_model_manager mgr;
tree x = build_global_decl ("x", integer_type_node);
tree y = build_global_decl ("y", integer_type_node);
- {
- region_model model0;
- region_model model1;
-
- ADD_SAT_CONSTRAINT (model1, x, EQ_EXPR, y);
- ASSERT_NE (model0, model1);
-
- purge_stats stats_for_px;
- purge_one_svalue_id px (model1, x);
- model1.get_constraints ()->purge (px, &stats_for_px);
- ASSERT_EQ (stats_for_px.m_num_equiv_classes, 0);
-
- purge_stats stats_for_py;
- purge_one_svalue_id py (model1.get_rvalue (y, NULL));
- model1.get_constraints ()->purge (py, &stats_for_py);
- ASSERT_EQ (stats_for_py.m_num_equiv_classes, 1);
-
- ASSERT_EQ (*model0.get_constraints (), *model1.get_constraints ());
- }
-
- {
- region_model model0;
- region_model model1;
-
- ADD_SAT_CONSTRAINT (model1, x, EQ_EXPR, int_42);
- ASSERT_NE (model0, model1);
- ASSERT_CONDITION_TRUE (model1, x, EQ_EXPR, int_42);
-
- purge_stats stats;
- model1.get_constraints ()->purge (purge_one_svalue_id (model1, x), &stats);
-
- ASSERT_CONDITION_UNKNOWN (model1, x, EQ_EXPR, int_42);
- }
-
- {
- region_model model0;
- region_model model1;
-
- ADD_SAT_CONSTRAINT (model1, x, GE_EXPR, int_0);
- ADD_SAT_CONSTRAINT (model1, x, LE_EXPR, int_42);
- ASSERT_NE (model0, model1);
-
- ASSERT_CONDITION_TRUE (model1, x, GE_EXPR, int_0);
- ASSERT_CONDITION_TRUE (model1, x, LE_EXPR, int_42);
+ test_region_model_context ctxt;
+ region_model model (&mgr);
+ const svalue *x_init = model.get_rvalue (x, &ctxt);
+ const svalue *y_init = model.get_rvalue (y, &ctxt);
+ const region *x_reg = model.get_lvalue (x, &ctxt);
+ ASSERT_EQ (x_init, mgr.get_or_create_initial_value (x_reg));
+
+ /* "(int)x" -> "x". */
+ ASSERT_EQ (x_init, mgr.get_or_create_cast (integer_type_node, x_init));
+
+ /* "(void *)x" -> something other than "x". */
+ ASSERT_NE (x_init, mgr.get_or_create_cast (ptr_type_node, x_init));
+
+ /* "!(x == y)" -> "x != y". */
+ ASSERT_EQ (mgr.get_or_create_unaryop
+ (boolean_type_node, TRUTH_NOT_EXPR,
+ mgr.get_or_create_binop (boolean_type_node, EQ_EXPR,
+ x_init, y_init)),
+ mgr.get_or_create_binop (boolean_type_node, NE_EXPR,
+ x_init, y_init));
+ /* "!(x > y)" -> "x <= y". */
+ ASSERT_EQ (mgr.get_or_create_unaryop
+ (boolean_type_node, TRUTH_NOT_EXPR,
+ mgr.get_or_create_binop (boolean_type_node, GT_EXPR,
+ x_init, y_init)),
+ mgr.get_or_create_binop (boolean_type_node, LE_EXPR,
+ x_init, y_init));
+}
+
+/* Verify that binops on constant svalues are folded. */
- purge_stats stats;
- model1.get_constraints ()->purge (purge_one_svalue_id (model1, x), &stats);
+static void
+test_binop_svalue_folding ()
+{
+#define NUM_CSTS 10
+ tree cst_int[NUM_CSTS];
+ region_model_manager mgr;
+ const svalue *cst_sval[NUM_CSTS];
+ for (int i = 0; i < NUM_CSTS; i++)
+ {
+ cst_int[i] = build_int_cst (integer_type_node, i);
+ cst_sval[i] = mgr.get_or_create_constant_svalue (cst_int[i]);
+ ASSERT_EQ (cst_sval[i]->get_kind (), SK_CONSTANT);
+ ASSERT_EQ (cst_sval[i]->maybe_get_constant (), cst_int[i]);
+ }
- ASSERT_CONDITION_UNKNOWN (model1, x, GE_EXPR, int_0);
- ASSERT_CONDITION_UNKNOWN (model1, x, LE_EXPR, int_42);
- }
+ for (int i = 0; i < NUM_CSTS; i++)
+ for (int j = 0; j < NUM_CSTS; j++)
+ {
+ if (i != j)
+ ASSERT_NE (cst_sval[i], cst_sval[j]);
+ if (i + j < NUM_CSTS)
+ {
+ const svalue *sum
+ = mgr.get_or_create_binop (integer_type_node, PLUS_EXPR,
+ cst_sval[i], cst_sval[j]);
+ ASSERT_EQ (sum, cst_sval[i + j]);
+ }
+ if (i - j >= 0)
+ {
+ const svalue *difference
+ = mgr.get_or_create_binop (integer_type_node, MINUS_EXPR,
+ cst_sval[i], cst_sval[j]);
+ ASSERT_EQ (difference, cst_sval[i - j]);
+ }
+ if (i * j < NUM_CSTS)
+ {
+ const svalue *product
+ = mgr.get_or_create_binop (integer_type_node, MULT_EXPR,
+ cst_sval[i], cst_sval[j]);
+ ASSERT_EQ (product, cst_sval[i * j]);
+ }
+ const svalue *eq = mgr.get_or_create_binop (integer_type_node, EQ_EXPR,
+ cst_sval[i], cst_sval[j]);
+ ASSERT_EQ (eq, i == j ? cst_sval[1] : cst_sval [0]);
+ const svalue *neq = mgr.get_or_create_binop (integer_type_node, NE_EXPR,
+ cst_sval[i], cst_sval[j]);
+ ASSERT_EQ (neq, i != j ? cst_sval[1] : cst_sval [0]);
+ // etc
+ }
- {
- region_model model0;
- region_model model1;
+ tree x = build_global_decl ("x", integer_type_node);
- ADD_SAT_CONSTRAINT (model1, x, NE_EXPR, int_42);
- ADD_SAT_CONSTRAINT (model1, y, NE_EXPR, int_0);
- ASSERT_NE (model0, model1);
- ASSERT_CONDITION_TRUE (model1, x, NE_EXPR, int_42);
- ASSERT_CONDITION_TRUE (model1, y, NE_EXPR, int_0);
+ test_region_model_context ctxt;
+ region_model model (&mgr);
+ const svalue *x_init = model.get_rvalue (x, &ctxt);
+
+ /* PLUS_EXPR folding. */
+ const svalue *x_init_plus_zero
+ = mgr.get_or_create_binop (integer_type_node, PLUS_EXPR,
+ x_init, cst_sval[0]);
+ ASSERT_EQ (x_init_plus_zero, x_init);
+ const svalue *zero_plus_x_init
+ = mgr.get_or_create_binop (integer_type_node, PLUS_EXPR,
+ cst_sval[0], x_init);
+ ASSERT_EQ (zero_plus_x_init, x_init);
+
+ /* MULT_EXPR folding. */
+ const svalue *x_init_times_zero
+ = mgr.get_or_create_binop (integer_type_node, MULT_EXPR,
+ x_init, cst_sval[0]);
+ ASSERT_EQ (x_init_times_zero, cst_sval[0]);
+ const svalue *zero_times_x_init
+ = mgr.get_or_create_binop (integer_type_node, MULT_EXPR,
+ cst_sval[0], x_init);
+ ASSERT_EQ (zero_times_x_init, cst_sval[0]);
+
+ const svalue *x_init_times_one
+ = mgr.get_or_create_binop (integer_type_node, MULT_EXPR,
+ x_init, cst_sval[1]);
+ ASSERT_EQ (x_init_times_one, x_init);
+ const svalue *one_times_x_init
+ = mgr.get_or_create_binop (integer_type_node, MULT_EXPR,
+ cst_sval[1], x_init);
+ ASSERT_EQ (one_times_x_init, x_init);
+
+ // etc
+ // TODO: do we want to use the match-and-simplify DSL for this?
+
+ /* Verify that binops put any constants on the RHS. */
+ const svalue *four_times_x_init
+ = mgr.get_or_create_binop (integer_type_node, MULT_EXPR,
+ cst_sval[4], x_init);
+ const svalue *x_init_times_four
+ = mgr.get_or_create_binop (integer_type_node, MULT_EXPR,
+ x_init, cst_sval[4]);
+ ASSERT_EQ (four_times_x_init, x_init_times_four);
+ const binop_svalue *binop = four_times_x_init->dyn_cast_binop_svalue ();
+ ASSERT_EQ (binop->get_op (), MULT_EXPR);
+ ASSERT_EQ (binop->get_arg0 (), x_init);
+ ASSERT_EQ (binop->get_arg1 (), cst_sval[4]);
+
+ /* Verify that ((x + 1) + 1) == (x + 2). */
+ const svalue *x_init_plus_one
+ = mgr.get_or_create_binop (integer_type_node, PLUS_EXPR,
+ x_init, cst_sval[1]);
+ const svalue *x_init_plus_two
+ = mgr.get_or_create_binop (integer_type_node, PLUS_EXPR,
+ x_init, cst_sval[2]);
+ const svalue *x_init_plus_one_plus_one
+ = mgr.get_or_create_binop (integer_type_node, PLUS_EXPR,
+ x_init_plus_one, cst_sval[1]);
+ ASSERT_EQ (x_init_plus_one_plus_one, x_init_plus_two);
+}
+
+/* Verify that sub_svalues are folded as expected. */
- purge_stats stats;
- model1.get_constraints ()->purge (purge_one_svalue_id (model1, x), &stats);
- ASSERT_NE (model0, model1);
+static void
+test_sub_svalue_folding ()
+{
+ coord_test ct;
+ tree c = build_global_decl ("c", ct.m_coord_type);
+ tree c_x = build3 (COMPONENT_REF, TREE_TYPE (ct.m_x_field),
+ c, ct.m_x_field, NULL_TREE);
- ASSERT_CONDITION_UNKNOWN (model1, x, NE_EXPR, int_42);
- ASSERT_CONDITION_TRUE (model1, y, NE_EXPR, int_0);
- }
+ region_model_manager mgr;
+ region_model model (&mgr);
+ test_region_model_context ctxt;
+ const region *c_x_reg = model.get_lvalue (c_x, &ctxt);
- {
- region_model model0;
- region_model model1;
-
- ADD_SAT_CONSTRAINT (model1, x, NE_EXPR, int_42);
- ADD_SAT_CONSTRAINT (model1, y, NE_EXPR, int_0);
- ASSERT_NE (model0, model1);
- ASSERT_CONDITION_TRUE (model1, x, NE_EXPR, int_42);
- ASSERT_CONDITION_TRUE (model1, y, NE_EXPR, int_0);
-
- purge_stats stats;
- model1.get_constraints ()->purge (purge_all_svalue_ids (), &stats);
- ASSERT_CONDITION_UNKNOWN (model1, x, NE_EXPR, int_42);
- ASSERT_CONDITION_UNKNOWN (model1, y, NE_EXPR, int_0);
- }
+ /* Verify that sub_svalue of "unknown" simply
+ yields an unknown. */
+ const svalue *unknown = mgr.get_or_create_unknown_svalue (ct.m_coord_type);
+ const svalue *sub = mgr.get_or_create_sub_svalue (TREE_TYPE (ct.m_x_field),
+ unknown, c_x_reg);
+ ASSERT_EQ (sub->get_kind (), SK_UNKNOWN);
+ ASSERT_EQ (sub->get_type (), TREE_TYPE (ct.m_x_field));
}
-/* Test that region_model::purge_unused_svalues works as expected. */
+/* Test that region::descendent_of_p works as expected. */
static void
-test_purge_unused_svalues ()
+test_descendent_of_p ()
{
- tree int_42 = build_int_cst (integer_type_node, 42);
- tree int_0 = build_int_cst (integer_type_node, 0);
- tree x = build_global_decl ("x", integer_type_node);
- tree y = build_global_decl ("y", integer_type_node);
-
- test_region_model_context ctxt;
- region_model model;
- model.set_to_new_unknown_value (model.get_lvalue (x, &ctxt), TREE_TYPE (x),
- &ctxt);
- model.set_to_new_unknown_value (model.get_lvalue (x, &ctxt), TREE_TYPE (x),
- &ctxt);
- model.set_to_new_unknown_value (model.get_lvalue (x, &ctxt), TREE_TYPE (x),
- &ctxt);
- model.add_constraint (x, NE_EXPR, int_42, &ctxt);
-
- model.set_value (model.get_lvalue (x, &ctxt),
- model.get_rvalue (int_42, &ctxt),
- &ctxt);
- model.add_constraint (y, GT_EXPR, int_0, &ctxt);
-
- /* The redundant unknown values should have been purged. */
- purge_stats purged;
- model.purge_unused_svalues (&purged, NULL);
- ASSERT_EQ (purged.m_num_svalues, 3);
-
- /* and the redundant constraint on an old, unknown value for x should
- have been purged. */
- ASSERT_EQ (purged.m_num_equiv_classes, 1);
- ASSERT_EQ (purged.m_num_constraints, 1);
- ASSERT_EQ (model.get_constraints ()->m_constraints.length (), 2);
+ region_model_manager mgr;
+ const region *stack = mgr.get_stack_region ();
+ const region *heap = mgr.get_heap_region ();
+ const region *code = mgr.get_code_region ();
+ const region *globals = mgr.get_globals_region ();
- /* ...but we should still have x == 42. */
- ASSERT_EQ (model.eval_condition (x, EQ_EXPR, int_42, &ctxt),
- tristate::TS_TRUE);
+ /* descendent_of_p should return true when used on the region itself. */
+ ASSERT_TRUE (stack->descendent_of_p (stack));
+ ASSERT_FALSE (stack->descendent_of_p (heap));
+ ASSERT_FALSE (stack->descendent_of_p (code));
+ ASSERT_FALSE (stack->descendent_of_p (globals));
- /* ...and we should still have the constraint on y. */
- ASSERT_EQ (model.eval_condition (y, GT_EXPR, int_0, &ctxt),
- tristate::TS_TRUE);
+ tree x = build_global_decl ("x", integer_type_node);
+ const region *x_reg = mgr.get_region_for_global (x);
+ ASSERT_TRUE (x_reg->descendent_of_p (globals));
- ASSERT_EQ (ctxt.get_num_diagnostics (), 0);
+ /* A cast_region should be a descendent of the original region. */
+ const region *cast_reg = mgr.get_cast_region (x_reg, ptr_type_node);
+ ASSERT_TRUE (cast_reg->descendent_of_p (x_reg));
}
/* Verify that simple assignments work as expected. */
tree y = build_global_decl ("y", integer_type_node);
/* "x == 0", then use of y, then "y = 0;". */
- region_model model;
+ region_model_manager mgr;
+ region_model model (&mgr);
ADD_SAT_CONSTRAINT (model, x, EQ_EXPR, int_0);
ASSERT_CONDITION_UNKNOWN (model, y, EQ_EXPR, int_0);
model.set_value (model.get_lvalue (y, NULL),
NULL);
ASSERT_CONDITION_TRUE (model, y, EQ_EXPR, int_0);
ASSERT_CONDITION_TRUE (model, y, EQ_EXPR, x);
-
- ASSERT_DUMP_EQ (model, true, "y: 0, {x}: unknown, x == y");
}
/* Verify that compound assignments work as expected. */
tree int_17 = build_int_cst (integer_type_node, 17);
tree int_m3 = build_int_cst (integer_type_node, -3);
- region_model model;
+ region_model_manager mgr;
+ region_model model (&mgr);
model.set_value (c_x, int_17, NULL);
model.set_value (c_y, int_m3, NULL);
- ASSERT_DUMP_EQ (model, true, "c.x: 17, c.y: -3");
-
/* Copy c to d. */
model.copy_region (model.get_lvalue (d, NULL), model.get_lvalue (c, NULL),
NULL);
/* "q" global. */
tree q = build_global_decl ("q", ptr_type_node);
+ region_model_manager mgr;
test_region_model_context ctxt;
- region_model model;
+ region_model model (&mgr);
/* Push stack frame for "parent_fn". */
- region_id parent_frame_rid
- = model.push_frame (DECL_STRUCT_FUNCTION (parent_fndecl), NULL, &ctxt);
- ASSERT_EQ (model.get_current_frame_id (), parent_frame_rid);
- region_id a_in_parent_rid = model.get_lvalue (a, &ctxt);
- model.set_value (a_in_parent_rid, model.get_rvalue (int_42, &ctxt), &ctxt);
- model.set_to_new_unknown_value (model.get_lvalue (b, &ctxt),
- integer_type_node, &ctxt);
+ const region *parent_frame_reg
+ = model.push_frame (DECL_STRUCT_FUNCTION (parent_fndecl),
+ NULL, &ctxt);
+ ASSERT_EQ (model.get_current_frame (), parent_frame_reg);
+ ASSERT_TRUE (model.region_exists_p (parent_frame_reg));
+ const region *a_in_parent_reg = model.get_lvalue (a, &ctxt);
+ model.set_value (a_in_parent_reg,
+ model.get_rvalue (int_42, &ctxt),
+ &ctxt);
+ ASSERT_EQ (a_in_parent_reg->maybe_get_frame_region (), parent_frame_reg);
+
model.add_constraint (b, LT_EXPR, int_10, &ctxt);
ASSERT_EQ (model.eval_condition (b, LT_EXPR, int_10, &ctxt),
tristate (tristate::TS_TRUE));
/* Push stack frame for "child_fn". */
- region_id child_frame_rid
+ const region *child_frame_reg
= model.push_frame (DECL_STRUCT_FUNCTION (child_fndecl), NULL, &ctxt);
- ASSERT_EQ (model.get_current_frame_id (), child_frame_rid);
- region_id x_in_child_rid = model.get_lvalue (x, &ctxt);
- model.set_value (x_in_child_rid, model.get_rvalue (int_0, &ctxt), &ctxt);
- model.set_to_new_unknown_value (model.get_lvalue (y, &ctxt),
- integer_type_node, &ctxt);
+ ASSERT_EQ (model.get_current_frame (), child_frame_reg);
+ ASSERT_TRUE (model.region_exists_p (child_frame_reg));
+ const region *x_in_child_reg = model.get_lvalue (x, &ctxt);
+ model.set_value (x_in_child_reg,
+ model.get_rvalue (int_0, &ctxt),
+ &ctxt);
+ ASSERT_EQ (x_in_child_reg->maybe_get_frame_region (), child_frame_reg);
+
model.add_constraint (y, NE_EXPR, int_5, &ctxt);
ASSERT_EQ (model.eval_condition (y, NE_EXPR, int_5, &ctxt),
tristate (tristate::TS_TRUE));
/* Point a global pointer at a local in the child frame: p = &x. */
- region_id p_in_globals_rid = model.get_lvalue (p, &ctxt);
- model.set_value (p_in_globals_rid,
- model.get_or_create_ptr_svalue (ptr_type_node,
- x_in_child_rid),
+ const region *p_in_globals_reg = model.get_lvalue (p, &ctxt);
+ model.set_value (p_in_globals_reg,
+ mgr.get_ptr_svalue (ptr_type_node, x_in_child_reg),
&ctxt);
+ ASSERT_EQ (p_in_globals_reg->maybe_get_frame_region (), NULL);
/* Point another global pointer at p: q = &p. */
- region_id q_in_globals_rid = model.get_lvalue (q, &ctxt);
- model.set_value (q_in_globals_rid,
- model.get_or_create_ptr_svalue (ptr_type_node,
- p_in_globals_rid),
+ const region *q_in_globals_reg = model.get_lvalue (q, &ctxt);
+ model.set_value (q_in_globals_reg,
+ mgr.get_ptr_svalue (ptr_type_node, p_in_globals_reg),
&ctxt);
- /* Test get_descendents. */
- region_id_set descendents (&model);
- model.get_descendents (child_frame_rid, &descendents, region_id::null ());
- ASSERT_TRUE (descendents.region_p (child_frame_rid));
- ASSERT_TRUE (descendents.region_p (x_in_child_rid));
- ASSERT_FALSE (descendents.region_p (a_in_parent_rid));
- ASSERT_EQ (descendents.num_regions (), 3);
-#if 0
- auto_vec<region_id> test_vec;
- for (region_id_set::iterator_t iter = descendents.begin ();
- iter != descendents.end ();
- ++iter)
- test_vec.safe_push (*iter);
- gcc_unreachable (); // TODO
- //ASSERT_EQ ();
-#endif
-
- ASSERT_DUMP_EQ (model, true,
- "a: 42, x: 0, p: &x, q: &p, {b, y}: unknown, b < 10, y != 5");
+ /* Test region::descendent_of_p. */
+ ASSERT_TRUE (child_frame_reg->descendent_of_p (child_frame_reg));
+ ASSERT_TRUE (x_in_child_reg->descendent_of_p (child_frame_reg));
+ ASSERT_FALSE (a_in_parent_reg->descendent_of_p (child_frame_reg));
/* Pop the "child_fn" frame from the stack. */
- purge_stats purged;
- model.pop_frame (region_id::null (), true, &purged, &ctxt);
-
- /* We should have purged the unknown values for x and y. */
- ASSERT_EQ (purged.m_num_svalues, 2);
-
- /* We should have purged the frame region and the regions for x and y. */
- ASSERT_EQ (purged.m_num_regions, 3);
-
- /* We should have purged the constraint on y. */
- ASSERT_EQ (purged.m_num_equiv_classes, 1);
- ASSERT_EQ (purged.m_num_constraints, 1);
+ model.pop_frame (NULL, NULL, &ctxt);
+ ASSERT_FALSE (model.region_exists_p (child_frame_reg));
+ ASSERT_TRUE (model.region_exists_p (parent_frame_reg));
/* Verify that p (which was pointing at the local "x" in the popped
frame) has been poisoned. */
- svalue *new_p_sval = model.get_svalue (model.get_rvalue (p, &ctxt));
+ const svalue *new_p_sval = model.get_rvalue (p, &ctxt);
ASSERT_EQ (new_p_sval->get_kind (), SK_POISONED);
ASSERT_EQ (new_p_sval->dyn_cast_poisoned_svalue ()->get_poison_kind (),
POISON_KIND_POPPED_STACK);
/* Verify that q still points to p, in spite of the region
renumbering. */
- svalue *new_q_sval = model.get_svalue (model.get_rvalue (q, &ctxt));
+ const svalue *new_q_sval = model.get_rvalue (q, &ctxt);
ASSERT_EQ (new_q_sval->get_kind (), SK_REGION);
ASSERT_EQ (new_q_sval->dyn_cast_region_svalue ()->get_pointee (),
model.get_lvalue (p, &ctxt));
/* Verify that top of stack has been updated. */
- ASSERT_EQ (model.get_current_frame_id (), parent_frame_rid);
+ ASSERT_EQ (model.get_current_frame (), parent_frame_reg);
/* Verify locals in parent frame. */
/* Verify "a" still has its value. */
- svalue *new_a_sval = model.get_svalue (model.get_rvalue (a, &ctxt));
+ const svalue *new_a_sval = model.get_rvalue (a, &ctxt);
ASSERT_EQ (new_a_sval->get_kind (), SK_CONSTANT);
ASSERT_EQ (new_a_sval->dyn_cast_constant_svalue ()->get_constant (),
int_42);
}
/* Verify that get_representative_path_var works as expected, that
- we can map from region ids to parms and back within a recursive call
+ we can map from regions to parms and back within a recursive call
stack. */
static void
get_identifier ("n"),
integer_type_node);
- region_model model;
+ region_model_manager mgr;
+ test_region_model_context ctxt;
+ region_model model (&mgr);
/* Push 5 stack frames for "factorial", each with a param */
- auto_vec<region_id> parm_rids;
- auto_vec<svalue_id> parm_sids;
+ auto_vec<const region *> parm_regs;
+ auto_vec<const svalue *> parm_svals;
for (int depth = 0; depth < 5; depth++)
{
- region_id frame_rid
- = model.push_frame (DECL_STRUCT_FUNCTION (fndecl), NULL, NULL);
- region_id rid_n = model.get_lvalue (path_var (n, depth), NULL);
- parm_rids.safe_push (rid_n);
+ const region *frame_n_reg
+ = model.push_frame (DECL_STRUCT_FUNCTION (fndecl), NULL, &ctxt);
+ const region *parm_n_reg = model.get_lvalue (path_var (n, depth), &ctxt);
+ parm_regs.safe_push (parm_n_reg);
- ASSERT_EQ (model.get_region (rid_n)->get_parent (), frame_rid);
-
- svalue_id sid_n
- = model.set_to_new_unknown_value (rid_n, integer_type_node, NULL);
- parm_sids.safe_push (sid_n);
+ ASSERT_EQ (parm_n_reg->get_parent_region (), frame_n_reg);
+ const svalue *sval_n = mgr.get_or_create_initial_value (parm_n_reg);
+ parm_svals.safe_push (sval_n);
}
/* Verify that we can recognize that the regions are the parms,
at every depth. */
for (int depth = 0; depth < 5; depth++)
{
- ASSERT_EQ (model.get_representative_path_var (parm_rids[depth]),
- path_var (n, depth));
+ {
+ svalue_set visited;
+ ASSERT_EQ (model.get_representative_path_var (parm_regs[depth],
+ &visited),
+ path_var (n, depth + 1));
+ }
/* ...and that we can lookup lvalues for locals for all frames,
not just the top. */
ASSERT_EQ (model.get_lvalue (path_var (n, depth), NULL),
- parm_rids[depth]);
+ parm_regs[depth]);
/* ...and that we can locate the svalues. */
- auto_vec<path_var> pvs;
- model.get_path_vars_for_svalue (parm_sids[depth], &pvs);
- ASSERT_EQ (pvs.length (), 1);
- ASSERT_EQ (pvs[0], path_var (n, depth));
+ {
+ svalue_set visited;
+ ASSERT_EQ (model.get_representative_path_var (parm_svals[depth],
+ &visited),
+ path_var (n, depth + 1));
+ }
}
}
-/* Verify that the core regions within a region_model are in a consistent
- order after canonicalization. */
+/* Ensure that region_model::operator== works as expected. */
static void
-test_canonicalization_1 ()
+test_equality_1 ()
{
- region_model model0;
- model0.get_root_region ()->ensure_stack_region (&model0);
- model0.get_root_region ()->ensure_globals_region (&model0);
-
- region_model model1;
- model1.get_root_region ()->ensure_globals_region (&model1);
- model1.get_root_region ()->ensure_stack_region (&model1);
+ tree int_42 = build_int_cst (integer_type_node, 42);
+ tree int_17 = build_int_cst (integer_type_node, 17);
- model0.canonicalize (NULL);
- model1.canonicalize (NULL);
+/* Verify that "empty" region_model instances are equal to each other. */
+ region_model_manager mgr;
+ region_model model0 (&mgr);
+ region_model model1 (&mgr);
ASSERT_EQ (model0, model1);
+
+ /* Verify that setting state in model1 makes the models non-equal. */
+ tree x = build_global_decl ("x", integer_type_node);
+ model0.set_value (x, int_42, NULL);
+ ASSERT_EQ (model0.get_rvalue (x, NULL)->maybe_get_constant (), int_42);
+ ASSERT_NE (model0, model1);
+
+ /* Verify the copy-ctor. */
+ region_model model2 (model0);
+ ASSERT_EQ (model0, model2);
+ ASSERT_EQ (model2.get_rvalue (x, NULL)->maybe_get_constant (), int_42);
+ ASSERT_NE (model1, model2);
+
+ /* Verify that models obtained from copy-ctor are independently editable
+ w/o affecting the original model. */
+ model2.set_value (x, int_17, NULL);
+ ASSERT_NE (model0, model2);
+ ASSERT_EQ (model2.get_rvalue (x, NULL)->maybe_get_constant (), int_17);
+ ASSERT_EQ (model0.get_rvalue (x, NULL)->maybe_get_constant (), int_42);
}
/* Verify that region models for
x = 42; y = 113;
and
y = 113; x = 42;
- are equal after canonicalization. */
+ are equal. */
static void
test_canonicalization_2 ()
tree x = build_global_decl ("x", integer_type_node);
tree y = build_global_decl ("y", integer_type_node);
- region_model model0;
+ region_model_manager mgr;
+ region_model model0 (&mgr);
model0.set_value (model0.get_lvalue (x, NULL),
model0.get_rvalue (int_42, NULL),
NULL);
model0.get_rvalue (int_113, NULL),
NULL);
- region_model model1;
+ region_model model1 (&mgr);
model1.set_value (model1.get_lvalue (y, NULL),
model1.get_rvalue (int_113, NULL),
NULL);
model1.get_rvalue (int_42, NULL),
NULL);
- model0.canonicalize (NULL);
- model1.canonicalize (NULL);
ASSERT_EQ (model0, model1);
}
tree x = build_global_decl ("x", integer_type_node);
tree y = build_global_decl ("y", integer_type_node);
- region_model model0;
+ region_model_manager mgr;
+ region_model model0 (&mgr);
model0.add_constraint (x, GT_EXPR, int_3, NULL);
model0.add_constraint (y, GT_EXPR, int_42, NULL);
- region_model model1;
+ region_model model1 (&mgr);
model1.add_constraint (y, GT_EXPR, int_42, NULL);
model1.add_constraint (x, GT_EXPR, int_3, NULL);
- model0.canonicalize (NULL);
- model1.canonicalize (NULL);
+ model0.canonicalize ();
+ model1.canonicalize ();
ASSERT_EQ (model0, model1);
}
auto_vec<tree> csts;
append_interesting_constants (&csts);
- region_model model;
+ region_model_manager mgr;
+ region_model model (&mgr);
unsigned i;
tree cst;
FOR_EACH_VEC_ELT (csts, i, cst)
model.get_rvalue (cst, NULL);
- model.canonicalize (NULL);
+ model.canonicalize ();
}
/* Assert that if we have two region_model instances
static void
assert_region_models_merge (tree expr, tree val_a, tree val_b,
- region_model *out_merged_model,
- svalue **out_merged_svalue)
+ region_model *out_merged_model,
+ const svalue **out_merged_svalue)
{
+ program_point point (program_point::origin ());
test_region_model_context ctxt;
- region_model model0;
- region_model model1;
+ region_model_manager *mgr = out_merged_model->get_manager ();
+ region_model model0 (mgr);
+ region_model model1 (mgr);
if (val_a)
model0.set_value (model0.get_lvalue (expr, &ctxt),
model0.get_rvalue (val_a, &ctxt),
&ctxt);
/* They should be mergeable. */
- ASSERT_TRUE (model0.can_merge_with_p (model1, out_merged_model));
-
- svalue_id merged_svalue_sid = out_merged_model->get_rvalue (expr, &ctxt);
- *out_merged_svalue = out_merged_model->get_svalue (merged_svalue_sid);
+ ASSERT_TRUE (model0.can_merge_with_p (model1, point, out_merged_model));
+ *out_merged_svalue = out_merged_model->get_rvalue (expr, &ctxt);
}
/* Verify that we can merge region_model instances. */
get_identifier ("q"),
ptr_type_node);
+ program_point point (program_point::origin ());
+ region_model_manager mgr;
+
{
- region_model model0;
- region_model model1;
- region_model merged;
+ region_model model0 (&mgr);
+ region_model model1 (&mgr);
+ region_model merged (&mgr);
/* Verify empty models can be merged. */
- ASSERT_TRUE (model0.can_merge_with_p (model1, &merged));
+ ASSERT_TRUE (model0.can_merge_with_p (model1, point, &merged));
ASSERT_EQ (model0, merged);
}
/* TODO: verify that the merged model doesn't have a value for
the global */
{
- region_model model0;
- region_model model1;
- region_model merged;
+ region_model model0 (&mgr);
+ region_model model1 (&mgr);
+ region_model merged (&mgr);
test_region_model_context ctxt;
model0.add_constraint (x, EQ_EXPR, int_42, &ctxt);
model1.add_constraint (x, EQ_EXPR, int_113, &ctxt);
- ASSERT_TRUE (model0.can_merge_with_p (model1, &merged));
+ ASSERT_TRUE (model0.can_merge_with_p (model1, point, &merged));
ASSERT_NE (model0, merged);
ASSERT_NE (model1, merged);
}
/* Verify handling of a PARM_DECL. */
{
test_region_model_context ctxt;
- region_model model0;
- region_model model1;
+ region_model model0 (&mgr);
+ region_model model1 (&mgr);
ASSERT_EQ (model0.get_stack_depth (), 0);
model0.push_frame (DECL_STRUCT_FUNCTION (test_fndecl), NULL, &ctxt);
ASSERT_EQ (model0.get_stack_depth (), 1);
- ASSERT_EQ (model0.get_function_at_depth (0),
- DECL_STRUCT_FUNCTION (test_fndecl));
model1.push_frame (DECL_STRUCT_FUNCTION (test_fndecl), NULL, &ctxt);
- svalue_id sid_a
- = model0.set_to_new_unknown_value (model0.get_lvalue (a, &ctxt),
- integer_type_node, &ctxt);
- model1.set_to_new_unknown_value (model1.get_lvalue (a, &ctxt),
- integer_type_node, &ctxt);
+ placeholder_svalue test_sval (integer_type_node, "test sval");
+ model0.set_value (model0.get_lvalue (a, &ctxt), &test_sval, &ctxt);
+ model1.set_value (model1.get_lvalue (a, &ctxt), &test_sval, &ctxt);
ASSERT_EQ (model0, model1);
- /* Check that get_value_by_name works for locals. */
- ASSERT_EQ (model0.get_value_by_name ("a"), sid_a);
-
/* They should be mergeable, and the result should be the same. */
- region_model merged;
- ASSERT_TRUE (model0.can_merge_with_p (model1, &merged));
+ region_model merged (&mgr);
+ ASSERT_TRUE (model0.can_merge_with_p (model1, point, &merged));
ASSERT_EQ (model0, merged);
- /* In particular, there should be an unknown value for "a". */
- svalue *merged_a_sval = merged.get_svalue (merged.get_rvalue (a, &ctxt));
- ASSERT_EQ (merged_a_sval->get_kind (), SK_UNKNOWN);
+ /* In particular, "a" should have the placeholder value. */
+ ASSERT_EQ (merged.get_rvalue (a, &ctxt), &test_sval);
}
/* Verify handling of a global. */
{
test_region_model_context ctxt;
- region_model model0;
- region_model model1;
- svalue_id sid_x
- = model0.set_to_new_unknown_value (model0.get_lvalue (x, &ctxt),
- integer_type_node, &ctxt);
- model1.set_to_new_unknown_value (model1.get_lvalue (x, &ctxt),
- integer_type_node, &ctxt);
- ASSERT_EQ (model0, model1);
+ region_model model0 (&mgr);
+ region_model model1 (&mgr);
- /* Check that get_value_by_name works for globals. */
- ASSERT_EQ (model0.get_value_by_name ("x"), sid_x);
+ placeholder_svalue test_sval (integer_type_node, "test sval");
+ model0.set_value (model0.get_lvalue (x, &ctxt), &test_sval, &ctxt);
+ model1.set_value (model1.get_lvalue (x, &ctxt), &test_sval, &ctxt);
+ ASSERT_EQ (model0, model1);
/* They should be mergeable, and the result should be the same. */
- region_model merged;
- ASSERT_TRUE (model0.can_merge_with_p (model1, &merged));
+ region_model merged (&mgr);
+ ASSERT_TRUE (model0.can_merge_with_p (model1, point, &merged));
ASSERT_EQ (model0, merged);
- /* In particular, there should be an unknown value for "x". */
- svalue *merged_x_sval = merged.get_svalue (merged.get_rvalue (x, &ctxt));
- ASSERT_EQ (merged_x_sval->get_kind (), SK_UNKNOWN);
+ /* In particular, "x" should have the placeholder value. */
+ ASSERT_EQ (merged.get_rvalue (x, &ctxt), &test_sval);
}
/* Use global-handling to verify various combinations of values. */
/* Two equal constant values. */
{
- region_model merged;
- svalue *merged_x_sval;
+ region_model merged (&mgr);
+ const svalue *merged_x_sval;
assert_region_models_merge (x, int_42, int_42, &merged, &merged_x_sval);
/* In particular, there should be a constant value for "x". */
/* Two non-equal constant values. */
{
- region_model merged;
- svalue *merged_x_sval;
+ region_model merged (&mgr);
+ const svalue *merged_x_sval;
assert_region_models_merge (x, int_42, int_113, &merged, &merged_x_sval);
- /* In particular, there should be an unknown value for "x". */
- ASSERT_EQ (merged_x_sval->get_kind (), SK_UNKNOWN);
+ /* In particular, there should be a "widening" value for "x". */
+ ASSERT_EQ (merged_x_sval->get_kind (), SK_WIDENING);
}
- /* Uninit and constant. */
+ /* Initial and constant. */
{
- region_model merged;
- svalue *merged_x_sval;
+ region_model merged (&mgr);
+ const svalue *merged_x_sval;
assert_region_models_merge (x, NULL_TREE, int_113, &merged, &merged_x_sval);
/* In particular, there should be an unknown value for "x". */
ASSERT_EQ (merged_x_sval->get_kind (), SK_UNKNOWN);
}
- /* Constant and uninit. */
+ /* Constant and initial. */
{
- region_model merged;
- svalue *merged_x_sval;
+ region_model merged (&mgr);
+ const svalue *merged_x_sval;
assert_region_models_merge (x, int_42, NULL_TREE, &merged, &merged_x_sval);
/* In particular, there should be an unknown value for "x". */
/* Pointers: non-NULL and non-NULL: ptr to a local. */
{
- region_model model0;
+ region_model model0 (&mgr);
model0.push_frame (DECL_STRUCT_FUNCTION (test_fndecl), NULL, NULL);
- model0.set_to_new_unknown_value (model0.get_lvalue (a, NULL),
- integer_type_node, NULL);
model0.set_value (model0.get_lvalue (p, NULL),
model0.get_rvalue (addr_of_a, NULL), NULL);
ASSERT_EQ (model0, model1);
/* They should be mergeable, and the result should be the same. */
- region_model merged;
- ASSERT_TRUE (model0.can_merge_with_p (model1, &merged));
+ region_model merged (&mgr);
+ ASSERT_TRUE (model0.can_merge_with_p (model1, point, &merged));
ASSERT_EQ (model0, merged);
}
/* Pointers: non-NULL and non-NULL: ptr to a global. */
{
- region_model merged;
+ region_model merged (&mgr);
/* p == &y in both input models. */
- svalue *merged_p_sval;
+ const svalue *merged_p_sval;
assert_region_models_merge (p, addr_of_y, addr_of_y, &merged,
&merged_p_sval);
/* We should get p == &y in the merged model. */
ASSERT_EQ (merged_p_sval->get_kind (), SK_REGION);
- region_svalue *merged_p_ptr = merged_p_sval->dyn_cast_region_svalue ();
- region_id merged_p_star_rid = merged_p_ptr->get_pointee ();
- ASSERT_EQ (merged_p_star_rid, merged.get_lvalue (y, NULL));
+ const region_svalue *merged_p_ptr
+ = merged_p_sval->dyn_cast_region_svalue ();
+ const region *merged_p_star_reg = merged_p_ptr->get_pointee ();
+ ASSERT_EQ (merged_p_star_reg, merged.get_lvalue (y, NULL));
}
/* Pointers: non-NULL ptrs to different globals: should be unknown. */
{
- region_model merged;
- /* x == &y vs x == &z in the input models. */
- svalue *merged_x_sval;
+ region_model merged (&mgr);
+ /* x == &y vs x == &z in the input models; these are actually casts
+ of the ptrs to "int". */
+ const svalue *merged_x_sval;
+ // TODO:
assert_region_models_merge (x, addr_of_y, addr_of_z, &merged,
&merged_x_sval);
/* Pointers: non-NULL and non-NULL: ptr to a heap region. */
{
test_region_model_context ctxt;
- region_model model0;
- region_id new_rid = model0.add_new_malloc_region ();
- svalue_id ptr_sid
- = model0.get_or_create_ptr_svalue (ptr_type_node, new_rid);
+ region_model model0 (&mgr);
+ tree size = build_int_cst (integer_type_node, 1024);
+ const svalue *size_sval = mgr.get_or_create_constant_svalue (size);
+ const region *new_reg = model0.create_region_for_heap_alloc (size_sval);
+ const svalue *ptr_sval = mgr.get_ptr_svalue (ptr_type_node, new_reg);
model0.set_value (model0.get_lvalue (p, &ctxt),
- ptr_sid, &ctxt);
- model0.canonicalize (&ctxt);
+ ptr_sval, &ctxt);
region_model model1 (model0);
ASSERT_EQ (model0, model1);
- region_model merged;
- ASSERT_TRUE (model0.can_merge_with_p (model1, &merged));
+ region_model merged (&mgr);
+ ASSERT_TRUE (model0.can_merge_with_p (model1, point, &merged));
- merged.canonicalize (&ctxt);
-
- /* The merged model ought to be identical (after canonicalization,
- at least). */
+ /* The merged model ought to be identical. */
ASSERT_EQ (model0, merged);
}
- /* Two regions sharing the same unknown svalue should continue sharing
- an unknown svalue after self-merger. */
+ /* Two regions sharing the same placeholder svalue should continue sharing
+ it after self-merger. */
{
test_region_model_context ctxt;
- region_model model0;
- svalue_id sid
- = model0.set_to_new_unknown_value (model0.get_lvalue (x, &ctxt),
- integer_type_node, &ctxt);
- model0.set_value (model0.get_lvalue (y, &ctxt), sid, &ctxt);
+ region_model model0 (&mgr);
+ placeholder_svalue placeholder_sval (integer_type_node, "test");
+ model0.set_value (model0.get_lvalue (x, &ctxt),
+ &placeholder_sval, &ctxt);
+ model0.set_value (model0.get_lvalue (y, &ctxt), &placeholder_sval, &ctxt);
region_model model1 (model0);
/* They should be mergeable, and the result should be the same. */
- region_model merged;
- ASSERT_TRUE (model0.can_merge_with_p (model1, &merged));
+ region_model merged (&mgr);
+ ASSERT_TRUE (model0.can_merge_with_p (model1, point, &merged));
ASSERT_EQ (model0, merged);
/* In particular, we should have x == y. */
tristate (tristate::TS_TRUE));
}
-#if 0
{
- region_model model0;
- region_model model1;
+ region_model model0 (&mgr);
+ region_model model1 (&mgr);
test_region_model_context ctxt;
model0.add_constraint (x, EQ_EXPR, int_42, &ctxt);
model1.add_constraint (x, NE_EXPR, int_42, &ctxt);
- ASSERT_TRUE (model0.can_merge_with_p (model1));
+ region_model merged (&mgr);
+ ASSERT_TRUE (model0.can_merge_with_p (model1, point, &merged));
}
{
- region_model model0;
- region_model model1;
+ region_model model0 (&mgr);
+ region_model model1 (&mgr);
test_region_model_context ctxt;
model0.add_constraint (x, EQ_EXPR, int_42, &ctxt);
model1.add_constraint (x, NE_EXPR, int_42, &ctxt);
model1.add_constraint (x, EQ_EXPR, int_113, &ctxt);
- ASSERT_TRUE (model0.can_merge_with_p (model1));
+ region_model merged (&mgr);
+ ASSERT_TRUE (model0.can_merge_with_p (model1, point, &merged));
}
-#endif
// TODO: what can't we merge? need at least one such test
- heap regions
- value merging:
- every combination, but in particular
- - pairs of regions
+ - pairs of regions
*/
/* Views. */
{
test_region_model_context ctxt;
- region_model model0;
+ region_model model0 (&mgr);
- region_id x_rid = model0.get_lvalue (x, &ctxt);
- region_id x_as_ptr = model0.get_or_create_view (x_rid, ptr_type_node,
- &ctxt);
+ const region *x_reg = model0.get_lvalue (x, &ctxt);
+ const region *x_as_ptr = mgr.get_cast_region (x_reg, ptr_type_node);
model0.set_value (x_as_ptr, model0.get_rvalue (addr_of_y, &ctxt), &ctxt);
region_model model1 (model0);
ASSERT_EQ (model1, model0);
/* They should be mergeable, and the result should be the same. */
- region_model merged;
- ASSERT_TRUE (model0.can_merge_with_p (model1, &merged));
+ region_model merged (&mgr);
+ ASSERT_TRUE (model0.can_merge_with_p (model1, point, &merged));
}
/* Verify that we can merge a model in which a local in an older stack
frame points to a local in a more recent stack frame. */
{
- region_model model0;
+ region_model model0 (&mgr);
model0.push_frame (DECL_STRUCT_FUNCTION (test_fndecl), NULL, NULL);
- region_id q_in_first_frame = model0.get_lvalue (q, NULL);
+ const region *q_in_first_frame = model0.get_lvalue (q, NULL);
/* Push a second frame. */
- region_id rid_2nd_frame
+ const region *reg_2nd_frame
= model0.push_frame (DECL_STRUCT_FUNCTION (test_fndecl), NULL, NULL);
/* Have a pointer in the older frame point to a local in the
more recent frame. */
- svalue_id sid_ptr = model0.get_rvalue (addr_of_a, NULL);
- model0.set_value (q_in_first_frame, sid_ptr, NULL);
+ const svalue *sval_ptr = model0.get_rvalue (addr_of_a, NULL);
+ model0.set_value (q_in_first_frame, sval_ptr, NULL);
/* Verify that it's pointing at the newer frame. */
- region_id rid_pointee
- = model0.get_svalue (sid_ptr)->dyn_cast_region_svalue ()->get_pointee ();
- ASSERT_EQ (model0.get_region (rid_pointee)->get_parent (), rid_2nd_frame);
+ const region *reg_pointee
+ = sval_ptr->dyn_cast_region_svalue ()->get_pointee ();
+ ASSERT_EQ (reg_pointee->get_parent_region (), reg_2nd_frame);
- model0.canonicalize (NULL);
+ model0.canonicalize ();
region_model model1 (model0);
ASSERT_EQ (model0, model1);
/* They should be mergeable, and the result should be the same
(after canonicalization, at least). */
- region_model merged;
- ASSERT_TRUE (model0.can_merge_with_p (model1, &merged));
- merged.canonicalize (NULL);
+ region_model merged (&mgr);
+ ASSERT_TRUE (model0.can_merge_with_p (model1, point, &merged));
+ merged.canonicalize ();
ASSERT_EQ (model0, merged);
}
/* Verify that we can merge a model in which a local points to a global. */
{
- region_model model0;
+ region_model model0 (&mgr);
model0.push_frame (DECL_STRUCT_FUNCTION (test_fndecl), NULL, NULL);
model0.set_value (model0.get_lvalue (q, NULL),
model0.get_rvalue (addr_of_y, NULL), NULL);
- model0.canonicalize (NULL);
-
region_model model1 (model0);
ASSERT_EQ (model0, model1);
/* They should be mergeable, and the result should be the same
(after canonicalization, at least). */
- region_model merged;
- ASSERT_TRUE (model0.can_merge_with_p (model1, &merged));
- merged.canonicalize (NULL);
+ region_model merged (&mgr);
+ ASSERT_TRUE (model0.can_merge_with_p (model1, point, &merged));
ASSERT_EQ (model0, merged);
}
}
tree z = build_global_decl ("z", integer_type_node);
tree n = build_global_decl ("n", integer_type_node);
+ region_model_manager mgr;
test_region_model_context ctxt;
/* model0: 0 <= (x == y) < n. */
- region_model model0;
- model0.set_to_new_unknown_value (model0.get_lvalue (x, &ctxt),
- integer_type_node, &ctxt);
+ region_model model0 (&mgr);
model0.add_constraint (x, EQ_EXPR, y, &ctxt);
model0.add_constraint (x, GE_EXPR, int_0, NULL);
model0.add_constraint (x, LT_EXPR, n, NULL);
/* model1: z != 5 && (0 <= x < n). */
- region_model model1;
- model1.set_to_new_unknown_value (model1.get_lvalue (x, &ctxt),
- integer_type_node, &ctxt);
+ region_model model1 (&mgr);
model1.add_constraint (z, NE_EXPR, int_5, NULL);
model1.add_constraint (x, GE_EXPR, int_0, NULL);
model1.add_constraint (x, LT_EXPR, n, NULL);
/* They should be mergeable; the merged constraints should
be: (0 <= x < n). */
- region_model merged;
- ASSERT_TRUE (model0.can_merge_with_p (model1, &merged));
+ program_point point (program_point::origin ());
+ region_model merged (&mgr);
+ ASSERT_TRUE (model0.can_merge_with_p (model1, point, &merged));
ASSERT_EQ (merged.eval_condition (x, GE_EXPR, int_0, &ctxt),
tristate (tristate::TS_TRUE));
tristate (tristate::TS_UNKNOWN));
}
+/* Verify that widening_svalue::eval_condition_without_cm works as
+ expected. */
+
+static void
+test_widening_constraints ()
+{
+ program_point point (program_point::origin ());
+ tree int_0 = build_int_cst (integer_type_node, 0);
+ tree int_m1 = build_int_cst (integer_type_node, -1);
+ tree int_1 = build_int_cst (integer_type_node, 1);
+ tree int_256 = build_int_cst (integer_type_node, 256);
+ region_model_manager mgr;
+ test_region_model_context ctxt;
+ const svalue *int_0_sval = mgr.get_or_create_constant_svalue (int_0);
+ const svalue *int_1_sval = mgr.get_or_create_constant_svalue (int_1);
+ const svalue *w_zero_then_one_sval
+ = mgr.get_or_create_widening_svalue (integer_type_node, point,
+ int_0_sval, int_1_sval);
+ const widening_svalue *w_zero_then_one
+ = w_zero_then_one_sval->dyn_cast_widening_svalue ();
+ ASSERT_EQ (w_zero_then_one->get_direction (),
+ widening_svalue::DIR_ASCENDING);
+ ASSERT_EQ (w_zero_then_one->eval_condition_without_cm (LT_EXPR, int_m1),
+ tristate::TS_FALSE);
+ ASSERT_EQ (w_zero_then_one->eval_condition_without_cm (LT_EXPR, int_0),
+ tristate::TS_FALSE);
+ ASSERT_EQ (w_zero_then_one->eval_condition_without_cm (LT_EXPR, int_1),
+ tristate::TS_UNKNOWN);
+ ASSERT_EQ (w_zero_then_one->eval_condition_without_cm (LT_EXPR, int_256),
+ tristate::TS_UNKNOWN);
+
+ ASSERT_EQ (w_zero_then_one->eval_condition_without_cm (LE_EXPR, int_m1),
+ tristate::TS_FALSE);
+ ASSERT_EQ (w_zero_then_one->eval_condition_without_cm (LE_EXPR, int_0),
+ tristate::TS_UNKNOWN);
+ ASSERT_EQ (w_zero_then_one->eval_condition_without_cm (LE_EXPR, int_1),
+ tristate::TS_UNKNOWN);
+ ASSERT_EQ (w_zero_then_one->eval_condition_without_cm (LE_EXPR, int_256),
+ tristate::TS_UNKNOWN);
+
+ ASSERT_EQ (w_zero_then_one->eval_condition_without_cm (GT_EXPR, int_m1),
+ tristate::TS_TRUE);
+ ASSERT_EQ (w_zero_then_one->eval_condition_without_cm (GT_EXPR, int_0),
+ tristate::TS_UNKNOWN);
+ ASSERT_EQ (w_zero_then_one->eval_condition_without_cm (GT_EXPR, int_1),
+ tristate::TS_UNKNOWN);
+ ASSERT_EQ (w_zero_then_one->eval_condition_without_cm (GT_EXPR, int_256),
+ tristate::TS_UNKNOWN);
+
+ ASSERT_EQ (w_zero_then_one->eval_condition_without_cm (GE_EXPR, int_m1),
+ tristate::TS_TRUE);
+ ASSERT_EQ (w_zero_then_one->eval_condition_without_cm (GE_EXPR, int_0),
+ tristate::TS_TRUE);
+ ASSERT_EQ (w_zero_then_one->eval_condition_without_cm (GE_EXPR, int_1),
+ tristate::TS_UNKNOWN);
+ ASSERT_EQ (w_zero_then_one->eval_condition_without_cm (GE_EXPR, int_256),
+ tristate::TS_UNKNOWN);
+
+ ASSERT_EQ (w_zero_then_one->eval_condition_without_cm (EQ_EXPR, int_m1),
+ tristate::TS_FALSE);
+ ASSERT_EQ (w_zero_then_one->eval_condition_without_cm (EQ_EXPR, int_0),
+ tristate::TS_UNKNOWN);
+ ASSERT_EQ (w_zero_then_one->eval_condition_without_cm (EQ_EXPR, int_1),
+ tristate::TS_UNKNOWN);
+ ASSERT_EQ (w_zero_then_one->eval_condition_without_cm (EQ_EXPR, int_256),
+ tristate::TS_UNKNOWN);
+
+ ASSERT_EQ (w_zero_then_one->eval_condition_without_cm (NE_EXPR, int_m1),
+ tristate::TS_TRUE);
+ ASSERT_EQ (w_zero_then_one->eval_condition_without_cm (NE_EXPR, int_0),
+ tristate::TS_UNKNOWN);
+ ASSERT_EQ (w_zero_then_one->eval_condition_without_cm (NE_EXPR, int_1),
+ tristate::TS_UNKNOWN);
+ ASSERT_EQ (w_zero_then_one->eval_condition_without_cm (NE_EXPR, int_256),
+ tristate::TS_UNKNOWN);
+}
+
+/* Verify merging constraints for states simulating successive iterations
+ of a loop.
+ Simulate:
+ for (i = 0; i < 256; i++)
+ [...body...]
+ i.e. this gimple:
+ i_15 = 0;
+ goto <bb 4>;
+
+ <bb 4> :
+ i_11 = PHI <i_15(2), i_23(3)>
+ if (i_11 <= 255)
+ goto <bb 3>;
+ else
+ goto [AFTER LOOP]
+
+ <bb 3> :
+ [LOOP BODY]
+ i_23 = i_11 + 1;
+
+ and thus these ops (and resultant states):
+ i_11 = PHI()
+ {i_11: 0}
+ add_constraint (i_11 <= 255) [for the true edge]
+ {i_11: 0} [constraint was a no-op]
+ i_23 = i_11 + 1;
+ {i_23: 1}
+ i_11 = PHI()
+ {i_11: WIDENED (at phi, 0, 1)}
+ add_constraint (i_11 <= 255) [for the true edge]
+ {i_11: WIDENED (at phi, 0, 1); WIDENED <= 255}
+ i_23 = i_11 + 1;
+ {i_23: (WIDENED (at phi, 0, 1) + 1); WIDENED <= 255}
+ i_11 = PHI(); merge with state at phi above
+ {i_11: WIDENED (at phi, 0, 1); WIDENED <= 256}
+ [changing meaning of "WIDENED" here]
+ if (i_11 <= 255)
+ T: {i_11: WIDENED (at phi, 0, 1); WIDENED <= 255}; cache hit
+ F: {i_11: 256}
+ */
+
+static void
+test_iteration_1 ()
+{
+ program_point point (program_point::origin ());
+
+ tree int_0 = build_int_cst (integer_type_node, 0);
+ tree int_1 = build_int_cst (integer_type_node, 1);
+ tree int_256 = build_int_cst (integer_type_node, 256);
+ tree int_257 = build_int_cst (integer_type_node, 257);
+ tree i = build_global_decl ("i", integer_type_node);
+
+ region_model_manager mgr;
+ test_region_model_context ctxt;
+
+ /* model0: i: 0. */
+ region_model model0 (&mgr);
+ model0.set_value (i, int_0, &ctxt);
+
+ /* model1: i: 1. */
+ region_model model1 (&mgr);
+ model1.set_value (i, int_1, &ctxt);
+
+ /* Should merge "i" to a widened value. */
+ region_model model2 (&mgr);
+ ASSERT_TRUE (model1.can_merge_with_p (model0, point, &model2));
+ const svalue *merged_i = model2.get_rvalue (i, &ctxt);
+ ASSERT_EQ (merged_i->get_kind (), SK_WIDENING);
+ const widening_svalue *w = merged_i->dyn_cast_widening_svalue ();
+ ASSERT_EQ (w->get_direction (), widening_svalue::DIR_ASCENDING);
+
+ /* Add constraint: i < 256 */
+ model2.add_constraint (i, LT_EXPR, int_256, &ctxt);
+ ASSERT_EQ (model2.eval_condition (i, LT_EXPR, int_256, &ctxt),
+ tristate (tristate::TS_TRUE));
+ ASSERT_EQ (model2.eval_condition (i, GE_EXPR, int_0, &ctxt),
+ tristate (tristate::TS_TRUE));
+
+ /* Try merging with the initial state. */
+ region_model model3 (&mgr);
+ ASSERT_TRUE (model2.can_merge_with_p (model0, point, &model3));
+ /* Merging the merged value with the initial value should be idempotent,
+ so that the analysis converges. */
+ ASSERT_EQ (model3.get_rvalue (i, &ctxt), merged_i);
+ /* Merger of 0 and a widening value with constraint < CST
+ should retain the constraint, even though it was implicit
+ for the 0 case. */
+ ASSERT_EQ (model3.eval_condition (i, LT_EXPR, int_256, &ctxt),
+ tristate (tristate::TS_TRUE));
+ /* ...and we should have equality: the analysis should have converged. */
+ ASSERT_EQ (model3, model2);
+
+ /* "i_23 = i_11 + 1;" */
+ region_model model4 (model3);
+ ASSERT_EQ (model4, model2);
+ model4.set_value (i, build2 (PLUS_EXPR, integer_type_node, i, int_1), &ctxt);
+ const svalue *plus_one = model4.get_rvalue (i, &ctxt);
+ ASSERT_EQ (plus_one->get_kind (), SK_BINOP);
+
+ /* Try merging with the "i: 1" state. */
+ region_model model5 (&mgr);
+ ASSERT_TRUE (model4.can_merge_with_p (model1, point, &model5));
+ ASSERT_EQ (model5.get_rvalue (i, &ctxt), plus_one);
+ ASSERT_EQ (model5, model4);
+
+ /* "i_11 = PHI();" merge with state at phi above.
+ For i, we should have a merger of WIDENING with WIDENING + 1,
+ and this should be WIDENING again. */
+ region_model model6 (&mgr);
+ ASSERT_TRUE (model5.can_merge_with_p (model2, point, &model6));
+ const svalue *merged_widening = model6.get_rvalue (i, &ctxt);
+ ASSERT_EQ (merged_widening->get_kind (), SK_WIDENING);
+
+ ASSERT_CONDITION_TRUE (model6, i, LT_EXPR, int_257);
+}
+
/* Verify that if we mark a pointer to a malloc-ed region as non-NULL,
all cast pointers to that region are also known to be non-NULL. */
static void
test_malloc_constraints ()
{
- region_model model;
+ region_model_manager mgr;
+ region_model model (&mgr);
tree p = build_global_decl ("p", ptr_type_node);
tree char_star = build_pointer_type (char_type_node);
tree q = build_global_decl ("q", char_star);
tree null_ptr = build_int_cst (ptr_type_node, 0);
- region_id rid = model.add_new_malloc_region ();
- svalue_id sid = model.get_or_create_ptr_svalue (ptr_type_node, rid);
- model.set_value (model.get_lvalue (p, NULL), sid, NULL);
+ const svalue *size_in_bytes
+ = mgr.get_or_create_unknown_svalue (integer_type_node);
+ const region *reg = model.create_region_for_heap_alloc (size_in_bytes);
+ const svalue *sval = mgr.get_ptr_svalue (ptr_type_node, reg);
+ model.set_value (model.get_lvalue (p, NULL), sval, NULL);
model.set_value (q, p, NULL);
- /* We should have a symbolic_region with m_possibly_null: true. */
- region *pointee = model.get_region (rid);
- symbolic_region *sym_reg = pointee->dyn_cast_symbolic_region ();
- ASSERT_NE (sym_reg, NULL);
- ASSERT_TRUE (sym_reg->m_possibly_null);
-
ASSERT_CONDITION_UNKNOWN (model, p, NE_EXPR, null_ptr);
ASSERT_CONDITION_UNKNOWN (model, p, EQ_EXPR, null_ptr);
ASSERT_CONDITION_UNKNOWN (model, q, NE_EXPR, null_ptr);
model.add_constraint (p, NE_EXPR, null_ptr, NULL);
- /* Adding the constraint should have cleared m_possibly_null. */
- ASSERT_FALSE (sym_reg->m_possibly_null);
-
ASSERT_CONDITION_TRUE (model, p, NE_EXPR, null_ptr);
ASSERT_CONDITION_FALSE (model, p, EQ_EXPR, null_ptr);
ASSERT_CONDITION_TRUE (model, q, NE_EXPR, null_ptr);
ASSERT_CONDITION_FALSE (model, q, EQ_EXPR, null_ptr);
}
+/* Smoketest of getting and setting the value of a variable. */
+
+static void
+test_var ()
+{
+ /* "int i;" */
+ tree i = build_global_decl ("i", integer_type_node);
+
+ tree int_17 = build_int_cst (integer_type_node, 17);
+ tree int_m3 = build_int_cst (integer_type_node, -3);
+
+ region_model_manager mgr;
+ region_model model (&mgr);
+
+ const region *i_reg = model.get_lvalue (i, NULL);
+ ASSERT_EQ (i_reg->get_kind (), RK_DECL);
+
+ /* Reading "i" should give a symbolic "initial value". */
+ const svalue *sval_init = model.get_rvalue (i, NULL);
+ ASSERT_EQ (sval_init->get_kind (), SK_INITIAL);
+ ASSERT_EQ (sval_init->dyn_cast_initial_svalue ()->get_region (), i_reg);
+ /* ..and doing it again should give the same "initial value". */
+ ASSERT_EQ (model.get_rvalue (i, NULL), sval_init);
+
+ /* "i = 17;". */
+ model.set_value (i, int_17, NULL);
+ ASSERT_EQ (model.get_rvalue (i, NULL),
+ model.get_rvalue (int_17, NULL));
+
+ /* "i = -3;". */
+ model.set_value (i, int_m3, NULL);
+ ASSERT_EQ (model.get_rvalue (i, NULL),
+ model.get_rvalue (int_m3, NULL));
+
+ /* Verify get_offset for "i". */
+ {
+ region_offset offset = i_reg->get_offset ();
+ ASSERT_EQ (offset.get_base_region (), i_reg);
+ ASSERT_EQ (offset.get_bit_offset (), 0);
+ }
+}
+
+static void
+test_array_2 ()
+{
+ /* "int arr[10];" */
+ tree tlen = size_int (10);
+ tree arr_type
+ = build_array_type (integer_type_node, build_index_type (tlen));
+ tree arr = build_global_decl ("arr", arr_type);
+
+ /* "int i;" */
+ tree i = build_global_decl ("i", integer_type_node);
+
+ tree int_0 = build_int_cst (integer_type_node, 0);
+ tree int_1 = build_int_cst (integer_type_node, 1);
+
+ tree arr_0 = build4 (ARRAY_REF, integer_type_node,
+ arr, int_0, NULL_TREE, NULL_TREE);
+ tree arr_1 = build4 (ARRAY_REF, integer_type_node,
+ arr, int_1, NULL_TREE, NULL_TREE);
+ tree arr_i = build4 (ARRAY_REF, integer_type_node,
+ arr, i, NULL_TREE, NULL_TREE);
+
+ tree int_17 = build_int_cst (integer_type_node, 17);
+ tree int_42 = build_int_cst (integer_type_node, 42);
+ tree int_m3 = build_int_cst (integer_type_node, -3);
+
+ region_model_manager mgr;
+ region_model model (&mgr);
+ /* "arr[0] = 17;". */
+ model.set_value (arr_0, int_17, NULL);
+ /* "arr[1] = -3;". */
+ model.set_value (arr_1, int_m3, NULL);
+
+ ASSERT_EQ (model.get_rvalue (arr_0, NULL), model.get_rvalue (int_17, NULL));
+ ASSERT_EQ (model.get_rvalue (arr_1, NULL), model.get_rvalue (int_m3, NULL));
+
+ /* Overwrite a pre-existing binding: "arr[1] = 42;". */
+ model.set_value (arr_1, int_42, NULL);
+ ASSERT_EQ (model.get_rvalue (arr_1, NULL), model.get_rvalue (int_42, NULL));
+
+ /* Verify get_offset for "arr[0]". */
+ {
+ const region *arr_0_reg = model.get_lvalue (arr_0, NULL);
+ region_offset offset = arr_0_reg->get_offset ();
+ ASSERT_EQ (offset.get_base_region (), model.get_lvalue (arr, NULL));
+ ASSERT_EQ (offset.get_bit_offset (), 0);
+ }
+
+ /* Verify get_offset for "arr[1]". */
+ {
+ const region *arr_1_reg = model.get_lvalue (arr_1, NULL);
+ region_offset offset = arr_1_reg->get_offset ();
+ ASSERT_EQ (offset.get_base_region (), model.get_lvalue (arr, NULL));
+ ASSERT_EQ (offset.get_bit_offset (), INT_TYPE_SIZE);
+ }
+
+ /* "arr[i] = i;" - this should remove the earlier bindings. */
+ model.set_value (arr_i, i, NULL);
+ ASSERT_EQ (model.get_rvalue (arr_i, NULL), model.get_rvalue (i, NULL));
+ ASSERT_EQ (model.get_rvalue (arr_0, NULL)->get_kind (), SK_UNKNOWN);
+
+ /* "arr[0] = 17;" - this should remove the arr[i] binding. */
+ model.set_value (arr_0, int_17, NULL);
+ ASSERT_EQ (model.get_rvalue (arr_0, NULL), model.get_rvalue (int_17, NULL));
+ ASSERT_EQ (model.get_rvalue (arr_i, NULL)->get_kind (), SK_UNKNOWN);
+}
+
+/* Smoketest of dereferencing a pointer via MEM_REF. */
+
+static void
+test_mem_ref ()
+{
+ /*
+ x = 17;
+ p = &x;
+ *p;
+ */
+ tree x = build_global_decl ("x", integer_type_node);
+ tree int_star = build_pointer_type (integer_type_node);
+ tree p = build_global_decl ("p", int_star);
+
+ tree int_17 = build_int_cst (integer_type_node, 17);
+ tree addr_of_x = build1 (ADDR_EXPR, int_star, x);
+ tree offset_0 = build_int_cst (integer_type_node, 0);
+ tree star_p = build2 (MEM_REF, integer_type_node, p, offset_0);
+
+ region_model_manager mgr;
+ region_model model (&mgr);
+
+ /* "x = 17;". */
+ model.set_value (x, int_17, NULL);
+
+ /* "p = &x;". */
+ model.set_value (p, addr_of_x, NULL);
+
+ const svalue *sval = model.get_rvalue (star_p, NULL);
+ ASSERT_EQ (sval->maybe_get_constant (), int_17);
+}
+
+/* Test for a POINTER_PLUS_EXPR followed by a MEM_REF.
+ Analogous to this code:
+ void test_6 (int a[10])
+ {
+ __analyzer_eval (a[3] == 42); [should be UNKNOWN]
+ a[3] = 42;
+ __analyzer_eval (a[3] == 42); [should be TRUE]
+ }
+ from data-model-1.c, which looks like this at the gimple level:
+ # __analyzer_eval (a[3] == 42); [should be UNKNOWN]
+ int *_1 = a_10(D) + 12; # POINTER_PLUS_EXPR
+ int _2 = *_1; # MEM_REF
+ _Bool _3 = _2 == 42;
+ int _4 = (int) _3;
+ __analyzer_eval (_4);
+
+ # a[3] = 42;
+ int *_5 = a_10(D) + 12; # POINTER_PLUS_EXPR
+ *_5 = 42; # MEM_REF
+
+ # __analyzer_eval (a[3] == 42); [should be TRUE]
+ int *_6 = a_10(D) + 12; # POINTER_PLUS_EXPR
+ int _7 = *_6; # MEM_REF
+ _Bool _8 = _7 == 42;
+ int _9 = (int) _8;
+ __analyzer_eval (_9); */
+
+static void
+test_POINTER_PLUS_EXPR_then_MEM_REF ()
+{
+ tree int_star = build_pointer_type (integer_type_node);
+ tree a = build_global_decl ("a", int_star);
+ tree offset_12 = build_int_cst (size_type_node, 12);
+ tree pointer_plus_expr = build2 (POINTER_PLUS_EXPR, int_star, a, offset_12);
+ tree offset_0 = build_int_cst (integer_type_node, 0);
+ tree mem_ref = build2 (MEM_REF, integer_type_node,
+ pointer_plus_expr, offset_0);
+ region_model_manager mgr;
+ region_model m (&mgr);
+
+ tree int_42 = build_int_cst (integer_type_node, 42);
+ m.set_value (mem_ref, int_42, NULL);
+ ASSERT_EQ (m.get_rvalue (mem_ref, NULL)->maybe_get_constant (), int_42);
+}
+
+/* Verify that malloc works. */
+
+static void
+test_malloc ()
+{
+ tree int_star = build_pointer_type (integer_type_node);
+ tree p = build_global_decl ("p", int_star);
+ tree n = build_global_decl ("n", integer_type_node);
+ tree n_times_4 = build2 (MULT_EXPR, size_type_node,
+ n, build_int_cst (size_type_node, 4));
+
+ region_model_manager mgr;
+ test_region_model_context ctxt;
+ region_model model (&mgr);
+
+ /* "p = malloc (n * 4);". */
+ const svalue *size_sval = model.get_rvalue (n_times_4, &ctxt);
+ const region *reg = model.create_region_for_heap_alloc (size_sval);
+ const svalue *ptr = mgr.get_ptr_svalue (int_star, reg);
+ model.set_value (model.get_lvalue (p, &ctxt), ptr, &ctxt);
+ // TODO: verify dynamic extents
+}
+
+/* Verify that alloca works. */
+
+static void
+test_alloca ()
+{
+ auto_vec <tree> param_types;
+ tree fndecl = make_fndecl (integer_type_node,
+ "test_fn",
+ param_types);
+ allocate_struct_function (fndecl, true);
+
+
+ tree int_star = build_pointer_type (integer_type_node);
+ tree p = build_global_decl ("p", int_star);
+ tree n = build_global_decl ("n", integer_type_node);
+ tree n_times_4 = build2 (MULT_EXPR, size_type_node,
+ n, build_int_cst (size_type_node, 4));
+
+ region_model_manager mgr;
+ test_region_model_context ctxt;
+ region_model model (&mgr);
+
+ /* Push stack frame. */
+ const region *frame_reg
+ = model.push_frame (DECL_STRUCT_FUNCTION (fndecl),
+ NULL, &ctxt);
+ /* "p = alloca (n * 4);". */
+ const svalue *size_sval = model.get_rvalue (n_times_4, &ctxt);
+ const region *reg = model.create_region_for_alloca (size_sval);
+ ASSERT_EQ (reg->get_parent_region (), frame_reg);
+ const svalue *ptr = mgr.get_ptr_svalue (int_star, reg);
+ model.set_value (model.get_lvalue (p, &ctxt), ptr, &ctxt);
+ // TODO: verify dynamic extents
+
+ /* Verify that the pointers to the alloca region are replaced by
+ poisoned values when the frame is popped. */
+ model.pop_frame (NULL, NULL, &ctxt);
+ ASSERT_EQ (model.get_rvalue (p, &ctxt)->get_kind (), SK_POISONED);
+}
+
/* Run all of the selftests within this file. */
void
{
test_tree_cmp_on_constants ();
test_dump ();
- test_dump_2 ();
- test_dump_3 ();
+ test_struct ();
+ test_array_1 ();
test_get_representative_tree ();
test_unique_constants ();
- test_svalue_equality ();
- test_region_equality ();
- test_purging_by_criteria ();
- test_purge_unused_svalues ();
+ test_unique_unknowns ();
+ test_initial_svalue_folding ();
+ test_unaryop_svalue_folding ();
+ test_binop_svalue_folding ();
+ test_sub_svalue_folding ();
+ test_descendent_of_p ();
test_assignment ();
test_compound_assignment ();
test_stack_frames ();
test_get_representative_path_var ();
- test_canonicalization_1 ();
+ test_equality_1 ();
test_canonicalization_2 ();
test_canonicalization_3 ();
test_canonicalization_4 ();
test_state_merging ();
test_constraint_merging ();
+ test_widening_constraints ();
+ test_iteration_1 ();
test_malloc_constraints ();
+ test_var ();
+ test_array_2 ();
+ test_mem_ref ();
+ test_POINTER_PLUS_EXPR_then_MEM_REF ();
+ test_malloc ();
+ test_alloca ();
}
} // namespace selftest
(Zhongxing Xu, Ted Kremenek, and Jian Zhang)
http://lcs.ios.ac.cn/~xuzb/canalyze/memmodel.pdf */
-/* A tree, extended with stack frame information for locals, so that
- we can distinguish between different values of locals within a potentially
- recursive callstack. */
-// TODO: would this be better as a new tree code?
-
using namespace ana;
-namespace ana {
-
-class path_var
-{
-public:
- path_var (tree t, int stack_depth)
- : m_tree (t), m_stack_depth (stack_depth)
- {
- // TODO: ignore stack depth for globals and constants
- }
-
- bool operator== (const path_var &other) const
- {
- return (m_tree == other.m_tree
- && m_stack_depth == other.m_stack_depth);
- }
-
- void dump (pretty_printer *pp) const;
-
- tree m_tree;
- int m_stack_depth; // or -1 for globals?
-};
-
-} // namespace ana
-
namespace inchash
{
extern void add_path_var (path_var pv, hash &hstate);
} // namespace inchash
-
namespace ana {
-/* A region_model is effectively a graph of regions and symbolic values.
- We store per-model IDs rather than pointers to make it easier to clone
- and to compare graphs. */
-
-/* An ID for an svalue within a region_model. Internally, this is an index
- into a vector of svalue * within the region_model. */
-
-class svalue_id
-{
-public:
- static svalue_id null () { return svalue_id (-1); }
-
- svalue_id () : m_idx (-1) {}
-
- bool operator== (const svalue_id &other) const
- {
- return m_idx == other.m_idx;
- }
-
- bool operator!= (const svalue_id &other) const
- {
- return m_idx != other.m_idx;
- }
-
- bool null_p () const { return m_idx == -1; }
-
- static svalue_id from_int (int idx) { return svalue_id (idx); }
- int as_int () const { return m_idx; }
-
- void print (pretty_printer *pp) const;
- void dump_node_name_to_pp (pretty_printer *pp) const;
-
- void validate (const region_model &model) const;
-
-private:
- svalue_id (int idx) : m_idx (idx) {}
-
- int m_idx;
-};
-
-/* An ID for a region within a region_model. Internally, this is an index
- into a vector of region * within the region_model. */
-
-class region_id
-{
-public:
- static region_id null () { return region_id (-1); }
-
- region_id () : m_idx (-1) {}
-
- bool operator== (const region_id &other) const
- {
- return m_idx == other.m_idx;
- }
-
- bool operator!= (const region_id &other) const
- {
- return m_idx != other.m_idx;
- }
-
- bool null_p () const { return m_idx == -1; }
-
- static region_id from_int (int idx) { return region_id (idx); }
- int as_int () const { return m_idx; }
-
- void print (pretty_printer *pp) const;
- void dump_node_name_to_pp (pretty_printer *pp) const;
-
- void validate (const region_model &model) const;
-
-private:
- region_id (int idx) : m_idx (idx) {}
-
- int m_idx;
-};
-
-/* A class for renumbering IDs within a region_model, mapping old IDs
- to new IDs (e.g. when removing one or more elements, thus needing to
- renumber). */
-// TODO: could this be useful for equiv_class_ids?
-
-template <typename T>
-class id_map
-{
- public:
- id_map (int num_ids);
- void put (T src, T dst);
- T get_dst_for_src (T src) const;
- T get_src_for_dst (T dst) const;
- void dump_to_pp (pretty_printer *pp) const;
- void dump () const;
- void update (T *) const;
-
- private:
- auto_vec<T> m_src_to_dst;
- auto_vec<T> m_dst_to_src;
-};
-
-typedef id_map<svalue_id> svalue_id_map;
-typedef id_map<region_id> region_id_map;
-
-/* class id_map. */
-
-/* id_map's ctor, which populates the map with dummy null values. */
-
-template <typename T>
-inline id_map<T>::id_map (int num_svalues)
-: m_src_to_dst (num_svalues),
- m_dst_to_src (num_svalues)
-{
- for (int i = 0; i < num_svalues; i++)
- {
- m_src_to_dst.quick_push (T::null ());
- m_dst_to_src.quick_push (T::null ());
- }
-}
-
-/* Record that SRC is to be mapped to DST. */
-
-template <typename T>
-inline void
-id_map<T>::put (T src, T dst)
-{
- m_src_to_dst[src.as_int ()] = dst;
- m_dst_to_src[dst.as_int ()] = src;
-}
-
-/* Get the new value for SRC within the map. */
-
-template <typename T>
-inline T
-id_map<T>::get_dst_for_src (T src) const
-{
- if (src.null_p ())
- return src;
- return m_src_to_dst[src.as_int ()];
-}
-
-/* Given DST, a new value, determine which old value will be mapped to it
- (the inverse of the map). */
-
-template <typename T>
-inline T
-id_map<T>::get_src_for_dst (T dst) const
-{
- if (dst.null_p ())
- return dst;
- return m_dst_to_src[dst.as_int ()];
-}
-
-/* Dump this id_map to PP. */
-
-template <typename T>
-inline void
-id_map<T>::dump_to_pp (pretty_printer *pp) const
-{
- pp_string (pp, "src to dst: {");
- unsigned i;
- T *dst;
- FOR_EACH_VEC_ELT (m_src_to_dst, i, dst)
- {
- if (i > 0)
- pp_string (pp, ", ");
- T src (T::from_int (i));
- src.print (pp);
- pp_string (pp, " -> ");
- dst->print (pp);
- }
- pp_string (pp, "}");
- pp_newline (pp);
-
- pp_string (pp, "dst to src: {");
- T *src;
- FOR_EACH_VEC_ELT (m_dst_to_src, i, src)
- {
- if (i > 0)
- pp_string (pp, ", ");
- T dst (T::from_int (i));
- dst.print (pp);
- pp_string (pp, " <- ");
- src->print (pp);
- }
- pp_string (pp, "}");
- pp_newline (pp);
-}
-
-/* Dump this id_map to stderr. */
-
-template <typename T>
-DEBUG_FUNCTION inline void
-id_map<T>::dump () const
-{
- pretty_printer pp;
- pp.buffer->stream = stderr;
- dump_to_pp (&pp);
- pp_flush (&pp);
-}
-
-/* Update *ID from the old value to its new value in this map. */
-
-template <typename T>
-inline void
-id_map<T>::update (T *id) const
-{
- *id = get_dst_for_src (*id);
-}
-
-/* Variant of the above, which only stores things in one direction.
- (e.g. for merging, when the number of destination regions is not
- the same of the src regions, and can grow). */
-
template <typename T>
class one_way_id_map
{
private:
auto_vec<T> m_src_to_dst;
-};
-
-typedef one_way_id_map<svalue_id> one_way_svalue_id_map;
-typedef one_way_id_map<region_id> one_way_region_id_map;
+ };
/* class one_way_id_map. */
*id = get_dst_for_src (*id);
}
-/* A set of region_ids within a region_model. */
-
-class region_id_set
-{
-public:
- region_id_set (const region_model *model);
-
- void add_region (region_id rid)
- {
- if (!rid.null_p ())
- bitmap_set_bit (m_bitmap, rid.as_int ());
- }
-
- bool region_p (region_id rid) const
- {
- gcc_assert (!rid.null_p ());
- return bitmap_bit_p (const_cast <auto_sbitmap &> (m_bitmap),
- rid.as_int ());
- }
-
- unsigned int num_regions ()
- {
- return bitmap_count_bits (m_bitmap);
- }
-
-private:
- auto_sbitmap m_bitmap;
-};
-
-/* A set of svalue_ids within a region_model. */
-
-class svalue_id_set
-{
-public:
- svalue_id_set ();
-
- void add_svalue (svalue_id sid)
- {
- if (!sid.null_p ())
- bitmap_set_bit (m_bitmap, sid.as_int ());
- }
-
- bool svalue_p (svalue_id sid) const
- {
- gcc_assert (!sid.null_p ());
- return bitmap_bit_p (const_cast <auto_bitmap &> (m_bitmap),
- sid.as_int ());
- }
-
-private:
- auto_bitmap m_bitmap;
-};
-
/* Various operations delete information from a region_model.
This struct tracks how many of each kind of entity were purged (e.g.
int m_num_client_items;
};
+/* A measurement of the complexity of an svalue or region, so that
+ we can impose bounds on the growth of these tree-like structures
+ and thus avoid infinite chains of analysis. */
+
+struct complexity
+{
+ complexity (unsigned num_nodes, unsigned max_depth)
+ : m_num_nodes (num_nodes), m_max_depth (max_depth)
+ {}
+
+ complexity (const region *reg);
+ complexity (const svalue *sval);
+ static complexity from_pair (const complexity &c1, const complexity &c);
+
+ /* The total number of svalues and regions in the tree of this
+ entity, including the entity itself. */
+ unsigned m_num_nodes;
+
+ /* The maximum depth of the tree of this entity, including the
+ entity itself. */
+ unsigned m_max_depth;
+};
+
+/* A base class for visiting regions and svalues, with do-nothing
+ base implementations of the per-subclass vfuncs. */
+
+class visitor
+{
+public:
+ virtual void visit_region_svalue (const region_svalue *) {}
+ virtual void visit_constant_svalue (const constant_svalue *) {}
+ virtual void visit_unknown_svalue (const unknown_svalue *) {}
+ virtual void visit_poisoned_svalue (const poisoned_svalue *) {}
+ virtual void visit_setjmp_svalue (const setjmp_svalue *) {}
+ virtual void visit_initial_svalue (const initial_svalue *) {}
+ virtual void visit_unaryop_svalue (const unaryop_svalue *) {}
+ virtual void visit_binop_svalue (const binop_svalue *) {}
+ virtual void visit_sub_svalue (const sub_svalue *) {}
+ virtual void visit_unmergeable_svalue (const unmergeable_svalue *) {}
+ virtual void visit_placeholder_svalue (const placeholder_svalue *) {}
+ virtual void visit_widening_svalue (const widening_svalue *) {}
+ virtual void visit_compound_svalue (const compound_svalue *) {}
+ virtual void visit_conjured_svalue (const conjured_svalue *) {}
+
+ virtual void visit_region (const region *) {}
+};
+
/* An enum for discriminating between the different concrete subclasses
of svalue. */
SK_CONSTANT,
SK_UNKNOWN,
SK_POISONED,
- SK_SETJMP
+ SK_SETJMP,
+ SK_INITIAL,
+ SK_UNARYOP,
+ SK_BINOP,
+ SK_SUB,
+ SK_UNMERGEABLE,
+ SK_PLACEHOLDER,
+ SK_WIDENING,
+ SK_COMPOUND,
+ SK_CONJURED
};
/* svalue and its subclasses.
inheritance, and with svalue_kinds shown for the concrete subclasses):
svalue
- region_svalue (SK_REGION)
- constant_svalue (SK_CONSTANT)
- unknown_svalue (SK_UNKNOWN)
- poisoned_svalue (SK_POISONED)
- setjmp_svalue (SK_SETJMP). */
+ region_svalue (SK_REGION): a pointer to a region
+ constant_svalue (SK_CONSTANT): a constant
+ unknown_svalue (SK_UNKNOWN): an unknowable value
+ poisoned_svalue (SK_POISONED): an unusable value (undefined)
+ setjmp_svalue (SK_SETJMP): a setjmp/longjmp buffer
+ initial_svalue (SK_INITIAL): the initial value of a region
+ unaryop_svalue (SK_UNARYOP): unary operation on another svalue
+ binop_svalue (SK_BINOP): binary operation on two svalues
+ sub_svalue (SK_SUB): the result of accessing a subregion
+ unmergeable_svalue (SK_UNMERGEABLE): a value that is so interesting
+ from a control-flow perspective that it can inhibit state-merging
+ placeholder_svalue (SK_PLACEHOLDER): for use in selftests.
+ widening_svalue (SK_WIDENING): a merger of two svalues (possibly
+ in an iteration).
+ compound_svalue (SK_COMPOUND): a mapping of bit-ranges to svalues
+ conjured_svalue (SK_CONJURED): a value arising from a stmt. */
/* An abstract base class representing a value held by a region of memory. */
public:
virtual ~svalue () {}
- bool operator== (const svalue &other) const;
- bool operator!= (const svalue &other) const { return !(*this == other); }
-
- virtual svalue *clone () const = 0;
-
tree get_type () const { return m_type; }
virtual enum svalue_kind get_kind () const = 0;
- hashval_t hash () const;
-
void print (const region_model &model,
- svalue_id this_sid,
pretty_printer *pp) const;
- virtual void dump_dot_to_pp (const region_model &model,
- svalue_id this_sid,
- pretty_printer *pp) const;
+ virtual void dump_to_pp (pretty_printer *pp, bool simple) const = 0;
+ void dump (bool simple=true) const;
+ label_text get_desc (bool simple=true) const;
+
+ virtual const region_svalue *
+ dyn_cast_region_svalue () const { return NULL; }
+ virtual const constant_svalue *
+ dyn_cast_constant_svalue () const { return NULL; }
+ virtual const poisoned_svalue *
+ dyn_cast_poisoned_svalue () const { return NULL; }
+ virtual const setjmp_svalue *
+ dyn_cast_setjmp_svalue () const { return NULL; }
+ virtual const initial_svalue *
+ dyn_cast_initial_svalue () const { return NULL; }
+ virtual const unaryop_svalue *
+ dyn_cast_unaryop_svalue () const { return NULL; }
+ virtual const binop_svalue *
+ dyn_cast_binop_svalue () const { return NULL; }
+ virtual const sub_svalue *
+ dyn_cast_sub_svalue () const { return NULL; }
+ virtual const unmergeable_svalue *
+ dyn_cast_unmergeable_svalue () const { return NULL; }
+ virtual const widening_svalue *
+ dyn_cast_widening_svalue () const { return NULL; }
+ virtual const compound_svalue *
+ dyn_cast_compound_svalue () const { return NULL; }
+ virtual const conjured_svalue *
+ dyn_cast_conjured_svalue () const { return NULL; }
- virtual region_svalue *dyn_cast_region_svalue () { return NULL; }
- virtual constant_svalue *dyn_cast_constant_svalue () { return NULL; }
- virtual const constant_svalue *dyn_cast_constant_svalue () const
- { return NULL; }
- virtual poisoned_svalue *dyn_cast_poisoned_svalue () { return NULL; }
- virtual unknown_svalue *dyn_cast_unknown_svalue () { return NULL; }
- virtual setjmp_svalue *dyn_cast_setjmp_svalue () { return NULL; }
+ tree maybe_get_constant () const;
+ const svalue *maybe_undo_cast () const;
+ const svalue *unwrap_any_unmergeable () const;
- virtual void remap_region_ids (const region_id_map &map);
+ const svalue *can_merge_p (const svalue *other,
+ region_model_manager *mgr,
+ model_merger *merger) const;
- virtual void walk_for_canonicalization (canonicalization *c) const;
+ const complexity &get_complexity () const { return m_complexity; }
- virtual svalue_id get_child_sid (region *parent, region *child,
- region_model &model,
- region_model_context *ctxt);
+ virtual void accept (visitor *v) const = 0;
- tree maybe_get_constant () const;
+ bool live_p (const svalue_set &live_svalues,
+ const region_model *model) const;
+ virtual bool implicitly_live_p (const svalue_set &live_svalues,
+ const region_model *model) const;
protected:
- svalue (tree type) : m_type (type) {}
-
- virtual void add_to_hash (inchash::hash &hstate) const = 0;
+ svalue (complexity c, tree type)
+ : m_complexity (c), m_type (type)
+ {}
private:
- virtual void print_details (const region_model &model,
- svalue_id this_sid,
- pretty_printer *pp) const = 0;
+ complexity m_complexity;
tree m_type;
};
class region_svalue : public svalue
{
public:
- region_svalue (tree type, region_id rid) : svalue (type), m_rid (rid)
+ /* A support class for uniquifying instances of region_svalue. */
+ struct key_t
{
- /* Should we support NULL ptrs here? */
- gcc_assert (!rid.null_p ());
- }
-
- bool compare_fields (const region_svalue &other) const;
+ key_t (tree type, const region *reg)
+ : m_type (type), m_reg (reg)
+ {}
- svalue *clone () const FINAL OVERRIDE
- { return new region_svalue (get_type (), m_rid); }
+ hashval_t hash () const
+ {
+ inchash::hash hstate;
+ hstate.add_ptr (m_type);
+ hstate.add_ptr (m_reg);
+ return hstate.end ();
+ }
- enum svalue_kind get_kind () const FINAL OVERRIDE { return SK_REGION; }
+ bool operator== (const key_t &other) const
+ {
+ return (m_type == other.m_type && m_reg == other.m_reg);
+ }
- void dump_dot_to_pp (const region_model &model,
- svalue_id this_sid,
- pretty_printer *pp) const
- FINAL OVERRIDE;
+ void mark_deleted () { m_type = reinterpret_cast<tree> (1); }
+ void mark_empty () { m_type = NULL_TREE; }
+ bool is_deleted () const { return m_type == reinterpret_cast<tree> (1); }
+ bool is_empty () const { return m_type == NULL_TREE; }
- region_svalue *dyn_cast_region_svalue () FINAL OVERRIDE { return this; }
+ tree m_type;
+ const region *m_reg;
+ };
- region_id get_pointee () const { return m_rid; }
+ region_svalue (tree type, const region *reg)
+ : svalue (complexity (reg), type),
+ m_reg (reg)
+ {
+ gcc_assert (m_reg != NULL);
+ }
- void remap_region_ids (const region_id_map &map) FINAL OVERRIDE;
+ enum svalue_kind get_kind () const FINAL OVERRIDE { return SK_REGION; }
+ const region_svalue *
+ dyn_cast_region_svalue () const FINAL OVERRIDE { return this; }
- static void merge_values (const region_svalue ®ion_sval_a,
- const region_svalue ®ion_sval_b,
- svalue_id *merged_sid,
- tree type,
- model_merger *merger);
+ void dump_to_pp (pretty_printer *pp, bool simple) const FINAL OVERRIDE;
+ void accept (visitor *v) const FINAL OVERRIDE;
- void walk_for_canonicalization (canonicalization *c) const FINAL OVERRIDE;
+ const region * get_pointee () const { return m_reg; }
- static tristate eval_condition (region_svalue *lhs_ptr,
+ static tristate eval_condition (const region_svalue *lhs_ptr,
enum tree_code op,
- region_svalue *rhs_ptr);
-
- void add_to_hash (inchash::hash &hstate) const FINAL OVERRIDE;
+ const region_svalue *rhs_ptr);
private:
- void print_details (const region_model &model,
- svalue_id this_sid,
- pretty_printer *pp) const
- FINAL OVERRIDE;
-
- region_id m_rid;
+ const region *m_reg;
};
} // namespace ana
template <>
template <>
inline bool
-is_a_helper <region_svalue *>::test (svalue *sval)
+is_a_helper <const region_svalue *>::test (const svalue *sval)
{
return sval->get_kind () == SK_REGION;
}
+template <> struct default_hash_traits<region_svalue::key_t>
+: public member_function_hash_traits<region_svalue::key_t>
+{
+ static const bool empty_zero_p = true;
+};
+
namespace ana {
/* Concrete subclass of svalue representing a specific constant value. */
{
public:
constant_svalue (tree cst_expr)
- : svalue (TREE_TYPE (cst_expr)), m_cst_expr (cst_expr)
+ : svalue (complexity (1, 1), TREE_TYPE (cst_expr)), m_cst_expr (cst_expr)
{
gcc_assert (cst_expr);
gcc_assert (CONSTANT_CLASS_P (cst_expr));
}
- bool compare_fields (const constant_svalue &other) const;
-
- svalue *clone () const FINAL OVERRIDE
- { return new constant_svalue (m_cst_expr); }
-
enum svalue_kind get_kind () const FINAL OVERRIDE { return SK_CONSTANT; }
+ const constant_svalue *
+ dyn_cast_constant_svalue () const FINAL OVERRIDE { return this; }
- void add_to_hash (inchash::hash &hstate) const FINAL OVERRIDE;
-
- constant_svalue *dyn_cast_constant_svalue () FINAL OVERRIDE { return this; }
- const constant_svalue *dyn_cast_constant_svalue () const FINAL OVERRIDE
- { return this; }
+ void dump_to_pp (pretty_printer *pp, bool simple) const FINAL OVERRIDE;
+ void accept (visitor *v) const FINAL OVERRIDE;
+ bool implicitly_live_p (const svalue_set &,
+ const region_model *) const FINAL OVERRIDE;
tree get_constant () const { return m_cst_expr; }
-
- static void merge_values (const constant_svalue &cst_sval_a,
- const constant_svalue &cst_sval_b,
- svalue_id *merged_sid,
- model_merger *merger);
-
- static tristate eval_condition (constant_svalue *lhs,
+ static tristate eval_condition (const constant_svalue *lhs,
enum tree_code op,
- constant_svalue *rhs);
-
- svalue_id get_child_sid (region *parent, region *child,
- region_model &model,
- region_model_context *ctxt) FINAL OVERRIDE;
+ const constant_svalue *rhs);
private:
- void print_details (const region_model &model,
- svalue_id this_sid,
- pretty_printer *pp) const
- FINAL OVERRIDE;
-
tree m_cst_expr;
};
template <>
template <>
inline bool
-is_a_helper <constant_svalue *>::test (svalue *sval)
+is_a_helper <const constant_svalue *>::test (const svalue *sval)
{
return sval->get_kind () == SK_CONSTANT;
}
namespace ana {
-/* Concrete subclass of svalue representing a unique but unknown value.
- Comparisons of variables that share the same unknown value are known
- to be equal, even if we don't know what the value is. */
+/* Concrete subclass of svalue representing an unknowable value, the bottom
+ value when thinking of svalues as a lattice.
+ This is a singleton (w.r.t. its manager): there is a single unknown_svalue
+ per type. Self-comparisons of such instances yield "unknown". */
class unknown_svalue : public svalue
{
public:
unknown_svalue (tree type)
- : svalue (type)
+ : svalue (complexity (1, 1), type)
{}
- bool compare_fields (const unknown_svalue &other) const;
-
- svalue *clone () const FINAL OVERRIDE
- { return new unknown_svalue (get_type ()); }
-
enum svalue_kind get_kind () const FINAL OVERRIDE { return SK_UNKNOWN; }
- void add_to_hash (inchash::hash &hstate) const FINAL OVERRIDE;
-
- unknown_svalue *dyn_cast_unknown_svalue () FINAL OVERRIDE { return this; }
-
- private:
- void print_details (const region_model &model,
- svalue_id this_sid,
- pretty_printer *pp) const
- FINAL OVERRIDE;
+ void dump_to_pp (pretty_printer *pp, bool simple) const FINAL OVERRIDE;
+ void accept (visitor *v) const FINAL OVERRIDE;
};
/* An enum describing a particular kind of "poisoned" value. */
class poisoned_svalue : public svalue
{
public:
- poisoned_svalue (enum poison_kind kind, tree type)
- : svalue (type), m_kind (kind) {}
+ /* A support class for uniquifying instances of poisoned_svalue. */
+ struct key_t
+ {
+ key_t (enum poison_kind kind, tree type)
+ : m_kind (kind), m_type (type)
+ {}
+
+ hashval_t hash () const
+ {
+ inchash::hash hstate;
+ hstate.add_int (m_kind);
+ hstate.add_ptr (m_type);
+ return hstate.end ();
+ }
- bool compare_fields (const poisoned_svalue &other) const;
+ bool operator== (const key_t &other) const
+ {
+ return (m_kind == other.m_kind && m_type == other.m_type);
+ }
- svalue *clone () const FINAL OVERRIDE
- { return new poisoned_svalue (m_kind, get_type ()); }
+ void mark_deleted () { m_type = reinterpret_cast<tree> (1); }
+ void mark_empty () { m_type = NULL_TREE; }
+ bool is_deleted () const { return m_type == reinterpret_cast<tree> (1); }
+ bool is_empty () const { return m_type == NULL_TREE; }
- enum svalue_kind get_kind () const FINAL OVERRIDE { return SK_POISONED; }
+ enum poison_kind m_kind;
+ tree m_type;
+ };
+
+ poisoned_svalue (enum poison_kind kind, tree type)
+ : svalue (complexity (1, 1), type), m_kind (kind) {}
- void add_to_hash (inchash::hash &hstate) const FINAL OVERRIDE;
+ enum svalue_kind get_kind () const FINAL OVERRIDE { return SK_POISONED; }
+ const poisoned_svalue *
+ dyn_cast_poisoned_svalue () const FINAL OVERRIDE { return this; }
- poisoned_svalue *dyn_cast_poisoned_svalue () FINAL OVERRIDE { return this; }
+ void dump_to_pp (pretty_printer *pp, bool simple) const FINAL OVERRIDE;
+ void accept (visitor *v) const FINAL OVERRIDE;
enum poison_kind get_poison_kind () const { return m_kind; }
private:
- void print_details (const region_model &model,
- svalue_id this_sid,
- pretty_printer *pp) const
- FINAL OVERRIDE;
-
enum poison_kind m_kind;
};
template <>
template <>
inline bool
-is_a_helper <poisoned_svalue *>::test (svalue *sval)
+is_a_helper <const poisoned_svalue *>::test (const svalue *sval)
{
return sval->get_kind () == SK_POISONED;
}
+template <> struct default_hash_traits<poisoned_svalue::key_t>
+: public member_function_hash_traits<poisoned_svalue::key_t>
+{
+ static const bool empty_zero_p = true;
+};
+
namespace ana {
/* A bundle of information recording a setjmp/sigsetjmp call, corresponding
&& m_setjmp_call == other.m_setjmp_call);
}
+ void add_to_hash (inchash::hash *hstate) const
+ {
+ hstate->add_ptr (m_enode);
+ hstate->add_ptr (m_setjmp_call);
+ }
+
const exploded_node *m_enode;
const gcall *m_setjmp_call;
};
class setjmp_svalue : public svalue
{
public:
- setjmp_svalue (const setjmp_record &setjmp_record,
- tree type)
- : svalue (type), m_setjmp_record (setjmp_record)
- {}
+ /* A support class for uniquifying instances of setjmp_svalue. */
+ struct key_t
+ {
+ key_t (const setjmp_record &record, tree type)
+ : m_record (record), m_type (type)
+ {}
- bool compare_fields (const setjmp_svalue &other) const;
+ hashval_t hash () const
+ {
+ inchash::hash hstate;
+ m_record.add_to_hash (&hstate);
+ hstate.add_ptr (m_type);
+ return hstate.end ();
+ }
- svalue *clone () const FINAL OVERRIDE
- { return new setjmp_svalue (m_setjmp_record, get_type ()); }
+ bool operator== (const key_t &other) const
+ {
+ return (m_record == other.m_record && m_type == other.m_type);
+ }
- enum svalue_kind get_kind () const FINAL OVERRIDE { return SK_SETJMP; }
+ void mark_deleted () { m_type = reinterpret_cast<tree> (1); }
+ void mark_empty () { m_type = NULL_TREE; }
+ bool is_deleted () const { return m_type == reinterpret_cast<tree> (1); }
+ bool is_empty () const { return m_type == NULL_TREE; }
- void add_to_hash (inchash::hash &hstate) const FINAL OVERRIDE;
+ setjmp_record m_record;
+ tree m_type;
+ };
- setjmp_svalue *dyn_cast_setjmp_svalue () FINAL OVERRIDE { return this; }
+ setjmp_svalue (const setjmp_record &setjmp_record,
+ tree type)
+ : svalue (complexity (1, 1), type), m_setjmp_record (setjmp_record)
+ {}
+
+ enum svalue_kind get_kind () const FINAL OVERRIDE { return SK_SETJMP; }
+ const setjmp_svalue *
+ dyn_cast_setjmp_svalue () const FINAL OVERRIDE { return this; }
+
+ void dump_to_pp (pretty_printer *pp, bool simple) const FINAL OVERRIDE;
+ void accept (visitor *v) const FINAL OVERRIDE;
int get_enode_index () const;
const setjmp_record &get_setjmp_record () const { return m_setjmp_record; }
private:
- void print_details (const region_model &model,
- svalue_id this_sid,
- pretty_printer *pp) const
- FINAL OVERRIDE;
-
setjmp_record m_setjmp_record;
};
+} // namespace ana
+
+template <>
+template <>
+inline bool
+is_a_helper <const setjmp_svalue *>::test (const svalue *sval)
+{
+ return sval->get_kind () == SK_SETJMP;
+}
+
+template <> struct default_hash_traits<setjmp_svalue::key_t>
+: public member_function_hash_traits<setjmp_svalue::key_t>
+{
+ static const bool empty_zero_p = true;
+};
+
+namespace ana {
+
+/* Concrete subclass of svalue representing the initial value of a
+ specific region.
+
+ This represents the initial value at the start of the analysis path,
+ as opposed to the first time the region is accessed during the path.
+ Hence as soon as we have a call to an unknown function, all previously
+ unmodelled globals become implicitly "unknown" rather than "initial". */
+
+class initial_svalue : public svalue
+{
+public:
+ initial_svalue (tree type, const region *reg)
+ : svalue (complexity (reg), type), m_reg (reg)
+ {
+ gcc_assert (m_reg != NULL);
+ }
+
+ enum svalue_kind get_kind () const FINAL OVERRIDE { return SK_INITIAL; }
+ const initial_svalue *
+ dyn_cast_initial_svalue () const FINAL OVERRIDE { return this; }
+
+ void dump_to_pp (pretty_printer *pp, bool simple) const FINAL OVERRIDE;
+ void accept (visitor *v) const FINAL OVERRIDE;
+ bool implicitly_live_p (const svalue_set &,
+ const region_model *) const FINAL OVERRIDE;
+
+ const region *get_region () const { return m_reg; }
+
+ private:
+ const region *m_reg;
+};
+
+} // namespace ana
+
+template <>
+template <>
+inline bool
+is_a_helper <const initial_svalue *>::test (const svalue *sval)
+{
+ return sval->get_kind () == SK_INITIAL;
+}
+
+namespace ana {
+
+/* Concrete subclass of svalue representing a unary operation on
+ another svalue (e.g. a cast). */
+
+class unaryop_svalue : public svalue
+{
+public:
+ /* A support class for uniquifying instances of unaryop_svalue. */
+ struct key_t
+ {
+ key_t (tree type, enum tree_code op, const svalue *arg)
+ : m_type (type), m_op (op), m_arg (arg)
+ {}
+
+ hashval_t hash () const
+ {
+ inchash::hash hstate;
+ hstate.add_ptr (m_type);
+ hstate.add_int (m_op);
+ hstate.add_ptr (m_arg);
+ return hstate.end ();
+ }
+
+ bool operator== (const key_t &other) const
+ {
+ return (m_type == other.m_type
+ && m_op == other.m_op
+ && m_arg == other.m_arg);
+ }
+
+ void mark_deleted () { m_type = reinterpret_cast<tree> (1); }
+ void mark_empty () { m_type = NULL_TREE; }
+ bool is_deleted () const { return m_type == reinterpret_cast<tree> (1); }
+ bool is_empty () const { return m_type == NULL_TREE; }
+
+ tree m_type;
+ enum tree_code m_op;
+ const svalue *m_arg;
+ };
+
+ unaryop_svalue (tree type, enum tree_code op, const svalue *arg)
+ : svalue (complexity (arg), type), m_op (op), m_arg (arg)
+ {
+ }
+
+ enum svalue_kind get_kind () const FINAL OVERRIDE { return SK_UNARYOP; }
+ const unaryop_svalue *
+ dyn_cast_unaryop_svalue () const FINAL OVERRIDE { return this; }
+
+ void dump_to_pp (pretty_printer *pp, bool simple) const FINAL OVERRIDE;
+ void accept (visitor *v) const FINAL OVERRIDE;
+ bool implicitly_live_p (const svalue_set &,
+ const region_model *) const FINAL OVERRIDE;
+
+ enum tree_code get_op () const { return m_op; }
+ const svalue *get_arg () const { return m_arg; }
+
+ private:
+ enum tree_code m_op;
+ const svalue *m_arg;
+};
+
+} // namespace ana
+
+template <>
+template <>
+inline bool
+is_a_helper <const unaryop_svalue *>::test (const svalue *sval)
+{
+ return sval->get_kind () == SK_UNARYOP;
+}
+
+template <> struct default_hash_traits<unaryop_svalue::key_t>
+: public member_function_hash_traits<unaryop_svalue::key_t>
+{
+ static const bool empty_zero_p = true;
+};
+
+namespace ana {
+
+/* Concrete subclass of svalue representing a binary operation of
+ two svalues. */
+
+class binop_svalue : public svalue
+{
+public:
+ /* A support class for uniquifying instances of binop_svalue. */
+ struct key_t
+ {
+ key_t (tree type, enum tree_code op,
+ const svalue *arg0, const svalue *arg1)
+ : m_type (type), m_op (op), m_arg0 (arg0), m_arg1 (arg1)
+ {}
+
+ hashval_t hash () const
+ {
+ inchash::hash hstate;
+ hstate.add_ptr (m_type);
+ hstate.add_int (m_op);
+ hstate.add_ptr (m_arg0);
+ hstate.add_ptr (m_arg1);
+ return hstate.end ();
+ }
+
+ bool operator== (const key_t &other) const
+ {
+ return (m_type == other.m_type
+ && m_op == other.m_op
+ && m_arg0 == other.m_arg0
+ && m_arg1 == other.m_arg1);
+ }
+
+ void mark_deleted () { m_type = reinterpret_cast<tree> (1); }
+ void mark_empty () { m_type = NULL_TREE; }
+ bool is_deleted () const { return m_type == reinterpret_cast<tree> (1); }
+ bool is_empty () const { return m_type == NULL_TREE; }
+
+ tree m_type;
+ enum tree_code m_op;
+ const svalue *m_arg0;
+ const svalue *m_arg1;
+ };
+
+ binop_svalue (tree type, enum tree_code op,
+ const svalue *arg0, const svalue *arg1)
+ : svalue (complexity::from_pair (arg0->get_complexity (),
+ arg1->get_complexity ()),
+ type),
+ m_op (op), m_arg0 (arg0), m_arg1 (arg1)
+ {
+ }
+
+ enum svalue_kind get_kind () const FINAL OVERRIDE { return SK_BINOP; }
+ virtual const binop_svalue *dyn_cast_binop_svalue () const { return this; }
+
+ void dump_to_pp (pretty_printer *pp, bool simple) const FINAL OVERRIDE;
+ void accept (visitor *v) const FINAL OVERRIDE;
+ bool implicitly_live_p (const svalue_set &,
+ const region_model *) const FINAL OVERRIDE;
+
+ enum tree_code get_op () const { return m_op; }
+ const svalue *get_arg0 () const { return m_arg0; }
+ const svalue *get_arg1 () const { return m_arg1; }
+
+ private:
+ enum tree_code m_op;
+ const svalue *m_arg0;
+ const svalue *m_arg1;
+};
+
+} // namespace ana
+
+template <>
+template <>
+inline bool
+is_a_helper <const binop_svalue *>::test (const svalue *sval)
+{
+ return sval->get_kind () == SK_BINOP;
+}
+
+template <> struct default_hash_traits<binop_svalue::key_t>
+: public member_function_hash_traits<binop_svalue::key_t>
+{
+ static const bool empty_zero_p = true;
+};
+
+namespace ana {
+
+/* Concrete subclass of svalue representing the result of accessing a subregion
+ of another svalue (the value of a component/field of a struct, or an element
+ from an array). */
+
+class sub_svalue : public svalue
+{
+public:
+ /* A support class for uniquifying instances of sub_svalue. */
+ struct key_t
+ {
+ key_t (tree type, const svalue *parent_svalue, const region *subregion)
+ : m_type (type), m_parent_svalue (parent_svalue), m_subregion (subregion)
+ {}
+
+ hashval_t hash () const
+ {
+ inchash::hash hstate;
+ hstate.add_ptr (m_type);
+ hstate.add_ptr (m_parent_svalue);
+ hstate.add_ptr (m_subregion);
+ return hstate.end ();
+ }
+
+ bool operator== (const key_t &other) const
+ {
+ return (m_type == other.m_type
+ && m_parent_svalue == other.m_parent_svalue
+ && m_subregion == other.m_subregion);
+ }
+
+ void mark_deleted () { m_type = reinterpret_cast<tree> (1); }
+ void mark_empty () { m_type = NULL_TREE; }
+ bool is_deleted () const { return m_type == reinterpret_cast<tree> (1); }
+ bool is_empty () const { return m_type == NULL_TREE; }
+
+ tree m_type;
+ const svalue *m_parent_svalue;
+ const region *m_subregion;
+ };
+ sub_svalue (tree type, const svalue *parent_svalue,
+ const region *subregion);
+
+ enum svalue_kind get_kind () const FINAL OVERRIDE { return SK_SUB; }
+ const sub_svalue *dyn_cast_sub_svalue () const FINAL OVERRIDE
+ {
+ return this;
+ }
+
+ void dump_to_pp (pretty_printer *pp, bool simple) const FINAL OVERRIDE;
+ void accept (visitor *v) const FINAL OVERRIDE;
+ bool implicitly_live_p (const svalue_set &,
+ const region_model *) const FINAL OVERRIDE;
+
+ const svalue *get_parent () const { return m_parent_svalue; }
+ const region *get_subregion () const { return m_subregion; }
+
+ private:
+ const svalue *m_parent_svalue;
+ const region *m_subregion;
+};
+
+} // namespace ana
+
+template <>
+template <>
+inline bool
+is_a_helper <const sub_svalue *>::test (const svalue *sval)
+{
+ return sval->get_kind () == SK_SUB;
+}
+
+template <> struct default_hash_traits<sub_svalue::key_t>
+: public member_function_hash_traits<sub_svalue::key_t>
+{
+ static const bool empty_zero_p = true;
+};
+
+namespace ana {
+
+/* Concrete subclass of svalue: decorate another svalue,
+ so that the resulting svalue can be identified as being
+ "interesting to control flow".
+ For example, consider the return value from setjmp. We
+ don't want to merge states in which the result is 0 with
+ those in which the result is non-zero. By using an
+ unmergeable_svalue for the result, we can inhibit such merges
+ and have separate exploded nodes for those states, keeping
+ the first and second returns from setjmp distinct in the exploded
+ graph. */
+
+class unmergeable_svalue : public svalue
+{
+public:
+ unmergeable_svalue (const svalue *arg)
+ : svalue (complexity (arg), arg->get_type ()), m_arg (arg)
+ {
+ }
+
+ enum svalue_kind get_kind () const FINAL OVERRIDE { return SK_UNMERGEABLE; }
+ const unmergeable_svalue *
+ dyn_cast_unmergeable_svalue () const FINAL OVERRIDE { return this; }
+
+ void dump_to_pp (pretty_printer *pp, bool simple) const FINAL OVERRIDE;
+ void accept (visitor *v) const FINAL OVERRIDE;
+ bool implicitly_live_p (const svalue_set &,
+ const region_model *) const FINAL OVERRIDE;
+
+ const svalue *get_arg () const { return m_arg; }
+
+ private:
+ const svalue *m_arg;
+};
+
+} // namespace ana
+
+template <>
+template <>
+inline bool
+is_a_helper <const unmergeable_svalue *>::test (const svalue *sval)
+{
+ return sval->get_kind () == SK_UNMERGEABLE;
+}
+
+namespace ana {
+
+/* Concrete subclass of svalue for use in selftests, where
+ we want a specific but unknown svalue.
+ Unlike other svalue subclasses these aren't managed by
+ region_model_manager. */
+
+class placeholder_svalue : public svalue
+{
+public:
+ placeholder_svalue (tree type, const char *name)
+ : svalue (complexity (1, 1), type), m_name (name)
+ {
+ }
+
+ enum svalue_kind get_kind () const FINAL OVERRIDE { return SK_PLACEHOLDER; }
+
+ void dump_to_pp (pretty_printer *pp, bool simple) const FINAL OVERRIDE;
+ void accept (visitor *v) const FINAL OVERRIDE;
+
+ private:
+ const char *m_name;
+};
+
+} // namespace ana
+
+template <>
+template <>
+inline bool
+is_a_helper <placeholder_svalue *>::test (svalue *sval)
+{
+ return sval->get_kind () == SK_PLACEHOLDER;
+}
+
+namespace ana {
+
+/* Concrete subclass of svalue representing a "widening" seen when merging
+ states, widening from a base value to {base value, iter value} and thus
+ representing a possible fixed point in an iteration from the base to
+ +ve infinity, or -ve infinity, and thus useful for representing a value
+ within a loop.
+ We also need to capture the program_point at which the merger happens,
+ so that we can distinguish between different iterators, and thus handle
+ nested loops. (currently we capture the function_point instead, for
+ simplicity of hashing). */
+
+class widening_svalue : public svalue
+{
+public:
+ /* A support class for uniquifying instances of widening_svalue. */
+ struct key_t
+ {
+ key_t (tree type, const program_point &point,
+ const svalue *base_sval, const svalue *iter_sval)
+ : m_type (type), m_point (point.get_function_point ()),
+ m_base_sval (base_sval), m_iter_sval (iter_sval)
+ {}
+
+ hashval_t hash () const
+ {
+ inchash::hash hstate;
+ hstate.add_ptr (m_base_sval);
+ hstate.add_ptr (m_iter_sval);
+ return hstate.end ();
+ }
+
+ bool operator== (const key_t &other) const
+ {
+ return (m_type == other.m_type
+ && m_point == other.m_point
+ && m_base_sval == other.m_base_sval
+ && m_iter_sval == other.m_iter_sval);
+ }
+
+ void mark_deleted () { m_type = reinterpret_cast<tree> (1); }
+ void mark_empty () { m_type = NULL_TREE; }
+ bool is_deleted () const { return m_type == reinterpret_cast<tree> (1); }
+ bool is_empty () const { return m_type == NULL_TREE; }
+
+ tree m_type;
+ function_point m_point;
+ const svalue *m_base_sval;
+ const svalue *m_iter_sval;
+ };
+
+ enum direction_t
+ {
+ DIR_ASCENDING,
+ DIR_DESCENDING,
+ DIR_UNKNOWN
+ };
+
+ widening_svalue (tree type, const program_point &point,
+ const svalue *base_sval, const svalue *iter_sval)
+ : svalue (complexity::from_pair (base_sval->get_complexity (),
+ iter_sval->get_complexity ()),
+ type),
+ m_point (point.get_function_point ()),
+ m_base_sval (base_sval), m_iter_sval (iter_sval)
+ {
+ }
+
+ enum svalue_kind get_kind () const FINAL OVERRIDE { return SK_WIDENING; }
+ const widening_svalue *dyn_cast_widening_svalue () const { return this; }
+
+ void dump_to_pp (pretty_printer *pp, bool simple) const FINAL OVERRIDE;
+ void accept (visitor *v) const FINAL OVERRIDE;
+
+ const svalue *get_base_svalue () const { return m_base_sval; }
+ const svalue *get_iter_svalue () const { return m_iter_sval; }
+
+ enum direction_t get_direction () const;
+
+ tristate eval_condition_without_cm (enum tree_code op,
+ tree rhs_cst) const;
+
+ private:
+ function_point m_point;
+ const svalue *m_base_sval;
+ const svalue *m_iter_sval;
+};
+
+} // namespace ana
+
+/* Allow is_a <widening_svalue *> and as_a <widening_svalue *> on
+   svalue pointers, by testing the kind.  */
+
+template <>
+template <>
+inline bool
+is_a_helper <widening_svalue *>::test (svalue *sval)
+{
+  return sval->get_kind () == SK_WIDENING;
+}
+
+/* Expose widening_svalue::key_t's member functions (hash, operator==,
+   deleted/empty markers) to hash tables keyed on it.  */
+
+template <> struct default_hash_traits<widening_svalue::key_t>
+: public member_function_hash_traits<widening_svalue::key_t>
+{
+  static const bool empty_zero_p = true;
+};
+
+namespace ana {
+
+/* Concrete subclass of svalue representing a mapping of bit-ranges
+   to svalues, analogous to a cluster within the store.
+
+   This is for use in places where we want to represent a store-like
+   mapping, but are required to use an svalue, such as when handling
+   compound assignments and compound return values.
+
+   Instances of this class shouldn't be bound as-is into the store;
+   instead they should be unpacked.  Similarly, they should not be
+   nested.  */
+
+class compound_svalue : public svalue
+{
+public:
+  typedef binding_map::iterator_t iterator_t;
+
+  /* A support class for uniquifying instances of compound_svalue.
+     Note that to avoid copies, keys store pointers to binding_maps,
+     rather than the maps themselves.  */
+  struct key_t
+  {
+    key_t (tree type, const binding_map *map_ptr)
+    : m_type (type), m_map_ptr (map_ptr)
+    {}
+
+    hashval_t hash () const
+    {
+      inchash::hash hstate;
+      hstate.add_ptr (m_type);
+      //hstate.add_ptr (m_map_ptr); // TODO
+      return hstate.end ();
+    }
+
+    bool operator== (const key_t &other) const
+    {
+      return (m_type == other.m_type
+	      && *m_map_ptr == *other.m_map_ptr);
+    }
+
+    void mark_deleted () { m_type = reinterpret_cast<tree> (1); }
+    void mark_empty () { m_type = NULL_TREE; }
+    bool is_deleted () const { return m_type == reinterpret_cast<tree> (1); }
+    bool is_empty () const { return m_type == NULL_TREE; }
+
+    tree m_type;
+    const binding_map *m_map_ptr;
+  };
+
+  compound_svalue (tree type, const binding_map &map)
+  : svalue (calc_complexity (map), type),
+    m_map (map)
+  {
+  }
+
+  enum svalue_kind get_kind () const FINAL OVERRIDE { return SK_COMPOUND; }
+  const compound_svalue *dyn_cast_compound_svalue () const { return this; }
+
+  void dump_to_pp (pretty_printer *pp, bool simple) const FINAL OVERRIDE;
+  void accept (visitor *v) const FINAL OVERRIDE;
+
+  iterator_t begin () const { return m_map.begin (); }
+  iterator_t end () const { return m_map.end (); }
+
+  /* Make a key_t for uniquifying this instance (for use when
+     consolidating instances).  */
+  struct key_t make_key () const
+  {
+    return key_t (get_type (), &m_map);
+  }
+
+ private:
+  static complexity calc_complexity (const binding_map &map);
+
+  /* The mapping of bit-ranges to svalues, stored by value.  */
+  binding_map m_map;
+};
+
+} // namespace ana
+
+/* Allow is_a <compound_svalue *> etc on svalue pointers, by testing
+   the kind.  */
+
+template <>
+template <>
+inline bool
+is_a_helper <compound_svalue *>::test (svalue *sval)
+{
+  return sval->get_kind () == SK_COMPOUND;
+}
+
+/* Expose compound_svalue::key_t's member functions to hash tables
+   keyed on it.  */
+
+template <> struct default_hash_traits<compound_svalue::key_t>
+: public member_function_hash_traits<compound_svalue::key_t>
+{
+  static const bool empty_zero_p = true;
+};
+
+namespace ana {
+
+/* A defined value arising from a statement, where we want to identify a
+   particular unknown value, rather than resorting to the unknown_value
+   singleton, so that the value can have sm-state.
+
+   Comparisons of variables that share the same conjured_svalue are known
+   to be equal, even if we don't know what the value is.
+
+   For example, this is used for the values of regions that may have been
+   touched when calling an unknown function.
+
+   The value captures a region as well as a stmt in order to avoid falsely
+   aliasing the various values that could arise in one statement.  For
+   example, after:
+      unknown_fn (&a, &b);
+   we want values to clobber a and b with, but we don't want to use the
+   same value, or it would falsely implicitly assume that a == b.  */
+
+class conjured_svalue : public svalue
+{
+public:
+  /* NOTE(review): this typedef appears unused within this class;
+     possibly copied from compound_svalue — confirm whether it is
+     needed.  */
+  typedef binding_map::iterator_t iterator_t;
+
+  /* A support class for uniquifying instances of conjured_svalue.  */
+  struct key_t
+  {
+    key_t (tree type, const gimple *stmt, const region *id_reg)
+    : m_type (type), m_stmt (stmt), m_id_reg (id_reg)
+    {}
+
+    hashval_t hash () const
+    {
+      inchash::hash hstate;
+      hstate.add_ptr (m_type);
+      hstate.add_ptr (m_stmt);
+      hstate.add_ptr (m_id_reg);
+      return hstate.end ();
+    }
+
+    bool operator== (const key_t &other) const
+    {
+      return (m_type == other.m_type
+	      && m_stmt == other.m_stmt
+	      && m_id_reg == other.m_id_reg);
+    }
+
+    /* Use m_stmt to mark empty/deleted, as m_type can be NULL for
+       legitimate instances.  */
+    void mark_deleted () { m_stmt = reinterpret_cast<const gimple *> (1); }
+    void mark_empty () { m_stmt = NULL; }
+    bool is_deleted () const
+    {
+      return m_stmt == reinterpret_cast<const gimple *> (1);
+    }
+    bool is_empty () const { return m_stmt == NULL; }
+
+    tree m_type;
+    const gimple *m_stmt;
+    const region *m_id_reg;
+  };
+
+  conjured_svalue (tree type, const gimple *stmt, const region *id_reg)
+  : svalue (complexity (id_reg), type),
+    m_stmt (stmt), m_id_reg (id_reg)
+  {
+    gcc_assert (m_stmt != NULL);
+  }
+
+  enum svalue_kind get_kind () const FINAL OVERRIDE { return SK_CONJURED; }
+  const conjured_svalue *dyn_cast_conjured_svalue () const { return this; }
+
+  void dump_to_pp (pretty_printer *pp, bool simple) const FINAL OVERRIDE;
+  void accept (visitor *v) const FINAL OVERRIDE;
+
+ private:
+  /* The stmt and region that together identify this value; see the
+     class comment above.  m_stmt is never NULL (asserted in ctor).  */
+  const gimple *m_stmt;
+  const region *m_id_reg;
+};
+
+} // namespace ana
+
+/* Allow is_a <conjured_svalue *> etc on svalue pointers, by testing
+   the kind.  */
+
+template <>
+template <>
+inline bool
+is_a_helper <conjured_svalue *>::test (svalue *sval)
+{
+  return sval->get_kind () == SK_CONJURED;
+}
+
+/* Expose conjured_svalue::key_t's member functions to hash tables
+   keyed on it.  */
+
+template <> struct default_hash_traits<conjured_svalue::key_t>
+: public member_function_hash_traits<conjured_svalue::key_t>
+{
+  static const bool empty_zero_p = true;
+};
+
+namespace ana {
+
/* An enum for discriminating between the different concrete subclasses
   of region.  */
enum region_kind
{
-  RK_PRIMITIVE,
-  RK_STRUCT,
-  RK_UNION,
  RK_FRAME,
  RK_GLOBALS,
  RK_CODE,
  RK_FUNCTION,
-  RK_ARRAY,
+  RK_LABEL,
  RK_STACK,
  RK_HEAP,
  RK_ROOT,
-  RK_SYMBOLIC
+  RK_SYMBOLIC,
+  /* See the class-hierarchy comment below for the region subclass
+     corresponding to each of these kinds.  */
+  RK_DECL,
+  RK_FIELD,
+  RK_ELEMENT,
+  RK_OFFSET,
+  RK_CAST,
+  RK_HEAP_ALLOCATED,
+  RK_ALLOCA,
+  RK_STRING,
+  RK_UNKNOWN
};
-extern const char *region_kind_to_str (enum region_kind);
-
/* Region and its subclasses.
The class hierarchy looks like this (using indentation to show
inheritance, and with region_kinds shown for the concrete subclasses):
region
- primitive_region (RK_PRIMITIVE)
- map_region
- struct_or_union_region
- struct_region (RK_STRUCT)
- union_region (RK_UNION)
- scope_region
- frame_region (RK_FRAME)
- globals_region (RK_GLOBALS)
+ space_region
+ frame_region (RK_FRAME)
+ globals_region (RK_GLOBALS)
code_region (RK_CODE)
- function_region (RK_FUNCTION)
- array_region (RK_ARRAY)
- stack_region (RK_STACK)
- heap_region (RK_HEAP)
+ stack_region (RK_STACK)
+ heap_region (RK_HEAP)
root_region (RK_ROOT)
- label_region (RK_FUNCTION)
- symbolic_region (RK_SYMBOLIC). */
-
-/* Abstract base class representing a chunk of memory.
+ function_region (RK_FUNCTION)
+ label_region (RK_LABEL)
+ symbolic_region (RK_SYMBOLIC)
+      decl_region (RK_DECL)
+ field_region (RK_FIELD)
+ element_region (RK_ELEMENT)
+ offset_region (RK_OFFSET)
+ cast_region (RK_CAST)
+ heap_allocated_region (RK_HEAP_ALLOCATED)
+ alloca_region (RK_ALLOCA)
+ string_region (RK_STRING)
+ unknown_region (RK_UNKNOWN). */
+
+/* Abstract base class for representing ways of accessing chunks of memory.
Regions form a tree-like hierarchy, with a root region at the base,
with memory space regions within it, representing the stack and
globals, with frames within the stack, and regions for variables
within the frames and the "globals" region. Regions for structs
- can have subregions for fields.
-
- A region can optionally have a value, or inherit its value from
- the first ancestor with a value. For example, the stack region
- has a "uninitialized" poison value which is inherited by all
- descendent regions that don't themselves have a value. */
+ can have subregions for fields. */
class region
{
public:
-  virtual ~region () {}
-
-  bool operator== (const region &other) const;
-  bool operator!= (const region &other) const { return !(*this == other); }
+  virtual ~region ();
-  virtual region *clone () const = 0;
+  unsigned get_id () const { return m_id; }
+  static int cmp_ids (const region *reg1, const region *reg2);
  virtual enum region_kind get_kind () const = 0;
-  virtual map_region *dyn_cast_map_region () { return NULL; }
-  virtual array_region *dyn_cast_array_region () { return NULL; }
-  virtual symbolic_region *dyn_cast_symbolic_region () { return NULL; }
-  virtual const symbolic_region *dyn_cast_symbolic_region () const { return NULL; }
-
-  region_id get_parent () const { return m_parent_rid; }
-  region *get_parent_region (const region_model &model) const;
-
-  void set_value (region_model &model, region_id this_rid, svalue_id rhs_sid,
-		  region_model_context *ctxt);
-  svalue_id get_value (region_model &model, bool non_null,
-		       region_model_context *ctxt);
-  svalue_id get_value_direct () const { return m_sval_id; }
-
-  svalue_id get_inherited_child_sid (region *child,
-				     region_model &model,
-				     region_model_context *ctxt);
+  /* Checked downcasts: each returns NULL by default, and "this" in the
+     matching subclass's override.  */
+  virtual const frame_region *
+  dyn_cast_frame_region () const { return NULL; }
+  virtual const function_region *
+  dyn_cast_function_region () const { return NULL; }
+  virtual const symbolic_region *
+  dyn_cast_symbolic_region () const { return NULL; }
+  virtual const decl_region *
+  dyn_cast_decl_region () const { return NULL; }
+  virtual const field_region *
+  dyn_cast_field_region () const { return NULL; }
+  virtual const element_region *
+  dyn_cast_element_region () const { return NULL; }
+  virtual const offset_region *
+  dyn_cast_offset_region () const { return NULL; }
+  virtual const cast_region *
+  dyn_cast_cast_region () const { return NULL; }
+  virtual const string_region *
+  dyn_cast_string_region () const { return NULL; }
+
+  virtual void accept (visitor *v) const;
+
+  const region *get_parent_region () const { return m_parent; }
+  const region *get_base_region () const;
+  bool base_region_p () const;
+  bool descendent_of_p (const region *elder) const;
+  const frame_region *maybe_get_frame_region () const;
+
+  tree maybe_get_decl () const;
  tree get_type () const { return m_type; }
-  hashval_t hash () const;
-
  void print (const region_model &model,
-	      region_id this_rid,
	      pretty_printer *pp) const;
-
-  virtual void dump_dot_to_pp (const region_model &model,
-			       region_id this_rid,
-			       pretty_printer *pp) const;
+  label_text get_desc (bool simple=true) const;
  void dump_to_pp (const region_model &model,
-		   region_id this_rid,
		   pretty_printer *pp,
		   const char *prefix,
		   bool is_last_child) const;
-  virtual void dump_child_label (const region_model &model,
-				 region_id this_rid,
-				 region_id child_rid,
-				 pretty_printer *pp) const;
-  void remap_svalue_ids (const svalue_id_map &map);
-  virtual void remap_region_ids (const region_id_map &map);
+  virtual void dump_to_pp (pretty_printer *pp, bool simple) const = 0;
+  void dump (bool simple) const;
-  virtual void walk_for_canonicalization (canonicalization *c) const = 0;
+  bool non_null_p () const;
-  void add_view (region_id view_rid, region_model *model);
-  region_id get_view (tree type, region_model *model) const;
-  region_id get_active_view () const { return m_active_view_rid; }
-  bool is_view_p () const { return m_is_view; }
+  static int cmp_ptrs (const void *, const void *);
-  virtual void validate (const region_model &model) const;
+  region_offset get_offset () const;
+  bool get_byte_size (byte_size_t *out) const;
+  bool get_bit_size (bit_size_t *out) const;
-  bool non_null_p (const region_model &model) const;
+  void
+  get_subregions_for_binding (region_model_manager *mgr,
+			      bit_offset_t start_bit_offset,
+			      bit_size_t size_in_bits,
+			      tree type,
+			      auto_vec <const region *> *out) const;
- protected:
-  region (region_id parent_rid, svalue_id sval_id, tree type);
-  region (const region &other);
+  bool symbolic_for_unknown_ptr_p () const;
+
+  const complexity &get_complexity () const { return m_complexity; }
-  virtual void add_to_hash (inchash::hash &hstate) const;
-  virtual void print_fields (const region_model &model,
-			     region_id this_rid,
-			     pretty_printer *pp) const;
+ protected:
+  region (complexity c, unsigned id, const region *parent, tree type);
 private:
-  void become_active_view (region_model &model, region_id this_rid);
-  void deactivate_any_active_view (region_model &model);
-  void deactivate_view (region_model &model, region_id this_view_rid);
+  region_offset calc_offset () const;
-  region_id m_parent_rid;
-  svalue_id m_sval_id;
+  complexity m_complexity;
+  unsigned m_id; // purely for deterministic sorting at this stage, for dumps
+  const region *m_parent;
  tree m_type;
-  /* Child regions that are "views" (one per type). */
-  auto_vec<region_id> m_view_rids;
-  bool m_is_view;
-  region_id m_active_view_rid;
+  /* Cache for get_offset; NOTE(review): presumably computed lazily
+     via calc_offset — confirm in region.cc.  */
+  mutable region_offset *m_cached_offset;
};
} // namespace ana
template <>
template <>
inline bool
-is_a_helper <region *>::test (region *)
+is_a_helper <const region *>::test (const region *)
{
  return true;
}
namespace ana {
-/* Concrete region subclass for storing "primitive" types (integral types,
-   pointers, etc).  */
+/* Abstract subclass of region, for regions that represent an untyped
+   space within memory, such as the stack or the heap.  */
-class primitive_region : public region
+class space_region : public region
{
-public:
-  primitive_region (region_id parent_rid, tree type)
-  : region (parent_rid, svalue_id::null (), type)
+protected:
+  /* Space regions are untyped, hence NULL_TREE for the type.  */
+  space_region (unsigned id, const region *parent)
+  : region (complexity (parent), id, parent, NULL_TREE)
  {}
-
-  region *clone () const FINAL OVERRIDE;
-
-  enum region_kind get_kind () const FINAL OVERRIDE { return RK_PRIMITIVE; }
-
-  void walk_for_canonicalization (canonicalization *c) const FINAL OVERRIDE;
};
-/* A region that has children identified by tree keys.
-   For example a stack frame has subregions per local, and a region
-   for a struct has subregions per field.  */
+/* Concrete space_region subclass, representing a function frame on the stack,
+   to contain the locals.
+   The parent is the stack region; there's also a hierarchy of call-stack
+   prefixes expressed via m_calling_frame.
+   For example, given "oldest" calling "middle" called "newest" we would have
+   - a stack depth of 3
+   - frame (A) for "oldest" with index 0 for depth 1, calling_frame == NULL
+   - frame (B) for "middle" with index 1 for depth 2, calling_frame == (A)
+   - frame (C) for "newest" with index 2 for depth 3, calling_frame == (B)
+   where the parent region for each of the frames is the "stack" region.
+   The index is the count of frames earlier than this in the stack.  */
-class map_region : public region
+class frame_region : public space_region
{
public:
-  typedef ordered_hash_map<tree, region_id> map_t;
-  typedef map_t::iterator iterator_t;
+  /* A support class for uniquifying instances of frame_region.  */
+  struct key_t
+  {
+    key_t (const frame_region *calling_frame, function *fun)
+    : m_calling_frame (calling_frame), m_fun (fun)
+    {
+      /* calling_frame can be NULL.  */
+      gcc_assert (fun);
+    }
-  map_region (region_id parent_rid, tree type)
-  : region (parent_rid, svalue_id::null (), type)
-  {}
-  map_region (const map_region &other);
+    hashval_t hash () const
+    {
+      inchash::hash hstate;
+      hstate.add_ptr (m_calling_frame);
+      hstate.add_ptr (m_fun);
+      return hstate.end ();
+    }
-  map_region *dyn_cast_map_region () FINAL OVERRIDE { return this; }
+    bool operator== (const key_t &other) const
+    {
+      return (m_calling_frame == other.m_calling_frame && m_fun == other.m_fun);
+    }
-  void dump_dot_to_pp (const region_model &model,
-		       region_id this_rid,
-		       pretty_printer *pp) const
-    FINAL OVERRIDE;
+    void mark_deleted () { m_fun = reinterpret_cast<function *> (1); }
+    void mark_empty () { m_fun = NULL; }
+    bool is_deleted () const
+    {
+      return m_fun == reinterpret_cast<function *> (1);
+    }
+    bool is_empty () const { return m_fun == NULL; }
-  void dump_child_label (const region_model &model,
-			 region_id this_rid,
-			 region_id child_rid,
-			 pretty_printer *pp) const
-    FINAL OVERRIDE;
+    const frame_region *m_calling_frame;
+    function *m_fun;
+  };
-  region_id get_or_create (region_model *model,
-			   region_id this_rid,
-			   tree expr, tree type,
-			   region_model_context *ctxt);
-  void unbind (tree expr);
-  region_id *get (tree expr);
+  frame_region (unsigned id, const region *parent,
+		const frame_region *calling_frame,
+		function *fun, int index)
+  : space_region (id, parent), m_calling_frame (calling_frame),
+    m_fun (fun), m_index (index)
+  {}
+  ~frame_region ();
-  void remap_region_ids (const region_id_map &map) FINAL OVERRIDE;
+  /* region vfuncs.  */
+  enum region_kind get_kind () const FINAL OVERRIDE { return RK_FRAME; }
+  const frame_region * dyn_cast_frame_region () const FINAL OVERRIDE
+  {
+    return this;
+  }
+  void accept (visitor *v) const FINAL OVERRIDE;
+  void dump_to_pp (pretty_printer *pp, bool simple) const FINAL OVERRIDE;
-  tree get_tree_for_child_region (region_id child_rid) const;
+  /* Accessors.  */
+  const frame_region *get_calling_frame () const { return m_calling_frame; }
+  function *get_function () const { return m_fun; }
+  int get_index () const { return m_index; }
+  /* Depth counts this frame itself, hence index + 1.  */
+  int get_stack_depth () const { return m_index + 1; }
-  tree get_tree_for_child_region (region *child,
-				  const region_model &model) const;
+  const decl_region *get_region_for_local (region_model_manager *mgr,
+					   tree expr) const;
-  static bool can_merge_p (const map_region *map_region_a,
-			   const map_region *map_region_b,
-			   map_region *merged_map_region,
-			   region_id merged_rid,
-			   model_merger *merger);
+  unsigned get_num_locals () const { return m_locals.elements (); }
-  void walk_for_canonicalization (canonicalization *c) const FINAL OVERRIDE;
+ private:
+  const frame_region *m_calling_frame;
+  function *m_fun;
+  int m_index;
-  virtual bool valid_key_p (tree key) const = 0;
+  /* The regions for the decls within this frame are managed by this
+     object, rather than the region_model_manager, to make it a simple
+     lookup by tree.  */
+  typedef hash_map<tree, decl_region *> map_t;
+  map_t m_locals;
+};
-  svalue_id get_value_by_name (tree identifier,
-			       const region_model &model) const;
+} // namespace ana
-  iterator_t begin () { return m_map.begin (); }
-  iterator_t end () { return m_map.end (); }
-  size_t elements () const { return m_map.elements (); }
+/* Allow is_a <const frame_region *> etc on region pointers, by testing
+   the kind.  */
+
+template <>
+template <>
+inline bool
+is_a_helper <const frame_region *>::test (const region *reg)
+{
+  return reg->get_kind () == RK_FRAME;
+}
- protected:
-  bool compare_fields (const map_region &other) const;
-  void add_to_hash (inchash::hash &hstate) const OVERRIDE;
-  void print_fields (const region_model &model,
-		     region_id this_rid,
-		     pretty_printer *pp) const
-    OVERRIDE;
-  void validate (const region_model &model) const FINAL OVERRIDE;
+/* Expose frame_region::key_t's member functions to hash tables keyed
+   on it.  */
+
+template <> struct default_hash_traits<frame_region::key_t>
+: public member_function_hash_traits<frame_region::key_t>
+{
+  static const bool empty_zero_p = true;
+};
- private:
-  /* Mapping from tree to child region.  */
-  map_t m_map;
+namespace ana {
+
+/* Concrete space_region subclass, to hold global variables (data and bss).  */
+
+class globals_region : public space_region
+{
+ public:
+  globals_region (unsigned id, const region *parent)
+  : space_region (id, parent)
+  {}
+
+  /* region vfuncs.  */
+  enum region_kind get_kind () const FINAL OVERRIDE { return RK_GLOBALS; }
+  void dump_to_pp (pretty_printer *pp, bool simple) const FINAL OVERRIDE;
+};
} // namespace ana
template <>
template <>
inline bool
-is_a_helper <map_region *>::test (region *reg)
+/* Allow is_a <const globals_region *> etc, by testing the kind.  */
+is_a_helper <const globals_region *>::test (const region *reg)
{
-  return (reg->dyn_cast_map_region () != NULL);
+  return reg->get_kind () == RK_GLOBALS;
}
namespace ana {
-/* Abstract subclass representing a region with fields
-   (either a struct or a union).  */
+/* Concrete space_region subclass, representing the code segment
+   containing functions.  */
-class struct_or_union_region : public map_region
+class code_region : public space_region
{
public:
-  bool valid_key_p (tree key) const FINAL OVERRIDE;
-
- protected:
-  struct_or_union_region (region_id parent_rid, tree type)
-  : map_region (parent_rid, type)
+  code_region (unsigned id, const region *parent)
+  : space_region (id, parent)
  {}
-  bool compare_fields (const struct_or_union_region &other) const;
+  /* region vfuncs.  */
+  void dump_to_pp (pretty_printer *pp, bool simple) const FINAL OVERRIDE;
+  enum region_kind get_kind () const FINAL OVERRIDE { return RK_CODE; }
+
+  /* Get the child region for INDEX; NOTE(review): presumably the
+     function whose code is at that symbolic address — confirm in
+     region.cc.  */
+  const region *get_element (region_model *model,
+			     const svalue *index,
+			     region_model_context *ctxt);
};
} // namespace ana
template <>
template <>
inline bool
-is_a_helper <struct_or_union_region *>::test (region *reg)
+is_a_helper <const code_region *>::test (const region *reg)
{
-  return (reg->get_kind () == RK_STRUCT
-	  || reg->get_kind () == RK_UNION);
+  return reg->get_kind () == RK_CODE;
}
namespace ana {
-/* Concrete region subclass.  A map_region representing a struct, using
-   FIELD_DECLs for its keys.  */
+/* Concrete region subclass.  A region representing the code for
+   a particular function.  */
-class struct_region : public struct_or_union_region
+class function_region : public region
{
public:
-  struct_region (region_id parent_rid, tree type)
-  : struct_or_union_region (parent_rid, type)
+  function_region (unsigned id, const code_region *parent, tree fndecl)
+  : region (complexity (parent), id, parent, TREE_TYPE (fndecl)),
+    m_fndecl (fndecl)
  {
-    gcc_assert (TREE_CODE (type) == RECORD_TYPE);
+    gcc_assert (FUNC_OR_METHOD_TYPE_P (TREE_TYPE (fndecl)));
  }
-  region *clone () const FINAL OVERRIDE;
+  /* region vfuncs.  */
+  void dump_to_pp (pretty_printer *pp, bool simple) const FINAL OVERRIDE;
+  enum region_kind get_kind () const FINAL OVERRIDE { return RK_FUNCTION; }
+  const function_region *
+  dyn_cast_function_region () const FINAL OVERRIDE { return this; }
+
+  tree get_fndecl () const { return m_fndecl; }
-  enum region_kind get_kind () const FINAL OVERRIDE { return RK_STRUCT; }
+  region *get_element (region_model *model,
+		       const svalue *index_sid,
+		       region_model_context *ctxt);
-  bool compare_fields (const struct_region &other) const;
+private:
+  /* The FUNCTION_DECL whose code this region represents; its type is
+     asserted to be FUNC_OR_METHOD_TYPE_P in the ctor.  */
+  tree m_fndecl;
};
} // namespace ana
template <>
template <>
inline bool
-is_a_helper <struct_region *>::test (region *reg)
+is_a_helper <const function_region *>::test (const region *reg)
{
-  return reg->get_kind () == RK_STRUCT;
+  return reg->get_kind () == RK_FUNCTION;
}
namespace ana {
-/* Concrete region subclass.  A map_region representing a union, using
-   FIELD_DECLs for its keys.  */
+/* Concrete region subclass.  A region representing a particular label
+   within a function.  */
-class union_region : public struct_or_union_region
+class label_region : public region
{
public:
-  union_region (region_id parent_rid, tree type)
-  : struct_or_union_region (parent_rid, type)
+  label_region (unsigned id, const function_region *parent, tree label)
+  : region (complexity (parent), id, parent, NULL_TREE), m_label (label)
  {
-    gcc_assert (TREE_CODE (type) == UNION_TYPE);
+    gcc_assert (TREE_CODE (label) == LABEL_DECL);
  }
-  region *clone () const FINAL OVERRIDE;
-
-  enum region_kind get_kind () const FINAL OVERRIDE { return RK_UNION; }
+  /* region vfuncs.  */
+  void dump_to_pp (pretty_printer *pp, bool simple) const FINAL OVERRIDE;
+  enum region_kind get_kind () const FINAL OVERRIDE { return RK_LABEL; }
-  bool compare_fields (const union_region &other) const;
+private:
+  /* The LABEL_DECL (asserted in the ctor).  */
+  tree m_label;
};
} // namespace ana
template <>
template <>
inline bool
-is_a_helper <union_region *>::test (region *reg)
+is_a_helper <const label_region *>::test (const region *reg)
{
-  return reg->get_kind () == RK_UNION;
+  return reg->get_kind () == RK_LABEL;
}
namespace ana {
-/* Abstract map_region subclass for accessing decls, used as a base class
-   for function frames and for the globals region.  */
+/* Concrete space_region subclass representing a stack, containing all stack
+   frames.  */
-class scope_region : public map_region
+class stack_region : public space_region
{
- public:
-
- protected:
-  scope_region (region_id parent_rid)
-  : map_region (parent_rid, NULL_TREE)
+public:
+  /* NOTE(review): parent is taken as non-const here, unlike
+     frame_region/globals_region which take "const region *";
+     confirm whether this is intentional.  */
+  stack_region (unsigned id, region *parent)
+  : space_region (id, parent)
  {}
-  scope_region (const scope_region &other)
-  : map_region (other)
-  {
-  }
+  void dump_to_pp (pretty_printer *pp, bool simple) const FINAL OVERRIDE;
-  bool compare_fields (const scope_region &other) const;
+  enum region_kind get_kind () const FINAL OVERRIDE { return RK_STACK; }
};
-/* Concrete region subclass, representing a function frame on the stack,
-   to contain the locals.  */
+} // namespace ana
+
+/* Allow is_a <const stack_region *> etc, by testing the kind.  */
+
+template <>
+template <>
+inline bool
+is_a_helper <const stack_region *>::test (const region *reg)
+{
+  return reg->get_kind () == RK_STACK;
+}
+
namespace ana {
-class frame_region : public scope_region
+/* Concrete space_region subclass: a region within which regions can be
+   dynamically allocated.  */
+
+class heap_region : public space_region
{
public:
-  frame_region (region_id parent_rid, function *fun, int depth)
-  : scope_region (parent_rid), m_fun (fun), m_depth (depth)
+  heap_region (unsigned id, region *parent)
+  : space_region (id, parent)
  {}
-  frame_region (const frame_region &other)
-  : scope_region (other), m_fun (other.m_fun), m_depth (other.m_depth)
-  {
-  }
+  /* region vfuncs.  */
+  enum region_kind get_kind () const FINAL OVERRIDE { return RK_HEAP; }
+  void dump_to_pp (pretty_printer *pp, bool simple) const FINAL OVERRIDE;
+};
-  /* region vfuncs.  */
-  region *clone () const FINAL OVERRIDE;
-  enum region_kind get_kind () const FINAL OVERRIDE { return RK_FRAME; }
-  void print_fields (const region_model &model,
-		     region_id this_rid,
-		     pretty_printer *pp) const
-    FINAL OVERRIDE;
-  void add_to_hash (inchash::hash &hstate) const FINAL OVERRIDE;
+} // namespace ana
-  /* map_region vfuncs.  */
-  bool valid_key_p (tree key) const FINAL OVERRIDE;
+template <>
+template <>
+inline bool
+is_a_helper <const heap_region *>::test (const region *reg)
+{
+  return reg->get_kind () == RK_HEAP;
+}
-  /* Accessors.  */
-  function *get_function () const { return m_fun; }
-  int get_depth () const { return m_depth; }
+namespace ana {
+
+/* Concrete region subclass.  The root region, containing all regions
+   (either directly, or as descendents).
+   Unique within a region_model_manager.  */
-  bool compare_fields (const frame_region &other) const;
+class root_region : public region
+{
+public:
+  root_region (unsigned id);
- private:
-  function *m_fun;
-  int m_depth;
+  /* region vfuncs.  */
+  enum region_kind get_kind () const FINAL OVERRIDE { return RK_ROOT; }
+  void dump_to_pp (pretty_printer *pp, bool simple) const FINAL OVERRIDE;
};
} // namespace ana
template <>
template <>
inline bool
-is_a_helper <frame_region *>::test (region *reg)
+is_a_helper <const root_region *>::test (const region *reg)
{
-  return reg->get_kind () == RK_FRAME;
+  return reg->get_kind () == RK_ROOT;
}
namespace ana {
-/* Concrete region subclass, to hold global variables (data and bss).  */
+/* Concrete region subclass: a region to use when dereferencing an unknown
+   pointer.  */
-class globals_region : public scope_region
+class symbolic_region : public region
{
- public:
-  globals_region (region_id parent_rid)
-  : scope_region (parent_rid)
+public:
+  /* A support class for uniquifying instances of symbolic_region.  */
+  struct key_t
+  {
+    key_t (const region *parent, const svalue *sval_ptr)
+    : m_parent (parent), m_sval_ptr (sval_ptr)
+    {
+      gcc_assert (sval_ptr);
+    }
+
+    hashval_t hash () const
+    {
+      inchash::hash hstate;
+      hstate.add_ptr (m_parent);
+      hstate.add_ptr (m_sval_ptr);
+      return hstate.end ();
+    }
+
+    bool operator== (const key_t &other) const
+    {
+      return (m_parent == other.m_parent && m_sval_ptr == other.m_sval_ptr);
+    }
+
+    void mark_deleted () { m_sval_ptr = reinterpret_cast<const svalue *> (1); }
+    void mark_empty () { m_sval_ptr = NULL; }
+    bool is_deleted () const
+    {
+      return m_sval_ptr == reinterpret_cast<const svalue *> (1);
+    }
+    bool is_empty () const { return m_sval_ptr == NULL; }
+
+    const region *m_parent;
+    const svalue *m_sval_ptr;
+  };
+
+  /* The region's type is TREE_TYPE of the pointer's type, i.e. the
+     pointed-to type.  */
+  symbolic_region (unsigned id, region *parent, const svalue *sval_ptr)
+  : region (complexity::from_pair (parent, sval_ptr), id, parent,
+	    TREE_TYPE (sval_ptr->get_type ())),
+    m_sval_ptr (sval_ptr)
  {}
-  globals_region (const globals_region &other)
-  : scope_region (other)
-  {
-  }
+  const symbolic_region *
+  dyn_cast_symbolic_region () const FINAL OVERRIDE { return this; }
-  /* region vfuncs.  */
-  region *clone () const FINAL OVERRIDE;
-  enum region_kind get_kind () const FINAL OVERRIDE { return RK_GLOBALS; }
+  enum region_kind get_kind () const FINAL OVERRIDE { return RK_SYMBOLIC; }
+  void accept (visitor *v) const FINAL OVERRIDE;
+  void dump_to_pp (pretty_printer *pp, bool simple) const FINAL OVERRIDE;
-  /* map_region vfuncs.  */
-  bool valid_key_p (tree key) const FINAL OVERRIDE;
+  const svalue *get_pointer () const { return m_sval_ptr; }
-  bool compare_fields (const globals_region &other) const;
+private:
+  /* The svalue for the pointer being dereferenced (never NULL;
+     asserted in key_t's ctor).  */
+  const svalue *m_sval_ptr;
};
} // namespace ana
template <>
template <>
inline bool
-is_a_helper <globals_region *>::test (region *reg)
+is_a_helper <const symbolic_region *>::test (const region *reg)
{
-  return reg->get_kind () == RK_GLOBALS;
+  return reg->get_kind () == RK_SYMBOLIC;
}
+/* Expose symbolic_region::key_t's member functions to hash tables
+   keyed on it.  */
+
+template <> struct default_hash_traits<symbolic_region::key_t>
+: public member_function_hash_traits<symbolic_region::key_t>
+{
+  static const bool empty_zero_p = true;
+};
+
namespace ana {
-/* Concrete region subclass.  A map_region representing the code, using
-   FUNCTION_DECLs for its keys.  */
+/* Concrete region subclass representing the memory occupied by a
+   variable (whether for a global or a local).  */
-class code_region : public map_region
+class decl_region : public region
{
public:
-  code_region (region_id parent_rid)
-  : map_region (parent_rid, NULL_TREE)
-  {}
-  code_region (const code_region &other)
-  : map_region (other)
+  decl_region (unsigned id, const region *parent, tree decl)
+  : region (complexity (parent), id, parent, TREE_TYPE (decl)), m_decl (decl)
  {}
-  /* region vfuncs.  */
-  region *clone () const FINAL OVERRIDE;
-  enum region_kind get_kind () const FINAL OVERRIDE { return RK_CODE; }
+  /* region vfuncs.  */
+  enum region_kind get_kind () const FINAL OVERRIDE { return RK_DECL; }
+  const decl_region *
+  dyn_cast_decl_region () const FINAL OVERRIDE { return this; }
-  /* map_region vfunc.  */
-  bool valid_key_p (tree key) const FINAL OVERRIDE;
+  void dump_to_pp (pretty_printer *pp, bool simple) const FINAL OVERRIDE;
-  region_id get_element (region_model *model,
-			 region_id this_rid,
-			 svalue_id index_sid,
-			 region_model_context *ctxt);
+  tree get_decl () const { return m_decl; }
+  int get_stack_depth () const;
-  bool compare_fields (const code_region &other) const;
+private:
+  /* The variable's decl; the region's type is its TREE_TYPE.  */
+  tree m_decl;
};
} // namespace ana
template <>
template <>
inline bool
-is_a_helper <code_region *>::test (region *reg)
+is_a_helper <const decl_region *>::test (const region *reg)
{
-  return reg->get_kind () == RK_CODE;
+  return reg->get_kind () == RK_DECL;
}
namespace ana {
-/* Concrete region subclass.  A map_region representing the code for
-   a particular function, using LABEL_DECLs for its keys.  */
+/* Concrete region subclass representing the memory occupied by a
+   field within a struct or union.  */
-class function_region : public map_region
+class field_region : public region
{
public:
-  function_region (region_id parent_rid, tree type)
-  : map_region (parent_rid, type)
+  /* A support class for uniquifying instances of field_region.  */
+  struct key_t
  {
-    gcc_assert (FUNC_OR_METHOD_TYPE_P (type));
-  }
-  function_region (const function_region &other)
-  : map_region (other)
+    key_t (const region *parent, tree field)
+    : m_parent (parent), m_field (field)
+    {
+      gcc_assert (field);
+    }
+
+    hashval_t hash () const
+    {
+      inchash::hash hstate;
+      hstate.add_ptr (m_parent);
+      hstate.add_ptr (m_field);
+      return hstate.end ();
+    }
+
+    bool operator== (const key_t &other) const
+    {
+      return (m_parent == other.m_parent && m_field == other.m_field);
+    }
+
+    void mark_deleted () { m_field = reinterpret_cast<tree> (1); }
+    void mark_empty () { m_field = NULL_TREE; }
+    bool is_deleted () const { return m_field == reinterpret_cast<tree> (1); }
+    bool is_empty () const { return m_field == NULL_TREE; }
+
+    const region *m_parent;
+    tree m_field;
+  };
+
+  field_region (unsigned id, const region *parent, tree field)
+  : region (complexity (parent), id, parent, TREE_TYPE (field)),
+    m_field (field)
  {}
-  /* region vfuncs.  */
-  region *clone () const FINAL OVERRIDE;
-  enum region_kind get_kind () const FINAL OVERRIDE { return RK_FUNCTION; }
+  /* region vfuncs.  */
+  enum region_kind get_kind () const FINAL OVERRIDE { return RK_FIELD; }
-  /* map_region vfunc.  */
-  bool valid_key_p (tree key) const FINAL OVERRIDE;
+  void dump_to_pp (pretty_printer *pp, bool simple) const FINAL OVERRIDE;
+  const field_region *
+  dyn_cast_field_region () const FINAL OVERRIDE { return this; }
-  region_id get_element (region_model *model,
-			 region_id this_rid,
-			 svalue_id index_sid,
-			 region_model_context *ctxt);
+  tree get_field () const { return m_field; }
-  bool compare_fields (const function_region &other) const;
+private:
+  /* The field's decl (never NULL; asserted in key_t's ctor).  */
+  tree m_field;
};
} // namespace ana
template <>
template <>
inline bool
-is_a_helper <function_region *>::test (region *reg)
+is_a_helper <const field_region *>::test (const region *reg)
{
-  return reg->get_kind () == RK_FUNCTION;
+  return reg->get_kind () == RK_FIELD;
}
+/* Expose field_region::key_t's member functions to hash tables keyed
+   on it.  */
+
+template <> struct default_hash_traits<field_region::key_t>
+: public member_function_hash_traits<field_region::key_t>
+{
+  static const bool empty_zero_p = true;
+};
+
namespace ana {
-/* Concrete region subclass representing an array (or an array-like view
- of a parent region of memory.
- This can't be a map_region as we can't use trees as the keys: there's
- no guarantee about the uniqueness of an INTEGER_CST. */
+/* An element within an array. */
-class array_region : public region
+class element_region : public region
{
public:
-#if 0
- wide_int m_test;
-
- typedef wide_int key_t;
- typedef int_hash <wide_int, -1, -2> hash_t;
- typedef ordered_hash_map<hash_t, region_id> map_t;
-#else
- typedef int key_t;
- typedef int_hash <int, -1, -2> int_hash_t;
- typedef ordered_hash_map<int_hash_t, region_id> map_t;
-#endif
- typedef map_t::iterator iterator_t;
-
- array_region (region_id parent_rid, tree type)
- : region (parent_rid, svalue_id::null (), type)
+ /* A support class for uniquifying instances of element_region. */
+ struct key_t
{
- gcc_assert (TREE_CODE (type) == ARRAY_TYPE);
- }
- array_region (const array_region &other);
-
- void dump_dot_to_pp (const region_model &model,
- region_id this_rid,
- pretty_printer *pp) const
- FINAL OVERRIDE;
-
- void dump_child_label (const region_model &model,
- region_id this_rid,
- region_id child_rid,
- pretty_printer *pp) const
- FINAL OVERRIDE;
-
- /* region vfuncs. */
- region *clone () const FINAL OVERRIDE;
- enum region_kind get_kind () const FINAL OVERRIDE { return RK_ARRAY; }
- array_region *dyn_cast_array_region () { return this; }
-
- region_id get_element (region_model *model,
- region_id this_rid,
- svalue_id index_sid,
- region_model_context *ctxt);
-
- bool compare_fields (const array_region &other) const;
-
- static bool can_merge_p (const array_region *array_region_a,
- const array_region *array_region_b,
- array_region *merged_array_region,
- region_id merged_rid,
- model_merger *merger);
+ key_t (const region *parent, tree element_type, const svalue *index)
+ : m_parent (parent), m_element_type (element_type), m_index (index)
+ {
+ gcc_assert (index);
+ }
- void walk_for_canonicalization (canonicalization *c) const FINAL OVERRIDE;
+ hashval_t hash () const
+ {
+ inchash::hash hstate;
+ hstate.add_ptr (m_parent);
+ hstate.add_ptr (m_element_type);
+ hstate.add_ptr (m_index);
+ return hstate.end ();
+ }
- iterator_t begin () { return m_map.begin (); }
- iterator_t end () { return m_map.end (); }
- size_t elements () const { return m_map.elements (); }
+ bool operator== (const key_t &other) const
+ {
+ return (m_parent == other.m_parent
+ && m_element_type == other.m_element_type
+ && m_index == other.m_index);
+ }
- region_id get_or_create (region_model *model,
- region_id this_rid,
- key_t key, tree type,
- region_model_context *ctxt);
-// void unbind (int expr);
- region_id *get (key_t key);
+ void mark_deleted () { m_index = reinterpret_cast<const svalue *> (1); }
+ void mark_empty () { m_index = NULL; }
+ bool is_deleted () const
+ {
+ return m_index == reinterpret_cast<const svalue *> (1);
+ }
+ bool is_empty () const { return m_index == NULL; }
- void remap_region_ids (const region_id_map &map) FINAL OVERRIDE;
+ const region *m_parent;
+ tree m_element_type;
+ const svalue *m_index;
+ };
- bool get_key_for_child_region (region_id child_rid,
- key_t *out) const;
+ element_region (unsigned id, const region *parent, tree element_type,
+ const svalue *index)
+ : region (complexity::from_pair (parent, index), id, parent, element_type),
+ m_index (index)
+ {}
-#if 0
- bool get_key_for_child_region (region *child,
- const region_model &model,
- key_t *out) const;
-#endif
+ enum region_kind get_kind () const FINAL OVERRIDE { return RK_ELEMENT; }
+ const element_region *
+ dyn_cast_element_region () const FINAL OVERRIDE { return this; }
- void add_to_hash (inchash::hash &hstate) const OVERRIDE;
- void print_fields (const region_model &model,
- region_id this_rid,
- pretty_printer *pp) const
- OVERRIDE;
- void validate (const region_model &model) const FINAL OVERRIDE;
+ void accept (visitor *v) const FINAL OVERRIDE;
- static key_t key_from_constant (tree cst);
- tree constant_from_key (key_t key);
+ void dump_to_pp (pretty_printer *pp, bool simple) const FINAL OVERRIDE;
- private:
- static int key_cmp (const void *, const void *);
+ const svalue *get_index () const { return m_index; }
- /* Mapping from tree to child region. */
- map_t m_map;
+private:
+ const svalue *m_index;
};
} // namespace ana
template <>
template <>
inline bool
-is_a_helper <array_region *>::test (region *reg)
+is_a_helper <const element_region *>::test (const region *reg)
{
- return reg->get_kind () == RK_ARRAY;
+ return reg->get_kind () == RK_ELEMENT;
}
+template <> struct default_hash_traits<element_region::key_t>
+: public member_function_hash_traits<element_region::key_t>
+{
+ static const bool empty_zero_p = true;
+};
+
namespace ana {
-/* Concrete region subclass representing a stack, containing all stack
- frames, and implicitly providing a POISON_KIND_UNINIT value to all
- child regions by default. */
+/* A byte-offset within another region, for handling pointer arithmetic
+ as a region. */
-class stack_region : public region
+class offset_region : public region
{
public:
- stack_region (region_id parent_rid, svalue_id sval_id)
- : region (parent_rid, sval_id, NULL_TREE)
- {}
-
- stack_region (const stack_region &other);
-
- bool compare_fields (const stack_region &other) const;
-
- region *clone () const FINAL OVERRIDE;
-
- enum region_kind get_kind () const FINAL OVERRIDE { return RK_STACK; }
+ /* A support class for uniquifying instances of offset_region. */
+ struct key_t
+ {
+ key_t (const region *parent, tree element_type, const svalue *byte_offset)
+ : m_parent (parent), m_element_type (element_type), m_byte_offset (byte_offset)
+ {
+ gcc_assert (byte_offset);
+ }
- void dump_child_label (const region_model &model,
- region_id this_rid,
- region_id child_rid,
- pretty_printer *pp) const
- FINAL OVERRIDE;
+ hashval_t hash () const
+ {
+ inchash::hash hstate;
+ hstate.add_ptr (m_parent);
+ hstate.add_ptr (m_element_type);
+ hstate.add_ptr (m_byte_offset);
+ return hstate.end ();
+ }
- void push_frame (region_id frame_rid);
- region_id get_current_frame_id () const;
- void pop_frame (region_model *model, region_id result_dst_rid,
- bool purge, purge_stats *stats,
- region_model_context *ctxt);
+ bool operator== (const key_t &other) const
+ {
+ return (m_parent == other.m_parent
+ && m_element_type == other.m_element_type
+ && m_byte_offset == other.m_byte_offset);
+ }
- void remap_region_ids (const region_id_map &map) FINAL OVERRIDE;
+ void mark_deleted () { m_byte_offset = reinterpret_cast<const svalue *> (1); }
+ void mark_empty () { m_byte_offset = NULL; }
+ bool is_deleted () const
+ {
+ return m_byte_offset == reinterpret_cast<const svalue *> (1);
+ }
+ bool is_empty () const { return m_byte_offset == NULL; }
- unsigned get_num_frames () const { return m_frame_rids.length (); }
- region_id get_frame_rid (unsigned i) const { return m_frame_rids[i]; }
+ const region *m_parent;
+ tree m_element_type;
+ const svalue *m_byte_offset;
+ };
- static bool can_merge_p (const stack_region *stack_region_a,
- const stack_region *stack_region_b,
- model_merger *merger);
+ offset_region (unsigned id, const region *parent, tree type,
+ const svalue *byte_offset)
+ : region (complexity::from_pair (parent, byte_offset), id, parent, type),
+ m_byte_offset (byte_offset)
+ {}
- void walk_for_canonicalization (canonicalization *c) const FINAL OVERRIDE;
+ enum region_kind get_kind () const FINAL OVERRIDE { return RK_OFFSET; }
+ const offset_region *
+ dyn_cast_offset_region () const FINAL OVERRIDE { return this; }
- svalue_id get_value_by_name (tree identifier,
- const region_model &model) const;
+ void accept (visitor *v) const FINAL OVERRIDE;
- void validate (const region_model &model) const FINAL OVERRIDE;
+ void dump_to_pp (pretty_printer *pp, bool simple) const FINAL OVERRIDE;
- private:
- void add_to_hash (inchash::hash &hstate) const FINAL OVERRIDE;
- void print_fields (const region_model &model,
- region_id this_rid,
- pretty_printer *pp) const
- FINAL OVERRIDE;
+ const svalue *get_byte_offset () const { return m_byte_offset; }
- auto_vec<region_id> m_frame_rids;
+private:
+ const svalue *m_byte_offset;
};
} // namespace ana
template <>
template <>
inline bool
-is_a_helper <stack_region *>::test (region *reg)
+is_a_helper <const offset_region *>::test (const region *reg)
{
- return reg->get_kind () == RK_STACK;
+ return reg->get_kind () == RK_OFFSET;
}
+template <> struct default_hash_traits<offset_region::key_t>
+: public member_function_hash_traits<offset_region::key_t>
+{
+ static const bool empty_zero_p = true;
+};
+
namespace ana {
-/* Concrete region subclass: a region within which regions can be
- dynamically allocated. */
+/* A region that views another region using a different type. */
-class heap_region : public region
+class cast_region : public region
{
public:
- heap_region (region_id parent_rid, svalue_id sval_id)
- : region (parent_rid, sval_id, NULL_TREE)
- {}
- heap_region (const heap_region &other);
+ /* A support class for uniquifying instances of cast_region. */
+ struct key_t
+ {
+ key_t (const region *original_region, tree type)
+ : m_original_region (original_region), m_type (type)
+ {
+ gcc_assert (type);
+ }
+
+ hashval_t hash () const
+ {
+ inchash::hash hstate;
+ hstate.add_ptr (m_original_region);
+ hstate.add_ptr (m_type);
+ return hstate.end ();
+ }
- bool compare_fields (const heap_region &other) const;
+ bool operator== (const key_t &other) const
+ {
+ return (m_original_region == other.m_original_region
+ && m_type == other.m_type);
+ }
- region *clone () const FINAL OVERRIDE;
+ void mark_deleted () { m_type = reinterpret_cast<tree> (1); }
+ void mark_empty () { m_type = NULL_TREE; }
+ bool is_deleted () const { return m_type == reinterpret_cast<tree> (1); }
+ bool is_empty () const { return m_type == NULL_TREE; }
- enum region_kind get_kind () const FINAL OVERRIDE { return RK_HEAP; }
+ const region *m_original_region;
+ tree m_type;
+ };
- static bool can_merge_p (const heap_region *heap_a, region_id heap_a_rid,
- const heap_region *heap_b, region_id heap_b_rid,
- heap_region *merged_heap, region_id merged_heap_rid,
- model_merger *merger);
+ cast_region (unsigned id, const region *original_region, tree type)
+ : region (complexity (original_region), id,
+ original_region->get_parent_region (), type),
+ m_original_region (original_region)
+ {}
+
+ enum region_kind get_kind () const FINAL OVERRIDE { return RK_CAST; }
+ const cast_region *
+ dyn_cast_cast_region () const FINAL OVERRIDE { return this; }
+ void accept (visitor *v) const FINAL OVERRIDE;
+ void dump_to_pp (pretty_printer *pp, bool simple) const FINAL OVERRIDE;
- void walk_for_canonicalization (canonicalization *c) const FINAL OVERRIDE;
+ const region *get_original_region () const { return m_original_region; }
+private:
+ const region *m_original_region;
};
} // namespace ana
template <>
template <>
inline bool
-is_a_helper <heap_region *>::test (region *reg)
+is_a_helper <const cast_region *>::test (const region *reg)
{
- return reg->get_kind () == RK_HEAP;
+ return reg->get_kind () == RK_CAST;
}
+template <> struct default_hash_traits<cast_region::key_t>
+: public member_function_hash_traits<cast_region::key_t>
+{
+ static const bool empty_zero_p = true;
+};
+
namespace ana {
-/* Concrete region subclass. The root region, containing all regions
- (either directly, or as descendents).
- Unique within a region_model. */
+/* An untyped region dynamically allocated on the heap via "malloc"
+ or similar. */
-class root_region : public region
+class heap_allocated_region : public region
{
public:
- root_region ();
- root_region (const root_region &other);
-
- bool compare_fields (const root_region &other) const;
-
- region *clone () const FINAL OVERRIDE;
+ heap_allocated_region (unsigned id, const region *parent)
+ : region (complexity (parent), id, parent, NULL_TREE)
+ {}
- enum region_kind get_kind () const FINAL OVERRIDE { return RK_ROOT; }
+ enum region_kind
+ get_kind () const FINAL OVERRIDE { return RK_HEAP_ALLOCATED; }
- void dump_child_label (const region_model &model,
- region_id this_rid,
- region_id child_rid,
- pretty_printer *pp) const
- FINAL OVERRIDE;
+ void dump_to_pp (pretty_printer *pp, bool simple) const FINAL OVERRIDE;
+};
- region_id push_frame (region_model *model, function *fun,
- vec<svalue_id> *arg_sids,
- region_model_context *ctxt);
- region_id get_current_frame_id (const region_model &model) const;
- void pop_frame (region_model *model, region_id result_dst_rid,
- bool purge, purge_stats *stats,
- region_model_context *ctxt);
+/* An untyped region dynamically allocated on the stack via "alloca". */
- region_id ensure_stack_region (region_model *model);
- region_id get_stack_region_id () const { return m_stack_rid; }
- stack_region *get_stack_region (const region_model *model) const;
+class alloca_region : public region
+{
+public:
+ alloca_region (unsigned id, const frame_region *parent)
+ : region (complexity (parent), id, parent, NULL_TREE)
+ {}
- region_id ensure_globals_region (region_model *model);
- region_id get_globals_region_id () const { return m_globals_rid; }
- globals_region *get_globals_region (const region_model *model) const;
+ enum region_kind get_kind () const FINAL OVERRIDE { return RK_ALLOCA; }
- region_id ensure_code_region (region_model *model);
- code_region *get_code_region (const region_model *model) const;
+ void dump_to_pp (pretty_printer *pp, bool simple) const FINAL OVERRIDE;
+};
- region_id ensure_heap_region (region_model *model);
- heap_region *get_heap_region (const region_model *model) const;
+/* A region for a STRING_CST. */
- void remap_region_ids (const region_id_map &map) FINAL OVERRIDE;
+class string_region : public region
+{
+public:
+ string_region (unsigned id, const region *parent, tree string_cst)
+ : region (complexity (parent), id, parent, TREE_TYPE (string_cst)),
+ m_string_cst (string_cst)
+ {}
- static bool can_merge_p (const root_region *root_region_a,
- const root_region *root_region_b,
- root_region *merged_root_region,
- model_merger *merger);
+ const string_region *
+ dyn_cast_string_region () const FINAL OVERRIDE { return this; }
- void walk_for_canonicalization (canonicalization *c) const FINAL OVERRIDE;
+ enum region_kind get_kind () const FINAL OVERRIDE { return RK_STRING; }
- svalue_id get_value_by_name (tree identifier,
- const region_model &model) const;
+ void dump_to_pp (pretty_printer *pp, bool simple) const FINAL OVERRIDE;
- void validate (const region_model &model) const FINAL OVERRIDE;
+ tree get_string_cst () const { return m_string_cst; }
private:
- void add_to_hash (inchash::hash &hstate) const FINAL OVERRIDE;
- void print_fields (const region_model &model,
- region_id this_rid,
- pretty_printer *pp) const
- FINAL OVERRIDE;
-
- region_id m_stack_rid;
- region_id m_globals_rid;
- region_id m_code_rid;
- region_id m_heap_rid;
+ tree m_string_cst;
};
} // namespace ana
template <>
template <>
inline bool
-is_a_helper <root_region *>::test (region *reg)
+is_a_helper <const string_region *>::test (const region *reg)
{
- return reg->get_kind () == RK_ROOT;
+ return reg->get_kind () == RK_STRING;
}
namespace ana {
-/* Concrete region subclass: a region to use when dereferencing an unknown
- pointer. */
+/* An unknown region, for handling unimplemented tree codes. */
-class symbolic_region : public region
+class unknown_region : public region
{
public:
- symbolic_region (region_id parent_rid, tree type, bool possibly_null)
- : region (parent_rid, svalue_id::null (), type),
- m_possibly_null (possibly_null)
+ unknown_region (unsigned id, const region *parent, tree type)
+ : region (complexity (parent), id, parent, type)
{}
- symbolic_region (const symbolic_region &other);
- const symbolic_region *dyn_cast_symbolic_region () const FINAL OVERRIDE
- { return this; }
- symbolic_region *dyn_cast_symbolic_region () FINAL OVERRIDE
- { return this; }
+ enum region_kind get_kind () const FINAL OVERRIDE { return RK_UNKNOWN; }
- bool compare_fields (const symbolic_region &other) const;
+ void dump_to_pp (pretty_printer *pp, bool simple) const FINAL OVERRIDE;
+};
- region *clone () const FINAL OVERRIDE;
+/* A class responsible for owning and consolidating region and svalue
+ instances.
+ region and svalue instances are immutable as far as clients are
+ concerned, so they are provided as "const" ptrs. */
- enum region_kind get_kind () const FINAL OVERRIDE { return RK_SYMBOLIC; }
+class region_model_manager
+{
+public:
+ region_model_manager ();
+ ~region_model_manager ();
+
+ /* svalue consolidation. */
+ const svalue *get_or_create_constant_svalue (tree cst_expr);
+ const svalue *get_or_create_unknown_svalue (tree type);
+ const svalue *get_or_create_setjmp_svalue (const setjmp_record &r,
+ tree type);
+ const svalue *get_or_create_poisoned_svalue (enum poison_kind kind,
+ tree type);
+ const svalue *get_or_create_initial_value (const region *reg);
+ const svalue *get_ptr_svalue (tree ptr_type, const region *pointee);
+ const svalue *get_or_create_unaryop (tree type, enum tree_code op,
+ const svalue *arg);
+ const svalue *get_or_create_cast (tree type, const svalue *arg);
+ const svalue *get_or_create_binop (tree type,
+ enum tree_code op,
+ const svalue *arg0, const svalue *arg1);
+ const svalue *get_or_create_sub_svalue (tree type,
+ const svalue *parent_svalue,
+ const region *subregion);
+ const svalue *get_or_create_unmergeable (const svalue *arg);
+ const svalue *get_or_create_widening_svalue (tree type,
+ const program_point &point,
+ const svalue *base_svalue,
+ const svalue *iter_svalue);
+ const svalue *get_or_create_compound_svalue (tree type,
+ const binding_map &map);
+ const svalue *get_or_create_conjured_svalue (tree type, const gimple *stmt,
+ const region *id_reg);
+
+ const svalue *maybe_get_char_from_string_cst (tree string_cst,
+ tree byte_offset_cst);
+
+ /* region consolidation. */
+ const stack_region *get_stack_region () const { return &m_stack_region; }
+ const heap_region *get_heap_region () const { return &m_heap_region; }
+ const code_region *get_code_region () const { return &m_code_region; }
+ const globals_region *get_globals_region () const
+ {
+ return &m_globals_region;
+ }
+ const function_region *get_region_for_fndecl (tree fndecl);
+ const label_region *get_region_for_label (tree label);
+ const decl_region *get_region_for_global (tree expr);
+ const region *get_field_region (const region *parent, tree field);
+ const region *get_element_region (const region *parent,
+ tree element_type,
+ const svalue *index);
+ const region *get_offset_region (const region *parent,
+ tree type,
+ const svalue *byte_offset);
+ const region *get_cast_region (const region *original_region,
+ tree type);
+ const frame_region *get_frame_region (const frame_region *calling_frame,
+ function *fun);
+ const region *get_symbolic_region (const svalue *sval);
+ const string_region *get_region_for_string (tree string_cst);
+
+ const region *
+ get_region_for_unexpected_tree_code (region_model_context *ctxt,
+ tree t,
+ const dump_location_t &loc);
+
+ unsigned alloc_region_id () { return m_next_region_id++; }
+
+ store_manager *get_store_manager () { return &m_store_mgr; }
+
+ /* Dynamically-allocated region instances.
+ The number of these within the analysis can grow arbitrarily.
+ They are still owned by the manager. */
+ const region *create_region_for_heap_alloc ();
+ const region *create_region_for_alloca (const frame_region *frame);
+
+ void log_stats (logger *logger, bool show_objs) const;
+
+private:
+ bool too_complex_p (const complexity &c) const;
+ bool reject_if_too_complex (svalue *sval);
+
+ const svalue *maybe_fold_unaryop (tree type, enum tree_code op,
+ const svalue *arg);
+ const svalue *maybe_fold_binop (tree type, enum tree_code op,
+ const svalue *arg0, const svalue *arg1);
+ const svalue *maybe_fold_sub_svalue (tree type,
+ const svalue *parent_svalue,
+ const region *subregion);
+
+ unsigned m_next_region_id;
+ root_region m_root_region;
+ stack_region m_stack_region;
+ heap_region m_heap_region;
+
+ /* svalue consolidation. */
+ typedef hash_map<tree, constant_svalue *> constants_map_t;
+ constants_map_t m_constants_map;
+
+ typedef hash_map<tree, unknown_svalue *> unknowns_map_t;
+ unknowns_map_t m_unknowns_map;
+ const unknown_svalue *m_unknown_NULL;
+
+ typedef hash_map<poisoned_svalue::key_t,
+ poisoned_svalue *> poisoned_values_map_t;
+ poisoned_values_map_t m_poisoned_values_map;
+
+ typedef hash_map<setjmp_svalue::key_t,
+ setjmp_svalue *> setjmp_values_map_t;
+ setjmp_values_map_t m_setjmp_values_map;
+
+ typedef hash_map<const region *, initial_svalue *> initial_values_map_t;
+ initial_values_map_t m_initial_values_map;
+
+ typedef hash_map<region_svalue::key_t, region_svalue *> pointer_values_map_t;
+ pointer_values_map_t m_pointer_values_map;
+
+ typedef hash_map<unaryop_svalue::key_t,
+ unaryop_svalue *> unaryop_values_map_t;
+ unaryop_values_map_t m_unaryop_values_map;
+
+ typedef hash_map<binop_svalue::key_t, binop_svalue *> binop_values_map_t;
+ binop_values_map_t m_binop_values_map;
+
+ typedef hash_map<sub_svalue::key_t, sub_svalue *> sub_values_map_t;
+ sub_values_map_t m_sub_values_map;
+
+ typedef hash_map<const svalue *,
+ unmergeable_svalue *> unmergeable_values_map_t;
+ unmergeable_values_map_t m_unmergeable_values_map;
+
+ typedef hash_map<widening_svalue::key_t,
+ widening_svalue */*,
+ widening_svalue::key_t::hash_map_traits*/>
+ widening_values_map_t;
+ widening_values_map_t m_widening_values_map;
+
+ typedef hash_map<compound_svalue::key_t,
+ compound_svalue *> compound_values_map_t;
+ compound_values_map_t m_compound_values_map;
+
+ typedef hash_map<conjured_svalue::key_t,
+ conjured_svalue *> conjured_values_map_t;
+ conjured_values_map_t m_conjured_values_map;
+
+ /* Maximum complexity of svalues that weren't rejected. */
+ complexity m_max_complexity;
+
+ /* region consolidation. */
+
+ code_region m_code_region;
+ typedef hash_map<tree, function_region *> fndecls_map_t;
+ typedef fndecls_map_t::iterator fndecls_iterator_t;
+ fndecls_map_t m_fndecls_map;
+
+ typedef hash_map<tree, label_region *> labels_map_t;
+ typedef labels_map_t::iterator labels_iterator_t;
+ labels_map_t m_labels_map;
+
+ globals_region m_globals_region;
+ typedef hash_map<tree, decl_region *> globals_map_t;
+ typedef globals_map_t::iterator globals_iterator_t;
+ globals_map_t m_globals_map;
+
+ consolidation_map<field_region> m_field_regions;
+ consolidation_map<element_region> m_element_regions;
+ consolidation_map<offset_region> m_offset_regions;
+ consolidation_map<cast_region> m_cast_regions;
+ consolidation_map<frame_region> m_frame_regions;
+ consolidation_map<symbolic_region> m_symbolic_regions;
+
+ typedef hash_map<tree, string_region *> string_map_t;
+ string_map_t m_string_map;
+
+ store_manager m_store_mgr;
+
+ /* "Dynamically-allocated" region instances.
+ The number of these within the analysis can grow arbitrarily.
+ They are still owned by the manager. */
+ auto_delete_vec<region> m_managed_dynamic_regions;
+};
+
+struct append_ssa_names_cb_data;
+
+/* Helper class for handling calls to functions with known behavior.
+ Implemented in region-model-impl-calls.c. */
+
+class call_details
+{
+public:
+ call_details (const gcall *call, region_model *model,
+ region_model_context *ctxt);
+
+ region_model_context *get_ctxt () const { return m_ctxt; }
+ tree get_lhs_type () const { return m_lhs_type; }
+ const region *get_lhs_region () const { return m_lhs_region; }
- void walk_for_canonicalization (canonicalization *c) const FINAL OVERRIDE;
+ bool maybe_set_lhs (const svalue *result) const;
- void print_fields (const region_model &model,
- region_id this_rid,
- pretty_printer *pp) const FINAL OVERRIDE;
+ tree get_arg_tree (unsigned idx) const;
+ const svalue *get_arg_svalue (unsigned idx) const;
- bool m_possibly_null;
+ void dump_to_pp (pretty_printer *pp, bool simple) const;
+ void dump (bool simple) const;
+
+private:
+ const gcall *m_call;
+ region_model *m_model;
+ region_model_context *m_ctxt;
+ tree m_lhs_type;
+ const region *m_lhs_region;
};
/* A region_model encapsulates a representation of the state of memory, with
class region_model
{
public:
- region_model ();
+ region_model (region_model_manager *mgr);
region_model (const region_model &other);
~region_model ();
void print (pretty_printer *pp) const;
- void print_svalue (svalue_id sid, pretty_printer *pp) const;
-
- void dump_dot_to_pp (pretty_printer *pp) const;
- void dump_dot_to_file (FILE *fp) const;
- void dump_dot (const char *path) const;
-
- void dump_to_pp (pretty_printer *pp, bool summarize) const;
- void dump (FILE *fp, bool summarize) const;
- void dump (bool summarize) const;
+ void dump_to_pp (pretty_printer *pp, bool simple, bool multiline) const;
+ void dump (FILE *fp, bool simple, bool multiline) const;
+ void dump (bool simple) const;
void debug () const;
void validate () const;
- void canonicalize (region_model_context *ctxt);
+ void canonicalize ();
bool canonicalized_p () const;
- void check_for_poison (tree expr, region_model_context *ctxt);
void on_assignment (const gassign *stmt, region_model_context *ctxt);
+ const svalue *get_gassign_result (const gassign *assign,
+ region_model_context *ctxt);
bool on_call_pre (const gcall *stmt, region_model_context *ctxt);
void on_call_post (const gcall *stmt,
bool unknown_side_effects,
region_model_context *ctxt);
+
+ /* Specific handling for on_call_pre. */
+ bool impl_call_alloca (const call_details &cd);
+ void impl_call_analyzer_describe (const gcall *call,
+ region_model_context *ctxt);
+ void impl_call_analyzer_eval (const gcall *call,
+ region_model_context *ctxt);
+ bool impl_call_builtin_expect (const call_details &cd);
+ bool impl_call_calloc (const call_details &cd);
+ void impl_call_free (const call_details &cd);
+ bool impl_call_malloc (const call_details &cd);
+ bool impl_call_memset (const call_details &cd);
+ bool impl_call_strlen (const call_details &cd);
+
void handle_unrecognized_call (const gcall *call,
region_model_context *ctxt);
+ void get_reachable_svalues (svalue_set *out,
+ const svalue *extra_sval);
+
void on_return (const greturn *stmt, region_model_context *ctxt);
void on_setjmp (const gcall *stmt, const exploded_node *enode,
region_model_context *ctxt);
const cfg_superedge *last_cfg_superedge,
region_model_context *ctxt);
- void handle_phi (const gphi *phi,
- tree lhs, tree rhs, bool is_back_edge,
+ void handle_phi (const gphi *phi, tree lhs, tree rhs,
region_model_context *ctxt);
bool maybe_update_for_edge (const superedge &edge,
const gimple *last_stmt,
region_model_context *ctxt);
- region_id get_root_rid () const { return m_root_rid; }
- root_region *get_root_region () const;
-
- region_id get_stack_region_id () const;
- region_id push_frame (function *fun, vec<svalue_id> *arg_sids,
- region_model_context *ctxt);
- region_id get_current_frame_id () const;
+ const region *push_frame (function *fun, const vec<const svalue *> *arg_sids,
+ region_model_context *ctxt);
+ const frame_region *get_current_frame () const { return m_current_frame; }
function * get_current_function () const;
- void pop_frame (region_id result_dst_rid,
- bool purge, purge_stats *stats,
+ void pop_frame (const region *result_dst,
+ const svalue **out_result,
region_model_context *ctxt);
int get_stack_depth () const;
- function *get_function_at_depth (unsigned depth) const;
-
- region_id get_globals_region_id () const;
-
- svalue_id add_svalue (svalue *sval);
- void replace_svalue (svalue_id sid, svalue *new_sval);
-
- region_id add_region (region *r);
-
- region_id add_region_for_type (region_id parent_rid, tree type,
- region_model_context *ctxt);
-
- svalue *get_svalue (svalue_id sval_id) const;
- region *get_region (region_id rid) const;
-
- template <typename Subclass>
- Subclass *get_region (region_id rid) const
- {
- region *result = get_region (rid);
- if (result)
- gcc_assert (is_a<Subclass *> (result));
- return (Subclass *)result;
- }
-
- region_id get_lvalue (path_var pv, region_model_context *ctxt);
- region_id get_lvalue (tree expr, region_model_context *ctxt);
- svalue_id get_rvalue (path_var pv, region_model_context *ctxt);
- svalue_id get_rvalue (tree expr, region_model_context *ctxt);
-
- svalue_id get_or_create_ptr_svalue (tree ptr_type, region_id id);
- svalue_id get_or_create_constant_svalue (tree cst_expr);
- svalue_id get_svalue_for_fndecl (tree ptr_type, tree fndecl,
- region_model_context *ctxt);
- svalue_id get_svalue_for_label (tree ptr_type, tree label,
- region_model_context *ctxt);
-
- region_id get_region_for_fndecl (tree fndecl, region_model_context *ctxt);
- region_id get_region_for_label (tree label, region_model_context *ctxt);
+ const frame_region *get_frame_at_index (int index) const;
- svalue_id maybe_cast (tree type, svalue_id sid, region_model_context *ctxt);
- svalue_id maybe_cast_1 (tree type, svalue_id sid);
-
- region_id get_field_region (region_id rid, tree field,
- region_model_context *ctxt);
+ const region *get_lvalue (path_var pv, region_model_context *ctxt);
+ const region *get_lvalue (tree expr, region_model_context *ctxt);
+ const svalue *get_rvalue (path_var pv, region_model_context *ctxt);
+ const svalue *get_rvalue (tree expr, region_model_context *ctxt);
- region_id deref_rvalue (svalue_id ptr_sid, region_model_context *ctxt);
- region_id deref_rvalue (tree ptr, region_model_context *ctxt);
+ const region *deref_rvalue (const svalue *ptr_sval, tree ptr_tree,
+ region_model_context *ctxt);
- void set_value (region_id lhs_rid, svalue_id rhs_sid,
+ void set_value (const region *lhs_reg, const svalue *rhs_sval,
region_model_context *ctxt);
void set_value (tree lhs, tree rhs, region_model_context *ctxt);
- svalue_id set_to_new_unknown_value (region_id dst_rid, tree type,
- region_model_context *ctxt);
+ void clobber_region (const region *reg);
+ void purge_region (const region *reg);
+ void zero_fill_region (const region *reg);
+ void mark_region_as_unknown (const region *reg);
- void copy_region (region_id dst_rid, region_id src_rid,
+ void copy_region (const region *dst_reg, const region *src_reg,
region_model_context *ctxt);
-
- tristate eval_condition (svalue_id lhs,
+ tristate eval_condition (const svalue *lhs,
enum tree_code op,
- svalue_id rhs) const;
- tristate eval_condition_without_cm (svalue_id lhs,
+ const svalue *rhs) const;
+ tristate eval_condition_without_cm (const svalue *lhs,
enum tree_code op,
- svalue_id rhs) const;
+ const svalue *rhs) const;
+ tristate compare_initial_and_pointer (const initial_svalue *init,
+ const region_svalue *ptr) const;
tristate eval_condition (tree lhs,
enum tree_code op,
tree rhs,
bool add_constraint (tree lhs, enum tree_code op, tree rhs,
region_model_context *ctxt);
- tree maybe_get_constant (svalue_id sid) const;
-
- region_id add_new_malloc_region ();
-
- tree get_representative_tree (svalue_id sid) const;
- path_var get_representative_path_var (region_id rid) const;
- void get_path_vars_for_svalue (svalue_id sid, vec<path_var> *out) const;
+ const region *create_region_for_heap_alloc (const svalue *size_in_bytes);
+ const region *create_region_for_alloca (const svalue *size_in_bytes);
- void purge_unused_svalues (purge_stats *out,
- region_model_context *ctxt,
- svalue_id_set *known_used_sids = NULL);
- void remap_svalue_ids (const svalue_id_map &map);
- void remap_region_ids (const region_id_map &map);
-
- void purge_regions (const region_id_set &set,
- purge_stats *stats,
- logger *logger);
-
- unsigned get_num_svalues () const { return m_svalues.length (); }
- unsigned get_num_regions () const { return m_regions.length (); }
+ tree get_representative_tree (const svalue *sval) const;
+ path_var
+ get_representative_path_var (const svalue *sval,
+ svalue_set *visited) const;
+ path_var
+ get_representative_path_var (const region *reg,
+ svalue_set *visited) const;
/* For selftests. */
constraint_manager *get_constraints ()
return m_constraints;
}
- void get_descendents (region_id rid, region_id_set *out,
- region_id exclude_rid) const;
+ store *get_store () { return &m_store; }
+ const store *get_store () const { return &m_store; }
- void delete_region_and_descendents (region_id rid,
- enum poison_kind pkind,
- purge_stats *stats,
- logger *logger);
+ region_model_manager *get_manager () const { return m_mgr; }
+
+ void unbind_region_and_descendents (const region *reg,
+ enum poison_kind pkind);
bool can_merge_with_p (const region_model &other_model,
- region_model *out_model,
- svalue_id_merger_mapping *out) const;
- bool can_merge_with_p (const region_model &other_model,
+ const program_point &point,
region_model *out_model) const;
- svalue_id get_value_by_name (const char *name) const;
-
- svalue_id convert_byte_offset_to_array_index (tree ptr_type,
- svalue_id offset_sid);
-
- region_id get_or_create_mem_ref (tree type,
- svalue_id ptr_sid,
- svalue_id offset_sid,
- region_model_context *ctxt);
- region_id get_or_create_pointer_plus_expr (tree type,
- svalue_id ptr_sid,
- svalue_id offset_sid,
- region_model_context *ctxt);
- region_id get_or_create_view (region_id raw_rid, tree type,
- region_model_context *ctxt);
-
tree get_fndecl_for_call (const gcall *call,
region_model_context *ctxt);
- private:
- region_id get_lvalue_1 (path_var pv, region_model_context *ctxt);
- svalue_id get_rvalue_1 (path_var pv, region_model_context *ctxt);
+ void get_ssa_name_regions_for_current_frame
+ (auto_vec<const decl_region *> *out) const;
+ static void append_ssa_names_cb (const region *base_reg,
+ struct append_ssa_names_cb_data *data);
+
+ const svalue *get_store_value (const region *reg) const;
- void copy_struct_region (region_id dst_rid, struct_region *dst_reg,
- struct_region *src_reg, region_model_context *ctxt);
- void copy_union_region (region_id dst_rid, union_region *src_reg,
- region_model_context *ctxt);
- void copy_array_region (region_id dst_rid, array_region *dst_reg,
- array_region *src_reg, region_model_context *ctxt);
+ bool region_exists_p (const region *reg) const;
- region_id make_region_for_unexpected_tree_code (region_model_context *ctxt,
- tree t,
- const dump_location_t &loc);
+ void loop_replay_fixup (const region_model *dst_state);
+
+ private:
+ const region *get_lvalue_1 (path_var pv, region_model_context *ctxt);
+ const svalue *get_rvalue_1 (path_var pv, region_model_context *ctxt);
void add_any_constraints_from_ssa_def_stmt (tree lhs,
enum tree_code op,
const gswitch *switch_stmt,
region_model_context *ctxt);
- void poison_any_pointers_to_bad_regions (const region_id_set &bad_regions,
- enum poison_kind pkind);
+ int poison_any_pointers_to_descendents (const region *reg,
+ enum poison_kind pkind);
- void dump_summary_of_rep_path_vars (pretty_printer *pp,
- auto_vec<path_var> *rep_path_vars,
- bool *is_first);
+ void on_top_level_param (tree param, region_model_context *ctxt);
+
+ void record_dynamic_extents (const region *reg,
+ const svalue *size_in_bytes);
+
+ /* Storing this here to avoid passing it around everywhere. */
+ region_model_manager *const m_mgr;
+
+ store m_store;
- auto_delete_vec<svalue> m_svalues;
- auto_delete_vec<region> m_regions;
- region_id m_root_rid;
constraint_manager *m_constraints; // TODO: embed, rather than dynalloc?
+
+ const frame_region *m_current_frame;
};
/* Some region_model activity could lead to warnings (e.g. attempts to use an
uninitialized value). This abstract base class encapsulates an interface
for the region model to use when emitting such warnings.
- It also provides an interface for being notified about svalue_ids being
- remapped, and being deleted.
-
Having this as an abstract base class allows us to support the various
operations needed by program_state in the analyzer within region_model,
whilst keeping them somewhat modularized. */
public:
virtual void warn (pending_diagnostic *d) = 0;
- /* Hook for clients that store svalue_id instances, so that they
- can remap their IDs when the underlying region_model renumbers
- the IDs. */
- virtual void remap_svalue_ids (const svalue_id_map &map) = 0;
-
-#if 0
- /* Return true if if's OK to purge SID when simplifying state.
- Subclasses can return false for values that have sm state,
- to avoid generating "leak" false positives. */
- virtual bool can_purge_p (svalue_id sid) = 0;
-#endif
+ /* Hook for clients to be notified when an SVAL that was reachable
+ in a previous state is no longer live, so that clients can emit warnings
+ about leaks. */
+ virtual void on_svalue_leak (const svalue *sval) = 0;
- /* Hook for clients to be notified when a range of SIDs have
- been purged, so that they can purge state relating to those
- values (and potentially emit warnings about leaks).
- All SIDs from FIRST_PURGED_SID numerically upwards are being
- purged.
- The return values is a count of how many items of data the client
- has purged (potentially for use in selftests).
- MAP has already been applied to the IDs, but is provided in case
- the client needs to figure out the old IDs. */
- virtual int on_svalue_purge (svalue_id first_purged_sid,
- const svalue_id_map &map) = 0;
+ /* Hook for clients to be notified when the set of explicitly live
+ svalues changes, so that they can purge state relating to dead
+ svalues. */
+ virtual void on_liveness_change (const svalue_set &live_svalues,
+ const region_model *model) = 0;
virtual logger *get_logger () = 0;
- /* Hook for clients to be notified when CHILD_SID is created
- from PARENT_SID, when "inheriting" a value for a region from a
- parent region.
- This exists so that state machines that inherit state can
- propagate the state from parent to child. */
- virtual void on_inherited_svalue (svalue_id parent_sid,
- svalue_id child_sid) = 0;
-
- /* Hook for clients to be notified when DST_SID is created
- (or reused) as a cast from SRC_SID.
- This exists so that state machines can propagate the state
- from SRC_SID to DST_SID. */
- virtual void on_cast (svalue_id src_sid,
- svalue_id dst_sid) = 0;
-
/* Hook for clients to be notified when the condition
"LHS OP RHS" is added to the region model.
This exists so that state machines can detect tests on edges,
virtual void on_condition (tree lhs, enum tree_code op, tree rhs) = 0;
/* Hooks for clients to be notified when an unknown change happens
- to SID (in response to a call to an unknown function). */
- virtual void on_unknown_change (svalue_id sid) = 0;
+ to SVAL (in response to a call to an unknown function). */
+ virtual void on_unknown_change (const svalue *sval, bool is_mutable) = 0;
/* Hooks for clients to be notified when a phi node is handled,
where RHS is the pertinent argument. */
{
public:
void warn (pending_diagnostic *) OVERRIDE {}
- void remap_svalue_ids (const svalue_id_map &) OVERRIDE {}
- int on_svalue_purge (svalue_id, const svalue_id_map &) OVERRIDE
- {
- return 0;
- }
+ void on_svalue_leak (const svalue *) OVERRIDE {}
+ void on_liveness_change (const svalue_set &,
+ const region_model *) OVERRIDE {}
logger *get_logger () OVERRIDE { return NULL; }
- void on_inherited_svalue (svalue_id parent_sid ATTRIBUTE_UNUSED,
- svalue_id child_sid ATTRIBUTE_UNUSED)
- OVERRIDE
- {
- }
- void on_cast (svalue_id src_sid ATTRIBUTE_UNUSED,
- svalue_id dst_sid ATTRIBUTE_UNUSED) OVERRIDE
- {
- }
void on_condition (tree lhs ATTRIBUTE_UNUSED,
enum tree_code op ATTRIBUTE_UNUSED,
tree rhs ATTRIBUTE_UNUSED) OVERRIDE
{
}
- void on_unknown_change (svalue_id sid ATTRIBUTE_UNUSED) OVERRIDE
+ void on_unknown_change (const svalue *sval ATTRIBUTE_UNUSED,
+ bool is_mutable ATTRIBUTE_UNUSED) OVERRIDE
{
}
void on_phi (const gphi *phi ATTRIBUTE_UNUSED,
{
model_merger (const region_model *model_a,
const region_model *model_b,
- region_model *merged_model,
- svalue_id_merger_mapping *sid_mapping)
+ const program_point &point,
+ region_model *merged_model)
: m_model_a (model_a), m_model_b (model_b),
- m_merged_model (merged_model),
- m_map_regions_from_a_to_m (model_a->get_num_regions ()),
- m_map_regions_from_b_to_m (model_b->get_num_regions ()),
- m_sid_mapping (sid_mapping)
+ m_point (point),
+ m_merged_model (merged_model)
{
- gcc_assert (sid_mapping);
}
- void dump_to_pp (pretty_printer *pp) const;
- void dump (FILE *fp) const;
- void dump () const;
-
- template <typename Subclass>
- Subclass *get_region_a (region_id rid_a) const
- {
- return m_model_a->get_region <Subclass> (rid_a);
- }
+ void dump_to_pp (pretty_printer *pp, bool simple) const;
+ void dump (FILE *fp, bool simple) const;
+ void dump (bool simple) const;
- template <typename Subclass>
- Subclass *get_region_b (region_id rid_b) const
+ region_model_manager *get_manager () const
{
- return m_model_b->get_region <Subclass> (rid_b);
+ return m_model_a->get_manager ();
}
- bool can_merge_values_p (svalue_id sid_a,
- svalue_id sid_b,
- svalue_id *merged_sid);
-
- void record_regions (region_id a_rid,
- region_id b_rid,
- region_id merged_rid);
-
- void record_svalues (svalue_id a_sid,
- svalue_id b_sid,
- svalue_id merged_sid);
-
const region_model *m_model_a;
const region_model *m_model_b;
+ const program_point &m_point;
region_model *m_merged_model;
-
- one_way_region_id_map m_map_regions_from_a_to_m;
- one_way_region_id_map m_map_regions_from_b_to_m;
- svalue_id_merger_mapping *m_sid_mapping;
};
-/* A bundle of data that can be optionally generated during merger of two
- region_models that describes how svalue_ids in each of the two inputs
- are mapped to svalue_ids in the merged output.
-
- For use when merging sm-states within program_state. */
+/* A bundle of state. */
-struct svalue_id_merger_mapping
+class engine
{
- svalue_id_merger_mapping (const region_model &a,
- const region_model &b);
-
- void dump_to_pp (pretty_printer *pp) const;
- void dump (FILE *fp) const;
- void dump () const;
-
- one_way_svalue_id_map m_map_from_a_to_m;
- one_way_svalue_id_map m_map_from_b_to_m;
-};
-
-/* A bundle of data used when canonicalizing a region_model so that the
- order of regions and svalues is in a predictable order (thus increasing
- the chance of two region_models being equal).
-
- This object is used to keep track of a recursive traversal across the
- svalues and regions within the model, made in a deterministic order,
- assigning new ids the first time each region or svalue is
- encountered. */
+public:
+ region_model_manager *get_model_manager () { return &m_mgr; }
-struct canonicalization
-{
- canonicalization (const region_model &model);
- void walk_rid (region_id rid);
- void walk_sid (svalue_id sid);
+ void log_stats (logger *logger) const;
- void dump_to_pp (pretty_printer *pp) const;
- void dump (FILE *fp) const;
- void dump () const;
+private:
+ region_model_manager m_mgr;
- const region_model &m_model;
- /* Maps from existing IDs to new IDs. */
- region_id_map m_rid_map;
- svalue_id_map m_sid_map;
- /* The next IDs to hand out. */
- int m_next_rid_int;
- int m_next_sid_int;
};
} // namespace ana
-namespace inchash
-{
- extern void add (svalue_id sid, hash &hstate);
- extern void add (region_id rid, hash &hstate);
-} // namespace inchash
-
extern void debug (const region_model &rmodel);
namespace ana {
FINAL OVERRIDE
{
internal_error ("unhandled tree code: %qs",
- t ? get_tree_code_name (TREE_CODE (t)) : "(null)");
+ get_tree_code_name (TREE_CODE (t)));
}
private:
/* Implementation detail of the ASSERT_CONDITION_* macros. */
+void assert_condition (const location &loc,
+ region_model &model,
+ const svalue *lhs, tree_code op, const svalue *rhs,
+ tristate expected);
+
void assert_condition (const location &loc,
region_model &model,
tree lhs, tree_code op, tree rhs,
--- /dev/null
+/* Regions of memory.
+ Copyright (C) 2019-2020 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tree.h"
+#include "diagnostic-core.h"
+#include "gimple-pretty-print.h"
+#include "function.h"
+#include "basic-block.h"
+#include "gimple.h"
+#include "gimple-iterator.h"
+#include "diagnostic-core.h"
+#include "graphviz.h"
+#include "options.h"
+#include "cgraph.h"
+#include "tree-dfa.h"
+#include "stringpool.h"
+#include "convert.h"
+#include "target.h"
+#include "fold-const.h"
+#include "tree-pretty-print.h"
+#include "diagnostic-color.h"
+#include "diagnostic-metadata.h"
+#include "tristate.h"
+#include "bitmap.h"
+#include "selftest.h"
+#include "function.h"
+#include "analyzer/analyzer.h"
+#include "analyzer/analyzer-logging.h"
+#include "ordered-hash-map.h"
+#include "options.h"
+#include "cgraph.h"
+#include "cfg.h"
+#include "digraph.h"
+#include "analyzer/supergraph.h"
+#include "sbitmap.h"
+#include "analyzer/call-string.h"
+#include "analyzer/program-point.h"
+#include "analyzer/store.h"
+#include "analyzer/region-model.h"
+
+#if ENABLE_ANALYZER
+
+namespace ana {
+
+/* class region and its various subclasses. */
+
+/* class region. */
+
+/* region's destructor: free the lazily-allocated offset cache
+   (see region::get_offset).  */
+
+region::~region ()
+{
+  delete m_cached_offset;
+}
+
+/* Compare REG1 and REG2 by id, returning <0, 0, or >0 for use in
+   sorting.  */
+
+int
+region::cmp_ids (const region *reg1, const region *reg2)
+{
+  /* Use explicit three-way comparison rather than subtracting the ids:
+     converting the difference of two longs to int can truncate/overflow,
+     yielding an inconsistent ordering for qsort.  */
+  if (reg1->get_id () < reg2->get_id ())
+    return -1;
+  if (reg1->get_id () > reg2->get_id ())
+    return 1;
+  return 0;
+}
+
+/* Determine the base region for this region: when considering bindings
+   for this region, the base region is the ancestor which identifies
+   which cluster they should be partitioned into.
+   Regions within the same struct/union/array are in the same cluster.
+   Different decls are in different clusters. */
+
+const region *
+region::get_base_region () const
+{
+  const region *iter = this;
+  while (iter)
+    {
+      switch (iter->get_kind ())
+	{
+	/* Parts of an enclosing aggregate: keep walking up.  */
+	case RK_FIELD:
+	case RK_ELEMENT:
+	case RK_OFFSET:
+	  iter = iter->get_parent_region ();
+	  continue;
+	/* A cast is an alias of the region it was cast from;
+	   continue the walk from the original region.  */
+	case RK_CAST:
+	  iter = iter->dyn_cast_cast_region ()->get_original_region ();
+	  continue;
+	/* Any other kind is its own base region.  */
+	default:
+	  return iter;
+	}
+    }
+  return iter;
+}
+
+/* Return true if get_base_region() == this for this region. */
+
+bool
+region::base_region_p () const
+{
+  switch (get_kind ())
+    {
+    /* Region kinds representing a descendent of a base region. */
+    case RK_FIELD:
+    case RK_ELEMENT:
+    case RK_OFFSET:
+    case RK_CAST:
+      return false;
+
+    /* All other kinds are their own base region
+       (mirroring get_base_region's walk).  */
+    default:
+      return true;
+    }
+}
+
+/* Return true if this region is ELDER or one of its descendents. */
+
+bool
+region::descendent_of_p (const region *elder) const
+{
+  const region *iter = this;
+  while (iter)
+    {
+      if (iter == elder)
+	return true;
+      /* Casts are transparent for the purposes of ancestry: walk
+	 through to the original region rather than the parent.  */
+      if (iter->get_kind () == RK_CAST)
+	iter = iter->dyn_cast_cast_region ()->get_original_region ();
+      else
+	iter = iter->get_parent_region ();
+    }
+  return false;
+}
+
+/* If this region is a frame_region, or a descendent of one, return it.
+   Otherwise return NULL. */
+
+const frame_region *
+region::maybe_get_frame_region () const
+{
+  const region *iter = this;
+  while (iter)
+    {
+      if (const frame_region *frame_reg = iter->dyn_cast_frame_region ())
+	return frame_reg;
+      /* As in descendent_of_p, treat casts as transparent.  */
+      if (iter->get_kind () == RK_CAST)
+	iter = iter->dyn_cast_cast_region ()->get_original_region ();
+      else
+	iter = iter->get_parent_region ();
+    }
+  return NULL;
+}
+
+/* If this region is a decl_region, return the decl.
+   Otherwise return NULL_TREE. */
+
+tree
+region::maybe_get_decl () const
+{
+  if (const decl_region *decl_reg = dyn_cast_decl_region ())
+    return decl_reg->get_decl ();
+  return NULL_TREE;
+}
+
+/* Get the region_offset for this region (calculating it on the
+   first call and caching it internally).  */
+
+region_offset
+region::get_offset () const
+{
+  /* Lazily compute and cache the offset; the cache is freed in
+     region's dtor.  (m_cached_offset is presumably declared mutable
+     so that this const member function can write it.)  */
+  if (!m_cached_offset)
+    m_cached_offset = new region_offset (calc_offset ());
+  return *m_cached_offset;
+}
+
+/* If the size of this region (in bytes) is known statically, write it to *OUT
+   and return true.
+   Otherwise return false. */
+
+bool
+region::get_byte_size (byte_size_t *out) const
+{
+  tree type = get_type ();
+
+  /* Bail out e.g. for heap-allocated regions. */
+  if (!type)
+    return false;
+
+  /* int_size_in_bytes returns -1 for incomplete or variable-sized
+     types; treat those as "size not statically known".  */
+  HOST_WIDE_INT bytes = int_size_in_bytes (type);
+  if (bytes == -1)
+    return false;
+  *out = bytes;
+  return true;
+}
+
+/* If the size of this region (in bits) is known statically, write it to *OUT
+   and return true.
+   Otherwise return false. */
+
+bool
+region::get_bit_size (bit_size_t *out) const
+{
+  /* Delegate to the byte-size calculation and scale up.  */
+  byte_size_t byte_size;
+  if (!get_byte_size (&byte_size))
+    return false;
+  *out = byte_size * BITS_PER_UNIT;
+  return true;
+}
+
+/* Get the field within RECORD_TYPE at BIT_OFFSET, or NULL_TREE if
+   BIT_OFFSET is before the first field.  */
+
+static tree
+get_field_at_bit_offset (tree record_type, bit_offset_t bit_offset)
+{
+  gcc_assert (TREE_CODE (record_type) == RECORD_TYPE);
+  gcc_assert (bit_offset >= 0);
+
+  /* Find the first field that has an offset > BIT_OFFSET,
+     then return the one preceding it.
+     Skip other trees within the chain, such as FUNCTION_DECLs. */
+  tree last_field = NULL_TREE;
+  for (tree iter = TYPE_FIELDS (record_type); iter != NULL_TREE;
+       iter = DECL_CHAIN (iter))
+    {
+      if (TREE_CODE (iter) == FIELD_DECL)
+	{
+	  /* int_bit_position returns a HOST_WIDE_INT; use that type
+	     rather than int to avoid truncating offsets of fields
+	     beyond 2^31 bits.  */
+	  HOST_WIDE_INT iter_field_offset = int_bit_position (iter);
+	  if (bit_offset < iter_field_offset)
+	    return last_field;
+	  last_field = iter;
+	}
+    }
+  return last_field;
+}
+
+/* Populate *OUT with descendent regions of type TYPE that match
+   RELATIVE_BIT_OFFSET and SIZE_IN_BITS within this region. */
+
+void
+region::get_subregions_for_binding (region_model_manager *mgr,
+				    bit_offset_t relative_bit_offset,
+				    bit_size_t size_in_bits,
+				    tree type,
+				    auto_vec <const region *> *out) const
+{
+  /* Untyped regions (e.g. heap allocations) have no subregions to offer.  */
+  if (get_type () == NULL_TREE)
+    return;
+  /* Base case: this region itself matches.  */
+  if (relative_bit_offset == 0
+      && types_compatible_p (get_type (), type))
+    {
+      out->safe_push (this);
+      return;
+    }
+  switch (TREE_CODE (get_type ()))
+    {
+    case ARRAY_TYPE:
+      {
+	/* Locate the element containing the offset and recurse into it
+	   with the remainder of the offset.  */
+	tree element_type = TREE_TYPE (get_type ());
+	HOST_WIDE_INT hwi_byte_size = int_size_in_bytes (element_type);
+	if (hwi_byte_size > 0)
+	  {
+	    HOST_WIDE_INT bits_per_element
+	      = hwi_byte_size << LOG2_BITS_PER_UNIT;
+	    HOST_WIDE_INT element_index
+	      = (relative_bit_offset.to_shwi () / bits_per_element);
+	    tree element_index_cst
+	      = build_int_cst (integer_type_node, element_index);
+	    HOST_WIDE_INT inner_bit_offset
+	      = relative_bit_offset.to_shwi () % bits_per_element;
+	    const region *subregion = mgr->get_element_region
+	      (this, element_type,
+	       mgr->get_or_create_constant_svalue (element_index_cst));
+	    subregion->get_subregions_for_binding (mgr, inner_bit_offset,
+						   size_in_bits, type, out);
+	  }
+      }
+      break;
+    case RECORD_TYPE:
+      {
+	/* The bit offset might be *within* one of the fields (such as
+	   with nested structs).
+	   So we want to find the enclosing field, adjust the offset,
+	   and repeat. */
+	if (tree field = get_field_at_bit_offset (get_type (),
+						  relative_bit_offset))
+	  {
+	    int field_bit_offset = int_bit_position (field);
+	    const region *subregion = mgr->get_field_region (this, field);
+	    subregion->get_subregions_for_binding
+	      (mgr, relative_bit_offset - field_bit_offset,
+	       size_in_bits, type, out);
+	  }
+      }
+      break;
+    case UNION_TYPE:
+      {
+	/* All union members start at offset 0, so recurse into every
+	   field with the offset unchanged.  */
+	for (tree field = TYPE_FIELDS (get_type ()); field != NULL_TREE;
+	     field = DECL_CHAIN (field))
+	  {
+	    const region *subregion = mgr->get_field_region (this, field);
+	    subregion->get_subregions_for_binding (mgr,
+						   relative_bit_offset,
+						   size_in_bits,
+						   type,
+						   out);
+	  }
+      }
+      break;
+    default:
+      /* Do nothing. */
+      break;
+    }
+}
+
+/* Walk from this region up to the base region within its cluster, calculating
+   the offset relative to the base region, either as an offset in bits,
+   or a symbolic offset.
+   Falls back to a symbolic offset as soon as any step of the walk is
+   not a compile-time constant.  */
+
+region_offset
+region::calc_offset () const
+{
+  const region *iter_region = this;
+  bit_offset_t accum_bit_offset = 0;
+
+  while (iter_region)
+    {
+      switch (iter_region->get_kind ())
+	{
+	case RK_FIELD:
+	  {
+	    const field_region *field_reg
+	      = (const field_region *)iter_region;
+	    iter_region = iter_region->get_parent_region ();
+
+	    /* Compare with e.g. gimple-fold.c's
+	       fold_nonarray_ctor_reference. */
+	    tree field = field_reg->get_field ();
+	    tree byte_offset = DECL_FIELD_OFFSET (field);
+	    if (TREE_CODE (byte_offset) != INTEGER_CST)
+	      return region_offset::make_symbolic (iter_region);
+	    tree field_offset = DECL_FIELD_BIT_OFFSET (field);
+	    /* Compute bit offset of the field. */
+	    offset_int bitoffset
+	      = (wi::to_offset (field_offset)
+		 + (wi::to_offset (byte_offset) << LOG2_BITS_PER_UNIT));
+	    accum_bit_offset += bitoffset;
+	  }
+	  continue;
+
+	case RK_ELEMENT:
+	  {
+	    const element_region *element_reg
+	      = (const element_region *)iter_region;
+	    iter_region = iter_region->get_parent_region ();
+
+	    /* Only a constant index gives a concrete offset.  */
+	    if (tree idx_cst
+		  = element_reg->get_index ()->maybe_get_constant ())
+	      {
+		gcc_assert (TREE_CODE (idx_cst) == INTEGER_CST);
+
+		tree elem_type = element_reg->get_type ();
+		offset_int element_idx = wi::to_offset (idx_cst);
+
+		/* First, use int_size_in_bytes, to reject the case where we
+		   have an incomplete type, or a non-constant value. */
+		HOST_WIDE_INT hwi_byte_size = int_size_in_bytes (elem_type);
+		if (hwi_byte_size > 0)
+		  {
+		    offset_int element_bit_size
+		      = hwi_byte_size << LOG2_BITS_PER_UNIT;
+		    offset_int element_bit_offset
+		      = element_idx * element_bit_size;
+		    accum_bit_offset += element_bit_offset;
+		    continue;
+		  }
+	      }
+	    return region_offset::make_symbolic (iter_region);
+	  }
+	  continue;
+
+	case RK_OFFSET:
+	  {
+	    const offset_region *offset_reg
+	      = (const offset_region *)iter_region;
+	    iter_region = iter_region->get_parent_region ();
+
+	    if (tree byte_offset_cst
+		  = offset_reg->get_byte_offset ()->maybe_get_constant ())
+	      {
+		gcc_assert (TREE_CODE (byte_offset_cst) == INTEGER_CST);
+		/* Use a signed value for the byte offset, to handle
+		   negative offsets. */
+		HOST_WIDE_INT byte_offset
+		  = wi::to_offset (byte_offset_cst).to_shwi ();
+		HOST_WIDE_INT bit_offset = byte_offset * BITS_PER_UNIT;
+		accum_bit_offset += bit_offset;
+	      }
+	    else
+	      return region_offset::make_symbolic (iter_region);
+	  }
+	  continue;
+
+	case RK_CAST:
+	  {
+	    /* Casts don't change the offset; walk through to the
+	       original region.  */
+	    const cast_region *cast_reg
+	      = as_a <const cast_region *> (iter_region);
+	    iter_region = cast_reg->get_original_region ();
+	  }
+	  continue;
+
+	default:
+	  return region_offset::make_concrete (iter_region, accum_bit_offset);
+	}
+    }
+  return region_offset::make_concrete (iter_region, accum_bit_offset);
+}
+
+/* Copy from SRC_REG to DST_REG, using CTXT for any issues that occur.
+   Implemented as a read of SRC_REG's value from the store followed by
+   a write of that value to DST_REG.  */
+
+void
+region_model::copy_region (const region *dst_reg, const region *src_reg,
+			   region_model_context *ctxt)
+{
+  gcc_assert (dst_reg);
+  gcc_assert (src_reg);
+  /* Self-copy is a no-op.  */
+  if (dst_reg == src_reg)
+    return;
+
+  const svalue *sval = get_store_value (src_reg);
+  set_value (dst_reg, sval, ctxt);
+}
+
+/* Dump a description of this region to stderr. */
+
+DEBUG_FUNCTION void
+region::dump (bool simple) const
+{
+  pretty_printer pp;
+  pp_format_decoder (&pp) = default_tree_printer;
+  /* Honor the colorization setting of the global diagnostic printer.  */
+  pp_show_color (&pp) = pp_show_color (global_dc->printer);
+  pp.buffer->stream = stderr;
+  dump_to_pp (&pp, simple);
+  pp_newline (&pp);
+  pp_flush (&pp);
+}
+
+/* Generate a description of this region as a label_text.
+   label_text::take assumes ownership of the xstrdup'd buffer.  */
+
+DEBUG_FUNCTION label_text
+region::get_desc (bool simple) const
+{
+  pretty_printer pp;
+  pp_format_decoder (&pp) = default_tree_printer;
+  dump_to_pp (&pp, simple);
+  return label_text::take (xstrdup (pp_formatted_text (&pp)));
+}
+
+/* Base implementation of region::accept vfunc.
+   Subclass implementations should chain up to this.
+   Visits this region, then recursively visits its ancestors.  */
+
+void
+region::accept (visitor *v) const
+{
+  v->visit_region (this);
+  if (m_parent)
+    m_parent->accept (v);
+}
+
+/* Return true if this is a symbolic region for dereferencing an
+   unknown ptr.
+   We shouldn't attempt to bind values for this region (but
+   can unbind values for other regions). */
+
+bool
+region::symbolic_for_unknown_ptr_p () const
+{
+  if (const symbolic_region *sym_reg = dyn_cast_symbolic_region ())
+    if (sym_reg->get_pointer ()->get_kind () == SK_UNKNOWN)
+      return true;
+  return false;
+}
+
+/* region's ctor.  TYPE may be NULL_TREE (e.g. for heap-allocated
+   regions); the offset cache starts empty and is populated lazily
+   by get_offset.  */
+
+region::region (complexity c, unsigned id, const region *parent, tree type)
+: m_complexity (c), m_id (id), m_parent (parent), m_type (type),
+  m_cached_offset (NULL)
+{
+  gcc_assert (type == NULL_TREE || TYPE_P (type));
+}
+
+/* Comparator for regions, using their IDs to order them;
+   qsort-compatible wrapper around cmp_ids.  */
+
+int
+region::cmp_ptrs (const void *p1, const void *p2)
+{
+  const region * const *reg1 = (const region * const *)p1;
+  const region * const *reg2 = (const region * const *)p2;
+
+  return cmp_ids (*reg1, *reg2);
+}
+
+/* Determine if a pointer to this region must be non-NULL.
+
+   Generally, pointers to regions must be non-NULL, but pointers
+   to symbolic_regions might, in fact, be NULL.
+
+   This allows us to simulate functions like malloc and calloc with:
+   - only one "outcome" from each statement,
+   - the idea that the pointer is on the heap if non-NULL
+   - the possibility that the pointer could be NULL
+   - the idea that successive values returned from malloc are non-equal
+   - to be able to zero-fill for calloc. */
+
+bool
+region::non_null_p () const
+{
+  switch (get_kind ())
+    {
+    /* Concrete regions (decls, fields, etc) can't be NULL.  */
+    default:
+      return true;
+    case RK_SYMBOLIC:
+      /* Are we within a symbolic_region?  If so, it could be NULL, and we
+	 have to fall back on the constraints. */
+      return false;
+    case RK_HEAP_ALLOCATED:
+      return false;
+    }
+}
+
+/* Comparator for trees to impose a deterministic ordering on
+   T1 and T2.
+   Only handles the tree codes that can arise in this file
+   (DECLs, SSA_NAMEs and constants); other codes hit
+   gcc_unreachable.  */
+
+static int
+tree_cmp (const_tree t1, const_tree t2)
+{
+  gcc_assert (t1);
+  gcc_assert (t2);
+
+  /* Test tree codes first. */
+  if (TREE_CODE (t1) != TREE_CODE (t2))
+    return TREE_CODE (t1) - TREE_CODE (t2);
+
+  /* From this point on, we know T1 and T2 have the same tree code. */
+
+  if (DECL_P (t1))
+    {
+      /* Order decls by name where both are named; unnamed decls sort
+	 after named ones, ordered among themselves by DECL_UID.  */
+      if (DECL_NAME (t1) && DECL_NAME (t2))
+	return strcmp (IDENTIFIER_POINTER (DECL_NAME (t1)),
+		       IDENTIFIER_POINTER (DECL_NAME (t2)));
+      else
+	{
+	  if (DECL_NAME (t1))
+	    return -1;
+	  else if (DECL_NAME (t2))
+	    return 1;
+	  else
+	    return DECL_UID (t1) - DECL_UID (t2);
+	}
+    }
+
+  switch (TREE_CODE (t1))
+    {
+    case SSA_NAME:
+      {
+	/* Order by underlying var (if any), then by SSA version.  */
+	if (SSA_NAME_VAR (t1) && SSA_NAME_VAR (t2))
+	  {
+	    int var_cmp = tree_cmp (SSA_NAME_VAR (t1), SSA_NAME_VAR (t2));
+	    if (var_cmp)
+	      return var_cmp;
+	    return SSA_NAME_VERSION (t1) - SSA_NAME_VERSION (t2);
+	  }
+	else
+	  {
+	    if (SSA_NAME_VAR (t1))
+	      return -1;
+	    else if (SSA_NAME_VAR (t2))
+	      return 1;
+	    else
+	      return SSA_NAME_VERSION (t1) - SSA_NAME_VERSION (t2);
+	  }
+      }
+      break;
+
+    case INTEGER_CST:
+      return tree_int_cst_compare (t1, t2);
+
+    case REAL_CST:
+      {
+	const real_value *rv1 = TREE_REAL_CST_PTR (t1);
+	const real_value *rv2 = TREE_REAL_CST_PTR (t2);
+	if (real_compare (UNORDERED_EXPR, rv1, rv2))
+	  {
+	    /* Impose an arbitrary order on NaNs relative to other NaNs
+	       and to non-NaNs. */
+	    if (int cmp_isnan = real_isnan (rv1) - real_isnan (rv2))
+	      return cmp_isnan;
+	    if (int cmp_issignaling_nan
+		  = real_issignaling_nan (rv1) - real_issignaling_nan (rv2))
+	      return cmp_issignaling_nan;
+	    return real_isneg (rv1) - real_isneg (rv2);
+	  }
+	if (real_compare (LT_EXPR, rv1, rv2))
+	  return -1;
+	if (real_compare (GT_EXPR, rv1, rv2))
+	  return 1;
+	return 0;
+      }
+
+    case STRING_CST:
+      return strcmp (TREE_STRING_POINTER (t1),
+		     TREE_STRING_POINTER (t2));
+
+    default:
+      gcc_unreachable ();
+      break;
+    }
+
+  gcc_unreachable ();
+
+  return 0;
+}
+
+/* qsort comparator for trees to impose a deterministic ordering on
+   P1 and P2; thin wrapper around the const_tree overload above.  */
+
+int
+tree_cmp (const void *p1, const void *p2)
+{
+  const_tree t1 = *(const_tree const *)p1;
+  const_tree t2 = *(const_tree const *)p2;
+
+  return tree_cmp (t1, t2);
+}
+
+/* class frame_region : public space_region. */
+
+/* frame_region's dtor: the frame owns the decl_regions for its
+   locals (created on demand in get_region_for_local), so delete
+   them here.  */
+
+frame_region::~frame_region ()
+{
+  for (map_t::iterator iter = m_locals.begin ();
+       iter != m_locals.end ();
+       ++iter)
+    delete (*iter).second;
+}
+
+/* Implementation of region::accept vfunc for frame_region:
+   also visit the calling frame, if any.  */
+
+void
+frame_region::accept (visitor *v) const
+{
+  region::accept (v);
+  if (m_calling_frame)
+    m_calling_frame->accept (v);
+}
+
+/* Implementation of region::dump_to_pp vfunc for frame_region. */
+
+void
+frame_region::dump_to_pp (pretty_printer *pp, bool simple) const
+{
+  if (simple)
+    pp_printf (pp, "frame: %qs@%i", function_name (m_fun), get_stack_depth ());
+  else
+    pp_printf (pp, "frame_region(%qs, index: %i, depth: %i)",
+	       function_name (m_fun), m_index, get_stack_depth ());
+}
+
+/* Get the decl_region for EXPR within this frame, creating it on the
+   first request and caching it in m_locals.  The cached regions are
+   owned by this frame and freed in its dtor.  */
+
+const decl_region *
+frame_region::get_region_for_local (region_model_manager *mgr,
+				    tree expr) const
+{
+  // TODO: could also check that VAR_DECLs are locals
+  gcc_assert (TREE_CODE (expr) == PARM_DECL
+	      || TREE_CODE (expr) == VAR_DECL
+	      || TREE_CODE (expr) == SSA_NAME
+	      || TREE_CODE (expr) == RESULT_DECL);
+
+  /* Ideally we'd use mutable here. */
+  map_t &mutable_locals = const_cast <map_t &> (m_locals);
+
+  if (decl_region **slot = mutable_locals.get (expr))
+    return *slot;
+  decl_region *reg
+    = new decl_region (mgr->alloc_region_id (), this, expr);
+  mutable_locals.put (expr, reg);
+  return reg;
+}
+
+/* class globals_region : public space_region. */
+
+/* Implementation of region::dump_to_pp vfunc for globals_region.
+   The "simple" form uses "::", i.e. the C++ global namespace
+   qualifier.  */
+
+void
+globals_region::dump_to_pp (pretty_printer *pp, bool simple) const
+{
+  if (simple)
+    pp_string (pp, "::");
+  else
+    pp_string (pp, "globals");
+}
+
+/* class code_region : public map_region. */
+
+/* Implementation of region::dump_to_pp vfunc for code_region. */
+
+void
+code_region::dump_to_pp (pretty_printer *pp, bool simple) const
+{
+  if (simple)
+    pp_string (pp, "code region");
+  else
+    pp_string (pp, "code_region()");
+}
+
+/* class function_region : public region. */
+
+/* Implementation of region::dump_to_pp vfunc for function_region. */
+
+void
+function_region::dump_to_pp (pretty_printer *pp, bool simple) const
+{
+  if (simple)
+    {
+      dump_quoted_tree (pp, m_fndecl);
+    }
+  else
+    {
+      pp_string (pp, "function_region(");
+      dump_quoted_tree (pp, m_fndecl);
+      pp_string (pp, ")");
+    }
+}
+
+/* class label_region : public region. */
+
+/* Implementation of region::dump_to_pp vfunc for label_region. */
+
+void
+label_region::dump_to_pp (pretty_printer *pp, bool simple) const
+{
+  if (simple)
+    {
+      dump_quoted_tree (pp, m_label);
+    }
+  else
+    {
+      pp_string (pp, "label_region(");
+      dump_quoted_tree (pp, m_label);
+      pp_string (pp, ")");
+    }
+}
+
+/* class stack_region : public region. */
+
+/* Implementation of region::dump_to_pp vfunc for stack_region. */
+
+void
+stack_region::dump_to_pp (pretty_printer *pp, bool simple) const
+{
+  if (simple)
+    pp_string (pp, "stack region");
+  else
+    pp_string (pp, "stack_region()");
+}
+
+/* class heap_region : public region. */
+
+/* Implementation of region::dump_to_pp vfunc for heap_region. */
+
+void
+heap_region::dump_to_pp (pretty_printer *pp, bool simple) const
+{
+  if (simple)
+    pp_string (pp, "heap region");
+  else
+    pp_string (pp, "heap_region()");
+}
+
+/* class root_region : public region. */
+
+/* root_region's ctor. */
+
+root_region::root_region (unsigned id)
+: region (complexity (1, 1), id, NULL, NULL_TREE)
+{
+}
+
+/* Implementation of region::dump_to_pp vfunc for root_region. */
+
+void
+root_region::dump_to_pp (pretty_printer *pp, bool simple) const
+{
+ if (simple)
+ pp_string (pp, "root region");
+ else
+ pp_string (pp, "root_region()");
+}
+
+/* class symbolic_region : public map_region. */
+
+/* Implementation of region::accept vfunc for symbolic_region.
+   Visit the region itself, then the svalue of the pointer being
+   dereferenced, so that reachability walks see both.  */
+
+void
+symbolic_region::accept (visitor *v) const
+{
+  region::accept (v);
+  m_sval_ptr->accept (v);
+}
+
+/* Implementation of region::dump_to_pp vfunc for symbolic_region. */
+
+void
+symbolic_region::dump_to_pp (pretty_printer *pp, bool simple) const
+{
+  if (simple)
+    {
+      /* Print as a C-style dereference: "(*PTR)".  */
+      pp_string (pp, "(*");
+      m_sval_ptr->dump_to_pp (pp, simple);
+      pp_string (pp, ")");
+    }
+  else
+    {
+      pp_string (pp, "symbolic_region(");
+      get_parent_region ()->dump_to_pp (pp, simple);
+      pp_string (pp, ", ");
+      print_quoted_type (pp, get_type ());
+      pp_string (pp, ", ");
+      m_sval_ptr->dump_to_pp (pp, simple);
+      pp_string (pp, ")");
+    }
+}
+
+/* class decl_region : public region. */
+
+/* Implementation of region::dump_to_pp vfunc for decl_region. */
+
+void
+decl_region::dump_to_pp (pretty_printer *pp, bool simple) const
+{
+  if (simple)
+    pp_printf (pp, "%E", m_decl);
+  else
+    {
+      pp_string (pp, "decl_region(");
+      get_parent_region ()->dump_to_pp (pp, simple);
+      pp_string (pp, ", ");
+      print_quoted_type (pp, get_type ());
+      pp_printf (pp, ", %qE)", m_decl);
+    }
+}
+
+/* Get the stack depth for the frame containing this decl, or 0
+   for a global. */
+
+int
+decl_region::get_stack_depth () const
+{
+  if (get_parent_region () == NULL)
+    return 0;
+  if (const frame_region *frame_reg
+	= get_parent_region ()->dyn_cast_frame_region ())
+    return frame_reg->get_stack_depth ();
+  /* Parent is not a frame (e.g. the globals region): treat as depth 0.  */
+  return 0;
+}
+
+/* class field_region : public region. */
+
+/* Implementation of region::dump_to_pp vfunc for field_region. */
+
+void
+field_region::dump_to_pp (pretty_printer *pp, bool simple) const
+{
+  if (simple)
+    {
+      /* Print as "PARENT.FIELD".  */
+      get_parent_region ()->dump_to_pp (pp, simple);
+      pp_string (pp, ".");
+      pp_printf (pp, "%E", m_field);
+    }
+  else
+    {
+      pp_string (pp, "field_region(");
+      get_parent_region ()->dump_to_pp (pp, simple);
+      pp_string (pp, ", ");
+      print_quoted_type (pp, get_type ());
+      pp_printf (pp, ", %qE)", m_field);
+    }
+}
+
+/* class element_region : public region. */
+
+/* Implementation of region::accept vfunc for element_region.
+   Visit the region itself, then the index svalue.  */
+
+void
+element_region::accept (visitor *v) const
+{
+  region::accept (v);
+  m_index->accept (v);
+}
+
+/* Implementation of region::dump_to_pp vfunc for element_region. */
+
+void
+element_region::dump_to_pp (pretty_printer *pp, bool simple) const
+{
+  if (simple)
+    {
+      /* Print as "PARENT[INDEX]".  */
+      //pp_string (pp, "(");
+      get_parent_region ()->dump_to_pp (pp, simple);
+      pp_string (pp, "[");
+      m_index->dump_to_pp (pp, simple);
+      pp_string (pp, "]");
+      //pp_string (pp, ")");
+    }
+  else
+    {
+      pp_string (pp, "element_region(");
+      get_parent_region ()->dump_to_pp (pp, simple);
+      pp_string (pp, ", ");
+      print_quoted_type (pp, get_type ());
+      pp_string (pp, ", ");
+      m_index->dump_to_pp (pp, simple);
+      pp_printf (pp, ")");
+    }
+}
+
+/* class offset_region : public region. */
+
+/* Implementation of region::accept vfunc for offset_region.
+   Visit the region itself, then the byte-offset svalue.  */
+
+void
+offset_region::accept (visitor *v) const
+{
+  region::accept (v);
+  m_byte_offset->accept (v);
+}
+
+/* Implementation of region::dump_to_pp vfunc for offset_region. */
+
+void
+offset_region::dump_to_pp (pretty_printer *pp, bool simple) const
+{
+  if (simple)
+    {
+      /* Print as "PARENT+OFFSET".  */
+      //pp_string (pp, "(");
+      get_parent_region ()->dump_to_pp (pp, simple);
+      pp_string (pp, "+");
+      m_byte_offset->dump_to_pp (pp, simple);
+      //pp_string (pp, ")");
+    }
+  else
+    {
+      pp_string (pp, "offset_region(");
+      get_parent_region ()->dump_to_pp (pp, simple);
+      pp_string (pp, ", ");
+      print_quoted_type (pp, get_type ());
+      pp_string (pp, ", ");
+      m_byte_offset->dump_to_pp (pp, simple);
+      pp_printf (pp, ")");
+    }
+}
+
+/* class cast_region : public region. */
+
+/* Implementation of region::accept vfunc for cast_region.
+   Visit the region itself, then the region being cast.  */
+
+void
+cast_region::accept (visitor *v) const
+{
+  region::accept (v);
+  m_original_region->accept (v);
+}
+
+/* Implementation of region::dump_to_pp vfunc for cast_region. */
+
+void
+cast_region::dump_to_pp (pretty_printer *pp, bool simple) const
+{
+  if (simple)
+    {
+      pp_string (pp, "CAST_REG(");
+      print_quoted_type (pp, get_type ());
+      pp_string (pp, ", ");
+      m_original_region->dump_to_pp (pp, simple);
+      pp_string (pp, ")");
+    }
+  else
+    {
+      /* Note: argument order is reversed relative to the simple form.  */
+      pp_string (pp, "cast_region(");
+      m_original_region->dump_to_pp (pp, simple);
+      pp_string (pp, ", ");
+      print_quoted_type (pp, get_type ());
+      pp_printf (pp, ")");
+    }
+}
+
+/* class heap_allocated_region : public region. */
+
+/* Implementation of region::dump_to_pp vfunc for heap_allocated_region.
+   The numeric id distinguishes multiple dynamic allocations in dumps.  */
+
+void
+heap_allocated_region::dump_to_pp (pretty_printer *pp, bool simple) const
+{
+  if (simple)
+    pp_printf (pp, "HEAP_ALLOCATED_REGION(%i)", get_id ());
+  else
+    pp_printf (pp, "heap_allocated_region(%i)", get_id ());
+}
+
+/* class alloca_region : public region. */
+
+/* Implementation of region::dump_to_pp vfunc for alloca_region. */
+
+void
+alloca_region::dump_to_pp (pretty_printer *pp, bool simple) const
+{
+  if (simple)
+    pp_string (pp, "ALLOCA_REGION");
+  else
+    pp_string (pp, "alloca_region()");
+}
+
+/* class string_region : public region. */
+
+/* Implementation of region::dump_to_pp vfunc for string_region. */
+
+void
+string_region::dump_to_pp (pretty_printer *pp, bool simple) const
+{
+  if (simple)
+    dump_tree (pp, m_string_cst);
+  else
+    {
+      /* Also print the STRING_CST's address, to distinguish
+	 otherwise-identical string constants.  */
+      pp_string (pp, "string_region(");
+      dump_tree (pp, m_string_cst);
+      pp_string (pp, " (");
+      pp_pointer (pp, m_string_cst);
+      pp_string (pp, "))");
+    }
+}
+
+/* class unknown_region : public region. */
+
+/* Implementation of region::dump_to_pp vfunc for unknown_region.
+   SIMPLE is ignored; there is only one form.  */
+
+void
+unknown_region::dump_to_pp (pretty_printer *pp, bool /*simple*/) const
+{
+  pp_string (pp, "UNKNOWN_REGION");
+}
+
+} // namespace ana
+
+#endif /* #if ENABLE_ANALYZER */
#include "analyzer/pending-diagnostic.h"
#include "analyzer/function-set.h"
#include "analyzer/analyzer-selftests.h"
+#include "tristate.h"
+#include "selftest.h"
+#include "analyzer/call-string.h"
+#include "analyzer/program-point.h"
+#include "analyzer/store.h"
+#include "analyzer/region-model.h"
#if ENABLE_ANALYZER
bool inherited_state_p () const FINAL OVERRIDE { return false; }
+ state_machine::state_t
+ get_default_state (const svalue *sval) const FINAL OVERRIDE
+ {
+ if (tree cst = sval->maybe_get_constant ())
+ {
+ if (zerop (cst))
+ return m_null;
+ }
+ return m_start;
+ }
+
bool on_stmt (sm_context *sm_ctxt,
const supernode *node,
const gimple *stmt) const FINAL OVERRIDE;
{
tree lhs = gimple_call_lhs (call);
if (lhs)
- {
- lhs = sm_ctxt->get_readable_tree (lhs);
- sm_ctxt->on_transition (node, stmt, lhs, m_start, m_unchecked);
- }
+ sm_ctxt->on_transition (node, stmt, lhs, m_start, m_unchecked);
else
{
/* TODO: report leak. */
if (is_named_call_p (callee_fndecl, "fclose", call, 1))
{
tree arg = gimple_call_arg (call, 0);
- arg = sm_ctxt->get_readable_tree (arg);
+ tree diag_arg = sm_ctxt->get_diagnostic_tree (arg);
sm_ctxt->on_transition (node, stmt, arg, m_start, m_closed);
sm_ctxt->on_transition (node, stmt , arg, m_nonnull, m_closed);
sm_ctxt->warn_for_state (node, stmt, arg, m_closed,
- new double_fclose (*this, arg));
+ new double_fclose (*this, diag_arg));
sm_ctxt->on_transition (node, stmt, arg, m_closed, m_stop);
return true;
}
#include "analyzer/analyzer-logging.h"
#include "analyzer/sm.h"
#include "analyzer/pending-diagnostic.h"
+#include "tristate.h"
+#include "selftest.h"
+#include "analyzer/call-string.h"
+#include "analyzer/program-point.h"
+#include "analyzer/store.h"
+#include "analyzer/region-model.h"
#if ENABLE_ANALYZER
bool inherited_state_p () const FINAL OVERRIDE { return false; }
+ state_machine::state_t
+ get_default_state (const svalue *sval) const FINAL OVERRIDE
+ {
+ if (tree cst = sval->maybe_get_constant ())
+ {
+ if (zerop (cst))
+ return m_null;
+ }
+ if (const region_svalue *ptr = sval->dyn_cast_region_svalue ())
+ {
+ const region *reg = ptr->get_pointee ();
+ if (reg->get_kind () == RK_STRING)
+ return m_non_heap;
+ }
+ return m_start;
+ }
+
bool on_stmt (sm_context *sm_ctxt,
const supernode *node,
const gimple *stmt) const FINAL OVERRIDE;
bool can_purge_p (state_t s) const FINAL OVERRIDE;
pending_diagnostic *on_leak (tree var) const FINAL OVERRIDE;
+ bool reset_when_passed_to_unknown_fn_p (state_t s,
+ bool is_mutable) const FINAL OVERRIDE;
+
/* Start state. */
state_t m_start;
return label_text::borrow ("allocated here");
if (change.m_old_state == m_sm.m_unchecked
&& change.m_new_state == m_sm.m_nonnull)
- return change.formatted_print ("assuming %qE is non-NULL",
- change.m_expr);
+ {
+ if (change.m_expr)
+ return change.formatted_print ("assuming %qE is non-NULL",
+ change.m_expr);
+ else
+ return change.formatted_print ("assuming %qs is non-NULL",
+ "<unknown>");
+ }
if (change.m_new_state == m_sm.m_null)
{
if (change.m_old_state == m_sm.m_unchecked)
- return change.formatted_print ("assuming %qE is NULL",
- change.m_expr);
+ {
+ if (change.m_expr)
+ return change.formatted_print ("assuming %qE is NULL",
+ change.m_expr);
+ else
+ return change.formatted_print ("assuming %qs is NULL",
+ "<unknown>");
+ }
else
- return change.formatted_print ("%qE is NULL",
- change.m_expr);
+ {
+ if (change.m_expr)
+ return change.formatted_print ("%qE is NULL",
+ change.m_expr);
+ else
+ return change.formatted_print ("%qs is NULL",
+ "<unknown>");
+ }
}
return label_text ();
auto_diagnostic_group d;
diagnostic_metadata m;
m.add_cwe (690);
- bool warned = warning_meta (rich_loc, m, OPT_Wanalyzer_null_argument,
- "use of NULL %qE where non-null expected",
- m_arg);
+
+ bool warned;
+ if (zerop (m_arg))
+ warned = warning_meta (rich_loc, m, OPT_Wanalyzer_null_argument,
+ "use of NULL where non-null expected");
+ else
+ warned = warning_meta (rich_loc, m, OPT_Wanalyzer_null_argument,
+ "use of NULL %qE where non-null expected",
+ m_arg);
if (warned)
inform_nonnull_attribute (m_fndecl, m_arg_idx);
return warned;
label_text describe_final_event (const evdesc::final_event &ev) FINAL OVERRIDE
{
- return ev.formatted_print ("argument %u (%qE) NULL"
- " where non-null expected",
- m_arg_idx + 1, ev.m_expr);
+ if (zerop (ev.m_expr))
+ return ev.formatted_print ("argument %u NULL where non-null expected",
+ m_arg_idx + 1);
+ else
+ return ev.formatted_print ("argument %u (%qE) NULL"
+ " where non-null expected",
+ m_arg_idx + 1, ev.m_expr);
}
private:
{
diagnostic_metadata m;
m.add_cwe (401);
- return warning_meta (rich_loc, m, OPT_Wanalyzer_malloc_leak,
- "leak of %qE", m_arg);
+ if (m_arg)
+ return warning_meta (rich_loc, m, OPT_Wanalyzer_malloc_leak,
+ "leak of %qE", m_arg);
+ else
+ return warning_meta (rich_loc, m, OPT_Wanalyzer_malloc_leak,
+ "leak of %qs", "<unknown>");
}
label_text describe_state_change (const evdesc::state_change &change)
label_text describe_final_event (const evdesc::final_event &ev) FINAL OVERRIDE
{
- if (m_malloc_event.known_p ())
- return ev.formatted_print ("%qE leaks here; was allocated at %@",
- ev.m_expr, &m_malloc_event);
+ if (ev.m_expr)
+ {
+ if (m_malloc_event.known_p ())
+ return ev.formatted_print ("%qE leaks here; was allocated at %@",
+ ev.m_expr, &m_malloc_event);
+ else
+ return ev.formatted_print ("%qE leaks here", ev.m_expr);
+ }
else
- return ev.formatted_print ("%qE leaks here", ev.m_expr);
+ {
+ if (m_malloc_event.known_p ())
+ return ev.formatted_print ("%qs leaks here; was allocated at %@",
+ "<unknown>", &m_malloc_event);
+ else
+ return ev.formatted_print ("%qs leaks here", "<unknown>");
+ }
}
private:
{
tree lhs = gimple_call_lhs (call);
if (lhs)
- {
- lhs = sm_ctxt->get_readable_tree (lhs);
- sm_ctxt->on_transition (node, stmt, lhs, m_start, m_unchecked);
- }
+ sm_ctxt->on_transition (node, stmt, lhs, m_start, m_unchecked);
else
{
/* TODO: report leak. */
{
tree lhs = gimple_call_lhs (call);
if (lhs)
- {
- lhs = sm_ctxt->get_readable_tree (lhs);
- sm_ctxt->on_transition (node, stmt, lhs, m_start, m_non_heap);
- }
+ sm_ctxt->on_transition (node, stmt, lhs, m_start, m_non_heap);
return true;
}
|| is_named_call_p (callee_fndecl, "__builtin_free", call, 1))
{
tree arg = gimple_call_arg (call, 0);
-
- arg = sm_ctxt->get_readable_tree (arg);
+ tree diag_arg = sm_ctxt->get_diagnostic_tree (arg);
/* start/unchecked/nonnull -> freed. */
sm_ctxt->on_transition (node, stmt, arg, m_start, m_freed);
/* freed -> stop, with warning. */
sm_ctxt->warn_for_state (node, stmt, arg, m_freed,
- new double_free (*this, arg));
+ new double_free (*this, diag_arg));
sm_ctxt->on_transition (node, stmt, arg, m_freed, m_stop);
/* non-heap -> stop, with warning. */
sm_ctxt->warn_for_state (node, stmt, arg, m_non_heap,
- new free_of_non_heap (*this, arg));
+ new free_of_non_heap (*this, diag_arg));
sm_ctxt->on_transition (node, stmt, arg, m_non_heap, m_stop);
return true;
}
if (bitmap_empty_p (nonnull_args)
|| bitmap_bit_p (nonnull_args, i))
{
+ tree diag_arg = sm_ctxt->get_diagnostic_tree (arg);
sm_ctxt->warn_for_state
(node, stmt, arg, m_unchecked,
- new possible_null_arg (*this, arg, callee_fndecl, i));
+ new possible_null_arg (*this, diag_arg, callee_fndecl,
+ i));
sm_ctxt->on_transition (node, stmt, arg, m_unchecked,
m_nonnull);
sm_ctxt->warn_for_state
(node, stmt, arg, m_null,
- new null_arg (*this, arg, callee_fndecl, i));
+ new null_arg (*this, diag_arg, callee_fndecl, i));
sm_ctxt->on_transition (node, stmt, arg, m_null, m_stop);
}
}
}
}
- if (tree lhs = is_zero_assignment (stmt))
+ if (tree lhs = sm_ctxt->is_zero_assignment (stmt))
if (any_pointer_p (lhs))
on_zero_assignment (sm_ctxt, node, stmt,lhs);
+ /* If we have "LHS = &EXPR;" and EXPR is something other than a MEM_REF,
+ transition LHS from start to non_heap.
+ Doing it for ADDR_EXPR(MEM_REF()) is likely wrong, and can lead to
+ unbounded chains of unmergeable sm-state on pointer arithmetic in loops
+ when optimization is enabled. */
if (const gassign *assign_stmt = dyn_cast <const gassign *> (stmt))
{
enum tree_code op = gimple_assign_rhs_code (assign_stmt);
tree lhs = gimple_assign_lhs (assign_stmt);
if (lhs)
{
- lhs = sm_ctxt->get_readable_tree (lhs);
- sm_ctxt->on_transition (node, stmt, lhs, m_start, m_non_heap);
+ tree addr_expr = gimple_assign_rhs1 (assign_stmt);
+ if (TREE_CODE (TREE_OPERAND (addr_expr, 0)) != MEM_REF)
+ sm_ctxt->on_transition (node, stmt, lhs, m_start, m_non_heap);
}
}
}
if (TREE_CODE (op) == MEM_REF)
{
tree arg = TREE_OPERAND (op, 0);
- arg = sm_ctxt->get_readable_tree (arg);
+ tree diag_arg = sm_ctxt->get_diagnostic_tree (arg);
sm_ctxt->warn_for_state (node, stmt, arg, m_unchecked,
- new possible_null_deref (*this, arg));
+ new possible_null_deref (*this, diag_arg));
sm_ctxt->on_transition (node, stmt, arg, m_unchecked, m_nonnull);
sm_ctxt->warn_for_state (node, stmt, arg, m_null,
- new null_deref (*this, arg));
+ new null_deref (*this, diag_arg));
sm_ctxt->on_transition (node, stmt, arg, m_null, m_stop);
sm_ctxt->warn_for_state (node, stmt, arg, m_freed,
- new use_after_free (*this, arg));
+ new use_after_free (*this, diag_arg));
sm_ctxt->on_transition (node, stmt, arg, m_freed, m_stop);
}
}
return new malloc_leak (*this, var);
}
+/* Implementation of state_machine::reset_when_passed_to_unknown_fn_p vfunc
+   for malloc_state_machine.
+   Return true if state S should be reset to "start" when the value is
+   passed to (or reachable from) a call to an unknown function.
+   IS_MUTABLE is true if the value was passed as a non-const pointer.  */
+
+bool
+malloc_state_machine::reset_when_passed_to_unknown_fn_p (state_t s,
+							 bool is_mutable) const
+{
+  /* An on-stack ptr doesn't stop being stack-allocated when passed to an
+     unknown fn.  */
+  if (s == m_non_heap)
+    return false;
+
+  /* Otherwise, pointers passed as non-const can be freed.  */
+  return is_mutable;
+}
+
/* Shared logic for handling GIMPLE_ASSIGNs and GIMPLE_PHIs that
assign zero to LHS. */
const gimple *stmt,
tree arg) const
{
+ tree diag_arg = sm_ctxt->get_diagnostic_tree (arg);
sm_ctxt->warn_for_state (node, stmt, arg, m_sensitive,
- new exposure_through_output_file (*this, arg));
+ new exposure_through_output_file (*this, diag_arg));
}
/* Implementation of state_machine::on_stmt vfunc for
#include "tristate.h"
#include "ordered-hash-map.h"
#include "selftest.h"
+#include "analyzer/call-string.h"
+#include "analyzer/program-point.h"
+#include "analyzer/store.h"
#include "analyzer/region-model.h"
#include "analyzer/program-state.h"
#include "analyzer/checker-path.h"
#include "gimple-iterator.h"
#include "cgraph.h"
#include "analyzer/supergraph.h"
-#include "analyzer/call-string.h"
-#include "analyzer/program-point.h"
#include "alloc-pool.h"
#include "fibonacci_heap.h"
#include "analyzer/diagnostic-manager.h"
if (change.is_global_p ()
&& change.m_new_state == m_sm.m_in_signal_handler)
{
- function *handler
- = change.m_event.m_dst_state.m_region_model->get_current_function ();
+ function *handler = change.m_event.get_dest_function ();
return change.formatted_print ("registering %qD as signal handler",
handler->decl);
}
update_model_for_signal_handler (region_model *model,
function *handler_fun)
{
+ gcc_assert (model);
/* Purge all state within MODEL. */
- *model = region_model ();
+ *model = region_model (model->get_manager ());
model->push_frame (handler_fun, NULL, NULL);
}
exploded_node *dst_enode = eg->get_or_create_node (entering_handler,
state_entering_handler,
- NULL);
+ src_enode);
if (dst_enode)
- eg->add_edge (src_enode, dst_enode, NULL, state_change (),
+ eg->add_edge (src_enode, dst_enode, NULL, /*state_change (),*/
new signal_delivery_edge_info_t ());
}
if (is_named_call_p (callee_fndecl, "fread", call, 4))
{
tree arg = gimple_call_arg (call, 0);
- arg = sm_ctxt->get_readable_tree (arg);
sm_ctxt->on_transition (node, stmt, arg, m_start, m_tainted);
if (op == ARRAY_REF)
{
tree arg = TREE_OPERAND (rhs1, 1);
- arg = sm_ctxt->get_readable_tree (arg);
+ tree diag_arg = sm_ctxt->get_diagnostic_tree (arg);
/* Unsigned types have an implicit lower bound. */
bool is_unsigned = false;
/* Complain about missing bounds. */
sm_ctxt->warn_for_state
(node, stmt, arg, m_tainted,
- new tainted_array_index (*this, arg,
+ new tainted_array_index (*this, diag_arg,
is_unsigned
? BOUNDS_LOWER : BOUNDS_NONE));
sm_ctxt->on_transition (node, stmt, arg, m_tainted, m_stop);
/* Complain about missing upper bound. */
sm_ctxt->warn_for_state (node, stmt, arg, m_has_lb,
- new tainted_array_index (*this, arg,
+ new tainted_array_index (*this, diag_arg,
BOUNDS_LOWER));
sm_ctxt->on_transition (node, stmt, arg, m_has_lb, m_stop);
if (!is_unsigned)
{
sm_ctxt->warn_for_state (node, stmt, arg, m_has_ub,
- new tainted_array_index (*this, arg,
+ new tainted_array_index (*this, diag_arg,
BOUNDS_UPPER));
sm_ctxt->on_transition (node, stmt, arg, m_has_ub, m_stop);
}
#if ENABLE_ANALYZER
-/* If STMT is an assignment from zero, return the LHS. */
-
-tree
-is_zero_assignment (const gimple *stmt)
-{
- const gassign *assign_stmt = dyn_cast <const gassign *> (stmt);
- if (!assign_stmt)
- return NULL_TREE;
-
- enum tree_code op = gimple_assign_rhs_code (assign_stmt);
- if (TREE_CODE_CLASS (op) != tcc_constant)
- return NULL_TREE;
-
- if (!zerop (gimple_assign_rhs1 (assign_stmt)))
- return NULL_TREE;
-
- return gimple_assign_lhs (assign_stmt);
-}
+namespace ana {
/* Return true if VAR has pointer or reference type. */
return POINTER_TYPE_P (TREE_TYPE (var));
}
-namespace ana {
-
/* Add a state with name NAME to this state_machine.
The string is required to outlive the state_machine.
/* Utility functions for use by state machines. */
-extern tree is_zero_assignment (const gimple *stmt);
-extern bool any_pointer_p (tree var);
-
namespace ana {
class state_machine;
class sm_context;
class pending_diagnostic;
+extern bool any_pointer_p (tree var);
+
/* An abstract base class for a state machine describing an API.
A mapping from state IDs to names, and various virtual functions
for pattern-matching on statements. */
within a heap-allocated struct. */
virtual bool inherited_state_p () const = 0;
+ virtual state_machine::state_t get_default_state (const svalue *) const
+ {
+ return 0;
+ }
+
const char *get_name () const { return m_name; }
const char *get_state_name (state_t s) const;
return NULL;
}
+ /* Return true if S should be reset to "start" for values passed (or reachable
+ from) calls to unknown functions. IS_MUTABLE is true for pointers as
+ non-const, false if only passed as const-pointers.
+
+ For example, in sm-malloc.cc, an on-stack ptr doesn't stop being
+ stack-allocated when passed to an unknown fn, but a malloc-ed pointer
+ could be freed when passed to an unknown fn (unless passed as "const"). */
+ virtual bool reset_when_passed_to_unknown_fn_p (state_t s ATTRIBUTE_UNUSED,
+ bool is_mutable) const
+ {
+ return is_mutable;
+ }
+
void validate (state_t s) const;
void dump_to_pp (pretty_printer *pp) const;
tree var, state_machine::state_t state,
pending_diagnostic *d) = 0;
- virtual tree get_readable_tree (tree expr)
+ /* For use when generating trees when creating pending_diagnostics, so that
+ rather than e.g.
+ "double-free of '<unknown>'"
+ we can print:
+ "double-free of 'inbuf.data'". */
+ virtual tree get_diagnostic_tree (tree expr)
{
return expr;
}
a signal handler. */
virtual void on_custom_transition (custom_transition *transition) = 0;
+ /* If STMT is an assignment known to assign zero to its LHS, return
+ the LHS.
+ Otherwise return NULL_TREE. */
+ virtual tree is_zero_assignment (const gimple *stmt) = 0;
+
protected:
sm_context (int sm_idx, const state_machine &sm)
: m_sm_idx (sm_idx), m_sm (sm) {}
--- /dev/null
+/* Classes for modeling the state of memory.
+ Copyright (C) 2020 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tree.h"
+#include "function.h"
+#include "basic-block.h"
+#include "gimple.h"
+#include "gimple-iterator.h"
+#include "diagnostic-core.h"
+#include "graphviz.h"
+#include "options.h"
+#include "cgraph.h"
+#include "tree-dfa.h"
+#include "stringpool.h"
+#include "convert.h"
+#include "target.h"
+#include "fold-const.h"
+#include "tree-pretty-print.h"
+#include "diagnostic-color.h"
+#include "diagnostic-metadata.h"
+#include "tristate.h"
+#include "bitmap.h"
+#include "selftest.h"
+#include "function.h"
+#include "analyzer/analyzer.h"
+#include "analyzer/analyzer-logging.h"
+#include "ordered-hash-map.h"
+#include "options.h"
+#include "cgraph.h"
+#include "cfg.h"
+#include "digraph.h"
+#include "analyzer/supergraph.h"
+#include "sbitmap.h"
+#include "analyzer/call-string.h"
+#include "analyzer/program-point.h"
+#include "analyzer/store.h"
+#include "analyzer/region-model.h"
+#include "analyzer/analyzer-selftests.h"
+#include "stor-layout.h"
+
+#if ENABLE_ANALYZER
+
+namespace ana {
+
+/* Get a human-readable string for KIND for dumps. */
+
+const char *binding_kind_to_string (enum binding_kind kind)
+{
+  switch (kind)
+    {
+    default:
+    case BK_empty:
+    case BK_deleted:
+      /* We shouldn't be attempting to print the hash kinds.
+	 (BK_empty and BK_deleted exist only as hash-table sentinels.)  */
+      gcc_unreachable ();
+    case BK_direct:
+      return "direct";
+    case BK_default:
+      return "default";
+    }
+}
+
+/* class binding_key. */
+
+/* Make a binding_key for accessing region R with kind KIND, using MGR.
+   Use a concrete binding when R's offset and bit-size are known at
+   compile time; otherwise fall back to a symbolic binding on R.  */
+
+const binding_key *
+binding_key::make (store_manager *mgr, const region *r,
+		   enum binding_kind kind)
+{
+  region_offset offset = r->get_offset ();
+  if (offset.symbolic_p ())
+    return mgr->get_symbolic_binding (r, kind);
+  else
+    {
+      bit_size_t bit_size;
+      if (r->get_bit_size (&bit_size))
+	return mgr->get_concrete_binding (offset.get_bit_offset (),
+					  bit_size, kind);
+      else
+	return mgr->get_symbolic_binding (r, kind);
+    }
+}
+
+/* Base class implementation of binding_key::dump_to_pp vfunc.
+   Subclasses call this first, then append their own fields.  */
+
+void
+binding_key::dump_to_pp (pretty_printer *pp, bool /*simple*/) const
+{
+  pp_printf (pp, "kind: %s", binding_kind_to_string (m_kind));
+}
+
+/* Dump this binding_key to stderr. */
+
+DEBUG_FUNCTION void
+binding_key::dump (bool simple) const
+{
+  pretty_printer pp;
+  pp_format_decoder (&pp) = default_tree_printer;
+  pp_show_color (&pp) = pp_show_color (global_dc->printer);
+  pp.buffer->stream = stderr;
+  dump_to_pp (&pp, simple);
+  pp_newline (&pp);
+  pp_flush (&pp);
+}
+
+/* qsort callback: compare two binding_key pointers via binding_key::cmp.  */
+
+int
+binding_key::cmp_ptrs (const void *p1, const void *p2)
+{
+  const binding_key * const *pk1 = (const binding_key * const *)p1;
+  const binding_key * const *pk2 = (const binding_key * const *)p2;
+  return cmp (*pk1, *pk2);
+}
+
+/* Comparator for binding_keys.
+   Order first by kind, then concrete-vs-symbolic, then by the
+   subclass-specific fields.  */
+
+int
+binding_key::cmp (const binding_key *k1, const binding_key *k2)
+{
+  enum binding_kind kind1 = k1->get_kind ();
+  enum binding_kind kind2 = k2->get_kind ();
+  if (kind1 != kind2)
+    return (int)kind1 - (int)kind2;
+
+  int concrete1 = k1->concrete_p ();
+  int concrete2 = k2->concrete_p ();
+  if (int concrete_cmp = concrete1 - concrete2)
+    return concrete_cmp;
+  if (concrete1)
+    {
+      const concrete_binding *b1 = (const concrete_binding *)k1;
+      const concrete_binding *b2 = (const concrete_binding *)k2;
+      if (int start_cmp = wi::cmp (b1->get_start_bit_offset (),
+				   b2->get_start_bit_offset (),
+				   SIGNED))
+	return start_cmp;
+      return wi::cmp (b1->get_next_bit_offset (), b2->get_next_bit_offset (),
+		      SIGNED);
+    }
+  else
+    {
+      /* NOTE(review): symbolic bindings are ordered by pointer address,
+	 so this ordering is deterministic within a run but not stable
+	 across runs — acceptable for dump-ordering, not for anything
+	 affecting diagnostics output; confirm callers.  */
+      const symbolic_binding *s1 = (const symbolic_binding *)k1;
+      const symbolic_binding *s2 = (const symbolic_binding *)k2;
+      if (s1 > s2)
+	return 1;
+      if (s1 < s2)
+	return -1;
+      return 0;
+    }
+}
+
+/* class concrete_binding : public binding_key. */
+
+/* Implementation of binding_key::dump_to_pp vfunc for concrete_binding. */
+
+void
+concrete_binding::dump_to_pp (pretty_printer *pp, bool simple) const
+{
+  binding_key::dump_to_pp (pp, simple);
+  pp_string (pp, ", start: ");
+  pp_wide_int (pp, m_start_bit_offset, SIGNED);
+  pp_string (pp, ", size: ");
+  pp_wide_int (pp, m_size_in_bits, SIGNED);
+  pp_string (pp, ", next: ");
+  pp_wide_int (pp, get_next_bit_offset (), SIGNED);
+}
+
+/* Return true if this binding overlaps with OTHER.
+   Standard half-open interval intersection test on bit ranges:
+   [start, next) of each binding.  */
+
+bool
+concrete_binding::overlaps_p (const concrete_binding &other) const
+{
+  if (m_start_bit_offset < other.get_next_bit_offset ()
+      && get_next_bit_offset () > other.get_start_bit_offset ())
+    return true;
+  return false;
+}
+
+/* class symbolic_binding : public binding_key. */
+
+/* Implementation of binding_key::dump_to_pp vfunc for symbolic_binding.  */
+
+void
+symbolic_binding::dump_to_pp (pretty_printer *pp, bool simple) const
+{
+  binding_key::dump_to_pp (pp, simple);
+  pp_string (pp, ", region: ");
+  m_region->dump_to_pp (pp, simple);
+}
+
+/* The store is oblivious to the types of the svalues bound within
+   it: any type can get bound at any location.
+   Simplify any casts before binding.
+
+   For example, if we have:
+     struct big { int ia[1024]; };
+     struct big src, dst;
+     memcpy (&dst, &src, sizeof (struct big));
+   this reaches us in gimple form as:
+     MEM <unsigned char[4096]> [(char * {ref-all})&dst]
+       = MEM <unsigned char[4096]> [(char * {ref-all})&src];
+   Using cast_region when handling the MEM_REF would give us:
+     INIT_VAL(CAST_REG(unsigned char[4096], src))
+   as rhs_sval, but we can fold that into a cast svalue:
+     CAST(unsigned char[4096], INIT_VAL(src))
+   We can discard that cast from the svalue when binding it in
+   the store for "dst", and simply store:
+     cluster for: dst
+       key:   {kind: direct, start: 0, size: 32768, next: 32768}
+       value: ‘struct big’ {INIT_VAL(src)}. */
+
+static const svalue *
+simplify_for_binding (const svalue *sval)
+{
+  /* Strip at most one level of cast, if present.  */
+  if (const svalue *cast_sval = sval->maybe_undo_cast ())
+    sval = cast_sval;
+  return sval;
+}
+
+/* class binding_map. */
+
+/* binding_map's copy ctor. */
+
+binding_map::binding_map (const binding_map &other)
+: m_map (other.m_map)
+{
+}
+
+/* binding_map's assignment operator. */
+
+binding_map&
+binding_map::operator=(const binding_map &other)
+{
+  /* For now, assume we only ever copy to an empty cluster.  */
+  gcc_assert (m_map.elements () == 0);
+  for (map_t::iterator iter = other.m_map.begin (); iter != other.m_map.end ();
+       ++iter)
+    {
+      const binding_key *key = (*iter).first;
+      const svalue *sval = (*iter).second;
+      m_map.put (key, sval);
+    }
+  return *this;
+}
+
+/* binding_map's equality operator.
+   Two maps are equal iff they have the same set of keys bound to
+   the same svalue instances (keys and svalues are interned, so
+   pointer comparison suffices).  */
+
+bool
+binding_map::operator== (const binding_map &other) const
+{
+  if (m_map.elements () != other.m_map.elements ())
+    return false;
+
+  for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
+    {
+      const binding_key *key = (*iter).first;
+      const svalue *sval = (*iter).second;
+      /* const_cast needed since hash_map::get isn't const.  */
+      const svalue **other_slot
+	= const_cast <map_t &> (other.m_map).get (key);
+      if (other_slot == NULL)
+	return false;
+      if (sval != *other_slot)
+	return false;
+    }
+  gcc_checking_assert (hash () == other.hash ());
+  return true;
+}
+
+/* Generate a hash value for this binding_map.
+   XOR-combining the per-entry hashes makes the result independent of
+   the hash_map's internal iteration order.  */
+
+hashval_t
+binding_map::hash () const
+{
+  hashval_t result = 0;
+  for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
+    {
+      /* Use a new hasher for each key to avoid depending on the ordering
+	 of keys when accumulating the result.  */
+      inchash::hash hstate;
+      hstate.add_ptr ((*iter).first);
+      hstate.add_ptr ((*iter).second);
+      result ^= hstate.end ();
+    }
+  return result;
+}
+
+/* Dump a representation of this binding_map to PP.
+   SIMPLE controls how values and regions are to be printed.
+   If MULTILINE, then split the dump over multiple lines and
+   use whitespace for readability, otherwise put all on one line. */
+
+void
+binding_map::dump_to_pp (pretty_printer *pp, bool simple,
+			 bool multiline) const
+{
+  /* Collect and sort the keys so that dump output is deterministic
+     regardless of hash_map iteration order.  */
+  auto_vec <const binding_key *> binding_keys;
+  for (map_t::iterator iter = m_map.begin ();
+       iter != m_map.end (); ++iter)
+    {
+      const binding_key *key = (*iter).first;
+      binding_keys.safe_push (key);
+    }
+  binding_keys.qsort (binding_key::cmp_ptrs);
+
+  const binding_key *key;
+  unsigned i;
+  FOR_EACH_VEC_ELT (binding_keys, i, key)
+    {
+      /* const_cast needed since hash_map::get isn't const.  */
+      const svalue *value = *const_cast <map_t &> (m_map).get (key);
+      if (multiline)
+	{
+	  pp_string (pp, "    key:   {");
+	  key->dump_to_pp (pp, simple);
+	  pp_string (pp, "}");
+	  pp_newline (pp);
+	  pp_string (pp, "    value: ");
+	  if (tree t = value->get_type ())
+	    dump_quoted_tree (pp, t);
+	  pp_string (pp, " {");
+	  value->dump_to_pp (pp, simple);
+	  pp_string (pp, "}");
+	  pp_newline (pp);
+	}
+      else
+	{
+	  if (i > 0)
+	    pp_string (pp, ", ");
+	  pp_string (pp, "binding key: {");
+	  key->dump_to_pp (pp, simple);
+	  pp_string (pp, "}, value: {");
+	  value->dump_to_pp (pp, simple);
+	  pp_string (pp, "}");
+	}
+    }
+}
+
+/* Dump a multiline representation of this binding_map to stderr. */
+
+DEBUG_FUNCTION void
+binding_map::dump (bool simple) const
+{
+  pretty_printer pp;
+  pp_format_decoder (&pp) = default_tree_printer;
+  pp_show_color (&pp) = pp_show_color (global_dc->printer);
+  pp.buffer->stream = stderr;
+  dump_to_pp (&pp, simple, true);
+  pp_newline (&pp);
+  pp_flush (&pp);
+}
+
+/* class binding_cluster. */
+
+/* binding_cluster's copy ctor. */
+
+binding_cluster::binding_cluster (const binding_cluster &other)
+: m_base_region (other.m_base_region), m_map (other.m_map),
+  m_escaped (other.m_escaped), m_touched (other.m_touched)
+{
+}
+
+/* binding_cluster's assignment operator.
+   Only defined for clusters over the same base region.  */
+
+binding_cluster&
+binding_cluster::operator= (const binding_cluster &other)
+{
+  gcc_assert (m_base_region == other.m_base_region);
+  m_map = other.m_map;
+  m_escaped = other.m_escaped;
+  m_touched = other.m_touched;
+  return *this;
+}
+
+/* binding_cluster's equality operator. */
+
+bool
+binding_cluster::operator== (const binding_cluster &other) const
+{
+  if (m_map != other.m_map)
+    return false;
+
+  if (m_base_region != other.m_base_region)
+    return false;
+
+  if (m_escaped != other.m_escaped)
+    return false;
+
+  if (m_touched != other.m_touched)
+    return false;
+
+  gcc_checking_assert (hash () == other.hash ());
+
+  return true;
+}
+
+/* Generate a hash value for this binding_cluster.
+   Only the map is hashed; that's consistent with operator==, since
+   equal clusters necessarily have equal maps.  */
+
+hashval_t
+binding_cluster::hash () const
+{
+  return m_map.hash ();
+}
+
+/* Return true if this binding_cluster is symbolic
+   i.e. its base region is symbolic. */
+
+bool
+binding_cluster::symbolic_p () const
+{
+  return m_base_region->get_kind () == RK_SYMBOLIC;
+}
+
+/* Dump a representation of this binding_cluster to PP.
+ SIMPLE controls how values and regions are to be printed.
+ If MULTILINE, then split the dump over multiple lines and
+ use whitespace for readability, otherwise put all on one line. */
+
+void
+binding_cluster::dump_to_pp (pretty_printer *pp, bool simple,
+ bool multiline) const
+{
+ /* Show the flags first, then delegate to the map's dump. */
+ if (m_escaped)
+ {
+ if (multiline)
+ {
+ pp_string (pp, " ESCAPED");
+ pp_newline (pp);
+ }
+ else
+ pp_string (pp, "(ESCAPED)");
+ }
+ if (m_touched)
+ {
+ if (multiline)
+ {
+ pp_string (pp, " TOUCHED");
+ pp_newline (pp);
+ }
+ else
+ pp_string (pp, "(TOUCHED)");
+ }
+
+ m_map.dump_to_pp (pp, simple, multiline);
+}
+
+/* Dump a multiline representation of this binding_cluster to stderr. */
+
+DEBUG_FUNCTION void
+binding_cluster::dump (bool simple) const
+{
+ pretty_printer pp;
+ pp_format_decoder (&pp) = default_tree_printer;
+ pp_show_color (&pp) = pp_show_color (global_dc->printer);
+ /* Write directly to stderr, for use from within the debugger. */
+ pp.buffer->stream = stderr;
+ pp_string (&pp, " cluster for: ");
+ m_base_region->dump_to_pp (&pp, simple);
+ pp_string (&pp, ": ");
+ pp_newline (&pp);
+ dump_to_pp (&pp, simple, true);
+ pp_newline (&pp);
+ pp_flush (&pp);
+}
+
+/* Add a binding of SVAL of kind KIND to REG, unpacking SVAL if it is a
+ compound_sval. */
+
+void
+binding_cluster::bind (store_manager *mgr,
+ const region *reg, const svalue *sval,
+ binding_kind kind)
+{
+ /* Compound values are bound element-wise, rather than as a single
+ binding for the whole region. */
+ if (const compound_svalue *compound_sval
+ = sval->dyn_cast_compound_svalue ())
+ {
+ bind_compound_sval (mgr, reg, compound_sval);
+ return;
+ }
+
+ const binding_key *binding = binding_key::make (mgr, reg, kind);
+ bind_key (binding, sval);
+}
+
+/* Bind SVAL to KEY.
+ Unpacking of compound_svalues should already have been done by the
+ time this is called. */
+
+void
+binding_cluster::bind_key (const binding_key *key, const svalue *sval)
+{
+ gcc_assert (sval->get_kind () != SK_COMPOUND);
+
+ m_map.put (key, sval);
+ /* A write through a symbolic key means we can no longer trust
+ "default" bindings within this cluster. */
+ if (key->symbolic_p ())
+ m_touched = true;
+}
+
+/* Subroutine of binding_cluster::bind.
+ Unpack compound_svals when binding them, so that we bind them
+ element-wise. */
+
+void
+binding_cluster::bind_compound_sval (store_manager *mgr,
+ const region *reg,
+ const compound_svalue *compound_sval)
+{
+ region_offset reg_offset = reg->get_offset ();
+ /* If REG is at a symbolic offset we can't compute the concrete
+ offsets of the elements; conservatively clobber instead. */
+ if (reg_offset.symbolic_p ())
+ {
+ m_touched = true;
+ clobber_region (mgr, reg);
+ return;
+ }
+
+ for (map_t::iterator iter = compound_sval->begin ();
+ iter != compound_sval->end (); ++iter)
+ {
+ const binding_key *iter_key = (*iter).first;
+ const svalue *iter_sval = (*iter).second;
+
+ if (const concrete_binding *concrete_key
+ = iter_key->dyn_cast_concrete_binding ())
+ {
+ /* Rebase the element's offset from being relative to the
+ compound value to being relative to the cluster. */
+ bit_offset_t effective_start
+ = (concrete_key->get_start_bit_offset ()
+ + reg_offset.get_bit_offset ());
+ const concrete_binding *effective_concrete_key
+ = mgr->get_concrete_binding (effective_start,
+ concrete_key->get_size_in_bits (),
+ iter_key->get_kind ());
+ bind_key (effective_concrete_key, iter_sval);
+ }
+ else
+ gcc_unreachable ();
+ }
+}
+
+/* Remove all bindings overlapping REG within this cluster. */
+
+void
+binding_cluster::clobber_region (store_manager *mgr, const region *reg)
+{
+ remove_overlapping_bindings (mgr, reg);
+}
+
+/* Remove any bindings for REG within this cluster.
+ Only supported for decl regions; removes just the direct binding
+ for the whole of REG. */
+
+void
+binding_cluster::purge_region (store_manager *mgr, const region *reg)
+{
+ gcc_assert (reg->get_kind () == RK_DECL);
+ const binding_key *binding
+ = binding_key::make (mgr, const_cast<region *> (reg),
+ BK_direct);
+ m_map.remove (binding);
+}
+
+/* Mark REG within this cluster as being filled with zeroes.
+ Remove all bindings, add a default binding to zero, and clear the
+ TOUCHED flag. */
+
+void
+binding_cluster::zero_fill_region (store_manager *mgr, const region *reg)
+{
+ clobber_region (mgr, reg);
+
+ /* Add a default binding to zero. */
+ region_model_manager *sval_mgr = mgr->get_svalue_manager ();
+ tree cst_zero = build_int_cst (integer_type_node, 0);
+ const svalue *cst_sval = sval_mgr->get_or_create_constant_svalue (cst_zero);
+ const svalue *bound_sval = cst_sval;
+ /* Convert the zero to REG's type, if it has one. */
+ if (reg->get_type ())
+ bound_sval = sval_mgr->get_or_create_unaryop (reg->get_type (), NOP_EXPR,
+ cst_sval);
+ bind (mgr, reg, bound_sval, BK_default);
+
+ m_touched = false;
+}
+
+/* Mark REG within this cluster as being unknown. */
+
+void
+binding_cluster::mark_region_as_unknown (store_manager *mgr,
+ const region *reg)
+{
+ remove_overlapping_bindings (mgr, reg);
+
+ /* Add a default binding to "unknown". */
+ region_model_manager *sval_mgr = mgr->get_svalue_manager ();
+ const svalue *sval
+ = sval_mgr->get_or_create_unknown_svalue (reg->get_type ());
+ bind (mgr, reg, sval, BK_default);
+}
+
+/* Get any SVAL bound to REG within this cluster via kind KIND,
+ without checking parent regions of REG.
+ Return NULL if there is no such binding. */
+
+const svalue *
+binding_cluster::get_binding (store_manager *mgr,
+ const region *reg,
+ binding_kind kind) const
+{
+ const binding_key *reg_binding = binding_key::make (mgr, reg, kind);
+ const svalue *sval = m_map.get (reg_binding);
+ if (sval)
+ {
+ /* If we have a struct with a single field, then the binding of
+ the field will equal that of the struct, and looking up e.g.
+ PARENT_REG.field within:
+ cluster for PARENT_REG: INIT_VAL(OTHER_REG)
+ will erroneously return INIT_VAL(OTHER_REG), rather than
+ SUB_VALUE(INIT_VAL(OTHER_REG), FIELD) == INIT_VAL(OTHER_REG.FIELD).
+ Fix this issue by iterating upwards whilst the bindings are equal,
+ expressing the lookups as subvalues.
+ We have to gather a list of subregion accesses, then walk it
+ in reverse to get the subvalues. */
+ auto_vec<const region *> regions;
+ /* Walk up as long as the parent has the same binding but a
+ different type, recording each step. */
+ while (const region *parent_reg = reg->get_parent_region ())
+ {
+ const binding_key *parent_reg_binding
+ = binding_key::make (mgr, parent_reg, kind);
+ if (parent_reg_binding == reg_binding
+ && sval->get_type ()
+ && reg->get_type ()
+ && sval->get_type () != reg->get_type ())
+ {
+ regions.safe_push (reg);
+ reg = parent_reg;
+ }
+ else
+ break;
+ }
+ /* Only rewrite SVAL if we ended at a region whose type matches it;
+ otherwise leave the raw binding as-is. */
+ if (sval->get_type ()
+ && reg->get_type ()
+ && sval->get_type () == reg->get_type ())
+ {
+ unsigned i;
+ const region *iter_reg;
+ FOR_EACH_VEC_ELT_REVERSE (regions, i, iter_reg)
+ {
+ region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
+ sval = rmm_mgr->get_or_create_sub_svalue (reg->get_type (),
+ sval, iter_reg);
+ }
+ }
+ }
+ return sval;
+}
+
+/* Get any SVAL bound to REG within this cluster via kind KIND,
+ either directly for REG, or recursively checking for bindings within
+ parent regions and extracting subvalues if need be.
+ Return NULL if nothing is found. */
+
+const svalue *
+binding_cluster::get_binding_recursive (store_manager *mgr,
+ const region *reg,
+ enum binding_kind kind) const
+{
+ if (const svalue *sval = get_binding (mgr, reg, kind))
+ return sval;
+ /* Stop the recursion once we reach the cluster's own base region. */
+ if (reg != m_base_region)
+ if (const region *parent_reg = reg->get_parent_region ())
+ if (const svalue *parent_sval
+ = get_binding_recursive (mgr, parent_reg, kind))
+ {
+ /* Extract child svalue from parent svalue. */
+ region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
+ return rmm_mgr->get_or_create_sub_svalue (reg->get_type (),
+ parent_sval, reg);
+ }
+ return NULL;
+}
+
+/* Get any value bound for REG within this cluster.
+ Tries, in order: a direct binding, a default binding (unless the
+ cluster has been symbolically touched), an "unknown" value for
+ touched clusters, and finally a compound value assembled from
+ partial bindings. Returns NULL if none of these apply. */
+
+const svalue *
+binding_cluster::get_any_binding (store_manager *mgr,
+ const region *reg) const
+{
+ /* Look for a "direct" binding. */
+ if (const svalue *direct_sval
+ = get_binding_recursive (mgr, reg, BK_direct))
+ return direct_sval;
+
+ /* Look for a "default" binding, but not if there's been a symbolic
+ write. */
+ if (!m_touched)
+ if (const svalue *default_sval
+ = get_binding_recursive (mgr, reg, BK_default))
+ return default_sval;
+
+ /* If this cluster has been touched by a symbolic write, then the content
+ of any subregion not currently specifically bound is "UNKNOWN". */
+ if (m_touched)
+ {
+ region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
+ return rmm_mgr->get_or_create_unknown_svalue (reg->get_type ());
+ }
+
+ if (const svalue *compound_sval = maybe_get_compound_binding (mgr, reg))
+ return compound_sval;
+
+ /* Otherwise, the initial value, or uninitialized. */
+ return NULL;
+}
+
+/* Attempt to get a compound_svalue for the bindings within the cluster
+ affecting REG (which could be the base region itself).
+
+ Create a compound_svalue with the subset of bindings that affect REG,
+ offsetting them so that the offsets are relative to the start of REG
+ within the cluster.
+
+ For example, REG could be one element within an array of structs.
+
+ Return the resulting compound_svalue, or NULL if there's a problem
+ (symbolic offsets, non-concrete keys, or no relevant bindings). */
+
+const svalue *
+binding_cluster::maybe_get_compound_binding (store_manager *mgr,
+ const region *reg) const
+{
+ binding_map map;
+
+ /* Both the cluster's and REG's offsets must be concrete for the
+ relative-offset arithmetic below to be possible. */
+ region_offset cluster_offset = m_base_region->get_offset ();
+ if (cluster_offset.symbolic_p ())
+ return NULL;
+ region_offset reg_offset = reg->get_offset ();
+ if (reg_offset.symbolic_p ())
+ return NULL;
+
+ for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
+ {
+ const binding_key *key = (*iter).first;
+ const svalue *sval = (*iter).second;
+
+ if (const concrete_binding *concrete_key
+ = key->dyn_cast_concrete_binding ())
+ {
+ /* Skip bindings that are outside the bit range of REG. */
+ if (concrete_key->get_start_bit_offset ()
+ < reg_offset.get_bit_offset ())
+ continue;
+ bit_size_t reg_bit_size;
+ if (reg->get_bit_size (&reg_bit_size))
+ if (concrete_key->get_start_bit_offset ()
+ >= reg_offset.get_bit_offset () + reg_bit_size)
+ continue;
+
+ /* Get offset of KEY relative to REG, rather than to
+ the cluster. */
+ bit_offset_t relative_start
+ = (concrete_key->get_start_bit_offset ()
+ - reg_offset.get_bit_offset ());
+ const concrete_binding *offset_concrete_key
+ = mgr->get_concrete_binding (relative_start,
+ concrete_key->get_size_in_bits (),
+ key->get_kind ());
+ map.put (offset_concrete_key, sval);
+ }
+ else
+ return NULL;
+ }
+
+ if (map.elements () == 0)
+ return NULL;
+
+ region_model_manager *sval_mgr = mgr->get_svalue_manager ();
+ return sval_mgr->get_or_create_compound_svalue (reg->get_type (), map);
+}
+
+
+/* Populate OUT with all bindings within this cluster that overlap REG.
+ Symbolic keys (on either side) are conservatively treated as
+ overlapping. */
+
+void
+binding_cluster::get_overlapping_bindings (store_manager *mgr,
+ const region *reg,
+ auto_vec<const binding_key *> *out)
+{
+ /* Compare against a direct binding key for the whole of REG. */
+ const binding_key *binding
+ = binding_key::make (mgr, reg, BK_direct);
+ for (map_t::iterator iter = m_map.begin ();
+ iter != m_map.end (); ++iter)
+ {
+ const binding_key *iter_key = (*iter).first;
+ if (const concrete_binding *ckey
+ = binding->dyn_cast_concrete_binding ())
+ {
+ if (const concrete_binding *iter_ckey
+ = iter_key->dyn_cast_concrete_binding ())
+ {
+ if (ckey->overlaps_p (*iter_ckey))
+ out->safe_push (iter_key);
+ }
+ else
+ {
+ /* Assume overlap. */
+ out->safe_push (iter_key);
+ }
+ }
+ else
+ {
+ /* Assume overlap. */
+ out->safe_push (iter_key);
+ }
+ }
+}
+
+/* Remove any bindings within this cluster that overlap REG,
+ but retain default bindings that overlap but aren't fully covered
+ by REG. */
+
+void
+binding_cluster::remove_overlapping_bindings (store_manager *mgr,
+ const region *reg)
+{
+ auto_vec<const binding_key *> bindings;
+ get_overlapping_bindings (mgr, reg, &bindings);
+
+ unsigned i;
+ const binding_key *iter_binding;
+ FOR_EACH_VEC_ELT (bindings, i, iter_binding)
+ {
+ /* Don't remove default bindings, unless the default binding
+ is fully covered by REG. */
+ if (iter_binding->get_kind () == BK_default)
+ {
+ const binding_key *reg_binding
+ = binding_key::make (mgr, reg, BK_default);
+ /* Keys are consolidated, so pointer equality means
+ "same extent and kind". */
+ if (reg_binding != iter_binding)
+ continue;
+ }
+ m_map.remove (iter_binding);
+ }
+}
+
+/* Attempt to merge CLUSTER_A and CLUSTER_B into OUT_CLUSTER, using
+ MGR and MERGER.
+ Either input (but not both) may be NULL, meaning "no cluster",
+ in which case the non-NULL input's bindings become unknown in OUT.
+ Return true if they can be merged, false otherwise. */
+
+bool
+binding_cluster::can_merge_p (const binding_cluster *cluster_a,
+ const binding_cluster *cluster_b,
+ binding_cluster *out_cluster,
+ store_manager *mgr,
+ model_merger *merger)
+{
+ gcc_assert (out_cluster);
+
+ /* Merge flags ("ESCAPED" and "TOUCHED") by setting the merged flag to
+ true if either of the inputs is true. */
+ if ((cluster_a && cluster_a->m_escaped)
+ || (cluster_b && cluster_b->m_escaped))
+ out_cluster->m_escaped = true;
+ if ((cluster_a && cluster_a->m_touched)
+ || (cluster_b && cluster_b->m_touched))
+ out_cluster->m_touched = true;
+
+ /* At least one of CLUSTER_A and CLUSTER_B are non-NULL, but either
+ could be NULL. Handle these cases. */
+ if (cluster_a == NULL)
+ {
+ gcc_assert (cluster_b != NULL);
+ gcc_assert (cluster_b->m_base_region == out_cluster->m_base_region);
+ out_cluster->make_unknown_relative_to (cluster_b, mgr);
+ return true;
+ }
+ if (cluster_b == NULL)
+ {
+ gcc_assert (cluster_a != NULL);
+ gcc_assert (cluster_a->m_base_region == out_cluster->m_base_region);
+ out_cluster->make_unknown_relative_to (cluster_a, mgr);
+ return true;
+ }
+
+ /* The "both inputs are non-NULL" case. */
+ gcc_assert (cluster_a != NULL && cluster_b != NULL);
+ gcc_assert (cluster_a->m_base_region == out_cluster->m_base_region);
+ gcc_assert (cluster_b->m_base_region == out_cluster->m_base_region);
+
+ /* Gather the union of the keys bound in either input cluster. */
+ hash_set<const binding_key *> keys;
+ for (map_t::iterator iter_a = cluster_a->m_map.begin ();
+ iter_a != cluster_a->m_map.end (); ++iter_a)
+ {
+ const binding_key *key_a = (*iter_a).first;
+ keys.add (key_a);
+ }
+ for (map_t::iterator iter_b = cluster_b->m_map.begin ();
+ iter_b != cluster_b->m_map.end (); ++iter_b)
+ {
+ const binding_key *key_b = (*iter_b).first;
+ keys.add (key_b);
+ }
+ for (hash_set<const binding_key *>::iterator iter = keys.begin ();
+ iter != keys.end (); ++iter)
+ {
+ const binding_key *key = *iter;
+ const svalue *sval_a = cluster_a->get_any_value (key);
+ const svalue *sval_b = cluster_b->get_any_value (key);
+
+ if (sval_a == sval_b)
+ {
+ gcc_assert (sval_a);
+ out_cluster->m_map.put (key, sval_a);
+ continue;
+ }
+ else if (sval_a && sval_b)
+ {
+ region_model_manager *sval_mgr = mgr->get_svalue_manager ();
+ if (const svalue *merged_sval
+ = sval_a->can_merge_p (sval_b, sval_mgr, merger))
+ {
+ out_cluster->m_map.put (key, merged_sval);
+ continue;
+ }
+ /* Merger of the svalues failed. Reject merger of the cluster. */
+ return false;
+ }
+
+ /* If we get here, then one cluster binds this key and the other
+ doesn't; merge them as "UNKNOWN". */
+ gcc_assert (sval_a || sval_b);
+ tree type = sval_a ? sval_a->get_type () : sval_b->get_type ();
+ const svalue *unknown_sval
+ = mgr->get_svalue_manager ()->get_or_create_unknown_svalue (type);
+ out_cluster->m_map.put (key, unknown_sval);
+ }
+
+ /* Handle the case where we get a default binding from one and a direct
+ binding from the other.
+ When a direct binding exists for an extent, drop any default binding
+ for the same extent from the result. */
+ auto_vec<const concrete_binding *> duplicate_keys;
+ for (map_t::iterator iter = out_cluster->m_map.begin ();
+ iter != out_cluster->m_map.end (); ++iter)
+ {
+ const concrete_binding *ckey
+ = (*iter).first->dyn_cast_concrete_binding ();
+ if (!ckey)
+ continue;
+ if (ckey->get_kind () != BK_direct)
+ continue;
+ const concrete_binding *def_ckey
+ = mgr->get_concrete_binding (ckey->get_start_bit_offset (),
+ ckey->get_size_in_bits (),
+ BK_default);
+ if (out_cluster->m_map.get (def_ckey))
+ duplicate_keys.safe_push (def_ckey);
+ }
+ unsigned i;
+ const concrete_binding *key;
+ FOR_EACH_VEC_ELT (duplicate_keys, i, key)
+ out_cluster->m_map.remove (key);
+
+ /* We don't handle other kinds of overlaps yet. */
+
+ return true;
+}
+
+/* Update this cluster to reflect an attempt to merge OTHER where there
+ is no other cluster to merge with, and so we're notionally merging the
+ bound values in OTHER with the initial value of the relevant regions.
+
+ Any bound keys in OTHER should be bound to unknown in this. */
+
+void
+binding_cluster::make_unknown_relative_to (const binding_cluster *other,
+ store_manager *mgr)
+{
+ for (map_t::iterator iter = other->m_map.begin ();
+ iter != other->m_map.end (); ++iter)
+ {
+ const binding_key *iter_key = (*iter).first;
+ const svalue *iter_sval = (*iter).second;
+ /* Reuse OTHER's value's type for the unknown value. */
+ const svalue *unknown_sval
+ = mgr->get_svalue_manager ()->get_or_create_unknown_svalue
+ (iter_sval->get_type ());
+ m_map.put (iter_key, unknown_sval);
+ }
+}
+
+/* Mark this cluster as having escaped. */
+
+void
+binding_cluster::mark_as_escaped ()
+{
+ m_escaped = true;
+}
+
+/* If this cluster has escaped (by this call, or by an earlier one, or
+ by being an external param), then unbind all values and mark it
+ as "touched", so that it has an unknown value, rather than an
+ initial_svalue.
+ Non-escaped clusters are unaffected by the unknown call. */
+
+void
+binding_cluster::on_unknown_fncall (const gcall *call,
+ store_manager *mgr)
+{
+ if (m_escaped)
+ {
+ m_map.empty ();
+
+ /* Bind it to a new "conjured" value using CALL. */
+ const svalue *sval
+ = mgr->get_svalue_manager ()->get_or_create_conjured_svalue
+ (m_base_region->get_type (), call, m_base_region);
+ bind (mgr, m_base_region, sval, BK_direct);
+
+ m_touched = true;
+ }
+}
+
+/* Return true if this binding_cluster has no information
+ i.e. if there are no bindings, and it hasn't been marked as having
+ escaped, or touched symbolically. */
+
+bool
+binding_cluster::redundant_p () const
+{
+ return (m_map.elements () == 0
+ && !m_escaped
+ && !m_touched);
+}
+
+/* Find representative path_vars for SVAL within this binding of BASE_REG,
+ appending the results to OUT_PVS.
+ Scans every binding in the cluster for a value equal to SVAL
+ (after simplification), and maps each matching key back to the
+ region(s)/path_var(s) it describes. */
+
+void
+binding_cluster::get_representative_path_vars (const region_model *model,
+ svalue_set *visited,
+ const region *base_reg,
+ const svalue *sval,
+ auto_vec<path_var> *out_pvs)
+ const
+{
+ sval = simplify_for_binding (sval);
+
+ for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
+ {
+ const binding_key *key = (*iter).first;
+ const svalue *bound_sval = (*iter).second;
+ if (bound_sval == sval)
+ {
+ if (const concrete_binding *ckey
+ = key->dyn_cast_concrete_binding ())
+ {
+ /* Concrete key: find the subregions of BASE_REG covering
+ this bit range, and get a path_var for each. */
+ auto_vec <const region *> subregions;
+ base_reg->get_subregions_for_binding
+ (model->get_manager (),
+ ckey->get_start_bit_offset (),
+ ckey->get_size_in_bits (),
+ sval->get_type (),
+ &subregions);
+ unsigned i;
+ const region *subregion;
+ FOR_EACH_VEC_ELT (subregions, i, subregion)
+ {
+ if (path_var pv
+ = model->get_representative_path_var (subregion,
+ visited))
+ out_pvs->safe_push (pv);
+ }
+ }
+ else
+ {
+ /* Symbolic key: use the key's own region. */
+ const symbolic_binding *skey = (const symbolic_binding *)key;
+ if (path_var pv
+ = model->get_representative_path_var (skey->get_region (),
+ visited))
+ out_pvs->safe_push (pv);
+ }
+ }
+ }
+}
+
+/* Get any svalue bound to KEY, or NULL. */
+
+const svalue *
+binding_cluster::get_any_value (const binding_key *key) const
+{
+ return m_map.get (key);
+}
+
+/* If this cluster has a single direct binding for the whole of the region,
+ return it.
+ For use in simplifying dumps. */
+
+const svalue *
+binding_cluster::maybe_get_simple_value (store_manager *mgr) const
+{
+ /* Fail gracefully if MGR is NULL to make it easier to dump store
+ instances in the debugger. */
+ if (mgr == NULL)
+ return NULL;
+
+ if (m_map.elements () != 1)
+ return NULL;
+
+ const binding_key *key = binding_key::make (mgr, m_base_region, BK_direct);
+ return get_any_value (key);
+}
+
+/* class store_manager. */
+
+/* binding consolidation.
+ Each distinct (offset, size, kind) / (region, kind) gets exactly one
+ binding_key instance, so keys can be compared by pointer. */
+
+const concrete_binding *
+store_manager::get_concrete_binding (bit_offset_t start_bit_offset,
+ bit_offset_t size_in_bits,
+ enum binding_kind kind)
+{
+ concrete_binding b (start_bit_offset, size_in_bits, kind);
+ if (concrete_binding *existing = m_concrete_binding_key_mgr.get (b))
+ return existing;
+
+ /* Not seen before: save a canonical copy, owned by the manager. */
+ concrete_binding *to_save = new concrete_binding (b);
+ m_concrete_binding_key_mgr.put (b, to_save);
+ return to_save;
+}
+
+const symbolic_binding *
+store_manager::get_symbolic_binding (const region *reg,
+ enum binding_kind kind)
+{
+ symbolic_binding b (reg, kind);
+ if (symbolic_binding *existing = m_symbolic_binding_key_mgr.get (b))
+ return existing;
+
+ /* Not seen before: save a canonical copy, owned by the manager. */
+ symbolic_binding *to_save = new symbolic_binding (b);
+ m_symbolic_binding_key_mgr.put (b, to_save);
+ return to_save;
+}
+
+/* class store. */
+
+/* store's default ctor. */
+
+store::store ()
+: m_called_unknown_fn (false)
+{
+}
+
+/* store's copy ctor.
+ The store owns its clusters, so each is deep-copied. */
+
+store::store (const store &other)
+: m_called_unknown_fn (other.m_called_unknown_fn)
+{
+ for (cluster_map_t::iterator iter = other.m_cluster_map.begin ();
+ iter != other.m_cluster_map.end ();
+ ++iter)
+ {
+ const region *reg = (*iter).first;
+ gcc_assert (reg);
+ binding_cluster *c = (*iter).second;
+ gcc_assert (c);
+ m_cluster_map.put (reg, new binding_cluster (*c));
+ }
+}
+
+/* store's dtor.
+ Deletes the owned clusters. */
+
+store::~store ()
+{
+ for (cluster_map_t::iterator iter = m_cluster_map.begin ();
+ iter != m_cluster_map.end ();
+ ++iter)
+ delete (*iter).second;
+}
+
+/* store's assignment operator. */
+
+store &
+store::operator= (const store &other)
+{
+ /* Delete existing cluster map. */
+ for (cluster_map_t::iterator iter = m_cluster_map.begin ();
+ iter != m_cluster_map.end ();
+ ++iter)
+ delete (*iter).second;
+ m_cluster_map.empty ();
+
+ m_called_unknown_fn = other.m_called_unknown_fn;
+
+ /* Deep-copy OTHER's clusters, as in the copy ctor. */
+ for (cluster_map_t::iterator iter = other.m_cluster_map.begin ();
+ iter != other.m_cluster_map.end ();
+ ++iter)
+ {
+ const region *reg = (*iter).first;
+ gcc_assert (reg);
+ binding_cluster *c = (*iter).second;
+ gcc_assert (c);
+ m_cluster_map.put (reg, new binding_cluster (*c));
+ }
+ return *this;
+}
+
+/* store's equality operator.
+ Compares the flag and the clusters by value (not by pointer). */
+
+bool
+store::operator== (const store &other) const
+{
+ if (m_called_unknown_fn != other.m_called_unknown_fn)
+ return false;
+
+ if (m_cluster_map.elements () != other.m_cluster_map.elements ())
+ return false;
+
+ for (cluster_map_t::iterator iter = m_cluster_map.begin ();
+ iter != m_cluster_map.end ();
+ ++iter)
+ {
+ const region *reg = (*iter).first;
+ binding_cluster *c = (*iter).second;
+ binding_cluster **other_slot
+ = const_cast <cluster_map_t &> (other.m_cluster_map).get (reg);
+ if (other_slot == NULL)
+ return false;
+ if (*c != **other_slot)
+ return false;
+ }
+
+ gcc_checking_assert (hash () == other.hash ());
+
+ return true;
+}
+
+/* Get a hash value for this store.
+ XOR of the cluster hashes, so independent of iteration order. */
+
+hashval_t
+store::hash () const
+{
+ hashval_t result = 0;
+ for (cluster_map_t::iterator iter = m_cluster_map.begin ();
+ iter != m_cluster_map.end ();
+ ++iter)
+ result ^= (*iter).second->hash ();
+ return result;
+}
+
+/* Populate OUT with a sorted list of parent regions for the regions in IN,
+ removing duplicate parents.
+ Sorting makes dump output deterministic. */
+
+static void
+get_sorted_parent_regions (auto_vec<const region *> *out,
+ auto_vec<const region *> &in)
+{
+ /* Get the set of parent regions. */
+ hash_set<const region *> parent_regions;
+ const region *iter_reg;
+ unsigned i;
+ FOR_EACH_VEC_ELT (in, i, iter_reg)
+ {
+ const region *parent_reg = iter_reg->get_parent_region ();
+ gcc_assert (parent_reg);
+ parent_regions.add (parent_reg);
+ }
+
+ /* Write to OUT. */
+ for (hash_set<const region *>::iterator iter = parent_regions.begin();
+ iter != parent_regions.end(); ++iter)
+ out->safe_push (*iter);
+
+ /* Sort OUT. */
+ out->qsort (region::cmp_ptrs);
+}
+
+/* Dump a representation of this store to PP, using SIMPLE to control how
+ svalues and regions are printed.
+ MGR is used for simplifying dumps if non-NULL, but can also be NULL
+ (to make it easier to use from the debugger).
+ Clusters are grouped by parent region (locals, globals, etc). */
+
+void
+store::dump_to_pp (pretty_printer *pp, bool simple, bool multiline,
+ store_manager *mgr) const
+{
+ /* Sort into some deterministic order. */
+ auto_vec<const region *> base_regions;
+ for (cluster_map_t::iterator iter = m_cluster_map.begin ();
+ iter != m_cluster_map.end (); ++iter)
+ {
+ const region *base_reg = (*iter).first;
+ base_regions.safe_push (base_reg);
+ }
+ base_regions.qsort (region::cmp_ptrs);
+
+ /* Gather clusters, organize by parent region, so that we can group
+ together locals, globals, etc. */
+ auto_vec<const region *> parent_regions;
+ get_sorted_parent_regions (&parent_regions, base_regions);
+
+ const region *parent_reg;
+ unsigned i;
+ FOR_EACH_VEC_ELT (parent_regions, i, parent_reg)
+ {
+ gcc_assert (parent_reg);
+ pp_string (pp, "clusters within ");
+ parent_reg->dump_to_pp (pp, simple);
+ if (multiline)
+ pp_newline (pp);
+ else
+ pp_string (pp, " {");
+
+ const region *base_reg;
+ unsigned j;
+ FOR_EACH_VEC_ELT (base_regions, j, base_reg)
+ {
+ /* This is O(N * M), but N ought to be small. */
+ if (base_reg->get_parent_region () != parent_reg)
+ continue;
+ binding_cluster *cluster
+ = *const_cast<cluster_map_t &> (m_cluster_map).get (base_reg);
+ if (!multiline)
+ {
+ if (j > 0)
+ pp_string (pp, ", ");
+ }
+ if (const svalue *sval = cluster->maybe_get_simple_value (mgr))
+ {
+ /* Special-case to simplify dumps for the common case where
+ we just have one value directly bound to the whole of a
+ region. */
+ if (multiline)
+ {
+ pp_string (pp, " cluster for: ");
+ base_reg->dump_to_pp (pp, simple);
+ pp_string (pp, ": ");
+ sval->dump_to_pp (pp, simple);
+ if (cluster->escaped_p ())
+ pp_string (pp, " (ESCAPED)");
+ if (cluster->touched_p ())
+ pp_string (pp, " (TOUCHED)");
+ pp_newline (pp);
+ }
+ else
+ {
+ pp_string (pp, "region: {");
+ base_reg->dump_to_pp (pp, simple);
+ pp_string (pp, ", value: ");
+ sval->dump_to_pp (pp, simple);
+ if (cluster->escaped_p ())
+ pp_string (pp, " (ESCAPED)");
+ if (cluster->touched_p ())
+ pp_string (pp, " (TOUCHED)");
+ pp_string (pp, "}");
+ }
+ }
+ else if (multiline)
+ {
+ pp_string (pp, " cluster for: ");
+ base_reg->dump_to_pp (pp, simple);
+ pp_newline (pp);
+ cluster->dump_to_pp (pp, simple, multiline);
+ }
+ else
+ {
+ pp_string (pp, "base region: {");
+ base_reg->dump_to_pp (pp, simple);
+ pp_string (pp, "} has cluster: {");
+ cluster->dump_to_pp (pp, simple, multiline);
+ pp_string (pp, "}");
+ }
+ }
+ if (!multiline)
+ pp_string (pp, "}");
+ }
+ pp_printf (pp, "m_called_unknown_fn: %s",
+ m_called_unknown_fn ? "TRUE" : "FALSE");
+ if (multiline)
+ pp_newline (pp);
+}
+
+/* Dump a multiline representation of this store to stderr. */
+
+DEBUG_FUNCTION void
+store::dump (bool simple) const
+{
+ pretty_printer pp;
+ pp_format_decoder (&pp) = default_tree_printer;
+ pp_show_color (&pp) = pp_show_color (global_dc->printer);
+ /* Write directly to stderr, for use from within the debugger. */
+ pp.buffer->stream = stderr;
+ dump_to_pp (&pp, simple, true, NULL);
+ pp_newline (&pp);
+ pp_flush (&pp);
+}
+
+/* Get any svalue bound to REG, or NULL.
+ Looks up the cluster for REG's base region, then searches within it. */
+
+const svalue *
+store::get_any_binding (store_manager *mgr, const region *reg) const
+{
+ const region *base_reg = reg->get_base_region ();
+ binding_cluster **cluster_slot
+ = const_cast <cluster_map_t &> (m_cluster_map).get (base_reg);
+ if (!cluster_slot)
+ return NULL;
+ return (*cluster_slot)->get_any_binding (mgr, reg);
+}
+
+/* Set the value of LHS_REG to RHS_SVAL, with binding kind KIND.
+ Also invalidates other clusters that could alias LHS_REG. */
+
+void
+store::set_value (store_manager *mgr, const region *lhs_reg,
+ const svalue *rhs_sval, enum binding_kind kind)
+{
+ remove_overlapping_bindings (mgr, lhs_reg);
+
+ rhs_sval = simplify_for_binding (rhs_sval);
+
+ const region *lhs_base_reg = lhs_reg->get_base_region ();
+ binding_cluster *lhs_cluster;
+ if (lhs_base_reg->symbolic_for_unknown_ptr_p ())
+ /* Reject attempting to bind values into a symbolic region
+ for an unknown ptr; merely invalidate values below. */
+ lhs_cluster = NULL;
+ else
+ {
+ lhs_cluster = get_or_create_cluster (lhs_base_reg);
+ lhs_cluster->bind (mgr, lhs_reg, rhs_sval, kind);
+ }
+
+ /* Bindings to a cluster can affect other clusters if a symbolic
+ base region is involved.
+ Writes to concrete clusters can't affect other concrete clusters,
+ but can affect symbolic clusters.
+ Writes to symbolic clusters can affect both concrete and symbolic
+ clusters.
+ Invalidate our knowledge of other clusters that might have been
+ affected by the write. */
+ for (cluster_map_t::iterator iter = m_cluster_map.begin ();
+ iter != m_cluster_map.end (); ++iter)
+ {
+ const region *iter_base_reg = (*iter).first;
+ binding_cluster *iter_cluster = (*iter).second;
+ if (iter_base_reg != lhs_base_reg
+ && (lhs_cluster == NULL
+ || lhs_cluster->symbolic_p ()
+ || iter_cluster->symbolic_p ()))
+ {
+ tristate t_alias = eval_alias (lhs_base_reg, iter_base_reg);
+ switch (t_alias.get_value ())
+ {
+ default:
+ gcc_unreachable ();
+
+ case tristate::TS_UNKNOWN:
+ /* Possible alias: conservatively lose knowledge. */
+ iter_cluster->mark_region_as_unknown (mgr, iter_base_reg);
+ break;
+
+ case tristate::TS_TRUE:
+ /* eval_alias never returns TS_TRUE for distinct
+ base regions. */
+ gcc_unreachable ();
+ break;
+
+ case tristate::TS_FALSE:
+ /* If they can't be aliases, then don't invalidate this
+ cluster. */
+ break;
+ }
+ }
+ }
+}
+
+/* Determine if BASE_REG_A could be an alias of BASE_REG_B.
+ Only ever returns TS_FALSE ("definitely not") or TS_UNKNOWN
+ ("possibly"); never TS_TRUE. */
+
+tristate
+store::eval_alias (const region *base_reg_a,
+ const region *base_reg_b)
+{
+ /* SSA names can't alias. */
+ tree decl_a = base_reg_a->maybe_get_decl ();
+ if (decl_a && TREE_CODE (decl_a) == SSA_NAME)
+ return tristate::TS_FALSE;
+ tree decl_b = base_reg_b->maybe_get_decl ();
+ if (decl_b && TREE_CODE (decl_b) == SSA_NAME)
+ return tristate::TS_FALSE;
+
+ /* Check each direction: a symbolic region whose pointer still has
+ its initial value can't point at the other side's local decl. */
+ if (const symbolic_region *sym_reg_a
+ = base_reg_a->dyn_cast_symbolic_region ())
+ {
+ const svalue *sval_a = sym_reg_a->get_pointer ();
+ if (sval_a->get_kind () == SK_INITIAL
+ && decl_b
+ && !is_global_var (decl_b))
+ {
+ /* The initial value of a pointer can't point to a local. */
+ return tristate::TS_FALSE;
+ }
+ }
+ if (const symbolic_region *sym_reg_b
+ = base_reg_b->dyn_cast_symbolic_region ())
+ {
+ const svalue *sval_b = sym_reg_b->get_pointer ();
+ if (sval_b->get_kind () == SK_INITIAL
+ && decl_a
+ && !is_global_var (decl_a))
+ {
+ /* The initial value of a pointer can't point to a local. */
+ return tristate::TS_FALSE;
+ }
+ }
+
+ return tristate::TS_UNKNOWN;
+}
+
+/* Remove all bindings overlapping REG within this store.
+ The cluster itself is deleted if this leaves it with no
+ information. */
+
+void
+store::clobber_region (store_manager *mgr, const region *reg)
+{
+ const region *base_reg = reg->get_base_region ();
+ binding_cluster **slot = m_cluster_map.get (base_reg);
+ if (!slot)
+ return;
+ binding_cluster *cluster = *slot;
+ cluster->clobber_region (mgr, reg);
+ if (cluster->redundant_p ())
+ {
+ delete cluster;
+ m_cluster_map.remove (base_reg);
+ }
+}
+
+/* Remove any bindings for REG within this store.
+ As above, an emptied cluster is discarded. */
+
+void
+store::purge_region (store_manager *mgr, const region *reg)
+{
+ const region *base_reg = reg->get_base_region ();
+ binding_cluster **slot = m_cluster_map.get (base_reg);
+ if (!slot)
+ return;
+ binding_cluster *cluster = *slot;
+ cluster->purge_region (mgr, reg);
+ if (cluster->redundant_p ())
+ {
+ delete cluster;
+ m_cluster_map.remove (base_reg);
+ }
+}
+
+/* Zero-fill REG.
+ No-op for a dereference of an unknown pointer, since no cluster
+ can be created for it. */
+
+void
+store::zero_fill_region (store_manager *mgr, const region *reg)
+{
+ const region *base_reg = reg->get_base_region ();
+ if (base_reg->symbolic_for_unknown_ptr_p ())
+ return;
+ binding_cluster *cluster = get_or_create_cluster (base_reg);
+ cluster->zero_fill_region (mgr, reg);
+}
+
+/* Mark REG as having unknown content. */
+
+void
+store::mark_region_as_unknown (store_manager *mgr, const region *reg)
+{
+ const region *base_reg = reg->get_base_region ();
+ if (base_reg->symbolic_for_unknown_ptr_p ())
+ return;
+ binding_cluster *cluster = get_or_create_cluster (base_reg);
+ cluster->mark_region_as_unknown (mgr, reg);
+}
+
+/* Get the cluster for BASE_REG, or NULL (const version).
+   BASE_REG must be a base region (i.e. its own base).  */
+
+const binding_cluster *
+store::get_cluster (const region *base_reg) const
+{
+  gcc_assert (base_reg);
+  gcc_assert (base_reg->get_base_region () == base_reg);
+  /* The const_cast is needed since hash_map::get is non-const;
+     the map itself is not modified.  */
+  if (binding_cluster **slot
+      = const_cast <cluster_map_t &> (m_cluster_map).get (base_reg))
+    return *slot;
+  else
+    return NULL;
+}
+
+/* Get the cluster for BASE_REG, or NULL (non-const version).
+   BASE_REG must be a base region (i.e. its own base).  */
+
+binding_cluster *
+store::get_cluster (const region *base_reg)
+{
+  gcc_assert (base_reg);
+  gcc_assert (base_reg->get_base_region () == base_reg);
+  if (binding_cluster **slot = m_cluster_map.get (base_reg))
+    return *slot;
+  else
+    return NULL;
+}
+
+/* Get the cluster for BASE_REG, lazily creating it if it doesn't
+   already exist.  */
+
+binding_cluster *
+store::get_or_create_cluster (const region *base_reg)
+{
+  gcc_assert (base_reg);
+  gcc_assert (base_reg->get_base_region () == base_reg);
+
+  /* We shouldn't create clusters for dereferencing an UNKNOWN ptr.  */
+  gcc_assert (!base_reg->symbolic_for_unknown_ptr_p ());
+
+  if (binding_cluster **slot = m_cluster_map.get (base_reg))
+    return *slot;
+
+  /* Not present: create an empty cluster, owned by this store
+     (deleted via purge_cluster et al).  */
+  binding_cluster *cluster = new binding_cluster (base_reg);
+  m_cluster_map.put (base_reg, cluster);
+
+  return cluster;
+}
+
+/* Remove any cluster for BASE_REG, for use by
+   region_model::unbind_region_and_descendents
+   when popping stack frames and handling deleted heap regions.
+   No-op if there is no cluster for BASE_REG.  */
+
+void
+store::purge_cluster (const region *base_reg)
+{
+  gcc_assert (base_reg->get_base_region () == base_reg);
+  binding_cluster **slot = m_cluster_map.get (base_reg);
+  if (!slot)
+    return;
+  binding_cluster *cluster = *slot;
+  delete cluster;
+  m_cluster_map.remove (base_reg);
+}
+
+/* Attempt to merge STORE_A and STORE_B into OUT_STORE.
+   Return true if successful, or false if the stores can't be merged
+   (in which case OUT_STORE may have been partially populated).  */
+
+bool
+store::can_merge_p (const store *store_a, const store *store_b,
+		    store *out_store, store_manager *mgr,
+		    model_merger *merger)
+{
+  /* If either input has had unknown code called on it, so has the
+     merged result.  */
+  if (store_a->m_called_unknown_fn || store_b->m_called_unknown_fn)
+    out_store->m_called_unknown_fn = true;
+
+  /* Get the union of all base regions for STORE_A and STORE_B.  */
+  hash_set<const region *> base_regions;
+  for (cluster_map_t::iterator iter_a = store_a->m_cluster_map.begin ();
+       iter_a != store_a->m_cluster_map.end (); ++iter_a)
+    {
+      const region *base_reg_a = (*iter_a).first;
+      base_regions.add (base_reg_a);
+    }
+  for (cluster_map_t::iterator iter_b = store_b->m_cluster_map.begin ();
+       iter_b != store_b->m_cluster_map.end (); ++iter_b)
+    {
+      const region *base_reg_b = (*iter_b).first;
+      base_regions.add (base_reg_b);
+    }
+
+  /* Attempt to merge the clusters pairwise; since the union was taken,
+     one of the two clusters for a given base region may be NULL.  */
+  for (hash_set<const region *>::iterator iter = base_regions.begin ();
+       iter != base_regions.end (); ++iter)
+    {
+      const region *base_reg = *iter;
+      const binding_cluster *cluster_a = store_a->get_cluster (base_reg);
+      const binding_cluster *cluster_b = store_b->get_cluster (base_reg);
+      /* At least one of cluster_a and cluster_b must be non-NULL.  */
+      binding_cluster *out_cluster
+	= out_store->get_or_create_cluster (base_reg);
+      if (!binding_cluster::can_merge_p (cluster_a, cluster_b,
+					 out_cluster, mgr, merger))
+	return false;
+    }
+  return true;
+}
+
+/* Mark the cluster for BASE_REG as having escaped.
+   For use when handling an unrecognized function call, and
+   for params to "top-level" calls.
+   Further unknown function calls could touch it, even if the cluster
+   isn't reachable from args of those calls.  */
+
+void
+store::mark_as_escaped (const region *base_reg)
+{
+  gcc_assert (base_reg);
+  gcc_assert (base_reg->get_base_region () == base_reg);
+
+  /* Create the cluster if necessary, so that the "escaped" flag is
+     recorded even if nothing is bound within BASE_REG yet.  */
+  binding_cluster *cluster = get_or_create_cluster (base_reg);
+  cluster->mark_as_escaped ();
+}
+
+/* Handle an unknown fncall by updating any clusters that have escaped
+   (either in this fncall, or in a prior one).  */
+
+void
+store::on_unknown_fncall (const gcall *call, store_manager *mgr)
+{
+  /* Record that unknown code has run, so that globals not explicitly
+     modelled here are treated as having unknown state rather than
+     their initial state (see m_called_unknown_fn).  */
+  m_called_unknown_fn = true;
+
+  for (cluster_map_t::iterator iter = m_cluster_map.begin ();
+       iter != m_cluster_map.end (); ++iter)
+    (*iter).second->on_unknown_fncall (call, mgr);
+}
+
+/* Return true if a non-const pointer to BASE_REG (or something within it)
+   has escaped to code outside of the TU being analyzed.  */
+
+bool
+store::escaped_p (const region *base_reg) const
+{
+  gcc_assert (base_reg);
+  gcc_assert (base_reg->get_base_region () == base_reg);
+
+  /* The const_cast is needed since hash_map::get is non-const;
+     the map itself is not modified.  */
+  if (binding_cluster **cluster_slot
+      = const_cast <cluster_map_t &>(m_cluster_map).get (base_reg))
+    return (*cluster_slot)->escaped_p ();
+  return false;
+}
+
+/* Populate OUT_PVS with a list of path_vars for describing SVAL based on
+   this store, using VISITED to ensure the traversal terminates.  */
+
+void
+store::get_representative_path_vars (const region_model *model,
+				     svalue_set *visited,
+				     const svalue *sval,
+				     auto_vec<path_var> *out_pvs) const
+{
+  gcc_assert (sval);
+
+  /* Find all bindings that reference SVAL.  */
+  for (cluster_map_t::iterator iter = m_cluster_map.begin ();
+       iter != m_cluster_map.end (); ++iter)
+    {
+      const region *base_reg = (*iter).first;
+      binding_cluster *cluster = (*iter).second;
+      cluster->get_representative_path_vars (model, visited, base_reg, sval,
+					     out_pvs);
+    }
+
+  /* Additionally, if SVAL is the initial value of some region, that
+     region itself describes SVAL, even if no binding references it.  */
+  if (const initial_svalue *init_sval = sval->dyn_cast_initial_svalue ())
+    {
+      const region *reg = init_sval->get_region ();
+      if (path_var pv = model->get_representative_path_var (reg,
+							    visited))
+	out_pvs->safe_push (pv);
+    }
+}
+
+/* Remove all bindings overlapping REG within this store, removing
+   any clusters that become redundant.  */
+
+void
+store::remove_overlapping_bindings (store_manager *mgr, const region *reg)
+{
+  const region *base_reg = reg->get_base_region ();
+  if (binding_cluster **cluster_slot = m_cluster_map.get (base_reg))
+    {
+      binding_cluster *cluster = *cluster_slot;
+      /* If REG is the base region itself, the whole cluster can simply
+	 be dropped — unless it has escaped, in which case it's kept,
+	 presumably so that the "escaped" flag isn't lost
+	 (NOTE(review): confirm that's the intent).  */
+      if (reg == base_reg && !escaped_p (base_reg))
+	{
+	  /* Remove whole cluster.  */
+	  m_cluster_map.remove (base_reg);
+	  delete cluster;
+	  return;
+	}
+      cluster->remove_overlapping_bindings (mgr, reg);
+    }
+}
+
+/* Subclass of visitor that accumulates a hash_set of the regions that
+   were visited.  Used by store::canonicalize to find the regions
+   referenced by bound svalues.  */
+
+struct region_finder : public visitor
+{
+  void visit_region (const region *reg) FINAL OVERRIDE
+  {
+    m_regs.add (reg);
+  }
+
+  /* All regions seen so far.  */
+  hash_set<const region *> m_regs;
+};
+
+/* Canonicalize this store, to maximize the chance of equality between
+   instances.  */
+
+void
+store::canonicalize (store_manager *mgr)
+{
+  /* If we have e.g.:
+       cluster for: HEAP_ALLOCATED_REGION(543)
+         ESCAPED
+         TOUCHED
+     where the heap region is empty and unreferenced, then purge that
+     cluster, to avoid unbounded state chains involving these.  */
+
+  /* Find regions that are referenced by bound values in the store.  */
+  region_finder s;
+  for (cluster_map_t::iterator iter = m_cluster_map.begin ();
+       iter != m_cluster_map.end (); ++iter)
+    {
+      binding_cluster *cluster = (*iter).second;
+      for (binding_cluster::iterator_t bind_iter = cluster->m_map.begin ();
+	   bind_iter != cluster->m_map.end (); ++bind_iter)
+	(*bind_iter).second->accept (&s);
+    }
+
+  /* Locate heap-allocated regions that have empty bindings that weren't
+     found above.  */
+  hash_set<const region *> purgeable_regions;
+  for (cluster_map_t::iterator iter = m_cluster_map.begin ();
+       iter != m_cluster_map.end (); ++iter)
+    {
+      const region *base_reg = (*iter).first;
+      binding_cluster *cluster = (*iter).second;
+      if (base_reg->get_kind () == RK_HEAP_ALLOCATED)
+	{
+	  if (cluster->empty_p ())
+	    if (!s.m_regs.contains (base_reg))
+	      purgeable_regions.add (base_reg);
+
+	  /* Also cover the UNKNOWN case.  */
+	  if (const svalue *sval = cluster->maybe_get_simple_value (mgr))
+	    if (sval->get_kind () == SK_UNKNOWN)
+	      if (!s.m_regs.contains (base_reg))
+		purgeable_regions.add (base_reg);
+	}
+    }
+
+  /* Purge them.  Deferred until after the loop above, since
+     purge_cluster mutates m_cluster_map.  */
+  for (hash_set<const region *>::iterator iter = purgeable_regions.begin ();
+       iter != purgeable_regions.end (); ++iter)
+    {
+      const region *base_reg = *iter;
+      purge_cluster (base_reg);
+    }
+}
+
+/* Subroutine for use by exploded_path::feasible_p.
+
+   We need to deal with state differences between:
+   (a) when the exploded_graph is being initially constructed and
+   (b) when replaying the state changes along a specific path in
+   exploded_path::feasible_p.
+
+   In (a), state merging happens, so when exploring a loop
+     for (i = 0; i < 1024; i++)
+   on successive iterations we have i == 0, then i == WIDENING.
+
+   In (b), no state merging happens, so naively replaying the path
+   that goes twice through the loop then exits it
+   would lead to i == 0, then i == 1, and then a (i >= 1024) eedge
+   that exits the loop, which would be found to be infeasible as i == 1,
+   and the path would be rejected.
+
+   We need to fix up state during replay.  This subroutine is
+   called whenever we enter a supernode that we've already
+   visited along this exploded_path, passing in OTHER_STORE
+   from the destination enode's state.
+
+   Find bindings to widening values in OTHER_STORE.
+   For all that are found, update the binding in this store to UNKNOWN.  */
+
+void
+store::loop_replay_fixup (const store *other_store,
+			  region_model_manager *mgr)
+{
+  gcc_assert (other_store);
+  for (cluster_map_t::iterator iter = other_store->m_cluster_map.begin ();
+       iter != other_store->m_cluster_map.end (); ++iter)
+    {
+      const region *base_reg = (*iter).first;
+      binding_cluster *cluster = (*iter).second;
+      for (binding_cluster::iterator_t bind_iter = cluster->m_map.begin ();
+	   bind_iter != cluster->m_map.end (); ++bind_iter)
+	{
+	  const binding_key *key = (*bind_iter).first;
+	  const svalue *sval = (*bind_iter).second;
+	  if (sval->get_kind () == SK_WIDENING)
+	    {
+	      /* Weaken the corresponding binding in this store to
+		 UNKNOWN, so that replay can't be wrongly found
+		 infeasible.  */
+	      binding_cluster *this_cluster
+		= get_or_create_cluster (base_reg);
+	      const svalue *unknown
+		= mgr->get_or_create_unknown_svalue (sval->get_type ());
+	      this_cluster->bind_key (key, unknown);
+	    }
+	}
+    }
+}
+
+#if CHECKING_P
+
+namespace selftest {
+
+/* Implementation detail of ASSERT_OVERLAP.
+   Check that B1 and B2 overlap in both directions, reporting any
+   failure against LOC.  */
+
+static void
+assert_overlap (const location &loc,
+		const concrete_binding *b1,
+		const concrete_binding *b2)
+{
+  ASSERT_TRUE_AT (loc, b1->overlaps_p (*b2));
+  ASSERT_TRUE_AT (loc, b2->overlaps_p (*b1));
+}
+
+/* Implementation detail of ASSERT_DISJOINT.
+   Check that B1 and B2 do not overlap in either direction, reporting
+   any failure against LOC.  */
+
+static void
+assert_disjoint (const location &loc,
+		 const concrete_binding *b1,
+		 const concrete_binding *b2)
+{
+  ASSERT_FALSE_AT (loc, b1->overlaps_p (*b2));
+  ASSERT_FALSE_AT (loc, b2->overlaps_p (*b1));
+}
+
+/* Assert that B1 and B2 overlap, checking both ways.
+   A macro, so that failures are reported at the call site.  */
+
+#define ASSERT_OVERLAP(B1, B2) \
+  SELFTEST_BEGIN_STMT \
+  assert_overlap (SELFTEST_LOCATION, B1, B2); \
+  SELFTEST_END_STMT
+
+/* Assert that B1 and B2 do not overlap, checking both ways.
+   A macro, so that failures are reported at the call site.  */
+
+#define ASSERT_DISJOINT(B1, B2) \
+  SELFTEST_BEGIN_STMT \
+  assert_disjoint (SELFTEST_LOCATION, B1, B2); \
+  SELFTEST_END_STMT
+
+/* Verify that concrete_binding::overlaps_p works as expected.
+   Bindings are named cb_<first bit>_<last bit> (inclusive).  */
+
+static void
+test_binding_key_overlap ()
+{
+  store_manager mgr (NULL);
+
+  /* Various 8-bit bindings.  */
+  const concrete_binding *cb_0_7
+    = mgr.get_concrete_binding (0, 8, BK_direct);
+  const concrete_binding *cb_8_15
+    = mgr.get_concrete_binding (8, 8, BK_direct);
+  const concrete_binding *cb_16_23
+    = mgr.get_concrete_binding (16, 8, BK_direct);
+  const concrete_binding *cb_24_31
+    = mgr.get_concrete_binding (24, 8, BK_direct);
+
+  /* 16-bit bindings.  */
+  const concrete_binding *cb_0_15
+    = mgr.get_concrete_binding (0, 16, BK_direct);
+  const concrete_binding *cb_8_23
+    = mgr.get_concrete_binding (8, 16, BK_direct);
+  const concrete_binding *cb_16_31
+    = mgr.get_concrete_binding (16, 16, BK_direct);
+
+  /* 32-bit binding.  */
+  const concrete_binding *cb_0_31
+    = mgr.get_concrete_binding (0, 32, BK_direct);
+
+  /* Everything should self-overlap.  */
+  ASSERT_OVERLAP (cb_0_7, cb_0_7);
+  ASSERT_OVERLAP (cb_8_15, cb_8_15);
+  ASSERT_OVERLAP (cb_16_23, cb_16_23);
+  ASSERT_OVERLAP (cb_24_31, cb_24_31);
+  ASSERT_OVERLAP (cb_0_15, cb_0_15);
+  ASSERT_OVERLAP (cb_8_23, cb_8_23);
+  ASSERT_OVERLAP (cb_16_31, cb_16_31);
+  ASSERT_OVERLAP (cb_0_31, cb_0_31);
+
+  /* Verify the 8-bit bindings that don't overlap each other.  */
+  ASSERT_DISJOINT (cb_0_7, cb_8_15);
+  ASSERT_DISJOINT (cb_8_15, cb_16_23);
+
+  /* Check for overlap of differently-sized bindings.  */
+  ASSERT_OVERLAP (cb_0_7, cb_0_31);
+  /* ...and with differing start points.  */
+  ASSERT_OVERLAP (cb_8_15, cb_0_31);
+  ASSERT_DISJOINT (cb_8_15, cb_16_31);
+  ASSERT_OVERLAP (cb_16_23, cb_0_31);
+  ASSERT_OVERLAP (cb_16_31, cb_0_31);
+
+  ASSERT_DISJOINT (cb_0_7, cb_8_23);
+  ASSERT_OVERLAP (cb_8_23, cb_16_23);
+  ASSERT_OVERLAP (cb_8_23, cb_16_31);
+  ASSERT_DISJOINT (cb_8_23, cb_24_31);
+}
+
+/* Run all of the selftests within this file.  */
+
+void
+analyzer_store_cc_tests ()
+{
+  /* Currently just the binding-key overlap tests.  */
+  test_binding_key_overlap ();
+}
+
+} // namespace selftest
+
+#endif /* CHECKING_P */
+
+} // namespace ana
+
+#endif /* #if ENABLE_ANALYZER */
--- /dev/null
+/* Classes for modeling the state of memory.
+ Copyright (C) 2020 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_ANALYZER_STORE_H
+#define GCC_ANALYZER_STORE_H
+
+/* Implementation of the region-based ternary model described in:
+ "A Memory Model for Static Analysis of C Programs"
+ (Zhongxing Xu, Ted Kremenek, and Jian Zhang)
+ http://lcs.ios.ac.cn/~xuzb/canalyze/memmodel.pdf */
+
+/* The store models memory as a collection of "clusters", where regions
+ are partitioned into clusters via their base region.
+
+ For example, given:
+ int a, b, c;
+ struct coord { double x; double y; } verts[3];
+ then "verts[0].y" and "verts[1].x" both have "verts" as their base region.
+ Each of a, b, c, and verts will have their own clusters, so that we
+  know that writes to e.g. "verts[1].x" don't affect e.g. "a".
+
+ Within each cluster we store a map of bindings to values, where the
+ binding keys can be either concrete or symbolic.
+
+ Concrete bindings affect a specific range of bits relative to the start
+ of the base region of the cluster, whereas symbolic bindings affect
+ a specific subregion within the cluster.
+
+ Consider (from the symbolic-1.c testcase):
+
+ char arr[1024];
+ arr[2] = a; (1)
+ arr[3] = b; (2)
+ After (1) and (2), the cluster for "arr" has concrete bindings
+ for bits 16-23 and for bits 24-31, with svalues "INIT_VAL(a)"
+ and "INIT_VAL(b)" respectively:
+ cluster: {bits 16-23: "INIT_VAL(a)",
+ bits 24-31: "INIT_VAL(b)";
+ flags: {}}
+ Attempting to query unbound subregions e.g. arr[4] will
+ return "UNINITIALIZED".
+ "a" and "b" are each in their own clusters, with no explicit
+ bindings, and thus implicitly have value INIT_VAL(a) and INIT_VAL(b).
+
+ arr[3] = c; (3)
+ After (3), the concrete binding for bits 24-31 is replaced with the
+ svalue "INIT_VAL(c)":
+ cluster: {bits 16-23: "INIT_VAL(a)", (from before)
+ bits 24-31: "INIT_VAL(c)"; (updated)
+ flags: {}}
+
+ arr[i] = d; (4)
+ After (4), we lose the concrete bindings and replace them with a
+ symbolic binding for "arr[i]", with svalue "INIT_VAL(d)". We also
+ mark the cluster as having been "symbolically touched": future
+ attempts to query the values of subregions other than "arr[i]",
+ such as "arr[3]" are "UNKNOWN", since we don't know if the write
+ to arr[i] affected them.
+ cluster: {symbolic_key(arr[i]): "INIT_VAL(d)";
+ flags: {TOUCHED}}
+
+ arr[j] = e; (5)
+ After (5), we lose the symbolic binding for "arr[i]" since we could
+ have overwritten it, and add a symbolic binding for "arr[j]".
+     cluster: {symbolic_key(arr[j]): "INIT_VAL(e)"; (different symbolic
+ flags: {TOUCHED}} binding)
+
+ arr[3] = f; (6)
+ After (6), we lose the symbolic binding for "arr[j]" since we could
+ have overwritten it, and gain a concrete binding for bits 24-31
+   again, this time with svalue "INIT_VAL(f)":
+     cluster: {bits 24-31: "INIT_VAL(f)";
+ flags: {TOUCHED}}
+ The cluster is still flagged as touched, so that we know that
+ accesses to other elements are "UNKNOWN" rather than
+ "UNINITIALIZED".
+
+ Handling symbolic regions requires us to handle aliasing.
+
+ In the first example above, each of a, b, c and verts are non-symbolic
+ base regions and so their clusters are "concrete clusters", whereas given:
+ struct coord *p, *q;
+ then "*p" and "*q" are symbolic base regions, and thus "*p" and "*q"
+ have "symbolic clusters".
+
+ In the above, "verts[i].x" will have a symbolic *binding* within a
+ concrete cluster for "verts", whereas "*p" is a symbolic *cluster*.
+
+ Writes to concrete clusters can't affect other concrete clusters,
+ but can affect symbolic clusters; e.g. after:
+ verts[0].x = 42;
+ we bind 42 in the cluster for "verts", but the clusters for "b" and "c"
+ can't be affected. Any symbolic clusters for *p and for *q can be
+   affected, since *p and *q could alias verts.
+
+ Writes to a symbolic cluster can affect other clusters, both
+ concrete and symbolic; e.g. after:
+ p->x = 17;
+ we bind 17 within the cluster for "*p". The concrete clusters for a, b,
+ c, and verts could be affected, depending on whether *p aliases them.
+ Similarly, the symbolic cluster to *q could be affected. */
+
+namespace ana {
+
+class concrete_binding;
+
+/* An enum for discriminating between "direct" vs "default" levels of
+   mapping.  Also provides the special values used by hash traits
+   (see binding_key::mark_empty/mark_deleted).  */
+
+enum binding_kind
+{
+  /* Special-case value for hash support.
+     This is the initial entry, so that hash traits can have
+     empty_zero_p = true.  */
+  BK_empty = 0,
+
+  /* Special-case value for hash support (deleted slots).  */
+  BK_deleted,
+
+  /* The normal kind of mapping.  */
+  BK_direct,
+
+  /* A lower-priority kind of mapping, for use when inheriting
+     default values from a parent region.  */
+  BK_default
+};
+
+/* Get a string name for KIND.  */
+
+extern const char *binding_kind_to_string (enum binding_kind kind);
+
+/* Abstract base class for describing ranges of bits within a binding_map
+   that can have svalues bound to them.  */
+
+class binding_key
+{
+public:
+  virtual ~binding_key () {}
+  virtual bool concrete_p () const = 0;
+  bool symbolic_p () const { return !concrete_p (); }
+
+  /* Make a (consolidated) key for accessing R with the given KIND.  */
+  static const binding_key *make (store_manager *mgr, const region *r,
+				  enum binding_kind kind);
+
+  virtual void dump_to_pp (pretty_printer *pp, bool simple) const;
+  void dump (bool simple) const;
+
+  /* qsort-style comparators, for deterministic orderings of keys.  */
+  static int cmp_ptrs (const void *, const void *);
+  static int cmp (const binding_key *, const binding_key *);
+
+  virtual const concrete_binding *dyn_cast_concrete_binding () const
+  { return NULL; }
+
+  enum binding_kind get_kind () const { return m_kind; }
+
+  /* Hash-table support: the kind field doubles as the marker for
+     empty/deleted slots (BK_empty/BK_deleted).  */
+  void mark_deleted () { m_kind = BK_deleted; }
+  void mark_empty () { m_kind = BK_empty; }
+  bool is_deleted () const { return m_kind == BK_deleted; }
+  bool is_empty () const { return m_kind == BK_empty; }
+
+protected:
+  binding_key (enum binding_kind kind) : m_kind (kind) {}
+
+  /* For use by subclasses' hash (): fold in the kind.  */
+  hashval_t impl_hash () const
+  {
+    return m_kind;
+  }
+  /* For use by subclasses' operator==: compare the kinds.  */
+  bool impl_eq (const binding_key &other) const
+  {
+    return m_kind == other.m_kind;
+  }
+
+private:
+  enum binding_kind m_kind;
+};
+
+/* Concrete subclass of binding_key, for describing a concrete range of
+   bits within the binding_map (e.g. "bits 8-15").
+   Offsets are expressed in bits, relative to the start of the cluster's
+   base region.  */
+
+class concrete_binding : public binding_key
+{
+public:
+  /* This class is its own key for the purposes of consolidation.  */
+  typedef concrete_binding key_t;
+
+  concrete_binding (bit_offset_t start_bit_offset, bit_size_t size_in_bits,
+		    enum binding_kind kind)
+  : binding_key (kind),
+    m_start_bit_offset (start_bit_offset),
+    m_size_in_bits (size_in_bits)
+  {}
+  bool concrete_p () const FINAL OVERRIDE { return true; }
+
+  hashval_t hash () const
+  {
+    inchash::hash hstate;
+    hstate.add_wide_int (m_start_bit_offset);
+    hstate.add_wide_int (m_size_in_bits);
+    /* Fold in the binding kind via the base class.  */
+    return hstate.end () ^ binding_key::impl_hash ();
+  }
+  bool operator== (const concrete_binding &other) const
+  {
+    if (!binding_key::impl_eq (other))
+      return false;
+    return (m_start_bit_offset == other.m_start_bit_offset
+	    && m_size_in_bits == other.m_size_in_bits);
+  }
+
+  void dump_to_pp (pretty_printer *pp, bool simple) const FINAL OVERRIDE;
+
+  const concrete_binding *dyn_cast_concrete_binding () const FINAL OVERRIDE
+  { return this; }
+
+  bit_offset_t get_start_bit_offset () const { return m_start_bit_offset; }
+  bit_size_t get_size_in_bits () const { return m_size_in_bits; }
+  /* Return the next bit offset after the end of this binding.  */
+  bit_offset_t get_next_bit_offset () const
+  {
+    return m_start_bit_offset + m_size_in_bits;
+  }
+
+  /* Return true if this binding and OTHER have any bits in common.  */
+  bool overlaps_p (const concrete_binding &other) const;
+
+private:
+  bit_offset_t m_start_bit_offset;
+  bit_size_t m_size_in_bits;
+};
+
+} // namespace ana
+
+/* Hash via the member functions above; BK_empty is zero, so empty
+   slots can be zero-initialized.  */
+template <> struct default_hash_traits<ana::concrete_binding>
+: public member_function_hash_traits<ana::concrete_binding>
+{
+  static const bool empty_zero_p = true;
+};
+
+namespace ana {
+
+/* Concrete subclass of binding_key, for describing a symbolic set of
+   bits within the binding_map in terms of a region (e.g. "arr[i]").
+   Regions are compared and hashed by pointer identity.  */
+
+class symbolic_binding : public binding_key
+{
+public:
+  /* This class is its own key for the purposes of consolidation.  */
+  typedef symbolic_binding key_t;
+
+  symbolic_binding (const region *region, enum binding_kind kind)
+  : binding_key (kind),
+    m_region (region)
+  {}
+  bool concrete_p () const FINAL OVERRIDE { return false; }
+
+  hashval_t hash () const
+  {
+    /* Hash on the region's pointer identity (presumably regions are
+       consolidated by their manager — confirm).  */
+    return (binding_key::impl_hash () ^ (long)m_region);
+  }
+  bool operator== (const symbolic_binding &other) const
+  {
+    if (!binding_key::impl_eq (other))
+      return false;
+    return (m_region == other.m_region);
+  }
+
+  void dump_to_pp (pretty_printer *pp, bool simple) const FINAL OVERRIDE;
+
+  const region *get_region () const { return m_region; }
+
+private:
+  const region *m_region;
+};
+
+} // namespace ana
+
+/* Hash via the member functions above; BK_empty is zero, so empty
+   slots can be zero-initialized.  */
+template <> struct default_hash_traits<ana::symbolic_binding>
+: public member_function_hash_traits<ana::symbolic_binding>
+{
+  static const bool empty_zero_p = true;
+};
+
+namespace ana {
+
+/* A mapping from binding_keys to svalues, for use by binding_cluster
+   and compound_svalue.  */
+
+class binding_map
+{
+public:
+  typedef hash_map <const binding_key *, const svalue *> map_t;
+  typedef map_t::iterator iterator_t;
+
+  binding_map () : m_map () {}
+  binding_map (const binding_map &other);
+  binding_map& operator=(const binding_map &other);
+
+  bool operator== (const binding_map &other) const;
+  bool operator!= (const binding_map &other) const
+  {
+    return !(*this == other);
+  }
+
+  hashval_t hash () const;
+
+  /* Get the svalue bound to KEY, or NULL if KEY is unbound.  */
+  const svalue *get (const binding_key *key) const
+  {
+    /* The const_cast is needed since hash_map::get is non-const;
+       the map itself is not modified.  */
+    const svalue **slot = const_cast<map_t &> (m_map).get (key);
+    if (slot)
+      return *slot;
+    else
+      return NULL;
+  }
+  /* Bind K to V (which must be non-NULL); result as per
+     hash_map::put.  */
+  bool put (const binding_key *k, const svalue *v)
+  {
+    gcc_assert (v);
+    return m_map.put (k, v);
+  }
+
+  void remove (const binding_key *k) { m_map.remove (k); }
+  void empty () { m_map.empty (); }
+
+  iterator_t begin () const { return m_map.begin (); }
+  iterator_t end () const { return m_map.end (); }
+  size_t elements () const { return m_map.elements (); }
+
+  void dump_to_pp (pretty_printer *pp, bool simple, bool multiline) const;
+  void dump (bool simple) const;
+
+private:
+  map_t m_map;
+};
+
+/* Concept: BindingVisitor, for use by binding_cluster::for_each_binding
+ and store::for_each_binding.
+
+ Should implement:
+ void on_binding (const binding_key *key, const svalue *&sval);
+*/
+
+/* All of the bindings within a store for regions that share the same
+   base region.  */
+
+class binding_cluster
+{
+public:
+  friend class store;
+
+  typedef hash_map <const binding_key *, const svalue *> map_t;
+  typedef map_t::iterator iterator_t;
+
+  binding_cluster (const region *base_region)
+  : m_base_region (base_region), m_map (),
+    m_escaped (false), m_touched (false) {}
+  binding_cluster (const binding_cluster &other);
+  binding_cluster& operator=(const binding_cluster &other);
+
+  bool operator== (const binding_cluster &other) const;
+  bool operator!= (const binding_cluster &other) const
+  {
+    return !(*this == other);
+  }
+
+  hashval_t hash () const;
+
+  bool symbolic_p () const;
+
+  void dump_to_pp (pretty_printer *pp, bool simple, bool multiline) const;
+  void dump (bool simple) const;
+
+  /* Bind the given region to the given svalue, with binding KIND.  */
+  void bind (store_manager *mgr, const region *, const svalue *,
+	     binding_kind kind);
+
+  void clobber_region (store_manager *mgr, const region *reg);
+  void purge_region (store_manager *mgr, const region *reg);
+  void zero_fill_region (store_manager *mgr, const region *reg);
+  void mark_region_as_unknown (store_manager *mgr, const region *reg);
+
+  /* Lookups of the svalue bound to REG, with various fallbacks.  */
+  const svalue *get_binding (store_manager *mgr, const region *reg,
+			     binding_kind kind) const;
+  const svalue *get_binding_recursive (store_manager *mgr,
+				       const region *reg,
+				       enum binding_kind kind) const;
+  const svalue *get_any_binding (store_manager *mgr,
+				 const region *reg) const;
+  const svalue *maybe_get_compound_binding (store_manager *mgr,
+					    const region *reg) const;
+
+  void remove_overlapping_bindings (store_manager *mgr, const region *reg);
+
+  /* Call CB (SVAL, USER_DATA) for each svalue bound in this cluster.  */
+  template <typename T>
+  void for_each_value (void (*cb) (const svalue *sval, T user_data),
+		       T user_data)
+  {
+    for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
+      cb ((*iter).second, user_data);
+  }
+
+  static bool can_merge_p (const binding_cluster *cluster_a,
+			   const binding_cluster *cluster_b,
+			   binding_cluster *out_cluster,
+			   store_manager *mgr,
+			   model_merger *merger);
+  void make_unknown_relative_to (const binding_cluster *other_cluster,
+				 store_manager *mgr);
+
+  void mark_as_escaped ();
+  void on_unknown_fncall (const gcall *call, store_manager *mgr);
+
+  bool escaped_p () const { return m_escaped; }
+  bool touched_p () const { return m_touched; }
+
+  bool redundant_p () const;
+  bool empty_p () const { return m_map.elements () == 0; }
+
+  void get_representative_path_vars (const region_model *model,
+				     svalue_set *visited,
+				     const region *base_reg,
+				     const svalue *sval,
+				     auto_vec<path_var> *out_pvs) const;
+
+  const svalue *maybe_get_simple_value (store_manager *mgr) const;
+
+  /* Call V.on_binding for every key/svalue pair in this cluster
+     (see the BindingVisitor concept).  */
+  template <typename BindingVisitor>
+  void for_each_binding (BindingVisitor &v)
+  {
+    for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
+      {
+	const binding_key *key = (*iter).first;
+	const svalue *&sval = (*iter).second;
+	v.on_binding (key, sval);
+      }
+  }
+
+  iterator_t begin () const { return m_map.begin (); }
+  iterator_t end () const { return m_map.end (); }
+
+private:
+  const svalue *get_any_value (const binding_key *key) const;
+  void get_overlapping_bindings (store_manager *mgr, const region *reg,
+				 auto_vec<const binding_key *> *out);
+  void bind_compound_sval (store_manager *mgr,
+			   const region *reg,
+			   const compound_svalue *compound_sval);
+  void bind_key (const binding_key *key, const svalue *sval);
+
+  const region *m_base_region;
+
+  binding_map m_map;
+
+  /* Has a pointer to this cluster "escaped" into a part of the program
+     we don't know about (via a call to a function with an unknown body,
+     or by being passed in as a pointer param of a "top-level" function call).
+     Such regions could be overwritten when other such functions are called,
+     even if the region is no longer reachable by pointers that we are
+     tracking.  */
+  bool m_escaped;
+
+  /* Has this cluster been written to via a symbolic binding?
+     If so, then we don't know anything about unbound subregions,
+     so we can't use initial_svalue, treat them as uninitialized, or
+     inherit values from a parent region.  */
+  bool m_touched;
+};
+
+/* The mapping from regions to svalues.
+   This is actually expressed by subdividing into clusters, to better
+   handle aliasing.  */
+
+class store
+{
+public:
+  typedef hash_map <const region *, binding_cluster *> cluster_map_t;
+
+  store ();
+  store (const store &other);
+  ~store ();
+
+  store &operator= (const store &other);
+
+  bool operator== (const store &other) const;
+  bool operator!= (const store &other) const
+  {
+    return !(*this == other);
+  }
+
+  hashval_t hash () const;
+
+  void dump_to_pp (pretty_printer *pp, bool summarize, bool multiline,
+		   store_manager *mgr) const;
+  void dump (bool simple) const;
+  void summarize_to_pp (pretty_printer *pp, bool simple) const;
+
+  /* Lookups of the svalue bound to REG (if any).  */
+  const svalue *get_direct_binding (store_manager *mgr, const region *reg);
+  const svalue *get_default_binding (store_manager *mgr, const region *reg);
+  const svalue *get_any_binding (store_manager *mgr, const region *reg) const;
+
+  bool called_unknown_fn_p () const { return m_called_unknown_fn; }
+
+  void set_value (store_manager *mgr, const region *lhs_reg,
+		  const svalue *rhs_sval, enum binding_kind kind);
+  void clobber_region (store_manager *mgr, const region *reg);
+  void purge_region (store_manager *mgr, const region *reg);
+  void zero_fill_region (store_manager *mgr, const region *reg);
+  void mark_region_as_unknown (store_manager *mgr, const region *reg);
+
+  /* Cluster access; BASE_REG must be its own base region.  */
+  const binding_cluster *get_cluster (const region *base_reg) const;
+  binding_cluster *get_cluster (const region *base_reg);
+  binding_cluster *get_or_create_cluster (const region *base_reg);
+  void purge_cluster (const region *base_reg);
+
+  /* Call CB (BASE_REG, USER_DATA) for each cluster in this store.  */
+  template <typename T>
+  void for_each_cluster (void (*cb) (const region *base_reg, T user_data),
+			 T user_data) const
+  {
+    for (cluster_map_t::iterator iter = m_cluster_map.begin ();
+	 iter != m_cluster_map.end (); ++iter)
+      cb ((*iter).first, user_data);
+  }
+
+  static bool can_merge_p (const store *store_a, const store *store_b,
+			   store *out_store, store_manager *mgr,
+			   model_merger *merger);
+
+  void mark_as_escaped (const region *base_reg);
+  void on_unknown_fncall (const gcall *call, store_manager *mgr);
+  bool escaped_p (const region *reg) const;
+
+  void get_representative_path_vars (const region_model *model,
+				     svalue_set *visited,
+				     const svalue *sval,
+				     auto_vec<path_var> *out_pvs) const;
+
+  cluster_map_t::iterator begin () const { return m_cluster_map.begin (); }
+  cluster_map_t::iterator end () const { return m_cluster_map.end (); }
+
+  tristate eval_alias (const region *base_reg_a,
+		       const region *base_reg_b);
+
+  /* Call V.on_binding for every binding in every cluster.  */
+  template <typename BindingVisitor>
+  void for_each_binding (BindingVisitor &v)
+  {
+    for (cluster_map_t::iterator iter = m_cluster_map.begin ();
+	 iter != m_cluster_map.end (); ++iter)
+      (*iter).second->for_each_binding (v);
+  }
+
+  void canonicalize (store_manager *mgr);
+  void loop_replay_fixup (const store *other_store,
+			  region_model_manager *mgr);
+
+private:
+  void remove_overlapping_bindings (store_manager *mgr, const region *reg);
+
+  cluster_map_t m_cluster_map;
+
+  /* If this is true, then unknown code has been called, and so
+     any global variable that isn't currently modelled by the store
+     has unknown state, rather than being in an "initial state".
+     This is to avoid having to mark (and thus explicitly track)
+     every global when an unknown function is called; instead, they
+     can be tracked implicitly.  */
+  bool m_called_unknown_fn;
+};
+
+/* A class responsible for owning and consolidating binding keys
+   (both concrete and symbolic).
+   Key instances are immutable as far as clients are concerned, so they
+   are provided as "const" ptrs.  */
+
+class store_manager
+{
+public:
+  store_manager (region_model_manager *mgr) : m_mgr (mgr) {}
+
+  /* binding consolidation: return the shared key instance for the
+     given parameters, creating it on first use.  */
+  const concrete_binding *
+  get_concrete_binding (bit_offset_t start_bit_offset,
+			bit_offset_t size_in_bits,
+			enum binding_kind kind);
+  const symbolic_binding *
+  get_symbolic_binding (const region *region,
+			enum binding_kind kind);
+
+  region_model_manager *get_svalue_manager () const
+  {
+    return m_mgr;
+  }
+
+  void log_stats (logger *logger, bool show_objs) const;
+
+private:
+  region_model_manager *m_mgr;
+  /* The consolidated key instances.  */
+  consolidation_map<concrete_binding> m_concrete_binding_key_mgr;
+  consolidation_map<symbolic_binding> m_symbolic_binding_key_mgr;
+};
+
+} // namespace ana
+
+#endif /* GCC_ANALYZER_STORE_H */
--- /dev/null
+/* Symbolic values.
+ Copyright (C) 2019-2020 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tree.h"
+#include "diagnostic-core.h"
+#include "gimple-pretty-print.h"
+#include "function.h"
+#include "basic-block.h"
+#include "gimple.h"
+#include "gimple-iterator.h"
+#include "diagnostic-core.h"
+#include "graphviz.h"
+#include "options.h"
+#include "cgraph.h"
+#include "tree-dfa.h"
+#include "stringpool.h"
+#include "convert.h"
+#include "target.h"
+#include "fold-const.h"
+#include "tree-pretty-print.h"
+#include "tristate.h"
+#include "bitmap.h"
+#include "selftest.h"
+#include "function.h"
+#include "analyzer/analyzer.h"
+#include "analyzer/analyzer-logging.h"
+#include "options.h"
+#include "cgraph.h"
+#include "cfg.h"
+#include "digraph.h"
+#include "analyzer/call-string.h"
+#include "analyzer/program-point.h"
+#include "analyzer/store.h"
+#include "analyzer/region-model.h"
+
+#if ENABLE_ANALYZER
+
+namespace ana {
+
+/* struct complexity. */
+
+/* Get complexity for a new node that references REG
+ (the complexity of REG, plus one for the new node). */
+
+complexity::complexity (const region *reg)
+: m_num_nodes (reg->get_complexity ().m_num_nodes + 1),
+ m_max_depth (reg->get_complexity ().m_max_depth + 1)
+{
+}
+
+/* Get complexity for a new node that references SVAL.
+ (the complexity of SVAL, plus one for the new node). */
+
+complexity::complexity (const svalue *sval)
+: m_num_nodes (sval->get_complexity ().m_num_nodes + 1),
+ m_max_depth (sval->get_complexity ().m_max_depth + 1)
+{
+}
+
+/* Get complexity for a new node that references nodes with complexity
+ C1 and C2. */
+
+complexity
+complexity::from_pair (const complexity &c1, const complexity &c2)
+{
+ return complexity (c1.m_num_nodes + c2.m_num_nodes + 1,
+ MAX (c1.m_max_depth, c2.m_max_depth) + 1);
+}
+
+/* class svalue and its various subclasses. */
+
+/* class svalue. */
+
+/* Dump a representation of this svalue to stderr.
+   SIMPLE controls the verbosity of the dump (passed through to
+   dump_to_pp).  For use from the debugger.  */
+
+DEBUG_FUNCTION void
+svalue::dump (bool simple) const
+{
+  pretty_printer pp;
+  /* Use the tree-aware format decoder so tree arguments print readably.  */
+  pp_format_decoder (&pp) = default_tree_printer;
+  /* Match the colorization setting of the global diagnostic printer.  */
+  pp_show_color (&pp) = pp_show_color (global_dc->printer);
+  pp.buffer->stream = stderr;
+  dump_to_pp (&pp, simple);
+  pp_newline (&pp);
+  pp_flush (&pp);
+}
+
+/* Generate a textual representation of this svalue for debugging purposes. */
+
+label_text
+svalue::get_desc (bool simple) const
+{
+ pretty_printer pp;
+ pp_format_decoder (&pp) = default_tree_printer;
+ dump_to_pp (&pp, simple);
+ return label_text::take (xstrdup (pp_formatted_text (&pp)));
+}
+
+/* If this svalue is a constant_svalue, return the underlying tree constant.
+   Otherwise return NULL_TREE.  */
+
+tree
+svalue::maybe_get_constant () const
+{
+  const constant_svalue *cst_sval = dyn_cast_constant_svalue ();
+  return cst_sval ? cst_sval->get_constant () : NULL_TREE;
+}
+
+/* If this svalue is a cast (i.e a unaryop NOP_EXPR), return the underlying
+   svalue.
+   Otherwise return NULL.  */
+
+const svalue *
+svalue::maybe_undo_cast () const
+{
+  const unaryop_svalue *unaryop_sval = dyn_cast_unaryop_svalue ();
+  if (unaryop_sval && unaryop_sval->get_op () == NOP_EXPR)
+    return unaryop_sval->get_arg ();
+  return NULL;
+}
+
+/* If this svalue is an unmergeable decorator around another svalue, return
+   the underlying svalue.
+   Otherwise return this svalue.  */
+
+const svalue *
+svalue::unwrap_any_unmergeable () const
+{
+  const unmergeable_svalue *decorator = dyn_cast_unmergeable_svalue ();
+  if (!decorator)
+    return this;
+  return decorator->get_arg ();
+}
+
+/* Attempt to merge THIS with OTHER, returning the merged svalue.
+   Return NULL if not mergeable.
+   MGR is used to create any new svalues needed (widening/unknown);
+   MERGER supplies the program point for widening.  */
+
+const svalue *
+svalue::can_merge_p (const svalue *other,
+		     region_model_manager *mgr,
+		     model_merger *merger) const
+{
+  /* Both svalues must have a type, and the types must be compatible,
+     for a merge to be meaningful.  */
+  if (!(get_type () && other->get_type ()))
+    return NULL;
+
+  if (!types_compatible_p (get_type (), other->get_type ()))
+    return NULL;
+
+  /* Reject attempts to merge unmergeable svalues.  */
+  if ((get_kind () == SK_UNMERGEABLE)
+      || (other->get_kind () == SK_UNMERGEABLE))
+    return NULL;
+
+  /* Reject attempts to merge NULL pointers with not-NULL-pointers.  */
+  if (POINTER_TYPE_P (get_type ()))
+    {
+      bool null0 = false;
+      bool null1 = false;
+      if (tree cst0 = maybe_get_constant ())
+	if (zerop (cst0))
+	  null0 = true;
+      if (tree cst1 = other->maybe_get_constant ())
+	if (zerop (cst1))
+	  null1 = true;
+      if (null0 != null1)
+	return NULL;
+    }
+
+  /* Widening.  */
+  /* Merge: (new_cst, existing_cst) -> widen (existing, new).  */
+  if (maybe_get_constant () && other->maybe_get_constant ())
+    {
+      return mgr->get_or_create_widening_svalue (other->get_type (),
+						 merger->m_point,
+						 other, this);
+    }
+
+  /* Merge: (Widen(existing_val, V), existing_val) -> Widen (existing_val, V)
+     and thus get a fixed point.  */
+  if (const widening_svalue *widen_sval = dyn_cast_widening_svalue ())
+    {
+      if (other == widen_sval->get_base_svalue ())
+	return this;
+      if (other == widen_sval->get_iter_svalue ())
+	return this;
+    }
+
+  if (const binop_svalue *binop_sval = dyn_cast_binop_svalue ())
+    if (const widening_svalue *widen_arg0
+	= binop_sval->get_arg0 ()->dyn_cast_widening_svalue ())
+      {
+	if (other == binop_sval->get_arg1 ())
+	  {
+	    /* Merger of: (Widen(..., OTHER) BINOP X)
+	       and      : OTHER
+	       to       : (Widen(..., OTHER) BINOP X)
+	       e.g. merge of Widen(0, 1) + 1 with 1 to the Widen(0, 1) + 1.  */
+	    return this;
+	  }
+
+	/* Merger of : (Widen() BINOP X)
+	   and       : Widen()
+	   to        : Widen()
+	   e.g. merge of Widen(0, 1) + 1 and Widen(0, 1) to Widen(0, 1).
+	   However, we want to update constraints for this case, since we're
+	   considering another iteration.
+	   Presumably we also want to ensure that it converges; we don't want
+	   a descending chain of constraints.  */
+	if (other == widen_arg0)
+	  {
+	    return widen_arg0;
+	  }
+      }
+
+  /* Fallback: no special case applied; merge to "unknown" of this type.  */
+  return mgr->get_or_create_unknown_svalue (get_type ());
+}
+
+/* Determine if this svalue is either within LIVE_SVALUES, or is implicitly
+   live with respect to LIVE_SVALUES and MODEL.  */
+
+bool
+svalue::live_p (const svalue_set &live_svalues,
+		const region_model *model) const
+{
+  /* Determine if SVAL is explicitly live.  */
+  /* NOTE(review): const_cast presumably needed because hash_set's
+     "contains" lacks a const overload; the set is not modified —
+     verify against hash_set's interface.  */
+  if (const_cast<svalue_set &> (live_svalues).contains (this))
+    return true;
+
+  /* Otherwise, determine if SVAL is implicitly live due to being made of
+     other live svalues.  */
+  return implicitly_live_p (live_svalues, model);
+}
+
+/* Base implementation of svalue::implicitly_live_p. */
+
+bool
+svalue::implicitly_live_p (const svalue_set &, const region_model *) const
+{
+ return false;
+}
+
+/* class region_svalue : public svalue. */
+
+/* Implementation of svalue::dump_to_pp vfunc for region_svalue. */
+
+void
+region_svalue::dump_to_pp (pretty_printer *pp, bool simple) const
+{
+ if (simple)
+ {
+ pp_string (pp, "&");
+ m_reg->dump_to_pp (pp, simple);
+ }
+ else
+ {
+ pp_string (pp, "region_svalue(");
+ print_quoted_type (pp, get_type ());
+ pp_string (pp, ", ");
+ m_reg->dump_to_pp (pp, simple);
+ pp_string (pp, ")");
+ }
+}
+
+/* Implementation of svalue::accept vfunc for region_svalue. */
+
+void
+region_svalue::accept (visitor *v) const
+{
+ v->visit_region_svalue (this);
+ m_reg->accept (v);
+}
+
+/* Evaluate the condition LHS OP RHS.
+   Subroutine of region_model::eval_condition for when we have a pair of
+   pointers.
+   Returns a tristate: known true/false when pointer identity decides the
+   comparison, otherwise "unknown".  */
+
+tristate
+region_svalue::eval_condition (const region_svalue *lhs,
+			       enum tree_code op,
+			       const region_svalue *rhs)
+{
+  /* See if they point to the same region.  */
+  const region *lhs_reg = lhs->get_pointee ();
+  const region *rhs_reg = rhs->get_pointee ();
+  bool ptr_equality = lhs_reg == rhs_reg;
+  switch (op)
+    {
+    default:
+      gcc_unreachable ();
+
+    case EQ_EXPR:
+      /* Both branches return, so the "break" that used to follow was
+	 unreachable dead code; removed.  */
+      if (ptr_equality)
+	return tristate::TS_TRUE;
+      else
+	return tristate::TS_FALSE;
+
+    case NE_EXPR:
+      if (ptr_equality)
+	return tristate::TS_FALSE;
+      else
+	return tristate::TS_TRUE;
+
+    case GE_EXPR:
+    case LE_EXPR:
+      /* A pointer compares >= and <= against itself as true; otherwise
+	 the ordering is unknown.  */
+      if (ptr_equality)
+	return tristate::TS_TRUE;
+      break;
+
+    case GT_EXPR:
+    case LT_EXPR:
+      /* A pointer is never strictly greater or less than itself.  */
+      if (ptr_equality)
+	return tristate::TS_FALSE;
+      break;
+    }
+
+  return tristate::TS_UNKNOWN;
+}
+
+/* class constant_svalue : public svalue. */
+
+/* Implementation of svalue::dump_to_pp vfunc for constant_svalue. */
+
+void
+constant_svalue::dump_to_pp (pretty_printer *pp, bool simple) const
+{
+ if (simple)
+ {
+ pp_string (pp, "(");
+ dump_tree (pp, get_type ());
+ pp_string (pp, ")");
+ dump_tree (pp, m_cst_expr);
+ }
+ else
+ {
+ pp_string (pp, "constant_svalue(");
+ print_quoted_type (pp, get_type ());
+ pp_string (pp, ", ");
+ dump_tree (pp, m_cst_expr);
+ pp_string (pp, ")");
+ }
+}
+
+/* Implementation of svalue::accept vfunc for constant_svalue. */
+
+void
+constant_svalue::accept (visitor *v) const
+{
+ v->visit_constant_svalue (this);
+}
+
+/* Implementation of svalue::implicitly_live_p vfunc for constant_svalue.
+ Constants are implicitly live. */
+
+bool
+constant_svalue::implicitly_live_p (const svalue_set &,
+ const region_model *) const
+{
+ return true;
+}
+
+/* Evaluate the condition LHS OP RHS.
+   Subroutine of region_model::eval_condition for when we have a pair of
+   constants.
+   Returns "unknown" if the types aren't compatible or folding doesn't
+   produce a definite boolean constant.  */
+
+tristate
+constant_svalue::eval_condition (const constant_svalue *lhs,
+				 enum tree_code op,
+				 const constant_svalue *rhs)
+{
+  tree lhs_const = lhs->get_constant ();
+  tree rhs_const = rhs->get_constant ();
+
+  gcc_assert (CONSTANT_CLASS_P (lhs_const));
+  gcc_assert (CONSTANT_CLASS_P (rhs_const));
+
+  /* Check for comparable types.  */
+  if (types_compatible_p (TREE_TYPE (lhs_const), TREE_TYPE (rhs_const)))
+    {
+      /* fold_binary may yield something other than the boolean constant
+	 nodes (or NULL_TREE); anything else falls through to "unknown".  */
+      tree comparison
+	= fold_binary (op, boolean_type_node, lhs_const, rhs_const);
+      if (comparison == boolean_true_node)
+	return tristate (tristate::TS_TRUE);
+      if (comparison == boolean_false_node)
+	return tristate (tristate::TS_FALSE);
+    }
+  return tristate::TS_UNKNOWN;
+}
+
+/* class unknown_svalue : public svalue. */
+
+/* Implementation of svalue::dump_to_pp vfunc for unknown_svalue. */
+
+void
+unknown_svalue::dump_to_pp (pretty_printer *pp, bool simple) const
+{
+ if (simple)
+ {
+ pp_string (pp, "UNKNOWN(");
+ if (get_type ())
+ dump_tree (pp, get_type ());
+ pp_character (pp, ')');
+ }
+ else
+ {
+ pp_string (pp, "unknown_svalue(");
+ if (get_type ())
+ dump_tree (pp, get_type ());
+ pp_character (pp, ')');
+ }
+}
+
+/* Implementation of svalue::accept vfunc for unknown_svalue. */
+
+void
+unknown_svalue::accept (visitor *v) const
+{
+ v->visit_unknown_svalue (this);
+}
+
+/* Get a string for KIND for use in debug dumps.
+   The returned string is a static literal; the caller must not free it.  */
+
+const char *
+poison_kind_to_str (enum poison_kind kind)
+{
+  switch (kind)
+    {
+    default:
+      /* All enum values are handled below; anything else is a bug.  */
+      gcc_unreachable ();
+    case POISON_KIND_FREED:
+      return "freed";
+    case POISON_KIND_POPPED_STACK:
+      return "popped stack";
+    }
+}
+
+/* class poisoned_svalue : public svalue. */
+
+/* Implementation of svalue::dump_to_pp vfunc for poisoned_svalue. */
+
+void
+poisoned_svalue::dump_to_pp (pretty_printer *pp, bool simple) const
+{
+ if (simple)
+ pp_printf (pp, "POISONED(%s)", poison_kind_to_str (m_kind));
+ else
+ pp_printf (pp, "poisoned_svalue(%s)", poison_kind_to_str (m_kind));
+}
+
+/* Implementation of svalue::accept vfunc for poisoned_svalue. */
+
+void
+poisoned_svalue::accept (visitor *v) const
+{
+ v->visit_poisoned_svalue (this);
+}
+
+/* class setjmp_svalue's implementation is in engine.cc, so that it can use
+ the declaration of exploded_node. */
+
+/* class initial_svalue : public svalue. */
+
+/* Implementation of svalue::dump_to_pp vfunc for initial_svalue. */
+
+void
+initial_svalue::dump_to_pp (pretty_printer *pp, bool simple) const
+{
+ if (simple)
+ {
+ pp_string (pp, "INIT_VAL(");
+ m_reg->dump_to_pp (pp, simple);
+ pp_string (pp, ")");
+ }
+ else
+ {
+ pp_string (pp, "initial_svalue(");
+ print_quoted_type (pp, get_type ());
+ pp_string (pp, ", ");
+ m_reg->dump_to_pp (pp, simple);
+ pp_string (pp, ")");
+ }
+}
+
+/* Implementation of svalue::accept vfunc for initial_svalue. */
+
+void
+initial_svalue::accept (visitor *v) const
+{
+ v->visit_initial_svalue (this);
+ m_reg->accept (v);
+}
+
+/* Implementation of svalue::implicitly_live_p vfunc for initial_svalue. */
+
+bool
+initial_svalue::implicitly_live_p (const svalue_set &,
+ const region_model *model) const
+{
+ /* This svalue may be implicitly live if the region still implicitly
+ has its initial value and is reachable. */
+
+ /* It must be a region that exists; we don't want to consider
+ INIT_VAL(R) as still being implicitly reachable if R is in
+ a popped stack frame. */
+ if (model->region_exists_p (m_reg))
+ {
+ const svalue *reg_sval = model->get_store_value (m_reg);
+ if (reg_sval == this)
+ return true;
+ }
+
+ return false;
+}
+
+/* class unaryop_svalue : public svalue. */
+
+/* Implementation of svalue::dump_to_pp vfunc for unaryop_svalue. */
+
+void
+unaryop_svalue::dump_to_pp (pretty_printer *pp, bool simple) const
+{
+ if (simple)
+ {
+ if (m_op == NOP_EXPR)
+ {
+ pp_string (pp, "CAST(");
+ dump_tree (pp, get_type ());
+ pp_string (pp, ", ");
+ m_arg->dump_to_pp (pp, simple);
+ pp_character (pp, ')');
+ }
+ else
+ {
+ pp_character (pp, '(');
+ pp_string (pp, get_tree_code_name (m_op));
+ //pp_string (pp, op_symbol_code (m_op));
+ m_arg->dump_to_pp (pp, simple);
+ pp_character (pp, ')');
+ }
+ }
+ else
+ {
+ pp_string (pp, "unaryop_svalue (");
+ pp_string (pp, get_tree_code_name (m_op));
+ pp_string (pp, ", ");
+ m_arg->dump_to_pp (pp, simple);
+ pp_character (pp, ')');
+ }
+}
+
+/* Implementation of svalue::accept vfunc for unaryop_svalue. */
+
+void
+unaryop_svalue::accept (visitor *v) const
+{
+ v->visit_unaryop_svalue (this);
+ m_arg->accept (v);
+}
+
+/* Implementation of svalue::implicitly_live_p vfunc for unaryop_svalue. */
+
+bool
+unaryop_svalue::implicitly_live_p (const svalue_set &live_svalues,
+ const region_model *model) const
+{
+ return get_arg ()->live_p (live_svalues, model);
+}
+
+/* class binop_svalue : public svalue. */
+
+/* Implementation of svalue::dump_to_pp vfunc for binop_svalue. */
+
+void
+binop_svalue::dump_to_pp (pretty_printer *pp, bool simple) const
+{
+ if (simple)
+ {
+ pp_character (pp, '(');
+ m_arg0->dump_to_pp (pp, simple);
+ pp_string (pp, op_symbol_code (m_op));
+ m_arg1->dump_to_pp (pp, simple);
+ pp_character (pp, ')');
+ }
+ else
+ {
+ pp_string (pp, "binop_svalue (");
+ pp_string (pp, get_tree_code_name (m_op));
+ pp_string (pp, ", ");
+ m_arg0->dump_to_pp (pp, simple);
+ pp_string (pp, ", ");
+ m_arg1->dump_to_pp (pp, simple);
+ pp_character (pp, ')');
+ }
+}
+
+/* Implementation of svalue::accept vfunc for binop_svalue. */
+
+void
+binop_svalue::accept (visitor *v) const
+{
+ v->visit_binop_svalue (this);
+ m_arg0->accept (v);
+ m_arg1->accept (v);
+}
+
+/* Implementation of svalue::implicitly_live_p vfunc for binop_svalue. */
+
+bool
+binop_svalue::implicitly_live_p (const svalue_set &live_svalues,
+ const region_model *model) const
+{
+ return (get_arg0 ()->live_p (live_svalues, model)
+ && get_arg1 ()->live_p (live_svalues, model));
+}
+
+/* class sub_svalue : public svalue. */
+
+/* sub_svalue's ctor.
+   The complexity is derived from both the parent svalue and the
+   subregion, via complexity::from_pair.  */
+
+sub_svalue::sub_svalue (tree type, const svalue *parent_svalue,
+			const region *subregion)
+: svalue (complexity::from_pair (parent_svalue->get_complexity (),
+				 subregion->get_complexity ()),
+	  type),
+  m_parent_svalue (parent_svalue), m_subregion (subregion)
+{
+}
+
+/* Implementation of svalue::dump_to_pp vfunc for sub_svalue.  */
+
+void
+sub_svalue::dump_to_pp (pretty_printer *pp, bool simple) const
+{
+  if (simple)
+    {
+      pp_string (pp, "SUB(");
+      m_parent_svalue->dump_to_pp (pp, simple);
+      pp_string (pp, ", ");
+      m_subregion->dump_to_pp (pp, simple);
+      pp_character (pp, ')');
+    }
+  else
+    {
+      /* Previously a stray ", " was emitted immediately after the open
+	 paren, yielding malformed "sub_svalue (, ..." output; dropped.  */
+      pp_string (pp, "sub_svalue (");
+      m_parent_svalue->dump_to_pp (pp, simple);
+      pp_string (pp, ", ");
+      m_subregion->dump_to_pp (pp, simple);
+      pp_character (pp, ')');
+    }
+}
+
+/* Implementation of svalue::accept vfunc for sub_svalue. */
+
+void
+sub_svalue::accept (visitor *v) const
+{
+ v->visit_sub_svalue (this);
+ m_parent_svalue->accept (v);
+ m_subregion->accept (v);
+}
+
+/* Implementation of svalue::implicitly_live_p vfunc for sub_svalue. */
+
+bool
+sub_svalue::implicitly_live_p (const svalue_set &live_svalues,
+ const region_model *model) const
+{
+ return get_parent ()->live_p (live_svalues, model);
+}
+
+/* class widening_svalue : public svalue. */
+
+/* Implementation of svalue::dump_to_pp vfunc for widening_svalue.  */
+
+void
+widening_svalue::dump_to_pp (pretty_printer *pp, bool simple) const
+{
+  if (simple)
+    {
+      pp_string (pp, "WIDENING(");
+      pp_character (pp, '{');
+      m_point.print (pp, format (false));
+      pp_string (pp, "}, ");
+      m_base_sval->dump_to_pp (pp, simple);
+      pp_string (pp, ", ");
+      m_iter_sval->dump_to_pp (pp, simple);
+      pp_character (pp, ')');
+    }
+  else
+    {
+      /* Previously a stray ", " was emitted immediately after the open
+	 paren, yielding malformed "widening_svalue (, ..." output;
+	 dropped.  */
+      pp_string (pp, "widening_svalue (");
+      pp_character (pp, '{');
+      m_point.print (pp, format (false));
+      pp_string (pp, "}, ");
+      m_base_sval->dump_to_pp (pp, simple);
+      pp_string (pp, ", ");
+      m_iter_sval->dump_to_pp (pp, simple);
+      pp_character (pp, ')');
+    }
+}
+
+/* Implementation of svalue::accept vfunc for widening_svalue. */
+
+void
+widening_svalue::accept (visitor *v) const
+{
+ v->visit_widening_svalue (this);
+ m_base_sval->accept (v);
+ m_iter_sval->accept (v);
+}
+
+/* Attempt to determine in which direction this value is changing
+   w.r.t. the initial value.
+   Returns DIR_UNKNOWN if either the base or iterated value is not a
+   compile-time constant, or if folding can't order them.  */
+
+enum widening_svalue::direction_t
+widening_svalue::get_direction () const
+{
+  tree base_cst = m_base_sval->maybe_get_constant ();
+  if (base_cst == NULL_TREE)
+    return DIR_UNKNOWN;
+  tree iter_cst = m_iter_sval->maybe_get_constant ();
+  if (iter_cst == NULL_TREE)
+    return DIR_UNKNOWN;
+
+  /* iter > base implies the value grows per iteration.  */
+  tree iter_gt_base = fold_binary (GT_EXPR, boolean_type_node,
+				   iter_cst, base_cst);
+  if (iter_gt_base == boolean_true_node)
+    return DIR_ASCENDING;
+
+  /* iter < base implies the value shrinks per iteration.  */
+  tree iter_lt_base = fold_binary (LT_EXPR, boolean_type_node,
+				   iter_cst, base_cst);
+  if (iter_lt_base == boolean_true_node)
+    return DIR_DESCENDING;
+
+  return DIR_UNKNOWN;
+}
+
+/* Compare this value against constant RHS_CST.
+   "without_cm" i.e. without taking the constraint manager into account;
+   only the widening's own base/iter constants are consulted.  */
+
+tristate
+widening_svalue::eval_condition_without_cm (enum tree_code op,
+					    tree rhs_cst) const
+{
+  tree base_cst = m_base_sval->maybe_get_constant ();
+  if (base_cst == NULL_TREE)
+    return tristate::TS_UNKNOWN;
+  tree iter_cst = m_iter_sval->maybe_get_constant ();
+  if (iter_cst == NULL_TREE)
+    return tristate::TS_UNKNOWN;
+
+  switch (get_direction ())
+    {
+    default:
+      gcc_unreachable ();
+    case DIR_ASCENDING:
+      /* LHS is in [base_cst, +ve infinity), assuming no overflow.  */
+      switch (op)
+	{
+	case LE_EXPR:
+	case LT_EXPR:
+	  {
+	    /* [BASE, +INF) OP RHS:
+	       This is either true or false at +ve infinity,
+	       It can be true for points X where X OP RHS, so we have either
+	       "false", or "unknown".  */
+	    tree base_op_rhs = fold_binary (op, boolean_type_node,
+					    base_cst, rhs_cst);
+	    if (base_op_rhs == boolean_true_node)
+	      return tristate::TS_UNKNOWN;
+	    else
+	      return tristate::TS_FALSE;
+	  }
+
+	case GE_EXPR:
+	case GT_EXPR:
+	  {
+	    /* [BASE, +INF) OP RHS:
+	       This is true at +ve infinity.  It will be true everywhere
+	       in the range if BASE >= RHS.  */
+	    tree base_op_rhs = fold_binary (op, boolean_type_node,
+					    base_cst, rhs_cst);
+	    if (base_op_rhs == boolean_true_node)
+	      return tristate::TS_TRUE;
+	    else
+	      return tristate::TS_UNKNOWN;
+	  }
+
+	case EQ_EXPR:
+	  {
+	    /* [BASE, +INF) == RHS:
+	       Could this be true at any point in the range?  If so we
+	       have "unknown", otherwise we have "false".  */
+	    tree base_le_rhs = fold_binary (LE_EXPR, boolean_type_node,
+					    base_cst, rhs_cst);
+	    if (base_le_rhs == boolean_true_node)
+	      return tristate::TS_UNKNOWN;
+	    else
+	      return tristate::TS_FALSE;
+	  }
+
+	case NE_EXPR:
+	  {
+	    /* [BASE, +INF) != RHS:
+	       Could we have equality at any point in the range?  If so we
+	       have "unknown", otherwise we have "true".  */
+	    tree base_le_rhs = fold_binary (LE_EXPR, boolean_type_node,
+					    base_cst, rhs_cst);
+	    if (base_le_rhs == boolean_true_node)
+	      return tristate::TS_UNKNOWN;
+	    else
+	      return tristate::TS_TRUE;
+	  }
+
+	default:
+	  return tristate::TS_UNKNOWN;
+	}
+
+    case DIR_DESCENDING:
+      /* LHS is in (-ve infinity, base_cst], assuming no overflow.
+	 TODO: this direction is not yet analyzed in detail.  */
+      return tristate::TS_UNKNOWN;
+
+    case DIR_UNKNOWN:
+      return tristate::TS_UNKNOWN;
+    }
+}
+
+/* class placeholder_svalue : public svalue. */
+
+/* Implementation of svalue::dump_to_pp vfunc for placeholder_svalue. */
+
+void
+placeholder_svalue::dump_to_pp (pretty_printer *pp, bool simple) const
+{
+ if (simple)
+ pp_printf (pp, "PLACEHOLDER(%qs)", m_name);
+ else
+ pp_printf (pp, "placeholder_svalue (%qs)", m_name);
+}
+
+/* Implementation of svalue::accept vfunc for placeholder_svalue. */
+
+void
+placeholder_svalue::accept (visitor *v) const
+{
+ v->visit_placeholder_svalue (this);
+}
+
+/* class unmergeable_svalue : public svalue. */
+
+/* Implementation of svalue::dump_to_pp vfunc for unmergeable_svalue. */
+
+void
+unmergeable_svalue::dump_to_pp (pretty_printer *pp, bool simple) const
+{
+ if (simple)
+ {
+ pp_string (pp, "UNMERGEABLE(");
+ m_arg->dump_to_pp (pp, simple);
+ pp_character (pp, ')');
+ }
+ else
+ {
+ pp_string (pp, "unmergeable_svalue (");
+ m_arg->dump_to_pp (pp, simple);
+ pp_character (pp, ')');
+ }
+}
+
+/* Implementation of svalue::accept vfunc for unmergeable_svalue. */
+
+void
+unmergeable_svalue::accept (visitor *v) const
+{
+ v->visit_unmergeable_svalue (this);
+ m_arg->accept (v);
+}
+
+/* Implementation of svalue::implicitly_live_p vfunc for unmergeable_svalue. */
+
+bool
+unmergeable_svalue::implicitly_live_p (const svalue_set &live_svalues,
+ const region_model *model) const
+{
+ return get_arg ()->live_p (live_svalues, model);
+}
+
+/* class compound_svalue : public svalue. */
+
+/* Implementation of svalue::dump_to_pp vfunc for compound_svalue.  */
+
+void
+compound_svalue::dump_to_pp (pretty_printer *pp, bool simple) const
+{
+  if (simple)
+    {
+      pp_string (pp, "COMPOUND(");
+      m_map.dump_to_pp (pp, simple, false);
+      pp_character (pp, ')');
+    }
+  else
+    {
+      /* Fix malformed output: this previously printed a stray ", "
+	 right after the open paren and a trailing ", " before the
+	 close paren, yielding "compound_svalue (, {...}, )".  */
+      pp_string (pp, "compound_svalue (");
+      pp_character (pp, '{');
+      m_map.dump_to_pp (pp, simple, false);
+      pp_character (pp, '}');
+      pp_character (pp, ')');
+    }
+}
+
+/* Implementation of svalue::accept vfunc for compound_svalue. */
+
+void
+compound_svalue::accept (visitor *v) const
+{
+ v->visit_compound_svalue (this);
+ for (binding_map::iterator_t iter = m_map.begin ();
+ iter != m_map.end (); ++iter)
+ {
+ //(*iter).first.accept (v);
+ (*iter).second->accept (v);
+ }
+}
+
+/* Calculate what the complexity of a compound_svalue instance for MAP
+   will be, based on the svalues bound within MAP.
+   The "+ 1" terms account for the compound_svalue node itself.  */
+
+complexity
+compound_svalue::calc_complexity (const binding_map &map)
+{
+  unsigned num_child_nodes = 0;
+  unsigned max_child_depth = 0;
+  for (binding_map::iterator_t iter = map.begin ();
+       iter != map.end (); ++iter)
+    {
+      /* Sum node counts; track the deepest child for the depth.  */
+      const complexity &sval_c = (*iter).second->get_complexity ();
+      num_child_nodes += sval_c.m_num_nodes;
+      max_child_depth = MAX (max_child_depth, sval_c.m_max_depth);
+    }
+  return complexity (num_child_nodes + 1, max_child_depth + 1);
+}
+
+/* class conjured_svalue : public svalue. */
+
+/* Implementation of svalue::dump_to_pp vfunc for conjured_svalue.  */
+
+void
+conjured_svalue::dump_to_pp (pretty_printer *pp, bool simple) const
+{
+  if (simple)
+    {
+      pp_string (pp, "CONJURED(");
+      pp_gimple_stmt_1 (pp, m_stmt, 0, (dump_flags_t)0);
+      pp_string (pp, ", ");
+      m_id_reg->dump_to_pp (pp, simple);
+      pp_character (pp, ')');
+    }
+  else
+    {
+      /* Previously a stray ", " was emitted immediately after the open
+	 paren, yielding malformed "conjured_svalue (, ..." output;
+	 dropped.  */
+      pp_string (pp, "conjured_svalue (");
+      pp_gimple_stmt_1 (pp, m_stmt, 0, (dump_flags_t)0);
+      pp_string (pp, ", ");
+      m_id_reg->dump_to_pp (pp, simple);
+      pp_character (pp, ')');
+    }
+}
+
+/* Implementation of svalue::accept vfunc for conjured_svalue. */
+
+void
+conjured_svalue::accept (visitor *v) const
+{
+ v->visit_conjured_svalue (this);
+ m_id_reg->accept (v);
+}
+
+} // namespace ana
+
+#endif /* #if ENABLE_ANALYZER */
(Zhongxing Xu, Ted Kremenek, and Jian Zhang).
A @code{region_model} encapsulates a representation of the state of
-memory, with a tree of @code{region} instances, along with their associated
-values. The representation is graph-like because values can be pointers
-to regions. It also stores a constraint_manager, capturing relationships
-between the values.
+memory, with a @code{store} recording a binding between @code{region}
+instances, to @code{svalue} instances. The bindings are organized into
+clusters, where regions accessible via well-defined pointer arithmetic
+are in the same cluster. The representation is graph-like because values
+can be pointers to regions. It also stores a constraint_manager,
+capturing relationships between the values.
Because each node in the @code{exploded_graph} has a @code{region_model},
and each of the latter is graph-like, the @code{exploded_graph} is in some
ways a graph of graphs.
-Here's an example of printing a @code{region_model}, showing the ASCII-art
-used to visualize the region hierarchy (colorized when printing to stderr):
+Here's an example of printing a @code{program_state}, showing the
+@code{region_model} within it, along with state for the @code{malloc}
+state machine.
@smallexample
(gdb) call debug (*this)
-r0: @{kind: 'root', parent: null, sval: null@}
-|-stack: r1: @{kind: 'stack', parent: r0, sval: sv1@}
-| |: sval: sv1: @{poisoned: uninit@}
-| |-frame for 'test': r2: @{kind: 'frame', parent: r1, sval: null, map: @{'ptr_3': r3@}, function: 'test', depth: 0@}
-| | `-'ptr_3': r3: @{kind: 'map', parent: r2, sval: sv3, type: 'void *', map: @{@}@}
-| | |: sval: sv3: @{type: 'void *', unknown@}
-| | |: type: 'void *'
-| `-frame for 'calls_malloc': r4: @{kind: 'frame', parent: r1, sval: null, map: @{'result_3': r7, '_4': r8, '<anonymous>': r5@}, function: 'calls_malloc', depth: 1@}
-| |-'<anonymous>': r5: @{kind: 'map', parent: r4, sval: sv4, type: 'void *', map: @{@}@}
-| | |: sval: sv4: @{type: 'void *', &r6@}
-| | |: type: 'void *'
-| |-'result_3': r7: @{kind: 'map', parent: r4, sval: sv4, type: 'void *', map: @{@}@}
-| | |: sval: sv4: @{type: 'void *', &r6@}
-| | |: type: 'void *'
-| `-'_4': r8: @{kind: 'map', parent: r4, sval: sv4, type: 'void *', map: @{@}@}
-| |: sval: sv4: @{type: 'void *', &r6@}
-| |: type: 'void *'
-`-heap: r9: @{kind: 'heap', parent: r0, sval: sv2@}
- |: sval: sv2: @{poisoned: uninit@}
- `-r6: @{kind: 'symbolic', parent: r9, sval: null, map: @{@}@}
-svalues:
- sv0: @{type: 'size_t', '1024'@}
- sv1: @{poisoned: uninit@}
- sv2: @{poisoned: uninit@}
- sv3: @{type: 'void *', unknown@}
- sv4: @{type: 'void *', &r6@}
-constraint manager:
+rmodel:
+stack depth: 1
+ frame (index 0): frame: ‘test’@@1
+clusters within frame: ‘test’@@1
+ cluster for: ptr_3: &HEAP_ALLOCATED_REGION(12)
+m_called_unknown_fn: FALSE
+constraint_manager:
equiv classes:
- ec0: @{sv0 == '1024'@}
- ec1: @{sv4@}
constraints:
+malloc:
+ 0x2e89590: &HEAP_ALLOCATED_REGION(12): unchecked ('ptr_3')
@end smallexample
This is the state at the point of returning from @code{calls_malloc} back
@}
@end smallexample
-The ``root'' region (``r0'') has a ``stack'' child (``r1''), with two
-children: a frame for @code{test} (``r2''), and a frame for
-@code{calls_malloc} (``r4''). These frame regions have child regions for
-storing their local variables. For example, the return region
-and that of various other regions within the ``calls_malloc'' frame all have
-value ``sv4'', a pointer to a heap-allocated region ``r6''. Within the parent
-frame, @code{ptr_3} has value ``sv3'', an unknown @code{void *}.
+Within the store, there is the cluster for @code{ptr_3} within the frame
+for @code{test}, where the whole cluster is bound to a pointer value,
+pointing at @code{HEAP_ALLOCATED_REGION(12)}. Additionally, this pointer
+has the @code{unchecked} state for the @code{malloc} state machine
+indicating it hasn't yet been checked against NULL since the allocation
+call.
@subsection Analyzer Paths
As a simple workaround, constraints on floating-point values are
currently ignored.
@item
-The region model code creates lots of little mutable objects at each
-@code{region_model} (and thus per @code{exploded_node}) rather than
-sharing immutable objects and having the mutable state in the
-@code{program_state} or @code{region_model}. The latter approach might be
-more efficient, and might avoid dealing with IDs rather than pointers
-(which requires us to impose an ordering to get meaningful equality).
-@item
-The region model code doesn't yet support @code{memcpy}. At the
-gimple-ssa level these have been optimized to statements like this:
-@smallexample
-_10 = MEM <long unsigned int> [(char * @{ref-all@})&c]
-MEM <long unsigned int> [(char * @{ref-all@})&d] = _10;
-@end smallexample
-Perhaps they could be supported via a new @code{compound_svalue} type.
-@item
There are various other limitations in the region model (grep for TODO/xfail
in the testsuite).
@item
that source is reached. By putting a series of these in the source, it's
much easier to effectively step through the program state as it's analyzed.
+The analyzer handles:
+
+@smallexample
+__analyzer_describe (0, expr);
+@end smallexample
+
+by emitting a warning describing the 2nd argument (which can be of any
+type), at a verbosity level given by the 1st argument. This is for use when
+debugging, and may be of use in DejaGnu tests.
+
@smallexample
__analyzer_dump ();
@end smallexample
One approach when tracking down where a particular bogus state is
introduced into the @code{exploded_graph} is to add custom code to
-@code{region_model::validate}.
-
-For example, this custom code (added to @code{region_model::validate})
-breaks with an assertion failure when a variable called @code{ptr}
-acquires a value that's unknown, using
-@code{region_model::get_value_by_name} to locate the variable
-
-@smallexample
- /* Find a variable matching "ptr". */
- svalue_id sid = get_value_by_name ("ptr");
- if (!sid.null_p ())
- @{
- svalue *sval = get_svalue (sid);
- gcc_assert (sval->get_kind () != SK_UNKNOWN);
- @}
-@end smallexample
-
-making it easier to investigate further in a debugger when this occurs.
+@code{program_state::validate}.
{
int aha = 3;
return [&aha] {
- return aha;
+ return aha; // { dg-warning "dereferencing pointer '.*' to within stale stack frame" }
};
+ /* TODO: may be worth special-casing the reporting of dangling
+ references from lambdas, to highlight the declaration, and maybe fix
+ the wording (it's a reference, not a pointer, for one thing). */
}
int main()
};
void h (e * i)
{
- void *j = nullptr; // { dg-bogus "NULL" "" { xfail *-*-* } }
- // TODO(xfail): we report "'i' is NULL" above, which is the wrong location
-
+ void *j = nullptr; // { dg-bogus "NULL" }
i->f = *i->g; // { dg-warning "dereference of NULL 'i'" }
}
virtual void c (int, int)
--- /dev/null
+// { dg-do compile { target c++11 } }
+// { dg-additional-options "-O1" }
+
+template <typename DV> DV
+vu (DV j4)
+{
+ return [j4] () { return j4 () ? j4 : throw j4 (); } ();
+}
+
+void
+foo ()
+{
+ auto n1 = [] { return nullptr; };
+
+ vu (n1);
+}
throw()
#endif
{
- return calloc (b, sizeof (int)); // { dg-warning "leak" }
+ return calloc (b, sizeof (int)); // { dg-bogus "leak" "" { xfail c++98_only } }
}
j (B *, int)
{
- } // { dg-warning "leak" }
+ }
};
j *
--- /dev/null
+template <typename> class allocator {
+public:
+ allocator(const allocator &);
+ allocator();
+};
+
+template <typename> struct allocator_traits;
+template <typename _Tp> struct allocator_traits<allocator<_Tp> > {
+ static allocator<_Tp> select_on_container_copy_construction() {
+ return allocator<_Tp>();
+ }
+ static allocator<_Tp> _S_select_on_copy() {
+ return select_on_container_copy_construction();
+ }
+};
+
+class basic_string {
+ struct _Alloc_hider {
+ _Alloc_hider(allocator<char>);
+ } _M_dataplus;
+
+public:
+ basic_string(basic_string &)
+ : _M_dataplus(allocator_traits<allocator<char> >::_S_select_on_copy()) {}
+} xxx(xxx);
--- /dev/null
+// { dg-additional-options "-O1" }
+
+class kz {
+public:
+ kz ();
+
+private:
+ int yu;
+};
+
+const kz vl;
+kz ax;
+
+void
+c1 (bool va, bool ze)
+{
+ kz ny, fb = vl;
+
+ if (va)
+ {
+ if (ze)
+ ny = vl;
+
+ fb = ny;
+ }
+
+ ax = fb;
+}
--- /dev/null
+/* { dg-additional-options "-fanalyzer-show-duplicate-count" } */
+
+#include <stdlib.h>
+
+typedef struct _krb5_data {
+ char *data;
+} krb5_data;
+
+/* Ensure that we de-duplicate the various paths to reach here,
+ and only emit one diagnostic. */
+
+void
+recvauth_common(krb5_data common)
+{
+ free(common.data);
+ free(common.data); /* { dg-warning "double-'free' of 'common.data'" "inner warning" } */
+ /* { dg-warning "double-'free' of 'inbuf_a.data' " "inbuf_a warning" { target *-*-* } .-1 } */
+ /* { dg-warning "double-'free' of 'inbuf_b.data' " "inbuf_b warning" { target *-*-* } .-2 } */
+ /* { dg-message "2 duplicates" "duplicates notification" { xfail *-*-* } .-3 } */
+}
+
+void krb5_recvauth(krb5_data inbuf_a)
+{
+ recvauth_common(inbuf_a);
+}
+
+void krb5_recvauth_version(krb5_data inbuf_b)
+{
+ recvauth_common(inbuf_b);
+}
{
free(inbuf.data);
free(inbuf.data); /* { dg-warning "double-'free'" "warning" } */
- /* { dg-message "2 duplicates" "duplicates notification" { target *-*-* } .-1 } */
+ /* { dg-message "2 duplicates" "duplicates notification" { xfail *-*-* } .-1 } */
}
void krb5_recvauth(krb5_data inbuf)
#include <stdlib.h>
+#include "analyzer-decls.h"
typedef struct _krb5_data {
char *data;
}
free((char *)inbuf.data); /* { dg-warning "double-'free' of 'inbuf.data'" } */
}
+
+extern void unknown_fn (void *);
+
+void
+test_4 (krb5_data inbuf)
+{
+ unknown_fn (NULL);
+ free(inbuf.data); /* { dg-message "first 'free' here" } */
+ free(inbuf.data); /* { dg-warning "double-'free' of 'inbuf.data'" } */
+}
+
+void
+test_5 (krb5_data inbuf)
+{
+ unknown_fn (&inbuf);
+ free(inbuf.data); /* { dg-message "first 'free' here" } */
+ free(inbuf.data); /* { dg-warning "double-'free' of 'inbuf.data'" "inbuf.data" } */
+ /* { dg-bogus "double-'free' of 'inbuf'" "inbuf" { target *-*-* } .-1 } */
+}
+
+typedef struct _padded_krb5_data {
+ int pad;
+ char *data;
+} padded_krb5_data;
+
+void
+test_6 (padded_krb5_data inbuf)
+{
+ unknown_fn (&inbuf.data);
+ free((char *)inbuf.data); /* { dg-message "first 'free' here" } */
+ free((char *)inbuf.data); /* { dg-warning "double-'free' of 'inbuf.data'" "inbuf.data" } */
+}
+
+void
+test_7 (padded_krb5_data inbuf)
+{
+ unknown_fn (&inbuf.data);
+ free((char *)inbuf.data);
+
+ unknown_fn (&inbuf.data);
+ free((char *)inbuf.data);
+}
+
+void
+test_8 (padded_krb5_data inbuf, int flag)
+{
+ if (flag)
+ {
+ unknown_fn (&inbuf.data);
+ free((char *)inbuf.data);
+ }
+ /* Should have two enodes, one for the explicit "freed" state, and one
+ for the implicit "start" state. */
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 processed enodes" } */
+
+ unknown_fn (&inbuf.data);
+
+ /* Should have just one enode, for the implicit "start" state. */
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 processed enode" } */
+}
--- /dev/null
+#include "analyzer-decls.h"
+
+extern long int labs (long int x)
+ __attribute__ ((__nothrow__ , __leaf__))
+ __attribute__ ((__const__));
+
+long int test_1 (long int x)
+{
+ return labs (x);
+}
+
+static long __attribute__((noinline))
+hide_long (long x)
+{
+ return x;
+}
+
+long int test_2 (long int x)
+{
+ __analyzer_eval (labs (hide_long (42)) == 42); /* { dg-warning "TRUE" } */
+ __analyzer_eval (labs (hide_long (-17)) == 17); /* { dg-warning "TRUE" } */
+}
--- /dev/null
+#include "analyzer-decls.h"
+
+int a;
+void test (int *p, int x)
+{
+ int y;
+
+ a = 17;
+ x = 42;
+ y = 13;
+
+ __analyzer_eval (a == 17); /* { dg-warning "TRUE" } */
+ __analyzer_eval (x == 42); /* { dg-warning "TRUE" } */
+ __analyzer_eval (y == 13); /* { dg-warning "TRUE" } */
+
+ __analyzer_eval (p == &a); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (p == &x); /* { dg-warning "FALSE" } */
+ __analyzer_eval (p == &y); /* { dg-warning "FALSE" } */
+
+ *p = 73;
+
+ __analyzer_eval (a == 17); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (x == 42); /* { dg-warning "TRUE" } */
+ __analyzer_eval (y == 13); /* { dg-warning "TRUE" } */
+}
--- /dev/null
+#include "analyzer-decls.h"
+
+extern void escape (int *p);
+
+int a;
+void test (int *p, int x)
+{
+ int y;
+
+ a = 17;
+ x = 42;
+ y = 13;
+
+ __analyzer_eval (a == 17); /* { dg-warning "TRUE" } */
+ __analyzer_eval (x == 42); /* { dg-warning "TRUE" } */
+ __analyzer_eval (y == 13); /* { dg-warning "TRUE" } */
+
+ escape (&x);
+ __analyzer_eval (a == 17); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (x == 42); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (y == 13); /* { dg-warning "TRUE" } */
+
+ __analyzer_eval (p == &a); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (p == &x); /* { dg-warning "FALSE" } */
+ __analyzer_eval (p == &y); /* { dg-warning "FALSE" } */
+
+ *p = 73;
+
+ __analyzer_eval (a == 17); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (x == 42); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (y == 13); /* { dg-warning "TRUE" } */
+}
/* Trigger a breakpoint in the analyzer when reached. */
extern void __analyzer_break (void);
+/* Emit a warning describing the 2nd argument (which can be of any
+ type), at the given verbosity level. This is for use when
+ debugging, and may be of use in DejaGnu tests. */
+extern void __analyzer_describe (int verbosity, ...);
+
/* Dump copious information about the analyzer’s state when reached. */
extern void __analyzer_dump (void);
will also dump all of the states within those nodes. */
extern void __analyzer_dump_exploded_nodes (int);
-extern void __analyzer_dump_num_heap_regions (void);
-
/* Emit a placeholder "note" diagnostic with a path to this call site,
if the analyzer finds a feasible path to it. */
extern void __analyzer_dump_path (void);
void test_1 (void *p, void *q, void *r)
{
foo(p, q, r);
- foo(NULL, q, r);
+ foo(NULL, q, r); /* { dg-warning "use of NULL where non-null expected" "warning" } */
+ /* { dg-message "argument 1 NULL where non-null expected" "note" { target *-*-* } .-1 } */
foo(p, NULL, r);
- foo(p, q, NULL);
+ foo(p, q, NULL); /* { dg-warning "use of NULL where non-null expected" } */
}
void test_1a (void *q, void *r)
void test_2 (void *p, void *q, void *r)
{
bar(p, q, r);
- bar(NULL, q, r);
- bar(p, NULL, r);
- bar(p, q, NULL);
+ bar(NULL, q, r); /* { dg-warning "use of NULL where non-null expected" "warning" } */
+ bar(p, NULL, r); /* { dg-warning "use of NULL where non-null expected" "warning" } */
+ /* { dg-message "argument 2 NULL where non-null expected" "note" { target *-*-* } .-1 } */
+ bar(p, q, NULL); /* { dg-warning "use of NULL where non-null expected" "warning" } */
}
void test_3 (void *q, void *r)
--- /dev/null
+#include "analyzer-decls.h"
+
+extern void bzero(void *s, __SIZE_TYPE__ n);
+
+void test_1 (void)
+{
+ char tmp[1024];
+ bzero (tmp, 1024);
+ __analyzer_eval (tmp[0] == 0); /* { dg-warning "TRUE" } */
+ __analyzer_eval (tmp[1023] == 0); /* { dg-warning "TRUE" } */
+}
--- /dev/null
+#include "analyzer-decls.h"
+
+struct s1
+{
+ char a;
+ char b;
+ char c;
+ char d;
+};
+
+struct s2
+{
+ char arr[4];
+};
+
+void test_1 ()
+{
+ struct s1 x = {'A', 'B', 'C', 'D'};
+ __analyzer_eval (x.a == 'A'); /* { dg-warning "TRUE" } */
+ __analyzer_eval (x.b == 'B'); /* { dg-warning "TRUE" } */
+ __analyzer_eval (x.c == 'C'); /* { dg-warning "TRUE" } */
+ __analyzer_eval (x.d == 'D'); /* { dg-warning "TRUE" } */
+ __analyzer_eval (((struct s2 *)&x)->arr[0] == 'A'); /* { dg-warning "TRUE" } */
+ __analyzer_eval (((struct s2 *)&x)->arr[1] == 'B'); /* { dg-warning "TRUE" } */
+ __analyzer_eval (((struct s2 *)&x)->arr[2] == 'C'); /* { dg-warning "TRUE" } */
+ __analyzer_eval (((struct s2 *)&x)->arr[3] == 'D'); /* { dg-warning "TRUE" } */
+
+ ((struct s2 *)&x)->arr[1] = '#';
+ __analyzer_eval (((struct s2 *)&x)->arr[1] == '#'); /* { dg-warning "TRUE" } */
+ __analyzer_eval (x.b == '#'); /* { dg-warning "TRUE" } */
+}
+
+void test_2 ()
+{
+ struct s2 x = {{'A', 'B', 'C', 'D'}};
+ __analyzer_eval (x.arr[0] == 'A'); /* { dg-warning "TRUE" } */
+ __analyzer_eval (x.arr[1] == 'B'); /* { dg-warning "TRUE" } */
+ __analyzer_eval (x.arr[2] == 'C'); /* { dg-warning "TRUE" } */
+ __analyzer_eval (x.arr[3] == 'D'); /* { dg-warning "TRUE" } */
+ struct s1 *p = (struct s1 *)&x;
+ __analyzer_eval (p->a == 'A'); /* { dg-warning "TRUE" "true" { xfail *-*-* } } */
+ /* { dg-bogus "UNKNOWN" "unknown" { xfail *-*-* } .-1 } */
+ __analyzer_eval (p->b == 'B'); /* { dg-warning "TRUE" "true" { xfail *-*-* } } */
+ /* { dg-bogus "UNKNOWN" "unknown" { xfail *-*-* } .-1 } */
+ __analyzer_eval (p->c == 'C'); /* { dg-warning "TRUE" "true" { xfail *-*-* } } */
+ /* { dg-bogus "UNKNOWN" "unknown" { xfail *-*-* } .-1 } */
+ __analyzer_eval (p->d == 'D'); /* { dg-warning "TRUE" "true" { xfail *-*-* } } */
+ /* { dg-bogus "UNKNOWN" "unknown" { xfail *-*-* } .-1 } */
+}
--- /dev/null
+#include "analyzer-decls.h"
+
+void test_1 (int i)
+{
+ char c1 = i;
+ char c2 = i;
+ __analyzer_eval (c1 == i); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (c1 == c2); /* { dg-warning "TRUE" } */
+}
+
+void test_2 (char c)
+{
+ int i = c;
+ __analyzer_eval (i == c); /* { dg-warning "TRUE" } */
+}
struct ptr_wrapper r;
r.ptr = malloc (sizeof (int)); /* { dg-message "allocated here" } */
} /* { dg-warning "leak of 'r.ptr'" } */
-/* { dg-bogus "leak of '<unknown>'" "unknown leak" { xfail *-*-* } .-1 } */
+/* { dg-bogus "leak of '<unknown>'" "unknown leak" { target *-*-* } .-1 } */
static struct ptr_wrapper __attribute__((noinline))
called_by_test_5a (void)
{
struct ptr_wrapper r;
- r.ptr = malloc (sizeof (int));
+ r.ptr = malloc (sizeof (int)); /* { dg-message "allocated here" } */
return r;
}
{
struct ptr_wrapper q = called_by_test_5a ();
} /* { dg-warning "leak of 'q.ptr'" } */
-/* TODO: show the allocation point. */
static struct ptr_wrapper __attribute__((noinline))
called_by_test_5b (void)
{
struct ptr_wrapper r;
r.ptr = malloc (sizeof (int));
- return r; /* { dg-warning "leak" } */
- /* TODO: show the allocation point. */
+ return r; /* { dg-warning "leak of '<return-value>.ptr'" } */
+ /* TODO: show the allocation point; improve above message. */
}
void test_5b (void)
{
struct union_wrapper uw2;
uw2.u.ptr = malloc (1024);
-} /* { dg-warning "leak of '\\(void \\*\\)uw2.u'" } */
+} /* { dg-warning "leak of 'uw2.u.ptr'" } */
--- /dev/null
+#include "analyzer-decls.h"
+
+struct coord
+{
+ int x;
+ int y;
+};
+
+void test_1 (void)
+{
+ struct coord arr[16];
+
+ arr[2].y = 4;
+ arr[3].x = 5;
+ arr[3].y = 6;
+ arr[4].x = 7;
+ arr[6].y = 8;
+ arr[8].x = 9;
+
+ arr[7] = arr[3];
+
+ __analyzer_eval (arr[7].x == 5); /* { dg-warning "TRUE" } */
+ __analyzer_eval (arr[7].y == 6); /* { dg-warning "TRUE" } */
+
+ /* Make sure we don't touch the neighbors. */
+ __analyzer_eval (arr[6].y == 8); /* { dg-warning "TRUE" } */
+ __analyzer_eval (arr[8].x == 9); /* { dg-warning "TRUE" } */
+}
--- /dev/null
+#include "analyzer-decls.h"
+
+struct coord
+{
+ int x;
+ int y;
+};
+
+/* Copying from one on-stack array to another. */
+
+void test_1 (void)
+{
+ struct coord arr_a[16];
+ struct coord arr_b[16];
+ arr_a[3].x = 5;
+ arr_a[3].y = 6;
+
+ arr_b[7] = arr_a[3];
+
+ __analyzer_eval (arr_b[7].x == 5); /* { dg-warning "TRUE" } */
+ __analyzer_eval (arr_b[7].y == 6); /* { dg-warning "TRUE" } */
+}
+
+/* Copying from an on-stack array to a global array. */
+
+struct coord glob_arr[16];
+
+void test_2 (void)
+{
+ struct coord arr[16];
+ arr[3].x = 5;
+ arr[3].y = 6;
+
+ glob_arr[7] = arr[3];
+
+ __analyzer_eval (glob_arr[7].x == 5); /* { dg-warning "TRUE" } */
+ __analyzer_eval (glob_arr[7].y == 6); /* { dg-warning "TRUE" } */
+}
+
+/* Copying from a partially initialized on-stack array to a global array. */
+
+struct coord glob_arr[16];
+
+void test_3 (void)
+{
+ struct coord arr[16];
+ arr[3].y = 6;
+
+ glob_arr[7] = arr[3]; // or should the uninit warning be here?
+
+ __analyzer_eval (glob_arr[7].x); /* { dg-warning "uninitialized" "uninit" { xfail *-*-* } } */
+ /* { dg-bogus "UNKNOWN" "unknown" { xfail *-*-* } .-1 } */
+ __analyzer_eval (glob_arr[7].y == 6); /* { dg-warning "TRUE" } */
+}
+
+/* Symbolic bindings: copying from one array to another. */
+
+struct coord glob_arr[16];
+
+void test_4 (int i)
+{
+ struct coord arr_a[16];
+ struct coord arr_b[16];
+ arr_a[i].x = 5;
+ arr_a[i].y = 6;
+ __analyzer_eval (arr_a[i].x == 5); /* { dg-warning "TRUE" "TRUE" { xfail *-*-* } } */
+ /* { dg-bogus "UNKNOWN" "UNKNOWN" { xfail *-*-* } .-1 } */
+ __analyzer_eval (arr_a[i].y == 6); /* { dg-warning "TRUE" } */
+
+ arr_b[i] = arr_a[i];
+
+ __analyzer_eval (arr_b[i].x == 5); /* { dg-warning "TRUE" "TRUE" { xfail *-*-* } } */
+ /* { dg-bogus "UNKNOWN" "UNKNOWN" { xfail *-*-* } .-1 } */
+ __analyzer_eval (arr_b[i].y == 6); /* { dg-warning "TRUE" "TRUE" { xfail *-*-* } } */
+ /* { dg-bogus "UNKNOWN" "UNKNOWN" { xfail *-*-* } .-1 } */
+}
+
+/* Symbolic bindings: copying within an array: symbolic src and dest */
+
+struct coord glob_arr[16];
+
+void test_5a (int i, int j)
+{
+ struct coord arr[16];
+ arr[i].x = 5;
+ arr[i].y = 6;
+
+ arr[j] = arr[i];
+
+ __analyzer_eval (arr[j].x == 5); /* { dg-warning "TRUE" "TRUE" { xfail *-*-* } } */
+ /* { dg-bogus "UNKNOWN" "UNKNOWN" { xfail *-*-* } .-1 } */
+ __analyzer_eval (arr[j].y == 6); /* { dg-warning "TRUE" "TRUE" { xfail *-*-* } } */
+ /* { dg-bogus "UNKNOWN" "UNKNOWN" { xfail *-*-* } .-1 } */
+}
+
+/* Symbolic bindings: copying within an array: symbolic src, concrete dest. */
+
+struct coord glob_arr[16];
+
+void test_5b (int i)
+{
+ struct coord arr[16];
+ arr[i].x = 5;
+ arr[i].y = 6;
+
+ arr[3] = arr[i];
+
+ __analyzer_eval (arr[3].x == 5); /* { dg-warning "TRUE" "TRUE" { xfail *-*-* } } */
+ /* { dg-bogus "UNKNOWN" "UNKNOWN" { xfail *-*-* } .-1 } */
+ __analyzer_eval (arr[3].y == 6); /* { dg-warning "TRUE" "TRUE" { xfail *-*-* } } */
+ /* { dg-bogus "UNKNOWN" "UNKNOWN" { xfail *-*-* } .-1 } */
+}
+
+/* Symbolic bindings: copying within an array: concrete src, symbolic dest. */
+
+struct coord glob_arr[16];
+
+void test_5c (int i)
+{
+ struct coord arr[16];
+ arr[3].x = 5;
+ arr[3].y = 6;
+
+ arr[i] = arr[3];
+
+ __analyzer_eval (arr[i].x == 5); /* { dg-warning "TRUE" "TRUE" { xfail *-*-* } } */
+ /* { dg-bogus "UNKNOWN" "UNKNOWN" { xfail *-*-* } .-1 } */
+ __analyzer_eval (arr[i].y == 6); /* { dg-warning "TRUE" "TRUE" { xfail *-*-* } } */
+ /* { dg-bogus "UNKNOWN" "UNKNOWN" { xfail *-*-* } .-1 } */
+}
+
+/* No info on the subregion being copied, and hence
+ binding_cluster2::maybe_get_compound_binding should return NULL. */
+
+void test_6 (void)
+{
+ struct coord arr[16];
+ arr[7] = glob_arr[3];
+
+ __analyzer_eval (arr[7].x == 5); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (arr[7].y == 6); /* { dg-warning "UNKNOWN" } */
+}
{
__analyzer_eval (i > 4); /* { dg-warning "TRUE" } */
__analyzer_eval (i <= 4); /* { dg-warning "FALSE" } */
- __analyzer_eval (i > 3); /* { dg-warning "TRUE" "desired" { xfail *-*-* } } */
- /* { dg-bogus "UNKNOWN" "status quo" { xfail *-*-* } .-1 } */
+ __analyzer_eval (i > 3); /* { dg-warning "TRUE" } */
__analyzer_eval (i > 5); /* { dg-warning "UNKNOWN" } */
- __analyzer_eval (i != 3); /* { dg-warning "TRUE" "desired" { xfail *-*-* } } */
- /* { dg-bogus "UNKNOWN" "status quo" { xfail *-*-* } .-1 } */
+ __analyzer_eval (i != 3); /* { dg-warning "TRUE" } */
- __analyzer_eval (i == 3); /* { dg-warning "FALSE" "desired" { xfail *-*-* } } */
- /* { dg-bogus "UNKNOWN" "status quo" { xfail *-*-* } .-1 } */
+ __analyzer_eval (i == 3); /* { dg-warning "FALSE" } */
__analyzer_eval (i != 4); /* { dg-warning "TRUE" } */
__analyzer_eval (i == 4); /* { dg-warning "FALSE" } */
__analyzer_eval (i <= 4); /* { dg-warning "TRUE" } */
__analyzer_eval (i > 3); /* { dg-warning "UNKNOWN" } */
- __analyzer_eval (i > 5); /* { dg-warning "FALSE" "desired" { xfail *-*-* } } */
- /* { dg-bogus "UNKNOWN" "status quo" { xfail *-*-* } .-1 } */
+ __analyzer_eval (i > 5); /* { dg-warning "FALSE" } */
__analyzer_eval (i != 3); /* { dg-warning "UNKNOWN" } */
__analyzer_eval (i == 3); /* { dg-warning "UNKNOWN" } */
__analyzer_eval (i != 4); /* { dg-warning "UNKNOWN" } */
__analyzer_eval (i == 4); /* { dg-warning "UNKNOWN" } */
- __analyzer_eval (i == 5); /* { dg-warning "FALSE" "desired" { xfail *-*-* } } */
- /* { dg-bogus "UNKNOWN" "status quo" { xfail *-*-* } .-1 } */
- __analyzer_eval (i != 5); /* { dg-warning "TRUE" "desired" { xfail *-*-* } } */
- /* { dg-bogus "UNKNOWN" "status quo" { xfail *-*-* } .-1 } */
+ __analyzer_eval (i == 5); /* { dg-warning "FALSE" } */
+ __analyzer_eval (i != 5); /* { dg-warning "TRUE" } */
__analyzer_eval (i < 5); /* { dg-warning "TRUE" } */
- __analyzer_eval (i <= 5); /* { dg-warning "TRUE" "desired" { xfail *-*-* } } */
- /* { dg-bogus "UNKNOWN" "status quo" { xfail *-*-* } .-1 } */
+ __analyzer_eval (i <= 5); /* { dg-warning "TRUE" } */
}
}
{
if (i > 3)
if (i < 5)
- __analyzer_eval (i == 4); /* { dg-warning "TRUE" "desired" { xfail *-*-* } } */
- /* { dg-bogus "UNKNOWN" "status quo" { xfail *-*-* } .-1 } */
+ __analyzer_eval (i == 4); /* { dg-warning "TRUE" } */
}
void test_range_float_gt_lt (float f)
{
if (i >= 4)
if (i < 5)
- __analyzer_eval (i == 4); /* { dg-warning "TRUE" "desired" { xfail *-*-* } } */
- /* { dg-bogus "UNKNOWN" "status quo" { xfail *-*-* } .-1 } */
+ __analyzer_eval (i == 4); /* { dg-warning "TRUE" } */
}
void test_range_float_ge_lt (float f)
{
if (i > 3)
if (i <= 4)
- __analyzer_eval (i == 4); /* { dg-warning "TRUE" "desired" { xfail *-*-* } } */
- /* { dg-bogus "UNKNOWN" "status quo" { xfail *-*-* } .-1 } */
+ __analyzer_eval (i == 4); /* { dg-warning "TRUE" } */
}
void test_range_float_gt_le (float f)
{
if (i >= 4)
if (i <= 4)
- __analyzer_eval (i == 4); /* { dg-warning "TRUE" "desired" { xfail *-*-* } } */
- /* { dg-bogus "UNKNOWN" "status quo" { xfail *-*-* } .-1 } */
+ __analyzer_eval (i == 4); /* { dg-warning "TRUE" } */
}
void test_range_float_ge_le (float f)
{
struct coord d;
d = c;
- __analyzer_eval (d.x == c.x); /* { dg-warning "TRUE" "desired" { xfail *-*-* } } */
- /* { dg-warning "UNKNOWN" "actual" { target *-*-* } .-1 } */
- /* TODO(xfail): c and d share the same unknown value of type "coord", but
- attempts to access the fields lead to different unknown values. */
-
- __analyzer_eval (d.y == c.y); /* { dg-warning "TRUE" "desired" { xfail *-*-* } } */
- /* { dg-warning "UNKNOWN" "actual" { target *-*-* } .-1 } */
- // TODO(xfail): likewise
-
+ __analyzer_eval (d.x == c.x); /* { dg-warning "TRUE" } */
+ __analyzer_eval (d.y == c.y); /* { dg-warning "TRUE" } */
__analyzer_eval (d.x == d.y); /* { dg-warning "UNKNOWN" } */
/* d and c share an unknown value of type "struct coord".
But d.x and d.y should be different unknown values (although they inherit
{
__analyzer_eval (o->mid.in.f == 0.f); /* { dg-warning "UNKNOWN" } */
o->mid.in.f = 0.f;
- __analyzer_eval (o->mid.in.f == 0.f); /* { dg-warning "TRUE" "PR 93356" { xfail *-*-* } } */
- /* { dg-warning "UNKNOWN" "disabled float comparisons" { target *-*-* } .-1 } */
+ __analyzer_eval (o->mid.in.f == 0.f); /* { dg-warning "TRUE" } */
}
void test_14 (struct outer o)
{
__analyzer_eval (o.mid.in.f == 0.f); /* { dg-warning "UNKNOWN" } */
o.mid.in.f = 0.f;
- __analyzer_eval (o.mid.in.f == 0.f); /* { dg-warning "TRUE" "PR 93356" { xfail *-*-* } } */
- /* { dg-warning "UNKNOWN" "disabled float comparisons" { target *-*-* } .-1 } */
+ __analyzer_eval (o.mid.in.f == 0.f); /* { dg-warning "TRUE" } */
}
void test_15 (const char *str)
{
char ch = str[0];
__analyzer_eval (ch == 'a'); /* { dg-warning "UNKNOWN" } */
- __analyzer_eval (ch == str[0]); /* { dg-warning "TRUE" "desired" { xfail *-*-* } } */
- /* { dg-warning "UNKNOWN" "status quo" { target *-*-* } .-1 } */
- // TODO(xfail)
+ __analyzer_eval (ch == str[0]); /* { dg-warning "TRUE" } */
ch = 'a';
__analyzer_eval (ch == 'a'); /* { dg-warning "TRUE" } */
__analyzer_eval (msg != NULL); /* { dg-warning "TRUE" } */
- __analyzer_eval (msg[0] == 'h'); /* { dg-warning "TRUE" "desired" { xfail *-*-* } } */
- /* { dg-warning "UNKNOWN" "status quo" { target *-*-* } .-1 } */
- // TODO(xfail)
+ __analyzer_eval (msg[0] == 'h'); /* { dg-warning "TRUE" } */
- __analyzer_eval (msg[1] == 'e'); /* { dg-warning "TRUE" "desired" { xfail *-*-* } } */
- /* { dg-warning "UNKNOWN" "status quo" { target *-*-* } .-1 } */
- // TODO(xfail)
+ __analyzer_eval (msg[1] == 'e'); /* { dg-warning "TRUE" } */
__analyzer_eval (strlen (msg) == 11); /* { dg-warning "TRUE" } */
+
+ /* Out-of-bounds. */
+ __analyzer_eval (msg[100] == 'e'); /* { dg-warning "UNKNOWN" } */
+ // TODO: some kind of warning for the out-of-bounds access
}
static const char *__attribute__((noinline))
__analyzer_eval (msg != NULL); /* { dg-warning "TRUE" } */
- __analyzer_eval (msg[0] == 'h'); /* { dg-warning "TRUE" "desired" { xfail *-*-* } } */
- /* { dg-warning "UNKNOWN" "status quo" { target *-*-* } .-1 } */
- // TODO(xfail)
+ __analyzer_eval (msg[0] == 'h'); /* { dg-warning "TRUE" } */
- __analyzer_eval (msg[1] == 'e'); /* { dg-warning "TRUE" "desired" { xfail *-*-* } } */
- /* { dg-warning "UNKNOWN" "status quo" { target *-*-* } .-1 } */
- // TODO(xfail)
+ __analyzer_eval (msg[1] == 'e'); /* { dg-warning "TRUE" } */
__analyzer_eval (strlen (msg) == 11); /* { dg-warning "TRUE" } */
}
__analyzer_eval (j == i); /* { dg-warning "UNKNOWN" } */
}
-/* TODO: and more complicated graph-like examples, where anything that's
- reachable from the pointer might be modified. */
-
void test_17 (int i)
{
int j = 42;
i = f->i + g->i;
j = f->i + g->i;
k = f->i * g->i;
- __analyzer_eval (i == j); /* { dg-warning "TRUE" "desired" { xfail *-*-* } } */
- /* { dg-warning "UNKNOWN" "status quo" { target *-*-* } .-1 } */
- /* TODO(xfail): we'd need to record that the two unknown values are both
- the sum of the two unknown input values (and thus are the same); not
- yet sure if we want arbitrary expression trees in the representation
- (analysis termination concerns). */
-
+ __analyzer_eval (i == j); /* { dg-warning "TRUE" } */
__analyzer_eval (i == k); /* { dg-warning "UNKNOWN" } */
}
/* Overwriting a whole struct should invalidate our knowledge
about fields within it. */
g = *f;
- __analyzer_eval (g.i == 42); /* { dg-warning "UNKNOWN" "desired" { xfail *-*-* } } */
- /* { dg-warning "TRUE" "status quo" { target *-*-* } .-1 } */
- // TODO(xfail)
+ __analyzer_eval (g.i == 42); /* { dg-warning "UNKNOWN" } */
}
void test_25 (struct foo *f)
source value should update our knowledge about fields within
the dest value. */
g = *f;
- __analyzer_eval (g.i == 43); /* { dg-warning "TRUE" "desired" { xfail *-*-* } } */
- /* { dg-warning "FALSE" "status quo" { target *-*-* } .-1 } */
- // TODO(xfail)
+ __analyzer_eval (g.i == 43); /* { dg-warning "TRUE" } */
}
void test_26 (struct coord *p, struct coord *q)
{
p->x = 42;
- q->y = 17;
- __analyzer_eval (p->x == 42); /* { dg-warning "TRUE" } */
+ q->y = 17; /* could clobber p->x. */
+ __analyzer_eval (p->x == 42); /* { dg-warning "UNKNOWN" } */
__analyzer_eval (p->y); /* { dg-warning "UNKNOWN" } */
__analyzer_eval (q->x); /* { dg-warning "UNKNOWN" } */
__analyzer_eval (q->y == 17); /* { dg-warning "TRUE" } */
source value should update our knowledge about fields within
the dest value. */
*p = *q;
- __analyzer_eval (p->x); /* { dg-warning "UNKNOWN" "desired" { xfail *-*-* } } */
- /* { dg-warning "TRUE" "status quo" { target *-*-* } .-1 } */
- // TODO(xfail): should have been overwritten
+ __analyzer_eval (p->x); /* { dg-warning "UNKNOWN" } */
__analyzer_eval (p->y == 17); /* { dg-warning "TRUE" "desired" { xfail *-*-* } } */
/* { dg-warning "UNKNOWN" "status quo" { target *-*-* } .-1 } */
// TODO(xfail): should have been overwritten with q->y
__analyzer_eval (q->x); /* { dg-warning "UNKNOWN" } */
- __analyzer_eval (q->y == 17); /* { dg-warning "TRUE" } */
+ __analyzer_eval (q->y == 17); /* { dg-warning "TRUE" "desired" { xfail *-*-* } } */
+ /* { dg-warning "UNKNOWN" "status quo" { target *-*-* } .-1 } */
}
void test_27 (struct coord *p)
{
memset (p, 0, sizeof (struct coord));
- __analyzer_eval (p->x == 0); /* { dg-warning "TRUE" "desired" { xfail *-*-* } } */
- /* { dg-warning "UNKNOWN" "status quo" { target *-*-* } .-1 } */
- // TODO(xfail):
- __analyzer_eval (p->y == 0); /* { dg-warning "TRUE" "desired" { xfail *-*-* } } */
- /* { dg-warning "UNKNOWN" "status quo" { target *-*-* } .-1 } */
- // TODO(xfail):
+ __analyzer_eval (p->x == 0); /* { dg-warning "TRUE" } */
+ __analyzer_eval (p->y == 0); /* { dg-warning "TRUE" } */
}
void test_28 (struct coord *p)
__analyzer_eval (q[-2].y == 107025); /* { dg-warning "TRUE" } */
q -= 2;
+ __analyzer_eval (q == &p[7]); /* { dg-warning "UNKNOWN" } */
+ // TODO: make this be TRUE
__analyzer_eval (q->x == 107024); /* { dg-warning "TRUE" } */
__analyzer_eval (q->y == 107025); /* { dg-warning "TRUE" } */
__analyzer_eval (q[-2].y == 107025); /* { dg-warning "TRUE" } */
q -= 2;
+ __analyzer_eval (q == &p[7]); /* { dg-warning "TRUE" } */
__analyzer_eval (q->x == 107024); /* { dg-warning "TRUE" } */
__analyzer_eval (q->y == 107025); /* { dg-warning "TRUE" } */
__analyzer_eval (q[-2].y == 107025); /* { dg-warning "TRUE" } */
q -= 2;
+ __analyzer_eval (q == &p[7]); /* { dg-warning "TRUE" } */
__analyzer_eval (q->x == 107024); /* { dg-warning "TRUE" } */
__analyzer_eval (q->y == 107025); /* { dg-warning "TRUE" } */
union u u;
u.i = 42;
__analyzer_eval (u.i == 42); /* { dg-warning "TRUE" } */
- __analyzer_eval (u.ptr == NULL); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (u.ptr == NULL); /* { dg-warning "UNKNOWN|FALSE" } */
/* Writes to a union member should invalidate knowledge about other members. */
u.ptr = NULL;
__analyzer_eval (u.ptr == NULL); /* { dg-warning "TRUE" } */
- __analyzer_eval (u.i == 42); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (u.i == 42); /* { dg-warning "UNKNOWN|FALSE" } */
}
void test_42 (void)
float f;
i = 42;
f = i;
- __analyzer_eval (f == 42.0); /* { dg-warning "TRUE" "PR 93356" { xfail *-*-* } } */
- /* { dg-warning "UNKNOWN" "disabled float comparisons" { target *-*-* } .-1 } */
+ __analyzer_eval (f == 42.0); /* { dg-warning "TRUE" } */
}
void test_43 (void)
{
struct coord d;
memcpy (&d, &c, sizeof (struct coord));
- __analyzer_eval (c.x == d.x); /* { dg-warning "TRUE" "desired" { xfail *-*-* } } */
- /* { dg-warning "UNKNOWN" "status quo" { target *-*-* } .-1 } */
- __analyzer_eval (c.y == d.y); /* { dg-warning "TRUE" "desired" { xfail *-*-* } } */
- /* { dg-warning "UNKNOWN" "status quo" { target *-*-* } .-1 } */
+ __analyzer_eval (c.x == d.x); /* { dg-warning "TRUE" } */
+ __analyzer_eval (c.y == d.y); /* { dg-warning "TRUE" } */
}
struct big
void test_2 (void)
{
global_union.ptr_val = malloc (1024); /* { dg-message "allocated here" } */
- global_union.int_val = 0;
-} /* { dg-warning "leak of '<unknown>' " } */
-/* TODO: something better than "<unknown>". */
-/* TODO: better location for the leak. */
-
+ global_union.int_val = 0; /* { dg-warning "leak of 'global_union.ptr_val' " } */
+}
-/* FIXME: we shouldn't need this. */
-/* { dg-additional-options "-fanalyzer-fine-grained" } */
-
#include <stdlib.h>
void *global_ptr;
void test_1 (int i)
{
global_ptr = malloc (1024); /* { dg-message "allocated here" } */
- *(int *)&global_ptr = i; /* { dg-warning "leak of '<unknown>'" } */
- // TODO: something better than "<unknown>" here ^^^
+ *(int *)&global_ptr = i; /* { dg-warning "leak of 'global_ptr'" } */
}
void test_2 (int i)
{
- void *p = malloc (1024); /* { dg-message "allocated here" "" { xfail *-*-* } } */
- // TODO(xfail)
+ void *p = malloc (1024); /* { dg-message "allocated here" } */
global_ptr = p;
*(int *)&p = i;
p = global_ptr;
__analyzer_eval (p[3] == 42); /* { dg-warning "UNKNOWN" } */
__analyzer_eval (p[i] == 17); /* { dg-warning "TRUE" } */
- __analyzer_eval (p[j] == 17); /* { dg-warning "UNKNOWN" "desired" { xfail *-*-* } } */
- /* { dg-bogus "TRUE" "status quo" { xfail *-*-* } .-1 } */
- // FIXME(xfails) ^^^
+ __analyzer_eval (p[j] == 17); /* { dg-warning "UNKNOWN" } */
}
--- /dev/null
+/* { dg-additional-options "-Wno-analyzer-too-complex" } */
+
+#include <stdlib.h>
+
+struct foo { int dummy; };
+
+struct foo **
+test (int n) {
+ struct foo **arr;
+ int i;
+
+ if ((arr = (struct foo **)malloc(n * sizeof(struct foo *))) == NULL)
+ return NULL;
+
+ for (i = 0; i < n; i++) {
+ if ((arr[i] = (struct foo *)malloc(sizeof(struct foo))) == NULL) {
+ for (; i >= 0; i++) {
+ free(arr[i]); /* { dg-bogus "double-'free'" } */
+ }
+ free(arr);
+ return NULL;
+ }
+ }
+ return arr;
+}
{
if (--obj->ob_refcnt == 0) /* { dg-bogus "dereference of uninitialized pointer 'obj'" } */
obj->ob_type->tp_dealloc (obj);
+ /* { dg-warning "dereference of NULL 'obj'" "deref of NULL" { target *-*-* } .-2 } */
+ /* FIXME: ideally we wouldn't issue this, as we've already issued a
+ warning about str_obj which is now in the "stop" state; the cast
+ confuses things. */
}
void test_1 (const char *str)
{
base_obj *obj = new_string_obj (str);
- //__analyzer_dump();
unref (obj);
-} /* { dg-bogus "leak" } */
+} /* { dg-bogus "leak" "" { xfail *-*-* } } */
+/* XFAIL (false leak):
+ Given that we only know "len" symbolically, this line:
+ str_obj->str_buf[len] = '\0';
+ is a symbolic write which could clobber the ob_type or ob_refcnt.
+ It reports a leak when following the path where the refcount is clobbered
+ to be a value that leads to the deallocator not being called. */
//__analyzer_dump();
if (obj)
unref (obj);
-} /* { dg-bogus "leak of 'obj'" "" { xfail *-*-* } } */
-/* TODO(xfail): the false leak report involves the base_obj.ob_refcnt
- being 1, but the string_obj.str_base.ob_refcnt being unknown (when
- they ought to be the same region), thus allowing for a path in which
- the object is allocated but not freed. */
+} /* { dg-bogus "leak" "" { xfail *-*-* } } */
+/* XFAIL (false leak):
+ Given that we only know "len" symbolically, this line:
+ str_obj->str_buf[len] = '\0';
+ is a symbolic write which could clobber the ob_type or ob_refcnt.
+ It reports a leak when following the path where the refcount is clobbered
+ to be a value that leads to the deallocator not being called. */
string_obj *obj = new_string_obj (str);
if (obj)
unref (obj);
-} /* { dg-bogus "leak of 'obj'" "" { xfail *-*-* } } */
-/* TODO(xfail): the false leak report involves the base_obj.ob_refcnt
- being 1, but the string_obj.str_base.ob_refcnt being unknown (when
- they ought to be the same region), thus allowing for a path in which
- the object is allocated but not freed. */
-
+} /* { dg-bogus "leak" "" { xfail *-*-* } } */
+/* XFAIL (false leak):
+ Given that we only know "len" symbolically, this line:
+ str_obj->str_buf[len] = '\0';
+ is a symbolic write which could clobber the ob_type or ob_refcnt.
+ It reports a leak when following the path where the refcount is clobbered
+ to be a value that leads to the deallocator not being called. */
#include <stdlib.h>
#include "analyzer-decls.h"
-typedef struct base_obj base_obj;
-typedef struct type_obj type_obj;
-typedef struct string_obj string_obj;
-
-struct base_obj
+typedef struct base_obj
{
struct type_obj *ob_type;
int ob_refcnt;
-};
+} base_obj;
-struct type_obj
+typedef struct type_obj
{
base_obj tp_base;
-};
+ void (*tp_dealloc) (base_obj *);
+} type_obj;
-struct string_obj
+typedef struct boxed_int_obj
{
- base_obj str_base;
- size_t str_len;
- char str_buf[];
-};
+ base_obj int_base;
+ int int_val;
+} boxed_int_obj;
+
+extern void int_del (base_obj *);
type_obj type_type = {
- { &type_type, 1},
+ { &type_type, 1}
};
-type_obj str_type = {
- { &str_type, 1},
+type_obj boxed_int_type = {
+ { &type_type, 1},
+ int_del
};
base_obj *alloc_obj (type_obj *ob_type, size_t sz)
return obj;
}
+base_obj *new_int_obj (int val)
+{
+ boxed_int_obj *int_obj
+ = (boxed_int_obj *)alloc_obj (&boxed_int_type, sizeof (boxed_int_obj));
+ if (!int_obj)
+ return NULL;
+ int_obj->int_val = val;
+ return (base_obj *)int_obj;
+}
+
void unref (base_obj *obj)
{
- //__analyzer_dump();
if (--obj->ob_refcnt == 0)
- free (obj);
+ obj->ob_type->tp_dealloc (obj);
}
-void test_1 ()
+void test_1 (const char *str)
{
- base_obj *obj = alloc_obj (&str_type, sizeof (string_obj));
- if (obj)
- {
- __analyzer_dump_num_heap_regions (); /* { dg-warning "num heap regions: '1'" } */
- unref (obj);
- __analyzer_dump_num_heap_regions (); /* { dg-warning "num heap regions: '0'" } */
- }
-}
+ base_obj *obj = new_int_obj (42);
+ if (!obj)
+ return;
+ __analyzer_eval (((boxed_int_obj *)obj)->int_val == 42); /* { dg-warning "TRUE" } */
+ __analyzer_eval (obj->ob_refcnt == 1); /* { dg-warning "TRUE" } */
+ unref (obj);
+} /* { dg-bogus "leak" "" } */
+++ /dev/null
-#include <stdlib.h>
-#include "analyzer-decls.h"
-
-/* Verify that we don't accumulate state after a malloc/free pair. */
-
-void test (void)
-{
- void *ptr;
- __analyzer_dump_num_heap_regions (); /* { dg-warning "num heap regions: '0'" } */
- ptr = malloc (1024);
- __analyzer_dump_num_heap_regions (); /* { dg-warning "num heap regions: '1'" } */
- free (ptr);
- __analyzer_dump_num_heap_regions (); /* { dg-warning "num heap regions: '0'" } */
-}
struct base *bp = (struct base *)&s;
- __analyzer_eval (bp->i == 3); /* { dg-warning "TRUE" "desired" { xfail *-*-* } } */
- /* { dg-warning "UNKNOWN" "status quo" { target *-*-* } .-1 } */
+ __analyzer_eval (bp->i == 3); /* { dg-warning "TRUE" } */
}
--- /dev/null
+/* Smoketest for __analyzer_describe. */
+
+#include "analyzer-decls.h"
+
+void test (int i)
+{
+ __analyzer_describe (0, 42); /* { dg-warning "svalue: '\\(int\\)42'" } */
+ __analyzer_describe (0, i); /* { dg-warning "svalue: 'INIT_VAL\\(i.*\\)'" } */
+ __analyzer_describe (0, &i); /* { dg-warning "svalue: '&i'" } */
+ /* Further cases would risk overspecifying things. */
+}
/* A loop, to ensure we have phi nodes. */
for (i = 0; i < n; i++)
- result[i] = buf[i] + i; /* { dg-warning "possibly-NULL" "" { xfail *-*-* } } */
- /* TODO(xfail): why isn't the warning appearing? */
+ result[i] = buf[i] + i; /* { dg-warning "possibly-NULL" } */
/* Example of a "'" (to test quoting). */
*out = some_call (i, 'a');
{
default:
case 0:
- *pp = malloc (16);
+ *pp = malloc (16); /* { dg-warning "leak" } */
break;
case 1:
free (*pp);
{
default:
case 0:
- p0 = malloc (16);
+ p0 = malloc (16); /* { dg-warning "leak" } */
break;
case 1:
- free (p0); /* { dg-warning "double-'free' of 'p0'" } */
+ free (p0); /* { dg-warning "double-'free' of 'p0'" "" { xfail *-*-* } } */
break;
case 2:
- p1 = malloc (16);
+ p1 = malloc (16); /* { dg-warning "leak" } */
break;
case 3:
- free (p1); /* { dg-warning "double-'free' of 'p1'" } */
+ free (p1); /* { dg-warning "double-'free' of 'p1'" "" { xfail *-*-* } } */
break;
case 4:
- p2 = malloc (16);
+ p2 = malloc (16); /* { dg-warning "leak" } */
break;
case 5:
- free (p2); /* { dg-warning "double-'free' of 'p2'" } */
+ free (p2); /* { dg-warning "double-'free' of 'p2'" "" { xfail *-*-* } } */
break;
case 6:
- p3 = malloc (16);
+ p3 = malloc (16); /* { dg-warning "leak" } */
break;
case 7:
- free (p3); /* { dg-warning "double-'free' of 'p3'" } */
+ free (p3); /* { dg-warning "double-'free' of 'p3'" "" { xfail *-*-* } } */
break;
}
}
--- /dev/null
+#include "analyzer-decls.h"
+
+void test_1 (void)
+{
+ __analyzer_dump_path (); /* { dg-message "path" } */
+}
+
+void test_2 (int flag)
+{
+ if (flag)
+ __analyzer_dump_path (); /* { dg-message "path" } */
+}
+
+void test_3 (int flag)
+{
+ if (flag)
+ if (!flag)
+ __analyzer_dump_path (); /* { dg-bogus "path" } */
+}
+
+int global_for_test_4;
+static void __attribute__((noinline)) called_by_test_4 () {}
+void test_4 (void)
+{
+ /* Verify that a state change that happens in a stmt that
+ isn't the first within its BB can affect path feasibility. */
+ global_for_test_4 = 0;
+ global_for_test_4 = 1;
+ /* Thwart the optimizer. */
+ called_by_test_4 ();
+ if (global_for_test_4)
+ __analyzer_dump_path (); /* { dg-message "path" } */
+}
+
+/* Verify that loops don't confuse the feasibility checker. */
+
+void test_5 (void)
+{
+ for (int i = 0; i < 1024; i++)
+ {
+ }
+ __analyzer_dump_path (); /* { dg-message "path" } */
+}
+
+/* Reproducer for an issue seen with CVE-2005-1689 (PR analyzer/96374): if we
+ take the shortest path and update state and check feasibility per-edge, we
+ can erroneously reject valid diagnostics. */
+
+int test_6 (int a, int b)
+{
+ int problem = 0;
+ if (a)
+ problem = 1;
+ if (b)
+ {
+ if (!problem)
+ problem = 2;
+ __analyzer_dump_path (); /* { dg-message "path" "" { xfail *-*-* } } */
+ /* XFAIL is PR analyzer/96374. */
+ }
+ return problem;
+}
--- /dev/null
+#include "analyzer-decls.h"
+
+typedef struct base_obj
+{
+ int m_first;
+ int m_second;
+} base_obj;
+
+typedef struct sub_obj
+{
+ base_obj base;
+} sub_obj;
+
+void test (sub_obj *sub)
+{
+ sub->base.m_first = 1;
+ sub->base.m_second = 2;
+ __analyzer_eval (sub->base.m_first == 1); /* { dg-warning "TRUE" } */
+ __analyzer_eval (sub->base.m_second == 2); /* { dg-warning "TRUE" } */
+
+ base_obj *base = (struct base_obj *)sub;
+ __analyzer_eval (base->m_first == 1); /* { dg-warning "TRUE" } */
+ __analyzer_eval (base->m_second == 2); /* { dg-warning "TRUE" } */
+}
--- /dev/null
+/* A toy re-implementation of CPython's object model. */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "analyzer-decls.h"
+
+typedef struct base_obj base_obj;
+typedef struct string_obj string_obj;
+
+struct base_obj
+{
+ int ob_refcnt;
+};
+
+struct string_obj
+{
+ base_obj str_base;
+ size_t str_len;
+ char str_buf[];
+};
+
+base_obj *alloc_obj (const char *str)
+{
+ size_t len = strlen (str);
+ base_obj *obj = (base_obj *)malloc (sizeof (string_obj) + len + 1);
+ if (!obj)
+ return NULL;
+ obj->ob_refcnt = 1;
+ string_obj *str_obj = (string_obj *)obj;
+ __analyzer_eval (str_obj->str_base.ob_refcnt == 1); /* { dg-warning "TRUE" } */
+ return obj;
+}
--- /dev/null
+/* Tests of brace-enclosed initializers
+ Some of these use the CONSTRUCTOR tree code, but it appears
+ only for a full zero-init; it appears that by the time the analyzer
+ runs that this initialization has been converted into field-wise
+ gimple assign stmts, with just "zero-init everything" CONSTRUCTORs
+ and "clobber" CONSTRUCTORs. */
+
+#include "analyzer-decls.h"
+
+struct coord
+{
+ int x;
+ int y;
+};
+
+struct tri
+{
+ struct coord v[3];
+};
+
+union iap
+{
+ int i;
+ void *p;
+};
+
+void test_1 (void)
+{
+ struct coord c = {3, 4};
+ __analyzer_eval (c.x == 3); /* { dg-warning "TRUE" } */
+ __analyzer_eval (c.y == 4); /* { dg-warning "TRUE" } */
+}
+
+void test_2 (void)
+{
+ struct coord c = {3};
+ __analyzer_eval (c.x == 3); /* { dg-warning "TRUE" } */
+ __analyzer_eval (c.y == 0); /* { dg-warning "TRUE" } */
+}
+
+void test_3 (void)
+{
+ struct coord c = {};
+ __analyzer_eval (c.x == 0); /* { dg-warning "TRUE" } */
+ __analyzer_eval (c.y == 0); /* { dg-warning "TRUE" } */
+}
+
+void test_4 (void)
+{
+ int c[2] = {3, 4};
+ __analyzer_eval (c[0] == 3); /* { dg-warning "TRUE" } */
+ __analyzer_eval (c[1] == 4); /* { dg-warning "TRUE" } */
+}
+
+void test_5 (void)
+{
+ int c[2] = {3};
+ __analyzer_eval (c[0] == 3); /* { dg-warning "TRUE" } */
+ __analyzer_eval (c[1] == 0); /* { dg-warning "TRUE" } */
+}
+
+void test_6 (void)
+{
+ int c[2] = {};
+ __analyzer_eval (c[0] == 0); /* { dg-warning "TRUE" } */
+ __analyzer_eval (c[1] == 0); /* { dg-warning "TRUE" } */
+}
+
+void test_7 (void)
+{
+ struct coord c[2] = {{3, 4}, {5, 6}};
+ __analyzer_eval (c[0].x == 3); /* { dg-warning "TRUE" } */
+ __analyzer_eval (c[0].y == 4); /* { dg-warning "TRUE" } */
+ __analyzer_eval (c[1].x == 5); /* { dg-warning "TRUE" } */
+ __analyzer_eval (c[1].y == 6); /* { dg-warning "TRUE" } */
+}
+
+void test_8 (void)
+{
+ struct coord c[2] = {{3}, {5}};
+ __analyzer_eval (c[0].x == 3); /* { dg-warning "TRUE" } */
+ __analyzer_eval (c[0].y == 0); /* { dg-warning "TRUE" } */
+ __analyzer_eval (c[1].x == 5); /* { dg-warning "TRUE" } */
+ __analyzer_eval (c[1].y == 0); /* { dg-warning "TRUE" } */
+}
+
+void test_9 (void)
+{
+ struct coord c[2] = {{}, {}};
+ __analyzer_eval (c[0].x == 0); /* { dg-warning "TRUE" } */
+ __analyzer_eval (c[0].y == 0); /* { dg-warning "TRUE" } */
+ __analyzer_eval (c[1].x == 0); /* { dg-warning "TRUE" } */
+ __analyzer_eval (c[1].y == 0); /* { dg-warning "TRUE" } */
+}
+
+void test_10 (void)
+{
+ struct coord c[2] = {{.y = 4, .x = 3}, {5, 6}};
+ __analyzer_eval (c[0].x == 3); /* { dg-warning "TRUE" } */
+ __analyzer_eval (c[0].y == 4); /* { dg-warning "TRUE" } */
+ __analyzer_eval (c[1].x == 5); /* { dg-warning "TRUE" } */
+ __analyzer_eval (c[1].y == 6); /* { dg-warning "TRUE" } */
+}
+
+void test_11 (void)
+{
+ struct coord c[2] = {{.y = 4}, {5, 6}};
+ __analyzer_eval (c[0].x == 0); /* { dg-warning "TRUE" } */
+ __analyzer_eval (c[0].y == 4); /* { dg-warning "TRUE" } */
+ __analyzer_eval (c[1].x == 5); /* { dg-warning "TRUE" } */
+ __analyzer_eval (c[1].y == 6); /* { dg-warning "TRUE" } */
+}
+
+void test_12 (void)
+{
+ struct tri t = {};
+ __analyzer_eval (t.v[0].x == 0); /* { dg-warning "TRUE" } */
+ __analyzer_eval (t.v[2].y == 0); /* { dg-warning "TRUE" } */
+}
+
+void test_13 (void)
+{
+ struct tri t = {3, 4, 5, 6, 7, 8};
+ __analyzer_eval (t.v[0].x == 3); /* { dg-warning "TRUE" } */
+ __analyzer_eval (t.v[0].y == 4); /* { dg-warning "TRUE" } */
+ __analyzer_eval (t.v[1].x == 5); /* { dg-warning "TRUE" } */
+ __analyzer_eval (t.v[1].y == 6); /* { dg-warning "TRUE" } */
+ __analyzer_eval (t.v[2].x == 7); /* { dg-warning "TRUE" } */
+ __analyzer_eval (t.v[2].y == 8); /* { dg-warning "TRUE" } */
+}
+
+void test_14 (void)
+{
+ union iap u = {};
+ __analyzer_eval (u.i == 0); /* { dg-warning "TRUE" } */
+}
--- /dev/null
+#include <stdlib.h>
+
+void *ptr;
+
+void *test (void)
+{
+ ptr = malloc (1024);
+ ptr = NULL; /* { dg-warning "leak of 'ptr'" } */
+}
--- /dev/null
+#include <stdlib.h>
+
+#include "analyzer-decls.h"
+
+struct iter
+{
+ int start;
+ int end;
+ int step;
+ int val;
+};
+
+struct iter * __attribute__((noinline))
+iter_new (int start, int end, int step)
+{
+ struct iter *it = (struct iter *)malloc (sizeof (struct iter));
+ if (!it)
+ abort ();
+ it->start = start;
+ it->end = end;
+ it->step = step;
+ it->val = start;
+ return it;
+}
+
+int __attribute__((noinline))
+iter_done_p (struct iter *it)
+{
+ return it->val >= it->end;
+}
+
+void __attribute__((noinline))
+iter_next (struct iter *it)
+{
+ it->val += it->step;
+}
+
+/* Example of an iterator object, to see how well we cope with a well-disguised
+ iteration from 0 to n with a step of 1. */
+
+void test(int n)
+{
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 processed enode" } */
+
+ struct iter *it = iter_new (0, n, 1);
+ while (!iter_done_p (it))
+ {
+ __analyzer_eval (it->val < n); /* { dg-warning "TRUE" "true" { xfail *-*-* } } */
+ /* { dg-bogus "UNKNOWN" "unknown" { xfail *-*-* } .-1 } */
+ /* TODO(xfail^^^): ideally we ought to figure out i > 0 after 1st iteration. */
+
+ __analyzer_eval (it->val == 0); /* { dg-warning "TRUE" "true on 1st iter" } */
+ /* { dg-warning "UNKNOWN" "unknown" { target *-*-* } .-1 } */
+ /* TODO: should we figure out i > 0 after the 1st iteration? */
+
+ __analyzer_eval (it->val >= 0); /* { dg-warning "TRUE" } */
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 processed enodes" } */
+
+ iter_next (it);
+ }
+
+ __analyzer_eval (it->val >= n); /* { dg-warning "TRUE" "true" { xfail *-*-* } } */
+ /* { dg-bogus "UNKNOWN" "unknown" { xfail *-*-* } .-1 } */
+
+ __analyzer_eval (it->val == n); /* { dg-warning "TRUE" "desired" { xfail *-*-* } } */
+ /* { dg-warning "UNKNOWN" "status quo" { target *-*-* } .-1 } */
+ /* TODO(xfail^^^): it only figures out i >= n, rather than i == n. */
+
+ free (it);
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 processed enode" } */
+}
--- /dev/null
+#include "analyzer-decls.h"
+
+void test(int n)
+{
+ int i;
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 processed enode" } */
+
+ for (i = 0; i < n; i++) {
+ __analyzer_eval (i < n); /* { dg-warning "TRUE" } */
+ /* (should report TRUE twice). */
+
+ __analyzer_eval (i == 0); /* { dg-warning "TRUE" "1st" } */
+ /* { dg-warning "FALSE" "2nd" { xfail *-*-* } .-1 } */
+ /* { dg-warning "UNKNOWN" "status quo" { target *-*-* } .-2 } */
+ /* TODO(xfail^^^): ideally we ought to figure out i > 0 after 1st iteration. */
+
+ __analyzer_eval (i >= 0); /* { dg-warning "TRUE" } */
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 processed enodes" } */
+ }
+
+ __analyzer_eval (i >= n); /* { dg-warning "TRUE" "true" { xfail *-*-* } } */
+ /* { dg-bogus "UNKNOWN" "unknown" { xfail *-*-* } .-1 } */
+
+ __analyzer_eval (i == n); /* { dg-warning "TRUE" "true" { xfail *-*-* } } */
+ /* { dg-bogus "UNKNOWN" "unknown" { xfail *-*-* } .-1 } */
+ /* TODO(xfail^^^): it only figures out i >= n, rather than i == n. */
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 processed enode" } */
+}
for (u.i=0; u.i<256; u.i++) {
- __analyzer_eval (u.i < 256); /* { dg-warning "TRUE" "1st" } */
- /* { dg-warning "TRUE" "2nd" { xfail *-*-* } .-1 } */
- /* { dg-bogus "UNKNOWN" "status quo" { xfail *-*-* } .-2 } */
- /* (should report TRUE twice). */
+ __analyzer_eval (u.i < 256); /* { dg-warning "TRUE" } */
__analyzer_dump_exploded_nodes (0); /* { dg-warning "2 processed enodes" } */
/* TODO(xfail^^^): we're only capturing the first iteration, so
we erroneously get i == 0. */
- //__analyzer_eval (u.i >= 0); /* { d-todo-g-warning "TRUE" } */
+ __analyzer_eval (u.i >= 0); /* { dg-warning "TRUE" } */
}
- __analyzer_eval (u.i >= 256); /* { dg-warning "TRUE" "desired" { xfail *-*-* } } */
- /* { dg-warning "UNKNOWN" "status quo" { target *-*-* } .-1 } */
+ __analyzer_eval (u.i >= 256); /* { dg-warning "TRUE" } */
__analyzer_eval (u.i == 256); /* { dg-warning "TRUE" "desired" { xfail *-*-* } } */
/* { dg-warning "UNKNOWN" "status quo" { target *-*-* } .-1 } */
char *buffer = (char*)malloc(256);
for (i=0; i<255; i++) {
- buffer[i] = c; /* { dg-warning "use after 'free' of 'buffer'" } */
- /* BUG: the malloc could have failed
- TODO: the checker doesn't yet pick up on this, perhaps
- due to the pointer arithmetic not picking up on the
- state */
+ buffer[i] = c; /* { dg-warning "use after 'free' of 'buffer'" "use after free" { xfail *-*-* } } */
+ /* { dg-warning "possibly-NULL 'buffer'" "deref of unchecked" { target *-*-* } .-1 } */
free(buffer); /* { dg-warning "double-'free' of 'buffer'" } */
}
-// FIXME:
-/* { dg-additional-options "-fno-analyzer-state-purge" } */
-
/* Example of nested loops. */
#include "analyzer-decls.h"
for (i=0; i<256; i++) {
- __analyzer_eval (i >= 0); /* { dg-warning "TRUE" "true" } */
- /* { dg-warning "UNKNOWN" "unknown" { target *-*-* } .-1 } */
+ __analyzer_eval (i >= 0); /* { dg-warning "TRUE" } */
__analyzer_eval (i < 256); /* { dg-warning "TRUE" } */
__analyzer_eval (j >= 0); /* { dg-warning "TRUE" "true" } */
/* { dg-warning "UNKNOWN" "unknown" { target *-*-* } .-1 } */
- __analyzer_eval (j < 256); /* { dg-warning "TRUE" } */
+ __analyzer_eval (j < 256); /* { dg-warning "TRUE" "true" } */
+ /* { dg-bogus "UNKNOWN" "unknown" { xfail *-*-* } .-1 } */
+ /* TODO(xfail^^^): should report TRUE twice. */
__analyzer_dump_exploded_nodes (0); /* { dg-warning "3 processed enodes" } */
__analyzer_eval (k >= 0); /* { dg-warning "TRUE" "true" } */
/* { dg-warning "UNKNOWN" "unknown" { target *-*-* } .-1 } */
- __analyzer_eval (k < 256); /* { dg-warning "TRUE" } */
+ __analyzer_eval (k < 256); /* { dg-warning "TRUE" "true" } */
+ /* { dg-bogus "UNKNOWN" "unknown" { xfail *-*-* } .-1 } */
__analyzer_dump_exploded_nodes (0); /* { dg-warning "4 processed enodes" } */
}
--- /dev/null
+#include "analyzer-decls.h"
+
+void test(int n)
+{
+ int i;
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 processed enode" } */
+
+ for (i = n; i > 0; i--) {
+ __analyzer_eval (i > 0); /* { dg-warning "TRUE" "true" } */
+ /* { dg-bogus "UNKNOWN" "unknown" { xfail *-*-* } .-1 } */
+ /* TODO(xfail^^^): should report TRUE twice. */
+
+ __analyzer_eval (i == n); /* { dg-warning "TRUE" "1st" } */
+ /* { dg-warning "FALSE" "2nd" { xfail *-*-* } .-1 } */
+ /* { dg-warning "UNKNOWN" "status quo" { target *-*-* } .-2 } */
+ /* TODO(xfail^^^): ideally we ought to figure out i > 0 after 1st iteration. */
+
+ __analyzer_eval (i <= n); /* { dg-warning "TRUE" "1st" } */
+ /* { dg-warning "TRUE" "2nd" { xfail *-*-* } .-1 } */
+ /* { dg-warning "UNKNOWN" "status quo" { target *-*-* } .-2 } */
+ /* TODO(xfail^^^): ideally we ought to figure out i >= 0 for all iterations. */
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 processed enodes" } */
+ }
+
+ __analyzer_eval (i <= 0); /* { dg-warning "TRUE" "true" { xfail *-*-* } } */
+ /* { dg-bogus "UNKNOWN" "unknown" { xfail *-*-* } .-1 } */
+
+ __analyzer_eval (i == 0); /* { dg-warning "TRUE" "desired" { xfail *-*-* } } */
+ /* { dg-warning "UNKNOWN" "status quo" { target *-*-* } .-1 } */
+ /* TODO(xfail^^^): it only figures out i <= 0, rather than i == 0. */
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 processed enode" } */
+}
--- /dev/null
+#include "analyzer-decls.h"
+
+void test(int start, int end, int step)
+{
+ int i;
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 processed enode" } */
+
+ for (i = start; i > end; i --) {
+ __analyzer_eval (i > end); /* { dg-warning "TRUE" "true" } */
+ /* { dg-bogus "UNKNOWN" "unknown" { xfail *-*-* } .-1 } */
+ /* TODO(xfail^^^): should report TRUE twice. */
+
+ __analyzer_eval (i == start); /* { dg-warning "TRUE" "1st" } */
+ /* { dg-warning "FALSE" "2nd" { xfail *-*-* } .-1 } */
+ /* { dg-warning "UNKNOWN" "status quo" { target *-*-* } .-2 } */
+ /* TODO(xfail^^^): ideally we ought to figure out i > 0 after 1st iteration. */
+
+ __analyzer_eval (i <= start); /* { dg-warning "TRUE" "true" } */
+ /* { dg-bogus "UNKNOWN" "unknown" { xfail *-*-* } .-1 } */
+ /* TODO(xfail^^^): should report TRUE twice. */
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 processed enodes" } */
+ }
+
+ __analyzer_eval (i >= end); /* { dg-warning "TRUE" "true" { xfail *-*-* } } */
+ /* { dg-bogus "UNKNOWN" "unknown" { xfail *-*-* } .-1 } */
+
+ // FIXME: do we know this? What if we overshoot?
+ __analyzer_eval (i == end); /* { dg-warning "TRUE" "desired" { xfail *-*-* } } */
+ /* { dg-warning "UNKNOWN" "status quo" { target *-*-* } .-1 } */
+ /* TODO(xfail^^^): it only figures out i >= end, rather than i == end. */
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 processed enode" } */
+}
--- /dev/null
+#include "analyzer-decls.h"
+
+void test(int start, int end, int step)
+{
+ int i;
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 processed enode" } */
+
+ for (i = start; i > end; i -= step) {
+ __analyzer_eval (i > end); /* { dg-warning "TRUE" "true" } */
+ /* { dg-bogus "UNKNOWN" "unknown" { xfail *-*-* } .-1 } */
+ /* TODO(xfail^^^): should report TRUE twice. */
+
+ __analyzer_eval (i == start); /* { dg-warning "TRUE" "1st" } */
+ /* { dg-warning "FALSE" "2nd" { xfail *-*-* } .-1 } */
+ /* { dg-warning "UNKNOWN" "status quo" { target *-*-* } .-2 } */
+ /* TODO(xfail^^^): ideally we ought to figure out i > 0 after 1st iteration. */
+
+ /* We don't know the direction of step. */
+ __analyzer_eval (i <= start); /* { dg-warning "TRUE" "true" } */
+ /* { dg-warning "UNKNOWN" "unknown" { target *-*-* } .-1 } */
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 processed enodes" } */
+ }
+
+ __analyzer_eval (i <= end); /* { dg-warning "TRUE" "true" { xfail *-*-* } } */
+ /* { dg-bogus "UNKNOWN" "unknown" { xfail *-*-* } .-1 } */
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 processed enode" } */
+}
--- /dev/null
+#include "analyzer-decls.h"
+
+void test(int start, int end, int step)
+{
+ int i;
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 processed enode" } */
+
+ for (i = start; i < end; i += step) {
+ __analyzer_eval (i < end); /* { dg-warning "TRUE" "true" } */
+ /* { dg-bogus "UNKNOWN" "unknown" { xfail *-*-* } .-1 } */
+ /* TODO(xfail^^^): should report TRUE twice. */
+
+ __analyzer_eval (i == start); /* { dg-warning "TRUE" "1st" } */
+ /* { dg-warning "FALSE" "2nd" { xfail *-*-* } .-1 } */
+ /* { dg-warning "UNKNOWN" "status quo" { target *-*-* } .-2 } */
+ /* TODO(xfail^^^): ideally we ought to figure out i > 0 after 1st iteration. */
+
+ /* We don't know the direction of step. */
+ __analyzer_eval (i >= start); /* { dg-warning "TRUE" "true" } */
+ /* { dg-warning "UNKNOWN" "unknown" { target *-*-* } .-1 } */
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 processed enodes" } */
+ }
+
+ // FIXME: do we know this? What about direction of step?
+ __analyzer_eval (i >= end); /* { dg-warning "TRUE" "true" { xfail *-*-* } } */
+ /* { dg-bogus "UNKNOWN" "unknown" { xfail *-*-* } .-1 } */
+
+ // FIXME: do we know this? What if we overshoot?
+ __analyzer_eval (i == end); /* { dg-warning "TRUE" "desired" { xfail *-*-* } } */
+ /* { dg-warning "UNKNOWN" "status quo" { target *-*-* } .-1 } */
+ /* TODO(xfail^^^): it only figures out i >= end, rather than i == end. */
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 processed enode" } */
+}
--- /dev/null
+#include "analyzer-decls.h"
+
+void test(int start, int end)
+{
+ int i;
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 processed enode" } */
+
+ for (i = start; i < end; i++) {
+ __analyzer_eval (i < end); /* { dg-warning "TRUE" "true" } */
+ /* { dg-bogus "UNKNOWN" "unknown" { xfail *-*-* } .-1 } */
+ /* TODO(xfail^^^): should report TRUE twice. */
+
+ __analyzer_eval (i == start); /* { dg-warning "TRUE" "1st" } */
+ /* { dg-warning "FALSE" "2nd" { xfail *-*-* } .-1 } */
+ /* { dg-warning "UNKNOWN" "status quo" { target *-*-* } .-2 } */
+ /* TODO(xfail^^^): ideally we ought to figure out i > 0 after 1st iteration. */
+
+ __analyzer_eval (i >= start); /* { dg-warning "TRUE" "true" } */
+ /* { dg-bogus "UNKNOWN" "unknown" { xfail *-*-* } .-1 } */
+ /* TODO(xfail^^^): should report TRUE twice. */
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 processed enodes" } */
+ }
+
+ __analyzer_eval (i >= end); /* { dg-warning "TRUE" "true" { xfail *-*-* } } */
+ /* { dg-bogus "UNKNOWN" "unknown" { xfail *-*-* } .-1 } */
+
+ __analyzer_eval (i == end); /* { dg-warning "TRUE" "desired" { xfail *-*-* } } */
+ /* { dg-warning "UNKNOWN" "status quo" { target *-*-* } .-1 } */
+ /* TODO(xfail^^^): it only figures out i >= end, rather than i == end. */
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 processed enode" } */
+}
-/* { dg-additional-options "-fno-analyzer-state-purge" } */
-
#include "analyzer-decls.h"
void test(void)
__analyzer_eval (i < 256); /* { dg-warning "TRUE" } */
/* (should report TRUE twice). */
- __analyzer_eval (i == 0); /* { dg-warning "TRUE" "1st" } */
+ __analyzer_eval (i == 0); /* { dg-warning "TRUE" } */
/* { dg-warning "FALSE" "2nd" { xfail *-*-* } .-1 } */
/* { dg-warning "UNKNOWN" "status quo" { target *-*-* } .-2 } */
/* TODO(xfail^^^): ideally we ought to figure out i > 0 after 1st iteration. */
- __analyzer_eval (i >= 0); /* { dg-warning "TRUE" "1st" } */
- /* { dg-warning "TRUE" "2nd" { xfail *-*-* } } */
- /* { dg-warning "UNKNOWN" "status quo" { target *-*-* } .-2 } */
- /* TODO(xfail^^^): ideally we ought to figure out i >= 0 for all iterations. */
+ __analyzer_eval (i >= 0); /* { dg-warning "TRUE" } */
__analyzer_dump_exploded_nodes (0); /* { dg-warning "2 processed enodes" } */
}
struct link tmp;
tmp.m_ptr = (struct link *)malloc (sizeof (struct link)); /* { dg-message "allocated here" } */
} /* { dg-warning "leak of 'tmp.m_ptr'" } */
-/* { dg-bogus "leak of '<unknown>'" "" { xfail *-*-* } .-1 } */
+/* { dg-bogus "leak of '<unknown>'" "leak of unknown" { target *-*-* } .-1 } */
void test_31 (void)
{
void *ptr = malloc (sizeof (struct link)); /* { dg-message "allocated here" } */
tmp.m_ptr = (struct link *)ptr;
} /* { dg-warning "leak of 'ptr'" } */
-/* { dg-bogus "leak of 'tmp.m_ptr'" "" { xfail *-*-* } .-1 } */
+/* { dg-bogus "leak of 'tmp.m_ptr'" "" { target *-*-* } .-1 } */
void test_32 (void)
{
void *p = malloc (1024);
void *q = p + 64;
free (q - 64); /* this is probably OK. */
-} /* { dg-bogus "leak of 'p'" "" { xfail *-*-* } } */
-// TODO(xfail)
+} /* { dg-bogus "leak of 'p'" } */
#if 0
void test_31 (void *p)
void test_43 (void)
{
global_link.m_ptr = malloc (sizeof (struct link)); /* { dg-message "allocated here" } */
- global_link.m_ptr = NULL;
-} /* { dg-warning "leak of '<unknown>'" } */
-/* TODO: should be more precise than just '<unknown>', and
- ideally would be at the assigment to NULL. */
+ global_link.m_ptr = NULL; /* { dg-warning "leak of 'global_link.m_ptr'" } */
+}
struct link *global_ptr;
int *p = NULL; /* { dg-message "'p' is NULL" } */
*p = 1; /* { dg-warning "dereference of NULL 'p'" } */
}
+
+/* As test_48, but where the assignment of NULL is not at the start of a BB. */
+
+int test_49 (int i)
+{
+ int *p;
+ int x;
+
+ x = i * 2;
+ p = NULL; /* { dg-message "'p' is NULL" } */
+ *p = 1; /* { dg-warning "dereference of NULL 'p'" } */
+ return x;
+}
*tm = p;
if (!p)
abort ();
- return p; /* { dg-warning "leak of 'tm'" } */
+ return p;
}
void a5 (void)
{
struct bar *qb = NULL;
hv (&qb);
-} /* { dg-warning "leak of '\\(struct foo \\*\\)qb'" } */
+} /* { dg-warning "leak of 'qb'" } */
--- /dev/null
+#include <stdlib.h>
+#include "analyzer-decls.h"
+
+extern void foo (int *);
+
+void test (int n)
+{
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 processed enode" } */
+
+ for (int i = 0; i < n; i++)
+ {
+ int *ptr = (int *)malloc (sizeof (int) * i);
+ foo (ptr);
+ free (ptr);
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 processed enodes" } */
+ }
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 processed enode" } */
+}
| | |
| | (2) calling 'make_boxed_int' from 'test'
|
- +--> 'make_boxed_int': events 3-6
+ +--> 'make_boxed_int': events 3-4
|
| NN | make_boxed_int (int i)
| | ^~~~~~~~~~~~~~
| | |
| | (3) entry to 'make_boxed_int'
- |......
+ | NN | {
+ | NN | boxed_int *result = (boxed_int *)wrapped_malloc (sizeof (boxed_int));
+ | | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ | | |
+ | | (4) calling 'wrapped_malloc' from 'make_boxed_int'
+ |
+ +--> 'wrapped_malloc': events 5-6
+ |
+ | NN | void *wrapped_malloc (size_t size)
+ | | ^~~~~~~~~~~~~~
+ | | |
+ | | (5) entry to 'wrapped_malloc'
+ | NN | {
+ | NN | return malloc (size);
+ | | ~~~~~~~~~~~~~
+ | | |
+ | | (6) allocated here (state of '<unknown>': 'start' -> 'unchecked', NULL origin)
+ |
+ <------+
+ |
+ 'make_boxed_int': events 7-10
+ |
+ | NN | boxed_int *result = (boxed_int *)wrapped_malloc (sizeof (boxed_int));
+ | | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ | | |
+ | | (7) returning to 'make_boxed_int' from 'wrapped_malloc'
| NN | if (!result)
- | | ~
+ | | ~
| | |
- | | (4) following 'false' branch (when 'result' is non-NULL)...
+ | | (8) assuming 'result' is non-NULL (state of 'result': 'unchecked' -> 'nonnull', NULL origin)
+ | | (9) following 'false' branch (when 'result' is non-NULL)...
| NN | abort ();
| NN | result->i = i;
- | | ~~~~~~~~~~~~~
+ | | ~~~~~~~~~~~~~
| | |
- | | (5) ...to here
- | NN | return result;
- | | ~~~~~~
- | | |
- | | (6) state of '<return-value>': 'start' -> 'nonnull' (origin: NULL)
+ | | (10) ...to here
|
<------+
|
- 'test': events 7-8
+ 'test': events 11-12
|
| NN | boxed_int *obj = make_boxed_int (i);
| | ^~~~~~~~~~~~~~~~~~
| | |
- | | (7) returning to 'test' from 'make_boxed_int'
+ | | (11) returning to 'test' from 'make_boxed_int'
| NN |
| NN | free_boxed_int (obj);
| | ~~~~~~~~~~~~~~~~~~~~
| | |
- | | (8) calling 'free_boxed_int' from 'test'
+ | | (12) calling 'free_boxed_int' from 'test'
|
- +--> 'free_boxed_int': events 9-10
+ +--> 'free_boxed_int': events 13-14
|
| NN | free_boxed_int (boxed_int *bi)
| | ^~~~~~~~~~~~~~
| | |
- | | (9) entry to 'free_boxed_int'
+ | | (13) entry to 'free_boxed_int'
| NN | {
| NN | wrapped_free (bi);
| | ~~~~~~~~~~~~~~~~~
| | |
- | | (10) calling 'wrapped_free' from 'free_boxed_int'
+ | | (14) calling 'wrapped_free' from 'free_boxed_int'
|
- +--> 'wrapped_free': events 11-12
+ +--> 'wrapped_free': events 15-16
|
| NN | void wrapped_free (void *ptr)
| | ^~~~~~~~~~~~
| | |
- | | (11) entry to 'wrapped_free'
+ | | (15) entry to 'wrapped_free'
| NN | {
| NN | free (ptr);
| | ~~~~~~~~~~
| | |
- | | (12) first 'free' here (state of 'ptr': 'nonnull' -> 'freed', origin: NULL)
+ | | (16) first 'free' here (state of 'ptr': 'nonnull' -> 'freed', NULL origin)
|
<------+
|
- 'free_boxed_int': event 13
+ 'free_boxed_int': event 17
|
| NN | wrapped_free (bi);
| | ^~~~~~~~~~~~~~~~~
| | |
- | | (13) returning to 'free_boxed_int' from 'wrapped_free'
+ | | (17) returning to 'free_boxed_int' from 'wrapped_free'
|
<------+
|
- 'test': events 14-15
+ 'test': events 18-19
|
| NN | free_boxed_int (obj);
| | ^~~~~~~~~~~~~~~~~~~~
| | |
- | | (14) returning to 'test' from 'free_boxed_int'
+ | | (18) returning to 'test' from 'free_boxed_int'
| NN |
| NN | free_boxed_int (obj);
| | ~~~~~~~~~~~~~~~~~~~~
| | |
- | | (15) passing freed pointer 'obj' in call to 'free_boxed_int' from 'test'
+ | | (19) passing freed pointer 'obj' in call to 'free_boxed_int' from 'test'
|
- +--> 'free_boxed_int': events 16-17
+ +--> 'free_boxed_int': events 20-21
|
| NN | free_boxed_int (boxed_int *bi)
| | ^~~~~~~~~~~~~~
| | |
- | | (16) entry to 'free_boxed_int'
+ | | (20) entry to 'free_boxed_int'
| NN | {
| NN | wrapped_free (bi);
| | ~~~~~~~~~~~~~~~~~
| | |
- | | (17) passing freed pointer 'bi' in call to 'wrapped_free' from 'free_boxed_int'
+ | | (21) passing freed pointer 'bi' in call to 'wrapped_free' from 'free_boxed_int'
|
- +--> 'wrapped_free': events 18-19
+ +--> 'wrapped_free': events 22-23
|
| NN | void wrapped_free (void *ptr)
| | ^~~~~~~~~~~~
| | |
- | | (18) entry to 'wrapped_free'
+ | | (22) entry to 'wrapped_free'
| NN | {
| NN | free (ptr);
| | ~~~~~~~~~~
| | |
- | | (19) second 'free' here; first 'free' was at (12) ('ptr' is in state 'freed')
+ | | (23) second 'free' here; first 'free' was at (16) ('ptr' is in state 'freed')
|
{ dg-end-multiline-output "" } */
/* Example of a multilevel wrapper around malloc, with an unchecked write. */
-/* { dg-additional-options "-fdiagnostics-show-line-numbers -fdiagnostics-path-format=inline-events -fanalyzer-checker=malloc -fdiagnostics-show-caret -fanalyzer-verbose-state-changes" } */
+/* { dg-additional-options "-fdiagnostics-show-line-numbers -fdiagnostics-path-format=inline-events -fanalyzer-checker=malloc -fdiagnostics-show-caret" } */
/* { dg-enable-nn-line-numbers "" } */
#include <stdlib.h>
| NN | return malloc (size);
| | ~~~~~~~~~~~~~
| | |
- | | (4) this call could return NULL (state of '<return-value>': 'start' -> 'unchecked', origin: NULL)
+ | | (4) this call could return NULL
|
<------+
|
| NN | result->i = i;
| | ~~~~~~~~~~~~~
| | |
- | | (6) 'result' could be NULL: unchecked value from (4) ('result' is in state 'unchecked')
+ | | (6) 'result' could be NULL: unchecked value from (4)
|
{ dg-end-multiline-output "" } */
free (ptr); /* No double-'free' warning: we've already attempted
to dereference it above. */
return *ptr; /* { dg-warning "use after 'free' of 'ptr'" "use-after-free" } */
- // TODO: two warnings here: one is from sm-malloc, the other from region model
- /* { dg-warning "leak of 'ptr'" "leak" { target *-*-* } .-2 } */
+ /* { dg-warning "leak of 'ptr'" "leak" { target *-*-* } .-1 } */
}
/* "dereference of possibly-NULL 'ptr'". */
| | (7) 'ptr' leaks here; was allocated at (1)
|
{ dg-end-multiline-output "" } */
-
-/* "use after 'free' of 'ptr'". */
-/* { dg-begin-multiline-output "" }
- NN | *ptr = 19;
- | ~~~~~^~~~
- 'test_3': events 1-3
- |
- | NN | if (x)
- | | ^
- | | |
- | | (1) following 'true' branch (when 'x != 0')...
- | NN | free (ptr);
- | | ~~~~~~~~~~
- | | |
- | | (2) ...to here
- | NN |
- | NN | *ptr = 19;
- | | ~~~~~~~~~
- | | |
- | | (3) use after 'free' of 'ptr' here
- |
- { dg-end-multiline-output "" } */
-
-/* "use after 'free' of 'ptr'". */
-/* { dg-begin-multiline-output "" }
- NN | return *ptr;
- | ^~~~
- 'test_3': events 1-5
- |
- | NN | if (x)
- | | ^
- | | |
- | | (1) following 'false' branch (when 'x == 0')...
- |......
- | NN | *ptr = 19;
- | | ~~~~~~~~~
- | | |
- | | (2) ...to here
- |......
- | NN | if (y)
- | | ~
- | | |
- | | (3) following 'true' branch (when 'y != 0')...
- | NN | free (ptr);
- | | ~~~~~~~~~~
- | | |
- | | (4) ...to here
- | NN | to dereference it above
- | NN | return *ptr;
- | | ~~~~
- | | |
- | | (5) use after 'free' of 'ptr' here
- |
- { dg-end-multiline-output "" } */
-/* TODO: this is really a duplicate; can we either eliminate it, or
- improve the path? */
int sum = 0;
int i;
for (i = 0; i < n; i++)
- p[i] = i;
+ p[i] = i; /* { dg-warning "dereference of possibly-NULL 'p'" } */
for (i = 0; i < n; i++)
sum += foo (p[i]); /* { dg-bogus "uninitialized" } */
return sum;
result = do_stuff (ptr, n);
- __analyzer_dump_exploded_nodes (0); /* { dg-warning "3 processed enodes" } */
- // FIXME: why 3 here?
- __analyzer_dump_exploded_nodes (0); /* { dg-warning "3 processed enodes" } */
- // FIXME: why 3 here?
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "5 processed enodes" } */
+ // FIXME: why 5 here?
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "5 processed enodes" } */
+ // FIXME: why 5 here?
if (n > 10)
free (ptr); /* { dg-bogus "not on the heap" } */
result = do_stuff (ptr, n);
- __analyzer_dump_exploded_nodes (0); /* { dg-warning "3 processed enodes" } */
- // FIXME: why 3 here?
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "5 processed enodes" } */
+ // FIXME: why 5 here?
if (need_to_free)
free (ptr); /* { dg-bogus "not on the heap" } */
result = do_stuff (ptr, n);
- __analyzer_dump_exploded_nodes (0); /* { dg-warning "3 processed enodes" } */
- // FIXME: why 3 here?
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "5 processed enodes" } */
+ // FIXME: why 5 here?
if (ptr != buf)
free (ptr); /* { dg-bogus "not on the heap" } */
result = do_stuff (ptr, n);
- __analyzer_dump_exploded_nodes (0); /* { dg-warning "5 processed enodes" } */
- // FIXME: why 5 here?
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "6 processed enodes" } */
+ // FIXME: why 6 here?
if (n > 10)
free (ptr); /* { dg-bogus "not on the heap" } */
int sum = 0;
int i;
for (i = 0; i < n; i++)
- p[i] = i;
+ p[i] = i; /* { dg-warning "dereference of possibly-NULL" } */
for (i = 0; i < n; i++)
sum += foo (p[i]); /* { dg-bogus "uninitialized" } */
result = sum;
}
- __analyzer_dump_exploded_nodes (0); /* { dg-warning "3 processed enodes" } */
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 processed enodes" } */
if (n > 10)
free (ptr); /* { dg-bogus "not on the heap" } */
int sum = 0;
int i;
for (i = 0; i < n; i++)
- p[i] = i;
+ p[i] = i; /* { dg-warning "dereference of possibly-NULL" } */
result = sum;
}
- __analyzer_dump_exploded_nodes (0); /* { dg-warning "3 processed enodes" } */
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 processed enodes" } */
if (n > 10)
free (ptr); /* { dg-bogus "not on the heap" } */
int sum = 0;
int i;
for (i = 0; i < n; i++)
- p[i] = i;
+ p[i] = i; /* { dg-warning "dereference of possibly-NULL" } */
for (i = 0; i < n; i++)
sum += foo (p[i]); /* { dg-bogus "uninitialized" } */
result = sum;
}
- __analyzer_dump_exploded_nodes (0); /* { dg-warning "3 processed enodes" } */
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 processed enodes" } */
if (need_to_free)
free (ptr); /* { dg-bogus "not on the heap" } */
int sum = 0;
int i;
for (i = 0; i < n; i++)
- p[i] = i;
+ p[i] = i; /* { dg-warning "dereference of possibly-NULL" } */
for (i = 0; i < n; i++)
sum += foo (p[i]); /* { dg-bogus "uninitialized" } */
result = sum;
}
- __analyzer_dump_exploded_nodes (0); /* { dg-warning "3 processed enodes" } */
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 processed enodes" } */
if (ptr != buf)
free (ptr); /* { dg-bogus "not on the heap" } */
int sum = 0;
int i;
for (i = 0; i < n; i++)
- p[i] = i;
+ p[i] = i; /* { dg-warning "dereference of possibly-NULL" } */
for (i = 0; i < n; i++)
sum += foo (p[i]); /* { dg-bogus "uninitialized" } */
result = sum;
}
- __analyzer_dump_exploded_nodes (0); /* { dg-warning "3 processed enodes" } */
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 processed enodes" } */
- return result; /* { dg-message "leak of 'p'" } */
- /* FIXME: should this be 'ptr'? */
+ return result; /* { dg-message "leak of 'p'|leak of 'ptr'" } */
}
/* A simpler version of the above. */
--- /dev/null
+#include <string.h>
+#include "analyzer-decls.h"
+
+/* Zero-fill of uninitialized buffer. */
+
+void test_1 (void)
+{
+ char buf[256];
+ memset (buf, 0, 256);
+ __analyzer_eval (buf[42] == 0); /* { dg-warning "TRUE" } */
+}
+
+/* As above, but with __builtin_memset. */
+
+void test_1a (void)
+{
+ char buf[256];
+ __builtin_memset (buf, 0, 256);
+ __analyzer_eval (buf[42] == 0); /* { dg-warning "TRUE" } */
+}
+
+/* Zero-fill of partially initialized buffer. */
+
+void test_2 (void)
+{
+ char buf[256];
+ buf[42] = 'A';
+ __analyzer_eval (buf[42] == 'A'); /* { dg-warning "TRUE" } */
+ memset (buf, 0, 256);
+ __analyzer_eval (buf[42] == '\0'); /* { dg-warning "TRUE" } */
+}
+
+/* A "memset" with known non-zero value. */
+
+void test_3 (int val)
+{
+ char buf[256];
+ memset (buf, 'A', 256);
+ /* We currently merely mark such regions as "unknown", so querying
+ values within them yields UNKNOWN when ideally it would be TRUE. */
+ __analyzer_eval (buf[42] == 'A'); /* { dg-warning "TRUE" "known nonzero" { xfail *-*-* } } */
+ /* { dg-bogus "UNKNOWN" "status quo" { xfail *-*-* } .-1 } */
+}
+
+/* A "memset" with unknown value. */
+
+void test_4 (int val)
+{
+ char buf[256];
+ memset (buf, val, 256);
+ /* We currently merely mark such regions as "unknown", so querying
+ values within them yields UNKNOWN when ideally it would be TRUE. */
+ __analyzer_eval (buf[42] == (char)val); /* { dg-warning "TRUE" "known nonzero" { xfail *-*-* } } */
+ /* { dg-bogus "UNKNOWN" "status quo" { xfail *-*-* } .-1 } */
+}
+
+/* A "memset" with unknown num bytes. */
+
+void test_5 (int n)
+{
+ char buf[256];
+ buf[42] = 'A';
+ __analyzer_eval (buf[42] == 'A'); /* { dg-warning "TRUE" } */
+ memset (buf, 0, n);
+
+ /* We can't know if buf[42] was written to or not. */
+ __analyzer_eval (buf[42] == 'A'); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (buf[42] == '\0'); /* { dg-warning "UNKNOWN" } */
+}
+
+/* A "memset" with unknown value, but with zero size. */
+
+static size_t __attribute__((noinline))
+get_zero (void)
+{
+ return 0;
+}
+
+void test_6 (int val)
+{
+ char buf[256];
+ buf[42] = 'A';
+ memset (buf, 'B', get_zero ());
+ __analyzer_eval (buf[42] == 'A'); /* { dg-warning "TRUE" } */
+}
+
+/* A "memset" of known size that's not the full buffer. */
+
+void test_7 (void)
+{
+ char buf[256];
+ buf[128] = 'A';
+ memset (buf, 0, 128);
+ /* We currently merely mark the whole region as "unknown", so querying
+ values within them yields UNKNOWN. */
+ __analyzer_eval (buf[127] == '\0'); /* { dg-warning "TRUE" "known nonzero" { xfail *-*-* } } */
+ /* { dg-bogus "UNKNOWN" "status quo" { xfail *-*-* } .-1 } */
+ __analyzer_eval (buf[128] == 'A'); /* { dg-warning "TRUE" "known nonzero" { xfail *-*-* } } */
+ /* { dg-bogus "UNKNOWN" "status quo" { xfail *-*-* } .-1 } */
+}
else
p = malloc (32);
- __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 processed enodes" } */
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "3 processed enodes" } */
if (a > 5)
{
else
p = malloc (32);
- __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 processed enodes" } */
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "3 processed enodes" } */
if (a > 6) /* different condition */
{
__analyzer_dump_exploded_nodes (0); /* { dg-warning "1 processed enode" } */
while (1)
{
- __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 processed enodes" } */
- __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 processed enode" } */
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 processed enode" } */
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 processed enode" } */
/* TODO: why does the above need an extra stmt to merge state? */
do_stuff (s, s->mode);
}
__analyzer_dump_exploded_nodes (0); /* { dg-warning "1 processed enode" } */
while (1)
{
- __analyzer_dump_exploded_nodes (0); /* { dg-warning "3 processed enodes" } */
- __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 processed enode" } */
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 processed enode" } */
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 processed enode" } */
/* TODO: why does the above need an extra stmt to merge state? */
switch (s->mode)
{
case 0:
- __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 processed enode" } */
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 processed enode" } */
do_stuff (s, 0);
break;
case 1:
break;
}
- __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 processed enode" } */
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 processed enode" } */
__analyzer_eval (f == 3); /* { dg-warning "TRUE" } */
__analyzer_eval (g == 4); /* { dg-warning "TRUE" } */
__analyzer_eval (h == 5); /* { dg-warning "TRUE" } */
q = malloc (256);
p = malloc (256);
}
- __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 processed enode" } */
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 processed enode" } */
free (p);
free (q);
}
sum += foo (p[i]); /* { dg-bogus "uninitialized" } */
result = sum;
- __analyzer_dump_exploded_nodes (0); /* { dg-warning "5 processed enodes" } */
- // FIXME: why 5 here?
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 processed enodes" } */
free (ptr); /* { dg-warning "double-'free' of 'ptr'" } */
return result;
--- /dev/null
+/* { dg-do "compile" } */
+
+/* Minimal replacement of system headers. */
+#define NULL ((void *) 0)
+typedef struct _IO_FILE FILE;
+extern FILE *fopen(const char *__restrict __filename,
+ const char *__restrict __modes);
+extern int fclose (FILE *__stream);
+
+extern void unzRepair(const char* file, const char* fileOut, const char* fileOutTmp)
+{
+ FILE* fpZip = fopen(file, "rb");
+ FILE* fpOut = fopen(fileOut, "wb");
+ FILE* fpOutCD = fopen(fileOutTmp, "wb");
+ if (fpZip != NULL && fpOut != NULL) {
+ fclose(fpOutCD);
+ fclose(fpZip);
+ fclose(fpOut);
+ }
+} /* { dg-warning "leak of FILE 'fpZip'" "leak of fpZip" } */
+ /* { dg-warning "leak of FILE 'fpOut'" "leak of fpOut" { target *-*-* } .-1 } */
+ /* { dg-warning "leak of FILE 'fpOutCD'" "leak of fpOutCD" { target *-*-* } .-2 } */
--- /dev/null
+/* Integration test to ensure we issue FILE * leak diagnostics for
+ this particular non-trivial case.
+ Adapted from zlib/contrib/minizip/mztools.c, with all #includes
+ removed. */
+
+/* { dg-do "compile" } */
+
+/* Minimal replacement of system headers. */
+
+typedef __SIZE_TYPE__ size_t;
+#define NULL ((void *) 0)
+
+typedef struct _IO_FILE FILE;
+extern FILE *fopen(const char *__restrict __filename,
+ const char *__restrict __modes);
+extern size_t fread (void *__restrict __ptr, size_t __size,
+ size_t __n, FILE *__restrict __stream);
+extern size_t fwrite (const void *__restrict __ptr, size_t __size,
+ size_t __n, FILE *__restrict __s);
+extern int fclose (FILE *__stream);
+extern int remove (const char *__filename)
+ __attribute__ ((__nothrow__ , __leaf__));
+
+extern void *malloc (size_t __size)
+ __attribute__ ((__nothrow__ , __leaf__))
+ __attribute__ ((__malloc__));
+extern void free (void *__ptr)
+ __attribute__ ((__nothrow__ , __leaf__));
+
+extern size_t strlen (const char *__s)
+ __attribute__ ((__nothrow__ , __leaf__))
+ __attribute__ ((__pure__))
+ __attribute__ ((__nonnull__ (1)));
+
+/* Minimal replacement of zlib headers. */
+
+#define ZEXPORT
+typedef unsigned long uLong; /* 32 bits or more */
+#define Z_OK 0
+#define Z_ERRNO (-1)
+#define Z_STREAM_ERROR (-2)
+#define Z_MEM_ERROR (-4)
+
+/*
+ Additional tools for Minizip
+ Code: Xavier Roche '2004
+ License: Same as ZLIB (www.gzip.org)
+*/
+
+/* Code */
+
+#define READ_8(adr) ((unsigned char)*(adr))
+#define READ_16(adr) ( READ_8(adr) | (READ_8(adr+1) << 8) )
+#define READ_32(adr) ( READ_16(adr) | (READ_16((adr)+2) << 16) )
+
+#define WRITE_8(buff, n) do { \
+ *((unsigned char*)(buff)) = (unsigned char) ((n) & 0xff); \
+} while(0)
+#define WRITE_16(buff, n) do { \
+ WRITE_8((unsigned char*)(buff), n); \
+ WRITE_8(((unsigned char*)(buff)) + 1, (n) >> 8); \
+} while(0)
+#define WRITE_32(buff, n) do { \
+ WRITE_16((unsigned char*)(buff), (n) & 0xffff); \
+ WRITE_16((unsigned char*)(buff) + 2, (n) >> 16); \
+} while(0)
+
+extern int ZEXPORT unzRepair(file, fileOut, fileOutTmp, nRecovered, bytesRecovered)
+const char* file;
+const char* fileOut;
+const char* fileOutTmp;
+uLong* nRecovered;
+uLong* bytesRecovered;
+{
+ int err = Z_OK;
+ FILE* fpZip = fopen(file, "rb");
+ FILE* fpOut = fopen(fileOut, "wb");
+ FILE* fpOutCD = fopen(fileOutTmp, "wb");
+ if (fpZip != NULL && fpOut != NULL) {
+ int entries = 0;
+ uLong totalBytes = 0;
+ char header[30];
+ char filename[1024];
+ char extra[1024];
+ int offset = 0;
+ int offsetCD = 0;
+ while ( fread(header, 1, 30, fpZip) == 30 ) {
+ int currentOffset = offset;
+
+ /* File entry */
+ if (READ_32(header) == 0x04034b50) {
+ unsigned int version = READ_16(header + 4);
+ unsigned int gpflag = READ_16(header + 6);
+ unsigned int method = READ_16(header + 8);
+ unsigned int filetime = READ_16(header + 10);
+ unsigned int filedate = READ_16(header + 12);
+ unsigned int crc = READ_32(header + 14); /* crc */
+ unsigned int cpsize = READ_32(header + 18); /* compressed size */
+ unsigned int uncpsize = READ_32(header + 22); /* uncompressed sz */
+ unsigned int fnsize = READ_16(header + 26); /* file name length */
+ unsigned int extsize = READ_16(header + 28); /* extra field length */
+ filename[0] = extra[0] = '\0';
+
+ /* Header */
+ if (fwrite(header, 1, 30, fpOut) == 30) {
+ offset += 30;
+ } else {
+ err = Z_ERRNO;
+ break;
+ }
+
+ /* Filename */
+ if (fnsize > 0) {
+ if (fnsize < sizeof(filename)) {
+ if (fread(filename, 1, fnsize, fpZip) == fnsize) {
+ if (fwrite(filename, 1, fnsize, fpOut) == fnsize) {
+ offset += fnsize;
+ } else {
+ err = Z_ERRNO;
+ break;
+ }
+ } else {
+ err = Z_ERRNO;
+ break;
+ }
+ } else {
+ err = Z_ERRNO;
+ break;
+ }
+ } else {
+ err = Z_STREAM_ERROR;
+ break;
+ }
+
+ /* Extra field */
+ if (extsize > 0) {
+ if (extsize < sizeof(extra)) {
+ if (fread(extra, 1, extsize, fpZip) == extsize) {
+ if (fwrite(extra, 1, extsize, fpOut) == extsize) {
+ offset += extsize;
+ } else {
+ err = Z_ERRNO;
+ break;
+ }
+ } else {
+ err = Z_ERRNO;
+ break;
+ }
+ } else {
+ err = Z_ERRNO;
+ break;
+ }
+ }
+
+ /* Data */
+ {
+ int dataSize = cpsize;
+ if (dataSize == 0) {
+ dataSize = uncpsize;
+ }
+ if (dataSize > 0) {
+ char* data = malloc(dataSize);
+ if (data != NULL) {
+ if ((int)fread(data, 1, dataSize, fpZip) == dataSize) {
+ if ((int)fwrite(data, 1, dataSize, fpOut) == dataSize) {
+ offset += dataSize;
+ totalBytes += dataSize;
+ } else {
+ err = Z_ERRNO;
+ }
+ } else {
+ err = Z_ERRNO;
+ }
+ free(data);
+ if (err != Z_OK) {
+ break;
+ }
+ } else {
+ err = Z_MEM_ERROR;
+ break;
+ }
+ }
+ }
+
+ /* Central directory entry */
+ {
+ char header[46];
+ char* comment = "";
+ int comsize = (int) strlen(comment);
+ WRITE_32(header, 0x02014b50);
+ WRITE_16(header + 4, version);
+ WRITE_16(header + 6, version);
+ WRITE_16(header + 8, gpflag);
+ WRITE_16(header + 10, method);
+ WRITE_16(header + 12, filetime);
+ WRITE_16(header + 14, filedate);
+ WRITE_32(header + 16, crc);
+ WRITE_32(header + 20, cpsize);
+ WRITE_32(header + 24, uncpsize);
+ WRITE_16(header + 28, fnsize);
+ WRITE_16(header + 30, extsize);
+ WRITE_16(header + 32, comsize);
+ WRITE_16(header + 34, 0); /* disk # */
+ WRITE_16(header + 36, 0); /* int attrb */
+ WRITE_32(header + 38, 0); /* ext attrb */
+ WRITE_32(header + 42, currentOffset);
+ /* Header */
+ if (fwrite(header, 1, 46, fpOutCD) == 46) {
+ offsetCD += 46;
+
+ /* Filename */
+ if (fnsize > 0) {
+ if (fwrite(filename, 1, fnsize, fpOutCD) == fnsize) {
+ offsetCD += fnsize;
+ } else {
+ err = Z_ERRNO;
+ break;
+ }
+ } else {
+ err = Z_STREAM_ERROR;
+ break;
+ }
+
+ /* Extra field */
+ if (extsize > 0) {
+ if (fwrite(extra, 1, extsize, fpOutCD) == extsize) {
+ offsetCD += extsize;
+ } else {
+ err = Z_ERRNO;
+ break;
+ }
+ }
+
+ /* Comment field */
+ if (comsize > 0) {
+ if ((int)fwrite(comment, 1, comsize, fpOutCD) == comsize) {
+ offsetCD += comsize;
+ } else {
+ err = Z_ERRNO;
+ break;
+ }
+ }
+
+
+ } else {
+ err = Z_ERRNO;
+ break;
+ }
+ }
+
+ /* Success */
+ entries++;
+
+ } else {
+ break;
+ }
+ }
+
+ /* Final central directory */
+ {
+ int entriesZip = entries;
+ char header[22];
+ char* comment = ""; // "ZIP File recovered by zlib/minizip/mztools";
+ int comsize = (int) strlen(comment);
+ if (entriesZip > 0xffff) {
+ entriesZip = 0xffff;
+ }
+ WRITE_32(header, 0x06054b50);
+ WRITE_16(header + 4, 0); /* disk # */
+ WRITE_16(header + 6, 0); /* disk # */
+ WRITE_16(header + 8, entriesZip); /* hack */
+ WRITE_16(header + 10, entriesZip); /* hack */
+ WRITE_32(header + 12, offsetCD); /* size of CD */
+ WRITE_32(header + 16, offset); /* offset to CD */
+ WRITE_16(header + 20, comsize); /* comment */
+
+ /* Header */
+ if (fwrite(header, 1, 22, fpOutCD) == 22) {
+
+ /* Comment field */
+ if (comsize > 0) {
+ if ((int)fwrite(comment, 1, comsize, fpOutCD) != comsize) {
+ err = Z_ERRNO;
+ }
+ }
+
+ } else {
+ err = Z_ERRNO;
+ }
+ }
+
+ /* Final merge (file + central directory) */
+ fclose(fpOutCD);
+ if (err == Z_OK) {
+ fpOutCD = fopen(fileOutTmp, "rb");
+ if (fpOutCD != NULL) {
+ int nRead;
+ char buffer[8192];
+ while ( (nRead = (int)fread(buffer, 1, sizeof(buffer), fpOutCD)) > 0) {
+ if ((int)fwrite(buffer, 1, nRead, fpOut) != nRead) {
+ err = Z_ERRNO;
+ break;
+ }
+ }
+ fclose(fpOutCD);
+ }
+ }
+
+ /* Close */
+ fclose(fpZip);
+ fclose(fpOut);
+
+ /* Wipe temporary file */
+ (void)remove(fileOutTmp);
+
+ /* Number of recovered entries */
+ if (err == Z_OK) {
+ if (nRecovered != NULL) {
+ *nRecovered = entries;
+ }
+ if (bytesRecovered != NULL) {
+ *bytesRecovered = totalBytes;
+ }
+ }
+ } else {
+ err = Z_STREAM_ERROR;
+ }
+ return err; /* { dg-warning "leak of FILE 'fpZip'" "leak of fpZip" } */
+ /* { dg-warning "leak of FILE 'fpOut'" "leak of fpOut" { target *-*-* } .-1 } */
+ /* { dg-warning "leak of FILE 'fpOutCD'" "leak of fpOutCD" { target *-*-* } .-2 } */
+}
{
int n1[1];
- fread (n1, sizeof (n1[0]), 1, fp); /* { dg-message "'n1' gets an unchecked value here" } */
- idx = n1[0]; /* { dg-message "'idx' has an unchecked value here \\\(from 'n1'\\\)" } */
+ fread (n1, sizeof (n1[0]), 1, fp); /* { dg-message "'n1' gets an unchecked value here" "" { xfail *-*-* } } */
+ idx = n1[0]; /* { dg-message "'idx' has an unchecked value here \\\(from 'n1'\\\)" "" { xfail *-*-* } } */
}
int arr[10];
pl (void)
{
ql ();
- return arr[idx]; /* { dg-warning "use of tainted value 'idx' in array lookup without bounds checking" } */
+ return arr[idx]; /* { dg-warning "use of tainted value 'idx' in array lookup without bounds checking" "" { xfail *-*-* } } */
}
--- /dev/null
+/* Taken from gcc.dg/pr70022.c, adding -O1 to the options
+ (and -fanalyzer, implicitly). */
+
+/* { dg-do compile } */
+/* { dg-options "-w -Wno-psabi -O1" } */
+
+typedef int v4si __attribute__ ((vector_size (16)));
+
+int
+foo (v4si v)
+{
+ return v[~0UL];
+}
for (sc = 0; sc < 1; ++sc)
{
th.gk.hk = 0;
- th.gk.bg[sc] = 0; /* { dg-warning "uninitialized" "uninit-warning-removed" { xfail *-*-* } } */
+ th.gk.bg[sc] = 0; /* { dg-warning "dereference of NULL '0'" } */
+ // TODO: above message could be improved
l3 (&th);
}
}
--- /dev/null
+#include <stdlib.h>
+
+#define _cleanup_(f) __attribute__((cleanup(f)))
+
+static inline void freep(void **p) {
+ free(*p);
+}
+
+void test(void) {
+ _cleanup_(freep) void *ptr;
+
+ ptr = malloc(3);
+} /* { dg-bogus "leak" } */
int test (void)
{
struct foo f = {};
- return *f.v;
+ return *f.v; /* { dg-warning "dereference of NULL" } */
}
--- /dev/null
+#include <stdlib.h>
+
+struct ret
+{
+ int **array;
+};
+
+struct ret *allocate_stuff(void)
+{
+ struct ret *ret;
+
+ ret = calloc(1, sizeof (struct ret));
+ if (!ret) {
+ abort();
+ }
+
+ ret->array = calloc (10, sizeof(int *));
+ if (!ret->array) {
+ abort();
+ }
+
+ return ret;
+}
--- /dev/null
+#include <stdio.h>
+
+int debug;
+
+int opencfgfile(const char *cfgfile, FILE **fd)
+{
+ if (cfgfile[0] != '\0') {
+
+ if ((*fd = fopen(cfgfile, "r")) != NULL) {
+ if (debug)
+ printf("Config file: --config\n");
+ }
+
+ }
+
+ return 2;
+}
--- /dev/null
+int a, b;
+void d();
+void c()
+{
+ d((void (*)()) & a + b);
+}
--- /dev/null
+typedef void (*F) (void);
+void bar (F);
+
+void
+foo (void *a, int b)
+{
+ bar ((F) a + b);
+}
--- /dev/null
+struct bitmap
+{
+ int min;
+ int max;
+ int *vec;
+};
+
+int bitmap_create(struct bitmap *bm, int min, int max)
+{
+ int sz;
+
+ sz = (max / sizeof(int)) + 1;
+
+ bm->min = min;
+ bm->max = max;
+ bm->vec = __builtin_calloc(sz, sizeof(int));
+ if (!bm->vec)
+ return (-12);
+ return 0; /* { dg-bogus "leak" } */
+}
--- /dev/null
+struct _IO_FILE;
+typedef struct _IO_FILE FILE;
+typedef struct _message
+{
+ FILE *fp;
+} MESSAGE;
+extern FILE *fopen (const char *__restrict __filename,
+ const char *__restrict __modes);
+FILE *f (void);
+int imap_fetch_message (int i, MESSAGE *msg, char *p)
+{
+ if ((msg->fp = i ? 0 : f ()))
+ return 0;
+ if (p)
+ msg->fp = fopen (p, "r");
+ return -1;
+}
--- /dev/null
+typedef __SIZE_TYPE__ size_t;
+
+extern void *calloc(size_t nmemb, size_t size);
+extern void free(void *ptr);
+
+static char *activeTroubleArray;
+
+int
+initActiveTroubleArray ()
+{
+ activeTroubleArray = calloc (1, 1);
+ return activeTroubleArray ? 0 : 1;
+}
+
+void
+freeActiveTroubleArray ()
+{
+ free (activeTroubleArray);
+}
+
+int main (int argc, char *argv[])
+{
+ initActiveTroubleArray ();
+ freeActiveTroubleArray ();
+
+ return 1;
+}
--- /dev/null
+#include "analyzer-decls.h"
+
+typedef struct obj {
+ int ob_refcnt;
+} PyObject;
+
+extern void Py_Dealloc (PyObject *op);
+
+#define Py_INCREF(op) \
+ do { \
+ ((PyObject*)(op))->ob_refcnt++; \
+ } while (0)
+
+#define Py_DECREF(op) \
+ do { \
+ if (--((PyObject*)(op))->ob_refcnt == 0) \
+ { \
+ /*Py_Dealloc((PyObject *)(op));*/ \
+ } \
+ } while (0)
+
+void test_1 (PyObject *obj)
+{
+ int orig_refcnt = obj->ob_refcnt;
+ Py_INCREF (obj);
+ Py_INCREF (obj);
+ Py_DECREF (obj);
+ Py_INCREF (obj);
+ __analyzer_eval (obj->ob_refcnt == orig_refcnt + 2); /* { dg-warning "TRUE" } */
+}
+/* TODO: uncomment the Py_Dealloc, which leads to two paths. */
--- /dev/null
+#include <stdlib.h>
+#include "analyzer-decls.h"
+
+struct foo
+{
+ char *ptr;
+};
+
+void test_1 (struct foo f)
+{
+ __analyzer_describe (0, f.ptr); /* { dg-warning "svalue: 'INIT_VAL\\(f.ptr\\)'" } */
+}
+
+static void called_by_test_2 (struct foo f_inner)
+{
+ free (f_inner.ptr);
+ free (f_inner.ptr); /* { dg-warning "double-'free' of 'f_outer.ptr'" } */
+}
+void test_2 (struct foo f_outer)
+{
+ called_by_test_2 (f_outer);
+}
+
+struct nested
+{
+ struct foo f;
+};
+
+static void called_by_test_3 (struct nested n_inner)
+{
+ free (n_inner.f.ptr);
+ free (n_inner.f.ptr); /* { dg-warning "double-'free' of 'n_outer.f.ptr'" } */
+}
+void test_3 (struct nested n_outer)
+{
+ called_by_test_3 (n_outer);
+}
--- /dev/null
+
+int *global_ptr;
+
+static void __attribute__((noinline))
+called_by_test_1 (void)
+{
+ int i = 42;
+ global_ptr = &i;
+}
+
+int test_1 (void)
+{
+ called_by_test_1 ();
+ return *global_ptr; /* { dg-warning "dereferencing pointer 'global_ptr' to within stale stack frame" } */
+}
--- /dev/null
+#include "analyzer-decls.h"
+
+/* The example from store2.h */
+
+void test_1 (char a, char b, char c, char d, char e, char f,
+ int i, int j)
+{
+ char arr[1024];
+ arr[2] = a; /* (1) */
+ arr[3] = b; /* (2) */
+
+ __analyzer_eval (arr[2] == a); /* { dg-warning "TRUE" } */
+ __analyzer_eval (arr[3] == b); /* { dg-warning "TRUE" } */
+ __analyzer_eval (arr[4]); /* { dg-warning "UNKNOWN" } */ // TODO: report uninit
+
+ /* Replace one concrete binding's value with a different value. */
+ arr[3] = c; /* (3) */
+ __analyzer_eval (arr[2] == a); /* { dg-warning "TRUE" } */
+ __analyzer_eval (arr[3] == c); /* { dg-warning "TRUE" } */
+ __analyzer_eval (arr[3] == b); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (arr[4]); /* { dg-warning "UNKNOWN" } */ // TODO: report uninit
+
+ /* Symbolic binding. */
+ arr[i] = d; /* (4) */
+ __analyzer_eval (arr[i] == d); /* { dg-warning "TRUE" } */
+ __analyzer_eval (arr[2] == a); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (arr[3] == c); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (arr[4]); /* { dg-warning "UNKNOWN" } */ /* Don't report uninit. */
+
+ /* Replace symbolic binding with a different one. */
+ arr[j] = e; /* (5) */
+ __analyzer_eval (arr[j] == e); /* { dg-warning "TRUE" } */
+ __analyzer_eval (arr[i] == d); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (arr[4]); /* { dg-warning "UNKNOWN" } */ /* Don't report uninit. */
+
+ /* Add a concrete binding. */
+ arr[3] = f; /* (6) */
+ __analyzer_eval (arr[3] == f); /* { dg-warning "TRUE" } */
+ __analyzer_eval (arr[j] == e); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (arr[4]); /* { dg-warning "UNKNOWN" } */ /* Don't report uninit. */
+}
+
+// TODO: as above, but with int rather than char so there's a cast
--- /dev/null
+#include "analyzer-decls.h"
+
+struct foo
+{
+ int ival;
+ int iarr[10];
+};
+
+void test_1 (int i, int j)
+{
+ struct foo fooarr[4];
+ fooarr[1].ival = 42;
+ fooarr[1].iarr[3] = 27;
+ fooarr[2].iarr[1] = 17;
+ __analyzer_eval (fooarr[1].ival == 42); /* { dg-warning "TRUE" } */
+ __analyzer_eval (fooarr[1].iarr[3] == 27); /* { dg-warning "TRUE" } */
+ __analyzer_eval (fooarr[2].iarr[1] == 17); /* { dg-warning "TRUE" } */
+
+ /* Symbolic binding. */
+ fooarr[2].iarr[i] = j;
+ __analyzer_eval (fooarr[2].iarr[i] == j); /* { dg-warning "TRUE" } */
+
+ /* We should have lost our knowledge about fooarr[2].
+ It's not clear to me if we should also lose our knowledge about
+ fooarr[1] (for the case where i is negative). For now, we do. */
+ __analyzer_eval (fooarr[1].ival == 42); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (fooarr[1].iarr[3] == 27); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (fooarr[2].iarr[1] == 17); /* { dg-warning "UNKNOWN" } */
+ /* Should also be safe to read from fooarr[2];
+ it isn't known to be uninit anymore. */
+ __analyzer_eval (fooarr[2].iarr[10] == 17); /* { dg-warning "UNKNOWN" } */
+}
--- /dev/null
+#include "analyzer-decls.h"
+
+int iarr[16];
+
+void test_1 (int i, int j)
+{
+ int init_el_8 = iarr[8];
+ __analyzer_eval (init_el_8 == iarr[8]); /* { dg-warning "TRUE" } */
+
+ iarr[i] = j;
+ __analyzer_eval (init_el_8 == iarr[8]); /* { dg-warning "UNKNOWN" } */
+}
--- /dev/null
+#include <string.h>
+#include "analyzer-decls.h"
+
+void test_1 (int i, int j, int k)
+{
+ int iarr[16];
+ iarr[i] = j;
+ __analyzer_eval (iarr[i] == j); /* { dg-warning "TRUE" } */
+ __analyzer_eval (iarr[k] == j); /* { dg-warning "UNKNOWN" } */
+
+ memset (iarr, 0, sizeof (iarr));
+ __analyzer_eval (iarr[0] == 0); /* { dg-warning "TRUE" } */
+ __analyzer_eval (iarr[i] == 0); /* { dg-warning "TRUE" } */
+ __analyzer_eval (iarr[i] == j); /* { dg-warning "UNKNOWN" } */
+
+ iarr[i] = j;
+ __analyzer_eval (iarr[i] == j); /* { dg-warning "TRUE" } */
+ __analyzer_eval (iarr[0] == 0); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (iarr[i] == 0); /* { dg-warning "UNKNOWN" } */
+}
--- /dev/null
+#include "analyzer-decls.h"
+
+int a[1024];
+int b[1024];
+
+extern void escape (void *ptr);
+
+void test_1 (int *p)
+{
+ int c, d;
+ escape (&c);
+ a[16] = 42;
+ b[16] = 17;
+ c = 33;
+ d = 44;
+ __analyzer_eval (a[16] == 42); /* { dg-warning "TRUE" } */
+ __analyzer_eval (b[16] == 17); /* { dg-warning "TRUE" } */
+ __analyzer_eval (c == 33); /* { dg-warning "TRUE" } */
+ __analyzer_eval (d == 44); /* { dg-warning "TRUE" } */
+
+ /* Write through an externally-provided pointer. */
+ *p = 100;
+ /* It could clobber our writes to the global arrays... */
+ __analyzer_eval (a[16] == 42); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (b[16] == 17); /* { dg-warning "UNKNOWN" } */
+ /* ...but can't clobber locals, even ones like "c" that have escaped. */
+ __analyzer_eval (c == 33); /* { dg-warning "TRUE" } */
+ __analyzer_eval (d == 44); /* { dg-warning "TRUE" } */
+}
--- /dev/null
+#include "analyzer-decls.h"
+
+int a[1024];
+int b[1024];
+
+extern void escape (void *ptr);
+
+void test_1 (int *p)
+{
+ int c, d;
+ escape (&c);
+
+ *p = 42;
+ __analyzer_eval (*p == 42); /* { dg-warning "TRUE" } */
+
+ /* These writes shouldn't affect *p. */
+ c = 33;
+ d = 44;
+ __analyzer_eval (*p == 42); /* { dg-warning "TRUE" } */
+
+ /* This write could affect *p. */
+ a[16] = 55;
+ __analyzer_eval (*p == 42); /* { dg-warning "UNKNOWN" } */
+}
{
struct foo tmp;
- if (1 == fread(&tmp, sizeof(tmp), 1, f)) { /* { dg-message "\\(1\\) 'tmp' gets an unchecked value here" "event 1" } */
- /* { dg-message "\\(2\\) following 'true' branch\\.\\.\\." "event 2" { target *-*-* } .-1 } */
+ if (1 == fread(&tmp, sizeof(tmp), 1, f)) { /* { dg-message "\\(\[0-9\]+\\) 'tmp' gets an unchecked value here" "event: tmp gets unchecked value" { xfail *-*-* } } */
+ /* { dg-message "\\(\[0-9\]+\\) following 'true' branch\\.\\.\\." "event: following true branch" { target *-*-* } .-1 } */
/* BUG: the following array lookup trusts that the input data's index is
in the range 0 <= i < 256; otherwise it's accessing the stack */
return tmp.buf[tmp.i]; // { dg-warning "use of tainted value 'tmp.i' in array lookup without bounds checking" "warning" } */
- /* { dg-message "23: \\(3\\) \\.\\.\\.to here" "event 3" { target *-*-* } .-1 } */
- /* { dg-message "23: \\(4\\) 'tmp.i' has an unchecked value here \\(from 'tmp'\\)" "event 4" { target *-*-* } .-2 } */
- /* { dg-message "\\(5\\) use of tainted value 'tmp.i' in array lookup without bounds checking" "event 5" { target *-*-* } .-3 } */
+ /* { dg-message "23: \\(\[0-9\]+\\) \\.\\.\\.to here" "event: to here" { target *-*-* } .-1 } */
+ /* { dg-message "23: \\(\[0-9\]+\\) 'tmp.i' has an unchecked value here \\(from 'tmp'\\)" "event: tmp.i has an unchecked value" { xfail *-*-* } .-2 } */
+ /* { dg-message "\\(\[0-9\]+\\) use of tainted value 'tmp.i' in array lookup without bounds checking" "final event" { target *-*-* } .-3 } */
// TOOD: better messages for state changes
}
struct foo tmp;
if (1 == fread(&tmp, sizeof(tmp), 1, f)) {
- if (tmp.i >= 0) { /* { dg-message "'tmp.i' has an unchecked value here \\(from 'tmp'\\)" "warning" } */
- /* { dg-message "'tmp.i' has its lower bound checked here" "event" { target *-*-* } .-1 } */
- return tmp.buf[tmp.i]; /* { dg-warning "use of tainted value 'tmp.i' in array lookup without upper-bounds checking" } */
+ if (tmp.i >= 0) { /* { dg-message "'tmp.i' has an unchecked value here \\(from 'tmp'\\)" "event: tmp.i has an unchecked value" { xfail *-*-* } } */
+ /* { dg-message "'tmp.i' has its lower bound checked here" "event: lower bound checked" { target *-*-* } .-1 } */
+ return tmp.buf[tmp.i]; /* { dg-warning "use of tainted value 'tmp.i' in array lookup without upper-bounds checking" "warning" } */
}
}
return 0;
struct foo tmp;
if (1 == fread(&tmp, sizeof(tmp), 1, f)) {
- if (tmp.i < 256) { /* { dg-message "'tmp.i' has an unchecked value here \\(from 'tmp'\\)" "warning" } */
- /* { dg-message "'tmp.i' has its upper bound checked here" "event" { target *-*-* } .-1 } */
- return tmp.buf[tmp.i]; /* { dg-warning "use of tainted value 'tmp.i' in array lookup without lower-bounds checking" } */
+ if (tmp.i < 256) { /* { dg-message "'tmp.i' has an unchecked value here \\(from 'tmp'\\)" "event: tmp.i has an unchecked value" { xfail *-*-* } } */
+ /* { dg-message "'tmp.i' has its upper bound checked here" "event: upper bound checked" { target *-*-* } .-1 } */
+ return tmp.buf[tmp.i]; /* { dg-warning "use of tainted value 'tmp.i' in array lookup without lower-bounds checking" "warning" } */
}
}
return 0;
--- /dev/null
+/* { dg-skip-if "" { *-*-* } { "-fno-fat-lto-objects" } { "" } } */
+
+#include "../analyzer-decls.h"
+
+void test (int *p)
+{
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 processed enode" } */
+
+ while (*p)
+ {
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 processed enode" } */
+ p++;
+ }
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 processed enode" } */
+}
--- /dev/null
+/* { dg-skip-if "" { *-*-* } { "-fno-fat-lto-objects" } { "" } } */
+
+#include "../analyzer-decls.h"
+
+void test (int *p, int val, int count)
+{
+ int n = count;
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 processed enode" } */
+
+ while (n--)
+ {
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 processed enode" } */
+ *p++ = val;
+ }
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 processed enode" } */
+}
--- /dev/null
+/* { dg-skip-if "" { *-*-* } { "-fno-fat-lto-objects" } { "" } } */
+
+#include "../analyzer-decls.h"
+
+void test (int *p, int a, int b, int count)
+{
+ int n = count;
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 processed enode" } */
+
+ while (n--)
+ {
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "2 processed enode" } */
+ *p++ = a;
+ *p++ = b;
+ }
+
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 processed enode" } */
+}
--- /dev/null
+/* Tests for data model handling of unknown fns. */
+
+#include <stddef.h>
+#include "analyzer-decls.h"
+
+void unknown_fn (void *);
+
+void test_1 (void)
+{
+ int i;
+
+ i = 42;
+ __analyzer_eval (i == 42); /* { dg-warning "TRUE" } */
+
+ unknown_fn (NULL);
+ __analyzer_eval (i == 42); /* { dg-warning "TRUE" } */
+
+ unknown_fn (&i);
+ __analyzer_eval (i == 42); /* { dg-warning "UNKNOWN" } */
+
+ i = 17;
+ __analyzer_eval (i == 17); /* { dg-warning "TRUE" } */
+
+ /* Even though we're not passing &i to unknown_fn, it escaped
+ above, so unknown_fn could write to it. */
+ unknown_fn (NULL);
+ __analyzer_eval (i == 17); /* { dg-warning "UNKNOWN" } */
+}
+
+/* As test_1, but with an unknown fn_ptr. */
+
+void test_1a (void (*fn_ptr) (void *))
+{
+ int i;
+
+ i = 42;
+ __analyzer_eval (i == 42); /* { dg-warning "TRUE" } */
+
+ fn_ptr (NULL);
+ __analyzer_eval (i == 42); /* { dg-warning "TRUE" } */
+
+ fn_ptr (&i);
+ __analyzer_eval (i == 42); /* { dg-warning "UNKNOWN" } */
+
+ i = 17;
+ __analyzer_eval (i == 17); /* { dg-warning "TRUE" } */
+
+ /* Even though we're not passing &i to unknown_fn, it escaped
+ above, so fn_ptr (NULL) could write to it. */
+ fn_ptr (NULL);
+ __analyzer_eval (i == 17); /* { dg-warning "UNKNOWN" } */
+}
+
+int *global_for_test_2;
+
+void test_2 (void)
+{
+ int i;
+
+ i = 42;
+ __analyzer_eval (i == 42); /* { dg-warning "TRUE" } */
+
+ global_for_test_2 = &i;
+ unknown_fn (NULL);
+ __analyzer_eval (i == 42); /* { dg-warning "UNKNOWN" } */
+
+ global_for_test_2 = NULL;
+
+ i = 17;
+ __analyzer_eval (i == 17); /* { dg-warning "TRUE" } */
+
+ /* Even though the global no longer points to i, it escaped
+ above, so unknown_fn could write to it. */
+ unknown_fn (NULL);
+ __analyzer_eval (i == 17); /* { dg-warning "UNKNOWN" } */
+}
+
+struct used_by_test_3
+{
+ int *int_ptr;
+};
+
+void test_3 (void)
+{
+ int i;
+
+ struct used_by_test_3 s;
+ s.int_ptr = &i;
+
+ i = 42;
+ __analyzer_eval (i == 42); /* { dg-warning "TRUE" } */
+
+ unknown_fn (NULL);
+ __analyzer_eval (i == 42); /* { dg-warning "TRUE" } */
+ __analyzer_eval (s.int_ptr == &i); /* { dg-warning "TRUE" } */
+
+ /* i should escape here. */
+ unknown_fn (&s);
+ __analyzer_eval (i == 42); /* { dg-warning "UNKNOWN" } */
+ __analyzer_eval (s.int_ptr == &i); /* { dg-warning "UNKNOWN" } */
+
+ s.int_ptr = NULL;
+ __analyzer_eval (s.int_ptr == NULL); /* { dg-warning "TRUE" } */
+
+ i = 17;
+ __analyzer_eval (i == 17); /* { dg-warning "TRUE" } */
+
+ /* Even though nothing we know about points to i, it escaped
+ above, so unknown_fn could write to it. */
+ unknown_fn (NULL);
+ __analyzer_eval (i == 17); /* { dg-warning "UNKNOWN" } */
+}
+
+struct used_by_test_4
+{
+ int *int_ptr;
+};
+
+void test_4 (struct used_by_test_4 *st4_ptr)
+{
+ /* Something unknown called "test_4", and hence *st4_ptr has
+ effectively already escaped. */
+
+ int i = 42;
+ __analyzer_eval (i == 42); /* { dg-warning "TRUE" } */
+
+ unknown_fn (NULL);
+ __analyzer_eval (i == 42); /* { dg-warning "TRUE" } */
+
+  /* Given that *st4_ptr has effectively already escaped, calling
+     an unknown fn should invalidate our knowledge of i. */
+ st4_ptr->int_ptr = &i;
+ unknown_fn (NULL);
+ __analyzer_eval (i == 42); /* { dg-warning "UNKNOWN" } */
+
+ /* ...and "&i" should now be treated as having escaped. */
+ i = 17;
+ __analyzer_eval (i == 17); /* { dg-warning "TRUE" } */
+ st4_ptr->int_ptr = NULL;
+ unknown_fn (NULL);
+ __analyzer_eval (i == 17); /* { dg-warning "UNKNOWN" } */
+}
+
+static void __attribute__((noinline))
+known_fn (void *ptr)
+{
+ /* Empty. */
+}
+
+void test_5 (void)
+{
+ int i;
+
+ i = 42;
+ __analyzer_eval (i == 42); /* { dg-warning "TRUE" } */
+
+ known_fn (&i);
+ __analyzer_eval (i == 42); /* { dg-warning "TRUE" } */
+
+ i = 17;
+ __analyzer_eval (i == 17); /* { dg-warning "TRUE" } */
+
+ /* Ensure that we don't consider &i to have escaped. */
+ unknown_fn (NULL);
+ __analyzer_eval (i == 17); /* { dg-warning "TRUE" } */
+}
+
+extern int __attribute__ ((__pure__))
+unknown_pure_fn (void *);
+
+void test_6 (void)
+{
+ int i;
+
+ i = 42;
+ __analyzer_eval (i == 42); /* { dg-warning "TRUE" } */
+
+ unknown_pure_fn (&i);
+ __analyzer_eval (i == 42); /* { dg-warning "TRUE" } */
+
+ i = 17;
+ __analyzer_eval (i == 17); /* { dg-warning "TRUE" } */
+
+ /* Ensure that we don't consider &i to have escaped. */
+ unknown_fn (NULL);
+ __analyzer_eval (i == 17); /* { dg-warning "TRUE" } */
+}
+
+extern void unknown_const_fn (const void *);
+
+void test_7 (void)
+{
+ int i;
+
+ i = 42;
+ __analyzer_eval (i == 42); /* { dg-warning "TRUE" } */
+
+ /* &i is passed as a const void *, so i shouldn't be clobbered by
+ the call. */
+ unknown_const_fn (&i);
+ __analyzer_eval (i == 42); /* { dg-warning "TRUE" } */
+
+ i = 17;
+ __analyzer_eval (i == 17); /* { dg-warning "TRUE" } */
+
+ /* Ensure that we don't consider &i to have escaped. */
+ unknown_fn (NULL);
+ __analyzer_eval (i == 17); /* { dg-warning "TRUE" } */
+}
+
+struct used_by_test_8
+{
+ int *int_ptr;
+};
+
+void test_8 (void)
+{
+ int i;
+
+ i = 42;
+ __analyzer_eval (i == 42); /* { dg-warning "TRUE" } */
+
+ struct used_by_test_8 st8;
+ st8.int_ptr = &i;
+
+ /* Although unknown_const_fn takes a const void *, the
+ int_ptr is a non-const int *, and so &i should be considered
+ writable. */
+ unknown_const_fn (&st8);
+ __analyzer_eval (i == 42); /* { dg-warning "UNKNOWN" } */
+
+ i = 17;
+ __analyzer_eval (i == 17); /* { dg-warning "TRUE" } */
+
+ /* &i should be considered to have escaped. */
+ unknown_fn (NULL);
+ __analyzer_eval (i == 17); /* { dg-warning "UNKNOWN" } */
+}
--- /dev/null
+/* Tests for handling constraints on results of unknown fns. */
+
+#include <stddef.h>
+#include "analyzer-decls.h"
+
+void unknown_fn (void *);
+
+void test_1 (void)
+{
+ int i;
+ unknown_fn (&i);
+ if (i)
+ __analyzer_eval (i); /* { dg-warning "TRUE" } */
+ else
+ __analyzer_eval (i); /* { dg-warning "FALSE" } */
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 processed enode" } */
+}
+
+struct foo
+{
+ int i;
+ int j;
+};
+
+void test_2 (void)
+{
+ struct foo f;
+ unknown_fn (&f);
+ if (f.j)
+ __analyzer_eval (f.j); /* { dg-warning "TRUE" } */
+ else
+ __analyzer_eval (f.j); /* { dg-warning "FALSE" } */
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 processed enode" } */
+}
+
+void test_3 (int flag)
+{
+ int i;
+ unknown_fn (&i);
+ if (i)
+ {
+ __analyzer_eval (i); /* { dg-warning "TRUE" } */
+ if (flag)
+ __analyzer_eval (flag); /* { dg-warning "TRUE" } */
+ else
+ __analyzer_eval (flag); /* { dg-warning "FALSE" } */
+ }
+ else
+ __analyzer_eval (i); /* { dg-warning "FALSE" } */
+ if (flag)
+ __analyzer_eval (flag); /* { dg-warning "TRUE" } */
+ else
+ __analyzer_eval (flag); /* { dg-warning "FALSE" } */
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 processed enode" } */
+}
+
+void test_4 (int y)
+{
+ int x;
+ unknown_fn (&x);
+ if (x)
+ {
+ __analyzer_eval (x); /* { dg-warning "TRUE" } */
+ x = 0;
+ }
+ __analyzer_dump_exploded_nodes (0); /* { dg-warning "1 processed enode" } */
+}
--- /dev/null
+#include "analyzer-decls.h"
+
+int get(void);
+void test (void)
+{
+ int got = 0;
+ while (1)
+ {
+ if (get ())
+ got = 1;
+ else
+ if (got)
+ __analyzer_dump_path (); /* { dg-message "path" "" { xfail *-*-* } } */
+ }
+}
node_b.ptr = malloc (sizeof (int));
global_ptr = &node_a;
*node_b.ptr = 42; /* { dg-warning "possibly-NULL" "possibly-NULL" } */
- /* { dg-warning "leak" "leak" { target *-*-* } .-1 } */
- /* FIXME: the above leak report is correct, but is reported at the wrong
- location. */
-} /* { dg-warning "leak" } */
+ /* Although there's a chain of pointers to the allocation, pointed to
+ by global_ptr, the chain goes through the stack frame and thus
+ there's a leak when it is popped. */
+} /* { dg-warning "leak of 'node_b.ptr'" } */
/* With a call to an unknown function. */
--- /dev/null
+#include <stdlib.h>
+#include "analyzer-decls.h"
+
+struct link { struct link *next; };
+
+int free_a_list_badly (struct link *n)
+{
+ while (n) {
+ free(n); /* { dg-message "freed here" } */
+ n = n->next; /* { dg-warning "use after 'free' of 'n'" } */
+ }
+}
--- /dev/null
+#include "analyzer-decls.h"
+
+void test_1 (int n)
+{
+ struct
+ {
+ char a[n], b;
+ } s;
+ s.a[0] = 42;
+ __analyzer_eval (s.a[0] == 42); /* { dg-warning "TRUE" } */
+ s.b = 17;
+ __analyzer_eval (s.b == 17); /* { dg-warning "TRUE" } */
+}
#define Z_NULL 0
-void test ()
+int test ()
{
uLong comprLen = 10000*sizeof(int);
uLong uncomprLen = comprLen;
Byte *compr = (Byte*)calloc((uInt)comprLen, 1);
Byte *uncompr = (Byte*)calloc((uInt)uncomprLen, 1);
if (compr == Z_NULL || uncompr == Z_NULL)
- exit (1);
+ {
+ return 1; /* { dg-warning "leak of 'uncompr'" "uncompr leak" } */
+ /* { dg-warning "leak of 'compr'" "compr leak" { target *-*-* } .-1 } */
+ }
strcpy((char*)uncompr, "garbage");
- exit (0);
+ return 0; /* { dg-warning "leak of 'uncompr'" "uncompr leak" } */
+ /* { dg-warning "leak of 'compr'" "compr leak" { target *-*-* } .-1 } */
}
allocate (tm) ! { dg-bogus "dereference of possibly-NULL" }
ce => tm
- end function hv ! { dg-warning "leak of 'tm'" }
+ end function hv
end module gg
return m_value != other.m_value;
}
+ enum value get_value () const { return m_value; }
+
private:
enum value m_value;
};