m_eg->on_escaped_function (fndecl);
}
+/* struct setjmp_record. */
+
+int
+setjmp_record::cmp (const setjmp_record &rec1, const setjmp_record &rec2)
+{
+  /* Order records by the index of their exploded node.  */
+  int idx1 = rec1.m_enode->m_index;
+  int idx2 = rec2.m_enode->m_index;
+  if (idx1 != idx2)
+    return idx1 - idx2;
+  /* Equal enode indices should only occur for the selfsame record.  */
+  gcc_assert (&rec1 == &rec2);
+  return 0;
+}
+
/* class setjmp_svalue : public svalue. */
/* Implementation of svalue::accept vfunc for setjmp_svalue. */
void dump_dot (graphviz_out *gv, const dump_args_t &args) const FINAL OVERRIDE
{
- gv->println ("subgraph \"cluster_supernode_%p\" {",
- (const void *)this);
+ gv->println ("subgraph \"cluster_supernode_%i\" {", m_supernode->m_index);
gv->indent ();
gv->println ("style=\"dashed\";");
gv->println ("label=\"SN: %i (bb: %i; scc: %i)\";",
m_enodes.safe_push (en);
}
+ /* Comparator for use by auto_vec<supernode_cluster *>::qsort:
+    order clusters by the index of their underlying supernode.  */
+
+ static int cmp_ptr_ptr (const void *p1, const void *p2)
+ {
+   const supernode_cluster *cluster_a
+     = *(const supernode_cluster * const *)p1;
+   const supernode_cluster *cluster_b
+     = *(const supernode_cluster * const *)p2;
+   int idx_a = cluster_a->m_supernode->m_index;
+   int idx_b = cluster_b->m_supernode->m_index;
+   return idx_a - idx_b;
+ }
+
private:
const supernode *m_supernode;
auto_vec <exploded_node *> m_enodes;
{
const char *funcname = function_name (m_fun);
- gv->println ("subgraph \"cluster_function_%p\" {", (const void *)this);
+ gv->println ("subgraph \"cluster_function_%s\" {",
+ IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (m_fun->decl)));
gv->indent ();
gv->write_indent ();
gv->print ("label=\"call string: ");
gv->print (" function: %s \";", funcname);
gv->print ("\n");
+ /* Dump m_map, sorting it to avoid churn when comparing dumps. */
+ auto_vec<supernode_cluster *> child_clusters (m_map.elements ());
for (map_t::iterator iter = m_map.begin ();
iter != m_map.end ();
++iter)
- (*iter).second->dump_dot (gv, args);
+ child_clusters.quick_push ((*iter).second);
+
+ child_clusters.qsort (supernode_cluster::cmp_ptr_ptr);
+
+ unsigned i;
+ supernode_cluster *child_cluster;
+ FOR_EACH_VEC_ELT (child_clusters, i, child_cluster)
+ child_cluster->dump_dot (gv, args);
/* Terminate subgraph. */
gv->outdent ();
}
}
+ /* Comparator for use by auto_vec<function_call_string_cluster *>::qsort:
+    sort by function assembler name, breaking ties via call string.  */
+
+ static int cmp_ptr_ptr (const void *p1, const void *p2)
+ {
+   const function_call_string_cluster *c1
+     = *(const function_call_string_cluster * const *)p1;
+   const function_call_string_cluster *c2
+     = *(const function_call_string_cluster * const *)p2;
+   const char *name1
+     = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (c1->m_fun->decl));
+   const char *name2
+     = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (c2->m_fun->decl));
+   int cmp_names = strcmp (name1, name2);
+   if (cmp_names)
+     return cmp_names;
+   return call_string::cmp (c1->m_cs, c2->m_cs);
+ }
+
private:
function *m_fun;
call_string m_cs;
FOR_EACH_VEC_ELT (m_functionless_enodes, i, enode)
enode->dump_dot (gv, args);
+ /* Dump m_map, sorting it to avoid churn when comparing dumps. */
+ auto_vec<function_call_string_cluster *> child_clusters (m_map.elements ());
for (map_t::iterator iter = m_map.begin ();
iter != m_map.end ();
++iter)
- (*iter).second->dump_dot (gv, args);
+ child_clusters.quick_push ((*iter).second);
+
+ child_clusters.qsort (function_call_string_cluster::cmp_ptr_ptr);
+
+ function_call_string_cluster *child_cluster;
+ FOR_EACH_VEC_ELT (child_clusters, i, child_cluster)
+ child_cluster->dump_dot (gv, args);
}
void add_node (exploded_node *en) FINAL OVERRIDE
return result;
}
+/* Comparator for imposing an order on function_points.
+   Points with no supernode sort before all points that have one;
+   points sharing a supernode are ordered by cmp_within_supernode.  */
+
+int
+function_point::cmp (const function_point &point_a,
+		     const function_point &point_b)
+{
+  /* Treat a missing supernode as index -1, so such points sort first.  */
+  int idx_a = point_a.m_supernode ? point_a.m_supernode->m_index : -1;
+  int idx_b = point_b.m_supernode ? point_b.m_supernode->m_index : -1;
+  if (idx_a != idx_b)
+    return idx_a - idx_b;
+  /* Equal indices imply the same supernode (or none at all).  */
+  gcc_assert (point_a.m_supernode == point_b.m_supernode);
+  if (!point_a.m_supernode)
+    return 0;
+  return cmp_within_supernode (point_a, point_b);
+}
+
+/* Comparator for use by vec<function_point>::qsort; simply wraps
+   function_point::cmp.  */
+
+int
+function_point::cmp_ptr (const void *p1, const void *p2)
+{
+  return cmp (*(const function_point *)p1,
+	      *(const function_point *)p2);
+}
+
/* For PK_BEFORE_STMT, go to next stmt (or to PK_AFTER_SUPERNODE). */
void
const function_point &point_b);
static int cmp_within_supernode (const function_point &point_a,
const function_point &point_b);
+ static int cmp (const function_point &point_a,
+ const function_point &point_b);
+ static int cmp_ptr (const void *p1, const void *p2);
/* For before_stmt, go to next stmt. */
void next_stmt ();
pp_newline (pp);
first = false;
}
+ auto_vec <const svalue *> keys (m_map.elements ());
for (map_t::iterator iter = m_map.begin ();
iter != m_map.end ();
++iter)
+ keys.quick_push ((*iter).first);
+ keys.qsort (svalue::cmp_ptr_ptr);
+ unsigned i;
+ const svalue *sval;
+ FOR_EACH_VEC_ELT (keys, i, sval)
{
if (multiline)
pp_string (pp, " ");
else if (!first)
pp_string (pp, ", ");
first = false;
- const svalue *sval = (*iter).first;
- pp_pointer (pp, sval);
- pp_string (pp, ": ");
+ if (!flag_dump_noaddr)
+ {
+ pp_pointer (pp, sval);
+ pp_string (pp, ": ");
+ }
sval->dump_to_pp (pp, simple);
- entry_t e = (*iter).second;
+ entry_t e = *const_cast <map_t &> (m_map).get (sval);
pp_string (pp, ": ");
e.m_state->dump_to_pp (pp);
if (model)
auto_vec<const decl_region *> ssa_name_regs;
new_state.m_region_model->get_ssa_name_regions_for_current_frame
(&ssa_name_regs);
+ ssa_name_regs.qsort (region::cmp_ptr_ptr);
unsigned i;
const decl_region *reg;
FOR_EACH_VEC_ELT (ssa_name_regs, i, reg)
static void
log_set_of_svalues (logger *logger, const char *name,
- const svalue_set &set)
+ const svalue_set &set)
{
logger->log (name);
logger->inc_indent ();
+ auto_vec<const svalue *> sval_vecs (set.elements ());
for (svalue_set::iterator iter = set.begin ();
iter != set.end (); ++iter)
+ sval_vecs.quick_push (*iter);
+ sval_vecs.qsort (svalue::cmp_ptr_ptr);
+ unsigned i;
+ const svalue *sval;
+ FOR_EACH_VEC_ELT (sval_vecs, i, sval)
{
logger->start_log_line ();
pretty_printer *pp = logger->get_printer ();
- const svalue *sval = (*iter);
- pp_pointer (pp, sval);
- pp_string (pp, ": ");
+ if (!flag_dump_noaddr)
+ {
+ pp_pointer (pp, sval);
+ pp_string (pp, ": ");
+ }
sval->dump_to_pp (pp, false);
logger->end_log_line ();
}
const hash_map<K, T*> &uniq_map)
{
logger->log (" # %s: %li", title, uniq_map.elements ());
- if (show_objs)
- for (typename hash_map<K, T*>::iterator iter = uniq_map.begin ();
- iter != uniq_map.end (); ++iter)
- {
- T *managed_obj = (*iter).second;
- log_managed_object<T> (logger, managed_obj);
- }
+ if (!show_objs)
+ return;
+ auto_vec<const T *> vec_objs (uniq_map.elements ());
+ for (typename hash_map<K, T*>::iterator iter = uniq_map.begin ();
+ iter != uniq_map.end (); ++iter)
+ vec_objs.quick_push ((*iter).second);
+
+ vec_objs.qsort (T::cmp_ptr_ptr);
+
+ unsigned i;
+ const T *obj;
+ FOR_EACH_VEC_ELT (vec_objs, i, obj)
+ log_managed_object<T> (logger, obj);
}
/* Dump the number of objects that were managed by MAP to LOGGER.
const consolidation_map<T> &map)
{
logger->log (" # %s: %li", title, map.elements ());
- if (show_objs)
- for (typename consolidation_map<T>::iterator iter = map.begin ();
- iter != map.end (); ++iter)
- {
- T *managed_obj = (*iter).second;
- log_managed_object<T> (logger, managed_obj);
- }
+ if (!show_objs)
+ return;
+
+ auto_vec<const T *> vec_objs (map.elements ());
+ for (typename consolidation_map<T>::iterator iter = map.begin ();
+ iter != map.end (); ++iter)
+ vec_objs.quick_push ((*iter).second);
+
+ vec_objs.qsort (T::cmp_ptr_ptr);
+
+ unsigned i;
+ const T *obj;
+ FOR_EACH_VEC_ELT (vec_objs, i, obj)
+ log_managed_object<T> (logger, obj);
}
/* Dump the number of objects of each class that were managed by this
}
}
+/* Dump SET to PP, sorting it to avoid churn when comparing dumps.  */
+
+template <typename T>
+static void
+dump_set (const hash_set<const T *> &set, pretty_printer *pp)
+{
+  /* Copy the set's members into a vec and sort it, so that the
+     dump order is deterministic.  */
+  auto_vec<const T *> sorted_elts (set.elements ());
+  for (typename hash_set<const T *>::iterator iter = set.begin ();
+       iter != set.end (); ++iter)
+    sorted_elts.quick_push (*iter);
+
+  sorted_elts.qsort (T::cmp_ptr_ptr);
+
+  unsigned idx;
+  const T *elt;
+  FOR_EACH_VEC_ELT (sorted_elts, idx, elt)
+    {
+      pp_string (pp, " ");
+      elt->dump_to_pp (pp, true);
+      pp_newline (pp);
+    }
+}
+
/* Dump a multiline representation of this object to PP. */
void
{
pp_string (pp, "reachable clusters: ");
pp_newline (pp);
- for (hash_set<const region *>::iterator iter = m_reachable_base_regs.begin ();
- iter != m_reachable_base_regs.end (); ++iter)
- {
- pp_string (pp, " ");
- (*iter)->dump_to_pp (pp, true);
- pp_newline (pp);
- }
+ dump_set (m_reachable_base_regs, pp);
+
pp_string (pp, "mutable clusters: ");
pp_newline (pp);
- for (hash_set<const region *>::iterator iter = m_mutable_base_regs.begin ();
- iter != m_mutable_base_regs.end (); ++iter)
- {
- pp_string (pp, " ");
- (*iter)->dump_to_pp (pp, true);
- pp_newline (pp);
- }
+ dump_set (m_mutable_base_regs, pp);
+
pp_string (pp, "reachable svals: ");
pp_newline (pp);
- for (svalue_set::iterator iter = m_reachable_svals.begin ();
- iter != m_reachable_svals.end (); ++iter)
- {
- pp_string (pp, " ");
- (*iter)->dump_to_pp (pp, true);
- pp_newline (pp);
- }
+ dump_set (m_reachable_svals, pp);
+
pp_string (pp, "mutable svals: ");
pp_newline (pp);
- for (svalue_set::iterator iter = m_mutable_svals.begin ();
- iter != m_mutable_svals.end (); ++iter)
- {
- pp_string (pp, " ");
- (*iter)->dump_to_pp (pp, true);
- pp_newline (pp);
- }
+ dump_set (m_mutable_svals, pp);
}
/* Dump a multiline representation of this object to stderr. */
virtual bool implicitly_live_p (const svalue_set &live_svalues,
const region_model *model) const;
+ static int cmp_ptr (const svalue *, const svalue *);
+ static int cmp_ptr_ptr (const void *, const void *);
+
protected:
svalue (complexity c, tree type)
: m_complexity (c), m_type (type)
hstate->add_ptr (m_setjmp_call);
}
+ static int cmp (const setjmp_record &rec1, const setjmp_record &rec2);
+
const exploded_node *m_enode;
const gcall *m_setjmp_call;
};
void dump_to_pp (pretty_printer *pp, bool simple) const FINAL OVERRIDE;
void accept (visitor *v) const FINAL OVERRIDE;
+ const char *get_name () const { return m_name; }
+
private:
const char *m_name;
};
void dump_to_pp (pretty_printer *pp, bool simple) const FINAL OVERRIDE;
void accept (visitor *v) const FINAL OVERRIDE;
+ const function_point &get_point () const { return m_point; }
const svalue *get_base_svalue () const { return m_base_sval; }
const svalue *get_iter_svalue () const { return m_iter_sval; }
void dump_to_pp (pretty_printer *pp, bool simple) const FINAL OVERRIDE;
void accept (visitor *v) const FINAL OVERRIDE;
+ const binding_map &get_map () const { return m_map; }
+
iterator_t begin () const { return m_map.begin (); }
iterator_t end () const { return m_map.end (); }
void dump_to_pp (pretty_printer *pp, bool simple) const FINAL OVERRIDE;
void accept (visitor *v) const FINAL OVERRIDE;
+ const gimple *get_stmt () const { return m_stmt; }
+ const region *get_id_region () const { return m_id_reg; }
+
private:
const gimple *m_stmt;
const region *m_id_reg;
bool non_null_p () const;
- static int cmp_ptrs (const void *, const void *);
+ static int cmp_ptr_ptr (const void *, const void *);
region_offset get_offset () const;
bool get_byte_size (byte_size_t *out) const;
gcc_assert (type == NULL_TREE || TYPE_P (type));
}
-/* Comparator for regions, using their IDs to order them. */
+/* Comparator for use by vec<const region *>::qsort,
+ using their IDs to order them. */
int
-region::cmp_ptrs (const void *p1, const void *p2)
+region::cmp_ptr_ptr (const void *p1, const void *p2)
{
const region * const *reg1 = (const region * const *)p1;
const region * const *reg2 = (const region * const *)p2;
if (map.get_logger ())
{
map.log ("%qE in %qD is needed to process:", name, fun->decl);
+ /* Log m_points_needing_name, sorting it to avoid churn when comparing
+ dumps. */
+ auto_vec<function_point> points;
for (point_set_t::iterator iter = m_points_needing_name.begin ();
iter != m_points_needing_name.end ();
++iter)
+ points.safe_push (*iter);
+ points.qsort (function_point::cmp_ptr);
+ unsigned i;
+ function_point *point;
+ FOR_EACH_VEC_ELT (points, i, point)
{
map.start_log_line ();
map.get_logger ()->log_partial (" point: ");
- (*iter).print (map.get_logger ()->get_printer (), format (false));
+ point->print (map.get_logger ()->get_printer (), format (false));
map.end_log_line ();
}
}
return false;
}
+/* Comparator for use by vec<const concrete_binding *>::qsort:
+   order by kind, then start bit offset, then size.  */
+
+int
+concrete_binding::cmp_ptr_ptr (const void *p1, const void *p2)
+{
+  const concrete_binding *cb1 = *(const concrete_binding * const *)p1;
+  const concrete_binding *cb2 = *(const concrete_binding * const *)p2;
+
+  int kind_cmp = cb1->get_kind () - cb2->get_kind ();
+  if (kind_cmp)
+    return kind_cmp;
+
+  /* Start offsets are compared as signed (cmps), sizes as
+     unsigned (cmpu).  */
+  int start_cmp
+    = wi::cmps (cb1->m_start_bit_offset, cb2->m_start_bit_offset);
+  if (start_cmp)
+    return start_cmp;
+
+  return wi::cmpu (cb1->m_size_in_bits, cb2->m_size_in_bits);
+}
+
/* class symbolic_binding : public binding_key. */
void
m_region->dump_to_pp (pp, simple);
}
+/* Comparator for use by vec<const symbolic_binding *>::qsort:
+   order by kind, then by the id of the underlying region.  */
+
+int
+symbolic_binding::cmp_ptr_ptr (const void *p1, const void *p2)
+{
+  const symbolic_binding *sb1 = *(const symbolic_binding * const *)p1;
+  const symbolic_binding *sb2 = *(const symbolic_binding * const *)p2;
+
+  int kind_cmp = sb1->get_kind () - sb2->get_kind ();
+  if (kind_cmp)
+    return kind_cmp;
+
+  return region::cmp_ids (sb1->get_region (), sb2->get_region ());
+}
+
/* The store is oblivious to the types of the svalues bound within
it: any type can get bound at any location.
Simplify any casts before binding.
return map_obj;
}
+/* Comparator for imposing an order on binding_maps.  */
+
+int
+binding_map::cmp (const binding_map &map1, const binding_map &map2)
+{
+  /* Order first by the number of bindings.  */
+  int count1 = map1.elements ();
+  int count2 = map2.elements ();
+  if (count1 != count2)
+    return count1 - count2;
+
+  /* Gather each map's keys into a vec and sort them, so that we
+     visit the bindings in a deterministic order.  */
+  auto_vec <const binding_key *> keys1 (count1);
+  for (map_t::iterator iter = map1.begin (); iter != map1.end (); ++iter)
+    keys1.quick_push ((*iter).first);
+  keys1.qsort (binding_key::cmp_ptrs);
+
+  auto_vec <const binding_key *> keys2 (count2);
+  for (map_t::iterator iter = map2.begin (); iter != map2.end (); ++iter)
+    keys2.quick_push ((*iter).first);
+  keys2.qsort (binding_key::cmp_ptrs);
+
+  /* The counts matched above, so the vectors have equal length:
+     compare the sorted keys, and the bound svalues, pairwise.  */
+  for (size_t i = 0; i < keys1.length (); i++)
+    {
+      const binding_key *key1 = keys1[i];
+      const binding_key *key2 = keys2[i];
+      if (int key_cmp = binding_key::cmp (key1, key2))
+	return key_cmp;
+      /* A cmp of 0 implies the selfsame (presumably consolidated)
+	 key object; the assert enforces this.  */
+      gcc_assert (key1 == key2);
+      if (int sval_cmp = svalue::cmp_ptr (map1.get (key1), map2.get (key2)))
+	return sval_cmp;
+    }
+
+  return 0;
+}
+
/* Get the child region of PARENT_REG based upon INDEX within a
CONSTRUCTOR. */
out->safe_push (*iter);
/* Sort OUT. */
- out->qsort (region::cmp_ptrs);
+ out->qsort (region::cmp_ptr_ptr);
}
/* Dump a representation of this store to PP, using SIMPLE to control how
const region *base_reg = (*iter).first;
base_regions.safe_push (base_reg);
}
- base_regions.qsort (region::cmp_ptrs);
+ base_regions.qsort (region::cmp_ptr_ptr);
/* Gather clusters, organize by parent region, so that we can group
together locals, globals, etc. */
const region *base_reg = (*iter).first;
base_regions.safe_push (base_reg);
}
- base_regions.qsort (region::cmp_ptrs);
+ base_regions.qsort (region::cmp_ptr_ptr);
/* Gather clusters, organize by parent region, so that we can group
together locals, globals, etc. */
base_regions.add (base_reg_b);
}
+ /* Sort the base regions before considering them. This ought not to
+ affect the results, but can affect which types UNKNOWN_REGIONs are
+ created for in a run; sorting them thus avoids minor differences
+ in logfiles. */
+ auto_vec<const region *> vec_base_regions (base_regions.elements ());
for (hash_set<const region *>::iterator iter = base_regions.begin ();
iter != base_regions.end (); ++iter)
+ vec_base_regions.quick_push (*iter);
+ vec_base_regions.qsort (region::cmp_ptr_ptr);
+ unsigned i;
+ const region *base_reg;
+ FOR_EACH_VEC_ELT (vec_base_regions, i, base_reg)
{
- const region *base_reg = *iter;
const binding_cluster *cluster_a = store_a->get_cluster (base_reg);
const binding_cluster *cluster_b = store_b->get_cluster (base_reg);
/* At least one of cluster_a and cluster_b must be non-NULL. */
bool overlaps_p (const concrete_binding &other) const;
+ static int cmp_ptr_ptr (const void *, const void *);
+
private:
bit_offset_t m_start_bit_offset;
bit_size_t m_size_in_bits;
const region *get_region () const { return m_region; }
+ static int cmp_ptr_ptr (const void *, const void *);
+
private:
const region *m_region;
};
bool apply_ctor_to_region (const region *parent_reg, tree ctor,
region_model_manager *mgr);
+ static int cmp (const binding_map &map1, const binding_map &map2);
+
private:
bool apply_ctor_val_to_range (const region *parent_reg,
region_model_manager *mgr,
/* supergraph's ctor. Walk the callgraph, building supernodes for each
CFG basic block, splitting the basic blocks at callsites. Join
together the supernodes with interprocedural and intraprocedural
- superedges as appropriate. */
+ superedges as appropriate.
+ Assign UIDs to the gimple stmts. */
supergraph::supergraph (logger *logger)
{
LOG_FUNC (logger);
- /* First pass: make supernodes. */
+ /* First pass: make supernodes (and assign UIDs to the gimple stmts). */
{
+ unsigned next_uid = 0;
+
/* Sort the cgraph_nodes? */
cgraph_node *node;
FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (node)
{
gimple *stmt = gsi_stmt (gpi);
m_stmt_to_node_t.put (stmt, node_for_stmts);
+ stmt->uid = next_uid++;
}
/* Append statements from BB to the current supernode, splitting
gimple *stmt = gsi_stmt (gsi);
node_for_stmts->m_stmts.safe_push (stmt);
m_stmt_to_node_t.put (stmt, node_for_stmts);
+ stmt->uid = next_uid++;
if (cgraph_edge *edge = supergraph_call_edge (fun, stmt))
{
m_cgraph_edge_to_caller_prev_node.put(edge, node_for_stmts);
return false;
}
+/* Comparator for imposing a deterministic order on constants that are
+   of the same type.  Both asserts below guarantee the two constants
+   share TREE_TYPE and TREE_CODE, so only the per-code payload needs
+   comparing.  */
+
+static int
+cmp_cst (const_tree cst1, const_tree cst2)
+{
+  gcc_assert (TREE_TYPE (cst1) == TREE_TYPE (cst2));
+  gcc_assert (TREE_CODE (cst1) == TREE_CODE (cst2));
+  switch (TREE_CODE (cst1))
+    {
+    default:
+      gcc_unreachable ();
+    case INTEGER_CST:
+      return tree_int_cst_compare (cst1, cst2);
+    case STRING_CST:
+      /* NOTE(review): strcmp stops at the first NUL, so STRING_CSTs
+	 that differ only after an embedded NUL would compare equal —
+	 confirm this is acceptable for a dump-ordering comparator.  */
+      return strcmp (TREE_STRING_POINTER (cst1),
+		     TREE_STRING_POINTER (cst2));
+    case REAL_CST:
+      /* Impose an arbitrary but deterministic order.  */
+      return memcmp (TREE_REAL_CST_PTR (cst1),
+		     TREE_REAL_CST_PTR (cst2),
+		     sizeof (real_value));
+    case VECTOR_CST:
+      {
+	/* Compare the compressed encodings: number of patterns,
+	   elements per pattern, then the encoded elements.  */
+	int log2_np1 = VECTOR_CST_LOG2_NPATTERNS (cst1);
+	int log2_np2 = VECTOR_CST_LOG2_NPATTERNS (cst2);
+	if (log2_np1 != log2_np2)
+	  return log2_np1 - log2_np2;
+	int nelts_pp1 = VECTOR_CST_NELTS_PER_PATTERN (cst1);
+	int nelts_pp2 = VECTOR_CST_NELTS_PER_PATTERN (cst2);
+	if (nelts_pp1 != nelts_pp2)
+	  return nelts_pp1 - nelts_pp2;
+	unsigned encoded_nelts = vector_cst_encoded_nelts (cst1);
+	for (unsigned i = 0; i < encoded_nelts; i++)
+	  {
+	    int el_cmp = cmp_cst (VECTOR_CST_ENCODED_ELT (cst1, i),
+				  VECTOR_CST_ENCODED_ELT (cst2, i));
+	    if (el_cmp)
+	      return el_cmp;
+	  }
+	return 0;
+      }
+    }
+}
+
+/* Comparator for imposing a deterministic order on svalues, for use
+   when sorting them for dumps/logs (so that output doesn't churn
+   from run to run).
+   Order first by kind, then by type (via TYPE_UID, with a NULL type
+   sorting first as -1), then by kind-specific fields.
+   Fixed relative to the original: each case ended with an
+   unreachable "break;" after an unconditional return; the dead
+   statements have been removed.  */
+
+int
+svalue::cmp_ptr (const svalue *sval1, const svalue *sval2)
+{
+  /* Identical pointers compare equal.  */
+  if (sval1 == sval2)
+    return 0;
+  if (int cmp_kind = sval1->get_kind () - sval2->get_kind ())
+    return cmp_kind;
+  int t1 = sval1->get_type () ? TYPE_UID (sval1->get_type ()) : -1;
+  int t2 = sval2->get_type () ? TYPE_UID (sval2->get_type ()) : -1;
+  if (int cmp_type = t1 - t2)
+    return cmp_type;
+  switch (sval1->get_kind ())
+    {
+    default:
+      gcc_unreachable ();
+    case SK_REGION:
+      {
+	const region_svalue *region_sval1 = (const region_svalue *)sval1;
+	const region_svalue *region_sval2 = (const region_svalue *)sval2;
+	return region::cmp_ids (region_sval1->get_pointee (),
+				region_sval2->get_pointee ());
+      }
+    case SK_CONSTANT:
+      {
+	const constant_svalue *constant_sval1 = (const constant_svalue *)sval1;
+	const constant_svalue *constant_sval2 = (const constant_svalue *)sval2;
+	const_tree cst1 = constant_sval1->get_constant ();
+	const_tree cst2 = constant_sval2->get_constant ();
+	/* The types matched above, so cmp_cst's precondition holds.  */
+	return cmp_cst (cst1, cst2);
+      }
+    case SK_UNKNOWN:
+      {
+	/* Presumably at most one unknown svalue per type, so getting
+	   here with distinct pointers would be a bug; the assert
+	   enforces this.  */
+	gcc_assert (sval1 == sval2);
+	return 0;
+      }
+    case SK_POISONED:
+      {
+	const poisoned_svalue *poisoned_sval1 = (const poisoned_svalue *)sval1;
+	const poisoned_svalue *poisoned_sval2 = (const poisoned_svalue *)sval2;
+	return (poisoned_sval1->get_poison_kind ()
+		- poisoned_sval2->get_poison_kind ());
+      }
+    case SK_SETJMP:
+      {
+	const setjmp_svalue *setjmp_sval1 = (const setjmp_svalue *)sval1;
+	const setjmp_svalue *setjmp_sval2 = (const setjmp_svalue *)sval2;
+	const setjmp_record &rec1 = setjmp_sval1->get_setjmp_record ();
+	const setjmp_record &rec2 = setjmp_sval2->get_setjmp_record ();
+	return setjmp_record::cmp (rec1, rec2);
+      }
+    case SK_INITIAL:
+      {
+	const initial_svalue *initial_sval1 = (const initial_svalue *)sval1;
+	const initial_svalue *initial_sval2 = (const initial_svalue *)sval2;
+	return region::cmp_ids (initial_sval1->get_region (),
+				initial_sval2->get_region ());
+      }
+    case SK_UNARYOP:
+      {
+	const unaryop_svalue *unaryop_sval1 = (const unaryop_svalue *)sval1;
+	const unaryop_svalue *unaryop_sval2 = (const unaryop_svalue *)sval2;
+	if (int op_cmp = unaryop_sval1->get_op () - unaryop_sval2->get_op ())
+	  return op_cmp;
+	return svalue::cmp_ptr (unaryop_sval1->get_arg (),
+				unaryop_sval2->get_arg ());
+      }
+    case SK_BINOP:
+      {
+	const binop_svalue *binop_sval1 = (const binop_svalue *)sval1;
+	const binop_svalue *binop_sval2 = (const binop_svalue *)sval2;
+	if (int op_cmp = binop_sval1->get_op () - binop_sval2->get_op ())
+	  return op_cmp;
+	if (int arg0_cmp = svalue::cmp_ptr (binop_sval1->get_arg0 (),
+					    binop_sval2->get_arg0 ()))
+	  return arg0_cmp;
+	return svalue::cmp_ptr (binop_sval1->get_arg1 (),
+				binop_sval2->get_arg1 ());
+      }
+    case SK_SUB:
+      {
+	const sub_svalue *sub_sval1 = (const sub_svalue *)sval1;
+	const sub_svalue *sub_sval2 = (const sub_svalue *)sval2;
+	if (int parent_cmp = svalue::cmp_ptr (sub_sval1->get_parent (),
+					      sub_sval2->get_parent ()))
+	  return parent_cmp;
+	return region::cmp_ids (sub_sval1->get_subregion (),
+				sub_sval2->get_subregion ());
+      }
+    case SK_UNMERGEABLE:
+      {
+	const unmergeable_svalue *unmergeable_sval1
+	  = (const unmergeable_svalue *)sval1;
+	const unmergeable_svalue *unmergeable_sval2
+	  = (const unmergeable_svalue *)sval2;
+	return svalue::cmp_ptr (unmergeable_sval1->get_arg (),
+				unmergeable_sval2->get_arg ());
+      }
+    case SK_PLACEHOLDER:
+      {
+	const placeholder_svalue *placeholder_sval1
+	  = (const placeholder_svalue *)sval1;
+	const placeholder_svalue *placeholder_sval2
+	  = (const placeholder_svalue *)sval2;
+	return strcmp (placeholder_sval1->get_name (),
+		       placeholder_sval2->get_name ());
+      }
+    case SK_WIDENING:
+      {
+	const widening_svalue *widening_sval1 = (const widening_svalue *)sval1;
+	const widening_svalue *widening_sval2 = (const widening_svalue *)sval2;
+	if (int point_cmp = function_point::cmp (widening_sval1->get_point (),
+						 widening_sval2->get_point ()))
+	  return point_cmp;
+	if (int base_cmp = svalue::cmp_ptr (widening_sval1->get_base_svalue (),
+					    widening_sval2->get_base_svalue ()))
+	  return base_cmp;
+	return svalue::cmp_ptr (widening_sval1->get_iter_svalue (),
+				widening_sval2->get_iter_svalue ());
+      }
+    case SK_COMPOUND:
+      {
+	const compound_svalue *compound_sval1 = (const compound_svalue *)sval1;
+	const compound_svalue *compound_sval2 = (const compound_svalue *)sval2;
+	return binding_map::cmp (compound_sval1->get_map (),
+				 compound_sval2->get_map ());
+      }
+    case SK_CONJURED:
+      {
+	const conjured_svalue *conjured_sval1 = (const conjured_svalue *)sval1;
+	const conjured_svalue *conjured_sval2 = (const conjured_svalue *)sval2;
+	/* Relies on the stmt UIDs assigned in the supergraph ctor.  */
+	if (int stmt_cmp = (conjured_sval1->get_stmt ()->uid
+			    - conjured_sval2->get_stmt ()->uid))
+	  return stmt_cmp;
+	return region::cmp_ids (conjured_sval1->get_id_region (),
+				conjured_sval2->get_id_region ());
+      }
+    }
+}
+
+/* Comparator for use by vec<const svalue *>::qsort; wraps
+   svalue::cmp_ptr.  */
+
+int
+svalue::cmp_ptr_ptr (const void *p1, const void *p2)
+{
+  return cmp_ptr (*(const svalue * const *)p1,
+		  *(const svalue * const *)p2);
+}
+
/* class region_svalue : public svalue. */
/* Implementation of svalue::dump_to_pp vfunc for region_svalue. */