This PR introduces new InferenceId values for the theory of sets and uses them in place of InferenceId::UNKNOWN.
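For context, a minimal before/after sketch of the call-site pattern this PR applies throughout the sets theory; `fact` and `exp` are placeholders for the actual nodes at each call site:

```cpp
// Before: the inference was named only by a debug string and tracked
// internally as InferenceId::UNKNOWN.
d_im.assertInference(fact, exp, "downc");

// After: the inference carries a dedicated id, which is also what the
// trace output now prints.
d_im.assertInference(fact, InferenceId::SETS_DOWN_CLOSURE, exp);
```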
case InferenceId::SEP_PTO_NEG_PROP: return "SEP_PTO_NEG_PROP";
case InferenceId::SEP_PTO_PROP: return "SEP_PTO_PROP";
+ case InferenceId::SETS_COMPREHENSION: return "SETS_COMPREHENSION";
+ case InferenceId::SETS_DEQ: return "SETS_DEQ";
+ case InferenceId::SETS_DOWN_CLOSURE: return "SETS_DOWN_CLOSURE";
+ case InferenceId::SETS_EQ_MEM: return "SETS_EQ_MEM";
+ case InferenceId::SETS_EQ_MEM_CONFLICT: return "SETS_EQ_MEM_CONFLICT";
+ case InferenceId::SETS_MEM_EQ: return "SETS_MEM_EQ";
+ case InferenceId::SETS_MEM_EQ_CONFLICT: return "SETS_MEM_EQ_CONFLICT";
+ case InferenceId::SETS_PROXY: return "SETS_PROXY";
+ case InferenceId::SETS_PROXY_SINGLETON: return "SETS_PROXY_SINGLETON";
+ case InferenceId::SETS_SINGLETON_EQ: return "SETS_SINGLETON_EQ";
+ case InferenceId::SETS_UP_CLOSURE: return "SETS_UP_CLOSURE";
+ case InferenceId::SETS_UP_CLOSURE_2: return "SETS_UP_CLOSURE_2";
+ case InferenceId::SETS_UP_UNIV: return "SETS_UP_UNIV";
+ case InferenceId::SETS_UNIV_TYPE: return "SETS_UNIV_TYPE";
+ case InferenceId::SETS_CARD_CYCLE: return "SETS_CARD_CYCLE";
+ case InferenceId::SETS_CARD_EQUAL: return "SETS_CARD_EQUAL";
+ case InferenceId::SETS_CARD_GRAPH_EMP: return "SETS_CARD_GRAPH_EMP";
+ case InferenceId::SETS_CARD_GRAPH_EMP_PARENT:
+ return "SETS_CARD_GRAPH_EMP_PARENT";
+ case InferenceId::SETS_CARD_GRAPH_EQ_PARENT:
+ return "SETS_CARD_GRAPH_EQ_PARENT";
+ case InferenceId::SETS_CARD_GRAPH_EQ_PARENT_2:
+ return "SETS_CARD_GRAPH_EQ_PARENT_2";
+ case InferenceId::SETS_CARD_GRAPH_PARENT_SINGLETON:
+ return "SETS_CARD_GRAPH_PARENT_SINGLETON";
+ case InferenceId::SETS_CARD_MINIMAL: return "SETS_CARD_MINIMAL";
+ case InferenceId::SETS_CARD_NEGATIVE_MEMBER:
+ return "SETS_CARD_NEGATIVE_MEMBER";
+ case InferenceId::SETS_CARD_POSITIVE: return "SETS_CARD_POSITIVE";
+ case InferenceId::SETS_CARD_UNIV_SUPERSET: return "SETS_CARD_UNIV_SUPERSET";
+ case InferenceId::SETS_CARD_UNIV_TYPE: return "SETS_CARD_UNIV_TYPE";
+ case InferenceId::SETS_RELS_IDENTITY_DOWN: return "SETS_RELS_IDENTITY_DOWN";
+ case InferenceId::SETS_RELS_IDENTITY_UP: return "SETS_RELS_IDENTITY_UP";
+ case InferenceId::SETS_RELS_JOIN_COMPOSE: return "SETS_RELS_JOIN_COMPOSE";
+ case InferenceId::SETS_RELS_JOIN_IMAGE_DOWN:
+ return "SETS_RELS_JOIN_IMAGE_DOWN";
+ case InferenceId::SETS_RELS_JOIN_SPLIT_1: return "SETS_RELS_JOIN_SPLIT_1";
+ case InferenceId::SETS_RELS_JOIN_SPLIT_2: return "SETS_RELS_JOIN_SPLIT_2";
+ case InferenceId::SETS_RELS_PRODUCE_COMPOSE:
+ return "SETS_RELS_PRODUCE_COMPOSE";
+ case InferenceId::SETS_RELS_PRODUCT_SPLIT: return "SETS_RELS_PRODUCT_SPLIT";
+ case InferenceId::SETS_RELS_TCLOSURE_FWD: return "SETS_RELS_TCLOSURE_FWD";
+ case InferenceId::SETS_RELS_TRANSPOSE_EQ: return "SETS_RELS_TRANSPOSE_EQ";
+ case InferenceId::SETS_RELS_TRANSPOSE_REV: return "SETS_RELS_TRANSPOSE_REV";
+ case InferenceId::SETS_RELS_TUPLE_REDUCTION:
+ return "SETS_RELS_TUPLE_REDUCTION";
+
case InferenceId::STRINGS_I_NORM_S: return "I_NORM_S";
case InferenceId::STRINGS_I_CONST_MERGE: return "I_CONST_MERGE";
case InferenceId::STRINGS_I_CONST_CONFLICT: return "I_CONST_CONFLICT";
*/
enum class InferenceId
{
+ // ---------------------------------- arith theory
//-------------------- core
// simple congruence x=y => f(x)=f(y)
ARITH_NL_CONGRUENCE,
ARITH_NL_ICP_CONFLICT,
// propagation / contraction of variable bounds from icp
ARITH_NL_ICP_PROPAGATION,
- //-------------------- unknown
+ // ---------------------------------- end arith theory
+ // ---------------------------------- arrays theory
ARRAYS_EXT,
ARRAYS_READ_OVER_WRITE,
ARRAYS_READ_OVER_WRITE_1,
ARRAYS_READ_OVER_WRITE_CONTRA,
+ // ---------------------------------- end arrays theory
+ // ---------------------------------- bags theory
BAG_NON_NEGATIVE_COUNT,
BAG_MK_BAG_SAME_ELEMENT,
BAG_MK_BAG,
BAG_DIFFERENCE_SUBTRACT,
BAG_DIFFERENCE_REMOVE,
BAG_DUPLICATE_REMOVAL,
+ // ---------------------------------- end bags theory
+ // ---------------------------------- bitvector theory
BV_BITBLAST_CONFLICT,
BV_LAZY_CONFLICT,
BV_LAZY_LEMMA,
BV_SIMPLE_BITBLAST_LEMMA,
BV_EXTF_LEMMA,
BV_EXTF_COLLAPSE,
+ // ---------------------------------- end bitvector theory
+ // ---------------------------------- datatypes theory
// (= (C t1 ... tn) (C s1 .. sn)) => (= ti si)
DATATYPES_UNIF,
// ((_ is Ci) t) => (= t (Ci (sel_1 t) ... (sel_n t)))
DATATYPES_BISIMILAR,
// cycle conflict for datatypes
DATATYPES_CYCLE,
+ // ---------------------------------- end datatypes theory
+ // ---------------------------------- sep theory
// ensures that pto is a function: (pto x y) ^ ~(pto z w) ^ x = z => y != w
SEP_PTO_NEG_PROP,
// enforces injectiveness of pto: (pto x y) ^ (pto y w) ^ x = y => y = w
SEP_PTO_PROP,
+ // ---------------------------------- end sep theory
- //-------------------------------------- base solver
+ // ---------------------------------- sets theory
+ //-------------------- sets core solver
+ SETS_COMPREHENSION,
+ SETS_DEQ,
+ SETS_DOWN_CLOSURE,
+ SETS_EQ_MEM,
+ SETS_EQ_MEM_CONFLICT,
+ SETS_MEM_EQ,
+ SETS_MEM_EQ_CONFLICT,
+ SETS_PROXY,
+ SETS_PROXY_SINGLETON,
+ SETS_SINGLETON_EQ,
+ SETS_UP_CLOSURE,
+ SETS_UP_CLOSURE_2,
+ SETS_UP_UNIV,
+ SETS_UNIV_TYPE,
+ //-------------------- sets cardinality solver
+ // cycle of cardinalities, hence all sets have the same cardinality
+ SETS_CARD_CYCLE,
+ // two sets have the same cardinality
+ SETS_CARD_EQUAL,
+ SETS_CARD_GRAPH_EMP,
+ SETS_CARD_GRAPH_EMP_PARENT,
+ SETS_CARD_GRAPH_EQ_PARENT,
+ SETS_CARD_GRAPH_EQ_PARENT_2,
+ SETS_CARD_GRAPH_PARENT_SINGLETON,
+ // cardinality is at least the number of elements we already know
+ SETS_CARD_MINIMAL,
+ // negative members are part of the universe
+ SETS_CARD_NEGATIVE_MEMBER,
+ // all sets have non-negative cardinality
+ SETS_CARD_POSITIVE,
+ // the universe is a superset of every set
+ SETS_CARD_UNIV_SUPERSET,
+ // cardinality of the universe is at most cardinality of the type
+ SETS_CARD_UNIV_TYPE,
+ //-------------------- sets relations solver
+ SETS_RELS_IDENTITY_DOWN,
+ SETS_RELS_IDENTITY_UP,
+ SETS_RELS_JOIN_COMPOSE,
+ SETS_RELS_JOIN_IMAGE_DOWN,
+ SETS_RELS_JOIN_SPLIT_1,
+ SETS_RELS_JOIN_SPLIT_2,
+ SETS_RELS_PRODUCE_COMPOSE,
+ SETS_RELS_PRODUCT_SPLIT,
+ SETS_RELS_TCLOSURE_FWD,
+ SETS_RELS_TRANSPOSE_EQ,
+ SETS_RELS_TRANSPOSE_REV,
+ SETS_RELS_TUPLE_REDUCTION,
+ //-------------------------------------- end sets theory
+
+ //-------------------------------------- strings theory
+ //-------------------- base solver
// initial normalize singular
// x1 = "" ^ ... ^ x_{i-1} = "" ^ x_{i+1} = "" ^ ... ^ xn = "" =>
// x1 ++ ... ++ xn = xi
STRINGS_CARD_SP,
// The cardinality inference for strings, see Liang et al CAV 2014.
STRINGS_CARDINALITY,
- //-------------------------------------- end base solver
- //-------------------------------------- core solver
+ //-------------------- end base solver
+ //-------------------- core solver
// A cycle in the empty string equivalence class, e.g.:
// x ++ y = "" => x = ""
// This is typically not applied due to length constraints implying emptiness.
// is unknown, we apply the inference:
// len(s) != len(t) V len(s) = len(t)
STRINGS_DEQ_LENGTH_SP,
- //-------------------------------------- end core solver
- //-------------------------------------- codes solver
+ //-------------------- end core solver
+ //-------------------- codes solver
// str.to_code( v ) = rewrite( str.to_code(c) )
// where v is the proxy variable for c.
STRINGS_CODE_PROXY,
// str.code(x) = -1 V str.code(x) != str.code(y) V x = y
STRINGS_CODE_INJ,
- //-------------------------------------- end codes solver
- //-------------------------------------- regexp solver
+ //-------------------- end codes solver
+ //-------------------- regexp solver
// regular expression normal form conflict
// ( x in R ^ x = y ^ rewrite((str.in_re y R)) = false ) => false
// where y is the normal form computed for x.
STRINGS_RE_DELTA_CONF,
// regular expression derive ???
STRINGS_RE_DERIVE,
- //-------------------------------------- end regexp solver
- //-------------------------------------- extended function solver
+ //-------------------- end regexp solver
+ //-------------------- extended function solver
// Standard extended function inferences from context-dependent rewriting
// produced by constant substitutions. See Reynolds et al CAV 2017. These are
// inferences of the form:
// f(x1, .., xn) and P is the reduction predicate for f
// (see theory_strings_preprocess).
STRINGS_REDUCTION,
- //-------------------------------------- end extended function solver
- //-------------------------------------- prefix conflict
+ //-------------------- end extended function solver
+ //-------------------- prefix conflict
// prefix conflict (coarse-grained)
STRINGS_PREFIX_CONFLICT,
- //-------------------------------------- end prefix conflict
+ //-------------------- end prefix conflict
+ //-------------------------------------- end strings theory
+ //-------------------------------------- uf theory
// Clause from the uf symmetry breaker
UF_BREAK_SYMMETRY,
- //-------------------------------------- begin cardinality extension to UF
+ //-------------------- cardinality extension to UF
// The inferences below are described in Reynolds' thesis 2013.
// conflict of the form (card_T n) => (not (distinct t1 ... tn))
UF_CARD_CLIQUE,
// (or (= t1 t2) (not (= t1 t2)))
// to satisfy the cardinality constraints on the type of t1, t2.
UF_CARD_SPLIT,
- //-------------------------------------- end cardinality extension to UF
- //-------------------------------------- begin HO extension to UF
+ //-------------------- end cardinality extension to UF
+ //-------------------- HO extension to UF
// Encodes an n-ary application as a chain of binary HO_APPLY applications
// (= (f t1 ... tn) (@ (@ ... (@ f t1) ...) tn))
UF_HO_APP_ENCODE,
// different applications
// (not (= (f sk1 .. skn) (g sk1 .. skn)))
UF_HO_EXTENSIONALITY,
- //-------------------------------------- begin model-construction specific part
+ //-------------------- model-construction specific part
// These rules are necessary to ensure that we build models properly. For more
// details see Section 3.3 of Barbosa et al. CADE'19.
//
// different applications
// (not (= (f sk1 .. skn) (g sk1 .. skn)))
UF_HO_MODEL_EXTENSIONALITY,
- //-------------------------------------- end model-construction specific part
- //-------------------------------------- end HO extension to UF
+ //-------------------- end model-construction specific part
+ //-------------------- end HO extension to UF
+ //-------------------------------------- end uf theory
+ //-------------------------------------- unknown
UNKNOWN
};
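The trace statements updated below print the id directly (`<< id`), which relies on InferenceId being streamable. A minimal sketch of how that works, assuming the toString function whose new cases appear above and the stream-insertion overload declared next to it in inference_id.h:

```cpp
// Sketch: streaming an InferenceId prints its name via toString, so a
// message like
//   Trace("sets-lemma") << "Sets::Lemma : " << fact << " by " << id;
// reports e.g. "... by SETS_DOWN_CLOSURE" instead of an ad-hoc string.
std::ostream& operator<<(std::ostream& out, InferenceId i)
{
  out << toString(i);
  return out;
}
```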
// (=> true (<= (card (as univset t)) cardUniv))
if (!d_state.isEntailed(leq, true))
{
- d_im.assertInference(
- leq, d_true, "univset cardinality <= type cardinality", 1);
+ d_im.assertInference(leq, InferenceId::SETS_CARD_UNIV_TYPE, d_true, 1);
}
}
subset = Rewriter::rewrite(subset);
if (!d_state.isEntailed(subset, true))
{
- d_im.assertInference(subset, d_true, "univset is a super set", 1);
+ d_im.assertInference(
+ subset, InferenceId::SETS_CARD_UNIV_SUPERSET, d_true, 1);
}
// negative members are members in the universe set
// (not (member negativeMember representative)))
// (member negativeMember (as univset t)))
d_im.assertInference(
- member, notMember, "negative members are in the universe", 1);
+ member, InferenceId::SETS_CARD_NEGATIVE_MEMBER, notMember, 1);
}
}
}
cterms.push_back(s);
}
Node pos_lem = nm->mkNode(GEQ, nm->mkNode(CARD, n), d_zero);
- d_im.assertInference(pos_lem, d_emp_exp, "pcard", 1);
+ d_im.assertInference(
+ pos_lem, InferenceId::SETS_CARD_POSITIVE, d_emp_exp, 1);
}
else
{
Node nn = cterms[k];
Node nk = d_treg.getProxy(nn);
Node pos_lem = nm->mkNode(GEQ, nm->mkNode(CARD, nk), d_zero);
- d_im.assertInference(pos_lem, d_emp_exp, "pcard", 1);
+ d_im.assertInference(
+ pos_lem, InferenceId::SETS_CARD_POSITIVE, d_emp_exp, 1);
if (nn != nk)
{
Node lem = nm->mkNode(EQUAL, nm->mkNode(CARD, nk), nm->mkNode(CARD, nn));
lem = Rewriter::rewrite(lem);
Trace("sets-card") << " " << k << " : " << lem << std::endl;
- d_im.assertInference(lem, d_emp_exp, "card", 1);
+ d_im.assertInference(lem, InferenceId::SETS_CARD_EQUAL, d_emp_exp, 1);
}
}
d_im.doPendingLemmas();
Node fact = conc.size() == 1 ? conc[0] : nm->mkNode(AND, conc);
Trace("sets-cycle-debug")
<< "CYCLE: " << fact << " from " << exp << std::endl;
- d_im.assertInference(fact, exp, "card_cycle");
+ d_im.assertInference(fact, InferenceId::SETS_CARD_CYCLE, exp);
d_im.doPendingLemmas();
}
else
conc.push_back(n[e].eqNode(sib[e]));
}
}
- d_im.assertInference(conc, n.eqNode(emp_set), "cg_emp");
+ d_im.assertInference(conc, InferenceId::SETS_CARD_GRAPH_EMP, n.eqNode(emp_set));
d_im.doPendingLemmas();
if (d_im.hasSent())
{
{
Trace("sets-debug") << " it is empty..." << std::endl;
Assert(!d_state.areEqual(n, emp_set));
- d_im.assertInference(n.eqNode(emp_set), p.eqNode(emp_set), "cg_emppar");
+ d_im.assertInference(
+ n.eqNode(emp_set), InferenceId::SETS_CARD_GRAPH_EMP_PARENT, p.eqNode(emp_set));
d_im.doPendingLemmas();
if (d_im.hasSent())
{
}
Trace("sets-debug")
<< "...derived " << conc.size() << " conclusions" << std::endl;
- d_im.assertInference(conc, n.eqNode(p), "cg_eqpar");
+ d_im.assertInference(conc, InferenceId::SETS_CARD_GRAPH_EQ_PARENT, n.eqNode(p));
d_im.doPendingLemmas();
if (d_im.hasSent())
{
if (eq_parent)
{
Node conc = n.eqNode(cpk);
- d_im.assertInference(conc, exps, "cg_par_sing");
+ d_im.assertInference(conc, InferenceId::SETS_CARD_GRAPH_PARENT_SINGLETON, exps);
d_im.doPendingLemmas();
}
else
{
// split on empty
Trace("sets-nf") << "Split empty : " << n << std::endl;
- d_im.split(n.eqNode(emp_set), 1);
+ d_im.split(n.eqNode(emp_set), InferenceId::UNKNOWN, 1);
}
Assert(d_im.hasSent());
return;
conc.push_back(cpk.eqNode(n));
}
}
- d_im.assertInference(conc, cpk.eqNode(cpnl), "cg_pareq");
+ d_im.assertInference(conc, InferenceId::SETS_CARD_GRAPH_EQ_PARENT_2, cpk.eqNode(cpnl));
d_im.doPendingLemmas();
if (d_im.hasSent())
{
Trace("sets-nf") << "Actual Split : ";
d_treg.debugPrintSet(r, "sets-nf");
Trace("sets-nf") << std::endl;
- d_im.split(r.eqNode(emp_set), 1);
+ d_im.split(r.eqNode(emp_set), InferenceId::UNKNOWN, 1);
Assert(d_im.hasSent());
return;
}
&& !d_state.hasMembers(eqc))
{
Trace("sets-nf-debug") << "Split on leaf " << eqc << std::endl;
- d_im.split(eqc.eqNode(emp_set));
+ d_im.split(eqc.eqNode(emp_set), InferenceId::UNKNOWN);
success = false;
}
else
Node conc =
nm->mkNode(GEQ, cardTerm, nm->mkConst(Rational(members.size())));
Node expn = exp.size() == 1 ? exp[0] : nm->mkNode(AND, exp);
- d_im.assertInference(conc, expn, "mincard", 1);
+ d_im.assertInference(conc, InferenceId::SETS_CARD_MINIMAL, expn, 1);
}
}
// flush the lemmas
d_false = NodeManager::currentNM()->mkConst(false);
}
-bool InferenceManager::assertFactRec(Node fact, Node exp, int inferType)
+bool InferenceManager::assertFactRec(Node fact, InferenceId id, Node exp, int inferType)
{
// should we send this fact out as a lemma?
if ((options::setsInferAsLemmas() && inferType != -1) || inferType == 1)
{
lem = NodeManager::currentNM()->mkNode(IMPLIES, exp, fact);
}
- addPendingLemma(lem, InferenceId::UNKNOWN);
+ addPendingLemma(lem, id);
return true;
}
Trace("sets-fact") << "Assert fact rec : " << fact << ", exp = " << exp
if (fact == d_false)
{
Trace("sets-lemma") << "Conflict : " << exp << std::endl;
- conflict(exp, InferenceId::UNKNOWN);
+ conflict(exp, id);
return true;
}
return false;
for (unsigned i = 0; i < f.getNumChildren(); i++)
{
Node factc = fact.getKind() == NOT ? f[i].negate() : f[i];
- bool tret = assertFactRec(factc, exp, inferType);
+ bool tret = assertFactRec(factc, id, exp, inferType);
ret = ret || tret;
if (d_state.isInConflict())
{
|| (atom.getKind() == EQUAL && atom[0].getType().isSet()))
{
// send to equality engine
- if (assertInternalFact(atom, polarity, InferenceId::UNKNOWN, exp))
+ if (assertInternalFact(atom, polarity, id, exp))
{
// return true if this wasn't redundant
return true;
{
lem = NodeManager::currentNM()->mkNode(IMPLIES, exp, fact);
}
- addPendingLemma(lem, InferenceId::UNKNOWN);
+ addPendingLemma(lem, id);
return true;
}
return false;
}
void InferenceManager::assertInference(Node fact,
+ InferenceId id,
Node exp,
- const char* c,
int inferType)
{
- if (assertFactRec(fact, exp, inferType))
+ if (assertFactRec(fact, id, exp, inferType))
{
Trace("sets-lemma") << "Sets::Lemma : " << fact << " from " << exp << " by "
- << c << std::endl;
- Trace("sets-assertion")
- << "(assert (=> " << exp << " " << fact << ")) ; by " << c << std::endl;
+ << id << std::endl;
+ Trace("sets-assertion") << "(assert (=> " << exp << " " << fact
+ << ")) ; by " << id << std::endl;
}
}
void InferenceManager::assertInference(Node fact,
+ InferenceId id,
std::vector<Node>& exp,
- const char* c,
int inferType)
{
Node exp_n = exp.empty() ? d_true
: (exp.size() == 1
? exp[0]
: NodeManager::currentNM()->mkNode(AND, exp));
- assertInference(fact, exp_n, c, inferType);
+ assertInference(fact, id, exp_n, inferType);
}
void InferenceManager::assertInference(std::vector<Node>& conc,
+ InferenceId id,
Node exp,
- const char* c,
int inferType)
{
if (!conc.empty())
{
Node fact = conc.size() == 1 ? conc[0]
: NodeManager::currentNM()->mkNode(AND, conc);
- assertInference(fact, exp, c, inferType);
+ assertInference(fact, id, exp, inferType);
}
}
void InferenceManager::assertInference(std::vector<Node>& conc,
+ InferenceId id,
std::vector<Node>& exp,
- const char* c,
int inferType)
{
Node exp_n = exp.empty() ? d_true
: (exp.size() == 1
? exp[0]
: NodeManager::currentNM()->mkNode(AND, exp));
- assertInference(conc, exp_n, c, inferType);
+ assertInference(conc, id, exp_n, inferType);
}
-void InferenceManager::split(Node n, int reqPol)
+void InferenceManager::split(Node n, InferenceId id, int reqPol)
{
n = Rewriter::rewrite(n);
Node lem = NodeManager::currentNM()->mkNode(OR, n, n.negate());
// send the lemma
- lemma(lem, InferenceId::UNKNOWN);
+ lemma(lem, id);
Trace("sets-lemma") << "Sets::Lemma split : " << lem << std::endl;
if (reqPol != 0)
{
* fact is processed as a lemma, where inferType=1 forces fact to be
* set as a lemma, and inferType=-1 forces fact to be processed as a fact
* (if possible).
- *
- * The argument c is the name of the inference, which is used for debugging.
*/
- void assertInference(Node fact, Node exp, const char* c, int inferType = 0);
+ void assertInference(Node fact,
+ InferenceId id,
+ Node exp,
+ int inferType = 0);
/** same as above, where exp is interpreted as a conjunction */
void assertInference(Node fact,
+ InferenceId id,
std::vector<Node>& exp,
- const char* c,
int inferType = 0);
/** same as above, where conc is interpreted as a conjunction */
void assertInference(std::vector<Node>& conc,
+ InferenceId id,
Node exp,
- const char* c,
int inferType = 0);
/** same as above, where both exp and conc are interpreted as conjunctions */
void assertInference(std::vector<Node>& conc,
+ InferenceId id,
std::vector<Node>& exp,
- const char* c,
int inferType = 0);
/** flush the splitting lemma ( n OR (NOT n) )
* If reqPol is not 0, then a phase requirement for n is requested with
* polarity ( reqPol>0 ).
*/
- void split(Node n, int reqPol = 0);
+ void split(Node n, InferenceId id, int reqPol = 0);
private:
/** constants */
* The argument inferType determines the policy on whether fact is processed
* as a fact or as a lemma (see assertInference above).
*/
- bool assertFactRec(Node fact, Node exp, int inferType = 0);
+ bool assertFactRec(Node fact, InferenceId id, Node exp, int inferType = 0);
};
} // namespace sets
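A minimal usage sketch of the updated InferenceManager interface declared above; `mem`, `lem`, `eqn`, and `exp` are placeholder nodes and the ids are chosen for illustration only:

```cpp
// Assert a conclusion with its dedicated id; the default inferType = 0
// lets the manager decide between asserting a fact and sending a lemma.
d_im.assertInference(mem, InferenceId::SETS_UP_CLOSURE, exp);

// Force the conclusion to be sent as a lemma (inferType = 1).
d_im.assertInference(lem, InferenceId::SETS_DEQ, d_true, 1);

// Request a split; split lemmas keep InferenceId::UNKNOWN in this PR.
d_im.split(eqn, InferenceId::UNKNOWN, 1);
```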
d_proxy_to_term[k] = n;
Node eq = k.eqNode(n);
Trace("sets-lemma") << "Sets::Lemma : " << eq << " by proxy" << std::endl;
- d_im.lemma(eq, InferenceId::UNKNOWN, LemmaProperty::NONE, false);
+ d_im.lemma(eq, InferenceId::SETS_PROXY, LemmaProperty::NONE, false);
if (nk == SINGLETON)
{
Node slem = nm->mkNode(MEMBER, n[0], k);
Trace("sets-lemma") << "Sets::Lemma : " << slem << " by singleton"
<< std::endl;
- d_im.lemma(slem, InferenceId::UNKNOWN, LemmaProperty::NONE, false);
+ d_im.lemma(slem, InferenceId::SETS_PROXY_SINGLETON, LemmaProperty::NONE, false);
}
return k;
}
Node ulem = nm->mkNode(SUBSET, n1, n2);
Trace("sets-lemma") << "Sets::Lemma : " << ulem << " by univ-type"
<< std::endl;
- d_im.lemma(ulem, InferenceId::UNKNOWN, LemmaProperty::NONE, false);
+ d_im.lemma(ulem, InferenceId::SETS_UNIV_TYPE, LemmaProperty::NONE, false);
}
}
d_univset[tn] = n;
// infer equality between elements of singleton
Node exp = s1.eqNode(s2);
Node eq = s1[0].eqNode(s2[0]);
- d_im.assertInternalFact(eq, true, InferenceId::UNKNOWN, exp);
+ d_im.assertInternalFact(eq, true, InferenceId::SETS_SINGLETON_EQ, exp);
}
else
{
Assert(facts.size() == 1);
Trace("sets-prop") << "Propagate eq-mem conflict : " << facts[0]
<< std::endl;
- d_im.conflict(facts[0], InferenceId::UNKNOWN);
+ d_im.conflict(facts[0], InferenceId::SETS_EQ_MEM_CONFLICT);
return;
}
for (const Node& f : facts)
Assert(f.getKind() == kind::IMPLIES);
Trace("sets-prop") << "Propagate eq-mem eq inference : " << f[0] << " => "
<< f[1] << std::endl;
- d_im.assertInternalFact(f[1], true, InferenceId::UNKNOWN, f[0]);
+ d_im.assertInternalFact(f[1], true, InferenceId::SETS_EQ_MEM, f[0]);
}
}
}
std::vector<Node> exp;
exp.push_back(mem);
exp.push_back(mem[1].eqNode(eq_set));
- d_im.assertInference(nmem, exp, "downc");
+ d_im.assertInference(nmem, InferenceId::SETS_DOWN_CLOSURE, exp);
if (d_state.isInConflict())
{
return;
nmem = NodeManager::currentNM()->mkNode(
kind::OR, pmem.negate(), nmem);
}
- d_im.assertInference(nmem, exp, "downc");
+ d_im.assertInference(nmem, InferenceId::SETS_DOWN_CLOSURE, exp);
}
}
}
{
Node kk = d_treg.getProxy(term);
Node fact = nm->mkNode(kind::MEMBER, x, kk);
- d_im.assertInference(fact, exp, "upc", inferType);
+ d_im.assertInference(
+ fact, InferenceId::SETS_UP_CLOSURE, exp, inferType);
if (d_state.isInConflict())
{
return;
d_state.addEqualityToExp(term[1], itm2m.second[1], exp);
Node r = d_treg.getProxy(term);
Node fact = nm->mkNode(kind::MEMBER, x, r);
- d_im.assertInference(fact, exp, "upc2");
+ d_im.assertInference(fact, InferenceId::SETS_UP_CLOSURE_2, exp);
if (d_state.isInConflict())
{
return;
exp.push_back(v.eqNode(it2.second[1]));
}
Node fact = nm->mkNode(kind::MEMBER, it2.second[0], u);
- d_im.assertInference(fact, exp, "upuniv");
+ d_im.assertInference(fact, InferenceId::SETS_UP_UNIV, exp);
if (d_state.isInConflict())
{
return;
Node mem2 = nm->mkNode(MEMBER, x, deq[1]);
Node lem = nm->mkNode(OR, deq, nm->mkNode(EQUAL, mem1, mem2).negate());
lem = Rewriter::rewrite(lem);
- d_im.assertInference(lem, d_true, "diseq", 1);
+ d_im.assertInference(lem, InferenceId::SETS_DEQ, d_true, 1);
d_im.doPendingLemmas();
if (d_im.hasSent())
{
nm->mkNode(FORALL, nm->mkNode(BOUND_VAR_LIST, v), body.eqNode(mem));
Trace("sets-comprehension")
<< "Comprehension reduction: " << lem << std::endl;
- d_im.lemma(lem, InferenceId::UNKNOWN);
+ d_im.lemma(lem, InferenceId::SETS_COMPREHENSION);
}
}
Trace("sets-prop") << "Propagate mem-eq : " << pexp << std::endl;
Node eq = s[0].eqNode(atom[0]);
// triggers an internal inference
- d_im.assertInternalFact(eq, true, InferenceId::UNKNOWN, pexp);
+ d_im.assertInternalFact(eq, true, InferenceId::SETS_MEM_EQ, pexp);
}
}
else
{
Trace("sets-prop")
<< "Propagate mem-eq conflict : " << pexp << std::endl;
- d_im.conflict(pexp, InferenceId::UNKNOWN);
+ d_im.conflict(pexp, InferenceId::SETS_MEM_EQ_CONFLICT);
}
}
}
{
Trace("sets-cg-lemma")
<< "Should split on : " << x << "==" << y << std::endl;
- d_im.split(x.eqNode(y));
+ d_im.split(x.eqNode(y), InferenceId::UNKNOWN);
}
}
}
Assert(reasons.size() >= 1);
sendInfer(
new_membership,
- reasons.size() > 1 ? nm->mkNode(AND, reasons) : reasons[0],
- "JOIN-IMAGE UP");
+ InferenceId::UNKNOWN,
+ reasons.size() > 1 ? nm->mkNode(AND, reasons) : reasons[0]);
break;
}
}
if( distinct_skolems.size() >= 2 ) {
conclusion = NodeManager::currentNM()->mkNode( kind::AND, conclusion, NodeManager::currentNM()->mkNode( kind::DISTINCT, distinct_skolems ) );
}
- sendInfer(conclusion, reason, "JOIN-IMAGE DOWN");
+ sendInfer(conclusion, InferenceId::SETS_RELS_JOIN_IMAGE_DOWN, reason);
Trace("rels-debug") << "\n[Theory::Rels] *********** Done with applyJoinImageRule ***********" << std::endl;
}
reason = NodeManager::currentNM()->mkNode( kind::AND, reason, NodeManager::currentNM()->mkNode( kind::EQUAL, exp[1], iden_term ) );
}
sendInfer(nm->mkNode(AND, fact, nm->mkNode(EQUAL, fst_mem, snd_mem)),
- reason,
- "IDENTITY-DOWN");
+ InferenceId::SETS_RELS_IDENTITY_DOWN,
+ reason);
Trace("rels-debug") << "\n[Theory::Rels] *********** Done with applyIdenRule on " << iden_term << std::endl;
}
if( (*mem_rep_exp_it)[1] != iden_term_rel ) {
reason = NodeManager::currentNM()->mkNode( kind::AND, reason, NodeManager::currentNM()->mkNode( kind::EQUAL, (*mem_rep_exp_it)[1], iden_term_rel ) );
}
- sendInfer(nm->mkNode(MEMBER, new_mem, iden_term), reason, "IDENTITY-UP");
+ sendInfer(
+ nm->mkNode(MEMBER, new_mem, iden_term), InferenceId::SETS_RELS_IDENTITY_UP, reason);
++mem_rep_exp_it;
}
Trace("rels-debug") << "\n[Theory::Rels] *********** Done with computing members for Iden Term = " << iden_term << std::endl;
}
if( all_reasons.size() > 1) {
sendInfer(nm->mkNode(MEMBER, tc_mem, tc_rel),
- nm->mkNode(AND, all_reasons),
- "TCLOSURE-Forward");
+ InferenceId::SETS_RELS_TCLOSURE_FWD,
+ nm->mkNode(AND, all_reasons));
} else {
sendInfer(nm->mkNode(MEMBER, tc_mem, tc_rel),
- all_reasons.front(),
- "TCLOSURE-Forward");
+ InferenceId::SETS_RELS_TCLOSURE_FWD,
+ all_reasons.front());
}
// check if cur_node has been traversed or not
if( pt_rel != exp[1] ) {
reason = NodeManager::currentNM()->mkNode(kind::AND, exp, NodeManager::currentNM()->mkNode(kind::EQUAL, pt_rel, exp[1]));
}
- sendInfer(fact_1, reason, "product-split");
- sendInfer(fact_2, reason, "product-split");
+ sendInfer(fact_1, InferenceId::SETS_RELS_PRODUCT_SPLIT, reason);
+ sendInfer(fact_2, InferenceId::SETS_RELS_PRODUCT_SPLIT, reason);
}
/* join-split rule: (a, b) IS_IN (X JOIN Y)
reason = NodeManager::currentNM()->mkNode(kind::AND, reason, NodeManager::currentNM()->mkNode(kind::EQUAL, join_rel, exp[1]));
}
Node fact = NodeManager::currentNM()->mkNode(kind::MEMBER, mem1, join_rel[0]);
- sendInfer(fact, reason, "JOIN-Split-1");
+ sendInfer(fact, InferenceId::SETS_RELS_JOIN_SPLIT_1, reason);
fact = NodeManager::currentNM()->mkNode(kind::MEMBER, mem2, join_rel[1]);
- sendInfer(fact, reason, "JOIN-Split-2");
+ sendInfer(fact, InferenceId::SETS_RELS_JOIN_SPLIT_2, reason);
makeSharedTerm(shared_x, shared_type);
}
for( unsigned int i = 1; i < tp_terms.size(); i++ ) {
Trace("rels-debug") << "\n[Theory::Rels] *********** Applying TRANSPOSE-Equal rule on transposed term = " << tp_terms[0] << " and " << tp_terms[i] << std::endl;
sendInfer(nm->mkNode(EQUAL, tp_terms[0][0], tp_terms[i][0]),
- nm->mkNode(EQUAL, tp_terms[0], tp_terms[i]),
- "TRANSPOSE-Equal");
+ InferenceId::SETS_RELS_TRANSPOSE_EQ,
+ nm->mkNode(EQUAL, tp_terms[0], tp_terms[i]));
}
}
reason = NodeManager::currentNM()->mkNode(kind::AND, reason, NodeManager::currentNM()->mkNode(kind::EQUAL, tp_rel, exp[1]));
}
sendInfer(nm->mkNode(MEMBER, reversed_mem, tp_rel[0]),
- reason,
- "TRANSPOSE-Reverse");
+ InferenceId::SETS_RELS_TRANSPOSE_REV,
+ reason);
}
void TheorySetsRels::doTCInference() {
reason = NodeManager::currentNM()->mkNode(kind::AND, reason, NodeManager::currentNM()->mkNode(kind::EQUAL, rel[0], exps[i][1]));
}
sendInfer(nm->mkNode(MEMBER, RelsUtils::reverseTuple(exps[i][0]), rel),
- reason,
- "TRANSPOSE-reverse");
+ InferenceId::SETS_RELS_TRANSPOSE_REV,
+ reason);
}
}
}
reasons.push_back( NodeManager::currentNM()->mkNode(kind::EQUAL, r2, r2_rep_exps[j][1]) );
}
if( isProduct ) {
- sendInfer(fact,
- nm->mkNode(kind::AND, reasons),
- "PRODUCT-Compose");
+ sendInfer(
+ fact, InferenceId::SETS_RELS_PRODUCE_COMPOSE, nm->mkNode(kind::AND, reasons));
} else {
if( r1_rmost != r2_lmost ) {
reasons.push_back( NodeManager::currentNM()->mkNode(kind::EQUAL, r1_rmost, r2_lmost) );
}
- sendInfer(fact, nm->mkNode(kind::AND, reasons), "JOIN-Compose");
+ sendInfer(
+ fact, InferenceId::SETS_RELS_JOIN_COMPOSE, nm->mkNode(kind::AND, reasons));
}
}
}
{
if (p.getKind() == IMPLIES)
{
- processInference(p[1], p[0], "rels");
+ processInference(p[1], InferenceId::UNKNOWN, p[0]);
}
else
{
- processInference(p, d_trueNode, "rels");
+ processInference(p, InferenceId::UNKNOWN, d_trueNode);
}
if (d_state.isInConflict())
{
d_pending.clear();
}
- void TheorySetsRels::processInference(Node conc, Node exp, const char* c)
+ void TheorySetsRels::processInference(Node conc, InferenceId id, Node exp)
{
Trace("sets-pinfer") << "Process inference: " << exp << " => " << conc
<< std::endl;
Trace("sets-pinfer") << " must assert as lemma" << std::endl;
// we wrap the spurious explanation into a splitting lemma
Node lem = NodeManager::currentNM()->mkNode(OR, exp.negate(), conc);
- d_im.assertInference(lem, d_trueNode, c, 1);
+ d_im.assertInference(lem, id, d_trueNode, 1);
return;
}
// try to assert it as a fact
- d_im.assertInference(conc, exp, c);
+ d_im.assertInference(conc, id, exp);
}
bool TheorySetsRels::isRelationKind( Kind k ) {
Node tuple_reduct = NodeManager::currentNM()->mkNode(kind::APPLY_CONSTRUCTOR, tuple_elements);
tuple_reduct = NodeManager::currentNM()->mkNode(kind::MEMBER,tuple_reduct, n[1]);
Node tuple_reduction_lemma = NodeManager::currentNM()->mkNode(kind::EQUAL, n, tuple_reduct);
- sendInfer(tuple_reduction_lemma, d_trueNode, "tuple-reduction");
+ sendInfer(tuple_reduction_lemma, InferenceId::SETS_RELS_TUPLE_REDUCTION, d_trueNode);
d_symbolic_tuples.insert(n);
}
}
}
}
- void TheorySetsRels::sendInfer(Node fact, Node reason, const char* c)
+ void TheorySetsRels::sendInfer(Node fact, InferenceId id, Node reason)
{
Trace("rels-lemma") << "Rels::lemma " << fact << " from " << reason
- << " by " << c << std::endl;
+ << " by " << id << std::endl;
Node lemma = NodeManager::currentNM()->mkNode(IMPLIES, reason, fact);
d_pending.push_back(lemma);
}
* Called when we have inferred fact from explanation reason, where the
* latter should be a conjunction of facts that hold in the current context.
*
- * The argument c is used for debugging, to give the name of the inference
- * rule being used.
- *
* This method adds the node (=> reason exp) to the pending vector d_pending.
*/
- void sendInfer(Node fact, Node reason, const char* c);
+ void sendInfer(Node fact, InferenceId id, Node reason);
/**
* This method flushes the inferences in the pending vector d_pending to
* theory of sets, which may process them as lemmas or as facts to assert to
*
* A wrapper around d_im.assertInference that ensures that we do not send
* inferences with explanations that are not entailed.
- *
- * Argument c is used for debugging, typically the name of the inference.
*/
- void processInference(Node conc, Node exp, const char* c);
+ void processInference(Node conc, InferenceId id, Node exp);
/** Methods used in full effort */
void check();