From: Gereon Kremer
Date: Thu, 18 Feb 2021 21:33:10 +0000 (+0100)
Subject: Add InferenceIds for sets theory. (#5900)
X-Git-Tag: cvc5-1.0.0~2264
X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=94fdbe4bb325b1ff1874a2e699cad6ea76f44185;p=cvc5.git

Add InferenceIds for sets theory. (#5900)

This PR introduces new InferenceIds for the theory of sets and uses them
instead of InferenceId::UNKNOWN.
---

diff --git a/src/theory/inference_id.cpp b/src/theory/inference_id.cpp
index b80503fe9..cb15b4326 100644
--- a/src/theory/inference_id.cpp
+++ b/src/theory/inference_id.cpp
@@ -78,6 +78,53 @@ const char* toString(InferenceId i)
     case InferenceId::SEP_PTO_NEG_PROP: return "SEP_PTO_NEG_PROP";
     case InferenceId::SEP_PTO_PROP: return "SEP_PTO_PROP";
+    case InferenceId::SETS_COMPREHENSION: return "SETS_COMPREHENSION";
+    case InferenceId::SETS_DEQ: return "SETS_DEQ";
+    case InferenceId::SETS_DOWN_CLOSURE: return "SETS_DOWN_CLOSURE";
+    case InferenceId::SETS_EQ_MEM: return "SETS_EQ_MEM";
+    case InferenceId::SETS_EQ_MEM_CONFLICT: return "SETS_EQ_MEM_CONFLICT";
+    case InferenceId::SETS_MEM_EQ: return "SETS_MEM_EQ";
+    case InferenceId::SETS_MEM_EQ_CONFLICT: return "SETS_MEM_EQ_CONFLICT";
+    case InferenceId::SETS_PROXY: return "SETS_PROXY";
+    case InferenceId::SETS_PROXY_SINGLETON: return "SETS_PROXY_SINGLETON";
+    case InferenceId::SETS_SINGLETON_EQ: return "SETS_SINGLETON_EQ";
+    case InferenceId::SETS_UP_CLOSURE: return "SETS_UP_CLOSURE";
+    case InferenceId::SETS_UP_CLOSURE_2: return "SETS_UP_CLOSURE_2";
+    case InferenceId::SETS_UP_UNIV: return "SETS_UP_UNIV";
+    case InferenceId::SETS_UNIV_TYPE: return "SETS_UNIV_TYPE";
+    case InferenceId::SETS_CARD_CYCLE: return "SETS_CARD_CYCLE";
+    case InferenceId::SETS_CARD_EQUAL: return "SETS_CARD_EQUAL";
+    case InferenceId::SETS_CARD_GRAPH_EMP: return "SETS_CARD_GRAPH_EMP";
+    case InferenceId::SETS_CARD_GRAPH_EMP_PARENT:
+      return "SETS_CARD_GRAPH_EMP_PARENT";
+    case InferenceId::SETS_CARD_GRAPH_EQ_PARENT:
+      return "SETS_CARD_GRAPH_EQ_PARENT";
+    case InferenceId::SETS_CARD_GRAPH_EQ_PARENT_2:
+      return "SETS_CARD_GRAPH_EQ_PARENT_2";
+    case InferenceId::SETS_CARD_GRAPH_PARENT_SINGLETON:
+      return "SETS_CARD_GRAPH_PARENT_SINGLETON";
+    case InferenceId::SETS_CARD_MINIMAL: return "SETS_CARD_MINIMAL";
+    case InferenceId::SETS_CARD_NEGATIVE_MEMBER:
+      return "SETS_CARD_NEGATIVE_MEMBER";
+    case InferenceId::SETS_CARD_POSITIVE: return "SETS_CARD_POSITIVE";
+    case InferenceId::SETS_CARD_UNIV_SUPERSET: return "SETS_CARD_UNIV_SUPERSET";
+    case InferenceId::SETS_CARD_UNIV_TYPE: return "SETS_CARD_UNIV_TYPE";
+    case InferenceId::SETS_RELS_IDENTITY_DOWN: return "SETS_RELS_IDENTITY_DOWN";
+    case InferenceId::SETS_RELS_IDENTITY_UP: return "SETS_RELS_IDENTITY_UP";
+    case InferenceId::SETS_RELS_JOIN_COMPOSE: return "SETS_RELS_JOIN_COMPOSE";
+    case InferenceId::SETS_RELS_JOIN_IMAGE_DOWN:
+      return "SETS_RELS_JOIN_IMAGE_DOWN";
+    case InferenceId::SETS_RELS_JOIN_SPLIT_1: return "SETS_RELS_JOIN_SPLIT_1";
+    case InferenceId::SETS_RELS_JOIN_SPLIT_2: return "SETS_RELS_JOIN_SPLIT_2";
+    case InferenceId::SETS_RELS_PRODUCE_COMPOSE:
+      return "SETS_RELS_PRODUCE_COMPOSE";
+    case InferenceId::SETS_RELS_PRODUCT_SPLIT: return "SETS_RELS_PRODUCT_SPLIT";
+    case InferenceId::SETS_RELS_TCLOSURE_FWD: return "SETS_RELS_TCLOSURE_FWD";
+    case InferenceId::SETS_RELS_TRANSPOSE_EQ: return "SETS_RELS_TRANSPOSE_EQ";
+    case InferenceId::SETS_RELS_TRANSPOSE_REV: return "SETS_RELS_TRANSPOSE_REV";
+    case InferenceId::SETS_RELS_TUPLE_REDUCTION:
+      return "SETS_RELS_TUPLE_REDUCTION";
     case InferenceId::STRINGS_I_NORM_S:
return "I_NORM_S"; case InferenceId::STRINGS_I_CONST_MERGE: return "I_CONST_MERGE"; case InferenceId::STRINGS_I_CONST_CONFLICT: return "I_CONST_CONFLICT"; diff --git a/src/theory/inference_id.h b/src/theory/inference_id.h index 7adf3df0c..6dacee33c 100644 --- a/src/theory/inference_id.h +++ b/src/theory/inference_id.h @@ -40,6 +40,7 @@ namespace theory { */ enum class InferenceId { + // ---------------------------------- arith theory //-------------------- core // simple congruence x=y => f(x)=f(y) ARITH_NL_CONGRUENCE, @@ -94,13 +95,16 @@ enum class InferenceId ARITH_NL_ICP_CONFLICT, // propagation / contraction of variable bounds from icp ARITH_NL_ICP_PROPAGATION, - //-------------------- unknown + // ---------------------------------- end arith theory + // ---------------------------------- arrays theory ARRAYS_EXT, ARRAYS_READ_OVER_WRITE, ARRAYS_READ_OVER_WRITE_1, ARRAYS_READ_OVER_WRITE_CONTRA, + // ---------------------------------- end arrays theory + // ---------------------------------- bags theory BAG_NON_NEGATIVE_COUNT, BAG_MK_BAG_SAME_ELEMENT, BAG_MK_BAG, @@ -113,7 +117,9 @@ enum class InferenceId BAG_DIFFERENCE_SUBTRACT, BAG_DIFFERENCE_REMOVE, BAG_DUPLICATE_REMOVAL, + // ---------------------------------- end bags theory + // ---------------------------------- bitvector theory BV_BITBLAST_CONFLICT, BV_LAZY_CONFLICT, BV_LAZY_LEMMA, @@ -121,7 +127,9 @@ enum class InferenceId BV_SIMPLE_BITBLAST_LEMMA, BV_EXTF_LEMMA, BV_EXTF_COLLAPSE, + // ---------------------------------- end bitvector theory + // ---------------------------------- datatypes theory // (= (C t1 ... tn) (C s1 .. sn)) => (= ti si) DATATYPES_UNIF, // ((_ is Ci) t) => (= t (Ci (sel_1 t) ... (sel_n t))) @@ -142,13 +150,68 @@ enum class InferenceId DATATYPES_BISIMILAR, // cycle conflict for datatypes DATATYPES_CYCLE, + // ---------------------------------- end datatypes theory + // ---------------------------------- sep theory // ensures that pto is a function: (pto x y) ^ ~(pto z w) ^ x = z => y != w SEP_PTO_NEG_PROP, // enforces injectiveness of pto: (pto x y) ^ (pto y w) ^ x = y => y = w SEP_PTO_PROP, + // ---------------------------------- end sep theory - //-------------------------------------- base solver + // ---------------------------------- sets theory + //-------------------- sets core solver + SETS_COMPREHENSION, + SETS_DEQ, + SETS_DOWN_CLOSURE, + SETS_EQ_MEM, + SETS_EQ_MEM_CONFLICT, + SETS_MEM_EQ, + SETS_MEM_EQ_CONFLICT, + SETS_PROXY, + SETS_PROXY_SINGLETON, + SETS_SINGLETON_EQ, + SETS_UP_CLOSURE, + SETS_UP_CLOSURE_2, + SETS_UP_UNIV, + SETS_UNIV_TYPE, + //-------------------- sets cardinality solver + // cycle of cardinalities, hence all sets have the same + SETS_CARD_CYCLE, + // two sets have the same cardinality + SETS_CARD_EQUAL, + SETS_CARD_GRAPH_EMP, + SETS_CARD_GRAPH_EMP_PARENT, + SETS_CARD_GRAPH_EQ_PARENT, + SETS_CARD_GRAPH_EQ_PARENT_2, + SETS_CARD_GRAPH_PARENT_SINGLETON, + // cardinality is at least the number of elements we already know + SETS_CARD_MINIMAL, + // negative members are part of the universe + SETS_CARD_NEGATIVE_MEMBER, + // all sets have non-negative cardinality + SETS_CARD_POSITIVE, + // the universe is a superset of every set + SETS_CARD_UNIV_SUPERSET, + // cardinality of the universe is at most cardinality of the type + SETS_CARD_UNIV_TYPE, + //-------------------- sets relations solver + SETS_RELS_IDENTITY_DOWN, + SETS_RELS_IDENTITY_UP, + SETS_RELS_JOIN_COMPOSE, + SETS_RELS_JOIN_IMAGE_DOWN, + SETS_RELS_JOIN_SPLIT_1, + SETS_RELS_JOIN_SPLIT_2, + SETS_RELS_PRODUCE_COMPOSE, + 
SETS_RELS_PRODUCT_SPLIT, + SETS_RELS_TCLOSURE_FWD, + SETS_RELS_TRANSPOSE_EQ, + SETS_RELS_TRANSPOSE_REV, + SETS_RELS_TUPLE_REDUCTION, + //-------------------------------------- end sets theory + + //-------------------------------------- strings theory + //-------------------- base solver // initial normalize singular // x1 = "" ^ ... ^ x_{i-1} = "" ^ x_{i+1} = "" ^ ... ^ xn = "" => // x1 ++ ... ++ xn = xi @@ -184,8 +247,8 @@ enum class InferenceId STRINGS_CARD_SP, // The cardinality inference for strings, see Liang et al CAV 2014. STRINGS_CARDINALITY, - //-------------------------------------- end base solver - //-------------------------------------- core solver + //-------------------- end base solver + //-------------------- core solver // A cycle in the empty string equivalence class, e.g.: // x ++ y = "" => x = "" // This is typically not applied due to length constraints implying emptiness. @@ -322,15 +385,15 @@ enum class InferenceId // is unknown, we apply the inference: // len(s) != len(t) V len(s) = len(t) STRINGS_DEQ_LENGTH_SP, - //-------------------------------------- end core solver - //-------------------------------------- codes solver + //-------------------- end core solver + //-------------------- codes solver // str.to_code( v ) = rewrite( str.to_code(c) ) // where v is the proxy variable for c. STRINGS_CODE_PROXY, // str.code(x) = -1 V str.code(x) != str.code(y) V x = y STRINGS_CODE_INJ, - //-------------------------------------- end codes solver - //-------------------------------------- regexp solver + //-------------------- end codes solver + //-------------------- regexp solver // regular expression normal form conflict // ( x in R ^ x = y ^ rewrite((str.in_re y R)) = false ) => false // where y is the normal form computed for x. @@ -365,8 +428,8 @@ enum class InferenceId STRINGS_RE_DELTA_CONF, // regular expression derive ??? STRINGS_RE_DERIVE, - //-------------------------------------- end regexp solver - //-------------------------------------- extended function solver + //-------------------- end regexp solver + //-------------------- extended function solver // Standard extended function inferences from context-dependent rewriting // produced by constant substitutions. See Reynolds et al CAV 2017. These are // inferences of the form: @@ -411,15 +474,17 @@ enum class InferenceId // f(x1, .., xn) and P is the reduction predicate for f // (see theory_strings_preprocess). STRINGS_REDUCTION, - //-------------------------------------- end extended function solver - //-------------------------------------- prefix conflict + //-------------------- end extended function solver + //-------------------- prefix conflict // prefix conflict (coarse-grained) STRINGS_PREFIX_CONFLICT, - //-------------------------------------- end prefix conflict + //-------------------- end prefix conflict + //-------------------------------------- end strings theory + //-------------------------------------- uf theory // Clause from the uf symmetry breaker UF_BREAK_SYMMETRY, - //-------------------------------------- begin cardinality extension to UF + //-------------------- cardinality extension to UF // The inferences below are described in Reynolds' thesis 2013. // conflict of the form (card_T n) => (not (distinct t1 ... tn)) UF_CARD_CLIQUE, @@ -441,8 +506,8 @@ enum class InferenceId // (or (= t1 t2) (not (= t1 t2)) // to satisfy the cardinality constraints on the type of t1, t2. 
UF_CARD_SPLIT, - //-------------------------------------- end cardinality extension to UF - //-------------------------------------- begin HO extension to UF + //-------------------- end cardinality extension to UF + //-------------------- HO extension to UF // Encodes an n-ary application as a chain of binary HO_APPLY applications // (= (f t1 ... tn) (@ (@ ... (@ f t1) ...) tn)) UF_HO_APP_ENCODE, @@ -456,7 +521,7 @@ enum class InferenceId // different applications // (not (= (f sk1 .. skn) (g sk1 .. skn)) UF_HO_EXTENSIONALITY, - //-------------------------------------- begin model-construction specific part + //-------------------- model-construction specific part // These rules are necessary to ensure that we build models properly. For more // details see Section 3.3 of Barbosa et al. CADE'19. // @@ -468,9 +533,11 @@ enum class InferenceId // different applications // (not (= (f sk1 .. skn) (g sk1 .. skn)) UF_HO_MODEL_EXTENSIONALITY, - //-------------------------------------- end model-construction specific part - //-------------------------------------- end HO extension to UF + //-------------------- end model-construction specific part + //-------------------- end HO extension to UF + //-------------------------------------- end uf theory + //-------------------------------------- unknown UNKNOWN }; diff --git a/src/theory/sets/cardinality_extension.cpp b/src/theory/sets/cardinality_extension.cpp index 5997d1217..a3120ffcb 100644 --- a/src/theory/sets/cardinality_extension.cpp +++ b/src/theory/sets/cardinality_extension.cpp @@ -132,8 +132,7 @@ void CardinalityExtension::checkCardinalityExtended(TypeNode& t) // (=> true (<= (card (as univset t)) cardUniv) if (!d_state.isEntailed(leq, true)) { - d_im.assertInference( - leq, d_true, "univset cardinality <= type cardinality", 1); + d_im.assertInference(leq, InferenceId::SETS_CARD_UNIV_TYPE, d_true, 1); } } @@ -158,7 +157,8 @@ void CardinalityExtension::checkCardinalityExtended(TypeNode& t) subset = Rewriter::rewrite(subset); if (!d_state.isEntailed(subset, true)) { - d_im.assertInference(subset, d_true, "univset is a super set", 1); + d_im.assertInference( + subset, InferenceId::SETS_CARD_UNIV_SUPERSET, d_true, 1); } // negative members are members in the universe set @@ -176,7 +176,7 @@ void CardinalityExtension::checkCardinalityExtended(TypeNode& t) // (not (member negativeMember representative))) // (member negativeMember (as univset t))) d_im.assertInference( - member, notMember, "negative members are in the universe", 1); + member, InferenceId::SETS_CARD_NEGATIVE_MEMBER, notMember, 1); } } } @@ -268,7 +268,8 @@ void CardinalityExtension::registerCardinalityTerm(Node n) cterms.push_back(s); } Node pos_lem = nm->mkNode(GEQ, nm->mkNode(CARD, n), d_zero); - d_im.assertInference(pos_lem, d_emp_exp, "pcard", 1); + d_im.assertInference( + pos_lem, InferenceId::SETS_CARD_POSITIVE, d_emp_exp, 1); } else { @@ -279,13 +280,14 @@ void CardinalityExtension::registerCardinalityTerm(Node n) Node nn = cterms[k]; Node nk = d_treg.getProxy(nn); Node pos_lem = nm->mkNode(GEQ, nm->mkNode(CARD, nk), d_zero); - d_im.assertInference(pos_lem, d_emp_exp, "pcard", 1); + d_im.assertInference( + pos_lem, InferenceId::SETS_CARD_POSITIVE, d_emp_exp, 1); if (nn != nk) { Node lem = nm->mkNode(EQUAL, nm->mkNode(CARD, nk), nm->mkNode(CARD, nn)); lem = Rewriter::rewrite(lem); Trace("sets-card") << " " << k << " : " << lem << std::endl; - d_im.assertInference(lem, d_emp_exp, "card", 1); + d_im.assertInference(lem, InferenceId::SETS_CARD_EQUAL, d_emp_exp, 1); } } 
d_im.doPendingLemmas(); @@ -342,7 +344,7 @@ void CardinalityExtension::checkCardCyclesRec(Node eqc, Node fact = conc.size() == 1 ? conc[0] : nm->mkNode(AND, conc); Trace("sets-cycle-debug") << "CYCLE: " << fact << " from " << exp << std::endl; - d_im.assertInference(fact, exp, "card_cycle"); + d_im.assertInference(fact, InferenceId::SETS_CARD_CYCLE, exp); d_im.doPendingLemmas(); } else @@ -418,7 +420,7 @@ void CardinalityExtension::checkCardCyclesRec(Node eqc, conc.push_back(n[e].eqNode(sib[e])); } } - d_im.assertInference(conc, n.eqNode(emp_set), "cg_emp"); + d_im.assertInference(conc, InferenceId::SETS_CARD_GRAPH_EMP, n.eqNode(emp_set)); d_im.doPendingLemmas(); if (d_im.hasSent()) { @@ -450,7 +452,8 @@ void CardinalityExtension::checkCardCyclesRec(Node eqc, { Trace("sets-debug") << " it is empty..." << std::endl; Assert(!d_state.areEqual(n, emp_set)); - d_im.assertInference(n.eqNode(emp_set), p.eqNode(emp_set), "cg_emppar"); + d_im.assertInference( + n.eqNode(emp_set), InferenceId::SETS_CARD_GRAPH_EMP_PARENT, p.eqNode(emp_set)); d_im.doPendingLemmas(); if (d_im.hasSent()) { @@ -497,7 +500,7 @@ void CardinalityExtension::checkCardCyclesRec(Node eqc, } Trace("sets-debug") << "...derived " << conc.size() << " conclusions" << std::endl; - d_im.assertInference(conc, n.eqNode(p), "cg_eqpar"); + d_im.assertInference(conc, InferenceId::SETS_CARD_GRAPH_EQ_PARENT, n.eqNode(p)); d_im.doPendingLemmas(); if (d_im.hasSent()) { @@ -549,14 +552,14 @@ void CardinalityExtension::checkCardCyclesRec(Node eqc, if (eq_parent) { Node conc = n.eqNode(cpk); - d_im.assertInference(conc, exps, "cg_par_sing"); + d_im.assertInference(conc, InferenceId::SETS_CARD_GRAPH_PARENT_SINGLETON, exps); d_im.doPendingLemmas(); } else { // split on empty Trace("sets-nf") << "Split empty : " << n << std::endl; - d_im.split(n.eqNode(emp_set), 1); + d_im.split(n.eqNode(emp_set), InferenceId::UNKNOWN, 1); } Assert(d_im.hasSent()); return; @@ -604,7 +607,7 @@ void CardinalityExtension::checkCardCyclesRec(Node eqc, conc.push_back(cpk.eqNode(n)); } } - d_im.assertInference(conc, cpk.eqNode(cpnl), "cg_pareq"); + d_im.assertInference(conc, InferenceId::SETS_CARD_GRAPH_EQ_PARENT_2, cpk.eqNode(cpnl)); d_im.doPendingLemmas(); if (d_im.hasSent()) { @@ -788,7 +791,7 @@ void CardinalityExtension::checkNormalForm(Node eqc, Trace("sets-nf") << "Actual Split : "; d_treg.debugPrintSet(r, "sets-nf"); Trace("sets-nf") << std::endl; - d_im.split(r.eqNode(emp_set), 1); + d_im.split(r.eqNode(emp_set), InferenceId::UNKNOWN, 1); Assert(d_im.hasSent()); return; } @@ -860,7 +863,7 @@ void CardinalityExtension::checkNormalForm(Node eqc, && !d_state.hasMembers(eqc)) { Trace("sets-nf-debug") << "Split on leaf " << eqc << std::endl; - d_im.split(eqc.eqNode(emp_set)); + d_im.split(eqc.eqNode(emp_set), InferenceId::UNKNOWN); success = false; } else @@ -972,7 +975,7 @@ void CardinalityExtension::checkMinCard() Node conc = nm->mkNode(GEQ, cardTerm, nm->mkConst(Rational(members.size()))); Node expn = exp.size() == 1 ? 
exp[0] : nm->mkNode(AND, exp); - d_im.assertInference(conc, expn, "mincard", 1); + d_im.assertInference(conc, InferenceId::SETS_CARD_MINIMAL, expn, 1); } } // flush the lemmas diff --git a/src/theory/sets/inference_manager.cpp b/src/theory/sets/inference_manager.cpp index 2629e2cbd..161a66bfe 100644 --- a/src/theory/sets/inference_manager.cpp +++ b/src/theory/sets/inference_manager.cpp @@ -32,7 +32,7 @@ InferenceManager::InferenceManager(Theory& t, d_false = NodeManager::currentNM()->mkConst(false); } -bool InferenceManager::assertFactRec(Node fact, Node exp, int inferType) +bool InferenceManager::assertFactRec(Node fact, InferenceId id, Node exp, int inferType) { // should we send this fact out as a lemma? if ((options::setsInferAsLemmas() && inferType != -1) || inferType == 1) @@ -46,7 +46,7 @@ bool InferenceManager::assertFactRec(Node fact, Node exp, int inferType) { lem = NodeManager::currentNM()->mkNode(IMPLIES, exp, fact); } - addPendingLemma(lem, InferenceId::UNKNOWN); + addPendingLemma(lem, id); return true; } Trace("sets-fact") << "Assert fact rec : " << fact << ", exp = " << exp @@ -57,7 +57,7 @@ bool InferenceManager::assertFactRec(Node fact, Node exp, int inferType) if (fact == d_false) { Trace("sets-lemma") << "Conflict : " << exp << std::endl; - conflict(exp, InferenceId::UNKNOWN); + conflict(exp, id); return true; } return false; @@ -70,7 +70,7 @@ bool InferenceManager::assertFactRec(Node fact, Node exp, int inferType) for (unsigned i = 0; i < f.getNumChildren(); i++) { Node factc = fact.getKind() == NOT ? f[i].negate() : f[i]; - bool tret = assertFactRec(factc, exp, inferType); + bool tret = assertFactRec(factc, id, exp, inferType); ret = ret || tret; if (d_state.isInConflict()) { @@ -90,7 +90,7 @@ bool InferenceManager::assertFactRec(Node fact, Node exp, int inferType) || (atom.getKind() == EQUAL && atom[0].getType().isSet())) { // send to equality engine - if (assertInternalFact(atom, polarity, InferenceId::UNKNOWN, exp)) + if (assertInternalFact(atom, polarity, id, exp)) { // return true if this wasn't redundant return true; @@ -104,67 +104,67 @@ bool InferenceManager::assertFactRec(Node fact, Node exp, int inferType) { lem = NodeManager::currentNM()->mkNode(IMPLIES, exp, fact); } - addPendingLemma(lem, InferenceId::UNKNOWN); + addPendingLemma(lem, id); return true; } return false; } void InferenceManager::assertInference(Node fact, + InferenceId id, Node exp, - const char* c, int inferType) { - if (assertFactRec(fact, exp, inferType)) + if (assertFactRec(fact, id, exp, inferType)) { Trace("sets-lemma") << "Sets::Lemma : " << fact << " from " << exp << " by " - << c << std::endl; - Trace("sets-assertion") - << "(assert (=> " << exp << " " << fact << ")) ; by " << c << std::endl; + << id << std::endl; + Trace("sets-assertion") << "(assert (=> " << exp << " " << fact + << ")) ; by " << id << std::endl; } } void InferenceManager::assertInference(Node fact, + InferenceId id, std::vector& exp, - const char* c, int inferType) { Node exp_n = exp.empty() ? d_true : (exp.size() == 1 ? exp[0] : NodeManager::currentNM()->mkNode(AND, exp)); - assertInference(fact, exp_n, c, inferType); + assertInference(fact, id, exp_n, inferType); } void InferenceManager::assertInference(std::vector& conc, + InferenceId id, Node exp, - const char* c, int inferType) { if (!conc.empty()) { Node fact = conc.size() == 1 ? 
conc[0] : NodeManager::currentNM()->mkNode(AND, conc); - assertInference(fact, exp, c, inferType); + assertInference(fact, id, exp, inferType); } } void InferenceManager::assertInference(std::vector& conc, + InferenceId id, std::vector& exp, - const char* c, int inferType) { Node exp_n = exp.empty() ? d_true : (exp.size() == 1 ? exp[0] : NodeManager::currentNM()->mkNode(AND, exp)); - assertInference(conc, exp_n, c, inferType); + assertInference(conc, id, exp_n, inferType); } -void InferenceManager::split(Node n, int reqPol) +void InferenceManager::split(Node n, InferenceId id, int reqPol) { n = Rewriter::rewrite(n); Node lem = NodeManager::currentNM()->mkNode(OR, n, n.negate()); // send the lemma - lemma(lem, InferenceId::UNKNOWN); + lemma(lem, id); Trace("sets-lemma") << "Sets::Lemma split : " << lem << std::endl; if (reqPol != 0) { diff --git a/src/theory/sets/inference_manager.h b/src/theory/sets/inference_manager.h index c52fcf3d4..a4eeb1f1c 100644 --- a/src/theory/sets/inference_manager.h +++ b/src/theory/sets/inference_manager.h @@ -50,24 +50,25 @@ class InferenceManager : public InferenceManagerBuffered * fact is processed as a lemma, where inferType=1 forces fact to be * set as a lemma, and inferType=-1 forces fact to be processed as a fact * (if possible). - * - * The argument c is the name of the inference, which is used for debugging. */ - void assertInference(Node fact, Node exp, const char* c, int inferType = 0); + void assertInference(Node fact, + InferenceId id, + Node exp, + int inferType = 0); /** same as above, where exp is interpreted as a conjunction */ void assertInference(Node fact, + InferenceId id, std::vector& exp, - const char* c, int inferType = 0); /** same as above, where conc is interpreted as a conjunction */ void assertInference(std::vector& conc, + InferenceId id, Node exp, - const char* c, int inferType = 0); /** same as above, where both exp and conc are interpreted as conjunctions */ void assertInference(std::vector& conc, + InferenceId id, std::vector& exp, - const char* c, int inferType = 0); /** flush the splitting lemma ( n OR (NOT n) ) @@ -75,7 +76,7 @@ class InferenceManager : public InferenceManagerBuffered * If reqPol is not 0, then a phase requirement for n is requested with * polarity ( reqPol>0 ). */ - void split(Node n, int reqPol = 0); + void split(Node n, InferenceId id, int reqPol = 0); private: /** constants */ @@ -94,7 +95,7 @@ class InferenceManager : public InferenceManagerBuffered * The argument inferType determines the policy on whether fact is processed * as a fact or as a lemma (see assertInference above). 
*/ - bool assertFactRec(Node fact, Node exp, int inferType = 0); + bool assertFactRec(Node fact, InferenceId id, Node exp, int inferType = 0); }; } // namespace sets diff --git a/src/theory/sets/term_registry.cpp b/src/theory/sets/term_registry.cpp index 975581dfa..464846b1a 100644 --- a/src/theory/sets/term_registry.cpp +++ b/src/theory/sets/term_registry.cpp @@ -53,13 +53,13 @@ Node TermRegistry::getProxy(Node n) d_proxy_to_term[k] = n; Node eq = k.eqNode(n); Trace("sets-lemma") << "Sets::Lemma : " << eq << " by proxy" << std::endl; - d_im.lemma(eq, InferenceId::UNKNOWN, LemmaProperty::NONE, false); + d_im.lemma(eq, InferenceId::SETS_PROXY, LemmaProperty::NONE, false); if (nk == SINGLETON) { Node slem = nm->mkNode(MEMBER, n[0], k); Trace("sets-lemma") << "Sets::Lemma : " << slem << " by singleton" << std::endl; - d_im.lemma(slem, InferenceId::UNKNOWN, LemmaProperty::NONE, false); + d_im.lemma(slem, InferenceId::SETS_PROXY_SINGLETON, LemmaProperty::NONE, false); } return k; } @@ -104,7 +104,7 @@ Node TermRegistry::getUnivSet(TypeNode tn) Node ulem = nm->mkNode(SUBSET, n1, n2); Trace("sets-lemma") << "Sets::Lemma : " << ulem << " by univ-type" << std::endl; - d_im.lemma(ulem, InferenceId::UNKNOWN, LemmaProperty::NONE, false); + d_im.lemma(ulem, InferenceId::SETS_UNIV_TYPE, LemmaProperty::NONE, false); } } d_univset[tn] = n; diff --git a/src/theory/sets/theory_sets_private.cpp b/src/theory/sets/theory_sets_private.cpp index 867c87c59..e69113c97 100644 --- a/src/theory/sets/theory_sets_private.cpp +++ b/src/theory/sets/theory_sets_private.cpp @@ -104,7 +104,7 @@ void TheorySetsPrivate::eqNotifyMerge(TNode t1, TNode t2) // infer equality between elements of singleton Node exp = s1.eqNode(s2); Node eq = s1[0].eqNode(s2[0]); - d_im.assertInternalFact(eq, true, InferenceId::UNKNOWN, exp); + d_im.assertInternalFact(eq, true, InferenceId::SETS_SINGLETON_EQ, exp); } else { @@ -137,7 +137,7 @@ void TheorySetsPrivate::eqNotifyMerge(TNode t1, TNode t2) Assert(facts.size() == 1); Trace("sets-prop") << "Propagate eq-mem conflict : " << facts[0] << std::endl; - d_im.conflict(facts[0], InferenceId::UNKNOWN); + d_im.conflict(facts[0], InferenceId::SETS_EQ_MEM_CONFLICT); return; } for (const Node& f : facts) @@ -145,7 +145,7 @@ void TheorySetsPrivate::eqNotifyMerge(TNode t1, TNode t2) Assert(f.getKind() == kind::IMPLIES); Trace("sets-prop") << "Propagate eq-mem eq inference : " << f[0] << " => " << f[1] << std::endl; - d_im.assertInternalFact(f[1], true, InferenceId::UNKNOWN, f[0]); + d_im.assertInternalFact(f[1], true, InferenceId::SETS_EQ_MEM, f[0]); } } } @@ -449,7 +449,7 @@ void TheorySetsPrivate::checkDownwardsClosure() std::vector exp; exp.push_back(mem); exp.push_back(mem[1].eqNode(eq_set)); - d_im.assertInference(nmem, exp, "downc"); + d_im.assertInference(nmem, InferenceId::SETS_DOWN_CLOSURE, exp); if (d_state.isInConflict()) { return; @@ -474,7 +474,7 @@ void TheorySetsPrivate::checkDownwardsClosure() nmem = NodeManager::currentNM()->mkNode( kind::OR, pmem.negate(), nmem); } - d_im.assertInference(nmem, exp, "downc"); + d_im.assertInference(nmem, InferenceId::SETS_DOWN_CLOSURE, exp); } } } @@ -578,7 +578,8 @@ void TheorySetsPrivate::checkUpwardsClosure() { Node kk = d_treg.getProxy(term); Node fact = nm->mkNode(kind::MEMBER, x, kk); - d_im.assertInference(fact, exp, "upc", inferType); + d_im.assertInference( + fact, InferenceId::SETS_UP_CLOSURE, exp, inferType); if (d_state.isInConflict()) { return; @@ -605,7 +606,7 @@ void TheorySetsPrivate::checkUpwardsClosure() d_state.addEqualityToExp(term[1], 
itm2m.second[1], exp); Node r = d_treg.getProxy(term); Node fact = nm->mkNode(kind::MEMBER, x, r); - d_im.assertInference(fact, exp, "upc2"); + d_im.assertInference(fact, InferenceId::SETS_UP_CLOSURE_2, exp); if (d_state.isInConflict()) { return; @@ -667,7 +668,7 @@ void TheorySetsPrivate::checkUpwardsClosure() exp.push_back(v.eqNode(it2.second[1])); } Node fact = nm->mkNode(kind::MEMBER, it2.second[0], u); - d_im.assertInference(fact, exp, "upuniv"); + d_im.assertInference(fact, InferenceId::SETS_UP_UNIV, exp); if (d_state.isInConflict()) { return; @@ -724,7 +725,7 @@ void TheorySetsPrivate::checkDisequalities() Node mem2 = nm->mkNode(MEMBER, x, deq[1]); Node lem = nm->mkNode(OR, deq, nm->mkNode(EQUAL, mem1, mem2).negate()); lem = Rewriter::rewrite(lem); - d_im.assertInference(lem, d_true, "diseq", 1); + d_im.assertInference(lem, InferenceId::SETS_DEQ, d_true, 1); d_im.doPendingLemmas(); if (d_im.hasSent()) { @@ -764,7 +765,7 @@ void TheorySetsPrivate::checkReduceComprehensions() nm->mkNode(FORALL, nm->mkNode(BOUND_VAR_LIST, v), body.eqNode(mem)); Trace("sets-comprehension") << "Comprehension reduction: " << lem << std::endl; - d_im.lemma(lem, InferenceId::UNKNOWN); + d_im.lemma(lem, InferenceId::SETS_COMPREHENSION); } } @@ -818,14 +819,14 @@ void TheorySetsPrivate::notifyFact(TNode atom, bool polarity, TNode fact) Trace("sets-prop") << "Propagate mem-eq : " << pexp << std::endl; Node eq = s[0].eqNode(atom[0]); // triggers an internal inference - d_im.assertInternalFact(eq, true, InferenceId::UNKNOWN, pexp); + d_im.assertInternalFact(eq, true, InferenceId::SETS_MEM_EQ, pexp); } } else { Trace("sets-prop") << "Propagate mem-eq conflict : " << pexp << std::endl; - d_im.conflict(pexp, InferenceId::UNKNOWN); + d_im.conflict(pexp, InferenceId::SETS_MEM_EQ_CONFLICT); } } } @@ -896,7 +897,7 @@ void TheorySetsPrivate::addCarePairs(TNodeTrie* t1, { Trace("sets-cg-lemma") << "Should split on : " << x << "==" << y << std::endl; - d_im.split(x.eqNode(y)); + d_im.split(x.eqNode(y), InferenceId::UNKNOWN); } } } diff --git a/src/theory/sets/theory_sets_rels.cpp b/src/theory/sets/theory_sets_rels.cpp index ebb7f845d..62f1776a3 100644 --- a/src/theory/sets/theory_sets_rels.cpp +++ b/src/theory/sets/theory_sets_rels.cpp @@ -346,8 +346,8 @@ void TheorySetsRels::check(Theory::Effort level) Assert(reasons.size() >= 1); sendInfer( new_membership, - reasons.size() > 1 ? nm->mkNode(AND, reasons) : reasons[0], - "JOIN-IMAGE UP"); + InferenceId::UNKNOWN, + reasons.size() > 1 ? 
nm->mkNode(AND, reasons) : reasons[0]); break; } } @@ -404,7 +404,7 @@ void TheorySetsRels::check(Theory::Effort level) if( distinct_skolems.size() >= 2 ) { conclusion = NodeManager::currentNM()->mkNode( kind::AND, conclusion, NodeManager::currentNM()->mkNode( kind::DISTINCT, distinct_skolems ) ); } - sendInfer(conclusion, reason, "JOIN-IMAGE DOWN"); + sendInfer(conclusion, InferenceId::SETS_RELS_JOIN_IMAGE_DOWN, reason); Trace("rels-debug") << "\n[Theory::Rels] *********** Done with applyJoinImageRule ***********" << std::endl; } @@ -437,8 +437,8 @@ void TheorySetsRels::check(Theory::Effort level) reason = NodeManager::currentNM()->mkNode( kind::AND, reason, NodeManager::currentNM()->mkNode( kind::EQUAL, exp[1], iden_term ) ); } sendInfer(nm->mkNode(AND, fact, nm->mkNode(EQUAL, fst_mem, snd_mem)), - reason, - "IDENTITY-DOWN"); + InferenceId::SETS_RELS_IDENTITY_DOWN, + reason); Trace("rels-debug") << "\n[Theory::Rels] *********** Done with applyIdenRule on " << iden_term << std::endl; } @@ -469,7 +469,8 @@ void TheorySetsRels::check(Theory::Effort level) if( (*mem_rep_exp_it)[1] != iden_term_rel ) { reason = NodeManager::currentNM()->mkNode( kind::AND, reason, NodeManager::currentNM()->mkNode( kind::EQUAL, (*mem_rep_exp_it)[1], iden_term_rel ) ); } - sendInfer(nm->mkNode(MEMBER, new_mem, iden_term), reason, "IDENTITY-UP"); + sendInfer( + nm->mkNode(MEMBER, new_mem, iden_term), InferenceId::SETS_RELS_IDENTITY_UP, reason); ++mem_rep_exp_it; } Trace("rels-debug") << "\n[Theory::Rels] *********** Done with computing members for Iden Term = " << iden_term << std::endl; @@ -712,12 +713,12 @@ void TheorySetsRels::check(Theory::Effort level) } if( all_reasons.size() > 1) { sendInfer(nm->mkNode(MEMBER, tc_mem, tc_rel), - nm->mkNode(AND, all_reasons), - "TCLOSURE-Forward"); + InferenceId::SETS_RELS_TCLOSURE_FWD, + nm->mkNode(AND, all_reasons)); } else { sendInfer(nm->mkNode(MEMBER, tc_mem, tc_rel), - all_reasons.front(), - "TCLOSURE-Forward"); + InferenceId::SETS_RELS_TCLOSURE_FWD, + all_reasons.front()); } // check if cur_node has been traversed or not @@ -790,8 +791,8 @@ void TheorySetsRels::check(Theory::Effort level) if( pt_rel != exp[1] ) { reason = NodeManager::currentNM()->mkNode(kind::AND, exp, NodeManager::currentNM()->mkNode(kind::EQUAL, pt_rel, exp[1])); } - sendInfer(fact_1, reason, "product-split"); - sendInfer(fact_2, reason, "product-split"); + sendInfer(fact_1, InferenceId::SETS_RELS_PRODUCT_SPLIT, reason); + sendInfer(fact_2, InferenceId::SETS_RELS_PRODUCT_SPLIT, reason); } /* join-split rule: (a, b) IS_IN (X JOIN Y) @@ -861,9 +862,9 @@ void TheorySetsRels::check(Theory::Effort level) reason = NodeManager::currentNM()->mkNode(kind::AND, reason, NodeManager::currentNM()->mkNode(kind::EQUAL, join_rel, exp[1])); } Node fact = NodeManager::currentNM()->mkNode(kind::MEMBER, mem1, join_rel[0]); - sendInfer(fact, reason, "JOIN-Split-1"); + sendInfer(fact, InferenceId::SETS_RELS_JOIN_SPLIT_1, reason); fact = NodeManager::currentNM()->mkNode(kind::MEMBER, mem2, join_rel[1]); - sendInfer(fact, reason, "JOIN-Split-2"); + sendInfer(fact, InferenceId::SETS_RELS_JOIN_SPLIT_2, reason); makeSharedTerm(shared_x, shared_type); } @@ -885,8 +886,8 @@ void TheorySetsRels::check(Theory::Effort level) for( unsigned int i = 1; i < tp_terms.size(); i++ ) { Trace("rels-debug") << "\n[Theory::Rels] *********** Applying TRANSPOSE-Equal rule on transposed term = " << tp_terms[0] << " and " << tp_terms[i] << std::endl; sendInfer(nm->mkNode(EQUAL, tp_terms[0][0], tp_terms[i][0]), - nm->mkNode(EQUAL, tp_terms[0], 
tp_terms[i]), - "TRANSPOSE-Equal"); + InferenceId::SETS_RELS_TRANSPOSE_EQ, + nm->mkNode(EQUAL, tp_terms[0], tp_terms[i])); } } @@ -911,8 +912,8 @@ void TheorySetsRels::check(Theory::Effort level) reason = NodeManager::currentNM()->mkNode(kind::AND, reason, NodeManager::currentNM()->mkNode(kind::EQUAL, tp_rel, exp[1])); } sendInfer(nm->mkNode(MEMBER, reversed_mem, tp_rel[0]), - reason, - "TRANSPOSE-Reverse"); + InferenceId::SETS_RELS_TRANSPOSE_REV, + reason); } void TheorySetsRels::doTCInference() { @@ -1003,8 +1004,8 @@ void TheorySetsRels::check(Theory::Effort level) reason = NodeManager::currentNM()->mkNode(kind::AND, reason, NodeManager::currentNM()->mkNode(kind::EQUAL, rel[0], exps[i][1])); } sendInfer(nm->mkNode(MEMBER, RelsUtils::reverseTuple(exps[i][0]), rel), - reason, - "TRANSPOSE-reverse"); + InferenceId::SETS_RELS_TRANSPOSE_REV, + reason); } } } @@ -1076,14 +1077,14 @@ void TheorySetsRels::check(Theory::Effort level) reasons.push_back( NodeManager::currentNM()->mkNode(kind::EQUAL, r2, r2_rep_exps[j][1]) ); } if( isProduct ) { - sendInfer(fact, - nm->mkNode(kind::AND, reasons), - "PRODUCT-Compose"); + sendInfer( + fact, InferenceId::SETS_RELS_PRODUCE_COMPOSE, nm->mkNode(kind::AND, reasons)); } else { if( r1_rmost != r2_lmost ) { reasons.push_back( NodeManager::currentNM()->mkNode(kind::EQUAL, r1_rmost, r2_lmost) ); } - sendInfer(fact, nm->mkNode(kind::AND, reasons), "JOIN-Compose"); + sendInfer( + fact, InferenceId::SETS_RELS_JOIN_COMPOSE, nm->mkNode(kind::AND, reasons)); } } } @@ -1100,11 +1101,11 @@ void TheorySetsRels::check(Theory::Effort level) { if (p.getKind() == IMPLIES) { - processInference(p[1], p[0], "rels"); + processInference(p[1], InferenceId::UNKNOWN, p[0]); } else { - processInference(p, d_trueNode, "rels"); + processInference(p, InferenceId::UNKNOWN, d_trueNode); } if (d_state.isInConflict()) { @@ -1120,7 +1121,7 @@ void TheorySetsRels::check(Theory::Effort level) d_pending.clear(); } - void TheorySetsRels::processInference(Node conc, Node exp, const char* c) + void TheorySetsRels::processInference(Node conc, InferenceId id, Node exp) { Trace("sets-pinfer") << "Process inference: " << exp << " => " << conc << std::endl; @@ -1129,11 +1130,11 @@ void TheorySetsRels::check(Theory::Effort level) Trace("sets-pinfer") << " must assert as lemma" << std::endl; // we wrap the spurious explanation into a splitting lemma Node lem = NodeManager::currentNM()->mkNode(OR, exp.negate(), conc); - d_im.assertInference(lem, d_trueNode, c, 1); + d_im.assertInference(lem, id, d_trueNode, 1); return; } // try to assert it as a fact - d_im.assertInference(conc, exp, c); + d_im.assertInference(conc, id, exp); } bool TheorySetsRels::isRelationKind( Kind k ) { @@ -1235,7 +1236,7 @@ void TheorySetsRels::check(Theory::Effort level) Node tuple_reduct = NodeManager::currentNM()->mkNode(kind::APPLY_CONSTRUCTOR, tuple_elements); tuple_reduct = NodeManager::currentNM()->mkNode(kind::MEMBER,tuple_reduct, n[1]); Node tuple_reduction_lemma = NodeManager::currentNM()->mkNode(kind::EQUAL, n, tuple_reduct); - sendInfer(tuple_reduction_lemma, d_trueNode, "tuple-reduction"); + sendInfer(tuple_reduction_lemma, InferenceId::SETS_RELS_TUPLE_REDUCTION, d_trueNode); d_symbolic_tuples.insert(n); } } @@ -1323,10 +1324,10 @@ void TheorySetsRels::check(Theory::Effort level) } } - void TheorySetsRels::sendInfer(Node fact, Node reason, const char* c) + void TheorySetsRels::sendInfer(Node fact, InferenceId id, Node reason) { Trace("rels-lemma") << "Rels::lemma " << fact << " from " << reason - << " by " << c << 
std::endl; + << " by " << id << std::endl; Node lemma = NodeManager::currentNM()->mkNode(IMPLIES, reason, fact); d_pending.push_back(lemma); } diff --git a/src/theory/sets/theory_sets_rels.h b/src/theory/sets/theory_sets_rels.h index 95a13f4d5..312e55b0d 100644 --- a/src/theory/sets/theory_sets_rels.h +++ b/src/theory/sets/theory_sets_rels.h @@ -127,12 +127,9 @@ private: * Called when we have inferred fact from explanation reason, where the * latter should be a conjunction of facts that hold in the current context. * - * The argument c is used for debugging, to give the name of the inference - * rule being used. - * * This method adds the node (=> reason exp) to the pending vector d_pending. */ - void sendInfer(Node fact, Node reason, const char* c); + void sendInfer(Node fact, InferenceId id, Node reason); /** * This method flushes the inferences in the pending vector d_pending to * theory of sets, which may process them as lemmas or as facts to assert to @@ -143,10 +140,8 @@ private: * * A wrapper around d_im.assertInference that ensures that we do not send * inferences with explanations that are not entailed. - * - * Argument c is used for debugging, typically the name of the inference. */ - void processInference(Node conc, Node exp, const char* c); + void processInference(Node conc, InferenceId id, Node exp); /** Methods used in full effort */ void check();
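
For readers unfamiliar with the inference-id machinery, the following minimal, self-contained C++ sketch illustrates the pattern this commit applies throughout the sets theory. It is NOT cvc5 code: the enum values, toString and assertInference below are trimmed-down, hypothetical stand-ins that mirror the hunks above, where a printable InferenceId enum replaces the ad-hoc const char* name previously attached to each inference.

// Minimal sketch -- illustrative stand-ins only, not cvc5's actual headers.
#include <iostream>
#include <string>

enum class InferenceId
{
  SETS_DEQ,
  SETS_DOWN_CLOSURE,
  UNKNOWN
};

const char* toString(InferenceId i)
{
  switch (i)
  {
    case InferenceId::SETS_DEQ: return "SETS_DEQ";
    case InferenceId::SETS_DOWN_CLOSURE: return "SETS_DOWN_CLOSURE";
    default: return "UNKNOWN";
  }
}

std::ostream& operator<<(std::ostream& out, InferenceId i)
{
  return out << toString(i);
}

// Stand-in for InferenceManager::assertInference: the id now travels with the
// inference, so trace output can print one stable identifier per rule.
void assertInference(const std::string& fact, InferenceId id, const std::string& exp)
{
  std::cout << "Sets::Lemma : " << fact << " from " << exp << " by " << id
            << std::endl;
}

int main()
{
  // Before this commit: d_im.assertInference(lem, d_true, "diseq", 1);
  // After this commit:  d_im.assertInference(lem, InferenceId::SETS_DEQ, d_true, 1);
  assertInference("(or (= A B) (not (= (member x A) (member x B))))",
                  InferenceId::SETS_DEQ,
                  "true");
  return 0;
}

The practical effect, visible in the trace changes above, is that lines such as "Sets::Lemma : ... by SETS_DEQ" are now keyed by an enum value shared across theories rather than by a free-form string local to each call site.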