This PR refactors TheoryInference so that it is no longer responsible for calling back into InferenceManager; instead, it sets data so that InferenceManagerBuffered has enough information to do this itself. It also makes the decision of whether to cache lemmas in the theory inference manager a global per-theory choice instead of a per-lemma one.
Assert(tn.getKind() == TrustNodeKind::REWRITE);
// tn is of kind REWRITE, turn this into a LEMMA here
TrustNode tlem = TrustNode::mkTrustLemma(tn.getProven(), tn.getGenerator());
- // must preprocess
+ // send the trusted lemma
d_im.trustedLemma(tlem, InferenceId::ARITH_PP_ELIM_OPERATORS);
// mark the atom as reduced
d_reduced[atom] = true;
Node lem = valueBasedLemma(i);
Trace("iand-lemma")
<< "IAndSolver::Lemma: " << lem << " ; VALUE_REFINE" << std::endl;
- // value lemmas should not contain div/mod so we don't need to tag it with PREPROCESS
+ // send the value lemma
d_im.addPendingLemma(lem,
InferenceId::ARITH_NL_IAND_VALUE_REFINE,
nullptr,
- true,
- LemmaProperty::NONE);
+ true);
}
}
}
namespace arith {
namespace nl {
-bool NlLemma::process(TheoryInferenceManager* im, bool asLemma)
+TrustNode NlLemma::processLemma(LemmaProperty& p)
{
- bool res = SimpleTheoryLemma::process(im, asLemma);
if (d_nlext != nullptr)
{
d_nlext->processSideEffect(*this);
}
- return res;
+ return SimpleTheoryLemma::processLemma(p);
}
std::ostream& operator<<(std::ostream& out, NlLemma& n)
}
~NlLemma() {}
- bool process(TheoryInferenceManager* im, bool asLemma) override;
+ TrustNode processLemma(LemmaProperty& p) override;
/** secant points to add
*
InferenceManager::InferenceManager(Theory& t,
TheoryState& state,
ProofNodeManager* pnm)
- : TheoryInferenceManager(t, state, pnm, "theory::arrays"),
- d_lemmaPg(pnm ? new EagerProofGenerator(
- pnm, state.getUserContext(), "ArrayLemmaProofGenerator")
+ : TheoryInferenceManager(t, state, pnm, "theory::arrays", false),
+ d_lemmaPg(pnm ? new EagerProofGenerator(pnm,
+ state.getUserContext(),
+ "ArrayLemmaProofGenerator")
: nullptr)
{
}
}
bool InferenceManager::arrayLemma(
- Node conc, InferenceId id, Node exp, PfRule pfr, LemmaProperty p, bool doCache)
+ Node conc, InferenceId id, Node exp, PfRule pfr, LemmaProperty p)
{
Trace("arrays-infer") << "TheoryArrays::arrayLemma: " << conc << " by " << exp
<< "; " << id << std::endl;
convert(pfr, conc, exp, children, args);
// make the trusted lemma based on the eager proof generator and send
TrustNode tlem = d_lemmaPg->mkTrustNode(conc, pfr, children, args);
- return trustedLemma(tlem, id, p, doCache);
+ return trustedLemma(tlem, id, p);
}
// send lemma without proofs
Node lem = nm->mkNode(IMPLIES, exp, conc);
- return lemma(lem, id, p, doCache);
+ return lemma(lem, id, p);
}
void InferenceManager::convert(PfRule& id,
*/
bool assertInference(TNode atom, bool polarity, InferenceId id, TNode reason, PfRule pfr);
/**
- * Send lemma (exp => conc) based on proof rule id with properties p. Cache
- * the lemma if doCache is true.
+ * Send lemma (exp => conc) based on proof rule id with properties p.
*/
bool arrayLemma(Node conc,
InferenceId id,
Node exp,
PfRule pfr,
- LemmaProperty p = LemmaProperty::NONE,
- bool doCache = false);
+ LemmaProperty p = LemmaProperty::NONE);
private:
/**
namespace bags {
BagSolver::BagSolver(SolverState& s, InferenceManager& im, TermRegistry& tr)
- : d_state(s), d_ig(&d_state), d_im(im), d_termReg(tr)
+ : d_state(s), d_ig(&s, &im), d_im(im), d_termReg(tr)
{
d_zero = NodeManager::currentNM()->mkConst(Rational(0));
d_one = NodeManager::currentNM()->mkConst(Rational(1));
for (const Node& e : d_state.getElements(n))
{
InferInfo i = d_ig.empty(n, e);
- i.process(&d_im, true);
+ d_im.lemmaTheoryInference(&i);
}
}
for (const Node& e : elements)
{
InferInfo i = d_ig.unionDisjoint(n, e);
- i.process(&d_im, true);
+ d_im.lemmaTheoryInference(&i);
}
}
for (const Node& e : elements)
{
InferInfo i = d_ig.unionMax(n, e);
- i.process(&d_im, true);
+ d_im.lemmaTheoryInference(&i);
}
}
for (const Node& e : elements)
{
InferInfo i = d_ig.intersection(n, e);
- i.process(&d_im, true);
+ d_im.lemmaTheoryInference(&i);
}
}
for (const Node& e : elements)
{
InferInfo i = d_ig.differenceSubtract(n, e);
- i.process(&d_im, true);
+ d_im.lemmaTheoryInference(&i);
}
}
for (const Node& e : d_state.getElements(n))
{
InferInfo i = d_ig.mkBag(n, e);
- i.process(&d_im, true);
+ d_im.lemmaTheoryInference(&i);
}
}
void BagSolver::checkNonNegativeCountTerms(const Node& bag, const Node& element)
{
InferInfo i = d_ig.nonNegativeCount(bag, element);
- i.process(&d_im, true);
+ d_im.lemmaTheoryInference(&i);
}
void BagSolver::checkDifferenceRemove(const Node& n)
for (const Node& e : elements)
{
InferInfo i = d_ig.differenceRemove(n, e);
- i.process(&d_im, true);
+ d_im.lemmaTheoryInference(&i);
}
}
for (const Node& e : elements)
{
InferInfo i = d_ig.duplicateRemoval(n, e);
- i.process(&d_im, true);
+ d_im.lemmaTheoryInference(&i);
}
}
for (const Node& n : d_state.getDisequalBagTerms())
{
InferInfo info = d_ig.bagDisequality(n);
- info.process(&d_im, true);
+ d_im.lemmaTheoryInference(&info);
}
}
namespace theory {
namespace bags {
-InferInfo::InferInfo(InferenceId id) : TheoryInference(id) {}
+InferInfo::InferInfo(TheoryInferenceManager* im, InferenceId id)
+ : TheoryInference(id), d_im(im)
+{
+}
-bool InferInfo::process(TheoryInferenceManager* im, bool asLemma)
+TrustNode InferInfo::processLemma(LemmaProperty& p)
{
- Node lemma = d_conclusion;
- if (d_premises.size() >= 2)
- {
- Node andNode = NodeManager::currentNM()->mkNode(kind::AND, d_premises);
- lemma = andNode.impNode(lemma);
- }
- else if (d_premises.size() == 1)
- {
- lemma = d_premises[0].impNode(lemma);
- }
- if (asLemma)
- {
- TrustNode trustedLemma = TrustNode::mkTrustLemma(lemma, nullptr);
- im->trustedLemma(trustedLemma, getId());
- }
- else
- {
- Unimplemented();
- }
+ NodeManager* nm = NodeManager::currentNM();
+ Node pnode = nm->mkAnd(d_premises);
+ Node lemma = nm->mkNode(kind::IMPLIES, pnode, d_conclusion);
+
+ // send lemmas corresponding to the skolems introduced
for (const auto& pair : d_skolems)
{
Node n = pair.first.eqNode(pair.second);
TrustNode trustedLemma = TrustNode::mkTrustLemma(n, nullptr);
- im->trustedLemma(trustedLemma, getId());
+ d_im->trustedLemma(trustedLemma, getId(), p);
}
Trace("bags::InferInfo::process") << (*this) << std::endl;
- return true;
+ return TrustNode::mkTrustLemma(lemma, nullptr);
}
bool InferInfo::isTrivial() const
namespace CVC4 {
namespace theory {
+
+class TheoryInferenceManager;
+
namespace bags {
-class InferenceManager;
/**
* An inference. This is a class to track an unprocessed call to either
class InferInfo : public TheoryInference
{
public:
- InferInfo(InferenceId id);
+ InferInfo(TheoryInferenceManager* im, InferenceId id);
~InferInfo() {}
- /** Process this inference */
- bool process(TheoryInferenceManager* im, bool asLemma) override;
+ /** Process lemma */
+ TrustNode processLemma(LemmaProperty& p) override;
+ /** Pointer to the class used for processing this info */
+ TheoryInferenceManager* d_im;
/** The conclusion */
Node d_conclusion;
/**
namespace theory {
namespace bags {
-InferenceGenerator::InferenceGenerator(SolverState* state) : d_state(state)
+InferenceGenerator::InferenceGenerator(SolverState* state, InferenceManager* im)
+ : d_state(state), d_im(im)
{
d_nm = NodeManager::currentNM();
d_sm = d_nm->getSkolemManager();
Assert(n.getType().isBag());
Assert(e.getType() == n.getType().getBagElementType());
- InferInfo inferInfo(InferenceId::BAG_NON_NEGATIVE_COUNT);
+ InferInfo inferInfo(d_im, InferenceId::BAG_NON_NEGATIVE_COUNT);
Node count = d_nm->mkNode(kind::BAG_COUNT, e, n);
Node gte = d_nm->mkNode(kind::GEQ, count, d_zero);
{
// TODO issue #78: refactor this with BagRewriter
// (=> true (= (bag.count e (bag e c)) c))
- InferInfo inferInfo(InferenceId::BAG_MK_BAG_SAME_ELEMENT);
+ InferInfo inferInfo(d_im, InferenceId::BAG_MK_BAG_SAME_ELEMENT);
Node skolem = getSkolem(n, inferInfo);
Node count = getMultiplicityTerm(e, skolem);
inferInfo.d_conclusion = count.eqNode(n[1]);
// (=>
// true
// (= (bag.count e (bag x c)) (ite (= e x) c 0)))
- InferInfo inferInfo(InferenceId::BAG_MK_BAG);
+ InferInfo inferInfo(d_im, InferenceId::BAG_MK_BAG);
Node skolem = getSkolem(n, inferInfo);
Node count = getMultiplicityTerm(e, skolem);
Node same = d_nm->mkNode(kind::EQUAL, n[0], e);
Node A = n[0];
Node B = n[1];
- InferInfo inferInfo(InferenceId::BAG_DISEQUALITY);
+ InferInfo inferInfo(d_im, InferenceId::BAG_DISEQUALITY);
TypeNode elementType = A.getType().getBagElementType();
BoundVarManager* bvm = d_nm->getBoundVarManager();
Assert(n.getKind() == kind::EMPTYBAG);
Assert(e.getType() == n.getType().getBagElementType());
- InferInfo inferInfo(InferenceId::BAG_EMPTY);
+ InferInfo inferInfo(d_im, InferenceId::BAG_EMPTY);
Node skolem = getSkolem(n, inferInfo);
Node count = getMultiplicityTerm(e, skolem);
Node A = n[0];
Node B = n[1];
- InferInfo inferInfo(InferenceId::BAG_UNION_DISJOINT);
+ InferInfo inferInfo(d_im, InferenceId::BAG_UNION_DISJOINT);
Node countA = getMultiplicityTerm(e, A);
Node countB = getMultiplicityTerm(e, B);
Node A = n[0];
Node B = n[1];
- InferInfo inferInfo(InferenceId::BAG_UNION_MAX);
+ InferInfo inferInfo(d_im, InferenceId::BAG_UNION_MAX);
Node countA = getMultiplicityTerm(e, A);
Node countB = getMultiplicityTerm(e, B);
Node A = n[0];
Node B = n[1];
- InferInfo inferInfo(InferenceId::BAG_INTERSECTION_MIN);
+ InferInfo inferInfo(d_im, InferenceId::BAG_INTERSECTION_MIN);
Node countA = getMultiplicityTerm(e, A);
Node countB = getMultiplicityTerm(e, B);
Node A = n[0];
Node B = n[1];
- InferInfo inferInfo(InferenceId::BAG_DIFFERENCE_SUBTRACT);
+ InferInfo inferInfo(d_im, InferenceId::BAG_DIFFERENCE_SUBTRACT);
Node countA = getMultiplicityTerm(e, A);
Node countB = getMultiplicityTerm(e, B);
Node A = n[0];
Node B = n[1];
- InferInfo inferInfo(InferenceId::BAG_DIFFERENCE_REMOVE);
+ InferInfo inferInfo(d_im, InferenceId::BAG_DIFFERENCE_REMOVE);
Node countA = getMultiplicityTerm(e, A);
Node countB = getMultiplicityTerm(e, B);
Assert(e.getType() == n[0].getType().getBagElementType());
Node A = n[0];
- InferInfo inferInfo(InferenceId::BAG_DUPLICATE_REMOVAL);
+ InferInfo inferInfo(d_im, InferenceId::BAG_DUPLICATE_REMOVAL);
Node countA = getMultiplicityTerm(e, A);
Node skolem = getSkolem(n, inferInfo);
#include "expr/node.h"
#include "infer_info.h"
+#include "theory/bags/inference_manager.h"
#include "theory/bags/solver_state.h"
namespace CVC4 {
class InferenceGenerator
{
public:
- InferenceGenerator(SolverState* state);
+ InferenceGenerator(SolverState* state, InferenceManager* im);
/**
* @param A is a bag of type (Bag E)
NodeManager* d_nm;
SkolemManager* d_sm;
SolverState* d_state;
+ /** Pointer to the inference manager */
+ InferenceManager* d_im;
+ /** Commonly used constants */
Node d_true;
Node d_zero;
Node d_one;
#ifndef CVC4__THEORY__BAGS__INFERENCE_MANAGER_H
#define CVC4__THEORY__BAGS__INFERENCE_MANAGER_H
+#include "theory/bags/infer_info.h"
#include "theory/bags/solver_state.h"
#include "theory/inference_manager_buffered.h"
: Theory(THEORY_BAGS, c, u, out, valuation, logicInfo, pnm),
d_state(c, u, valuation),
d_im(*this, d_state, nullptr),
- d_ig(&d_state),
+ d_ig(&d_state, &d_im),
d_notify(*this, d_im),
d_statistics(),
d_rewriter(&d_statistics.d_rewrites),
return false;
}
-bool DatatypesInference::process(TheoryInferenceManager* im, bool asLemma)
+TrustNode DatatypesInference::processLemma(LemmaProperty& p)
{
- // Check to see if we have to communicate it to the rest of the system.
- // The flag asLemma is true when the inference was marked that it must be
- // sent as a lemma.
- if (asLemma)
+ // we don't pass lemma property p currently, as it is always default
+ return d_im->processDtLemma(d_conc, d_exp, getId());
+}
+
+Node DatatypesInference::processFact(std::vector<Node>& exp,
+ ProofGenerator*& pg)
+{
+ // add to the explanation vector if applicable (when non-trivial)
+ if (!d_exp.isNull() && !d_exp.isConst())
{
- return d_im->processDtLemma(d_conc, d_exp, getId());
+ exp.push_back(d_exp);
}
- return d_im->processDtFact(d_conc, d_exp, getId());
+ return d_im->processDtFact(d_conc, d_exp, getId(), pg);
}
} // namespace datatypes
* set to true.
*/
static bool mustCommunicateFact(Node n, Node exp);
- /**
- * Process this fact, possibly as a fact or as a lemma, depending on the
- * above method.
- */
- bool process(TheoryInferenceManager* im, bool asLemma) override;
+ /** Process lemma */
+ TrustNode processLemma(LemmaProperty& p) override;
+ /** Process internal fact */
+ Node processFact(std::vector<Node>& exp, ProofGenerator*& pg) override;
private:
/** Pointer to the inference manager */
doPendingFacts();
}
-void InferenceManager::sendDtLemma(Node lem,
- InferenceId id,
- LemmaProperty p,
- bool doCache)
+void InferenceManager::sendDtLemma(Node lem, InferenceId id, LemmaProperty p)
{
if (isProofEnabled())
{
return;
}
// otherwise send as a normal lemma
- if (lemma(lem, id, p, doCache))
+ if (lemma(lem, id, p))
{
d_inferenceLemmas << id;
}
bool InferenceManager::isProofEnabled() const { return d_ipc != nullptr; }
-bool InferenceManager::processDtLemma(
- Node conc, Node exp, InferenceId id, LemmaProperty p, bool doCache)
+TrustNode InferenceManager::processDtLemma(Node conc, Node exp, InferenceId id)
{
// set up a proof constructor
std::shared_ptr<InferProofCons> ipcl;
d_lemPg->setProofFor(lem, pn);
}
// use trusted lemma
- TrustNode tlem = TrustNode::mkTrustLemma(lem, d_lemPg.get());
- if (!trustedLemma(tlem, id))
- {
- Trace("dt-lemma-debug") << "...duplicate lemma" << std::endl;
- return false;
- }
- d_inferenceLemmas << id;
- return true;
+ return TrustNode::mkTrustLemma(lem, d_lemPg.get());
}
-bool InferenceManager::processDtFact(Node conc, Node exp, InferenceId id)
+Node InferenceManager::processDtFact(Node conc,
+ Node exp,
+ InferenceId id,
+ ProofGenerator*& pg)
{
- conc = prepareDtInference(conc, exp, id, d_ipc.get());
- // assert the internal fact, which has the same issue as above
- bool polarity = conc.getKind() != NOT;
- TNode atom = polarity ? conc : conc[0];
- if (isProofEnabled())
- {
- std::vector<Node> expv;
- if (!exp.isNull() && !exp.isConst())
- {
- expv.push_back(exp);
- }
- assertInternalFact(atom, polarity, id, expv, d_ipc.get());
- }
- else
- {
- // use version without proofs
- assertInternalFact(atom, polarity, id, exp);
- }
- d_inferenceFacts << id;
- return true;
+ pg = d_ipc.get();
+ return prepareDtInference(conc, exp, id, d_ipc.get());
}
Node InferenceManager::prepareDtInference(Node conc,
*/
void sendDtLemma(Node lem,
InferenceId i = InferenceId::UNKNOWN,
- LemmaProperty p = LemmaProperty::NONE,
- bool doCache = true);
+ LemmaProperty p = LemmaProperty::NONE);
/**
* Send conflict immediately on the output channel
*/
/**
* Process datatype inference as a lemma
*/
- bool processDtLemma(Node conc,
- Node exp,
- InferenceId id,
- LemmaProperty p = LemmaProperty::NONE,
- bool doCache = true);
+ TrustNode processDtLemma(Node conc, Node exp, InferenceId id);
/**
* Process datatype inference as a fact
*/
- bool processDtFact(Node conc, Node exp, InferenceId id);
+ Node processDtFact(Node conc, Node exp, InferenceId id, ProofGenerator*& pg);
/**
* Helper function for the above methods. Returns the conclusion, which
* may be modified so that it is compatible with proofs. If proofs are
Trace("dt-split-debug") << "Split lemma is : " << lemma << std::endl;
d_im.sendDtLemma(lemma,
InferenceId::DATATYPES_SPLIT,
- LemmaProperty::SEND_ATOMS,
- false);
+ LemmaProperty::SEND_ATOMS);
}
if( !options::dtBlastSplits() ){
break;
InferenceManagerBuffered::InferenceManagerBuffered(Theory& t,
TheoryState& state,
ProofNodeManager* pnm,
- const std::string& name)
- : TheoryInferenceManager(t, state, pnm, name),
+ const std::string& name,
+ bool cacheLemmas)
+ : TheoryInferenceManager(t, state, pnm, name, cacheLemmas),
d_processingPendingLemmas(false)
{
}
size_t i = 0;
while (!d_theoryState.isInConflict() && i < d_pendingFact.size())
{
- // process this fact, which notice may enqueue more pending facts in this
- // loop.
- d_pendingFact[i]->process(this, false);
+ // assert the internal fact, which notice may enqueue more pending facts in
+ // this loop, or result in a conflict.
+ assertInternalFactTheoryInference(d_pendingFact[i].get());
i++;
}
d_pendingFact.clear();
{
// process this lemma, which notice may enqueue more pending lemmas in this
// loop, or clear the lemmas.
- d_pendingLem[i]->process(this, true);
+ lemmaTheoryInference(d_pendingLem[i].get());
i++;
}
d_pendingLem.clear();
d_pendingReqPhase.clear();
}
+// Returns the number of buffered inferences awaiting processing as lemmas.
+std::size_t InferenceManagerBuffered::numPendingLemmas() const
+{
+  return d_pendingLem.size();
+}
+// Returns the number of buffered inferences awaiting processing as facts.
+std::size_t InferenceManagerBuffered::numPendingFacts() const
+{
+  return d_pendingFact.size();
+}
- std::size_t InferenceManagerBuffered::numPendingLemmas() const {
- return d_pendingLem.size();
- }
- std::size_t InferenceManagerBuffered::numPendingFacts() const {
- return d_pendingFact.size();
- }
+void InferenceManagerBuffered::lemmaTheoryInference(TheoryInference* lem)
+{
+  // Process this lemma. The inference returns the trust node to send and may
+  // update the lemma property p as a side effect (e.g. SimpleTheoryLemma
+  // overwrites p with its stored property); if it does not, the default
+  // LemmaProperty::NONE is used.
+  LemmaProperty p = LemmaProperty::NONE;
+  TrustNode tlem = lem->processLemma(p);
+  Assert(!tlem.isNull());
+  // send the lemma on the output channel; whether duplicates are filtered is
+  // governed by this manager's global lemma-caching policy (d_cacheLemmas)
+  trustedLemma(tlem, lem->getId(), p);
+}
+
+void InferenceManagerBuffered::assertInternalFactTheoryInference(
+    TheoryInference* fact)
+{
+  // Process this fact. The inference returns the conclusion literal and
+  // populates the explanation vector exp and, optionally, the proof
+  // generator pg (pg stays nullptr when the inference provides no proofs).
+  std::vector<Node> exp;
+  ProofGenerator* pg = nullptr;
+  Node lit = fact->processFact(exp, pg);
+  Assert(!lit.isNull());
+  // decompose the (possibly negated) literal into polarity and atom
+  bool pol = lit.getKind() != NOT;
+  TNode atom = pol ? lit : lit[0];
+  // no double negation or conjunctive conclusions
+  Assert(atom.getKind() != NOT && atom.getKind() != AND);
+  // assert the internal fact to the equality engine via the base manager
+  assertInternalFact(atom, pol, fact->getId(), exp, pg);
+}
} // namespace theory
} // namespace CVC4
InferenceManagerBuffered(Theory& t,
TheoryState& state,
ProofNodeManager* pnm,
- const std::string& name);
+ const std::string& name,
+ bool cacheLemmas = true);
virtual ~InferenceManagerBuffered() {}
/**
* Do we have a pending fact or lemma?
/** Returns the number of pending facts. */
std::size_t numPendingFacts() const;
+ /**
+ * Send the given theory inference as a lemma on the output channel of this
+ * inference manager. This calls TheoryInferenceManager::trustedLemma based
+ * on the provided theory inference.
+ */
+ void lemmaTheoryInference(TheoryInference* lem);
+ /**
+ * Add the given theory inference as an internal fact. This calls
+ * TheoryInferenceManager::assertInternalFact based on the provided theory
+ * inference.
+ */
+ void assertInternalFactTheoryInference(TheoryInference* fact);
+
protected:
/** A set of pending inferences to be processed as lemmas */
std::vector<std::unique_ptr<TheoryInference>> d_pendingLem;
d_proxy_to_term[k] = n;
Node eq = k.eqNode(n);
Trace("sets-lemma") << "Sets::Lemma : " << eq << " by proxy" << std::endl;
- d_im.lemma(eq, InferenceId::SETS_PROXY, LemmaProperty::NONE, false);
+ d_im.lemma(eq, InferenceId::SETS_PROXY);
if (nk == SINGLETON)
{
Node slem = nm->mkNode(MEMBER, n[0], k);
Trace("sets-lemma") << "Sets::Lemma : " << slem << " by singleton"
<< std::endl;
- d_im.lemma(slem, InferenceId::SETS_PROXY_SINGLETON, LemmaProperty::NONE, false);
+ d_im.lemma(slem, InferenceId::SETS_PROXY_SINGLETON);
}
return k;
}
Node ulem = nm->mkNode(SUBSET, n1, n2);
Trace("sets-lemma") << "Sets::Lemma : " << ulem << " by univ-type"
<< std::endl;
- d_im.lemma(ulem, InferenceId::SETS_UNIV_TYPE, LemmaProperty::NONE, false);
+ d_im.lemma(ulem, InferenceId::SETS_UNIV_TYPE);
}
}
d_univset[tn] = n;
{
}
-bool InferInfo::process(TheoryInferenceManager* im, bool asLemma)
+TrustNode InferInfo::processLemma(LemmaProperty& p)
{
- if (asLemma)
+ return d_sim->processLemma(*this, p);
+}
+
+Node InferInfo::processFact(std::vector<Node>& exp, ProofGenerator*& pg)
+{
+ for (const Node& ec : d_premises)
{
- return d_sim->processLemma(*this);
+ utils::flattenOp(kind::AND, ec, exp);
}
- return d_sim->processFact(*this);
+ d_sim->processFact(*this, pg);
+ return d_conc;
}
bool InferInfo::isTrivial() const
public:
InferInfo(InferenceId id);
~InferInfo() {}
- /** Process this inference */
- bool process(TheoryInferenceManager* im, bool asLemma) override;
+ /** Process lemma */
+ TrustNode processLemma(LemmaProperty& p) override;
+ /** Process internal fact */
+ Node processFact(std::vector<Node>& exp, ProofGenerator*& pg) override;
/** Pointer to the class used for processing this info */
InferenceManager* d_sim;
/** Whether it is the reverse form of the above id */
ExtTheory& e,
SequencesStatistics& statistics,
ProofNodeManager* pnm)
- : InferenceManagerBuffered(t, s, pnm, "theory::strings"),
+ : InferenceManagerBuffered(t, s, pnm, "theory::strings", false),
d_state(s),
d_termReg(tr),
d_extt(e),
trustedConflict(tconf, ii.getId());
}
-bool InferenceManager::processFact(InferInfo& ii)
+void InferenceManager::processFact(InferInfo& ii, ProofGenerator*& pg)
{
- // Get the fact(s). There are multiple facts if the conclusion is an AND
- std::vector<Node> facts;
- if (ii.d_conc.getKind() == AND)
- {
- for (const Node& cc : ii.d_conc)
- {
- facts.push_back(cc);
- }
- }
- else
- {
- facts.push_back(ii.d_conc);
- }
Trace("strings-assert") << "(assert (=> " << ii.getPremises() << " "
<< ii.d_conc << ")) ; fact " << ii.getId() << std::endl;
Trace("strings-lemma") << "Strings::Fact: " << ii.d_conc << " from "
<< ii.getPremises() << " by " << ii.getId()
<< std::endl;
- std::vector<Node> exp;
- for (const Node& ec : ii.d_premises)
- {
- utils::flattenOp(AND, ec, exp);
- }
- bool ret = false;
- // convert for each fact
- for (const Node& fact : facts)
+ if (d_ipc != nullptr)
{
- ii.d_conc = fact;
- bool polarity = fact.getKind() != NOT;
- TNode atom = polarity ? fact : fact[0];
- bool curRet = false;
- if (d_ipc != nullptr)
- {
- // ensure the proof generator is ready to explain this fact in the
- // current SAT context
- d_ipc->notifyFact(ii);
- // now, assert the internal fact with d_ipc as proof generator
- curRet = assertInternalFact(atom, polarity, ii.getId(), exp, d_ipc.get());
- }
- else
- {
- Node cexp = utils::mkAnd(exp);
- // without proof generator
- curRet = assertInternalFact(atom, polarity, ii.getId(), cexp);
- }
- ret = ret || curRet;
- // may be in conflict
- if (d_state.isInConflict())
- {
- break;
- }
+ // ensure the proof generator is ready to explain this fact in the
+ // current SAT context
+ d_ipc->notifyFact(ii);
+ pg = d_ipc.get();
}
- return ret;
}
-bool InferenceManager::processLemma(InferInfo& ii)
+TrustNode InferenceManager::processLemma(InferInfo& ii, LemmaProperty& p)
{
Assert(!ii.isTrivial());
Assert(!ii.isConflict());
d_termReg.registerTermAtomic(n, sks.first);
}
}
- LemmaProperty p = LemmaProperty::NONE;
if (ii.getId() == InferenceId::STRINGS_REDUCTION)
{
p |= LemmaProperty::NEEDS_JUSTIFY;
<< ii.getId() << std::endl;
++(d_statistics.d_lemmasInfer);
- // call the trusted lemma, without caching
- return trustedLemma(tlem, ii.getId(), p, false);
+ return tlem;
}
} // namespace strings
private:
/** Called when ii is ready to be processed as a fact */
- bool processFact(InferInfo& ii);
+ void processFact(InferInfo& ii, ProofGenerator*& pg);
/** Called when ii is ready to be processed as a lemma */
- bool processLemma(InferInfo& ii);
+ TrustNode processLemma(InferInfo& ii, LemmaProperty& p);
/** Reference to the solver state of the theory of strings. */
SolverState& d_state;
/** Reference to the term registry of theory of strings */
{
}
-bool SimpleTheoryLemma::process(TheoryInferenceManager* im, bool asLemma)
+TrustNode SimpleTheoryLemma::processLemma(LemmaProperty& p)
{
Assert(!d_node.isNull());
- Assert(asLemma);
- // send (trusted) lemma on the output channel with property p
- return im->trustedLemma(TrustNode::mkTrustLemma(d_node, d_pg), getId(), d_property);
+ p = d_property;
+ return TrustNode::mkTrustLemma(d_node, d_pg);
}
SimpleTheoryInternalFact::SimpleTheoryInternalFact(InferenceId id,
{
}
-bool SimpleTheoryInternalFact::process(TheoryInferenceManager* im, bool asLemma)
+Node SimpleTheoryInternalFact::processFact(std::vector<Node>& exp,
+ ProofGenerator*& pg)
{
- Assert(!asLemma);
- bool polarity = d_conc.getKind() != NOT;
- TNode atom = polarity ? d_conc : d_conc[0];
- // no double negation or conjunctive conclusions
- Assert(atom.getKind() != NOT && atom.getKind() != AND);
- if (d_pg != nullptr)
- {
- im->assertInternalFact(atom, polarity, getId(), {d_exp}, d_pg);
- }
- else
- {
- im->assertInternalFact(atom, polarity, getId(), d_exp);
- }
- return true;
+ exp.push_back(d_exp);
+ pg = d_pg;
+ return d_conc;
}
} // namespace theory
public:
TheoryInference(InferenceId id) : d_id(id) {}
virtual ~TheoryInference() {}
+
+ /**
+ * Process lemma, return the trust node to pass to
+ * TheoryInferenceManager::trustedLemma. In addition, the inference should
+ * process any internal side effects of the lemma.
+ *
+ * @param p The property of the lemma which will be passed to trustedLemma
+ * for this inference. If this call does not update p, the default value will
+ * be used.
+ * @return The trust node (of kind TrustNodeKind::LEMMA) corresponding to the
+ * lemma and its proof generator.
+ */
+ virtual TrustNode processLemma(LemmaProperty& p) { return TrustNode::null(); }
/**
- * Called by the provided inference manager to process this inference. This
- * method should make call(s) to inference manager to process this
- * inference, as well as processing any specific side effects. This method
- * typically makes a single call to the provided inference manager e.g.
- * d_im->trustedLemma or d_im->assertInternalFact. Notice it is the sole
- * responsibility of this class to make this call; the inference manager
- * does not call itself otherwise when processing pending inferences.
+ * Process internal fact, return the conclusion to pass to
+ * TheoryInferenceManager::assertInternalFact. In addition, the inference
+ * should process any internal side effects of the fact.
*
- * @param im The inference manager to use
- * @param asLemma Whether this inference is being processed as a lemma
- * during doPendingLemmas (otherwise it is being processed in doPendingFacts).
- * Typically, this method calls lemma or conflict when asLemma is
- * true, and assertInternalFact when this flag is false, although this
- * behavior is (intentionally) not strictly enforced. In particular, the
- * choice to send a conflict, lemma or fact may depend on local state of the
- * theory, which may change while the inference is pending. Hence the
- * implementation of this method may choose to implement any call to the
- * inference manager. This flag simply serves to track whether the inference
- * initially was added a pending lemma or not.
- * @return true if the inference was successfully processed by the inference
- * manager. This method for instance returns false if it corresponds to a
- * lemma that was already cached by im and hence was discarded.
+ * @param exp The explanation for the returned conclusion. Each node added to
+ * exp should be a (conjunction of) literals that hold in the current equality
+ * engine.
+ * @return The (possibly negated) conclusion.
*/
- virtual bool process(TheoryInferenceManager* im, bool asLemma) = 0;
+ virtual Node processFact(std::vector<Node>& exp, ProofGenerator*& pg)
+ {
+ return Node::null();
+ }
/** Get the InferenceId of this theory inference. */
InferenceId getId() const { return d_id; }
public:
SimpleTheoryLemma(InferenceId id, Node n, LemmaProperty p, ProofGenerator* pg);
virtual ~SimpleTheoryLemma() {}
- /**
- * Send the lemma using inference manager im, return true if the lemma was
- * sent. It should be the case that asLemma = true or an assertion failure
- * is thrown.
- */
- virtual bool process(TheoryInferenceManager* im, bool asLemma) override;
+ /** Process lemma */
+ TrustNode processLemma(LemmaProperty& p) override;
/** The lemma to send */
Node d_node;
/** The lemma property (see OutputChannel::lemma) */
public:
SimpleTheoryInternalFact(InferenceId id, Node conc, Node exp, ProofGenerator* pg);
virtual ~SimpleTheoryInternalFact() {}
- /**
- * Send the lemma using inference manager im, return true if the lemma was
- * sent. It should be the case that asLemma = false or an assertion failure
- * is thrown.
- */
- virtual bool process(TheoryInferenceManager* im, bool asLemma) override;
+ /** Process internal fact */
+ Node processFact(std::vector<Node>& exp, ProofGenerator*& pg) override;
/** The lemma to send */
Node d_conc;
/** The explanation */
TheoryInferenceManager::TheoryInferenceManager(Theory& t,
TheoryState& state,
ProofNodeManager* pnm,
- const std::string& name)
+ const std::string& name,
+ bool cacheLemmas)
: d_theory(t),
d_theoryState(state),
d_out(t.getOutputChannel()),
d_ee(nullptr),
d_pnm(pnm),
+ d_cacheLemmas(cacheLemmas),
d_keep(t.getSatContext()),
d_lemmasSent(t.getUserContext()),
d_numConflicts(0),
<< " mkTrustedConflictEqConstantMerge";
}
-bool TheoryInferenceManager::lemma(TNode lem,
- InferenceId id,
- LemmaProperty p,
- bool doCache)
+bool TheoryInferenceManager::lemma(TNode lem, InferenceId id, LemmaProperty p)
{
TrustNode tlem = TrustNode::mkTrustLemma(lem, nullptr);
- return trustedLemma(tlem, id, p, doCache);
+ return trustedLemma(tlem, id, p);
}
bool TheoryInferenceManager::trustedLemma(const TrustNode& tlem,
InferenceId id,
- LemmaProperty p,
- bool doCache)
+ LemmaProperty p)
{
- if (doCache)
+ // if the policy says to cache lemmas, check the cache and return false if
+ // we are a duplicate
+ if (d_cacheLemmas)
{
if (!cacheLemma(tlem.getNode(), p))
{
const std::vector<Node>& exp,
const std::vector<Node>& noExplain,
const std::vector<Node>& args,
- LemmaProperty p,
- bool doCache)
+ LemmaProperty p)
{
// make the trust node
TrustNode trn = mkLemmaExp(conc, pfr, exp, noExplain, args);
// send it on the output channel
- return trustedLemma(trn, id, p, doCache);
+ return trustedLemma(trn, id, p);
}
TrustNode TheoryInferenceManager::mkLemmaExp(Node conc,
const std::vector<Node>& exp,
const std::vector<Node>& noExplain,
ProofGenerator* pg,
- LemmaProperty p,
- bool doCache)
+ LemmaProperty p)
{
// make the trust node
TrustNode trn = mkLemmaExp(conc, exp, noExplain, pg);
// send it on the output channel
- return trustedLemma(trn, id, p, doCache);
+ return trustedLemma(trn, id, p);
}
TrustNode TheoryInferenceManager::mkLemmaExp(Node conc,
const std::vector<Node>& exp,
ProofGenerator* pg)
{
- Assert(pg != nullptr);
d_factIdStats << id;
return processInternalFact(atom, pol, PfRule::ASSUME, exp, {}, pg);
}
public:
/**
* Constructor, note that state should be the official state of theory t.
+ *
+ * @param t The theory this inference manager is for
+ * @param state The state of the theory
+ * @param pnm The proof node manager, which if non-null, enables proofs for
+ * this inference manager
+ * @param name The name of the inference manager, which is used for giving
+ * unique names for statistics,
+ * @param cacheLemmas Whether all lemmas sent using this theory inference
+ * manager are added to a user-context dependent cache. This means that
+ * only lemmas that are unique after rewriting are sent to the theory engine
+ * from this inference manager.
*/
TheoryInferenceManager(Theory& t,
TheoryState& state,
ProofNodeManager* pnm,
- const std::string& name);
+ const std::string& name,
+ bool cacheLemmas = true);
virtual ~TheoryInferenceManager();
/**
* Set equality engine, ee is a pointer to the official equality engine
*
* @param tlem The trust node containing the lemma and its proof generator.
* @param p The property of the lemma
- * @param doCache If true, we send the lemma only if it has not already been
- * cached (see cacheLemma), and add it to the cache during this call.
* @return true if the lemma was sent on the output channel.
*/
bool trustedLemma(const TrustNode& tlem,
InferenceId id,
- LemmaProperty p = LemmaProperty::NONE,
- bool doCache = true);
+ LemmaProperty p = LemmaProperty::NONE);
/**
* Send lemma lem with property p on the output channel. Same as above, with
* a node instead of a trust node.
*/
- bool lemma(TNode lem,
- InferenceId id,
- LemmaProperty p = LemmaProperty::NONE,
- bool doCache = true);
+ bool lemma(TNode lem, InferenceId id, LemmaProperty p = LemmaProperty::NONE);
/**
* Explained lemma. This should be called when
* ( exp => conc )
* equality engine
* @param args The arguments to the proof step concluding conc
* @param p The property of the lemma
- * @param doCache Whether to check and add the lemma to the cache
* @return true if the lemma was sent on the output channel.
*/
bool lemmaExp(Node conc,
const std::vector<Node>& exp,
const std::vector<Node>& noExplain,
const std::vector<Node>& args,
- LemmaProperty p = LemmaProperty::NONE,
- bool doCache = true);
+ LemmaProperty p = LemmaProperty::NONE);
/**
* Make the trust node for the above method. It is responsibility of the
* caller to subsequently call trustedLemma with the returned trust node.
* equality engine
* @param pg If non-null, the proof generator who can provide a proof of conc.
* @param p The property of the lemma
- * @param doCache Whether to check and add the lemma to the cache
* @return true if the lemma was sent on the output channel.
*/
bool lemmaExp(Node conc,
const std::vector<Node>& exp,
const std::vector<Node>& noExplain,
ProofGenerator* pg = nullptr,
- LemmaProperty p = LemmaProperty::NONE,
- bool doCache = true);
+ LemmaProperty p = LemmaProperty::NONE);
/**
* Make the trust node for the above method. It is responsibility of the
* caller to subsequently call trustedLemma with the returned trust node.
std::unique_ptr<eq::ProofEqEngine> d_pfee;
/** The proof node manager of the theory */
ProofNodeManager* d_pnm;
+ /** Whether this manager caches lemmas */
+ bool d_cacheLemmas;
/**
* The keep set of this class. This set is maintained to ensure that
* facts and their explanations are ref-counted. Since facts and their
Node lem = NodeManager::currentNM()->mkNode(
OR, cl, NodeManager::currentNM()->mkAnd(force_cl));
Trace("uf-ss-lemma") << "*** Enforce negative cardinality constraint lemma : " << lem << std::endl;
- d_im.lemma(lem, InferenceId::UF_CARD_ENFORCE_NEGATIVE, LemmaProperty::NONE, false);
+ d_im.lemma(lem, InferenceId::UF_CARD_ENFORCE_NEGATIVE);
return false;
}
}
Node eqv_lit = NodeManager::currentNM()->mkNode( CARDINALITY_CONSTRAINT, ct, lit[1] );
eqv_lit = lit.eqNode( eqv_lit );
Trace("uf-ss-lemma") << "*** Cardinality equiv lemma : " << eqv_lit << std::endl;
- d_im.lemma(eqv_lit, InferenceId::UF_CARD_EQUIV, LemmaProperty::NONE, false);
+ d_im.lemma(eqv_lit, InferenceId::UF_CARD_EQUIV);
d_card_assertions_eqv_lemma[lit] = true;
}
}
Node eq = Rewriter::rewrite( a.eqNode( b ) );
Node lem = NodeManager::currentNM()->mkNode( kind::OR, eq, eq.negate() );
Trace("uf-ss-lemma") << "*** Split (no-minimal) : " << lem << std::endl;
- d_im.lemma(lem, InferenceId::UF_CARD_SPLIT, LemmaProperty::NONE, false);
+ d_im.lemma(lem, InferenceId::UF_CARD_SPLIT);
d_im.requirePhase(eq, true);
type_proc[tn] = true;
break;
d_functionsTerms(c),
d_symb(u, instanceName),
d_state(c, u, valuation),
- d_im(*this, d_state, pnm, "theory::uf"),
+ d_im(*this, d_state, pnm, "theory::uf", false),
d_notify(d_im, *this)
{
d_true = NodeManager::currentNM()->mkConst( true );