This PR adds a new ArithLemma class that is essentially NlLemma, but derived from the new theory::SimpleTheoryLemma and meant to be used across the whole arithmetic theory.
Also, based on theory::InferenceManagerBuffered, this PR adds arith::InferenceManager, which shall become the sole interface between the arithmetic theory and the OutputChannel.
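For illustration, a minimal usage sketch (the helper function and the `im`/`lem` variables are hypothetical and not part of this PR; only the InferenceManager methods it calls are introduced here):

```cpp
// Sketch only: routing a lemma through the new arith::InferenceManager
// instead of talking to the OutputChannel directly. Assumes we are inside
// CVC4::theory::arith and have access to an InferenceManager `im` and a
// lemma node `lem`.
void sendViaInferenceManager(InferenceManager& im, Node lem)
{
  // Buffer the lemma as a waiting lemma; addPendingArithLemma rewrites it
  // and checks it against the lemma cache.
  im.addPendingArithLemma(lem, nl::Inference::UNKNOWN, /* isWaiting */ true);

  if (im.numWaitingLemmas() > 0)
  {
    // Promote all waiting lemmas to pending lemmas ...
    im.flushWaitingLemmas();
    // ... and send them on the output channel via the buffered base class.
    im.doPendingLemmas();
  }
}
```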
theory/arith/approx_simplex.h
theory/arith/arith_ite_utils.cpp
theory/arith/arith_ite_utils.h
+ theory/arith/arith_lemma.cpp
+ theory/arith/arith_lemma.h
theory/arith/arith_msum.cpp
theory/arith/arith_msum.h
theory/arith/arith_rewriter.cpp
theory/arith/fc_simplex.h
theory/arith/infer_bounds.cpp
theory/arith/infer_bounds.h
+ theory/arith/inference_manager.cpp
+ theory/arith/inference_manager.h
theory/arith/linear_equality.cpp
theory/arith/linear_equality.h
theory/arith/matrix.cpp
--- /dev/null
+/********************* */
+/*! \file arith_lemma.cpp
+ ** \verbatim
+ ** Top contributors (to current version):
+ ** Gereon Kremer
+ ** This file is part of the CVC4 project.
+ ** Copyright (c) 2009-2020 by the authors listed in the file AUTHORS
+ ** in the top-level source directory) and their institutional affiliations.
+ ** All rights reserved. See the file COPYING in the top-level source
+ ** directory for licensing information.\endverbatim
+ **
+ ** \brief ArithLemma class, derived from Lemma.
+ **/
+
+#include "theory/arith/arith_lemma.h"
+
+namespace CVC4 {
+namespace theory {
+namespace arith {
+
+std::ostream& operator<<(std::ostream& out, const ArithLemma& al)
+{
+ return out << al.d_node;
+}
+
+} // namespace arith
+} // namespace theory
+} // namespace CVC4
--- /dev/null
+/********************* */
+/*! \file arith_lemma.h
+ ** \verbatim
+ ** Top contributors (to current version):
+ ** Gereon Kremer
+ ** This file is part of the CVC4 project.
+ ** Copyright (c) 2009-2020 by the authors listed in the file AUTHORS
+ ** in the top-level source directory) and their institutional affiliations.
+ ** All rights reserved. See the file COPYING in the top-level source
+ ** directory for licensing information.\endverbatim
+ **
+ ** \brief ArithLemma class, derived from Lemma.
+ **/
+
+#ifndef CVC4__THEORY__ARITH__ARITH_LEMMA_H
+#define CVC4__THEORY__ARITH__ARITH_LEMMA_H
+
+#include <tuple>
+#include <vector>
+
+#include "expr/node.h"
+#include "theory/arith/nl/inference.h"
+#include "theory/inference_manager_buffered.h"
+#include "theory/output_channel.h"
+#include "theory/theory_inference.h"
+
+namespace CVC4 {
+namespace theory {
+namespace arith {
+
+/**
+ * The data structure for a single lemma to be processed by the arithmetic
+ * theory,
+ * derived from theory::SimpleTheoryLemma.
+ *
+ * This also includes the inference type that produced this lemma.
+ */
+class ArithLemma : public SimpleTheoryLemma
+{
+ public:
+ ArithLemma(Node n,
+ LemmaProperty p,
+ ProofGenerator* pg,
+ nl::Inference inf = nl::Inference::UNKNOWN)
+ : SimpleTheoryLemma(n, p, pg), d_inference(inf)
+ {
+ }
+ virtual ~ArithLemma() {}
+
+ /** The inference id for the lemma */
+ nl::Inference d_inference;
+};
+/**
+ * Writes an arithmetic lemma to a stream.
+ */
+std::ostream& operator<<(std::ostream& out, const ArithLemma& al);
+
+} // namespace arith
+} // namespace theory
+} // namespace CVC4
+
+#endif /* CVC4__THEORY__ARITH__ARITH_LEMMA_H */
--- /dev/null
+/********************* */
+/*! \file inference_manager.cpp
+ ** \verbatim
+ ** Top contributors (to current version):
+ ** Gereon Kremer
+ ** This file is part of the CVC4 project.
+ ** Copyright (c) 2009-2020 by the authors listed in the file AUTHORS
+ ** in the top-level source directory) and their institutional affiliations.
+ ** All rights reserved. See the file COPYING in the top-level source
+ ** directory for licensing information.\endverbatim
+ **
+ ** \brief Implementation of the inference manager for the theory of arithmetic.
+ **/
+
+#include "theory/arith/inference_manager.h"
+
+#include "options/arith_options.h"
+#include "theory/arith/theory_arith.h"
+#include "theory/rewriter.h"
+
+namespace CVC4 {
+namespace theory {
+namespace arith {
+
+InferenceManager::InferenceManager(TheoryArith& ta,
+ ArithState& astate,
+ ProofNodeManager* pnm)
+ : InferenceManagerBuffered(ta, astate, pnm), d_lemmasPp(ta.getUserContext())
+{
+}
+
+void InferenceManager::addPendingArithLemma(std::shared_ptr<ArithLemma> lemma,
+ bool isWaiting)
+{
+ lemma->d_node = Rewriter::rewrite(lemma->d_node);
+ if (hasCachedLemma(lemma->d_node, lemma->d_property))
+ {
+ return;
+ }
+ if (isEntailedFalse(*lemma))
+ {
+ if (isWaiting)
+ {
+ d_waitingLem.clear();
+ }
+ else
+ {
+ d_pendingLem.clear();
+ d_theoryState.notifyInConflict();
+ }
+ }
+ if (isWaiting)
+ {
+ d_waitingLem.emplace_back(std::move(lemma));
+ }
+ else
+ {
+ addPendingLemma(std::move(lemma));
+ }
+}
+void InferenceManager::addPendingArithLemma(const ArithLemma& lemma,
+ bool isWaiting)
+{
+ addPendingArithLemma(std::make_shared<ArithLemma>(lemma), isWaiting);
+}
+void InferenceManager::addPendingArithLemma(const Node& lemma,
+ nl::Inference inftype,
+ bool isWaiting)
+{
+ addPendingArithLemma(std::make_shared<ArithLemma>(
+ lemma, LemmaProperty::NONE, nullptr, inftype),
+ isWaiting);
+}
+
+void InferenceManager::flushWaitingLemmas()
+{
+ for (auto& lem : d_waitingLem)
+ {
+ addPendingLemma(std::move(lem));
+ }
+ d_waitingLem.clear();
+}
+
+void InferenceManager::addConflict(const Node& conf, nl::Inference inftype)
+{
+ conflict(Rewriter::rewrite(conf));
+}
+
+std::size_t InferenceManager::numWaitingLemmas() const
+{
+ return d_waitingLem.size();
+}
+
+bool InferenceManager::hasCachedLemma(TNode lem, LemmaProperty p)
+{
+ if (isLemmaPropertyPreprocess(p))
+ {
+ return d_lemmasPp.find(lem) != d_lemmasPp.end();
+ }
+ return TheoryInferenceManager::hasCachedLemma(lem, p);
+}
+
+bool InferenceManager::cacheLemma(TNode lem, LemmaProperty p)
+{
+ if (isLemmaPropertyPreprocess(p))
+ {
+ if (d_lemmasPp.find(lem) != d_lemmasPp.end())
+ {
+ return false;
+ }
+ d_lemmasPp.insert(lem);
+ return true;
+ }
+ return TheoryInferenceManager::cacheLemma(lem, p);
+}
+
+bool InferenceManager::isEntailedFalse(const ArithLemma& lem)
+{
+ if (options::nlExtEntailConflicts())
+ {
+ Node ch_lemma = lem.d_node.negate();
+ ch_lemma = Rewriter::rewrite(ch_lemma);
+ Trace("arith-inf-manager") << "InferenceManager::Check entailment of "
+ << ch_lemma << "..." << std::endl;
+
+ std::pair<bool, Node> et = d_theory.getValuation().entailmentCheck(
+ options::TheoryOfMode::THEORY_OF_TYPE_BASED, ch_lemma);
+ Trace("arith-inf-manager") << "entailment test result : " << et.first << " "
+ << et.second << std::endl;
+ if (et.first)
+ {
+ Trace("arith-inf-manager")
+ << "*** Lemma entailed to be in conflict : " << lem.d_node
+ << std::endl;
+ return true;
+ }
+ }
+ return false;
+}
+
+} // namespace arith
+} // namespace theory
+} // namespace CVC4
--- /dev/null
+/********************* */
+/*! \file inference_manager.h
+ ** \verbatim
+ ** Top contributors (to current version):
+ ** Gereon Kremer
+ ** This file is part of the CVC4 project.
+ ** Copyright (c) 2009-2020 by the authors listed in the file AUTHORS
+ ** in the top-level source directory) and their institutional affiliations.
+ ** All rights reserved. See the file COPYING in the top-level source
+ ** directory for licensing information.\endverbatim
+ **
+ ** \brief Customized inference manager for the theory of arithmetic
+ **/
+
+#include "cvc4_private.h"
+
+#ifndef CVC4__THEORY__ARITH__INFERENCE_MANAGER_H
+#define CVC4__THEORY__ARITH__INFERENCE_MANAGER_H
+
+#include <map>
+#include <vector>
+
+#include "theory/arith/arith_lemma.h"
+#include "theory/arith/arith_state.h"
+#include "theory/arith/nl/inference.h"
+#include "theory/arith/nl/nl_lemma_utils.h"
+#include "theory/inference_manager_buffered.h"
+
+namespace CVC4 {
+namespace theory {
+namespace arith {
+
+class TheoryArith;
+
+/**
+ * A specialized variant of the InferenceManagerBuffered, tailored to the
+ * arithmetic theory.
+ *
+ * It adds some convenience methods to send an ArithLemma and adds a
+ * mechanism for waiting lemmas that can be flushed into the pending lemmas
+ * of this buffered inference manager.
+ * It also extends the caching mechanism of TheoryInferenceManager to cache
+ * preprocessing lemmas and non-preprocessing lemmas separately. For the
+ * latter, it uses the cache provided by the TheoryInferenceManager base
+ * class; preprocessing lemmas are cached in a separate
+ * user-context-dependent set.
+ */
+class InferenceManager : public InferenceManagerBuffered
+{
+ using NodeSet = context::CDHashSet<Node, NodeHashFunction>;
+
+ public:
+ InferenceManager(TheoryArith& ta, ArithState& astate, ProofNodeManager* pnm);
+
+ /**
+ * Add a lemma as pending lemma to this inference manager.
+ * If isWaiting is true, the lemma is first stored as waiting lemma and only
+ * added as pending lemma when calling flushWaitingLemmas.
+ */
+ void addPendingArithLemma(std::shared_ptr<ArithLemma> lemma,
+ bool isWaiting = false);
+ /**
+ * Add a lemma as pending lemma to this inference manager.
+ * If isWaiting is true, the lemma is first stored as waiting lemma and only
+ * added as pending lemma when calling flushWaitingLemmas.
+ */
+ void addPendingArithLemma(const ArithLemma& lemma, bool isWaiting = false);
+ /**
+ * Add a lemma as pending lemma to this inference manager.
+ * If isWaiting is true, the lemma is first stored as waiting lemma and only
+ * added as pending lemma when calling flushWaitingLemmas.
+ */
+ void addPendingArithLemma(const Node& lemma,
+ nl::Inference inftype,
+ bool isWaiting = false);
+
+ /**
+ * Flush all waiting lemmas to this inference manager (as pending
+ * lemmas). To actually send them, call doPendingLemmas() afterwards.
+ */
+ void flushWaitingLemmas();
+
+ /** Add a conflict to this inference manager. */
+ void addConflict(const Node& conf, nl::Inference inftype);
+
+ /** Returns the number of waiting lemmas. */
+ std::size_t numWaitingLemmas() const;
+
+ /** Checks whether the given lemma is already present in the cache. */
+ virtual bool hasCachedLemma(TNode lem, LemmaProperty p) override;
+
+ protected:
+ /**
+ * Adds the given lemma to the cache. Returns true if it has not been in the
+ * cache yet.
+ */
+ virtual bool cacheLemma(TNode lem, LemmaProperty p) override;
+
+ private:
+ /**
+ * Checks whether the lemma is entailed to be false. If so, it is a
+ * conflict.
+ */
+ bool isEntailedFalse(const ArithLemma& lem);
+
+ /** The waiting lemmas. */
+ std::vector<std::shared_ptr<ArithLemma>> d_waitingLem;
+
+ /** cache of all preprocessed lemmas sent on the output channel
+ * (user-context-dependent) */
+ NodeSet d_lemmasPp;
+};
+
+} // namespace arith
+} // namespace theory
+} // namespace CVC4
+
+#endif
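
(Aside, not part of the diff: a sketch of how the split lemma cache described above behaves; `im` is an assumed InferenceManager and `n` an assumed rewritten lemma node.)

```cpp
// A PREPROCESS lemma is cached in the new user-context-dependent set
// d_lemmasPp once it is sent ...
im.addPendingArithLemma(std::make_shared<ArithLemma>(
    n, LemmaProperty::PREPROCESS, nullptr, nl::Inference::T_PURIFY_ARG));
// ... while a regular lemma with the same node is checked against the
// TheoryInferenceManager base-class cache, so it is not filtered as a
// duplicate of the preprocess lemma.
im.addPendingArithLemma(n, nl::Inference::UNKNOWN);
```
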
{
lems.emplace_back(nm->mkNode(Kind::OR, mis), Inference::CAD_CONFLICT);
}
- Trace("nl-cad") << "UNSAT with MIS: " << lems.back().d_lemma << std::endl;
+ Trace("nl-cad") << "UNSAT with MIS: " << lems.back().d_node << std::endl;
}
return lems;
#else
#include "theory/arith/nl/nl_lemma_utils.h"
#include "theory/arith/nl/nl_model.h"
+#include "theory/arith/nl/nonlinear_extension.h"
namespace CVC4 {
namespace theory {
namespace arith {
namespace nl {
-LemmaProperty NlLemma::getLemmaProperty() const
+bool NlLemma::process(TheoryInferenceManager* im)
{
- return d_preprocess ? LemmaProperty::PREPROCESS : LemmaProperty::NONE;
+ bool res = ArithLemma::process(im);
+ if (d_nlext != nullptr)
+ {
+ d_nlext->processSideEffect(*this);
+ }
+ return res;
}
std::ostream& operator<<(std::ostream& out, NlLemma& n)
{
- out << n.d_lemma;
+ out << n.d_node;
return out;
}
#include <tuple>
#include <vector>
+
#include "expr/node.h"
+#include "theory/arith/arith_lemma.h"
#include "theory/arith/nl/inference.h"
#include "theory/output_channel.h"
namespace nl {
class NlModel;
+class NonlinearExtension;
/**
* The data structure for a single lemma to process by the non-linear solver,
* - A set of secant points to record (for transcendental secant plane
* inferences).
*/
-struct NlLemma
+class NlLemma : public ArithLemma
{
- NlLemma(Node lem, Inference id = Inference::UNKNOWN)
- : d_id(id), d_lemma(lem), d_preprocess(false)
+ public:
+ NlLemma(Node n,
+ LemmaProperty p,
+ ProofGenerator* pg,
+ nl::Inference inf = nl::Inference::UNKNOWN)
+ : ArithLemma(n, p, pg, inf)
+ {
+ }
+ NlLemma(Node n, nl::Inference inf = nl::Inference::UNKNOWN)
+ : ArithLemma(n, LemmaProperty::NONE, nullptr, inf)
{
}
~NlLemma() {}
- /** The inference id for the lemma */
- Inference d_id;
- /** The lemma */
- Node d_lemma;
- /** Whether to preprocess the lemma */
- bool d_preprocess;
+
+ bool process(TheoryInferenceManager* im) override;
+
/** secant points to add
*
* A member (tf, d, c) in this vector indicates that point c should be added
* Cimatti et al., CADE 2017.
*/
std::vector<std::tuple<Node, unsigned, Node> > d_secantPoint;
- /** get lemma property (preprocess or none) */
- LemmaProperty getLemmaProperty() const;
+
+ /** The nonlinear extension that should process the side effects of this
+  * lemma when it is sent, or nullptr if there are none. */
+ NonlinearExtension* d_nlext = nullptr;
};
/**
* Writes a non-linear lemma to a stream.
Node v = cb.first;
Node pred = nm->mkNode(AND, nm->mkNode(GEQ, v, l), nm->mkNode(GEQ, u, v));
pred = nm->mkNode(OR, mg.negate(), pred);
- lemmas.push_back(pred);
+ lemmas.emplace_back(pred);
}
}
return true;
std::vector<NlLemma> nr_lemmas;
for (unsigned i = 0; i < lemmas.size(); i++)
{
- if (r_lemmas.find(lemmas[i].d_lemma) == r_lemmas.end())
+ if (r_lemmas.find(lemmas[i].d_node) == r_lemmas.end())
{
nr_lemmas.push_back(lemmas[i]);
}
{
for (const NlLemma& nlem : out)
{
- Node lem = nlem.d_lemma;
- LemmaProperty p = nlem.getLemmaProperty();
- Trace("nl-ext-lemma") << "NonlinearExtension::Lemma : " << nlem.d_id
+ Node lem = nlem.d_node;
+ LemmaProperty p = nlem.d_property;
+ Trace("nl-ext-lemma") << "NonlinearExtension::Lemma : " << nlem.d_inference
<< " : " << lem << std::endl;
d_containing.getOutputChannel().lemma(lem, p);
// process the side effect
{
d_lemmas.insert(lem);
}
- d_stats.d_inferences << nlem.d_id;
+ d_stats.d_inferences << nlem.d_inference;
}
}
unsigned NonlinearExtension::filterLemma(NlLemma lem, std::vector<NlLemma>& out)
{
Trace("nl-ext-lemma-debug")
- << "NonlinearExtension::Lemma pre-rewrite : " << lem.d_lemma << std::endl;
- lem.d_lemma = Rewriter::rewrite(lem.d_lemma);
+ << "NonlinearExtension::Lemma pre-rewrite : " << lem.d_node << std::endl;
+ lem.d_node = Rewriter::rewrite(lem.d_node);
// get the proper cache
- NodeSet& lcache = lem.d_preprocess ? d_lemmasPp : d_lemmas;
- if (lcache.find(lem.d_lemma) != lcache.end())
+ NodeSet& lcache =
+ isLemmaPropertyPreprocess(lem.d_property) ? d_lemmasPp : d_lemmas;
+ if (lcache.find(lem.d_node) != lcache.end())
{
Trace("nl-ext-lemma-debug")
- << "NonlinearExtension::Lemma duplicate : " << lem.d_lemma << std::endl;
+ << "NonlinearExtension::Lemma duplicate : " << lem.d_node << std::endl;
return 0;
}
out.emplace_back(lem);
// check if any are entailed to be false
for (const NlLemma& lem : lemmas)
{
- Node ch_lemma = lem.d_lemma.negate();
+ Node ch_lemma = lem.d_node.negate();
ch_lemma = Rewriter::rewrite(ch_lemma);
Trace("nl-ext-et-debug")
<< "Check entailment of " << ch_lemma << "..." << std::endl;
if (et.first)
{
Trace("nl-ext-et") << "*** Lemma entailed to be in conflict : "
- << lem.d_lemma << std::endl;
+ << lem.d_node << std::endl;
// return just this lemma
if (filterLemma(lem, out) > 0)
{
*/
void presolve();
+ /** Process side effect se */
+ void processSideEffect(const NlLemma& se);
+
private:
/** Model-based refinement
*
* Send lemmas in out on the output channel of theory of arithmetic.
*/
void sendLemmas(const std::vector<NlLemma>& out);
- /** Process side effect se */
- void processSideEffect(const NlLemma& se);
/** cache of all lemmas sent on the output channel (user-context-dependent) */
NodeSet d_lemmas;
// note we must do preprocess on this lemma
Trace("nl-ext-lemma") << "NonlinearExtension::Lemma : purify : " << lem
<< std::endl;
- NlLemma nlem(lem, Inference::T_PURIFY_ARG);
- nlem.d_preprocess = true;
+ NlLemma nlem(
+ lem, LemmaProperty::PREPROCESS, nullptr, Inference::T_PURIFY_ARG);
lems.emplace_back(nlem);
}
d_pendingReqPhase.clear();
}
+
+ std::size_t InferenceManagerBuffered::numPendingLemmas() const
+ {
+   return d_pendingLem.size();
+ }
+
+ std::size_t InferenceManagerBuffered::numPendingFacts() const
+ {
+   return d_pendingFact.size();
+ }
+
} // namespace theory
} // namespace CVC4
InferenceManagerBuffered(Theory& t,
TheoryState& state,
ProofNodeManager* pnm);
- ~InferenceManagerBuffered() {}
+ virtual ~InferenceManagerBuffered() {}
/**
* Have we processed an inference during this call to check? In particular,
* this returns true if we have a pending fact or lemma, or have encountered
/** Clear pending phase requirements, without processing */
void clearPendingPhaseRequirements();
+ /** Returns the number of pending lemmas. */
+ std::size_t numPendingLemmas() const;
+ /** Returns the number of pending facts. */
+ std::size_t numPendingFacts() const;
+
protected:
/** A set of pending inferences to be processed as lemmas */
std::vector<std::shared_ptr<TheoryInference>> d_pendingLem;