This PR cleans up several issues in the arithmetic theory mostly related to its usage of InferenceId and the TheoryInferenceManager:
remove the ArithLemma class and use SimpleTheoryLemma instead
only use NlLemma if we actually need it
use proper InferenceIds everywhere
remove unused code in the nonlinear extension
theory/arith/approx_simplex.h
theory/arith/arith_ite_utils.cpp
theory/arith/arith_ite_utils.h
- theory/arith/arith_lemma.cpp
- theory/arith/arith_lemma.h
theory/arith/arith_msum.cpp
theory/arith/arith_msum.h
theory/arith/arith_preprocess.cpp
+++ /dev/null
-/********************* */
-/*! \file arith_lemma.cpp
- ** \verbatim
- ** Top contributors (to current version):
- ** Gereon Kremer
- ** This file is part of the CVC4 project.
- ** Copyright (c) 2009-2020 by the authors listed in the file AUTHORS
- ** in the top-level source directory and their institutional affiliations.
- ** All rights reserved. See the file COPYING in the top-level source
- ** directory for licensing information.\endverbatim
- **
- ** \brief ArithLemma class, derived from Lemma.
- **/
-
-#include "theory/arith/arith_lemma.h"
-
-namespace CVC4 {
-namespace theory {
-namespace arith {
-
-std::ostream& operator<<(std::ostream& out, const ArithLemma& al)
-{
- return out << al.d_node;
-}
-
-} // namespace arith
-} // namespace theory
-} // namespace CVC4
+++ /dev/null
-/********************* */
-/*! \file arith_lemma.h
- ** \verbatim
- ** Top contributors (to current version):
- ** Gereon Kremer
- ** This file is part of the CVC4 project.
- ** Copyright (c) 2009-2020 by the authors listed in the file AUTHORS
- ** in the top-level source directory and their institutional affiliations.
- ** All rights reserved. See the file COPYING in the top-level source
- ** directory for licensing information.\endverbatim
- **
- ** \brief ArithLemma class, derived from Lemma.
- **/
-
-#ifndef CVC4__THEORY__ARITH__ARITH_LEMMA_H
-#define CVC4__THEORY__ARITH__ARITH_LEMMA_H
-
-#include <tuple>
-#include <vector>
-
-#include "expr/node.h"
-#include "theory/inference_id.h"
-#include "theory/inference_manager_buffered.h"
-#include "theory/output_channel.h"
-#include "theory/theory_inference.h"
-
-namespace CVC4 {
-namespace theory {
-namespace arith {
-
-/**
- * The data structure for a single lemma to process by the arithmetic theory,
- * derived from theory::SimpleTheoryLemma.
- *
- * This also includes the inference type that produced this lemma.
- */
-class ArithLemma : public SimpleTheoryLemma
-{
- public:
- ArithLemma(Node n,
- LemmaProperty p,
- ProofGenerator* pg,
- InferenceId inf = InferenceId::UNKNOWN)
- : SimpleTheoryLemma(inf, n, p, pg)
- {
- }
- virtual ~ArithLemma() {}
-};
-/**
- * Writes an arithmetic lemma to a stream.
- */
-std::ostream& operator<<(std::ostream& out, const ArithLemma& al);
-
-} // namespace arith
-} // namespace theory
-} // namespace CVC4
-
-#endif /* CVC4__THEORY__ARITH__ARITH_LEMMA_H */
// tn is of kind REWRITE, turn this into a LEMMA here
TrustNode tlem = TrustNode::mkTrustLemma(tn.getProven(), tn.getGenerator());
// must preprocess
- d_im.trustedLemma(tlem, InferenceId::UNKNOWN);
+ d_im.trustedLemma(tlem, InferenceId::ARITH_PP_ELIM_OPERATORS);
// mark the atom as reduced
d_reduced[atom] = true;
return true;
{
}
-void InferenceManager::addPendingArithLemma(std::unique_ptr<ArithLemma> lemma,
- bool isWaiting)
+void InferenceManager::addPendingLemma(std::unique_ptr<SimpleTheoryLemma> lemma,
+ bool isWaiting)
{
Trace("arith::infman") << "Add " << lemma->getId() << " " << lemma->d_node
<< (isWaiting ? " as waiting" : "") << std::endl;
d_pendingLem.emplace_back(std::move(lemma));
}
}
-void InferenceManager::addPendingArithLemma(const ArithLemma& lemma,
- bool isWaiting)
+void InferenceManager::addPendingLemma(const SimpleTheoryLemma& lemma,
+ bool isWaiting)
{
- addPendingArithLemma(std::unique_ptr<ArithLemma>(new ArithLemma(lemma)),
- isWaiting);
+ addPendingLemma(
+ std::unique_ptr<SimpleTheoryLemma>(new SimpleTheoryLemma(lemma)),
+ isWaiting);
}
-void InferenceManager::addPendingArithLemma(const Node& lemma,
- InferenceId inftype,
- ProofGenerator* pg,
- bool isWaiting,
- LemmaProperty p)
+void InferenceManager::addPendingLemma(const Node& lemma,
+ InferenceId inftype,
+ ProofGenerator* pg,
+ bool isWaiting,
+ LemmaProperty p)
{
- addPendingArithLemma(
- std::unique_ptr<ArithLemma>(new ArithLemma(lemma, p, pg, inftype)),
- isWaiting);
+ addPendingLemma(std::unique_ptr<SimpleTheoryLemma>(
+ new SimpleTheoryLemma(inftype, lemma, p, pg)),
+ isWaiting);
}
void InferenceManager::flushWaitingLemmas()
return TheoryInferenceManager::cacheLemma(rewritten, p);
}
-bool InferenceManager::isEntailedFalse(const ArithLemma& lem)
+bool InferenceManager::isEntailedFalse(const SimpleTheoryLemma& lem)
{
if (options::nlExtEntailConflicts())
{
#include <map>
#include <vector>
-#include "theory/arith/arith_lemma.h"
#include "theory/arith/arith_state.h"
#include "theory/inference_id.h"
#include "theory/arith/nl/nl_lemma_utils.h"
* If isWaiting is true, the lemma is first stored as waiting lemma and only
* added as pending lemma when calling flushWaitingLemmas.
*/
- void addPendingArithLemma(std::unique_ptr<ArithLemma> lemma,
- bool isWaiting = false);
+ void addPendingLemma(std::unique_ptr<SimpleTheoryLemma> lemma,
+ bool isWaiting = false);
/**
* Add a lemma as pending lemma to this inference manager.
* If isWaiting is true, the lemma is first stored as waiting lemma and only
* added as pending lemma when calling flushWaitingLemmas.
*/
- void addPendingArithLemma(const ArithLemma& lemma, bool isWaiting = false);
+ void addPendingLemma(const SimpleTheoryLemma& lemma, bool isWaiting = false);
/**
* Add a lemma as pending lemma to this inference manager.
* If isWaiting is true, the lemma is first stored as waiting lemma and only
* added as pending lemma when calling flushWaitingLemmas.
*/
- void addPendingArithLemma(const Node& lemma,
- InferenceId inftype,
- ProofGenerator* pg = nullptr,
- bool isWaiting = false,
- LemmaProperty p = LemmaProperty::NONE);
+ void addPendingLemma(const Node& lemma,
+ InferenceId inftype,
+ ProofGenerator* pg = nullptr,
+ bool isWaiting = false,
+ LemmaProperty p = LemmaProperty::NONE);
/**
* Flush all waiting lemmas to this inference manager (as pending
* Checks whether the lemma is entailed to be false. In this case, it is a
* conflict.
*/
- bool isEntailedFalse(const ArithLemma& lem);
+ bool isEntailedFalse(const SimpleTheoryLemma& lem);
/** The waiting lemmas. */
- std::vector<std::unique_ptr<ArithLemma>> d_waitingLem;
+ std::vector<std::unique_ptr<SimpleTheoryLemma>> d_waitingLem;
};
} // namespace arith
{
n = n.negate();
}
- d_im.addPendingArithLemma(NodeManager::currentNM()->mkOr(mis),
- InferenceId::ARITH_NL_CAD_CONFLICT);
+ d_im.addPendingLemma(NodeManager::currentNM()->mkOr(mis),
+ InferenceId::ARITH_NL_CAD_CONFLICT);
}
#else
Warning() << "Tried to use CadSolver but libpoly is not available. Compile "
Trace("nl-cad") << "Excluding " << first_var << " -> "
<< interval.d_interval << " using " << lemma
<< std::endl;
- d_im.addPendingArithLemma(lemma, InferenceId::ARITH_NL_CAD_EXCLUDED_INTERVAL);
+ d_im.addPendingLemma(lemma,
+ InferenceId::ARITH_NL_CAD_EXCLUDED_INTERVAL);
}
}
}
proof->addStep(
flem, PfRule::MACRO_SR_PRED_TRANSFORM, {split, k_eq}, {flem});
}
- d_data->d_im.addPendingArithLemma(
+ d_data->d_im.addPendingLemma(
flem, InferenceId::ARITH_NL_FACTOR, proof);
}
}
Node k_eq = k.eqNode(n);
Trace("nl-ext-factor") << "...adding factor skolem " << k << " == " << n
<< std::endl;
- d_data->d_im.addPendingArithLemma(k_eq, InferenceId::ARITH_NL_FACTOR, proof);
+ d_data->d_im.addPendingLemma(k_eq, InferenceId::ARITH_NL_FACTOR, proof);
d_factor_skolem[n] = k;
}
else
<< " (pre-rewrite : " << pr_iblem << ")" << std::endl;
// Trace("nl-ext-bound-lemma") << " intro new
// monomials = " << introNewTerms << std::endl;
- d_data->d_im.addPendingArithLemma(
- iblem, InferenceId::ARITH_NL_INFER_BOUNDS_NT, nullptr, introNewTerms);
+ d_data->d_im.addPendingLemma(iblem,
+ InferenceId::ARITH_NL_INFER_BOUNDS_NT,
+ nullptr,
+ introNewTerms);
}
}
}
rblem = Rewriter::rewrite(rblem);
Trace("nl-ext-rbound-lemma")
<< "Resolution bound lemma : " << rblem << std::endl;
- d_data->d_im.addPendingArithLemma(
+ d_data->d_im.addPendingLemma(
rblem, InferenceId::ARITH_NL_RES_INFER_BOUNDS);
}
}
}
unsigned r = 1;
- std::vector<ArithLemma> lemmas;
+ std::vector<SimpleTheoryLemma> lemmas;
// if (x,y,L) in cmp_infers, then x > y inferred as conclusion of L
// in lemmas
std::map<int, std::map<Node, std::map<Node, Node> > > cmp_infers;
{
if (r_lemmas.find(lemmas[i].d_node) == r_lemmas.end())
{
- d_data->d_im.addPendingArithLemma(lemmas[i]);
+ d_data->d_im.addPendingLemma(lemmas[i]);
}
}
// could only take maximal lower/minimial lower bounds?
{
Node lemma =
nm->mkAnd(exp).impNode(mkLit(oa, d_data->d_zero, status * 2));
- d_data->d_im.addPendingArithLemma(lemma, InferenceId::ARITH_NL_SIGN);
+ d_data->d_im.addPendingLemma(lemma, InferenceId::ARITH_NL_SIGN);
}
return status;
}
proof->addStep(conc, PfRule::MACRO_SR_PRED_INTRO, {prem}, {conc});
proof->addStep(lemma, PfRule::SCOPE, {conc}, {prem});
}
- d_data->d_im.addPendingArithLemma(lemma, InferenceId::ARITH_NL_SIGN, proof);
+ d_data->d_im.addPendingLemma(lemma, InferenceId::ARITH_NL_SIGN, proof);
}
return 0;
}
Node b,
NodeMultiset& b_exp_proc,
std::vector<Node>& exp,
- std::vector<ArithLemma>& lem,
+ std::vector<SimpleTheoryLemma>& lem,
std::map<int, std::map<Node, std::map<Node, Node> > >& cmp_infers)
{
Trace("nl-ext-comp-debug")
NodeMultiset& b_exp_proc,
int status,
std::vector<Node>& exp,
- std::vector<ArithLemma>& lem,
+ std::vector<SimpleTheoryLemma>& lem,
std::map<int, std::map<Node, std::map<Node, Node> > >& cmp_infers)
{
Trace("nl-ext-comp-debug")
Kind::IMPLIES, nm->mkAnd(exp), mkLit(oa, ob, status, true));
Trace("nl-ext-comp-lemma") << "comparison lemma : " << clem << std::endl;
lem.emplace_back(
- clem, LemmaProperty::NONE, nullptr, InferenceId::ARITH_NL_COMPARISON);
+ InferenceId::ARITH_NL_COMPARISON, clem, LemmaProperty::NONE, nullptr);
cmp_infers[status][oa][ob] = clem;
}
return true;
Node b,
NodeMultiset& b_exp_proc,
std::vector<Node>& exp,
- std::vector<ArithLemma>& lem,
+ std::vector<SimpleTheoryLemma>& lem,
std::map<int, std::map<Node, std::map<Node, Node> > >& cmp_infers);
/** helper function for above
*
NodeMultiset& b_exp_proc,
int status,
std::vector<Node>& exp,
- std::vector<ArithLemma>& lem,
+ std::vector<SimpleTheoryLemma>& lem,
std::map<int, std::map<Node, std::map<Node, Node> > >& cmp_infers);
/** Check whether we have already inferred a relationship between monomials
* x and y based on the information in cmp_infers. This computes the
proof->addStep(lem, PfRule::SPLIT, {}, {eq});
}
d_data->d_im.addPendingPhaseRequirement(eq, true);
- d_data->d_im.addPendingArithLemma(lem, InferenceId::ARITH_NL_SPLIT_ZERO, proof);
+ d_data->d_im.addPendingLemma(
+ lem, InferenceId::ARITH_NL_SPLIT_ZERO, proof);
}
}
}
b_v,
nm->mkConst(Rational(d == 0 ? -1 : 1))});
}
- d_data->d_im.addPendingArithLemma(
- tlem, InferenceId::ARITH_NL_TANGENT_PLANE, proof, asWaitingLemmas);
+ d_data->d_im.addPendingLemma(tlem,
+ InferenceId::ARITH_NL_TANGENT_PLANE,
+ proof,
+ asWaitingLemmas);
}
}
}
Node lem = conj.size() == 1 ? conj[0] : nm->mkNode(AND, conj);
Trace("iand-lemma") << "IAndSolver::Lemma: " << lem << " ; INIT_REFINE"
<< std::endl;
- d_im.addPendingArithLemma(lem, InferenceId::ARITH_NL_IAND_INIT_REFINE);
+ d_im.addPendingLemma(lem, InferenceId::ARITH_NL_IAND_INIT_REFINE);
}
}
}
<< "IAndSolver::Lemma: " << lem << " ; SUM_REFINE" << std::endl;
// note that lemma can contain div/mod, and will be preprocessed in the
// prop engine
- d_im.addPendingArithLemma(
+ d_im.addPendingLemma(
lem, InferenceId::ARITH_NL_IAND_SUM_REFINE, nullptr, true);
}
else if (options::iandMode() == options::IandMode::BITWISE)
<< "IAndSolver::Lemma: " << lem << " ; BITWISE_REFINE" << std::endl;
// note that lemma can contain div/mod, and will be preprocessed in the
// prop engine
- d_im.addPendingArithLemma(
+ d_im.addPendingLemma(
lem, InferenceId::ARITH_NL_IAND_BITWISE_REFINE, nullptr, true);
}
else
Trace("iand-lemma")
<< "IAndSolver::Lemma: " << lem << " ; VALUE_REFINE" << std::endl;
// value lemmas should not contain div/mod so we don't need to tag it with PREPROCESS
- d_im.addPendingArithLemma(lem,
- InferenceId::ARITH_NL_IAND_VALUE_REFINE,
- nullptr,
- true,
- LemmaProperty::NONE);
+ d_im.addPendingLemma(lem,
+ InferenceId::ARITH_NL_IAND_VALUE_REFINE,
+ nullptr,
+ true,
+ LemmaProperty::NONE);
}
}
}
{
mis.emplace_back(n.negate());
}
- d_im.addPendingArithLemma(NodeManager::currentNM()->mkOr(mis),
- InferenceId::ARITH_NL_ICP_CONFLICT);
+ d_im.addPendingLemma(NodeManager::currentNM()->mkOr(mis),
+ InferenceId::ARITH_NL_ICP_CONFLICT);
did_progress = true;
progress = false;
break;
std::vector<Node> lemmas = generateLemmas();
for (const auto& l : lemmas)
{
- d_im.addPendingArithLemma(l, InferenceId::ARITH_NL_ICP_PROPAGATION);
+ d_im.addPendingLemma(l, InferenceId::ARITH_NL_ICP_PROPAGATION);
}
}
}
bool NlLemma::process(TheoryInferenceManager* im, bool asLemma)
{
- bool res = ArithLemma::process(im, asLemma);
+ bool res = SimpleTheoryLemma::process(im, asLemma);
if (d_nlext != nullptr)
{
d_nlext->processSideEffect(*this);
#include <vector>
#include "expr/node.h"
-#include "theory/arith/arith_lemma.h"
-#include "theory/output_channel.h"
+#include "theory/theory_inference.h"
namespace CVC4 {
namespace theory {
* - A set of secant points to record (for transcendental secant plane
* inferences).
*/
-class NlLemma : public ArithLemma
+class NlLemma : public SimpleTheoryLemma
{
public:
- NlLemma(Node n,
- LemmaProperty p,
- ProofGenerator* pg,
- InferenceId inf = InferenceId::UNKNOWN)
- : ArithLemma(n, p, pg, inf)
- {
- }
- NlLemma(Node n, InferenceId inf = InferenceId::UNKNOWN)
- : ArithLemma(n, LemmaProperty::NONE, nullptr, inf)
+ NlLemma(InferenceId inf,
+ Node n,
+ LemmaProperty p = LemmaProperty::NONE,
+ ProofGenerator* pg = nullptr)
+ : SimpleTheoryLemma(inf, n, p, pg)
{
}
~NlLemma() {}
Node conf = seq.negate();
Trace("nl-ext-lemma") << "NlModel::Lemma : quadratic no root : " << conf
<< std::endl;
- lemmas.push_back(conf);
+ lemmas.emplace_back(InferenceId::ARITH_NL_CM_QUADRATIC_EQ, conf);
Trace("nl-ext-cms") << "...fail due to negative discriminant." << std::endl;
return false;
}
<< " assertions" << std::endl;
}
-unsigned NonlinearExtension::filterLemma(NlLemma lem, std::vector<NlLemma>& out)
-{
- Trace("nl-ext-lemma-debug")
- << "NonlinearExtension::Lemma pre-rewrite : " << lem.d_node << std::endl;
- lem.d_node = Rewriter::rewrite(lem.d_node);
-
- if (d_im.hasCachedLemma(lem.d_node, lem.d_property))
- {
- Trace("nl-ext-lemma-debug")
- << "NonlinearExtension::Lemma duplicate : " << lem.d_node << std::endl;
- return 0;
- }
- out.emplace_back(lem);
- return 1;
-}
-
-unsigned NonlinearExtension::filterLemmas(std::vector<NlLemma>& lemmas,
- std::vector<NlLemma>& out)
-{
- if (options::nlExtEntailConflicts())
- {
- // check if any are entailed to be false
- for (const NlLemma& lem : lemmas)
- {
- Node ch_lemma = lem.d_node.negate();
- ch_lemma = Rewriter::rewrite(ch_lemma);
- Trace("nl-ext-et-debug")
- << "Check entailment of " << ch_lemma << "..." << std::endl;
- std::pair<bool, Node> et = d_containing.getValuation().entailmentCheck(
- options::TheoryOfMode::THEORY_OF_TYPE_BASED, ch_lemma);
- Trace("nl-ext-et-debug") << "entailment test result : " << et.first << " "
- << et.second << std::endl;
- if (et.first)
- {
- Trace("nl-ext-et") << "*** Lemma entailed to be in conflict : "
- << lem.d_node << std::endl;
- // return just this lemma
- if (filterLemma(lem, out) > 0)
- {
- lemmas.clear();
- return 1;
- }
- }
- }
- }
-
- unsigned sum = 0;
- for (const NlLemma& lem : lemmas)
- {
- sum += filterLemma(lem, out);
- d_containing.getOutputChannel().spendResource(
- ResourceManager::Resource::ArithNlLemmaStep);
- }
- lemmas.clear();
- return sum;
-}
-
void NonlinearExtension::getAssertions(std::vector<Node>& assertions)
{
Trace("nl-ext-assert-debug") << "Getting assertions..." << std::endl;
bool ret = d_model.checkModel(passertions, tdegree, lemmas);
for (const auto& al: lemmas)
{
- d_im.addPendingArithLemma(al);
+ d_im.addPendingLemma(al);
}
return ret;
}
d_containing.getOutputChannel().requirePhase(literal, true);
Trace("nl-ext-debug") << "Split on : " << literal << std::endl;
Node split = literal.orNode(literal.negate());
- NlLemma nsplit(split, InferenceId::ARITH_NL_SHARED_TERM_VALUE_SPLIT);
- d_im.addPendingArithLemma(nsplit, true);
+ d_im.addPendingLemma(split,
+ InferenceId::ARITH_NL_SHARED_TERM_VALUE_SPLIT,
+ nullptr,
+ true);
}
if (d_im.hasWaitingLemma())
{
/** compute relevant assertions */
void computeRelevantAssertions(const std::vector<Node>& assertions,
std::vector<Node>& keep);
- /**
- * Potentially adds lemmas to the set out and clears lemmas. Returns
- * the number of lemmas added to out. We do not add lemmas that have already
- * been sent on the output channel of TheoryArith.
- */
- unsigned filterLemmas(std::vector<NlLemma>& lemmas,
- std::vector<NlLemma>& out);
- /** singleton version of above */
- unsigned filterLemma(NlLemma lem, std::vector<NlLemma>& out);
/** run check strategy
*
// note we must do preprocess on this lemma
Trace("nl-ext-lemma") << "NonlinearExtension::Lemma : purify : " << lem
<< std::endl;
- NlLemma nlem(lem, LemmaProperty::NONE, nullptr, InferenceId::ARITH_NL_T_PURIFY_ARG);
- d_data->d_im.addPendingArithLemma(nlem);
+ d_data->d_im.addPendingLemma(lem, InferenceId::ARITH_NL_T_PURIFY_ARG);
}
void ExponentialSolver::checkInitialRefine()
{
// exp is always positive: exp(t) > 0
Node lem = nm->mkNode(Kind::GT, t, d_data->d_zero);
- d_data->d_im.addPendingArithLemma(
+ d_data->d_im.addPendingLemma(
lem, InferenceId::ARITH_NL_T_INIT_REFINE, nullptr);
}
{
Node lem = nm->mkNode(Kind::EQUAL,
t[0].eqNode(d_data->d_zero),
t.eqNode(d_data->d_one));
- d_data->d_im.addPendingArithLemma(
+ d_data->d_im.addPendingLemma(
lem, InferenceId::ARITH_NL_T_INIT_REFINE, nullptr);
}
{
Node lem = nm->mkNode(Kind::EQUAL,
nm->mkNode(Kind::LT, t[0], d_data->d_zero),
nm->mkNode(Kind::LT, t, d_data->d_one));
- d_data->d_im.addPendingArithLemma(
+ d_data->d_im.addPendingLemma(
lem, InferenceId::ARITH_NL_T_INIT_REFINE, nullptr);
}
{
nm->mkNode(Kind::LEQ, t[0], d_data->d_zero),
nm->mkNode(
Kind::GT, t, nm->mkNode(Kind::PLUS, t[0], d_data->d_one)));
- d_data->d_im.addPendingArithLemma(
+ d_data->d_im.addPendingLemma(
lem, InferenceId::ARITH_NL_T_INIT_REFINE, nullptr);
}
}
nm->mkNode(Kind::GEQ, t, s));
Trace("nl-ext-exp") << "Monotonicity lemma : " << mono_lem << std::endl;
- d_data->d_im.addPendingArithLemma(mono_lem,
- InferenceId::ARITH_NL_T_MONOTONICITY);
+ d_data->d_im.addPendingLemma(mono_lem,
+ InferenceId::ARITH_NL_T_MONOTONICITY);
}
// store the previous values
targ = sarg;
Trace("nl-ext-exp") << "*** Tangent plane lemma : " << lem << std::endl;
Assert(d_data->d_model.computeAbstractModelValue(lem) == d_data->d_false);
// Figure 3 : line 9
- d_data->d_im.addPendingArithLemma(lem, InferenceId::ARITH_NL_T_TANGENT, nullptr, true);
+ d_data->d_im.addPendingLemma(
+ lem, InferenceId::ARITH_NL_T_TANGENT, nullptr, true);
}
void ExponentialSolver::doSecantLemmas(TNode e,
// note we must do preprocess on this lemma
Trace("nl-ext-lemma") << "NonlinearExtension::Lemma : purify : " << lem
<< std::endl;
- NlLemma nlem(lem, LemmaProperty::NONE, proof, InferenceId::ARITH_NL_T_PURIFY_ARG);
- d_data->d_im.addPendingArithLemma(nlem);
+ d_data->d_im.addPendingLemma(lem, InferenceId::ARITH_NL_T_PURIFY_ARG, proof);
}
void SineSolver::checkInitialRefine()
Node lem = nm->mkNode(Kind::AND,
nm->mkNode(Kind::LEQ, t, d_data->d_one),
nm->mkNode(Kind::GEQ, t, d_data->d_neg_one));
- d_data->d_im.addPendingArithLemma(
- lem, InferenceId::ARITH_NL_T_INIT_REFINE);
+ d_data->d_im.addPendingLemma(lem,
+ InferenceId::ARITH_NL_T_INIT_REFINE);
}
{
// sine symmetry: sin(t) - sin(-t) = 0
Node lem = nm->mkNode(Kind::PLUS, t, symn).eqNode(d_data->d_zero);
- d_data->d_im.addPendingArithLemma(
- lem, InferenceId::ARITH_NL_T_INIT_REFINE);
+ d_data->d_im.addPendingLemma(lem,
+ InferenceId::ARITH_NL_T_INIT_REFINE);
}
{
// sine zero tangent:
nm->mkNode(Kind::IMPLIES,
nm->mkNode(Kind::LT, t[0], d_data->d_zero),
nm->mkNode(Kind::GT, t, t[0])));
- d_data->d_im.addPendingArithLemma(
- lem, InferenceId::ARITH_NL_T_INIT_REFINE);
+ d_data->d_im.addPendingLemma(lem,
+ InferenceId::ARITH_NL_T_INIT_REFINE);
}
{
// sine pi tangent:
nm->mkNode(Kind::LT,
t,
nm->mkNode(Kind::MINUS, d_data->d_pi, t[0]))));
- d_data->d_im.addPendingArithLemma(
- lem, InferenceId::ARITH_NL_T_INIT_REFINE);
+ d_data->d_im.addPendingLemma(lem,
+ InferenceId::ARITH_NL_T_INIT_REFINE);
}
{
Node lem =
nm->mkNode(Kind::EQUAL,
nm->mkNode(Kind::GT, t[0], d_data->d_zero),
nm->mkNode(Kind::GT, t, d_data->d_zero)));
- d_data->d_im.addPendingArithLemma(
- lem, InferenceId::ARITH_NL_T_INIT_REFINE);
+ d_data->d_im.addPendingLemma(lem,
+ InferenceId::ARITH_NL_T_INIT_REFINE);
}
}
}
Trace("nl-ext-tf-mono")
<< "Monotonicity lemma : " << mono_lem << std::endl;
- d_data->d_im.addPendingArithLemma(mono_lem,
- InferenceId::ARITH_NL_T_MONOTONICITY);
+ d_data->d_im.addPendingLemma(mono_lem,
+ InferenceId::ARITH_NL_T_MONOTONICITY);
}
}
// store the previous values
Trace("nl-ext-sine") << "*** Tangent plane lemma : " << lem << std::endl;
Assert(d_data->d_model.computeAbstractModelValue(lem) == d_data->d_false);
// Figure 3 : line 9
- d_data->d_im.addPendingArithLemma(
+ d_data->d_im.addPendingLemma(
lem, InferenceId::ARITH_NL_T_TANGENT, nullptr, true);
}
}
Node expn = exp.size() == 1 ? exp[0] : nm->mkNode(Kind::AND, exp);
Node cong_lemma = expn.impNode(a.eqNode(aa));
- d_im.addPendingArithLemma(cong_lemma, InferenceId::ARITH_NL_CONGRUENCE);
+ d_im.addPendingLemma(cong_lemma, InferenceId::ARITH_NL_CONGRUENCE);
}
}
else
proof->addStep(
pi_lem, PfRule::ARITH_TRANS_PI, {}, {d_pi_bound[0], d_pi_bound[1]});
}
- d_im.addPendingArithLemma(pi_lem, InferenceId::ARITH_NL_T_PI_BOUND, proof);
+ d_im.addPendingLemma(pi_lem, InferenceId::ARITH_NL_T_PI_BOUND, proof);
}
std::pair<Node, Node> TranscendentalState::getClosestSecantPoints(TNode e,
lem = Rewriter::rewrite(lem);
Trace("nl-trans-lemma") << "*** Secant plane lemma : " << lem << std::endl;
Assert(d_model.computeAbstractModelValue(lem) == d_false);
- return NlLemma(lem, LemmaProperty::NONE, nullptr, InferenceId::ARITH_NL_T_SECANT);
+ return NlLemma(InferenceId::ARITH_NL_T_SECANT, lem);
}
void TranscendentalState::doSecantLemmas(const std::pair<Node, Node>& bounds,
// The side effect says that if lem is added, then we should add the
// secant point c for (tf,d).
nlem.d_secantPoint.push_back(std::make_tuple(tf, d, center));
- d_im.addPendingArithLemma(nlem, true);
+ d_im.addPendingLemma(nlem, true);
}
// take the model value of upper (since may contain PI)
// The side effect says that if lem is added, then we should add the
// secant point c for (tf,d).
nlem.d_secantPoint.push_back(std::make_tuple(tf, d, center));
- d_im.addPendingArithLemma(nlem, true);
+ d_im.addPendingLemma(nlem, true);
}
}
{
switch (i)
{
- case InferenceId::ARITH_NL_CONGRUENCE: return "CONGRUENCE";
- case InferenceId::ARITH_NL_SHARED_TERM_VALUE_SPLIT: return "SHARED_TERM_VALUE_SPLIT";
- case InferenceId::ARITH_NL_SPLIT_ZERO: return "SPLIT_ZERO";
- case InferenceId::ARITH_NL_SIGN: return "SIGN";
- case InferenceId::ARITH_NL_COMPARISON: return "COMPARISON";
- case InferenceId::ARITH_NL_INFER_BOUNDS: return "INFER_BOUNDS";
- case InferenceId::ARITH_NL_INFER_BOUNDS_NT: return "INFER_BOUNDS_NT";
- case InferenceId::ARITH_NL_FACTOR: return "FACTOR";
- case InferenceId::ARITH_NL_RES_INFER_BOUNDS: return "RES_INFER_BOUNDS";
- case InferenceId::ARITH_NL_TANGENT_PLANE: return "TANGENT_PLANE";
- case InferenceId::ARITH_NL_T_PURIFY_ARG: return "T_PURIFY_ARG";
- case InferenceId::ARITH_NL_T_INIT_REFINE: return "T_INIT_REFINE";
- case InferenceId::ARITH_NL_T_PI_BOUND: return "T_PI_BOUND";
- case InferenceId::ARITH_NL_T_MONOTONICITY: return "T_MONOTONICITY";
- case InferenceId::ARITH_NL_T_SECANT: return "T_SECANT";
- case InferenceId::ARITH_NL_T_TANGENT: return "T_TANGENT";
- case InferenceId::ARITH_NL_IAND_INIT_REFINE: return "IAND_INIT_REFINE";
- case InferenceId::ARITH_NL_IAND_VALUE_REFINE: return "IAND_VALUE_REFINE";
- case InferenceId::ARITH_NL_IAND_SUM_REFINE: return "IAND_SUM_REFINE";
- case InferenceId::ARITH_NL_IAND_BITWISE_REFINE: return "IAND_BITWISE_REFINE";
- case InferenceId::ARITH_NL_CAD_CONFLICT: return "CAD_CONFLICT";
- case InferenceId::ARITH_NL_CAD_EXCLUDED_INTERVAL: return "CAD_EXCLUDED_INTERVAL";
- case InferenceId::ARITH_NL_ICP_CONFLICT: return "ICP_CONFLICT";
- case InferenceId::ARITH_NL_ICP_PROPAGATION: return "ICP_PROPAGATION";
+ case InferenceId::ARITH_PP_ELIM_OPERATORS: return "ARITH_PP_ELIM_OPERATORS";
+ case InferenceId::ARITH_NL_CONGRUENCE: return "ARITH_NL_CONGRUENCE";
+ case InferenceId::ARITH_NL_SHARED_TERM_VALUE_SPLIT:
+ return "ARITH_NL_SHARED_TERM_VALUE_SPLIT";
+ case InferenceId::ARITH_NL_CM_QUADRATIC_EQ:
+ return "ARITH_NL_CM_QUADRATIC_EQ";
+ case InferenceId::ARITH_NL_SPLIT_ZERO: return "ARITH_NL_SPLIT_ZERO";
+ case InferenceId::ARITH_NL_SIGN: return "ARITH_NL_SIGN";
+ case InferenceId::ARITH_NL_COMPARISON: return "ARITH_NL_COMPARISON";
+ case InferenceId::ARITH_NL_INFER_BOUNDS: return "ARITH_NL_INFER_BOUNDS";
+ case InferenceId::ARITH_NL_INFER_BOUNDS_NT:
+ return "ARITH_NL_INFER_BOUNDS_NT";
+ case InferenceId::ARITH_NL_FACTOR: return "ARITH_NL_FACTOR";
+ case InferenceId::ARITH_NL_RES_INFER_BOUNDS:
+ return "ARITH_NL_RES_INFER_BOUNDS";
+ case InferenceId::ARITH_NL_TANGENT_PLANE: return "ARITH_NL_TANGENT_PLANE";
+ case InferenceId::ARITH_NL_T_PURIFY_ARG: return "ARITH_NL_T_PURIFY_ARG";
+ case InferenceId::ARITH_NL_T_INIT_REFINE: return "ARITH_NL_T_INIT_REFINE";
+ case InferenceId::ARITH_NL_T_PI_BOUND: return "ARITH_NL_T_PI_BOUND";
+ case InferenceId::ARITH_NL_T_MONOTONICITY: return "ARITH_NL_T_MONOTONICITY";
+ case InferenceId::ARITH_NL_T_SECANT: return "ARITH_NL_T_SECANT";
+ case InferenceId::ARITH_NL_T_TANGENT: return "ARITH_NL_T_TANGENT";
+ case InferenceId::ARITH_NL_IAND_INIT_REFINE:
+ return "ARITH_NL_IAND_INIT_REFINE";
+ case InferenceId::ARITH_NL_IAND_VALUE_REFINE:
+ return "ARITH_NL_IAND_VALUE_REFINE";
+ case InferenceId::ARITH_NL_IAND_SUM_REFINE:
+ return "ARITH_NL_IAND_SUM_REFINE";
+ case InferenceId::ARITH_NL_IAND_BITWISE_REFINE:
+ return "ARITH_NL_IAND_BITWISE_REFINE";
+ case InferenceId::ARITH_NL_CAD_CONFLICT: return "ARITH_NL_CAD_CONFLICT";
+ case InferenceId::ARITH_NL_CAD_EXCLUDED_INTERVAL:
+ return "ARITH_NL_CAD_EXCLUDED_INTERVAL";
+ case InferenceId::ARITH_NL_ICP_CONFLICT: return "ARITH_NL_ICP_CONFLICT";
+ case InferenceId::ARITH_NL_ICP_PROPAGATION:
+ return "ARITH_NL_ICP_PROPAGATION";
case InferenceId::ARRAYS_EXT: return "ARRAYS_EXT";
case InferenceId::ARRAYS_READ_OVER_WRITE: return "ARRAYS_READ_OVER_WRITE";
enum class InferenceId
{
// ---------------------------------- arith theory
- //-------------------- core
+ //-------------------- preprocessing
+ ARITH_PP_ELIM_OPERATORS,
+ //-------------------- nonlinear core
// simple congruence x=y => f(x)=f(y)
ARITH_NL_CONGRUENCE,
// shared term value split (for naive theory combination)
ARITH_NL_SHARED_TERM_VALUE_SPLIT,
- //-------------------- incremental linearization solver
+ // checkModel found a conflict with a quadratic equality
+ ARITH_NL_CM_QUADRATIC_EQ,
+ //-------------------- nonlinear incremental linearization solver
// splitting on zero (NlSolver::checkSplitZero)
ARITH_NL_SPLIT_ZERO,
// based on sign (NlSolver::checkMonomialSign)
ARITH_NL_RES_INFER_BOUNDS,
// tangent planes (NlSolver::checkTangentPlanes)
ARITH_NL_TANGENT_PLANE,
- //-------------------- transcendental solver
+ //-------------------- nonlinear transcendental solver
// purification of arguments to transcendental functions
ARITH_NL_T_PURIFY_ARG,
// initial refinement (TranscendentalSolver::checkTranscendentalInitialRefine)
ARITH_NL_T_TANGENT,
// secant refinement, the dual of the above inference
ARITH_NL_T_SECANT,
- //-------------------- iand solver
+ //-------------------- nonlinear iand solver
// initial refinements (IAndSolver::checkInitialRefine)
ARITH_NL_IAND_INIT_REFINE,
// value refinements (IAndSolver::checkFullRefine)
ARITH_NL_IAND_SUM_REFINE,
// bitwise refinements (IAndSolver::checkFullRefine)
ARITH_NL_IAND_BITWISE_REFINE,
- //-------------------- cad solver
+ //-------------------- nonlinear cad solver
// conflict / infeasible subset obtained from cad
ARITH_NL_CAD_CONFLICT,
// excludes an interval for a single variable
ARITH_NL_CAD_EXCLUDED_INTERVAL,
- //-------------------- icp solver
+ //-------------------- nonlinear icp solver
// conflict obtained from icp
ARITH_NL_ICP_CONFLICT,
// propagation / contraction of variable bounds from icp