Also creates the CVC4::theory::arith::nl namespace.
This includes some formatting changes.
theory/arith/linear_equality.h
theory/arith/matrix.cpp
theory/arith/matrix.h
- theory/arith/nl_constraint.cpp
- theory/arith/nl_constraint.h
- theory/arith/nl_lemma_utils.cpp
- theory/arith/nl_lemma_utils.h
- theory/arith/nl_model.cpp
- theory/arith/nl_model.h
- theory/arith/nl_monomial.cpp
- theory/arith/nl_monomial.h
- theory/arith/nl_solver.cpp
- theory/arith/nl_solver.h
- theory/arith/nonlinear_extension.cpp
- theory/arith/nonlinear_extension.h
+ theory/arith/nl/nl_constraint.cpp
+ theory/arith/nl/nl_constraint.h
+ theory/arith/nl/nl_lemma_utils.cpp
+ theory/arith/nl/nl_lemma_utils.h
+ theory/arith/nl/nl_model.cpp
+ theory/arith/nl/nl_model.h
+ theory/arith/nl/nl_monomial.cpp
+ theory/arith/nl/nl_monomial.h
+ theory/arith/nl/nl_solver.cpp
+ theory/arith/nl/nl_solver.h
+ theory/arith/nl/nonlinear_extension.cpp
+ theory/arith/nl/nonlinear_extension.h
+ theory/arith/nl/transcendental_solver.cpp
+ theory/arith/nl/transcendental_solver.h
theory/arith/normal_form.cpp
theory/arith/normal_form.h
theory/arith/partial_model.cpp
theory/arith/theory_arith_private.h
theory/arith/theory_arith_private_forward.h
theory/arith/theory_arith_type_rules.h
- theory/arith/transcendental_solver.cpp
- theory/arith/transcendental_solver.h
theory/arith/type_enumerator.h
theory/arrays/array_info.cpp
theory/arrays/array_info.h
--- /dev/null
+/********************* */
+/*! \file nl_constraint.cpp
+ ** \verbatim
+ ** Top contributors (to current version):
+ ** Andrew Reynolds
+ ** This file is part of the CVC4 project.
+ ** Copyright (c) 2009-2019 by the authors listed in the file AUTHORS
+ ** in the top-level source directory) and their institutional affiliations.
+ ** All rights reserved. See the file COPYING in the top-level source
+ ** directory for licensing information.\endverbatim
+ **
+ ** \brief Implementation of utilities for non-linear constraints
+ **/
+
+#include "theory/arith/nl/nl_constraint.h"
+
+#include "theory/arith/arith_msum.h"
+#include "theory/arith/arith_utilities.h"
+
+using namespace CVC4::kind;
+
+namespace CVC4 {
+namespace theory {
+namespace arith {
+namespace nl {
+
+ConstraintDb::ConstraintDb(MonomialDb& mdb) : d_mdb(mdb) {}
+
+void ConstraintDb::registerConstraint(Node atom)
+{
+ if (std::find(d_constraints.begin(), d_constraints.end(), atom)
+ != d_constraints.end())
+ {
+ return;
+ }
+ d_constraints.push_back(atom);
+ Trace("nl-ext-debug") << "Register constraint : " << atom << std::endl;
+ std::map<Node, Node> msum;
+ if (ArithMSum::getMonomialSumLit(atom, msum))
+ {
+ Trace("nl-ext-debug") << "got monomial sum: " << std::endl;
+ if (Trace.isOn("nl-ext-debug"))
+ {
+ ArithMSum::debugPrintMonomialSum(msum, "nl-ext-debug");
+ }
+ unsigned max_degree = 0;
+ std::vector<Node> all_m;
+ std::vector<Node> max_deg_m;
+ for (std::map<Node, Node>::iterator itm = msum.begin(); itm != msum.end();
+ ++itm)
+ {
+ if (!itm->first.isNull())
+ {
+ all_m.push_back(itm->first);
+ d_mdb.registerMonomial(itm->first);
+ Trace("nl-ext-debug2")
+ << "...process monomial " << itm->first << std::endl;
+ unsigned d = d_mdb.getDegree(itm->first);
+ if (d > max_degree)
+ {
+ max_degree = d;
+ max_deg_m.clear();
+ }
+ if (d >= max_degree)
+ {
+ max_deg_m.push_back(itm->first);
+ }
+ }
+ }
+ // isolate for each maximal degree monomial
+ for (unsigned i = 0; i < all_m.size(); i++)
+ {
+ Node m = all_m[i];
+ Node rhs, coeff;
+ int res = ArithMSum::isolate(m, msum, coeff, rhs, atom.getKind());
+ if (res != 0)
+ {
+ Kind type = atom.getKind();
+ if (res == -1)
+ {
+ type = reverseRelationKind(type);
+ }
+ Trace("nl-ext-constraint") << "Constraint : " << atom << " <=> ";
+ if (!coeff.isNull())
+ {
+ Trace("nl-ext-constraint") << coeff << " * ";
+ }
+ Trace("nl-ext-constraint")
+ << m << " " << type << " " << rhs << std::endl;
+ ConstraintInfo& ci = d_c_info[atom][m];
+ ci.d_rhs = rhs;
+ ci.d_coeff = coeff;
+ ci.d_type = type;
+ }
+ }
+ for (unsigned i = 0; i < max_deg_m.size(); i++)
+ {
+ Node m = max_deg_m[i];
+ d_c_info_maxm[atom][m] = true;
+ }
+ }
+ else
+ {
+ Trace("nl-ext-debug") << "...failed to get monomial sum." << std::endl;
+ }
+}
+
+const std::map<Node, std::map<Node, ConstraintInfo> >&
+ConstraintDb::getConstraints()
+{
+ return d_c_info;
+}
+
+bool ConstraintDb::isMaximal(Node atom, Node x) const
+{
+ std::map<Node, std::map<Node, bool> >::const_iterator itcm =
+ d_c_info_maxm.find(atom);
+ Assert(itcm != d_c_info_maxm.end());
+ return itcm->second.find(x) != itcm->second.end();
+}
+
+} // namespace nl
+} // namespace arith
+} // namespace theory
+} // namespace CVC4
--- /dev/null
+/********************* */
+/*! \file nl_constraint.h
+ ** \verbatim
+ ** Top contributors (to current version):
+ ** Andrew Reynolds, Tim King
+ ** This file is part of the CVC4 project.
+ ** Copyright (c) 2009-2019 by the authors listed in the file AUTHORS
+ ** in the top-level source directory) and their institutional affiliations.
+ ** All rights reserved. See the file COPYING in the top-level source
+ ** directory for licensing information.\endverbatim
+ **
+ ** \brief Utilities for non-linear constraints
+ **/
+
+#ifndef CVC4__THEORY__ARITH__NL__NL_CONSTRAINT_H
+#define CVC4__THEORY__ARITH__NL__NL_CONSTRAINT_H
+
+#include <map>
+#include <vector>
+
+#include "expr/kind.h"
+#include "expr/node.h"
+#include "theory/arith/nl/nl_monomial.h"
+
+namespace CVC4 {
+namespace theory {
+namespace arith {
+namespace nl {
+
+/** constraint information
+ *
+ * The struct ( d_rhs, d_coeff, d_type ) represents that a literal is of the
+ * form (d_coeff * x) <d_type> d_rhs.
+ */
+struct ConstraintInfo
+{
+ public:
+ /** The term on the right hand side of the constraint */
+ Node d_rhs;
+ /** The coefficent */
+ Node d_coeff;
+ /** The type (relation) of the constraint */
+ Kind d_type;
+}; /* struct ConstraintInfo */
+
+/** A database for constraints */
+class ConstraintDb
+{
+ public:
+ ConstraintDb(MonomialDb& mdb);
+ ~ConstraintDb() {}
+ /** register constraint
+ *
+ * This ensures that atom is in the domain of the constraints maintained by
+ * this database.
+ */
+ void registerConstraint(Node atom);
+ /** get constraints
+ *
+ * Returns a map m such that whenever
+ * m[lit][x] = ( r, coeff, k ), then
+ * ( lit <=> (coeff * x) <k> r )
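+ *
+ * For example, after registering the atom x*y + 2*x >= 5, the entry
+ * m[atom][x*y] represents the constraint x*y >= 5 - 2*x (with a null
+ * coefficient) and m[atom][x] represents 2*x >= 5 - x*y, modulo how the
+ * right-hand sides are normalized by rewriting.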
+ */
+ const std::map<Node, std::map<Node, ConstraintInfo> >& getConstraints();
+ /** Returns true if m is of maximal degree in atom
+ *
+ * For example, for atom x^2 + x*y + y >=0, the monomials x^2 and x*y
+ * are of maximal degree (2).
+ */
+ bool isMaximal(Node atom, Node m) const;
+
+ private:
+ /** Reference to a monomial database */
+ MonomialDb& d_mdb;
+ /** List of all constraints */
+ std::vector<Node> d_constraints;
+ /** Is maximal degree */
+ std::map<Node, std::map<Node, bool> > d_c_info_maxm;
+ /** Constraint information */
+ std::map<Node, std::map<Node, ConstraintInfo> > d_c_info;
+};
+
+} // namespace nl
+} // namespace arith
+} // namespace theory
+} // namespace CVC4
+
+#endif /* CVC4__THEORY__ARITH__NL__NL_CONSTRAINT_H */
--- /dev/null
+/********************* */
+/*! \file nl_lemma_utils.cpp
+ ** \verbatim
+ ** Top contributors (to current version):
+ ** Andrew Reynolds
+ ** This file is part of the CVC4 project.
+ ** Copyright (c) 2009-2019 by the authors listed in the file AUTHORS
+ ** in the top-level source directory) and their institutional affiliations.
+ ** All rights reserved. See the file COPYING in the top-level source
+ ** directory for licensing information.\endverbatim
+ **
+ ** \brief Implementation of utilities for the non-linear solver
+ **/
+
+#include "theory/arith/nl/nl_lemma_utils.h"
+
+#include "theory/arith/nl/nl_model.h"
+
+namespace CVC4 {
+namespace theory {
+namespace arith {
+namespace nl {
+
+bool SortNlModel::operator()(Node i, Node j)
+{
+ int cv = d_nlm->compare(i, j, d_isConcrete, d_isAbsolute);
+ if (cv == 0)
+ {
+ return i < j;
+ }
+ return d_reverse_order ? cv < 0 : cv > 0;
+}
+
+bool SortNonlinearDegree::operator()(Node i, Node j)
+{
+ unsigned i_count = getDegree(i);
+ unsigned j_count = getDegree(j);
+ return i_count == j_count ? (i < j) : (i_count < j_count);
+}
+
+unsigned SortNonlinearDegree::getDegree(Node n) const
+{
+ std::map<Node, unsigned>::const_iterator it = d_mdegree.find(n);
+ Assert(it != d_mdegree.end());
+ return it->second;
+}
+
+Node ArgTrie::add(Node d, const std::vector<Node>& args)
+{
+ ArgTrie* at = this;
+ for (const Node& a : args)
+ {
+ at = &(at->d_children[a]);
+ }
+ if (at->d_data.isNull())
+ {
+ at->d_data = d;
+ }
+ return at->d_data;
+}
+
+} // namespace nl
+} // namespace arith
+} // namespace theory
+} // namespace CVC4
--- /dev/null
+/********************* */
+/*! \file nl_lemma_utils.h
+ ** \verbatim
+ ** Top contributors (to current version):
+ ** Andrew Reynolds
+ ** This file is part of the CVC4 project.
+ ** Copyright (c) 2009-2019 by the authors listed in the file AUTHORS
+ ** in the top-level source directory) and their institutional affiliations.
+ ** All rights reserved. See the file COPYING in the top-level source
+ ** directory for licensing information.\endverbatim
+ **
+ ** \brief Utilities for processing lemmas from the non-linear solver
+ **/
+
+#ifndef CVC4__THEORY__ARITH__NL__NL_LEMMA_UTILS_H
+#define CVC4__THEORY__ARITH__NL__NL_LEMMA_UTILS_H
+
+#include <tuple>
+#include <vector>
+#include "expr/node.h"
+
+namespace CVC4 {
+namespace theory {
+namespace arith {
+namespace nl {
+
+class NlModel;
+
+/**
+ * A side effect of adding a lemma in the non-linear solver. This is used
+ * to specify how the state of the non-linear solver should update. This
+ * includes:
+ * - A set of secant points to record (for transcendental secant plane
+ * inferences).
+ */
+struct NlLemmaSideEffect
+{
+ NlLemmaSideEffect() {}
+ ~NlLemmaSideEffect() {}
+ /** secant points to add
+ *
+ * A member (tf, d, c) in this vector indicates that point c should be added
+ * to the list of secant points for an application of a transcendental
+ * function tf for Taylor degree d. This is used for incremental linearization
+ * for underapproximation (resp. overapproximation) of convex (resp.
+ * concave) regions of transcendental functions. For details, see
+ * Cimatti et al., CADE 2017.
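+ *
+ * For example, the tuple ( sin(y), 8, 1/2 ) indicates that 1/2 should be
+ * added to the list of secant points for the application sin(y) at Taylor
+ * degree 8.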
+ */
+ std::vector<std::tuple<Node, unsigned, Node> > d_secantPoint;
+};
+
+struct SortNlModel
+{
+ SortNlModel()
+ : d_nlm(nullptr),
+ d_isConcrete(true),
+ d_isAbsolute(false),
+ d_reverse_order(false)
+ {
+ }
+ /** pointer to the model */
+ NlModel* d_nlm;
+ /** are we comparing concrete model values? */
+ bool d_isConcrete;
+ /** are we comparing absolute values? */
+ bool d_isAbsolute;
+ /** are we in reverse order? */
+ bool d_reverse_order;
+ /** the comparison */
+ bool operator()(Node i, Node j);
+};
+
+struct SortNonlinearDegree
+{
+ SortNonlinearDegree(const std::map<Node, unsigned>& m) : d_mdegree(m) {}
+ /** reference to the map from monomials to their degree */
+ const std::map<Node, unsigned>& d_mdegree;
+ /** Get the degree of n in d_mdegree */
+ unsigned getDegree(Node n) const;
+ /**
+ * Sorts by degree of the monomials, where lower degree monomials come
+ * first.
+ */
+ bool operator()(Node i, Node j);
+};
+
+/** An argument trie, for computing congruent terms */
+class ArgTrie
+{
+ public:
+ /** children of this node */
+ std::map<Node, ArgTrie> d_children;
+ /** the data of this node */
+ Node d_data;
+ /**
+ * Set d as the data on the node whose path is [args], return either d if
+ * that node has no data, or the data that already occurs there.
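+ *
+ * For example, starting from an empty trie, add(d1, {a, b}) stores and
+ * returns d1; a subsequent add(d2, {a, b}) returns d1 and leaves the stored
+ * data unchanged, whereas add(d2, {a, c}) stores and returns d2.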
+ */
+ Node add(Node d, const std::vector<Node>& args);
+};
+
+} // namespace nl
+} // namespace arith
+} // namespace theory
+} // namespace CVC4
+
+#endif /* CVC4__THEORY__ARITH__NL__NL_LEMMA_UTILS_H */
--- /dev/null
+/********************* */
+/*! \file nl_model.cpp
+ ** \verbatim
+ ** Top contributors (to current version):
+ ** Andrew Reynolds
+ ** This file is part of the CVC4 project.
+ ** Copyright (c) 2009-2019 by the authors listed in the file AUTHORS
+ ** in the top-level source directory) and their institutional affiliations.
+ ** All rights reserved. See the file COPYING in the top-level source
+ ** directory for licensing information.\endverbatim
+ **
+ ** \brief Model object for the non-linear extension class
+ **/
+
+#include "theory/arith/nl/nl_model.h"
+
+#include "expr/node_algorithm.h"
+#include "options/arith_options.h"
+#include "theory/arith/arith_msum.h"
+#include "theory/arith/arith_utilities.h"
+#include "theory/rewriter.h"
+
+using namespace CVC4::kind;
+
+namespace CVC4 {
+namespace theory {
+namespace arith {
+namespace nl {
+
+NlModel::NlModel(context::Context* c) : d_used_approx(false)
+{
+ d_true = NodeManager::currentNM()->mkConst(true);
+ d_false = NodeManager::currentNM()->mkConst(false);
+ d_zero = NodeManager::currentNM()->mkConst(Rational(0));
+ d_one = NodeManager::currentNM()->mkConst(Rational(1));
+ d_two = NodeManager::currentNM()->mkConst(Rational(2));
+}
+
+NlModel::~NlModel() {}
+
+void NlModel::reset(TheoryModel* m, std::map<Node, Node>& arithModel)
+{
+ d_model = m;
+ d_mv[0].clear();
+ d_mv[1].clear();
+ d_arithVal.clear();
+ // process arithModel
+ for (const std::pair<const Node, Node>& m2 : arithModel)
+ {
+ d_arithVal[m2.first] = m2.second;
+ }
+}
+
+void NlModel::resetCheck()
+{
+ d_used_approx = false;
+ d_check_model_solved.clear();
+ d_check_model_bounds.clear();
+ d_check_model_vars.clear();
+ d_check_model_subs.clear();
+}
+
+Node NlModel::computeConcreteModelValue(Node n)
+{
+ return computeModelValue(n, true);
+}
+
+Node NlModel::computeAbstractModelValue(Node n)
+{
+ return computeModelValue(n, false);
+}
+
+Node NlModel::computeModelValue(Node n, bool isConcrete)
+{
+ unsigned index = isConcrete ? 0 : 1;
+ std::map<Node, Node>::iterator it = d_mv[index].find(n);
+ if (it != d_mv[index].end())
+ {
+ return it->second;
+ }
+ Trace("nl-ext-mv-debug") << "computeModelValue " << n << ", index=" << index
+ << std::endl;
+ Node ret;
+ Kind nk = n.getKind();
+ if (n.isConst())
+ {
+ ret = n;
+ }
+ else if (!isConcrete && hasTerm(n))
+ {
+ // use model value for abstraction
+ ret = getRepresentative(n);
+ }
+ else if (n.getNumChildren() == 0)
+ {
+ // we are interested in the exact value of PI, which cannot be computed.
+ // hence, we return PI itself when asked for the concrete value.
+ if (nk == PI)
+ {
+ ret = n;
+ }
+ else
+ {
+ ret = getValueInternal(n);
+ }
+ }
+ else
+ {
+ // otherwise, compute true value
+ TheoryId ctid = theory::kindToTheoryId(nk);
+ if (ctid != THEORY_ARITH && ctid != THEORY_BOOL && ctid != THEORY_BUILTIN)
+ {
+ // we directly look up terms not belonging to arithmetic
+ ret = getValueInternal(n);
+ }
+ else
+ {
+ std::vector<Node> children;
+ if (n.getMetaKind() == metakind::PARAMETERIZED)
+ {
+ children.push_back(n.getOperator());
+ }
+ for (unsigned i = 0, nchild = n.getNumChildren(); i < nchild; i++)
+ {
+ Node mc = computeModelValue(n[i], isConcrete);
+ children.push_back(mc);
+ }
+ ret = NodeManager::currentNM()->mkNode(nk, children);
+ ret = Rewriter::rewrite(ret);
+ }
+ }
+ Trace("nl-ext-mv-debug") << "computed " << (index == 0 ? "M" : "M_A") << "["
+ << n << "] = " << ret << std::endl;
+ d_mv[index][n] = ret;
+ return ret;
+}
+
+bool NlModel::hasTerm(Node n) const
+{
+ return d_arithVal.find(n) != d_arithVal.end();
+}
+
+Node NlModel::getRepresentative(Node n) const
+{
+ if (n.isConst())
+ {
+ return n;
+ }
+ std::map<Node, Node>::const_iterator it = d_arithVal.find(n);
+ if (it != d_arithVal.end())
+ {
+ AlwaysAssert(it->second.isConst());
+ return it->second;
+ }
+ return d_model->getRepresentative(n);
+}
+
+Node NlModel::getValueInternal(Node n) const
+{
+ if (n.isConst())
+ {
+ return n;
+ }
+ std::map<Node, Node>::const_iterator it = d_arithVal.find(n);
+ if (it != d_arithVal.end())
+ {
+ AlwaysAssert(it->second.isConst());
+ return it->second;
+ }
+ // It is unconstrained in the model, return 0.
+ return d_zero;
+}
+
+int NlModel::compare(Node i, Node j, bool isConcrete, bool isAbsolute)
+{
+ Node ci = computeModelValue(i, isConcrete);
+ Node cj = computeModelValue(j, isConcrete);
+ if (ci.isConst())
+ {
+ if (cj.isConst())
+ {
+ return compareValue(ci, cj, isAbsolute);
+ }
+ return 1;
+ }
+ return cj.isConst() ? -1 : 0;
+}
+
+int NlModel::compareValue(Node i, Node j, bool isAbsolute) const
+{
+ Assert(i.isConst() && j.isConst());
+ int ret;
+ if (i == j)
+ {
+ ret = 0;
+ }
+ else if (!isAbsolute)
+ {
+ ret = i.getConst<Rational>() < j.getConst<Rational>() ? 1 : -1;
+ }
+ else
+ {
+ ret = (i.getConst<Rational>().abs() == j.getConst<Rational>().abs()
+ ? 0
+ : (i.getConst<Rational>().abs() < j.getConst<Rational>().abs()
+ ? 1
+ : -1));
+ }
+ return ret;
+}
+
+bool NlModel::checkModel(const std::vector<Node>& assertions,
+ const std::vector<Node>& false_asserts,
+ unsigned d,
+ std::vector<Node>& lemmas,
+ std::vector<Node>& gs)
+{
+ Trace("nl-ext-cm-debug") << " solve for equalities..." << std::endl;
+ for (const Node& atom : false_asserts)
+ {
+ // see if it corresponds to a univariate polynomial equation of degree two
+ if (atom.getKind() == EQUAL)
+ {
+ if (!solveEqualitySimple(atom, d, lemmas))
+ {
+ // no chance we will satisfy this equality
+ Trace("nl-ext-cm") << "...check-model : failed to solve equality : "
+ << atom << std::endl;
+ }
+ }
+ }
+
+ // all remaining variables are constrained to their exact model values
+ Trace("nl-ext-cm-debug") << " set exact bounds for remaining variables..."
+ << std::endl;
+ std::unordered_set<TNode, TNodeHashFunction> visited;
+ std::vector<TNode> visit;
+ TNode cur;
+ for (const Node& a : assertions)
+ {
+ visit.push_back(a);
+ do
+ {
+ cur = visit.back();
+ visit.pop_back();
+ if (visited.find(cur) == visited.end())
+ {
+ visited.insert(cur);
+ if (cur.getType().isReal() && !cur.isConst())
+ {
+ Kind k = cur.getKind();
+ if (k != MULT && k != PLUS && k != NONLINEAR_MULT
+ && !isTranscendentalKind(k))
+ {
+ // if we have not set an approximate bound for it
+ if (!hasCheckModelAssignment(cur))
+ {
+ // set its exact model value in the substitution
+ Node curv = computeConcreteModelValue(cur);
+ Trace("nl-ext-cm")
+ << "check-model-bound : exact : " << cur << " = ";
+ printRationalApprox("nl-ext-cm", curv);
+ Trace("nl-ext-cm") << std::endl;
+ bool ret = addCheckModelSubstitution(cur, curv);
+ AlwaysAssert(ret);
+ }
+ }
+ }
+ for (const Node& cn : cur)
+ {
+ visit.push_back(cn);
+ }
+ }
+ } while (!visit.empty());
+ }
+
+ Trace("nl-ext-cm-debug") << " check assertions..." << std::endl;
+ std::vector<Node> check_assertions;
+ for (const Node& a : assertions)
+ {
+ // don't have to check tautological literals
+ if (d_tautology.find(a) != d_tautology.end())
+ {
+ continue;
+ }
+ if (d_check_model_solved.find(a) == d_check_model_solved.end())
+ {
+ Node av = a;
+ // apply the substitution to a
+ if (!d_check_model_vars.empty())
+ {
+ av = arithSubstitute(av, d_check_model_vars, d_check_model_subs);
+ av = Rewriter::rewrite(av);
+ }
+ // simple check literal
+ if (!simpleCheckModelLit(av))
+ {
+ Trace("nl-ext-cm") << "...check-model : assertion failed : " << a
+ << std::endl;
+ check_assertions.push_back(av);
+ Trace("nl-ext-cm-debug")
+ << "...check-model : failed assertion, value : " << av << std::endl;
+ }
+ }
+ }
+
+ if (!check_assertions.empty())
+ {
+ Trace("nl-ext-cm") << "...simple check failed." << std::endl;
+ // TODO (#1450) check model for general case
+ return false;
+ }
+ Trace("nl-ext-cm") << "...simple check succeeded!" << std::endl;
+
+ // must assert and re-check if produce models is true
+ if (options::produceModels())
+ {
+ NodeManager* nm = NodeManager::currentNM();
+ // model guard whose semantics is "the model we constructed holds"
+ Node mg = nm->mkSkolem("model", nm->booleanType());
+ gs.push_back(mg);
+ // assert the constructed model as assertions
+ for (const std::pair<const Node, std::pair<Node, Node>>& cb :
+ d_check_model_bounds)
+ {
+ Node l = cb.second.first;
+ Node u = cb.second.second;
+ Node v = cb.first;
+ Node pred = nm->mkNode(AND, nm->mkNode(GEQ, v, l), nm->mkNode(GEQ, u, v));
+ pred = nm->mkNode(OR, mg.negate(), pred);
+ lemmas.push_back(pred);
+ }
+ }
+ return true;
+}
+
+bool NlModel::addCheckModelSubstitution(TNode v, TNode s)
+{
+ // should not substitute the same variable twice
+ Trace("nl-ext-model") << "* check model substitution : " << v << " -> " << s
+ << std::endl;
+ // should not set exact bound more than once
+ if (std::find(d_check_model_vars.begin(), d_check_model_vars.end(), v)
+ != d_check_model_vars.end())
+ {
+ Trace("nl-ext-model") << "...ERROR: already has value." << std::endl;
+ // this should never happen since substitutions should be applied eagerly
+ Assert(false);
+ return false;
+ }
+ // if we previously had an approximate bound, the exact bound should be in its
+ // range
+ std::map<Node, std::pair<Node, Node>>::iterator itb =
+ d_check_model_bounds.find(v);
+ if (itb != d_check_model_bounds.end())
+ {
+ if (s.getConst<Rational>() < itb->second.first.getConst<Rational>()
+ || s.getConst<Rational>() > itb->second.second.getConst<Rational>())
+ {
+ Trace("nl-ext-model")
+ << "...ERROR: already has bound which is out of range." << std::endl;
+ return false;
+ }
+ }
+ std::vector<Node> varsTmp;
+ varsTmp.push_back(v);
+ std::vector<Node> subsTmp;
+ subsTmp.push_back(s);
+ for (unsigned i = 0, size = d_check_model_subs.size(); i < size; i++)
+ {
+ Node ms = d_check_model_subs[i];
+ Node mss = arithSubstitute(ms, varsTmp, subsTmp);
+ if (mss != ms)
+ {
+ mss = Rewriter::rewrite(mss);
+ }
+ d_check_model_subs[i] = mss;
+ }
+ d_check_model_vars.push_back(v);
+ d_check_model_subs.push_back(s);
+ return true;
+}
+
+bool NlModel::addCheckModelBound(TNode v, TNode l, TNode u)
+{
+ Trace("nl-ext-model") << "* check model bound : " << v << " -> [" << l << " "
+ << u << "]" << std::endl;
+ if (l == u)
+ {
+ // bound is exact, can add as substitution
+ return addCheckModelSubstitution(v, l);
+ }
+ // should not set a bound for a value that is exact
+ if (std::find(d_check_model_vars.begin(), d_check_model_vars.end(), v)
+ != d_check_model_vars.end())
+ {
+ Trace("nl-ext-model")
+ << "...ERROR: setting bound for variable that already has exact value."
+ << std::endl;
+ Assert(false);
+ return false;
+ }
+ Assert(l.isConst());
+ Assert(u.isConst());
+ Assert(l.getConst<Rational>() <= u.getConst<Rational>());
+ d_check_model_bounds[v] = std::pair<Node, Node>(l, u);
+ if (Trace.isOn("nl-ext-cm"))
+ {
+ Trace("nl-ext-cm") << "check-model-bound : approximate : ";
+ printRationalApprox("nl-ext-cm", l);
+ Trace("nl-ext-cm") << " <= " << v << " <= ";
+ printRationalApprox("nl-ext-cm", u);
+ Trace("nl-ext-cm") << std::endl;
+ }
+ return true;
+}
+
+bool NlModel::hasCheckModelAssignment(Node v) const
+{
+ if (d_check_model_bounds.find(v) != d_check_model_bounds.end())
+ {
+ return true;
+ }
+ return std::find(d_check_model_vars.begin(), d_check_model_vars.end(), v)
+ != d_check_model_vars.end();
+}
+
+void NlModel::setUsedApproximate() { d_used_approx = true; }
+
+bool NlModel::usedApproximate() const { return d_used_approx; }
+
+void NlModel::addTautology(Node n)
+{
+ // ensure rewritten
+ n = Rewriter::rewrite(n);
+ std::unordered_set<TNode, TNodeHashFunction> visited;
+ std::vector<TNode> visit;
+ TNode cur;
+ visit.push_back(n);
+ do
+ {
+ cur = visit.back();
+ visit.pop_back();
+ if (visited.find(cur) == visited.end())
+ {
+ visited.insert(cur);
+ if (cur.getKind() == AND)
+ {
+ // children of AND are also implied
+ for (const Node& cn : cur)
+ {
+ visit.push_back(cn);
+ }
+ }
+ else
+ {
+ // is this an arithmetic literal?
+ Node atom = cur.getKind() == NOT ? cur[0] : cur;
+ if ((atom.getKind() == EQUAL && atom[0].getType().isReal())
+ || atom.getKind() == LEQ)
+ {
+ // Add to tautological literals if it does not contain
+ // non-linear multiplication. We cannot consider literals
+ // with non-linear multiplication to be tautological since this
+ // model object is responsible for checking whether they hold.
+ // (TODO, cvc4-projects #113: revisit this).
+ if (!expr::hasSubtermKind(NONLINEAR_MULT, atom))
+ {
+ Trace("nl-taut") << "Tautological literal: " << atom << std::endl;
+ d_tautology.insert(cur);
+ }
+ }
+ }
+ }
+ } while (!visit.empty());
+}
+
+bool NlModel::solveEqualitySimple(Node eq,
+ unsigned d,
+ std::vector<Node>& lemmas)
+{
+ Node seq = eq;
+ if (!d_check_model_vars.empty())
+ {
+ seq = arithSubstitute(eq, d_check_model_vars, d_check_model_subs);
+ seq = Rewriter::rewrite(seq);
+ if (seq.isConst())
+ {
+ if (seq.getConst<bool>())
+ {
+ d_check_model_solved[eq] = Node::null();
+ return true;
+ }
+ return false;
+ }
+ }
+ Trace("nl-ext-cms") << "simple solve equality " << seq << "..." << std::endl;
+ Assert(seq.getKind() == EQUAL);
+ std::map<Node, Node> msum;
+ if (!ArithMSum::getMonomialSumLit(seq, msum))
+ {
+ Trace("nl-ext-cms") << "...fail, could not determine monomial sum."
+ << std::endl;
+ return false;
+ }
+ bool is_valid = true;
+ // the variable we will solve a quadratic equation for
+ Node var;
+ Node a = d_zero;
+ Node b = d_zero;
+ Node c = d_zero;
+ NodeManager* nm = NodeManager::currentNM();
+ // the list of variables that occur as a monomial in msum, and whose value
+ // is so far unconstrained in the model.
+ std::unordered_set<Node, NodeHashFunction> unc_vars;
+ // the list of variables that occur as a factor in a monomial, and whose
+ // value is so far unconstrained in the model.
+ std::unordered_set<Node, NodeHashFunction> unc_vars_factor;
+ for (std::pair<const Node, Node>& m : msum)
+ {
+ Node v = m.first;
+ Node coeff = m.second.isNull() ? d_one : m.second;
+ if (v.isNull())
+ {
+ c = coeff;
+ }
+ else if (v.getKind() == NONLINEAR_MULT)
+ {
+ if (v.getNumChildren() == 2 && v[0].isVar() && v[0] == v[1]
+ && (var.isNull() || var == v[0]))
+ {
+ // may solve quadratic
+ a = coeff;
+ var = v[0];
+ }
+ else
+ {
+ is_valid = false;
+ Trace("nl-ext-cms-debug")
+ << "...invalid due to non-linear monomial " << v << std::endl;
+ // may wish to set an exact bound for a factor and repeat
+ for (const Node& vc : v)
+ {
+ unc_vars_factor.insert(vc);
+ }
+ }
+ }
+ else if (!v.isVar() || (!var.isNull() && var != v))
+ {
+ Trace("nl-ext-cms-debug")
+ << "...invalid due to factor " << v << std::endl;
+ // cannot solve multivariate
+ if (is_valid)
+ {
+ is_valid = false;
+ // if b is non-zero, then var is also an unconstrained variable
+ if (b != d_zero)
+ {
+ unc_vars.insert(var);
+ unc_vars_factor.insert(var);
+ }
+ }
+ // if v is unconstrained, we may turn this equality into a substitution
+ unc_vars.insert(v);
+ unc_vars_factor.insert(v);
+ }
+ else
+ {
+ // set the variable to solve for
+ b = coeff;
+ var = v;
+ }
+ }
+ if (!is_valid)
+ {
+ // see if we can solve for a variable?
+ for (const Node& uv : unc_vars)
+ {
+ Trace("nl-ext-cm-debug") << "check subs var : " << uv << std::endl;
+ // cannot already have a bound
+ if (uv.isVar() && !hasCheckModelAssignment(uv))
+ {
+ Node slv;
+ Node veqc;
+ if (ArithMSum::isolate(uv, msum, veqc, slv, EQUAL) != 0)
+ {
+ Assert(!slv.isNull());
+ // Currently do not support substitution-with-coefficients.
+ // We also ensure types are correct here, which avoids substituting
+ // a term of non-integer type for a variable of integer type.
+ if (veqc.isNull() && !expr::hasSubterm(slv, uv)
+ && slv.getType().isSubtypeOf(uv.getType()))
+ {
+ Trace("nl-ext-cm")
+ << "check-model-subs : " << uv << " -> " << slv << std::endl;
+ bool ret = addCheckModelSubstitution(uv, slv);
+ if (ret)
+ {
+ Trace("nl-ext-cms") << "...success, model substitution " << uv
+ << " -> " << slv << std::endl;
+ d_check_model_solved[eq] = uv;
+ }
+ return ret;
+ }
+ }
+ }
+ }
+ // see if we can assign a variable to a constant
+ for (const Node& uvf : unc_vars_factor)
+ {
+ Trace("nl-ext-cm-debug") << "check set var : " << uvf << std::endl;
+ // cannot already have a bound
+ if (uvf.isVar() && !hasCheckModelAssignment(uvf))
+ {
+ Node uvfv = computeConcreteModelValue(uvf);
+ Trace("nl-ext-cm") << "check-model-bound : exact : " << uvf << " = ";
+ printRationalApprox("nl-ext-cm", uvfv);
+ Trace("nl-ext-cm") << std::endl;
+ bool ret = addCheckModelSubstitution(uvf, uvfv);
+ // recurse
+ return ret ? solveEqualitySimple(eq, d, lemmas) : false;
+ }
+ }
+ Trace("nl-ext-cms") << "...fail due to constrained invalid terms."
+ << std::endl;
+ return false;
+ }
+ else if (var.isNull() || var.getType().isInteger())
+ {
+ // cannot solve quadratic equations for integer variables
+ Trace("nl-ext-cms") << "...fail due to variable to solve for." << std::endl;
+ return false;
+ }
+
+ // we are linear, it is simple
+ if (a == d_zero)
+ {
+ if (b == d_zero)
+ {
+ Trace("nl-ext-cms") << "...fail due to zero a/b." << std::endl;
+ Assert(false);
+ return false;
+ }
+ Node val = nm->mkConst(-c.getConst<Rational>() / b.getConst<Rational>());
+ Trace("nl-ext-cm") << "check-model-bound : exact : " << var << " = ";
+ printRationalApprox("nl-ext-cm", val);
+ Trace("nl-ext-cm") << std::endl;
+ bool ret = addCheckModelSubstitution(var, val);
+ if (ret)
+ {
+ Trace("nl-ext-cms") << "...success, solved linear." << std::endl;
+ d_check_model_solved[eq] = var;
+ }
+ return ret;
+ }
+ Trace("nl-ext-quad") << "Solve quadratic : " << seq << std::endl;
+ Trace("nl-ext-quad") << " a : " << a << std::endl;
+ Trace("nl-ext-quad") << " b : " << b << std::endl;
+ Trace("nl-ext-quad") << " c : " << c << std::endl;
+ Node two_a = nm->mkNode(MULT, d_two, a);
+ two_a = Rewriter::rewrite(two_a);
+ Node sqrt_val = nm->mkNode(
+ MINUS, nm->mkNode(MULT, b, b), nm->mkNode(MULT, d_two, two_a, c));
+ sqrt_val = Rewriter::rewrite(sqrt_val);
+ Trace("nl-ext-quad") << "Will approximate sqrt " << sqrt_val << std::endl;
+ Assert(sqrt_val.isConst());
+ // if it is negative, then we are in conflict
+ if (sqrt_val.getConst<Rational>().sgn() == -1)
+ {
+ Node conf = seq.negate();
+ Trace("nl-ext-lemma") << "NlModel::Lemma : quadratic no root : " << conf
+ << std::endl;
+ lemmas.push_back(conf);
+ Trace("nl-ext-cms") << "...fail due to negative discriminant." << std::endl;
+ return false;
+ }
+ if (hasCheckModelAssignment(var))
+ {
+ Trace("nl-ext-cms") << "...fail due to bounds on variable to solve for."
+ << std::endl;
+ // two quadratic equations for same variable, give up
+ return false;
+ }
+ // approximate the square root of sqrt_val
+ Node l, u;
+ if (!getApproximateSqrt(sqrt_val, l, u, 15 + d))
+ {
+ Trace("nl-ext-cms") << "...fail, could not approximate sqrt." << std::endl;
+ return false;
+ }
+ d_used_approx = true;
+ Trace("nl-ext-quad") << "...got " << l << " <= sqrt(" << sqrt_val
+ << ") <= " << u << std::endl;
+ Node negb = nm->mkConst(-b.getConst<Rational>());
+ Node coeffa = nm->mkConst(Rational(1) / two_a.getConst<Rational>());
+ // two possible bound regions
+ Node bounds[2][2];
+ Node diff_bound[2];
+ Node m_var = computeConcreteModelValue(var);
+ Assert(m_var.isConst());
+ for (unsigned r = 0; r < 2; r++)
+ {
+ for (unsigned b2 = 0; b2 < 2; b2++)
+ {
+ Node val = b2 == 0 ? l : u;
+ // (-b +- approx_sqrt( b^2 - 4ac ))/2a
+ Node approx = nm->mkNode(
+ MULT, coeffa, nm->mkNode(r == 0 ? MINUS : PLUS, negb, val));
+ approx = Rewriter::rewrite(approx);
+ bounds[r][b2] = approx;
+ Assert(approx.isConst());
+ }
+ if (bounds[r][0].getConst<Rational>() > bounds[r][1].getConst<Rational>())
+ {
+ // ensure bound is (lower, upper)
+ Node tmp = bounds[r][0];
+ bounds[r][0] = bounds[r][1];
+ bounds[r][1] = tmp;
+ }
+ Node diff =
+ nm->mkNode(MINUS,
+ m_var,
+ nm->mkNode(MULT,
+ nm->mkConst(Rational(1) / Rational(2)),
+ nm->mkNode(PLUS, bounds[r][0], bounds[r][1])));
+ Trace("nl-ext-cm-debug") << "Bound option #" << r << " : ";
+ printRationalApprox("nl-ext-cm-debug", bounds[r][0]);
+ Trace("nl-ext-cm-debug") << "...";
+ printRationalApprox("nl-ext-cm-debug", bounds[r][1]);
+ Trace("nl-ext-cm-debug") << std::endl;
+ diff = Rewriter::rewrite(diff);
+ Assert(diff.isConst());
+ diff = nm->mkConst(diff.getConst<Rational>().abs());
+ diff_bound[r] = diff;
+ Trace("nl-ext-cm-debug") << "...diff from model value (";
+ printRationalApprox("nl-ext-cm-debug", m_var);
+ Trace("nl-ext-cm-debug") << ") is ";
+ printRationalApprox("nl-ext-cm-debug", diff_bound[r]);
+ Trace("nl-ext-cm-debug") << std::endl;
+ }
+ // take the one that var is closer to in the model
+ Node cmp = nm->mkNode(GEQ, diff_bound[0], diff_bound[1]);
+ cmp = Rewriter::rewrite(cmp);
+ Assert(cmp.isConst());
+ unsigned r_use_index = cmp == d_true ? 1 : 0;
+ Trace("nl-ext-cm") << "check-model-bound : approximate (sqrt) : ";
+ printRationalApprox("nl-ext-cm", bounds[r_use_index][0]);
+ Trace("nl-ext-cm") << " <= " << var << " <= ";
+ printRationalApprox("nl-ext-cm", bounds[r_use_index][1]);
+ Trace("nl-ext-cm") << std::endl;
+ bool ret =
+ addCheckModelBound(var, bounds[r_use_index][0], bounds[r_use_index][1]);
+ if (ret)
+ {
+ d_check_model_solved[eq] = var;
+ Trace("nl-ext-cms") << "...success, solved quadratic." << std::endl;
+ }
+ return ret;
+}
+
+bool NlModel::simpleCheckModelLit(Node lit)
+{
+ Trace("nl-ext-cms") << "*** Simple check-model lit for " << lit << "..."
+ << std::endl;
+ if (lit.isConst())
+ {
+ Trace("nl-ext-cms") << " return constant." << std::endl;
+ return lit.getConst<bool>();
+ }
+ NodeManager* nm = NodeManager::currentNM();
+ bool pol = lit.getKind() != kind::NOT;
+ Node atom = lit.getKind() == kind::NOT ? lit[0] : lit;
+
+ if (atom.getKind() == EQUAL)
+ {
+ // x = a is ( x >= a ^ x <= a )
+ for (unsigned i = 0; i < 2; i++)
+ {
+ Node lit2 = nm->mkNode(GEQ, atom[i], atom[1 - i]);
+ if (!pol)
+ {
+ lit2 = lit2.negate();
+ }
+ lit2 = Rewriter::rewrite(lit2);
+ bool success = simpleCheckModelLit(lit2);
+ if (success != pol)
+ {
+ // false != true -> one conjunct of equality is false, we fail
+ // true != false -> one disjunct of disequality is true, we succeed
+ return success;
+ }
+ }
+ // both checks passed and polarity is true, or both checks failed and
+ // polarity is false
+ return pol;
+ }
+ else if (atom.getKind() != GEQ)
+ {
+ Trace("nl-ext-cms") << " failed due to unknown literal." << std::endl;
+ return false;
+ }
+ // get the monomial sum
+ std::map<Node, Node> msum;
+ if (!ArithMSum::getMonomialSumLit(atom, msum))
+ {
+ Trace("nl-ext-cms") << " failed due to get msum." << std::endl;
+ return false;
+ }
+ // simple interval analysis
+ if (simpleCheckModelMsum(msum, pol))
+ {
+ return true;
+ }
+ // can also try reasoning about univariate quadratic equations
+ Trace("nl-ext-cms-debug")
+ << "* Try univariate quadratic analysis..." << std::endl;
+ std::vector<Node> vs_invalid;
+ std::unordered_set<Node, NodeHashFunction> vs;
+ std::map<Node, Node> v_a;
+ std::map<Node, Node> v_b;
+ // get coefficients...
+ for (std::pair<const Node, Node>& m : msum)
+ {
+ Node v = m.first;
+ if (!v.isNull())
+ {
+ if (v.isVar())
+ {
+ v_b[v] = m.second.isNull() ? d_one : m.second;
+ vs.insert(v);
+ }
+ else if (v.getKind() == NONLINEAR_MULT && v.getNumChildren() == 2
+ && v[0] == v[1] && v[0].isVar())
+ {
+ v_a[v[0]] = m.second.isNull() ? d_one : m.second;
+ vs.insert(v[0]);
+ }
+ else
+ {
+ vs_invalid.push_back(v);
+ }
+ }
+ }
+ // solve the valid variables...
+ Node invalid_vsum = vs_invalid.empty() ? d_zero
+ : (vs_invalid.size() == 1
+ ? vs_invalid[0]
+ : nm->mkNode(PLUS, vs_invalid));
+ // substitution to try
+ std::vector<Node> qvars;
+ std::vector<Node> qsubs;
+ for (const Node& v : vs)
+ {
+ // is it a valid variable?
+ std::map<Node, std::pair<Node, Node>>::iterator bit =
+ d_check_model_bounds.find(v);
+ if (!expr::hasSubterm(invalid_vsum, v) && bit != d_check_model_bounds.end())
+ {
+ std::map<Node, Node>::iterator it = v_a.find(v);
+ if (it != v_a.end())
+ {
+ Node a = it->second;
+ Assert(a.isConst());
+ int asgn = a.getConst<Rational>().sgn();
+ Assert(asgn != 0);
+ Node t = nm->mkNode(MULT, a, v, v);
+ Node b = d_zero;
+ it = v_b.find(v);
+ if (it != v_b.end())
+ {
+ b = it->second;
+ t = nm->mkNode(PLUS, t, nm->mkNode(MULT, b, v));
+ }
+ t = Rewriter::rewrite(t);
+ Trace("nl-ext-cms-debug") << "Trying to find min/max for quadratic "
+ << t << "..." << std::endl;
+ Trace("nl-ext-cms-debug") << " a = " << a << std::endl;
+ Trace("nl-ext-cms-debug") << " b = " << b << std::endl;
+ // find maximal/minimal value on the interval
+ Node apex = nm->mkNode(
+ DIVISION, nm->mkNode(UMINUS, b), nm->mkNode(MULT, d_two, a));
+ apex = Rewriter::rewrite(apex);
+ Assert(apex.isConst());
+ // for lower, upper, whether we are greater than the apex
+ bool cmp[2];
+ Node boundn[2];
+ for (unsigned r = 0; r < 2; r++)
+ {
+ boundn[r] = r == 0 ? bit->second.first : bit->second.second;
+ Node cmpn = nm->mkNode(GT, boundn[r], apex);
+ cmpn = Rewriter::rewrite(cmpn);
+ Assert(cmpn.isConst());
+ cmp[r] = cmpn.getConst<bool>();
+ }
+ Trace("nl-ext-cms-debug") << " apex " << apex << std::endl;
+ Trace("nl-ext-cms-debug")
+ << " lower " << boundn[0] << ", cmp: " << cmp[0] << std::endl;
+ Trace("nl-ext-cms-debug")
+ << " upper " << boundn[1] << ", cmp: " << cmp[1] << std::endl;
+ Assert(boundn[0].getConst<Rational>()
+ <= boundn[1].getConst<Rational>());
+ Node s;
+ qvars.push_back(v);
+ if (cmp[0] != cmp[1])
+ {
+ Assert(!cmp[0] && cmp[1]);
+ // does the sign match the bound?
+ if ((asgn == 1) == pol)
+ {
+ // the apex is the max/min value
+ s = apex;
+ Trace("nl-ext-cms-debug") << " ...set to apex." << std::endl;
+ }
+ else
+ {
+ // it is one of the endpoints, plug in and compare
+ Node tcmpn[2];
+ for (unsigned r = 0; r < 2; r++)
+ {
+ qsubs.push_back(boundn[r]);
+ Node ts = arithSubstitute(t, qvars, qsubs);
+ tcmpn[r] = Rewriter::rewrite(ts);
+ qsubs.pop_back();
+ }
+ Node tcmp = nm->mkNode(LT, tcmpn[0], tcmpn[1]);
+ Trace("nl-ext-cms-debug")
+ << " ...both sides of apex, compare " << tcmp << std::endl;
+ tcmp = Rewriter::rewrite(tcmp);
+ Assert(tcmp.isConst());
+ unsigned bindex_use = (tcmp.getConst<bool>() == pol) ? 1 : 0;
+ Trace("nl-ext-cms-debug")
+ << " ...set to " << (bindex_use == 1 ? "upper" : "lower")
+ << std::endl;
+ s = boundn[bindex_use];
+ }
+ }
+ else
+ {
+ // both to one side of the apex
+ // we figure out which bound to use (lower or upper) based on
+ // three factors:
+ // (1) whether a's sign is positive,
+ // (2) whether we are greater than the apex of the parabola,
+ // (3) the polarity of the constraint, i.e. >= or <=.
+ // there are 8 cases of these factors, which we test here.
+ unsigned bindex_use = (((asgn == 1) == cmp[0]) == pol) ? 0 : 1;
+ Trace("nl-ext-cms-debug")
+ << " ...set to " << (bindex_use == 1 ? "upper" : "lower")
+ << std::endl;
+ s = boundn[bindex_use];
+ }
+ Assert(!s.isNull());
+ qsubs.push_back(s);
+ Trace("nl-ext-cms") << "* set bound based on quadratic : " << v
+ << " -> " << s << std::endl;
+ }
+ }
+ }
+ if (!qvars.empty())
+ {
+ Assert(qvars.size() == qsubs.size());
+ Node slit = arithSubstitute(lit, qvars, qsubs);
+ slit = Rewriter::rewrite(slit);
+ return simpleCheckModelLit(slit);
+ }
+ return false;
+}
+
+bool NlModel::simpleCheckModelMsum(const std::map<Node, Node>& msum, bool pol)
+{
+ Trace("nl-ext-cms-debug") << "* Try simple interval analysis..." << std::endl;
+ NodeManager* nm = NodeManager::currentNM();
+ // map from the factors of monomials to whether they were set to their
+ // lower bound
+ bool simpleSuccess = true;
+ std::map<Node, bool> set_bound;
+ std::vector<Node> sum_bound;
+ for (const std::pair<const Node, Node>& m : msum)
+ {
+ Node v = m.first;
+ if (v.isNull())
+ {
+ sum_bound.push_back(m.second.isNull() ? d_one : m.second);
+ }
+ else
+ {
+ Trace("nl-ext-cms-debug") << "- monomial : " << v << std::endl;
+ // --- whether we should set a lower bound for this monomial
+ bool set_lower =
+ (m.second.isNull() || m.second.getConst<Rational>().sgn() == 1)
+ == pol;
+ Trace("nl-ext-cms-debug")
+ << "set bound to " << (set_lower ? "lower" : "upper") << std::endl;
+
+ // --- Collect variables and factors in v
+ std::vector<Node> vars;
+ std::vector<unsigned> factors;
+ if (v.getKind() == NONLINEAR_MULT)
+ {
+ unsigned last_start = 0;
+ for (unsigned i = 0, nchildren = v.getNumChildren(); i < nchildren; i++)
+ {
+ // are we at the end?
+ if (i + 1 == nchildren || v[i + 1] != v[i])
+ {
+ unsigned vfact = 1 + (i - last_start);
+ last_start = (i + 1);
+ vars.push_back(v[i]);
+ factors.push_back(vfact);
+ }
+ }
+ }
+ else
+ {
+ vars.push_back(v);
+ factors.push_back(1);
+ }
+
+ // --- Get the lower and upper bounds and sign information.
+ // Whether we have an (odd) number of negative factors in vars, apart
+ // from the variable at choose_index.
+ bool has_neg_factor = false;
+ int choose_index = -1;
+ std::vector<Node> ls;
+ std::vector<Node> us;
+ // the relevant sign information for variables with odd exponents:
+ // 1: both signs of the interval of this variable are positive,
+ // -1: both signs of the interval of this variable are negative.
+ std::vector<int> signs;
+ Trace("nl-ext-cms-debug") << "get sign information..." << std::endl;
+ for (unsigned i = 0, size = vars.size(); i < size; i++)
+ {
+ Node vc = vars[i];
+ unsigned vcfact = factors[i];
+ if (Trace.isOn("nl-ext-cms-debug"))
+ {
+ Trace("nl-ext-cms-debug") << "-- " << vc;
+ if (vcfact > 1)
+ {
+ Trace("nl-ext-cms-debug") << "^" << vcfact;
+ }
+ Trace("nl-ext-cms-debug") << " ";
+ }
+ std::map<Node, std::pair<Node, Node>>::iterator bit =
+ d_check_model_bounds.find(vc);
+ // if there is a model bound for this term
+ if (bit != d_check_model_bounds.end())
+ {
+ Node l = bit->second.first;
+ Node u = bit->second.second;
+ ls.push_back(l);
+ us.push_back(u);
+ int vsign = 0;
+ if (vcfact % 2 == 1)
+ {
+ vsign = 1;
+ int lsgn = l.getConst<Rational>().sgn();
+ int usgn = u.getConst<Rational>().sgn();
+ Trace("nl-ext-cms-debug")
+ << "bound_sign(" << lsgn << "," << usgn << ") ";
+ if (lsgn == -1)
+ {
+ if (usgn < 1)
+ {
+ // must have a negative factor
+ has_neg_factor = !has_neg_factor;
+ vsign = -1;
+ }
+ else if (choose_index == -1)
+ {
+ // set the choose index to this
+ choose_index = i;
+ vsign = 0;
+ }
+ else
+ {
+ // ambiguous, can't determine the bound
+ Trace("nl-ext-cms")
+ << " failed due to ambiguious monomial." << std::endl;
+ return false;
+ }
+ }
+ }
+ Trace("nl-ext-cms-debug") << " -> " << vsign << std::endl;
+ signs.push_back(vsign);
+ }
+ else
+ {
+ Trace("nl-ext-cms-debug") << std::endl;
+ Trace("nl-ext-cms")
+ << " failed due to unknown bound for " << vc << std::endl;
+ // should either assign a model bound or eliminate the variable
+ // via substitution
+ Assert(false);
+ return false;
+ }
+ }
+ // whether we will try to minimize/maximize (-1/1) the absolute value
+ int setAbs = (set_lower == has_neg_factor) ? 1 : -1;
+ Trace("nl-ext-cms-debug")
+ << "set absolute value to " << (setAbs == 1 ? "maximal" : "minimal")
+ << std::endl;
+
+ std::vector<Node> vbs;
+ Trace("nl-ext-cms-debug") << "set bounds..." << std::endl;
+ for (unsigned i = 0, size = vars.size(); i < size; i++)
+ {
+ Node vc = vars[i];
+ unsigned vcfact = factors[i];
+ Node l = ls[i];
+ Node u = us[i];
+ bool vc_set_lower;
+ int vcsign = signs[i];
+ Trace("nl-ext-cms-debug")
+ << "Bounds for " << vc << " : " << l << ", " << u
+ << ", sign : " << vcsign << ", factor : " << vcfact << std::endl;
+ if (l == u)
+ {
+ // by convention, always say it is lower if they are the same
+ vc_set_lower = true;
+ Trace("nl-ext-cms-debug")
+ << "..." << vc << " equal bound, set to lower" << std::endl;
+ }
+ else
+ {
+ if (vcfact % 2 == 0)
+ {
+ // minimize or maximize its absolute value
+ Rational la = l.getConst<Rational>().abs();
+ Rational ua = u.getConst<Rational>().abs();
+ if (la == ua)
+ {
+ // by convention, always say it is lower if abs are the same
+ vc_set_lower = true;
+ Trace("nl-ext-cms-debug")
+ << "..." << vc << " equal abs, set to lower" << std::endl;
+ }
+ else
+ {
+ vc_set_lower = (la > ua) == (setAbs == 1);
+ }
+ }
+ else if (signs[i] == 0)
+ {
+ // we choose this index to match the overall set_lower
+ vc_set_lower = set_lower;
+ }
+ else
+ {
+ vc_set_lower = (signs[i] != setAbs);
+ }
+ Trace("nl-ext-cms-debug")
+ << "..." << vc << " set to " << (vc_set_lower ? "lower" : "upper")
+ << std::endl;
+ }
+ // check whether this is a conflicting bound
+ std::map<Node, bool>::iterator itsb = set_bound.find(vc);
+ if (itsb == set_bound.end())
+ {
+ set_bound[vc] = vc_set_lower;
+ }
+ else if (itsb->second != vc_set_lower)
+ {
+ Trace("nl-ext-cms")
+ << " failed due to conflicting bound for " << vc << std::endl;
+ return false;
+ }
+ // must over/under approximate based on vc_set_lower, computed above
+ Node vb = vc_set_lower ? l : u;
+ for (unsigned i2 = 0; i2 < vcfact; i2++)
+ {
+ vbs.push_back(vb);
+ }
+ }
+ if (!simpleSuccess)
+ {
+ break;
+ }
+ Node vbound = vbs.size() == 1 ? vbs[0] : nm->mkNode(MULT, vbs);
+ sum_bound.push_back(ArithMSum::mkCoeffTerm(m.second, vbound));
+ }
+ }
+ // if the exact bound was computed via simple analysis above
+ // make the bound
+ Node bound;
+ if (sum_bound.size() > 1)
+ {
+ bound = nm->mkNode(kind::PLUS, sum_bound);
+ }
+ else if (sum_bound.size() == 1)
+ {
+ bound = sum_bound[0];
+ }
+ else
+ {
+ bound = d_zero;
+ }
+ // make the comparison
+ Node comp = nm->mkNode(kind::GEQ, bound, d_zero);
+ if (!pol)
+ {
+ comp = comp.negate();
+ }
+ Trace("nl-ext-cms") << " comparison is : " << comp << std::endl;
+ comp = Rewriter::rewrite(comp);
+ Assert(comp.isConst());
+ Trace("nl-ext-cms") << " returned : " << comp << std::endl;
+ return comp == d_true;
+}
+
+bool NlModel::getApproximateSqrt(Node c, Node& l, Node& u, unsigned iter) const
+{
+ Assert(c.isConst());
+ if (c == d_one || c == d_zero)
+ {
+ l = c;
+ u = c;
+ return true;
+ }
+ Rational rc = c.getConst<Rational>();
+
+ Rational rl = rc < Rational(1) ? rc : Rational(1);
+ Rational ru = rc < Rational(1) ? Rational(1) : rc;
+ unsigned count = 0;
+ Rational half = Rational(1) / Rational(2);
+ while (count < iter)
+ {
+ Rational curr = half * (rl + ru);
+ Rational curr_sq = curr * curr;
+ if (curr_sq == rc)
+ {
+ rl = curr;
+ ru = curr;
+ break;
+ }
+ else if (curr_sq < rc)
+ {
+ rl = curr;
+ }
+ else
+ {
+ ru = curr;
+ }
+ count++;
+ }
+
+ NodeManager* nm = NodeManager::currentNM();
+ l = nm->mkConst(rl);
+ u = nm->mkConst(ru);
+ return true;
+}
+
+void NlModel::printModelValue(const char* c, Node n, unsigned prec) const
+{
+ if (Trace.isOn(c))
+ {
+ Trace(c) << " " << n << " -> ";
+ for (int i = 1; i >= 0; --i)
+ {
+ std::map<Node, Node>::const_iterator it = d_mv[i].find(n);
+ Assert(it != d_mv[i].end());
+ if (it->second.isConst())
+ {
+ printRationalApprox(c, it->second, prec);
+ }
+ else
+ {
+ Trace(c) << "?";
+ }
+ Trace(c) << (i == 1 ? " [actual: " : " ]");
+ }
+ Trace(c) << std::endl;
+ }
+}
+
+void NlModel::getModelValueRepair(
+ std::map<Node, Node>& arithModel,
+ std::map<Node, std::pair<Node, Node>>& approximations)
+{
+ Trace("nl-model") << "NlModel::getModelValueRepair:" << std::endl;
+
+ // Record the approximations we used. This code calls the
+ // recordApproximation method of the model, which overrides the model
+ // values for variables that we solved for, using techniques specific to
+ // this class.
+ NodeManager* nm = NodeManager::currentNM();
+ for (const std::pair<const Node, std::pair<Node, Node>>& cb :
+ d_check_model_bounds)
+ {
+ Node l = cb.second.first;
+ Node u = cb.second.second;
+ Node pred;
+ Node v = cb.first;
+ if (l != u)
+ {
+ pred = nm->mkNode(AND, nm->mkNode(GEQ, v, l), nm->mkNode(GEQ, u, v));
+ Trace("nl-model") << v << " approximated as " << pred << std::endl;
+ Node witness;
+ if (options::modelWitnessValue())
+ {
+ // witness is the midpoint
+ witness = nm->mkNode(
+ MULT, nm->mkConst(Rational(1, 2)), nm->mkNode(PLUS, l, u));
+ witness = Rewriter::rewrite(witness);
+ Trace("nl-model") << v << " witness is " << witness << std::endl;
+ }
+ approximations[v] = std::pair<Node, Node>(pred, witness);
+ }
+ else
+ {
+ // overwrite
+ arithModel[v] = l;
+ Trace("nl-model") << v << " exact approximation is " << l << std::endl;
+ }
+ }
+ // Also record the exact values we used. An exact value can be seen as a
+ // special kind of approximation of the form (witness x. x = exact_value).
+ // Notice that the above term gets rewritten such that the choice function
+ // is eliminated.
+ for (size_t i = 0, num = d_check_model_vars.size(); i < num; i++)
+ {
+ Node v = d_check_model_vars[i];
+ Node s = d_check_model_subs[i];
+ // overwrite
+ arithModel[v] = s;
+ Trace("nl-model") << v << " solved is " << s << std::endl;
+ }
+
+ // multiplication terms should not be given values; their values are
+ // implied by the monomials that they consist of
+ std::vector<Node> amErase;
+ for (const std::pair<const Node, Node>& am : arithModel)
+ {
+ if (am.first.getKind() == NONLINEAR_MULT)
+ {
+ amErase.push_back(am.first);
+ }
+ }
+ for (const Node& ae : amErase)
+ {
+ arithModel.erase(ae);
+ }
+}
+
+} // namespace nl
+} // namespace arith
+} // namespace theory
+} // namespace CVC4
--- /dev/null
+/********************* */
+/*! \file nl_model.h
+ ** \verbatim
+ ** Top contributors (to current version):
+ ** Andrew Reynolds
+ ** This file is part of the CVC4 project.
+ ** Copyright (c) 2009-2019 by the authors listed in the file AUTHORS
+ ** in the top-level source directory) and their institutional affiliations.
+ ** All rights reserved. See the file COPYING in the top-level source
+ ** directory for licensing information.\endverbatim
+ **
+ ** \brief Model object for the non-linear extension class
+ **/
+
+#ifndef CVC4__THEORY__ARITH__NL__NL_MODEL_H
+#define CVC4__THEORY__ARITH__NL__NL_MODEL_H
+
+#include <map>
+#include <unordered_map>
+#include <vector>
+
+#include "context/cdo.h"
+#include "context/context.h"
+#include "expr/kind.h"
+#include "expr/node.h"
+#include "theory/theory_model.h"
+
+namespace CVC4 {
+namespace theory {
+namespace arith {
+namespace nl {
+
+class NonlinearExtension;
+
+/** Non-linear model finder
+ *
+ * This class is responsible for all queries related to the (candidate) model
+ * that is being processed by the non-linear arithmetic solver. It further
+ * implements techniques for finding modifications to the current candidate
+ * model in the case it can determine that a model exists. These include
+ * techniques based on solving (quadratic) equations and bound analysis.
+ */
+class NlModel
+{
+ friend class NonlinearExtension;
+
+ public:
+ NlModel(context::Context* c);
+ ~NlModel();
+ /** reset
+ *
+ * This method is called once at the beginning of a last call effort check,
+ * where m is the model of the theory of arithmetic. This method resets the
+ * cache of computed model values.
+ */
+ void reset(TheoryModel* m, std::map<Node, Node>& arithModel);
+ /** reset check
+ *
+ * This method is called when the non-linear arithmetic solver restarts
+ * its computation of lemmas and models during a last call effort check.
+ */
+ void resetCheck();
+ /** compute model value
+ *
+ * This computes model values for terms based on two semantics, a "concrete"
+ * semantics and an "abstract" semantics.
+ *
+ * if isConcrete is true, this means compute the value of n based on its
+ * children recursively. (we call this its "concrete" value)
+ * if isConcrete is false, this means look up the value of n in the model.
+ * (we call this its "abstract" value)
+ * In other words, !isConcrete treats multiplication terms and transcendental
+ * function applications as variables, whereas isConcrete computes their
+ * actual values based on the semantics of multiplication. This is a key
+ * distinction used in the model-based refinement scheme in Cimatti et al.
+ * TACAS 2017.
+ *
+ * For example, if M( a ) = 2, M( b ) = 3, M( a*b ) = 5, i.e. the variable
+ * for a*b has been assigned a value 5 by the linear solver, then :
+ *
+ * computeModelValue( a*b, true ) =
+ * computeModelValue( a, true )*computeModelValue( b, true ) = 2*3 = 6
+ * whereas:
+ * computeModelValue( a*b, false ) = 5
+ */
+ Node computeConcreteModelValue(Node n);
+ Node computeAbstractModelValue(Node n);
+ Node computeModelValue(Node n, bool isConcrete);
+
+ /** Compare arithmetic terms i and j based on an ordering.
+ *
+ * This returns:
+ * -1 if i < j, 1 if i > j, or 0 if i == j
+ *
+ * If isConcrete is true, we consider the concrete model values of i and j,
+ * otherwise, we consider their abstract model values. For definitions of
+ * concrete vs abstract model values, see NlModel::computeModelValue.
+ *
+ * If isAbsolute is true, we compare the absolute value of the above
+ * values.
+ */
+ int compare(Node i, Node j, bool isConcrete, bool isAbsolute);
+ /** Compare arithmetic terms i and j based on an ordering.
+ *
+ * This returns:
+ * -1 if i < j, 1 if i > j, or 0 if i == j
+ *
+ * If isAbsolute is true, we compare the absolute value of i and j
+ */
+ int compareValue(Node i, Node j, bool isAbsolute) const;
+
+ //------------------------------ recording model substitutions and bounds
+ /** add check model substitution
+ *
+ * Adds the model substitution v -> s. This applies the substitution
+ * { v -> s } to each term in d_check_model_subs and adds v,s to
+ * d_check_model_vars and d_check_model_subs respectively.
+ * If this method returns false, then the substitution v -> s is inconsistent
+ * with the current substitution and bounds.
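+ *
+ * For example, if the current substitution is { x -> y+1 } and we add the
+ * substitution y -> 2, then the stored substitution becomes
+ * { x -> 3, y -> 2 }.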
+ */
+ bool addCheckModelSubstitution(TNode v, TNode s);
+ /** add check model bound
+ *
+ * Adds the bound x -> < l, u > to the map above, and records the
+ * approximation ( x, l <= x <= u ) in the model. This method returns false
+ * if the bound is inconsistent with the current model substitution or
+ * bounds.
+ */
+ bool addCheckModelBound(TNode v, TNode l, TNode u);
+ /** has check model assignment
+ *
+ * Have we assigned v in the current checkModel(...) call?
+ *
+ * This method returns true if variable v is in the domain of
+ * d_check_model_bounds or if it occurs in d_check_model_vars.
+ */
+ bool hasCheckModelAssignment(Node v) const;
+ /** Check model
+ *
+ * Checks the current model based on solving for equalities, and using error
+ * bounds on the Taylor approximation.
+ *
+ * If this function returns true, then all assertions in the input argument
+ * "assertions" are satisfied for all interpretations of variables within
+ * their computed bounds (as stored in d_check_model_bounds).
+ *
+ * For details, see Section 3 of Cimatti et al CADE 2017 under the heading
+ * "Detecting Satisfiable Formulas".
+ *
+ * d is a degree indicating how precise our computations are.
+ */
+ bool checkModel(const std::vector<Node>& assertions,
+ const std::vector<Node>& false_asserts,
+ unsigned d,
+ std::vector<Node>& lemmas,
+ std::vector<Node>& gs);
+ /**
+ * Set that we have used an approximation during this check. This flag is
+ * reset on a call to resetCheck. It is set when we use reasoning that
+ * is limited by a degree of precision we are using. In other words, if we
+ * used an approximation, then we might still be able to establish a lemma or
+ * determine that the input is SAT if we increased our precision.
+ */
+ void setUsedApproximate();
+ /** Did we use an approximation during this check? */
+ bool usedApproximate() const;
+ /** Set tautology
+ *
+ * This states that formula n is a tautology (satisfied in all models).
+ * We call this on internally generated lemmas. This method computes a
+ * set of literals that are implied by n, that are hence tautological
+ * as well, such as:
+ * l_pi <= real.pi <= u_pi (pi approximations)
+ * sin(x) = -1*sin(-x)
+ * where these literals are internally generated for the purposes
+ * of guiding the models of the linear solver.
+ *
+ * TODO (cvc4-projects #113): it would be helpful if we could do this even
+ * more aggressively by ignoring all internally generated literals.
+ *
+ * Tautological literals do not need be checked during checkModel.
+ */
+ void addTautology(Node n);
+ //------------------------------ end recording model substitutions and bounds
+
+ /** print model value, for debugging.
+ *
+ * This prints both the abstract and concrete model values for arithmetic
+ * term n on Trace c with precision prec.
+ */
+ void printModelValue(const char* c, Node n, unsigned prec = 5) const;
+ /** get model value repair
+ *
+ * This gets mappings that indicate how to repair the model generated by the
+ * linear arithmetic solver. This method should be called after a successful
+ * call to checkModel above.
+ *
+ * The mapping arithModel is updated by this method to map arithmetic terms v
+ * to their (exact) value that was computed during checkModel; the mapping
+ * approximations is updated to store approximate values in the form of a
+ * pair (P, w), where P is a predicate that describes the possible values of
+ * v and w is a witness point that satisfies this predicate.
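+ *
+ * For example, if checkModel bounded a variable x by the interval
+ * [1/2, 3/4], then approximations[x] is set to (1/2 <= x <= 3/4, w), where
+ * the witness w is the midpoint 5/8 if the modelWitnessValue option is
+ * enabled and the null node otherwise.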
+ */
+ void getModelValueRepair(
+ std::map<Node, Node>& arithModel,
+ std::map<Node, std::pair<Node, Node>>& approximations);
+
+ private:
+ /** The current model */
+ TheoryModel* d_model;
+ /** Get the value of n in the arithmetic model, or zero if it has none */
+ Node getValueInternal(Node n) const;
+ /** Does the arithmetic model assign a value to term n? */
+ bool hasTerm(Node n) const;
+ /** Get the representative of n in the model */
+ Node getRepresentative(Node n) const;
+
+ //---------------------------check model
+ /** solve equality simple
+ *
+ * This method is used during checkModel(...). It takes as input an
+ * equality eq. If it returns true, then eq is correct-by-construction based
+ * on the information stored in our model representation (see
+ * d_check_model_vars, d_check_model_subs, d_check_model_bounds), and eq
+ * is added to d_check_model_solved. The equality eq may involve any
+ * number of variables, and monomials of arbitrary degree. If this method
+ * returns false, then we did not show that the equality was true in the
+ * model. This method uses incomplete techniques based on interval
+ * analysis and quadratic equation solving.
+ *
+ * If it can be shown that the equality must be false in the current
+ * model, then we may add a lemma to lemmas explaining why this is the case.
+ * For instance, if eq reduces to a univariate quadratic equation with no
+ * root, we send a conflict clause of the form a*x^2 + b*x + c != 0.
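+ *
+ * As a further illustration, an equality like x*x = 2 may be handled by
+ * computing rational bounds l <= sqrt(2) <= u (see getApproximateSqrt
+ * below) and recording an approximate bound for x in d_check_model_bounds.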
+ */
+ bool solveEqualitySimple(Node eq, unsigned d, std::vector<Node>& lemmas);
+
+ /** simple check model for transcendental functions for literal
+ *
+ * This method returns true if literal is true for all interpretations of
+ * transcendental functions within their error bounds (as stored
+ * in d_check_model_bounds). This is determined by a simple under/over
+ * approximation of the value of sum of (linear) monomials. For example,
+ * if we determine that .8 < sin( 1 ) < .9, this function will return
+ * true for literals like:
+ * 2.0*sin( 1 ) > 1.5
+ * -1.0*sin( 1 ) < -0.79
+ * -1.0*sin( 1 ) > -0.91
+ * sin( 1 )*sin( 1 ) + sin( 1 ) > 0.0
+ * It will return false for literals like:
+ * sin( 1 ) > 0.85
+ * It will also return false for literals like:
+ * -0.3*sin( 1 )*sin( 2 ) + sin( 2 ) > .7
+ * sin( sin( 1 ) ) > .5
+ * since the bounds on these terms cannot quickly be determined.
+ */
+ bool simpleCheckModelLit(Node lit);
+ bool simpleCheckModelMsum(const std::map<Node, Node>& msum, bool pol);
+ //---------------------------end check model
+ /** get approximate sqrt
+ *
+ * This approximates the square root of positive constant c. If this method
+ * returns true, then l and u are updated to constants such that
+ * l <= sqrt( c ) <= u
+ * The argument iter is the number of iterations in the binary search to
+ * perform. By default, this is set to 15, which is usually precise enough
+ * for the majority of simple cases, while not being prohibitively
+ * expensive to compute.
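+ *
+ * As a rough illustration of the bisection (the exact search strategy is
+ * an implementation detail): for c = 2, successive intervals could be
+ * [1, 2], [1, 3/2], [5/4, 3/2], [11/8, 3/2], giving l = 11/8 and u = 3/2
+ * after three iterations.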
+ */
+ bool getApproximateSqrt(Node c, Node& l, Node& u, unsigned iter = 15) const;
+
+ /** commonly used terms */
+ Node d_zero;
+ Node d_one;
+ Node d_two;
+ Node d_true;
+ Node d_false;
+ Node d_null;
+ /**
+ * The values that the arithmetic theory solver assigned in the model. This
+ * corresponds to exactly the set of equalities that TheoryArith is currently
+ * sending to TheoryModel during collectModelInfo.
+ */
+ std::map<Node, Node> d_arithVal;
+ /** cache of model values
+ *
+ * Stores the concrete/abstract model values. This is a cache of the
+ * computeModelValue method.
+ */
+ std::map<Node, Node> d_mv[2];
+ /**
+ * A substitution from variables that appear in assertions to a solved form
+ * term. These vectors are ordered in the form:
+ * x_1 -> t_1 ... x_n -> t_n
+ * where x_i is not in the free variables of t_j for j>=i.
+ */
+ std::vector<Node> d_check_model_vars;
+ std::vector<Node> d_check_model_subs;
+ /** lower and upper bounds for check model
+ *
+ * For each term t in the domain of this map, if this stores the pair
+ * (c_l, c_u) then the model M is such that c_l <= M( t ) <= c_u.
+ *
+ * We add terms whose value is approximated in the model to this map, which
+ * includes:
+ * (1) applications of transcendental functions, whose value is approximated
+ * by the Taylor series,
+ * (2) variables we have solved quadratic equations for, whose value
+ * involves approximations of square roots.
+ */
+ std::map<Node, std::pair<Node, Node>> d_check_model_bounds;
+ /**
+ * The map from literals that our model construction solved, to the variable
+ * that was solved for. Examples of such literals are:
+ * (1) Equalities x = t, which we turned into a model substitution x -> t,
+ * where x not in FV( t ), and
+ * (2) Equalities a*x*x + b*x + c = 0, which we turned into a model bound
+ * (-b + s*sqrt(b*b-4*a*c))/(2*a) - E
+ * <= x <= (-b + s*sqrt(b*b-4*a*c))/(2*a) + E.
+ *
+ * These literals are exempt from check-model, since they are satisfied by
+ * definition of our model construction.
+ */
+ std::unordered_map<Node, Node, NodeHashFunction> d_check_model_solved;
+ /** did we use an approximation on this call to last-call effort? */
+ bool d_used_approx;
+ /** the set of all tautological literals */
+ std::unordered_set<Node, NodeHashFunction> d_tautology;
+}; /* class NlModel */
+
+} // namespace nl
+} // namespace arith
+} // namespace theory
+} // namespace CVC4
+
+#endif /* CVC4__THEORY__ARITH__NL__NL_MODEL_H */
--- /dev/null
+/********************* */
+/*! \file nl_monomial.cpp
+ ** \verbatim
+ ** Top contributors (to current version):
+ ** Andrew Reynolds
+ ** This file is part of the CVC4 project.
+ ** Copyright (c) 2009-2019 by the authors listed in the file AUTHORS
+ ** in the top-level source directory) and their institutional affiliations.
+ ** All rights reserved. See the file COPYING in the top-level source
+ ** directory for licensing information.\endverbatim
+ **
+ ** \brief Implementation of utilities for monomials
+ **/
+
+#include "theory/arith/nl/nl_monomial.h"
+
+#include "theory/arith/arith_utilities.h"
+#include "theory/arith/nl/nl_lemma_utils.h"
+#include "theory/rewriter.h"
+
+using namespace CVC4::kind;
+
+namespace CVC4 {
+namespace theory {
+namespace arith {
+namespace nl {
+
+// Returns a[key] if key is in a or value otherwise.
+unsigned getCountWithDefault(const NodeMultiset& a, Node key, unsigned value)
+{
+ NodeMultiset::const_iterator it = a.find(key);
+ return (it == a.end()) ? value : it->second;
+}
+// Given two multisets return the multiset difference a \ b.
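+// e.g., diffMultiset({x -> 3, y -> 1}, {x -> 1}) = {x -> 2, y -> 1}.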
+NodeMultiset diffMultiset(const NodeMultiset& a, const NodeMultiset& b)
+{
+ NodeMultiset difference;
+ for (NodeMultiset::const_iterator it_a = a.begin(); it_a != a.end(); ++it_a)
+ {
+ Node key = it_a->first;
+ const unsigned a_value = it_a->second;
+ const unsigned b_value = getCountWithDefault(b, key, 0);
+ if (a_value > b_value)
+ {
+ difference[key] = a_value - b_value;
+ }
+ }
+ return difference;
+}
+
+// Return a vector containing a[key] repetitions of each key in the multiset a.
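+// e.g., ExpandMultiset({x -> 2, y -> 1}) = [x, x, y].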
+std::vector<Node> ExpandMultiset(const NodeMultiset& a)
+{
+ std::vector<Node> expansion;
+ for (NodeMultiset::const_iterator it_a = a.begin(); it_a != a.end(); ++it_a)
+ {
+ expansion.insert(expansion.end(), it_a->second, it_a->first);
+ }
+ return expansion;
+}
+
+// status 0 : n equal, -1 : n superset, 1 : n subset
+void MonomialIndex::addTerm(Node n,
+ const std::vector<Node>& reps,
+ MonomialDb* nla,
+ int status,
+ unsigned argIndex)
+{
+ if (status == 0)
+ {
+ if (argIndex == reps.size())
+ {
+ d_monos.push_back(n);
+ }
+ else
+ {
+ d_data[reps[argIndex]].addTerm(n, reps, nla, status, argIndex + 1);
+ }
+ }
+ for (std::map<Node, MonomialIndex>::iterator it = d_data.begin();
+ it != d_data.end();
+ ++it)
+ {
+ if (status != 0 || argIndex == reps.size() || it->first != reps[argIndex])
+ {
+ // if we do not contain this variable, then if we were a superset,
+ // fail (-2), otherwise we are subset. if we do contain this
+ // variable, then if we were equal, we are superset since variables
+ // are ordered, otherwise we remain the same.
+ int new_status =
+ std::find(reps.begin(), reps.end(), it->first) == reps.end()
+ ? (status >= 0 ? 1 : -2)
+ : (status == 0 ? -1 : status);
+ if (new_status != -2)
+ {
+ it->second.addTerm(n, reps, nla, new_status, argIndex);
+ }
+ }
+ }
+ // compare for subsets
+ for (unsigned i = 0; i < d_monos.size(); i++)
+ {
+ Node m = d_monos[i];
+ if (m != n)
+ {
+ // we are superset if we are equal and haven't traversed all variables
+ int cstatus = status == 0 ? (argIndex == reps.size() ? 0 : -1) : status;
+ Trace("nl-ext-mindex-debug") << " compare " << n << " and " << m
+ << ", status = " << cstatus << std::endl;
+ if (cstatus <= 0 && nla->isMonomialSubset(m, n))
+ {
+ nla->registerMonomialSubset(m, n);
+ Trace("nl-ext-mindex-debug") << "...success" << std::endl;
+ }
+ else if (cstatus >= 0 && nla->isMonomialSubset(n, m))
+ {
+ nla->registerMonomialSubset(n, m);
+ Trace("nl-ext-mindex-debug") << "...success (rev)" << std::endl;
+ }
+ }
+ }
+}
+
+MonomialDb::MonomialDb()
+{
+ d_one = NodeManager::currentNM()->mkConst(Rational(1));
+}
+
+void MonomialDb::registerMonomial(Node n)
+{
+ if (std::find(d_monomials.begin(), d_monomials.end(), n) != d_monomials.end())
+ {
+ return;
+ }
+ d_monomials.push_back(n);
+ Trace("nl-ext-debug") << "Register monomial : " << n << std::endl;
+ Kind k = n.getKind();
+ if (k == NONLINEAR_MULT)
+ {
+ // get exponent count
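+ // e.g., for n = x*x*y this records d_m_exp[n] = {x -> 2, y -> 1},
+ // d_m_vlist[n] = [x, y] and d_m_degree[n] = 3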
+ unsigned nchild = n.getNumChildren();
+ for (unsigned i = 0; i < nchild; i++)
+ {
+ d_m_exp[n][n[i]]++;
+ if (i == 0 || n[i] != n[i - 1])
+ {
+ d_m_vlist[n].push_back(n[i]);
+ }
+ }
+ d_m_degree[n] = nchild;
+ }
+ else if (n == d_one)
+ {
+ d_m_exp[n].clear();
+ d_m_vlist[n].clear();
+ d_m_degree[n] = 0;
+ }
+ else
+ {
+ Assert(k != PLUS && k != MULT);
+ d_m_exp[n][n] = 1;
+ d_m_vlist[n].push_back(n);
+ d_m_degree[n] = 1;
+ }
+ std::sort(d_m_vlist[n].begin(), d_m_vlist[n].end());
+ Trace("nl-ext-mindex") << "Add monomial to index : " << n << std::endl;
+ d_m_index.addTerm(n, d_m_vlist[n], this);
+}
+
+void MonomialDb::registerMonomialSubset(Node a, Node b)
+{
+ Assert(isMonomialSubset(a, b));
+
+ const NodeMultiset& a_exponent_map = getMonomialExponentMap(a);
+ const NodeMultiset& b_exponent_map = getMonomialExponentMap(b);
+
+ std::vector<Node> diff_children =
+ ExpandMultiset(diffMultiset(b_exponent_map, a_exponent_map));
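+ // e.g., for a = x*y and b = x*y*y*z, diff_children is [y, z], and the
+ // difference terms constructed below are y*z (as MULT and NONLINEAR_MULT)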
+ Assert(!diff_children.empty());
+
+ d_m_contain_parent[a].push_back(b);
+ d_m_contain_children[b].push_back(a);
+
+ Node mult_term = safeConstructNary(MULT, diff_children);
+ Node nlmult_term = safeConstructNary(NONLINEAR_MULT, diff_children);
+ d_m_contain_mult[a][b] = mult_term;
+ d_m_contain_umult[a][b] = nlmult_term;
+ Trace("nl-ext-mindex") << "..." << a << " is a subset of " << b
+ << ", difference is " << mult_term << std::endl;
+}
+
+bool MonomialDb::isMonomialSubset(Node am, Node bm) const
+{
+ const NodeMultiset& a = getMonomialExponentMap(am);
+ const NodeMultiset& b = getMonomialExponentMap(bm);
+ for (NodeMultiset::const_iterator it_a = a.begin(); it_a != a.end(); ++it_a)
+ {
+ Node key = it_a->first;
+ const unsigned a_value = it_a->second;
+ const unsigned b_value = getCountWithDefault(b, key, 0);
+ if (a_value > b_value)
+ {
+ return false;
+ }
+ }
+ return true;
+}
+
+const NodeMultiset& MonomialDb::getMonomialExponentMap(Node monomial) const
+{
+ MonomialExponentMap::const_iterator it = d_m_exp.find(monomial);
+ Assert(it != d_m_exp.end());
+ return it->second;
+}
+
+unsigned MonomialDb::getExponent(Node monomial, Node v) const
+{
+ MonomialExponentMap::const_iterator it = d_m_exp.find(monomial);
+ if (it == d_m_exp.end())
+ {
+ return 0;
+ }
+ std::map<Node, unsigned>::const_iterator itv = it->second.find(v);
+ if (itv == it->second.end())
+ {
+ return 0;
+ }
+ return itv->second;
+}
+
+const std::vector<Node>& MonomialDb::getVariableList(Node monomial) const
+{
+ std::map<Node, std::vector<Node> >::const_iterator itvl =
+ d_m_vlist.find(monomial);
+ Assert(itvl != d_m_vlist.end());
+ return itvl->second;
+}
+
+unsigned MonomialDb::getDegree(Node monomial) const
+{
+ std::map<Node, unsigned>::const_iterator it = d_m_degree.find(monomial);
+ Assert(it != d_m_degree.end());
+ return it->second;
+}
+
+void MonomialDb::sortByDegree(std::vector<Node>& ms) const
+{
+ SortNonlinearDegree snlad(d_m_degree);
+ std::sort(ms.begin(), ms.end(), snlad);
+}
+
+void MonomialDb::sortVariablesByModel(std::vector<Node>& ms, NlModel& m)
+{
+ SortNlModel smv;
+ smv.d_nlm = &m;
+ smv.d_isConcrete = false;
+ smv.d_isAbsolute = true;
+ smv.d_reverse_order = true;
+ for (const Node& msc : ms)
+ {
+ std::sort(d_m_vlist[msc].begin(), d_m_vlist[msc].end(), smv);
+ }
+}
+
+const std::map<Node, std::vector<Node> >& MonomialDb::getContainsChildrenMap()
+{
+ return d_m_contain_children;
+}
+
+const std::map<Node, std::vector<Node> >& MonomialDb::getContainsParentMap()
+{
+ return d_m_contain_parent;
+}
+
+Node MonomialDb::getContainsDiff(Node a, Node b) const
+{
+ std::map<Node, std::map<Node, Node> >::const_iterator it =
+ d_m_contain_mult.find(a);
+ if (it == d_m_contain_mult.end())
+ {
+ return Node::null();
+ }
+ std::map<Node, Node>::const_iterator it2 = it->second.find(b);
+ if (it2 == it->second.end())
+ {
+ return Node::null();
+ }
+ return it2->second;
+}
+
+Node MonomialDb::getContainsDiffNl(Node a, Node b) const
+{
+ std::map<Node, std::map<Node, Node> >::const_iterator it =
+ d_m_contain_umult.find(a);
+ if (it == d_m_contain_umult.end())
+ {
+ return Node::null();
+ }
+ std::map<Node, Node>::const_iterator it2 = it->second.find(b);
+ if (it2 == it->second.end())
+ {
+ return Node::null();
+ }
+ return it2->second;
+}
+
+Node MonomialDb::mkMonomialRemFactor(Node n,
+ const NodeMultiset& n_exp_rem) const
+{
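+ // For illustration: if n = x*x*y and n_exp_rem = {x -> 1}, the remaining
+ // factors are one x and one y, so the (rewritten) result is x*y.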
+ std::vector<Node> children;
+ const NodeMultiset& exponent_map = getMonomialExponentMap(n);
+ for (NodeMultiset::const_iterator itme2 = exponent_map.begin();
+ itme2 != exponent_map.end();
+ ++itme2)
+ {
+ Node v = itme2->first;
+ unsigned inc = itme2->second;
+ Trace("nl-ext-mono-factor")
+ << "..." << inc << " factors of " << v << std::endl;
+ unsigned count_in_n_exp_rem = getCountWithDefault(n_exp_rem, v, 0);
+ Assert(count_in_n_exp_rem <= inc);
+ inc -= count_in_n_exp_rem;
+ Trace("nl-ext-mono-factor")
+ << "......rem, now " << inc << " factors of " << v << std::endl;
+ children.insert(children.end(), inc, v);
+ }
+ Node ret = safeConstructNary(MULT, children);
+ ret = Rewriter::rewrite(ret);
+ Trace("nl-ext-mono-factor") << "...return : " << ret << std::endl;
+ return ret;
+}
+
+} // namespace nl
+} // namespace arith
+} // namespace theory
+} // namespace CVC4
--- /dev/null
+/********************* */
+/*! \file nl_monomial.h
+ ** \verbatim
+ ** Top contributors (to current version):
+ ** Andrew Reynolds, Tim King
+ ** This file is part of the CVC4 project.
+ ** Copyright (c) 2009-2019 by the authors listed in the file AUTHORS
+ ** in the top-level source directory) and their institutional affiliations.
+ ** All rights reserved. See the file COPYING in the top-level source
+ ** directory for licensing information.\endverbatim
+ **
+ ** \brief Utilities for monomials
+ **/
+
+#ifndef CVC4__THEORY__ARITH__NL__NL_MONOMIAL_H
+#define CVC4__THEORY__ARITH__NL__NL_MONOMIAL_H
+
+#include <map>
+#include <vector>
+
+#include "expr/node.h"
+
+namespace CVC4 {
+namespace theory {
+namespace arith {
+namespace nl {
+
+class MonomialDb;
+class NlModel;
+
+typedef std::map<Node, unsigned> NodeMultiset;
+typedef std::map<Node, NodeMultiset> MonomialExponentMap;
+
+/** An index data structure for node multisets (monomials) */
+class MonomialIndex
+{
+ public:
+ /**
+ * Add term to this trie. The argument status indicates what the status
+ * of n is with respect to the current node in the trie, where:
+ * 0 : n is equal, -1 : n is superset, 1 : n is subset
+ * of the node described by the current path in the trie.
+ */
+ void addTerm(Node n,
+ const std::vector<Node>& reps,
+ MonomialDb* nla,
+ int status = 0,
+ unsigned argIndex = 0);
+
+ private:
+ /** The children of this node */
+ std::map<Node, MonomialIndex> d_data;
+ /** The monomials at this node */
+ std::vector<Node> d_monos;
+}; /* class MonomialIndex */
+
+/** Context-independent database for monomial information */
+class MonomialDb
+{
+ public:
+ MonomialDb();
+ ~MonomialDb() {}
+ /** register monomial */
+ void registerMonomial(Node n);
+ /**
+ * Register monomial subset. This method is called when we infer that a is
+ * a (multiset) subset of monomial b, e.g. x*y^2 is a subset of x^3*y^2*z.
+ */
+ void registerMonomialSubset(Node a, Node b);
+ /**
+ * returns true if the multiset containing the
+ * factors of monomial a is a subset of the multiset
+ * containing the factors of monomial b.
+ */
+ bool isMonomialSubset(Node a, Node b) const;
+ /** Returns the NodeMultiset for a registered monomial. */
+ const NodeMultiset& getMonomialExponentMap(Node monomial) const;
+ /** Returns the exponent of variable v in the given monomial */
+ unsigned getExponent(Node monomial, Node v) const;
+ /** Get the list of unique variables in the monomial */
+ const std::vector<Node>& getVariableList(Node monomial) const;
+ /** Get degree of monomial, e.g. the degree of x^2*y^2 is 4 */
+ unsigned getDegree(Node monomial) const;
+ /** Sort monomials in ms by their degree
+ *
+ * Updates ms so that degree(ms[i]) <= degree(ms[j]) for i <= j.
+ */
+ void sortByDegree(std::vector<Node>& ms) const;
+ /** Sort the variable lists based on model values
+ *
+ * This updates the variable lists of monomials in ms based on the absolute
+ * value of their current model values in m.
+ *
+ * In other words, for each i, getVariableList(ms[i]) returns
+ * v1, ..., vn where |m(v1)| <= ... <= |m(vn)| after this method is invoked.
+ */
+ void sortVariablesByModel(std::vector<Node>& ms, NlModel& m);
+ /** Get monomial contains children map
+ *
+ * This maps monomials to other monomials that are contained in them, e.g.
+ * x^2 * y may map to { x, x^2, y } if these three terms have been
+ * registered with this class.
+ */
+ const std::map<Node, std::vector<Node> >& getContainsChildrenMap();
+ /** Get monomial contains parent map, reverse of above */
+ const std::map<Node, std::vector<Node> >& getContainsParentMap();
+ /**
+ * Get contains difference. Return the difference of b and a, or null if it
+ * does not exist. In other words, this returns a term equivalent to b/a
+ * that does not contain division.
+ */
+ Node getContainsDiff(Node a, Node b) const;
+ /**
+ * Get contains difference non-linear. Same as above, but stores terms of kind
+ * NONLINEAR_MULT instead of MULT.
+ */
+ Node getContainsDiffNl(Node a, Node b) const;
+ /** Make monomial remainder factor: the product of the factors of n not
+ * counted in n_exp_rem */
+ Node mkMonomialRemFactor(Node n, const NodeMultiset& n_exp_rem) const;
+
+ private:
+ /** commonly used terms */
+ Node d_one;
+ /** list of all monomials */
+ std::vector<Node> d_monomials;
+ /** Map from monomials to their exponent map (variable -> exponent). */
+ MonomialExponentMap d_m_exp;
+ /**
+ * Mapping from monomials to the list of variables that occur in it. For
+ * example, x*x*y*z -> { x, y, z }.
+ */
+ std::map<Node, std::vector<Node> > d_m_vlist;
+ /** Degree information */
+ std::map<Node, unsigned> d_m_degree;
+ /** monomial index, by sorted variables */
+ MonomialIndex d_m_index;
+ /** containment ordering */
+ std::map<Node, std::vector<Node> > d_m_contain_children;
+ std::map<Node, std::vector<Node> > d_m_contain_parent;
+ std::map<Node, std::map<Node, Node> > d_m_contain_mult;
+ std::map<Node, std::map<Node, Node> > d_m_contain_umult;
+};
+
+} // namespace nl
+} // namespace arith
+} // namespace theory
+} // namespace CVC4
+
+#endif /* CVC4__THEORY__ARITH__NL__NL_MONOMIAL_H */
--- /dev/null
+/********************* */
+/*! \file nl_solver.cpp
+ ** \verbatim
+ ** Top contributors (to current version):
+ ** Andrew Reynolds
+ ** This file is part of the CVC4 project.
+ ** Copyright (c) 2009-2019 by the authors listed in the file AUTHORS
+ ** in the top-level source directory) and their institutional affiliations.
+ ** All rights reserved. See the file COPYING in the top-level source
+ ** directory for licensing information.\endverbatim
+ **
+ ** \brief Implementation of non-linear solver
+ **/
+
+#include "theory/arith/nl/nl_solver.h"
+
+#include "options/arith_options.h"
+#include "theory/arith/arith_msum.h"
+#include "theory/arith/arith_utilities.h"
+#include "theory/arith/theory_arith.h"
+#include "theory/theory_model.h"
+
+using namespace CVC4::kind;
+
+namespace CVC4 {
+namespace theory {
+namespace arith {
+namespace nl {
+
+void debugPrintBound(const char* c, Node coeff, Node x, Kind type, Node rhs)
+{
+ Node t = ArithMSum::mkCoeffTerm(coeff, x);
+ Trace(c) << t << " " << type << " " << rhs;
+}
+
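+// Returns true if n contains a nonlinear monomial (a NONLINEAR_MULT term)
+// that does not already occur in the list existing.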
+bool hasNewMonomials(Node n, const std::vector<Node>& existing)
+{
+ std::set<Node> visited;
+
+ std::vector<Node> worklist;
+ worklist.push_back(n);
+ while (!worklist.empty())
+ {
+ Node current = worklist.back();
+ worklist.pop_back();
+ if (visited.find(current) == visited.end())
+ {
+ visited.insert(current);
+ if (current.getKind() == NONLINEAR_MULT)
+ {
+ if (std::find(existing.begin(), existing.end(), current)
+ == existing.end())
+ {
+ return true;
+ }
+ }
+ else
+ {
+ worklist.insert(worklist.end(), current.begin(), current.end());
+ }
+ }
+ }
+ return false;
+}
+
+NlSolver::NlSolver(TheoryArith& containing, NlModel& model)
+ : d_containing(containing),
+ d_model(model),
+ d_cdb(d_mdb),
+ d_zero_split(containing.getUserContext())
+{
+ NodeManager* nm = NodeManager::currentNM();
+ d_true = nm->mkConst(true);
+ d_false = nm->mkConst(false);
+ d_zero = nm->mkConst(Rational(0));
+ d_one = nm->mkConst(Rational(1));
+ d_neg_one = nm->mkConst(Rational(-1));
+ d_order_points.push_back(d_neg_one);
+ d_order_points.push_back(d_zero);
+ d_order_points.push_back(d_one);
+}
+
+NlSolver::~NlSolver() {}
+
+void NlSolver::initLastCall(const std::vector<Node>& assertions,
+ const std::vector<Node>& false_asserts,
+ const std::vector<Node>& xts)
+{
+ d_ms_vars.clear();
+ d_ms_proc.clear();
+ d_ms.clear();
+ d_mterms.clear();
+ d_m_nconst_factor.clear();
+ d_tplane_refine.clear();
+ d_ci.clear();
+ d_ci_exp.clear();
+ d_ci_max.clear();
+
+ Trace("nl-ext-mv") << "Extended terms : " << std::endl;
+ // for computing congruence
+ std::map<Kind, ArgTrie> argTrie;
+ for (unsigned i = 0, xsize = xts.size(); i < xsize; i++)
+ {
+ Node a = xts[i];
+ d_model.computeConcreteModelValue(a);
+ d_model.computeAbstractModelValue(a);
+ d_model.printModelValue("nl-ext-mv", a);
+ Kind ak = a.getKind();
+ if (ak == NONLINEAR_MULT)
+ {
+ d_ms.push_back(a);
+
+ // context-independent registration
+ d_mdb.registerMonomial(a);
+
+ const std::vector<Node>& varList = d_mdb.getVariableList(a);
+ for (const Node& v : varList)
+ {
+ if (std::find(d_ms_vars.begin(), d_ms_vars.end(), v) == d_ms_vars.end())
+ {
+ d_ms_vars.push_back(v);
+ }
+ Node mvk = d_model.computeAbstractModelValue(v);
+ if (!mvk.isConst())
+ {
+ d_m_nconst_factor[a] = true;
+ }
+ }
+ // mark processed if it has a "one" factor (will look at reduced monomial)?
+ }
+ }
+
+ // register constants
+ d_mdb.registerMonomial(d_one);
+ for (unsigned j = 0; j < d_order_points.size(); j++)
+ {
+ Node c = d_order_points[j];
+ d_model.computeConcreteModelValue(c);
+ d_model.computeAbstractModelValue(c);
+ }
+
+ // register variables
+ Trace("nl-ext-mv") << "Variables in monomials : " << std::endl;
+ for (unsigned i = 0; i < d_ms_vars.size(); i++)
+ {
+ Node v = d_ms_vars[i];
+ d_mdb.registerMonomial(v);
+ d_model.computeConcreteModelValue(v);
+ d_model.computeAbstractModelValue(v);
+ d_model.printModelValue("nl-ext-mv", v);
+ }
+
+ Trace("nl-ext") << "We have " << d_ms.size() << " monomials." << std::endl;
+}
+
+void NlSolver::setMonomialFactor(Node a, Node b, const NodeMultiset& common)
+{
+ // Could not tell if this was being inserted intentionally or not.
+ std::map<Node, Node>& mono_diff_a = d_mono_diff[a];
+ if (mono_diff_a.find(b) == mono_diff_a.end())
+ {
+ Trace("nl-ext-mono-factor")
+ << "Set monomial factor for " << a << "/" << b << std::endl;
+ mono_diff_a[b] = d_mdb.mkMonomialRemFactor(a, common);
+ }
+}
+
+std::vector<Node> NlSolver::checkSplitZero()
+{
+ std::vector<Node> lemmas;
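+ // For each monomial variable we have not yet split on (tracked in the
+ // user-context-dependent set d_zero_split), add the zero splitting lemma
+ // (v = 0) OR NOT (v = 0), and request that the literal (v = 0) be decided
+ // with positive phase first.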
+ for (unsigned i = 0; i < d_ms_vars.size(); i++)
+ {
+ Node v = d_ms_vars[i];
+ if (d_zero_split.insert(v))
+ {
+ Node eq = v.eqNode(d_zero);
+ eq = Rewriter::rewrite(eq);
+ Node literal = d_containing.getValuation().ensureLiteral(eq);
+ d_containing.getOutputChannel().requirePhase(literal, true);
+ lemmas.push_back(literal.orNode(literal.negate()));
+ }
+ }
+ return lemmas;
+}
+
+void NlSolver::assignOrderIds(std::vector<Node>& vars,
+ NodeMultiset& order,
+ bool isConcrete,
+ bool isAbsolute)
+{
+ SortNlModel smv;
+ smv.d_nlm = &d_model;
+ smv.d_isConcrete = isConcrete;
+ smv.d_isAbsolute = isAbsolute;
+ smv.d_reverse_order = false;
+ std::sort(vars.begin(), vars.end(), smv);
+
+ order.clear();
+ // assign ordering id's
+ unsigned counter = 0;
+ unsigned order_index = isConcrete ? 0 : 1;
+ Node prev;
+ for (unsigned j = 0; j < vars.size(); j++)
+ {
+ Node x = vars[j];
+ Node v = d_model.computeModelValue(x, isConcrete);
+ if (!v.isConst())
+ {
+ Trace("nl-ext-mvo") << "..do not assign order to " << x << " : " << v
+ << std::endl;
+ // don't assign for non-constant values (transcendental function apps)
+ break;
+ }
+ Trace("nl-ext-mvo") << " order " << x << " : " << v << std::endl;
+ if (v != prev)
+ {
+ // builtin points
+ bool success;
+ do
+ {
+ success = false;
+ if (order_index < d_order_points.size())
+ {
+ Node vv = d_model.computeModelValue(d_order_points[order_index],
+ isConcrete);
+ if (d_model.compareValue(v, vv, isAbsolute) <= 0)
+ {
+ counter++;
+ Trace("nl-ext-mvo") << "O[" << d_order_points[order_index]
+ << "] = " << counter << std::endl;
+ order[d_order_points[order_index]] = counter;
+ prev = vv;
+ order_index++;
+ success = true;
+ }
+ }
+ } while (success);
+ }
+ if (prev.isNull() || d_model.compareValue(v, prev, isAbsolute) != 0)
+ {
+ counter++;
+ }
+ Trace("nl-ext-mvo") << "O[" << x << "] = " << counter << std::endl;
+ order[x] = counter;
+ prev = v;
+ }
+ while (order_index < d_order_points.size())
+ {
+ counter++;
+ Trace("nl-ext-mvo") << "O[" << d_order_points[order_index]
+ << "] = " << counter << std::endl;
+ order[d_order_points[order_index]] = counter;
+ order_index++;
+ }
+}
+
+// show a <> 0 by inequalities between variables in monomial a w.r.t 0
+int NlSolver::compareSign(Node oa,
+ Node a,
+ unsigned a_index,
+ int status,
+ std::vector<Node>& exp,
+ std::vector<Node>& lem)
+{
+ Trace("nl-ext-debug") << "Process " << a << " at index " << a_index
+ << ", status is " << status << std::endl;
+ NodeManager* nm = NodeManager::currentNM();
+ Node mvaoa = d_model.computeAbstractModelValue(oa);
+ const std::vector<Node>& vla = d_mdb.getVariableList(a);
+ if (a_index == vla.size())
+ {
+ if (mvaoa.getConst<Rational>().sgn() != status)
+ {
+ Node lemma =
+ safeConstructNary(AND, exp).impNode(mkLit(oa, d_zero, status * 2));
+ lem.push_back(lemma);
+ }
+ return status;
+ }
+ Assert(a_index < vla.size());
+ Node av = vla[a_index];
+ unsigned aexp = d_mdb.getExponent(a, av);
+ // take current sign in model
+ Node mvaav = d_model.computeAbstractModelValue(av);
+ int sgn = mvaav.getConst<Rational>().sgn();
+ Trace("nl-ext-debug") << "Process var " << av << "^" << aexp
+ << ", model sign = " << sgn << std::endl;
+ if (sgn == 0)
+ {
+ if (mvaoa.getConst<Rational>().sgn() != 0)
+ {
+ Node lemma = av.eqNode(d_zero).impNode(oa.eqNode(d_zero));
+ lem.push_back(lemma);
+ }
+ return 0;
+ }
+ if (aexp % 2 == 0)
+ {
+ exp.push_back(av.eqNode(d_zero).negate());
+ return compareSign(oa, a, a_index + 1, status, exp, lem);
+ }
+ exp.push_back(nm->mkNode(sgn == 1 ? GT : LT, av, d_zero));
+ return compareSign(oa, a, a_index + 1, status * sgn, exp, lem);
+}
+
+bool NlSolver::compareMonomial(
+ Node oa,
+ Node a,
+ NodeMultiset& a_exp_proc,
+ Node ob,
+ Node b,
+ NodeMultiset& b_exp_proc,
+ std::vector<Node>& exp,
+ std::vector<Node>& lem,
+ std::map<int, std::map<Node, std::map<Node, Node> > >& cmp_infers)
+{
+ Trace("nl-ext-comp-debug")
+ << "Check |" << a << "| >= |" << b << "|" << std::endl;
+ unsigned pexp_size = exp.size();
+ if (compareMonomial(
+ oa, a, 0, a_exp_proc, ob, b, 0, b_exp_proc, 0, exp, lem, cmp_infers))
+ {
+ return true;
+ }
+ exp.resize(pexp_size);
+ Trace("nl-ext-comp-debug")
+ << "Check |" << b << "| >= |" << a << "|" << std::endl;
+ if (compareMonomial(
+ ob, b, 0, b_exp_proc, oa, a, 0, a_exp_proc, 0, exp, lem, cmp_infers))
+ {
+ return true;
+ }
+ return false;
+}
+
+Node NlSolver::mkLit(Node a, Node b, int status, bool isAbsolute)
+{
+ if (status == 0)
+ {
+ Node a_eq_b = a.eqNode(b);
+ if (!isAbsolute)
+ {
+ return a_eq_b;
+ }
+ Node negate_b = NodeManager::currentNM()->mkNode(UMINUS, b);
+ return a_eq_b.orNode(a.eqNode(negate_b));
+ }
+ else if (status < 0)
+ {
+ return mkLit(b, a, -status, isAbsolute);
+ }
+ Assert(status == 1 || status == 2);
+ NodeManager* nm = NodeManager::currentNM();
+ Kind greater_op = status == 1 ? GEQ : GT;
+ if (!isAbsolute)
+ {
+ return nm->mkNode(greater_op, a, b);
+ }
+ // return nm->mkNode( greater_op, mkAbs( a ), mkAbs( b ) );
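+ // Since we avoid introducing an absolute value operator, encode
+ // |a| <greater_op> |b| by case splitting on the signs of a and b below.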
+ Node zero = mkRationalNode(0);
+ Node a_is_nonnegative = nm->mkNode(GEQ, a, zero);
+ Node b_is_nonnegative = nm->mkNode(GEQ, b, zero);
+ Node negate_a = nm->mkNode(UMINUS, a);
+ Node negate_b = nm->mkNode(UMINUS, b);
+ return a_is_nonnegative.iteNode(
+ b_is_nonnegative.iteNode(nm->mkNode(greater_op, a, b),
+ nm->mkNode(greater_op, a, negate_b)),
+ b_is_nonnegative.iteNode(nm->mkNode(greater_op, negate_a, b),
+ nm->mkNode(greater_op, negate_a, negate_b)));
+}
+
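+// Returns true if a comparison between x and y already follows by chaining
+// the inferences stored in cmp_infers; the inferences used along the path
+// are collected in exp.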
+bool NlSolver::cmp_holds(Node x,
+ Node y,
+ std::map<Node, std::map<Node, Node> >& cmp_infers,
+ std::vector<Node>& exp,
+ std::map<Node, bool>& visited)
+{
+ if (x == y)
+ {
+ return true;
+ }
+ else if (visited.find(x) != visited.end())
+ {
+ return false;
+ }
+ visited[x] = true;
+ std::map<Node, std::map<Node, Node> >::iterator it = cmp_infers.find(x);
+ if (it != cmp_infers.end())
+ {
+ for (std::map<Node, Node>::iterator itc = it->second.begin();
+ itc != it->second.end();
+ ++itc)
+ {
+ exp.push_back(itc->second);
+ if (cmp_holds(itc->first, y, cmp_infers, exp, visited))
+ {
+ return true;
+ }
+ exp.pop_back();
+ }
+ }
+ return false;
+}
+
+// trying to show a ( >, = ) b by inequalities between variables in
+// monomials a,b
+bool NlSolver::compareMonomial(
+ Node oa,
+ Node a,
+ unsigned a_index,
+ NodeMultiset& a_exp_proc,
+ Node ob,
+ Node b,
+ unsigned b_index,
+ NodeMultiset& b_exp_proc,
+ int status,
+ std::vector<Node>& exp,
+ std::vector<Node>& lem,
+ std::map<int, std::map<Node, std::map<Node, Node> > >& cmp_infers)
+{
+ Trace("nl-ext-comp-debug")
+ << "compareMonomial " << oa << " and " << ob << ", indices = " << a_index
+ << " " << b_index << std::endl;
+ Assert(status == 0 || status == 2);
+ const std::vector<Node>& vla = d_mdb.getVariableList(a);
+ const std::vector<Node>& vlb = d_mdb.getVariableList(b);
+ if (a_index == vla.size() && b_index == vlb.size())
+ {
+ // finished, compare absolute value of abstract model values
+ int modelStatus = d_model.compare(oa, ob, false, true) * -2;
+ Trace("nl-ext-comp") << "...finished comparison with " << oa << " <"
+ << status << "> " << ob
+ << ", model status = " << modelStatus << std::endl;
+ if (status != modelStatus)
+ {
+ Trace("nl-ext-comp-infer")
+ << "infer : " << oa << " <" << status << "> " << ob << std::endl;
+ if (status == 2)
+ {
+ // must state that all variables are non-zero
+ for (unsigned j = 0; j < vla.size(); j++)
+ {
+ exp.push_back(vla[j].eqNode(d_zero).negate());
+ }
+ }
+ NodeManager* nm = NodeManager::currentNM();
+ Node clem = nm->mkNode(
+ IMPLIES, safeConstructNary(AND, exp), mkLit(oa, ob, status, true));
+ Trace("nl-ext-comp-lemma") << "comparison lemma : " << clem << std::endl;
+ lem.push_back(clem);
+ cmp_infers[status][oa][ob] = clem;
+ }
+ return true;
+ }
+ // get a/b variable information
+ Node av;
+ unsigned aexp = 0;
+ unsigned avo = 0;
+ if (a_index < vla.size())
+ {
+ av = vla[a_index];
+ unsigned aexpTotal = d_mdb.getExponent(a, av);
+ Assert(a_exp_proc[av] <= aexpTotal);
+ aexp = aexpTotal - a_exp_proc[av];
+ if (aexp == 0)
+ {
+ return compareMonomial(oa,
+ a,
+ a_index + 1,
+ a_exp_proc,
+ ob,
+ b,
+ b_index,
+ b_exp_proc,
+ status,
+ exp,
+ lem,
+ cmp_infers);
+ }
+ Assert(d_order_vars.find(av) != d_order_vars.end());
+ avo = d_order_vars[av];
+ }
+ Node bv;
+ unsigned bexp = 0;
+ unsigned bvo = 0;
+ if (b_index < vlb.size())
+ {
+ bv = vlb[b_index];
+ unsigned bexpTotal = d_mdb.getExponent(b, bv);
+ Assert(b_exp_proc[bv] <= bexpTotal);
+ bexp = bexpTotal - b_exp_proc[bv];
+ if (bexp == 0)
+ {
+ return compareMonomial(oa,
+ a,
+ a_index,
+ a_exp_proc,
+ ob,
+ b,
+ b_index + 1,
+ b_exp_proc,
+ status,
+ exp,
+ lem,
+ cmp_infers);
+ }
+ Assert(d_order_vars.find(bv) != d_order_vars.end());
+ bvo = d_order_vars[bv];
+ }
+ // get "one" information
+ Assert(d_order_vars.find(d_one) != d_order_vars.end());
+ unsigned ovo = d_order_vars[d_one];
+ Trace("nl-ext-comp-debug") << "....vars : " << av << "^" << aexp << " " << bv
+ << "^" << bexp << std::endl;
+
+ //--- cases
+ if (av.isNull())
+ {
+ if (bvo <= ovo)
+ {
+ Trace("nl-ext-comp-debug") << "...take leading " << bv << std::endl;
+ // can multiply b by <=1
+ exp.push_back(mkLit(d_one, bv, bvo == ovo ? 0 : 2, true));
+ return compareMonomial(oa,
+ a,
+ a_index,
+ a_exp_proc,
+ ob,
+ b,
+ b_index + 1,
+ b_exp_proc,
+ bvo == ovo ? status : 2,
+ exp,
+ lem,
+ cmp_infers);
+ }
+ Trace("nl-ext-comp-debug")
+ << "...failure, unmatched |b|>1 component." << std::endl;
+ return false;
+ }
+ else if (bv.isNull())
+ {
+ if (avo >= ovo)
+ {
+ Trace("nl-ext-comp-debug") << "...take leading " << av << std::endl;
+ // can multiply a by >=1
+ exp.push_back(mkLit(av, d_one, avo == ovo ? 0 : 2, true));
+ return compareMonomial(oa,
+ a,
+ a_index + 1,
+ a_exp_proc,
+ ob,
+ b,
+ b_index,
+ b_exp_proc,
+ avo == ovo ? status : 2,
+ exp,
+ lem,
+ cmp_infers);
+ }
+ Trace("nl-ext-comp-debug")
+ << "...failure, unmatched |a|<1 component." << std::endl;
+ return false;
+ }
+ Assert(!av.isNull() && !bv.isNull());
+ if (avo >= bvo)
+ {
+ if (bvo < ovo && avo >= ovo)
+ {
+ Trace("nl-ext-comp-debug") << "...take leading " << av << std::endl;
+ // do avo>=1 instead
+ exp.push_back(mkLit(av, d_one, avo == ovo ? 0 : 2, true));
+ return compareMonomial(oa,
+ a,
+ a_index + 1,
+ a_exp_proc,
+ ob,
+ b,
+ b_index,
+ b_exp_proc,
+ avo == ovo ? status : 2,
+ exp,
+ lem,
+ cmp_infers);
+ }
+ unsigned min_exp = aexp > bexp ? bexp : aexp;
+ a_exp_proc[av] += min_exp;
+ b_exp_proc[bv] += min_exp;
+ Trace("nl-ext-comp-debug") << "...take leading " << min_exp << " from "
+ << av << " and " << bv << std::endl;
+ exp.push_back(mkLit(av, bv, avo == bvo ? 0 : 2, true));
+ bool ret = compareMonomial(oa,
+ a,
+ a_index,
+ a_exp_proc,
+ ob,
+ b,
+ b_index,
+ b_exp_proc,
+ avo == bvo ? status : 2,
+ exp,
+ lem,
+ cmp_infers);
+ a_exp_proc[av] -= min_exp;
+ b_exp_proc[bv] -= min_exp;
+ return ret;
+ }
+ if (bvo <= ovo)
+ {
+ Trace("nl-ext-comp-debug") << "...take leading " << bv << std::endl;
+ // try multiply b <= 1
+ exp.push_back(mkLit(d_one, bv, bvo == ovo ? 0 : 2, true));
+ return compareMonomial(oa,
+ a,
+ a_index,
+ a_exp_proc,
+ ob,
+ b,
+ b_index + 1,
+ b_exp_proc,
+ bvo == ovo ? status : 2,
+ exp,
+ lem,
+ cmp_infers);
+ }
+ Trace("nl-ext-comp-debug")
+ << "...failure, leading |b|>|a|>1 component." << std::endl;
+ return false;
+}
+
+std::vector<Node> NlSolver::checkMonomialSign()
+{
+ std::vector<Node> lemmas;
+ std::map<Node, int> signs;
+ Trace("nl-ext") << "Get monomial sign lemmas..." << std::endl;
+ for (unsigned j = 0; j < d_ms.size(); j++)
+ {
+ Node a = d_ms[j];
+ if (d_ms_proc.find(a) == d_ms_proc.end())
+ {
+ std::vector<Node> exp;
+ if (Trace.isOn("nl-ext-debug"))
+ {
+ Node cmva = d_model.computeConcreteModelValue(a);
+ Trace("nl-ext-debug")
+ << " process " << a << ", mv=" << cmva << "..." << std::endl;
+ }
+ if (d_m_nconst_factor.find(a) == d_m_nconst_factor.end())
+ {
+ signs[a] = compareSign(a, a, 0, 1, exp, lemmas);
+ if (signs[a] == 0)
+ {
+ d_ms_proc[a] = true;
+ Trace("nl-ext-debug")
+ << "...mark " << a << " reduced since its value is 0."
+ << std::endl;
+ }
+ }
+ else
+ {
+ Trace("nl-ext-debug")
+ << "...can't conclude sign lemma for " << a
+ << " since model value of a factor is non-constant." << std::endl;
+ }
+ }
+ }
+ return lemmas;
+}
+
+std::vector<Node> NlSolver::checkMonomialMagnitude(unsigned c)
+{
+ // ensure information is setup
+ if (c == 0)
+ {
+ // sort by absolute values of abstract model values
+ assignOrderIds(d_ms_vars, d_order_vars, false, true);
+
+ // sort individual variable lists
+ Trace("nl-ext-proc") << "Assign order var lists..." << std::endl;
+ d_mdb.sortVariablesByModel(d_ms, d_model);
+ }
+
+ unsigned r = 1;
+ std::vector<Node> lemmas;
+ // if (x,y,L) in cmp_infers, then x > y inferred as conclusion of L
+ // in lemmas
+ std::map<int, std::map<Node, std::map<Node, Node> > > cmp_infers;
+ Trace("nl-ext") << "Get monomial comparison lemmas (order=" << r
+ << ", compare=" << c << ")..." << std::endl;
+ for (unsigned j = 0; j < d_ms.size(); j++)
+ {
+ Node a = d_ms[j];
+ if (d_ms_proc.find(a) == d_ms_proc.end()
+ && d_m_nconst_factor.find(a) == d_m_nconst_factor.end())
+ {
+ if (c == 0)
+ {
+ // compare magnitude against 1
+ std::vector<Node> exp;
+ NodeMultiset a_exp_proc;
+ NodeMultiset b_exp_proc;
+ compareMonomial(a,
+ a,
+ a_exp_proc,
+ d_one,
+ d_one,
+ b_exp_proc,
+ exp,
+ lemmas,
+ cmp_infers);
+ }
+ else
+ {
+ const NodeMultiset& mea = d_mdb.getMonomialExponentMap(a);
+ if (c == 1)
+ {
+ // could compare not just against containing variables?
+ // compare magnitude against variables
+ for (unsigned k = 0; k < d_ms_vars.size(); k++)
+ {
+ Node v = d_ms_vars[k];
+ Node mvcv = d_model.computeConcreteModelValue(v);
+ if (mvcv.isConst())
+ {
+ std::vector<Node> exp;
+ NodeMultiset a_exp_proc;
+ NodeMultiset b_exp_proc;
+ if (mea.find(v) != mea.end())
+ {
+ a_exp_proc[v] = 1;
+ b_exp_proc[v] = 1;
+ setMonomialFactor(a, v, a_exp_proc);
+ setMonomialFactor(v, a, b_exp_proc);
+ compareMonomial(a,
+ a,
+ a_exp_proc,
+ v,
+ v,
+ b_exp_proc,
+ exp,
+ lemmas,
+ cmp_infers);
+ }
+ }
+ }
+ }
+ else
+ {
+ // compare magnitude against other non-linear monomials
+ for (unsigned k = (j + 1); k < d_ms.size(); k++)
+ {
+ Node b = d_ms[k];
+ //(signs[a]==signs[b])==(r==0)
+ if (d_ms_proc.find(b) == d_ms_proc.end()
+ && d_m_nconst_factor.find(b) == d_m_nconst_factor.end())
+ {
+ const NodeMultiset& meb = d_mdb.getMonomialExponentMap(b);
+
+ std::vector<Node> exp;
+ // take common factors of monomials, set minimum of
+ // common exponents as processed
+ NodeMultiset a_exp_proc;
+ NodeMultiset b_exp_proc;
+ for (NodeMultiset::const_iterator itmea2 = mea.begin();
+ itmea2 != mea.end();
+ ++itmea2)
+ {
+ NodeMultiset::const_iterator itmeb2 = meb.find(itmea2->first);
+ if (itmeb2 != meb.end())
+ {
+ unsigned min_exp = itmea2->second > itmeb2->second
+ ? itmeb2->second
+ : itmea2->second;
+ a_exp_proc[itmea2->first] = min_exp;
+ b_exp_proc[itmea2->first] = min_exp;
+ Trace("nl-ext-comp") << "Common exponent : " << itmea2->first
+ << " : " << min_exp << std::endl;
+ }
+ }
+ if (!a_exp_proc.empty())
+ {
+ setMonomialFactor(a, b, a_exp_proc);
+ setMonomialFactor(b, a, b_exp_proc);
+ }
+ /*
+ if( !a_exp_proc.empty() ){
+ // reduction based on common exponents:
+ // a > 0 => ( a * b <> a * c <=> b <> c ),
+ // a < 0 => ( a * b <> a * c <=> b !<> c ) ?
+ }else{
+ compareMonomial( a, a, a_exp_proc, b, b, b_exp_proc, exp, lemmas );
+ }
+ */
+ compareMonomial(
+ a, a, a_exp_proc, b, b, b_exp_proc, exp, lemmas, cmp_infers);
+ }
+ }
+ }
+ }
+ }
+ }
+ // remove redundant lemmas, e.g. if a > b, b > c, a > c were
+ // inferred, discard lemma with conclusion a > c
+ Trace("nl-ext-comp") << "Compute redundancies for " << lemmas.size()
+ << " lemmas." << std::endl;
+ // naive
+ std::vector<Node> r_lemmas;
+ for (std::map<int, std::map<Node, std::map<Node, Node> > >::iterator itb =
+ cmp_infers.begin();
+ itb != cmp_infers.end();
+ ++itb)
+ {
+ for (std::map<Node, std::map<Node, Node> >::iterator itc =
+ itb->second.begin();
+ itc != itb->second.end();
+ ++itc)
+ {
+ for (std::map<Node, Node>::iterator itc2 = itc->second.begin();
+ itc2 != itc->second.end();
+ ++itc2)
+ {
+ std::map<Node, bool> visited;
+ for (std::map<Node, Node>::iterator itc3 = itc->second.begin();
+ itc3 != itc->second.end();
+ ++itc3)
+ {
+ if (itc3->first != itc2->first)
+ {
+ std::vector<Node> exp;
+ if (cmp_holds(itc3->first, itc2->first, itb->second, exp, visited))
+ {
+ r_lemmas.push_back(itc2->second);
+ Trace("nl-ext-comp")
+ << "...inference of " << itc->first << " > " << itc2->first
+ << " was redundant." << std::endl;
+ break;
+ }
+ }
+ }
+ }
+ }
+ }
+ std::vector<Node> nr_lemmas;
+ for (unsigned i = 0; i < lemmas.size(); i++)
+ {
+ if (std::find(r_lemmas.begin(), r_lemmas.end(), lemmas[i])
+ == r_lemmas.end())
+ {
+ nr_lemmas.push_back(lemmas[i]);
+ }
+ }
+ // could only take maximal lower/minimal lower bounds?
+
+ Trace("nl-ext-comp") << nr_lemmas.size() << " / " << lemmas.size()
+ << " were non-redundant." << std::endl;
+ return nr_lemmas;
+}
+
+std::vector<Node> NlSolver::checkTangentPlanes()
+{
+ std::vector<Node> lemmas;
+ Trace("nl-ext") << "Get monomial tangent plane lemmas..." << std::endl;
+ NodeManager* nm = NodeManager::currentNM();
+ const std::map<Node, std::vector<Node> >& ccMap =
+ d_mdb.getContainsChildrenMap();
+ unsigned kstart = d_ms_vars.size();
+ for (unsigned k = kstart; k < d_mterms.size(); k++)
+ {
+ Node t = d_mterms[k];
+ // if this term requires a refinement
+ if (d_tplane_refine.find(t) == d_tplane_refine.end())
+ {
+ continue;
+ }
+ Trace("nl-ext-tplanes")
+ << "Look at monomial requiring refinement : " << t << std::endl;
+ // get a decomposition
+ std::map<Node, std::vector<Node> >::const_iterator it = ccMap.find(t);
+ if (it == ccMap.end())
+ {
+ continue;
+ }
+ std::map<Node, std::map<Node, bool> > dproc;
+ for (unsigned j = 0; j < it->second.size(); j++)
+ {
+ Node tc = it->second[j];
+ if (tc != d_one)
+ {
+ Node tc_diff = d_mdb.getContainsDiffNl(tc, t);
+ Assert(!tc_diff.isNull());
+ Node a = tc < tc_diff ? tc : tc_diff;
+ Node b = tc < tc_diff ? tc_diff : tc;
+ if (dproc[a].find(b) == dproc[a].end())
+ {
+ dproc[a][b] = true;
+ Trace("nl-ext-tplanes")
+ << " decomposable into : " << a << " * " << b << std::endl;
+ Node a_v_c = d_model.computeAbstractModelValue(a);
+ Node b_v_c = d_model.computeAbstractModelValue(b);
+ // points we will add tangent planes for
+ std::vector<Node> pts[2];
+ pts[0].push_back(a_v_c);
+ pts[1].push_back(b_v_c);
+ // if previously refined
+ bool prevRefine = d_tangent_val_bound[0][a].find(b)
+ != d_tangent_val_bound[0][a].end();
+ // a_min, a_max, b_min, b_max
+ for (unsigned p = 0; p < 4; p++)
+ {
+ Node curr_v = p <= 1 ? a_v_c : b_v_c;
+ if (prevRefine)
+ {
+ Node pt_v = d_tangent_val_bound[p][a][b];
+ Assert(!pt_v.isNull());
+ if (curr_v != pt_v)
+ {
+ Node do_extend =
+ nm->mkNode((p == 1 || p == 3) ? GT : LT, curr_v, pt_v);
+ do_extend = Rewriter::rewrite(do_extend);
+ if (do_extend == d_true)
+ {
+ for (unsigned q = 0; q < 2; q++)
+ {
+ pts[p <= 1 ? 0 : 1].push_back(curr_v);
+ pts[p <= 1 ? 1 : 0].push_back(
+ d_tangent_val_bound[p <= 1 ? 2 + q : q][a][b]);
+ }
+ }
+ }
+ }
+ else
+ {
+ d_tangent_val_bound[p][a][b] = curr_v;
+ }
+ }
+
+ for (unsigned p = 0; p < pts[0].size(); p++)
+ {
+ Node a_v = pts[0][p];
+ Node b_v = pts[1][p];
+
+ // tangent plane
+ Node tplane = nm->mkNode(
+ MINUS,
+ nm->mkNode(
+ PLUS, nm->mkNode(MULT, b_v, a), nm->mkNode(MULT, a_v, b)),
+ nm->mkNode(MULT, a_v, b_v));
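+ // i.e. tplane = b_v*a + a_v*b - a_v*b_v, the tangent plane of the
+ // surface a*b at the point (a_v, b_v)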
+ for (unsigned d = 0; d < 4; d++)
+ {
+ Node aa = nm->mkNode(d == 0 || d == 3 ? GEQ : LEQ, a, a_v);
+ Node ab = nm->mkNode(d == 1 || d == 3 ? GEQ : LEQ, b, b_v);
+ Node conc = nm->mkNode(d <= 1 ? LEQ : GEQ, t, tplane);
+ Node tlem = nm->mkNode(OR, aa.negate(), ab.negate(), conc);
+ Trace("nl-ext-tplanes")
+ << "Tangent plane lemma : " << tlem << std::endl;
+ lemmas.push_back(tlem);
+ }
+
+ // tangent plane reverse implication
+
+ // t <= tplane -> ( (a <= a_v ^ b >= b_v) v
+ // (a >= a_v ^ b <= b_v) ).
+ // in clause form, the above becomes
+ // t <= tplane -> a <= a_v v b <= b_v.
+ // t <= tplane -> b >= b_v v a >= a_v.
+ Node a_leq_av = nm->mkNode(LEQ, a, a_v);
+ Node b_leq_bv = nm->mkNode(LEQ, b, b_v);
+ Node a_geq_av = nm->mkNode(GEQ, a, a_v);
+ Node b_geq_bv = nm->mkNode(GEQ, b, b_v);
+
+ Node t_leq_tplane = nm->mkNode(LEQ, t, tplane);
+ Node a_leq_av_or_b_leq_bv = nm->mkNode(OR, a_leq_av, b_leq_bv);
+ Node b_geq_bv_or_a_geq_av = nm->mkNode(OR, b_geq_bv, a_geq_av);
+ Node ub_reverse1 =
+ nm->mkNode(OR, t_leq_tplane.negate(), a_leq_av_or_b_leq_bv);
+ Trace("nl-ext-tplanes")
+ << "Tangent plane lemma (reverse) : " << ub_reverse1
+ << std::endl;
+ lemmas.push_back(ub_reverse1);
+ Node ub_reverse2 =
+ nm->mkNode(OR, t_leq_tplane.negate(), b_geq_bv_or_a_geq_av);
+ Trace("nl-ext-tplanes")
+ << "Tangent plane lemma (reverse) : " << ub_reverse2
+ << std::endl;
+ lemmas.push_back(ub_reverse2);
+
+ // t >= tplane -> ( (a <= a_v ^ b <= b_v) v
+ // (a >= a_v ^ b >= b_v) ).
+ // in clause form, the above becomes
+ // t >= tplane -> a <= a_v v b >= b_v.
+ // t >= tplane -> b >= b_v v a <= a_v
+ Node t_geq_tplane = nm->mkNode(GEQ, t, tplane);
+ Node a_leq_av_or_b_geq_bv = nm->mkNode(OR, a_leq_av, b_geq_bv);
+ Node a_geq_av_or_b_leq_bv = nm->mkNode(OR, a_geq_av, b_leq_bv);
+ Node lb_reverse1 =
+ nm->mkNode(OR, t_geq_tplane.negate(), a_leq_av_or_b_geq_bv);
+ Trace("nl-ext-tplanes")
+ << "Tangent plane lemma (reverse) : " << lb_reverse1
+ << std::endl;
+ lemmas.push_back(lb_reverse1);
+ Node lb_reverse2 =
+ nm->mkNode(OR, t_geq_tplane.negate(), a_geq_av_or_b_leq_bv);
+ Trace("nl-ext-tplanes")
+ << "Tangent plane lemma (reverse) : " << lb_reverse2
+ << std::endl;
+ lemmas.push_back(lb_reverse2);
+ }
+ }
+ }
+ }
+ }
+ Trace("nl-ext") << "...trying " << lemmas.size() << " tangent plane lemmas..."
+ << std::endl;
+ return lemmas;
+}
+
+std::vector<Node> NlSolver::checkMonomialInferBounds(
+ std::vector<Node>& nt_lemmas,
+ const std::vector<Node>& asserts,
+ const std::vector<Node>& false_asserts)
+{
+ // sort monomials by degree
+ Trace("nl-ext-proc") << "Sort monomials by degree..." << std::endl;
+ d_mdb.sortByDegree(d_ms);
+ // all monomials
+ d_mterms.insert(d_mterms.end(), d_ms_vars.begin(), d_ms_vars.end());
+ d_mterms.insert(d_mterms.end(), d_ms.begin(), d_ms.end());
+
+ const std::map<Node, std::map<Node, ConstraintInfo> >& cim =
+ d_cdb.getConstraints();
+
+ std::vector<Node> lemmas;
+ NodeManager* nm = NodeManager::currentNM();
+ // register constraints
+ Trace("nl-ext-debug") << "Register bound constraints..." << std::endl;
+ for (const Node& lit : asserts)
+ {
+ bool polarity = lit.getKind() != NOT;
+ Node atom = lit.getKind() == NOT ? lit[0] : lit;
+ d_cdb.registerConstraint(atom);
+ bool is_false_lit =
+ std::find(false_asserts.begin(), false_asserts.end(), lit)
+ != false_asserts.end();
+ // add information about bounds to variables
+ std::map<Node, std::map<Node, ConstraintInfo> >::const_iterator itc =
+ cim.find(atom);
+ if (itc == cim.end())
+ {
+ continue;
+ }
+ for (const std::pair<const Node, ConstraintInfo>& itcc : itc->second)
+ {
+ Node x = itcc.first;
+ Node coeff = itcc.second.d_coeff;
+ Node rhs = itcc.second.d_rhs;
+ Kind type = itcc.second.d_type;
+ Node exp = lit;
+ if (!polarity)
+ {
+ // reverse
+ if (type == EQUAL)
+ {
+ // we will take the strict inequality in the direction of the
+ // model
+ Node lhs = ArithMSum::mkCoeffTerm(coeff, x);
+ Node query = nm->mkNode(GT, lhs, rhs);
+ Node query_mv = d_model.computeAbstractModelValue(query);
+ if (query_mv == d_true)
+ {
+ exp = query;
+ type = GT;
+ }
+ else
+ {
+ Assert(query_mv == d_false);
+ exp = nm->mkNode(LT, lhs, rhs);
+ type = LT;
+ }
+ }
+ else
+ {
+ type = negateKind(type);
+ }
+ }
+ // add to status if maximal degree
+ d_ci_max[x][coeff][rhs] = d_cdb.isMaximal(atom, x);
+ if (Trace.isOn("nl-ext-bound-debug2"))
+ {
+ Node t = ArithMSum::mkCoeffTerm(coeff, x);
+ Trace("nl-ext-bound-debug2") << "Add Bound: " << t << " " << type << " "
+ << rhs << " by " << exp << std::endl;
+ }
+ bool updated = true;
+ std::map<Node, Kind>::iterator its = d_ci[x][coeff].find(rhs);
+ if (its == d_ci[x][coeff].end())
+ {
+ d_ci[x][coeff][rhs] = type;
+ d_ci_exp[x][coeff][rhs] = exp;
+ }
+ else if (type != its->second)
+ {
+ Trace("nl-ext-bound-debug2")
+ << "Joining kinds : " << type << " " << its->second << std::endl;
+ Kind jk = joinKinds(type, its->second);
+ if (jk == UNDEFINED_KIND)
+ {
+ updated = false;
+ }
+ else if (jk != its->second)
+ {
+ if (jk == type)
+ {
+ d_ci[x][coeff][rhs] = type;
+ d_ci_exp[x][coeff][rhs] = exp;
+ }
+ else
+ {
+ d_ci[x][coeff][rhs] = jk;
+ d_ci_exp[x][coeff][rhs] =
+ nm->mkNode(AND, d_ci_exp[x][coeff][rhs], exp);
+ }
+ }
+ else
+ {
+ updated = false;
+ }
+ }
+ if (Trace.isOn("nl-ext-bound"))
+ {
+ if (updated)
+ {
+ Trace("nl-ext-bound") << "Bound: ";
+ debugPrintBound("nl-ext-bound", coeff, x, d_ci[x][coeff][rhs], rhs);
+ Trace("nl-ext-bound") << " by " << d_ci_exp[x][coeff][rhs];
+ if (d_ci_max[x][coeff][rhs])
+ {
+ Trace("nl-ext-bound") << ", is max degree";
+ }
+ Trace("nl-ext-bound") << std::endl;
+ }
+ }
+ // compute if bound is not satisfied, and store what is required
+ // for a possible refinement
+ if (options::nlExtTangentPlanes())
+ {
+ if (is_false_lit)
+ {
+ d_tplane_refine.insert(x);
+ }
+ }
+ }
+ }
+ // reflexive constraints
+ Node null_coeff;
+ for (unsigned j = 0; j < d_mterms.size(); j++)
+ {
+ Node n = d_mterms[j];
+ d_ci[n][null_coeff][n] = EQUAL;
+ d_ci_exp[n][null_coeff][n] = d_true;
+ d_ci_max[n][null_coeff][n] = false;
+ }
+
+ Trace("nl-ext") << "Get inferred bound lemmas..." << std::endl;
+ const std::map<Node, std::vector<Node> >& cpMap =
+ d_mdb.getContainsParentMap();
+ for (unsigned k = 0; k < d_mterms.size(); k++)
+ {
+ Node x = d_mterms[k];
+ Trace("nl-ext-bound-debug")
+ << "Process bounds for " << x << " : " << std::endl;
+ std::map<Node, std::vector<Node> >::const_iterator itm = cpMap.find(x);
+ if (itm == cpMap.end())
+ {
+ Trace("nl-ext-bound-debug") << "...has no parent monomials." << std::endl;
+ continue;
+ }
+ Trace("nl-ext-bound-debug")
+ << "...has " << itm->second.size() << " parent monomials." << std::endl;
+ // check derived bounds
+ std::map<Node, std::map<Node, std::map<Node, Kind> > >::iterator itc =
+ d_ci.find(x);
+ if (itc == d_ci.end())
+ {
+ continue;
+ }
+ for (std::map<Node, std::map<Node, Kind> >::iterator itcc =
+ itc->second.begin();
+ itcc != itc->second.end();
+ ++itcc)
+ {
+ Node coeff = itcc->first;
+ Node t = ArithMSum::mkCoeffTerm(coeff, x);
+ for (std::map<Node, Kind>::iterator itcr = itcc->second.begin();
+ itcr != itcc->second.end();
+ ++itcr)
+ {
+ Node rhs = itcr->first;
+ // only consider this bound if maximal degree
+ if (!d_ci_max[x][coeff][rhs])
+ {
+ continue;
+ }
+ Kind type = itcr->second;
+ for (unsigned j = 0; j < itm->second.size(); j++)
+ {
+ Node y = itm->second[j];
+ Node mult = d_mdb.getContainsDiff(x, y);
+ // x <k> t => m*x <k'> t where y = m*x
+ // get the sign of mult
+ Node mmv = d_model.computeConcreteModelValue(mult);
+ Trace("nl-ext-bound-debug2")
+ << "Model value of " << mult << " is " << mmv << std::endl;
+ if (!mmv.isConst())
+ {
+ Trace("nl-ext-bound-debug")
+ << " ...coefficient " << mult
+ << " is non-constant (probably transcendental)." << std::endl;
+ continue;
+ }
+ int mmv_sign = mmv.getConst<Rational>().sgn();
+ Trace("nl-ext-bound-debug2")
+ << " sign of " << mmv << " is " << mmv_sign << std::endl;
+ if (mmv_sign == 0)
+ {
+ Trace("nl-ext-bound-debug")
+ << " ...coefficient " << mult << " is zero." << std::endl;
+ continue;
+ }
+ Trace("nl-ext-bound-debug")
+ << " from " << x << " * " << mult << " = " << y << " and " << t
+ << " " << type << " " << rhs << ", infer : " << std::endl;
+ Kind infer_type = mmv_sign == -1 ? reverseRelationKind(type) : type;
+ Node infer_lhs = nm->mkNode(MULT, mult, t);
+ Node infer_rhs = nm->mkNode(MULT, mult, rhs);
+ Node infer = nm->mkNode(infer_type, infer_lhs, infer_rhs);
+ Trace("nl-ext-bound-debug") << " " << infer << std::endl;
+ infer = Rewriter::rewrite(infer);
+ Trace("nl-ext-bound-debug2")
+ << " ...rewritten : " << infer << std::endl;
+ // check whether it is false in model for abstraction
+ Node infer_mv = d_model.computeAbstractModelValue(infer);
+ Trace("nl-ext-bound-debug")
+ << " ...infer model value is " << infer_mv << std::endl;
+ if (infer_mv == d_false)
+ {
+ Node exp =
+ nm->mkNode(AND,
+ nm->mkNode(mmv_sign == 1 ? GT : LT, mult, d_zero),
+ d_ci_exp[x][coeff][rhs]);
+ Node iblem = nm->mkNode(IMPLIES, exp, infer);
+ Node pr_iblem = iblem;
+ iblem = Rewriter::rewrite(iblem);
+ bool introNewTerms = hasNewMonomials(iblem, d_ms);
+ Trace("nl-ext-bound-lemma")
+ << "*** Bound inference lemma : " << iblem
+ << " (pre-rewrite : " << pr_iblem << ")" << std::endl;
+ // Trace("nl-ext-bound-lemma") << " intro new
+ // monomials = " << introNewTerms << std::endl;
+ if (!introNewTerms)
+ {
+ lemmas.push_back(iblem);
+ }
+ else
+ {
+ nt_lemmas.push_back(iblem);
+ }
+ }
+ }
+ }
+ }
+ }
+ return lemmas;
+}
+
+std::vector<Node> NlSolver::checkFactoring(
+ const std::vector<Node>& asserts, const std::vector<Node>& false_asserts)
+{
+ std::vector<Node> lemmas;
+ NodeManager* nm = NodeManager::currentNM();
+ Trace("nl-ext") << "Get factoring lemmas..." << std::endl;
+ for (const Node& lit : asserts)
+ {
+ bool polarity = lit.getKind() != NOT;
+ Node atom = lit.getKind() == NOT ? lit[0] : lit;
+ Node litv = d_model.computeConcreteModelValue(lit);
+ bool considerLit = false;
+ // Only consider literals that are in false_asserts.
+ considerLit = std::find(false_asserts.begin(), false_asserts.end(), lit)
+ != false_asserts.end();
+
+ if (considerLit)
+ {
+ std::map<Node, Node> msum;
+ if (ArithMSum::getMonomialSumLit(atom, msum))
+ {
+ Trace("nl-ext-factor") << "Factoring for literal " << lit
+ << ", monomial sum is : " << std::endl;
+ if (Trace.isOn("nl-ext-factor"))
+ {
+ ArithMSum::debugPrintMonomialSum(msum, "nl-ext-factor");
+ }
+ std::map<Node, std::vector<Node> > factor_to_mono;
+ std::map<Node, std::vector<Node> > factor_to_mono_orig;
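+ // factor_to_mono maps each factor x to the (rewritten) products of the
+ // remaining factors and coefficient of the monomials of this literal that
+ // contain x; factor_to_mono_orig maps x to those original monomials.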
+ for (std::map<Node, Node>::iterator itm = msum.begin();
+ itm != msum.end();
+ ++itm)
+ {
+ if (!itm->first.isNull())
+ {
+ if (itm->first.getKind() == NONLINEAR_MULT)
+ {
+ std::vector<Node> children;
+ for (unsigned i = 0; i < itm->first.getNumChildren(); i++)
+ {
+ children.push_back(itm->first[i]);
+ }
+ std::map<Node, bool> processed;
+ for (unsigned i = 0; i < itm->first.getNumChildren(); i++)
+ {
+ if (processed.find(itm->first[i]) == processed.end())
+ {
+ processed[itm->first[i]] = true;
+ children[i] = d_one;
+ if (!itm->second.isNull())
+ {
+ children.push_back(itm->second);
+ }
+ Node val = nm->mkNode(MULT, children);
+ if (!itm->second.isNull())
+ {
+ children.pop_back();
+ }
+ children[i] = itm->first[i];
+ val = Rewriter::rewrite(val);
+ factor_to_mono[itm->first[i]].push_back(val);
+ factor_to_mono_orig[itm->first[i]].push_back(itm->first);
+ }
+ }
+ }
+ }
+ }
+ for (std::map<Node, std::vector<Node> >::iterator itf =
+ factor_to_mono.begin();
+ itf != factor_to_mono.end();
+ ++itf)
+ {
+ Node x = itf->first;
+ if (itf->second.size() == 1)
+ {
+ std::map<Node, Node>::iterator itm = msum.find(x);
+ if (itm != msum.end())
+ {
+ itf->second.push_back(itm->second.isNull() ? d_one : itm->second);
+ factor_to_mono_orig[x].push_back(x);
+ }
+ }
+ if (itf->second.size() <= 1)
+ {
+ continue;
+ }
+ Node sum = nm->mkNode(PLUS, itf->second);
+ sum = Rewriter::rewrite(sum);
+ Trace("nl-ext-factor")
+ << "* Factored sum for " << x << " : " << sum << std::endl;
+ Node kf = getFactorSkolem(sum, lemmas);
+ std::vector<Node> poly;
+ poly.push_back(nm->mkNode(MULT, x, kf));
+ std::map<Node, std::vector<Node> >::iterator itfo =
+ factor_to_mono_orig.find(x);
+ Assert(itfo != factor_to_mono_orig.end());
+ for (std::map<Node, Node>::iterator itm = msum.begin();
+ itm != msum.end();
+ ++itm)
+ {
+ if (std::find(itfo->second.begin(), itfo->second.end(), itm->first)
+ == itfo->second.end())
+ {
+ poly.push_back(ArithMSum::mkCoeffTerm(
+ itm->second, itm->first.isNull() ? d_one : itm->first));
+ }
+ }
+ Node polyn = poly.size() == 1 ? poly[0] : nm->mkNode(PLUS, poly);
+ Trace("nl-ext-factor")
+ << "...factored polynomial : " << polyn << std::endl;
+ Node conc_lit = nm->mkNode(atom.getKind(), polyn, d_zero);
+ conc_lit = Rewriter::rewrite(conc_lit);
+ if (!polarity)
+ {
+ conc_lit = conc_lit.negate();
+ }
+
+ std::vector<Node> lemma_disj;
+ lemma_disj.push_back(lit.negate());
+ lemma_disj.push_back(conc_lit);
+ Node flem = nm->mkNode(OR, lemma_disj);
+ Trace("nl-ext-factor") << "...lemma is " << flem << std::endl;
+ lemmas.push_back(flem);
+ }
+ }
+ }
+ }
+ return lemmas;
+}
+
+Node NlSolver::getFactorSkolem(Node n, std::vector<Node>& lemmas)
+{
+ std::map<Node, Node>::iterator itf = d_factor_skolem.find(n);
+ if (itf == d_factor_skolem.end())
+ {
+ NodeManager* nm = NodeManager::currentNM();
+ Node k = nm->mkSkolem("kf", n.getType());
+ Node k_eq = Rewriter::rewrite(k.eqNode(n));
+ lemmas.push_back(k_eq);
+ d_factor_skolem[n] = k;
+ return k;
+ }
+ return itf->second;
+}
+
+std::vector<Node> NlSolver::checkMonomialInferResBounds()
+{
+ std::vector<Node> lemmas;
+ NodeManager* nm = NodeManager::currentNM();
+ Trace("nl-ext") << "Get monomial resolution inferred bound lemmas..."
+ << std::endl;
+ size_t nmterms = d_mterms.size();
+ for (unsigned j = 0; j < nmterms; j++)
+ {
+ Node a = d_mterms[j];
+ std::map<Node, std::map<Node, std::map<Node, Kind> > >::iterator itca =
+ d_ci.find(a);
+ if (itca == d_ci.end())
+ {
+ continue;
+ }
+ for (unsigned k = (j + 1); k < nmterms; k++)
+ {
+ Node b = d_mterms[k];
+ std::map<Node, std::map<Node, std::map<Node, Kind> > >::iterator itcb =
+ d_ci.find(b);
+ if (itcb == d_ci.end())
+ {
+ continue;
+ }
+ Trace("nl-ext-rbound-debug") << "resolution inferences : compare " << a
+ << " and " << b << std::endl;
+ // if they have common factors
+ std::map<Node, Node>::iterator ita = d_mono_diff[a].find(b);
+ if (ita == d_mono_diff[a].end())
+ {
+ continue;
+ }
+ Trace("nl-ext-rbound") << "Get resolution inferences for [a] " << a
+ << " vs [b] " << b << std::endl;
+ std::map<Node, Node>::iterator itb = d_mono_diff[b].find(a);
+ Assert(itb != d_mono_diff[b].end());
+ Node mv_a = d_model.computeAbstractModelValue(ita->second);
+ Assert(mv_a.isConst());
+ int mv_a_sgn = mv_a.getConst<Rational>().sgn();
+ if (mv_a_sgn == 0)
+ {
+ // we don't compare monomials whose current model value is zero
+ continue;
+ }
+ Node mv_b = d_model.computeAbstractModelValue(itb->second);
+ Assert(mv_b.isConst());
+ int mv_b_sgn = mv_b.getConst<Rational>().sgn();
+ if (mv_b_sgn == 0)
+ {
+ // we don't compare monomials whose current model value is zero
+ continue;
+ }
+ Trace("nl-ext-rbound") << " [a] factor is " << ita->second
+ << ", sign in model = " << mv_a_sgn << std::endl;
+ Trace("nl-ext-rbound") << " [b] factor is " << itb->second
+ << ", sign in model = " << mv_b_sgn << std::endl;
+
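+ // Resolve a bound on a with a bound on b. Here, ita->second is the factor
+ // of a not occurring in b and itb->second is the factor of b not occurring
+ // in a. For example (illustrative), from s <= x*z and x*y <= t, together
+ // with sign assumptions on the factors y and z, we may infer y*s <= z*t.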
+ std::vector<Node> exp;
+ // bounds of a
+ for (std::map<Node, std::map<Node, Kind> >::iterator itcac =
+ itca->second.begin();
+ itcac != itca->second.end();
+ ++itcac)
+ {
+ Node coeff_a = itcac->first;
+ for (std::map<Node, Kind>::iterator itcar = itcac->second.begin();
+ itcar != itcac->second.end();
+ ++itcar)
+ {
+ Node rhs_a = itcar->first;
+ Node rhs_a_res_base = nm->mkNode(MULT, itb->second, rhs_a);
+ rhs_a_res_base = Rewriter::rewrite(rhs_a_res_base);
+ if (hasNewMonomials(rhs_a_res_base, d_ms))
+ {
+ continue;
+ }
+ Kind type_a = itcar->second;
+ exp.push_back(d_ci_exp[a][coeff_a][rhs_a]);
+
+ // bounds of b
+ for (std::map<Node, std::map<Node, Kind> >::iterator itcbc =
+ itcb->second.begin();
+ itcbc != itcb->second.end();
+ ++itcbc)
+ {
+ Node coeff_b = itcbc->first;
+ Node rhs_a_res = ArithMSum::mkCoeffTerm(coeff_b, rhs_a_res_base);
+ for (std::map<Node, Kind>::iterator itcbr = itcbc->second.begin();
+ itcbr != itcbc->second.end();
+ ++itcbr)
+ {
+ Node rhs_b = itcbr->first;
+ Node rhs_b_res = nm->mkNode(MULT, ita->second, rhs_b);
+ rhs_b_res = ArithMSum::mkCoeffTerm(coeff_a, rhs_b_res);
+ rhs_b_res = Rewriter::rewrite(rhs_b_res);
+ if (hasNewMonomials(rhs_b_res, d_ms))
+ {
+ continue;
+ }
+ Kind type_b = itcbr->second;
+ exp.push_back(d_ci_exp[b][coeff_b][rhs_b]);
+ if (Trace.isOn("nl-ext-rbound"))
+ {
+ Trace("nl-ext-rbound") << "* try bounds : ";
+ debugPrintBound("nl-ext-rbound", coeff_a, a, type_a, rhs_a);
+ Trace("nl-ext-rbound") << std::endl;
+ Trace("nl-ext-rbound") << " ";
+ debugPrintBound("nl-ext-rbound", coeff_b, b, type_b, rhs_b);
+ Trace("nl-ext-rbound") << std::endl;
+ }
+ Kind types[2];
+ for (unsigned r = 0; r < 2; r++)
+ {
+ Node pivot_factor = r == 0 ? itb->second : ita->second;
+ int pivot_factor_sign = r == 0 ? mv_b_sgn : mv_a_sgn;
+ types[r] = r == 0 ? type_a : type_b;
+ if (pivot_factor_sign == (r == 0 ? 1 : -1))
+ {
+ types[r] = reverseRelationKind(types[r]);
+ }
+ if (pivot_factor_sign == 1)
+ {
+ exp.push_back(nm->mkNode(GT, pivot_factor, d_zero));
+ }
+ else
+ {
+ exp.push_back(nm->mkNode(LT, pivot_factor, d_zero));
+ }
+ }
+ Kind jk = transKinds(types[0], types[1]);
+ Trace("nl-ext-rbound-debug")
+ << "trans kind : " << types[0] << " + " << types[1] << " = "
+ << jk << std::endl;
+ if (jk != UNDEFINED_KIND)
+ {
+ Node conc = nm->mkNode(jk, rhs_a_res, rhs_b_res);
+ Node conc_mv = d_model.computeAbstractModelValue(conc);
+ if (conc_mv == d_false)
+ {
+ Node rblem = nm->mkNode(IMPLIES, nm->mkNode(AND, exp), conc);
+ Trace("nl-ext-rbound-lemma-debug")
+ << "Resolution bound lemma "
+ "(pre-rewrite) "
+ ": "
+ << rblem << std::endl;
+ rblem = Rewriter::rewrite(rblem);
+ Trace("nl-ext-rbound-lemma")
+ << "Resolution bound lemma : " << rblem << std::endl;
+ lemmas.push_back(rblem);
+ }
+ }
+ exp.pop_back();
+ exp.pop_back();
+ exp.pop_back();
+ }
+ }
+ exp.pop_back();
+ }
+ }
+ }
+ }
+ return lemmas;
+}
+
+} // namespace nl
+} // namespace arith
+} // namespace theory
+} // namespace CVC4
--- /dev/null
+/********************* */
+/*! \file nl_solver.h
+ ** \verbatim
+ ** Top contributors (to current version):
+ ** Andrew Reynolds, Tim King
+ ** This file is part of the CVC4 project.
+ ** Copyright (c) 2009-2019 by the authors listed in the file AUTHORS
+ ** in the top-level source directory) and their institutional affiliations.
+ ** All rights reserved. See the file COPYING in the top-level source
+ ** directory for licensing information.\endverbatim
+ **
+ ** \brief Solver for standard non-linear constraints
+ **/
+
+#ifndef CVC4__THEORY__ARITH__NL_SOLVER_H
+#define CVC4__THEORY__ARITH__NL_SOLVER_H
+
+#include <map>
+#include <unordered_map>
+#include <utility>
+#include <vector>
+
+#include "context/cdhashset.h"
+#include "context/cdinsert_hashmap.h"
+#include "context/cdlist.h"
+#include "context/cdqueue.h"
+#include "context/context.h"
+#include "expr/kind.h"
+#include "expr/node.h"
+#include "theory/arith/nl/nl_constraint.h"
+#include "theory/arith/nl/nl_lemma_utils.h"
+#include "theory/arith/nl/nl_model.h"
+#include "theory/arith/nl/nl_monomial.h"
+#include "theory/arith/theory_arith.h"
+
+namespace CVC4 {
+namespace theory {
+namespace arith {
+namespace nl {
+
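+/** A multiset of nodes, represented as a map from each node to its
+ * multiplicity, e.g. mapping each variable of a monomial to its exponent. */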
+typedef std::map<Node, unsigned> NodeMultiset;
+
+/** Non-linear solver class
+ *
+ * This class implements model-based refinement schemes
+ * for non-linear arithmetic, described in:
+ *
+ * - "Invariant Checking of NRA Transition Systems
+ * via Incremental Reduction to LRA with EUF" by
+ * Cimatti et al., TACAS 2017.
+ *
+ * - Section 5 of "Desiging Theory Solvers with
+ * Extensions" by Reynolds et al., FroCoS 2017.
+ */
+class NlSolver
+{
+ typedef std::map<Node, NodeMultiset> MonomialExponentMap;
+ typedef context::CDHashSet<Node, NodeHashFunction> NodeSet;
+
+ public:
+ NlSolver(TheoryArith& containing, NlModel& model);
+ ~NlSolver();
+
+ /** init last call
+ *
+ * This is called at the beginning of last call effort check, where
+ * assertions are the set of assertions belonging to arithmetic,
+ * false_asserts is the subset of assertions that are false in the current
+ * model, and xts is the set of extended function terms that are active in
+ * the current context.
+ */
+ void initLastCall(const std::vector<Node>& assertions,
+ const std::vector<Node>& false_asserts,
+ const std::vector<Node>& xts);
+ //-------------------------------------------- lemma schemas
+ /** check split zero
+ *
+ * Returns a set of theory lemmas of the form
+ * t = 0 V t != 0
+ * where t is a term that exists in the current context.
+ */
+ std::vector<Node> checkSplitZero();
+
+ /** check monomial sign
+ *
+ * Returns a set of valid theory lemmas, based on a
+ * lemma schema which ensures that non-linear monomials
+ * respect sign information based on their facts.
+ * For more details, see Section 5 of "Design Theory
+ * Solvers with Extensions" by Reynolds et al., FroCoS 2017,
+ * Figure 5, this is the schema "Sign".
+ *
+ * Examples:
+ *
+ * x > 0 ^ y > 0 => x*y > 0
+ * x < 0 => x*y*y < 0
+ * x = 0 => x*y*z = 0
+ */
+ std::vector<Node> checkMonomialSign();
+
+ /** check monomial magnitude
+ *
+ * Returns a set of valid theory lemmas, based on a
+ * lemma schema which ensures that comparisons between
+ * non-linear monomials respect the magnitude of their
+ * factors.
+ * For more details, see Section 5 of "Design Theory
+ * Solvers with Extensions" by Reynolds et al., FroCoS 2017,
+ * Figure 5, this is the schema "Magnitude".
+ *
+ * Examples:
+ *
+ * |x|>|y| => |x*z|>|y*z|
+ * |x|>|y| ^ |z|>|w| ^ |x|>=1 => |x*x*z*u|>|y*w|
+ *
+ * Argument c indicates the class of inferences to perform for the
+ * (non-linear) monomials in the vector d_ms:
+ * 0 : compare non-linear monomials against 1,
+ * 1 : compare non-linear monomials against variables,
+ * 2 : compare non-linear monomials against other non-linear monomials.
+ */
+ std::vector<Node> checkMonomialMagnitude(unsigned c);
+
+ /** check monomial inferred bounds
+ *
+ * Returns a set of valid theory lemmas, based on a
+ * lemma schema that infers new constraints about existing
+ * terms based on multiplying both sides of an existing
+ * constraint by a term.
+ * For more details, see Section 5 of "Design Theory
+ * Solvers with Extensions" by Reynolds et al., FroCoS 2017,
+ * Figure 5, this is the schema "Multiply".
+ *
+ * Examples:
+ *
+ * x > 0 ^ (y > z + w) => x*y > x*(z+w)
+ * x < 0 ^ (y > z + w) => x*y < x*(z+w)
+ * ...where (y > z + w) and x*y are a constraint and term
+ * that occur in the current context.
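+ *
+ * The argument nt_lemmas is populated with lemmas of this schema that
+ * introduce new monomials; the caller processes these with lower priority
+ * than the returned lemmas.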
+ */
+ std::vector<Node> checkMonomialInferBounds(
+ std::vector<Node>& nt_lemmas,
+ const std::vector<Node>& asserts,
+ const std::vector<Node>& false_asserts);
+
+ /** check factoring
+ *
+ * Returns a set of valid theory lemmas, based on a
+ * lemma schema that states a relationship between monomials
+ * with common factors that occur in the same constraint.
+ *
+ * Examples:
+ *
+ * x*z+y*z > t => ( k = x + y ^ k*z > t )
+ * ...where k is fresh and x*z + y*z > t is a
+ * constraint that occurs in the current context.
+ */
+ std::vector<Node> checkFactoring(const std::vector<Node>& asserts,
+ const std::vector<Node>& false_asserts);
+
+ /** check monomial infer resolution bounds
+ *
+ * Returns a set of valid theory lemmas, based on a
+ * lemma schema which "resolves" upper bounds
+ * of one inequality with lower bounds for another.
+ * This schema is not enabled by default, and can
+ * be enabled by --nl-ext-rbound.
+ *
+ * Examples:
+ *
+ * ( y>=0 ^ s <= x*z ^ x*y <= t ) => y*s <= z*t
+ * ...where s <= x*z and x*y <= t are constraints
+ * that occur in the current context.
+ */
+ std::vector<Node> checkMonomialInferResBounds();
+
+ /** check tangent planes
+ *
+ * Returns a set of valid theory lemmas, based on an
+ * "incremental linearization" of non-linear monomials.
+ * This linearization is accomplished by adding constraints
+ * corresponding to "tangent planes" at the current
+ * model value of each non-linear monomial. In particular
+ * consider the definition for constants a,b :
+ * T_{a,b}( x*y ) = b*x + a*y - a*b.
+ * The lemmas added by this function are of the form :
+ * ( ( x>a ^ y<b) v (x<a ^ y>b) ) => x*y < T_{a,b}( x*y )
+ * ( ( x>a ^ y>b) v (x<a ^ y<b) ) => x*y > T_{a,b}( x*y )
+ * It is inspired by "Invariant Checking of NRA Transition
+ * Systems via Incremental Reduction to LRA with EUF" by
+ * Cimatti et al., TACAS 2017.
+ * This schema is not terminating in general.
+ * It is not enabled by default, and can
+ * be enabled by --nl-ext-tplanes.
+ *
+ * Examples:
+ *
+ * ( ( x>2 ^ y>5) v (x<2 ^ y<5) ) => x*y > 5*x + 2*y - 10
+ * ( ( x>2 ^ y<5) v (x<2 ^ y>5) ) => x*y < 5*x + 2*y - 10
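+ *
+ * Note that the above lemmas are valid since
+ * x*y - T_{a,b}( x*y ) = x*y - b*x - a*y + a*b = ( x - a )*( y - b ),
+ * whose sign is determined by the signs of ( x - a ) and ( y - b ).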
+ */
+ std::vector<Node> checkTangentPlanes();
+
+ //-------------------------------------------- end lemma schemas
+ private:
+ // The theory of arithmetic containing this extension.
+ TheoryArith& d_containing;
+ /** Reference to the non-linear model object */
+ NlModel& d_model;
+ /** commonly used terms */
+ Node d_zero;
+ Node d_one;
+ Node d_neg_one;
+ Node d_two;
+ Node d_true;
+ Node d_false;
+ /** Context-independent database of monomial information */
+ MonomialDb d_mdb;
+ /** Context-independent database of constraint information */
+ ConstraintDb d_cdb;
+
+ // for each pair of monomials ( m1, m2 ) with common factors, e.g. ( x*y, x*z ),
+ // d_mono_diff[m1][m2] stores the factor of m1 not occurring in m2, e.g. y
+ std::map<Node, std::map<Node, Node> > d_mono_diff;
+
+ /** cache of terms t for which we have added the lemma ( t = 0 V t != 0 ). */
+ NodeSet d_zero_split;
+
+ // ordering, stores variables and 0,1,-1
+ std::map<Node, unsigned> d_order_vars;
+ std::vector<Node> d_order_points;
+
+ // information about monomials
+ std::vector<Node> d_ms;
+ std::vector<Node> d_ms_vars;
+ std::map<Node, bool> d_ms_proc;
+ std::vector<Node> d_mterms;
+
+ // list of monomials with factors whose model value is non-constant in model
+ // e.g. y*cos( x )
+ std::map<Node, bool> d_m_nconst_factor;
+ /** the set of monomials we should apply tangent planes to */
+ std::unordered_set<Node, NodeHashFunction> d_tplane_refine;
+ /** maps nodes to their factor skolems */
+ std::map<Node, Node> d_factor_skolem;
+ /** tangent plane bounds */
+ std::map<Node, std::map<Node, Node> > d_tangent_val_bound[4];
+ // The following three maps are keyed by term -> coeff -> rhs and together
+ // store bound information ( status, exp, b ) such that exp => ( coeff *
+ // term <status> rhs ), where status is stored in d_ci, exp in d_ci_exp and
+ // b in d_ci_max; b is true if degree( term ) >= degree( rhs ).
+ std::map<Node, std::map<Node, std::map<Node, Kind> > > d_ci;
+ std::map<Node, std::map<Node, std::map<Node, Node> > > d_ci_exp;
+ std::map<Node, std::map<Node, std::map<Node, bool> > > d_ci_max;
+
+ /** Make literal */
+ static Node mkLit(Node a, Node b, int status, bool isAbsolute = false);
+ /** register monomial */
+ void setMonomialFactor(Node a, Node b, const NodeMultiset& common);
+ /** assign order ids */
+ void assignOrderIds(std::vector<Node>& vars,
+ NodeMultiset& d_order,
+ bool isConcrete,
+ bool isAbsolute);
+
+ /** Check whether we have already inferred a relationship between monomials
+ * x and y based on the information in cmp_infers. This computes the
+ * transitive closure of the relation stored in cmp_infers.
+ */
+ bool cmp_holds(Node x,
+ Node y,
+ std::map<Node, std::map<Node, Node> >& cmp_infers,
+ std::vector<Node>& exp,
+ std::map<Node, bool>& visited);
+ /** In the following functions, status states a relationship
+ * between two arithmetic terms, where:
+ * 0 : equal
+ * 1 : greater than or equal
+ * 2 : greater than
+ * -X : (greater -> less)
+ * TODO (#1287) make this an enum?
+ */
+ /** compute the sign of a.
+ *
+ * Calls to this function are such that :
+ * exp => ( oa = a ^ a <status> 0 )
+ *
+ * This function iterates over the factors of a,
+ * where a_index is the index of the factor in a
+ * we are currently looking at.
+ *
+ * This function returns a status, which indicates
+ * a's relationship to 0.
+ * We add lemmas to lem of the form given by the
+ * lemma schema checkSign(...).
+ */
+ int compareSign(Node oa,
+ Node a,
+ unsigned a_index,
+ int status,
+ std::vector<Node>& exp,
+ std::vector<Node>& lem);
+ /** compare monomials a and b
+ *
+ * Initially, a call to this function is such that :
+ * exp => ( oa = a ^ ob = b )
+ *
+ * This function returns true if we can infer a valid
+ * arithmetic lemma of the form :
+ * P => abs( a ) >= abs( b )
+ * where P is true and abs( a ) >= abs( b ) is false in the
+ * current model.
+ *
+ * This function is implemented by "processing" factors
+ * of monomials a and b until an inference of the above
+ * form can be made. For example, if :
+ * a = x*x*y and b = z*w
+ * Assuming we are trying to show abs( a ) >= abs( b ),
+ * then if abs( M( x ) ) >= abs( M( z ) ) where M is the current model,
+ * then we can add abs( x ) >= abs( z ) to our explanation, and
+ * mark one factor of x as processed in a, and
+ * one factor of z as processed in b. The number of processed factors of a
+ * and b are stored in a_exp_proc and b_exp_proc respectively.
+ *
+ * cmp_infers stores information that is helpful
+ * in discarding redundant inferences. For example,
+ * we do not want to infer abs( x ) >= abs( z ) if
+ * we have already inferred abs( x ) >= abs( y ) and
+ * abs( y ) >= abs( z ).
+ * It stores entries of the form (status,t1,t2)->F,
+ * which indicates that we constructed a lemma F that
+ * showed t1 <status> t2.
+ *
+ * We add lemmas to lem of the form given by the
+ * lemma schema checkMagnitude(...).
+ */
+ bool compareMonomial(
+ Node oa,
+ Node a,
+ NodeMultiset& a_exp_proc,
+ Node ob,
+ Node b,
+ NodeMultiset& b_exp_proc,
+ std::vector<Node>& exp,
+ std::vector<Node>& lem,
+ std::map<int, std::map<Node, std::map<Node, Node> > >& cmp_infers);
+ /** helper function for above
+ *
+ * The difference is the inputs a_index and b_index, which are the indices of
+ * children (factors) in monomials a and b which we are currently looking at.
+ */
+ bool compareMonomial(
+ Node oa,
+ Node a,
+ unsigned a_index,
+ NodeMultiset& a_exp_proc,
+ Node ob,
+ Node b,
+ unsigned b_index,
+ NodeMultiset& b_exp_proc,
+ int status,
+ std::vector<Node>& exp,
+ std::vector<Node>& lem,
+ std::map<int, std::map<Node, std::map<Node, Node> > >& cmp_infers);
+ /** Get factor skolem for n, add resulting lemmas to lemmas */
+ Node getFactorSkolem(Node n, std::vector<Node>& lemmas);
+}; /* class NlSolver */
+
+} // namespace nl
+} // namespace arith
+} // namespace theory
+} // namespace CVC4
+
+#endif /* CVC4__THEORY__ARITH__NL_SOLVER_H */
--- /dev/null
+/********************* */
+/*! \file nonlinear_extension.cpp
+ ** \verbatim
+ ** Top contributors (to current version):
+ ** Andrew Reynolds, Tim King, Aina Niemetz
+ ** This file is part of the CVC4 project.
+ ** Copyright (c) 2009-2019 by the authors listed in the file AUTHORS
+ ** in the top-level source directory) and their institutional affiliations.
+ ** All rights reserved. See the file COPYING in the top-level source
+ ** directory for licensing information.\endverbatim
+ **
+ ** \brief Implementation of the non-linear arithmetic extension.
+ **/
+
+#include "theory/arith/nl/nonlinear_extension.h"
+
+#include "options/arith_options.h"
+#include "theory/arith/arith_utilities.h"
+#include "theory/arith/theory_arith.h"
+#include "theory/ext_theory.h"
+#include "theory/theory_model.h"
+
+using namespace CVC4::kind;
+
+namespace CVC4 {
+namespace theory {
+namespace arith {
+namespace nl {
+
+NonlinearExtension::NonlinearExtension(TheoryArith& containing,
+ eq::EqualityEngine* ee)
+ : d_lemmas(containing.getUserContext()),
+ d_containing(containing),
+ d_ee(ee),
+ d_needsLastCall(false),
+ d_model(containing.getSatContext()),
+ d_trSlv(d_model),
+ d_nlSlv(containing, d_model),
+ d_builtModel(containing.getSatContext(), false)
+{
+ d_true = NodeManager::currentNM()->mkConst(true);
+ d_zero = NodeManager::currentNM()->mkConst(Rational(0));
+ d_one = NodeManager::currentNM()->mkConst(Rational(1));
+ d_neg_one = NodeManager::currentNM()->mkConst(Rational(-1));
+}
+
+NonlinearExtension::~NonlinearExtension() {}
+
+bool NonlinearExtension::getCurrentSubstitution(
+ int effort,
+ const std::vector<Node>& vars,
+ std::vector<Node>& subs,
+ std::map<Node, std::vector<Node>>& exp)
+{
+ // get the constant equivalence classes
+ std::map<Node, std::vector<int>> rep_to_subs_index;
+
+ bool retVal = false;
+ for (unsigned i = 0; i < vars.size(); i++)
+ {
+ Node n = vars[i];
+ if (d_ee->hasTerm(n))
+ {
+ Node nr = d_ee->getRepresentative(n);
+ if (nr.isConst())
+ {
+ subs.push_back(nr);
+ Trace("nl-subs") << "Basic substitution : " << n << " -> " << nr
+ << std::endl;
+ exp[n].push_back(n.eqNode(nr));
+ retVal = true;
+ }
+ else
+ {
+ rep_to_subs_index[nr].push_back(i);
+ subs.push_back(n);
+ }
+ }
+ else
+ {
+ subs.push_back(n);
+ }
+ }
+
+ // return true if the substitution is non-trivial
+ return retVal;
+}
+
+std::pair<bool, Node> NonlinearExtension::isExtfReduced(
+ int effort, Node n, Node on, const std::vector<Node>& exp) const
+{
+ if (n != d_zero)
+ {
+ Kind k = n.getKind();
+ return std::make_pair(k != NONLINEAR_MULT && !isTranscendentalKind(k),
+ Node::null());
+ }
+ Assert(n == d_zero);
+ if (on.getKind() == NONLINEAR_MULT)
+ {
+ Trace("nl-ext-zero-exp")
+ << "Infer zero : " << on << " == " << n << std::endl;
+ // minimize explanation if a substitution+rewrite results in zero
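+ // e.g. if on is x*y*z and exp contains an equality x = 0, then that single
+ // equality suffices as the explanation for on = 0.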
+ const std::set<Node> vars(on.begin(), on.end());
+
+ for (unsigned i = 0, size = exp.size(); i < size; i++)
+ {
+ Trace("nl-ext-zero-exp")
+ << " exp[" << i << "] = " << exp[i] << std::endl;
+ std::vector<Node> eqs;
+ if (exp[i].getKind() == EQUAL)
+ {
+ eqs.push_back(exp[i]);
+ }
+ else if (exp[i].getKind() == AND)
+ {
+ for (const Node& ec : exp[i])
+ {
+ if (ec.getKind() == EQUAL)
+ {
+ eqs.push_back(ec);
+ }
+ }
+ }
+
+ for (unsigned j = 0; j < eqs.size(); j++)
+ {
+ for (unsigned r = 0; r < 2; r++)
+ {
+ if (eqs[j][r] == d_zero && vars.find(eqs[j][1 - r]) != vars.end())
+ {
+ Trace("nl-ext-zero-exp")
+ << "...single exp : " << eqs[j] << std::endl;
+ return std::make_pair(true, eqs[j]);
+ }
+ }
+ }
+ }
+ }
+ return std::make_pair(true, Node::null());
+}
+
+void NonlinearExtension::sendLemmas(const std::vector<Node>& out,
+ bool preprocess,
+ std::map<Node, NlLemmaSideEffect>& lemSE)
+{
+ std::map<Node, NlLemmaSideEffect>::iterator its;
+ for (const Node& lem : out)
+ {
+ Trace("nl-ext-lemma") << "NonlinearExtension::Lemma : " << lem << std::endl;
+ d_containing.getOutputChannel().lemma(lem, false, preprocess);
+ // process the side effect
+ its = lemSE.find(lem);
+ if (its != lemSE.end())
+ {
+ processSideEffect(its->second);
+ }
+ // add to cache if not preprocess
+ if (!preprocess)
+ {
+ d_lemmas.insert(lem);
+ }
+ // also indicate this is a tautology
+ d_model.addTautology(lem);
+ }
+}
+
+void NonlinearExtension::processSideEffect(const NlLemmaSideEffect& se)
+{
+ d_trSlv.processSideEffect(se);
+}
+
+unsigned NonlinearExtension::filterLemma(Node lem, std::vector<Node>& out)
+{
+ Trace("nl-ext-lemma-debug")
+ << "NonlinearExtension::Lemma pre-rewrite : " << lem << std::endl;
+ lem = Rewriter::rewrite(lem);
+ if (d_lemmas.find(lem) != d_lemmas.end()
+ || std::find(out.begin(), out.end(), lem) != out.end())
+ {
+ Trace("nl-ext-lemma-debug")
+ << "NonlinearExtension::Lemma duplicate : " << lem << std::endl;
+ return 0;
+ }
+ out.push_back(lem);
+ return 1;
+}
+
+unsigned NonlinearExtension::filterLemmas(std::vector<Node>& lemmas,
+ std::vector<Node>& out)
+{
+ if (options::nlExtEntailConflicts())
+ {
+ // check if any are entailed to be false
+ for (const Node& lem : lemmas)
+ {
+ Node ch_lemma = lem.negate();
+ ch_lemma = Rewriter::rewrite(ch_lemma);
+ Trace("nl-ext-et-debug")
+ << "Check entailment of " << ch_lemma << "..." << std::endl;
+ std::pair<bool, Node> et = d_containing.getValuation().entailmentCheck(
+ options::TheoryOfMode::THEORY_OF_TYPE_BASED, ch_lemma);
+ Trace("nl-ext-et-debug") << "entailment test result : " << et.first << " "
+ << et.second << std::endl;
+ if (et.first)
+ {
+ Trace("nl-ext-et") << "*** Lemma entailed to be in conflict : " << lem
+ << std::endl;
+ // return just this lemma
+ if (filterLemma(lem, out) > 0)
+ {
+ lemmas.clear();
+ return 1;
+ }
+ }
+ }
+ }
+
+ unsigned sum = 0;
+ for (const Node& lem : lemmas)
+ {
+ sum += filterLemma(lem, out);
+ }
+ lemmas.clear();
+ return sum;
+}
+
+void NonlinearExtension::getAssertions(std::vector<Node>& assertions)
+{
+ Trace("nl-ext") << "Getting assertions..." << std::endl;
+ NodeManager* nm = NodeManager::currentNM();
+ // get the assertions
+ std::map<Node, Rational> init_bounds[2];
+ std::map<Node, Node> init_bounds_lit[2];
+ unsigned nassertions = 0;
+ std::unordered_set<Node, NodeHashFunction> init_assertions;
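+ // We drop bounds subsumed by stronger ones, e.g. t >= 2 is dropped when
+ // t >= 3 is also asserted, and the pair t >= 2, t <= 2 is replaced by the
+ // single equality t = 2.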
+ for (Theory::assertions_iterator it = d_containing.facts_begin();
+ it != d_containing.facts_end();
+ ++it)
+ {
+ nassertions++;
+ const Assertion& assertion = *it;
+ Node lit = assertion.d_assertion;
+ init_assertions.insert(lit);
+ // check for concrete bounds
+ bool pol = lit.getKind() != NOT;
+ Node atom_orig = lit.getKind() == NOT ? lit[0] : lit;
+
+ std::vector<Node> atoms;
+ if (atom_orig.getKind() == EQUAL)
+ {
+ if (pol)
+ {
+ // t = s is ( t >= s ^ t <= s )
+ for (unsigned i = 0; i < 2; i++)
+ {
+ Node atom_new = nm->mkNode(GEQ, atom_orig[i], atom_orig[1 - i]);
+ atom_new = Rewriter::rewrite(atom_new);
+ atoms.push_back(atom_new);
+ }
+ }
+ }
+ else
+ {
+ atoms.push_back(atom_orig);
+ }
+
+ for (const Node& atom : atoms)
+ {
+ // non-strict bounds only
+ if (atom.getKind() == GEQ || (!pol && atom.getKind() == GT))
+ {
+ Node p = atom[0];
+ Assert(atom[1].isConst());
+ Rational bound = atom[1].getConst<Rational>();
+ if (!pol)
+ {
+ if (atom[0].getType().isInteger())
+ {
+ // ~( p >= c ) ---> ( p <= c-1 )
+ bound = bound - Rational(1);
+ }
+ }
+ unsigned bindex = pol ? 0 : 1;
+ bool setBound = true;
+ std::map<Node, Rational>::iterator itb = init_bounds[bindex].find(p);
+ if (itb != init_bounds[bindex].end())
+ {
+ if (itb->second == bound)
+ {
+ setBound = atom_orig.getKind() == EQUAL;
+ }
+ else
+ {
+ setBound = pol ? itb->second < bound : itb->second > bound;
+ }
+ if (setBound)
+ {
+ // the previous bound is subsumed by this one, drop its literal
+ init_assertions.erase(init_bounds_lit[bindex][p]);
+ }
+ }
+ if (setBound)
+ {
+ Trace("nl-ext-init") << (pol ? "Lower" : "Upper") << " bound for "
+ << p << " : " << bound << std::endl;
+ init_bounds[bindex][p] = bound;
+ init_bounds_lit[bindex][p] = lit;
+ }
+ }
+ }
+ }
+ // for each bound that is the same, ensure we've inferred the equality
+ for (std::pair<const Node, Rational>& ib : init_bounds[0])
+ {
+ Node p = ib.first;
+ Node lit1 = init_bounds_lit[0][p];
+ if (lit1.getKind() != EQUAL)
+ {
+ std::map<Node, Rational>::iterator itb = init_bounds[1].find(p);
+ if (itb != init_bounds[1].end())
+ {
+ if (ib.second == itb->second)
+ {
+ Node eq = p.eqNode(nm->mkConst(ib.second));
+ eq = Rewriter::rewrite(eq);
+ Node lit2 = init_bounds_lit[1][p];
+ Assert(lit2.getKind() != EQUAL);
+ // use the equality instead, thus these are redundant
+ init_assertions.erase(lit1);
+ init_assertions.erase(lit2);
+ init_assertions.insert(eq);
+ }
+ }
+ }
+ }
+
+ for (const Node& a : init_assertions)
+ {
+ assertions.push_back(a);
+ }
+ Trace("nl-ext") << "...keep " << assertions.size() << " / " << nassertions
+ << " assertions." << std::endl;
+}
+
+std::vector<Node> NonlinearExtension::checkModelEval(
+ const std::vector<Node>& assertions)
+{
+ std::vector<Node> false_asserts;
+ for (size_t i = 0; i < assertions.size(); ++i)
+ {
+ Node lit = assertions[i];
+ Node atom = lit.getKind() == NOT ? lit[0] : lit;
+ Node litv = d_model.computeConcreteModelValue(lit);
+ Trace("nl-ext-mv-assert") << "M[[ " << lit << " ]] -> " << litv;
+ if (litv != d_true)
+ {
+ Trace("nl-ext-mv-assert") << " [model-false]" << std::endl;
+ false_asserts.push_back(lit);
+ }
+ else
+ {
+ Trace("nl-ext-mv-assert") << std::endl;
+ }
+ }
+ return false_asserts;
+}
+
+bool NonlinearExtension::checkModel(const std::vector<Node>& assertions,
+ const std::vector<Node>& false_asserts,
+ std::vector<Node>& lemmas,
+ std::vector<Node>& gs)
+{
+ Trace("nl-ext-cm") << "--- check-model ---" << std::endl;
+
+ // get the presubstitution
+ Trace("nl-ext-cm-debug") << " apply pre-substitution..." << std::endl;
+ std::vector<Node> passertions = assertions;
+
+ // preprocess the assertions with the transcendental solver
+ if (!d_trSlv.preprocessAssertionsCheckModel(passertions))
+ {
+ return false;
+ }
+
+ Trace("nl-ext-cm") << "-----" << std::endl;
+ unsigned tdegree = d_trSlv.getTaylorDegree();
+ bool ret =
+ d_model.checkModel(passertions, false_asserts, tdegree, lemmas, gs);
+ return ret;
+}
+
+int NonlinearExtension::checkLastCall(const std::vector<Node>& assertions,
+ const std::vector<Node>& false_asserts,
+ const std::vector<Node>& xts,
+ std::vector<Node>& lems,
+ std::vector<Node>& lemsPp,
+ std::vector<Node>& wlems,
+ std::map<Node, NlLemmaSideEffect>& lemSE)
+{
+ // initialize the non-linear solver
+ d_nlSlv.initLastCall(assertions, false_asserts, xts);
+ // initialize the transcendental function solver
+ std::vector<Node> lemmas;
+ d_trSlv.initLastCall(assertions, false_asserts, xts, lemmas, lemsPp);
+
+ // process lemmas that may have been generated by the transcendental solver
+ filterLemmas(lemmas, lems);
+ if (!lems.empty() || !lemsPp.empty())
+ {
+ Trace("nl-ext") << " ...finished with " << lems.size()
+ << " new lemmas during registration." << std::endl;
+ return lems.size() + lemsPp.size();
+ }
+
+ //----------------------------------- possibly split on zero
+ if (options::nlExtSplitZero())
+ {
+ Trace("nl-ext") << "Get zero split lemmas..." << std::endl;
+ lemmas = d_nlSlv.checkSplitZero();
+ filterLemmas(lemmas, lems);
+ if (!lems.empty())
+ {
+ Trace("nl-ext") << " ...finished with " << lems.size() << " new lemmas."
+ << std::endl;
+ return lems.size();
+ }
+ }
+
+ //-----------------------------------initial lemmas for transcendental
+ // functions
+ lemmas = d_trSlv.checkTranscendentalInitialRefine();
+ filterLemmas(lemmas, lems);
+ if (!lems.empty())
+ {
+ Trace("nl-ext") << " ...finished with " << lems.size() << " new lemmas."
+ << std::endl;
+ return lems.size();
+ }
+
+ //-----------------------------------lemmas based on sign (comparison to zero)
+ lemmas = d_nlSlv.checkMonomialSign();
+ filterLemmas(lemmas, lems);
+ if (!lems.empty())
+ {
+ Trace("nl-ext") << " ...finished with " << lems.size() << " new lemmas."
+ << std::endl;
+ return lems.size();
+ }
+
+ //-----------------------------------monotonicity of transcendental functions
+ lemmas = d_trSlv.checkTranscendentalMonotonic();
+ filterLemmas(lemmas, lems);
+ if (!lems.empty())
+ {
+ Trace("nl-ext") << " ...finished with " << lems.size() << " new lemmas."
+ << std::endl;
+ return lems.size();
+ }
+
+ //-----------------------------------lemmas based on magnitude of non-zero
+ // monomials
+ for (unsigned c = 0; c < 3; c++)
+ {
+ // c is effort level
+ lemmas = d_nlSlv.checkMonomialMagnitude(c);
+ unsigned nlem = lemmas.size();
+ filterLemmas(lemmas, lems);
+ if (!lems.empty())
+ {
+ Trace("nl-ext") << " ...finished with " << lems.size()
+ << " new lemmas (out of possible " << nlem << ")."
+ << std::endl;
+ return lems.size();
+ }
+ }
+
+ //-----------------------------------inferred bounds lemmas
+ // e.g. x >= t => y*x >= y*t
+ std::vector<Node> nt_lemmas;
+ lemmas =
+ d_nlSlv.checkMonomialInferBounds(nt_lemmas, assertions, false_asserts);
+ // Trace("nl-ext") << "Bound lemmas : " << lemmas.size() << ", " <<
+ // nt_lemmas.size() << std::endl; prioritize lemmas that do not
+ // introduce new monomials
+ filterLemmas(lemmas, lems);
+
+ if (options::nlExtTangentPlanes() && options::nlExtTangentPlanesInterleave())
+ {
+ lemmas = d_nlSlv.checkTangentPlanes();
+ filterLemmas(lemmas, lems);
+ }
+
+ if (!lems.empty())
+ {
+ Trace("nl-ext") << " ...finished with " << lems.size() << " new lemmas."
+ << std::endl;
+ return lems.size();
+ }
+
+ // from inferred bound inferences : now do ones that introduce new terms
+ filterLemmas(nt_lemmas, lems);
+ if (!lems.empty())
+ {
+ Trace("nl-ext") << " ...finished with " << lems.size()
+ << " new (monomial-introducing) lemmas." << std::endl;
+ return lems.size();
+ }
+
+ //------------------------------------factoring lemmas
+ // x*y + x*z >= t => exists k. k = y + z ^ x*k >= t
+ if (options::nlExtFactor())
+ {
+ lemmas = d_nlSlv.checkFactoring(assertions, false_asserts);
+ filterLemmas(lemmas, lems);
+ if (!lems.empty())
+ {
+ Trace("nl-ext") << " ...finished with " << lems.size() << " new lemmas."
+ << std::endl;
+ return lems.size();
+ }
+ }
+
+ //------------------------------------resolution bound inferences
+ // e.g. ( y>=0 ^ s <= x*z ^ x*y <= t ) => y*s <= z*t
+ if (options::nlExtResBound())
+ {
+ lemmas = d_nlSlv.checkMonomialInferResBounds();
+ filterLemmas(lemmas, lems);
+ if (!lems.empty())
+ {
+ Trace("nl-ext") << " ...finished with " << lems.size() << " new lemmas."
+ << std::endl;
+ return lems.size();
+ }
+ }
+
+ //------------------------------------tangent planes
+ if (options::nlExtTangentPlanes() && !options::nlExtTangentPlanesInterleave())
+ {
+ lemmas = d_nlSlv.checkTangentPlanes();
+ filterLemmas(lemmas, wlems);
+ }
+ if (options::nlExtTfTangentPlanes())
+ {
+ lemmas = d_trSlv.checkTranscendentalTangentPlanes(lemSE);
+ filterLemmas(lemmas, wlems);
+ }
+ Trace("nl-ext") << " ...finished with " << wlems.size() << " waiting lemmas."
+ << std::endl;
+
+ return 0;
+}
+
+void NonlinearExtension::check(Theory::Effort e)
+{
+ Trace("nl-ext") << std::endl;
+ Trace("nl-ext") << "NonlinearExtension::check, effort = " << e
+ << ", built model = " << d_builtModel.get() << std::endl;
+ if (e == Theory::EFFORT_FULL)
+ {
+ d_containing.getExtTheory()->clearCache();
+ d_needsLastCall = true;
+ if (options::nlExtRewrites())
+ {
+ std::vector<Node> nred;
+ if (!d_containing.getExtTheory()->doInferences(0, nred))
+ {
+ Trace("nl-ext") << "...sent no lemmas, # extf to reduce = "
+ << nred.size() << std::endl;
+ if (nred.empty())
+ {
+ d_needsLastCall = false;
+ }
+ }
+ else
+ {
+ Trace("nl-ext") << "...sent lemmas." << std::endl;
+ }
+ }
+ }
+ else
+ {
+ // If we computed lemmas during collectModelInfo, send them now.
+ if (!d_cmiLemmas.empty() || !d_cmiLemmasPp.empty())
+ {
+ sendLemmas(d_cmiLemmas, false, d_cmiLemmasSE);
+ sendLemmas(d_cmiLemmasPp, true, d_cmiLemmasSE);
+ return;
+ }
+ // Otherwise, we will answer SAT. The values that we approximated are
+ // recorded as approximations here.
+ TheoryModel* tm = d_containing.getValuation().getModel();
+ for (std::pair<const Node, std::pair<Node, Node>>& a : d_approximations)
+ {
+ if (a.second.second.isNull())
+ {
+ tm->recordApproximation(a.first, a.second.first);
+ }
+ else
+ {
+ tm->recordApproximation(a.first, a.second.first, a.second.second);
+ }
+ }
+ }
+}
+
+bool NonlinearExtension::modelBasedRefinement(
+ std::vector<Node>& mlems,
+ std::vector<Node>& mlemsPp,
+ std::map<Node, NlLemmaSideEffect>& lemSE)
+{
+ // get the assertions
+ std::vector<Node> assertions;
+ getAssertions(assertions);
+
+ Trace("nl-ext-mv-assert")
+ << "Getting model values... check for [model-false]" << std::endl;
+ // get the assertions that are false in the model
+ const std::vector<Node> false_asserts = checkModelEval(assertions);
+
+ // get the extended terms belonging to this theory
+ std::vector<Node> xts;
+ d_containing.getExtTheory()->getTerms(xts);
+
+ if (Trace.isOn("nl-ext-debug"))
+ {
+ Trace("nl-ext-debug") << " processing NonlinearExtension::check : "
+ << std::endl;
+ Trace("nl-ext-debug") << " " << false_asserts.size()
+ << " false assertions" << std::endl;
+ Trace("nl-ext-debug") << " " << xts.size()
+ << " extended terms: " << std::endl;
+ Trace("nl-ext-debug") << " ";
+ for (unsigned j = 0; j < xts.size(); j++)
+ {
+ Trace("nl-ext-debug") << xts[j] << " ";
+ }
+ Trace("nl-ext-debug") << std::endl;
+ }
+
+ // compute whether shared terms have correct values
+ unsigned num_shared_wrong_value = 0;
+ std::vector<Node> shared_term_value_splits;
+ // must ensure that shared terms are equal to their concrete value
+ Trace("nl-ext-mv") << "Shared terms : " << std::endl;
+ for (context::CDList<TNode>::const_iterator its =
+ d_containing.shared_terms_begin();
+ its != d_containing.shared_terms_end();
+ ++its)
+ {
+ TNode shared_term = *its;
+ // compute its value in the model, and its evaluation in the model
+ Node stv0 = d_model.computeConcreteModelValue(shared_term);
+ Node stv1 = d_model.computeAbstractModelValue(shared_term);
+ d_model.printModelValue("nl-ext-mv", shared_term);
+ if (stv0 != stv1)
+ {
+ num_shared_wrong_value++;
+ Trace("nl-ext-mv") << "Bad shared term value : " << shared_term
+ << std::endl;
+ if (shared_term != stv0)
+ {
+ // split on the value, this is non-terminating in general, TODO :
+ // improve this
+ Node eq = shared_term.eqNode(stv0);
+ shared_term_value_splits.push_back(eq);
+ }
+ else
+ {
+ // this can happen for transcendental functions
+ // the problem is that we cannot evaluate transcendental functions
+ // (they don't have a rewriter that returns constants)
+ // thus, the actual value in their model can be themselves, hence we
+ // have no reference point to rule out the current model. In this
+ // case, we may set incomplete below.
+ }
+ }
+ }
+ Trace("nl-ext-debug") << " " << num_shared_wrong_value
+ << " shared terms with wrong model value." << std::endl;
+ bool needsRecheck;
+ do
+ {
+ d_model.resetCheck();
+ needsRecheck = false;
+ // complete_status:
+ // 1 : we may answer SAT, -1 : we may not answer SAT, 0 : unknown
+ int complete_status = 1;
+ // lemmas that should be sent later
+ std::vector<Node> wlems;
+ // We require a check either if an assertion is false or a shared term has
+ // a wrong value
+ if (!false_asserts.empty() || num_shared_wrong_value > 0)
+ {
+ complete_status = num_shared_wrong_value > 0 ? -1 : 0;
+ checkLastCall(
+ assertions, false_asserts, xts, mlems, mlemsPp, wlems, lemSE);
+ if (!mlems.empty() || !mlemsPp.empty())
+ {
+ return true;
+ }
+ }
+ Trace("nl-ext") << "Finished check with status : " << complete_status
+ << std::endl;
+
+ // if we did not add a lemma during check and there is a chance for SAT
+ if (complete_status == 0)
+ {
+ Trace("nl-ext")
+ << "Check model based on bounds for irrational-valued functions..."
+ << std::endl;
+ // check the model based on simple solving of equalities and using
+ // error bounds on the Taylor approximation of transcendental functions.
+ std::vector<Node> lemmas;
+ std::vector<Node> gs;
+ if (checkModel(assertions, false_asserts, lemmas, gs))
+ {
+ complete_status = 1;
+ }
+ for (const Node& mg : gs)
+ {
+ Node mgr = Rewriter::rewrite(mg);
+ mgr = d_containing.getValuation().ensureLiteral(mgr);
+ d_containing.getOutputChannel().requirePhase(mgr, true);
+ d_builtModel = true;
+ }
+ filterLemmas(lemmas, mlems);
+ if (!mlems.empty())
+ {
+ return true;
+ }
+ }
+
+ // if we have not concluded SAT
+ if (complete_status != 1)
+ {
+ // flush the waiting lemmas
+ if (!wlems.empty())
+ {
+ mlems.insert(mlems.end(), wlems.begin(), wlems.end());
+ Trace("nl-ext") << "...added " << wlems.size() << " waiting lemmas."
+ << std::endl;
+ return true;
+ }
+ // resort to splitting on shared terms with their model value
+ // if we did not add any lemmas
+ if (num_shared_wrong_value > 0)
+ {
+ complete_status = -1;
+ if (!shared_term_value_splits.empty())
+ {
+ std::vector<Node> stvLemmas;
+ for (const Node& eq : shared_term_value_splits)
+ {
+ Node req = Rewriter::rewrite(eq);
+ Node literal = d_containing.getValuation().ensureLiteral(req);
+ d_containing.getOutputChannel().requirePhase(literal, true);
+ Trace("nl-ext-debug") << "Split on : " << literal << std::endl;
+ Node split = literal.orNode(literal.negate());
+ filterLemma(split, stvLemmas);
+ }
+ if (!stvLemmas.empty())
+ {
+ mlems.insert(mlems.end(), stvLemmas.begin(), stvLemmas.end());
+ Trace("nl-ext") << "...added " << stvLemmas.size()
+ << " shared term value split lemmas." << std::endl;
+ return true;
+ }
+ }
+ else
+ {
+ // this can happen if we are trying to do theory combination with
+ // transcendental functions
+ // since their model value cannot even be computed exactly
+ }
+ }
+
+ // we are incomplete
+ if (options::nlExtIncPrecision() && d_model.usedApproximate())
+ {
+ d_trSlv.incrementTaylorDegree();
+ needsRecheck = true;
+ // increase precision for PI?
+ // Difficult since Taylor series is very slow to converge
+ Trace("nl-ext") << "...increment Taylor degree to "
+ << d_trSlv.getTaylorDegree() << std::endl;
+ }
+ else
+ {
+ Trace("nl-ext") << "...failed to send lemma in "
+ "NonLinearExtension, set incomplete"
+ << std::endl;
+ d_containing.getOutputChannel().setIncomplete();
+ }
+ }
+ } while (needsRecheck);
+
+ // did not add lemmas
+ return false;
+}
+
+void NonlinearExtension::interceptModel(std::map<Node, Node>& arithModel)
+{
+ if (!needsCheckLastEffort())
+ {
+ // no non-linear constraints, we are done
+ return;
+ }
+ Trace("nl-ext") << "NonlinearExtension::interceptModel begin" << std::endl;
+ d_model.reset(d_containing.getValuation().getModel(), arithModel);
+ // run a last call effort check
+ d_cmiLemmas.clear();
+ d_cmiLemmasPp.clear();
+ d_cmiLemmasSE.clear();
+ if (!d_builtModel.get())
+ {
+ Trace("nl-ext") << "interceptModel: do model-based refinement" << std::endl;
+ modelBasedRefinement(d_cmiLemmas, d_cmiLemmasPp, d_cmiLemmasSE);
+ }
+ if (d_builtModel.get())
+ {
+ Trace("nl-ext") << "interceptModel: do model repair" << std::endl;
+ d_approximations.clear();
+ // modify the model values
+ d_model.getModelValueRepair(arithModel, d_approximations);
+ }
+}
+
+void NonlinearExtension::presolve()
+{
+ Trace("nl-ext") << "NonlinearExtension::presolve" << std::endl;
+}
+
+} // namespace nl
+} // namespace arith
+} // namespace theory
+} // namespace CVC4
--- /dev/null
+/********************* */
+/*! \file nonlinear_extension.h
+ ** \verbatim
+ ** Top contributors (to current version):
+ ** Andrew Reynolds, Tim King
+ ** This file is part of the CVC4 project.
+ ** Copyright (c) 2009-2019 by the authors listed in the file AUTHORS
+ ** in the top-level source directory) and their institutional affiliations.
+ ** All rights reserved. See the file COPYING in the top-level source
+ ** directory for licensing information.\endverbatim
+ **
+ ** \brief Extensions for incomplete handling of nonlinear multiplication.
+ **
+ ** Extensions to the theory of arithmetic for incomplete handling of nonlinear
+ ** multiplication via axiom instantiations.
+ **/
+
+#ifndef CVC4__THEORY__ARITH__NL__NONLINEAR_EXTENSION_H
+#define CVC4__THEORY__ARITH__NL__NONLINEAR_EXTENSION_H
+
+#include <stdint.h>
+#include <map>
+#include <vector>
+
+#include "context/cdlist.h"
+#include "expr/kind.h"
+#include "expr/node.h"
+#include "theory/arith/nl/nl_lemma_utils.h"
+#include "theory/arith/nl/nl_model.h"
+#include "theory/arith/nl/nl_solver.h"
+#include "theory/arith/nl/transcendental_solver.h"
+#include "theory/arith/theory_arith.h"
+#include "theory/uf/equality_engine.h"
+
+namespace CVC4 {
+namespace theory {
+namespace arith {
+namespace nl {
+
+/** Non-linear extension class
+ *
+ * This class implements model-based refinement schemes
+ * for non-linear arithmetic, described in:
+ *
+ * - "Invariant Checking of NRA Transition Systems
+ * via Incremental Reduction to LRA with EUF" by
+ * Cimatti et al., TACAS 2017.
+ *
+ * - Section 5 of "Desiging Theory Solvers with
+ * Extensions" by Reynolds et al., FroCoS 2017.
+ *
+ * - "Satisfiability Modulo Transcendental
+ * Functions via Incremental Linearization" by Cimatti
+ * et al., CADE 2017.
+ *
+ * Its main functionality is a check(...) method,
+ * which is called by TheoryArithPrivate either:
+ * (1) at full effort with no conflicts or lemmas emitted, or
+ * (2) at last call effort.
+ * In this method, this class calls d_out->lemma(...)
+ * for valid arithmetic theory lemmas, based on the current set of assertions,
+ * where d_out is the output channel of TheoryArith.
+ */
+class NonlinearExtension
+{
+ typedef context::CDHashSet<Node, NodeHashFunction> NodeSet;
+
+ public:
+ NonlinearExtension(TheoryArith& containing, eq::EqualityEngine* ee);
+ ~NonlinearExtension();
+ /** Get current substitution
+ *
+ * This function and the one below are
+ * used for context-dependent
+ * simplification, see Section 3.1 of
+ * "Designing Theory Solvers with Extensions"
+ * by Reynolds et al. FroCoS 2017.
+ *
+ * effort : an identifier indicating the stage where
+ * we are performing context-dependent simplification,
+ * vars : a set of arithmetic variables.
+ *
+ * This function populates subs and exp, such that for 0 <= i < vars.size():
+ * ( exp[vars[i]] ) => vars[i] = subs[i]
+ * where exp[vars[i]] is a set of assertions
+ * that hold in the current context. We call { vars -> subs } a "derivable
+ * substituion" (see Reynolds et al. FroCoS 2017).
+ */
+ bool getCurrentSubstitution(int effort,
+ const std::vector<Node>& vars,
+ std::vector<Node>& subs,
+ std::map<Node, std::vector<Node>>& exp);
+ /** Is the term n in reduced form?
+ *
+ * Used for context-dependent simplification.
+ *
+ * effort : an identifier indicating the stage where
+ * we are performing context-dependent simplification,
+ * on : the original term that we reduced to n,
+ * exp : an explanation such that ( exp => on = n ).
+ *
+ * We return a pair ( b, exp' ) such that
+ * if b is true, then:
+ * n is in reduced form
+ * if exp' is non-null, then ( exp' => on = n )
+ * The second part of the pair is used for constructing
+ * minimal explanations for context-dependent simplifications.
+ */
+ std::pair<bool, Node> isExtfReduced(int effort,
+ Node n,
+ Node on,
+ const std::vector<Node>& exp) const;
+ /** Check at effort level e.
+ *
+ * This call may result in (possibly multiple) calls to d_out->lemma(...)
+ * where d_out is the output channel of TheoryArith.
+ *
+ * If e is FULL, then we add lemmas based on context-dependent
+ * simplification (see Reynolds et al FroCoS 2017).
+ *
+ * If e is LAST_CALL, we add lemmas based on model-based refinement
+ * (see additionally Cimatti et al., TACAS 2017). The lemmas added at this
+ * effort may be computed during a call to interceptModel as described below.
+ */
+ void check(Theory::Effort e);
+ /** intercept model
+ *
+ * This method is called during TheoryArith::collectModelInfo, which is
+ * invoked after the linear arithmetic solver passes a full effort check
+ * with no lemmas.
+ *
+ * The argument arithModel is a map of the form { v1 -> c1, ..., vn -> cn }
+ * which represents the linear arithmetic theory solver's contribution to the
+ * current candidate model. That is, its collectModelInfo method is requesting
+ * that equalities v1 = c1, ..., vn = cn be added to the current model, where
+ * v1, ..., vn are arithmetic variables and c1, ..., cn are constants. Notice
+ * arithmetic variables may be real-valued terms belonging to other theories,
+ * or abstractions of applications of multiplication (kind NONLINEAR_MULT).
+ *
+ * This method requests that the non-linear solver inspect this model and
+ * do any number of the following:
+ * (1) Construct lemmas based on a model-based refinement procedure inspired
+ * by Cimatti et al., TACAS 2017.,
+ * (2) In the case that the nonlinear solver finds that the current
+ * constraints are satisfiable, it may "repair" the values in the argument
+ * arithModel so that it satisfies certain nonlinear constraints. This may
+ * involve e.g. solving for variables in nonlinear equations.
+ *
+ * Notice that in the former case, the lemmas it constructs are not sent out
+ * immediately. Instead, they are put in temporary vectors d_cmiLemmas
+ * and d_cmiLemmasPp, which are then sent out (if necessary) when a last call
+ * effort check is issued to this class.
+ */
+ void interceptModel(std::map<Node, Node>& arithModel);
+ /** Does this class need a call to check(...) at last call effort? */
+ bool needsCheckLastEffort() const { return d_needsLastCall; }
+ /** presolve
+ *
+ * This function is called during TheoryArith's presolve command.
+ * In this function, we send lemmas we accumulated during preprocessing,
+ * for instance, definitional lemmas from expandDefinitions are sent out
+ * on the output channel of TheoryArith in this function.
+ */
+ void presolve();
+
+ private:
+ /** Model-based refinement
+ *
+ * This is the main entry point of this class for generating lemmas on the
+ * output channel of the theory of arithmetic.
+ *
+ * It is currently run at last call effort. It applies lemma schemas
+ * described in Reynolds et al. FroCoS 2017 that are based on ruling out
+ * the current candidate model.
+ *
+ * This function returns true if a lemma was added to the vector lems/lemsPp.
+ * Otherwise, it returns false. In the latter case, the model object d_model
+ * may have information regarding how to construct a model, in the case that
+ * we determined the problem is satisfiable.
+ *
+ * The argument lemSE is the "side effect" of the lemmas in mlems and mlemsPp
+ * (for details, see checkLastCall).
+ */
+ bool modelBasedRefinement(std::vector<Node>& mlems,
+ std::vector<Node>& mlemsPp,
+ std::map<Node, NlLemmaSideEffect>& lemSE);
+
+ /** check last call
+ *
+ * Check assertions for consistency at LAST_CALL effort, where false_asserts
+ * is the subset of assertions that evaluate to false in the current model.
+ *
+ * xts : the list of (non-reduced) extended terms in the current context.
+ *
+ * This method adds lemmas to arguments lems, lemsPp, and wlems, each of
+ * which are intended to be sent out on the output channel of TheoryArith
+ * under certain conditions.
+ *
+ * If the set lems or lemsPp is non-empty, then no further processing is
+ * necessary. The last call effort check should terminate and these
+ * lemmas should be sent. The set lemsPp is distinguished from lems since
+ * the preprocess flag on the lemma(...) call should be set to true.
+ *
+ * The "waiting" lemmas wlems contain lemmas that should be sent on the
+ * output channel as a last resort. In other words, only if we are not
+ * able to establish SAT via a call to checkModel(...) should wlems be
+ * considered. This set typically contains tangent plane lemmas.
+ *
+ * The argument lemSE is the "side effect" of the lemmas from the previous
+ * three calls. If a lemma is mapped to a side effect, it should be
+ * processed via a call to processSideEffect(...) immediately after the
+ * lemma is sent (if it is indeed sent on this call to check).
+ */
+ int checkLastCall(const std::vector<Node>& assertions,
+ const std::vector<Node>& false_asserts,
+ const std::vector<Node>& xts,
+ std::vector<Node>& lems,
+ std::vector<Node>& lemsPp,
+ std::vector<Node>& wlems,
+ std::map<Node, NlLemmaSideEffect>& lemSE);
+
+ /** get assertions
+ *
+ * Let M be the set of assertions known by THEORY_ARITH. This function adds a
+ * set of literals M' to assertions such that M' and M are equivalent.
+ *
+ * Examples of how M' differs with M:
+ * (1) M' may not include t < c (in M) if t < c' is in M' for c' < c, where
+ * c and c' are constants,
+ * (2) M' may contain t = c if both t >= c and t <= c are in M.
+ */
+ void getAssertions(std::vector<Node>& assertions);
+ /** check model
+ *
+ * Returns the subset of assertions whose concrete values we cannot show are
+ * true in the current model. Notice that we typically cannot compute concrete
+ * values for assertions involving transcendental functions. Any assertion
+ * whose model value cannot be computed is included in the return value of
+ * this function.
+ */
+ std::vector<Node> checkModelEval(const std::vector<Node>& assertions);
+
+ //---------------------------check model
+ /** Check model
+ *
+ * Checks the current model based on solving for equalities, and using error
+ * bounds on the Taylor approximation.
+ *
+ * If this function returns true, then all assertions in the input argument
+ * "assertions" are satisfied for all interpretations of variables within
+ * their computed bounds (as stored in d_check_model_bounds).
+ *
+ * For details, see Section 3 of Cimatti et al CADE 2017 under the heading
+ * "Detecting Satisfiable Formulas".
+ *
+ * The arguments lemmas and gs store the lemmas and guard literals to be sent
+ * out on the output channel of TheoryArith as lemmas and calls to
+ * ensureLiteral respectively.
+ */
+ bool checkModel(const std::vector<Node>& assertions,
+ const std::vector<Node>& false_asserts,
+ std::vector<Node>& lemmas,
+ std::vector<Node>& gs);
+ //---------------------------end check model
+
+ /** Is n entailed with polarity pol in the current context? */
+ bool isEntailed(Node n, bool pol);
+
+ /**
+ * Potentially adds lemmas to the set out and clears lemmas. Returns
+ * the number of lemmas added to out. We do not add lemmas that have already
+ * been sent on the output channel of TheoryArith.
+ */
+ unsigned filterLemmas(std::vector<Node>& lemmas, std::vector<Node>& out);
+ /** singleton version of above */
+ unsigned filterLemma(Node lem, std::vector<Node>& out);
+
+ /**
+ * Send lemmas in out on the output channel of theory of arithmetic.
+ */
+ void sendLemmas(const std::vector<Node>& out,
+ bool preprocess,
+ std::map<Node, NlLemmaSideEffect>& lemSE);
+ /** Process side effect se */
+ void processSideEffect(const NlLemmaSideEffect& se);
+
+ /** cache of all lemmas sent on the output channel (user-context-dependent) */
+ NodeSet d_lemmas;
+ /** commonly used terms */
+ Node d_zero;
+ Node d_one;
+ Node d_neg_one;
+ Node d_true;
+ // The theory of arithmetic containing this extension.
+ TheoryArith& d_containing;
+ // pointer to used equality engine
+ eq::EqualityEngine* d_ee;
+ // needs last call effort
+ bool d_needsLastCall;
+ /** The non-linear model object
+ *
+ * This class is responsible for computing model values for arithmetic terms
+ * and for establishing when we are able to answer "SAT".
+ */
+ NlModel d_model;
+ /** The transcendental extension object
+ *
+ * This is the subsolver responsible for running the procedure for
+ * transcendental functions.
+ */
+ TranscendentalSolver d_trSlv;
+ /** The nonlinear extension object
+ *
+ * This is the subsolver responsible for running the procedure for
+ * constraints involving nonlinear multiplication.
+ */
+ NlSolver d_nlSlv;
+ /**
+ * The lemmas we computed during collectModelInfo. We store two vectors of
+ * lemmas to be sent out on the output channel of TheoryArith. The first
+ * is not preprocessed, the second is.
+ */
+ std::vector<Node> d_cmiLemmas;
+ std::vector<Node> d_cmiLemmasPp;
+ /** the side effects of the above lemmas */
+ std::map<Node, NlLemmaSideEffect> d_cmiLemmasSE;
+ /**
+ * The approximations computed during collectModelInfo. For details, see
+ * NlModel::getModelValueRepair.
+ */
+ std::map<Node, std::pair<Node, Node>> d_approximations;
+ /** have we successfully built the model in this SAT context? */
+ context::CDO<bool> d_builtModel;
+}; /* class NonlinearExtension */
+
+} // namespace nl
+} // namespace arith
+} // namespace theory
+} // namespace CVC4
+
+#endif /* CVC4__THEORY__ARITH__NL__NONLINEAR_EXTENSION_H */
--- /dev/null
+/********************* */
+/*! \file transcendental_solver.cpp
+ ** \verbatim
+ ** Top contributors (to current version):
+ ** Andrew Reynolds
+ ** This file is part of the CVC4 project.
+ ** Copyright (c) 2009-2019 by the authors listed in the file AUTHORS
+ ** in the top-level source directory) and their institutional affiliations.
+ ** All rights reserved. See the file COPYING in the top-level source
+ ** directory for licensing information.\endverbatim
+ **
+ ** \brief Implementation of solver for handling transcendental functions.
+ **/
+
+#include "theory/arith/nl/transcendental_solver.h"
+
+#include <cmath>
+#include <set>
+
+#include "expr/node_algorithm.h"
+#include "expr/node_builder.h"
+#include "options/arith_options.h"
+#include "theory/arith/arith_msum.h"
+#include "theory/arith/arith_utilities.h"
+#include "theory/rewriter.h"
+
+using namespace CVC4::kind;
+
+namespace CVC4 {
+namespace theory {
+namespace arith {
+namespace nl {
+
+TranscendentalSolver::TranscendentalSolver(NlModel& m) : d_model(m)
+{
+ NodeManager* nm = NodeManager::currentNM();
+ d_true = nm->mkConst(true);
+ d_false = nm->mkConst(false);
+ d_zero = nm->mkConst(Rational(0));
+ d_one = nm->mkConst(Rational(1));
+ d_neg_one = nm->mkConst(Rational(-1));
+ d_taylor_real_fv = nm->mkBoundVar("x", nm->realType());
+ d_taylor_real_fv_base = nm->mkBoundVar("a", nm->realType());
+ d_taylor_real_fv_base_rem = nm->mkBoundVar("b", nm->realType());
+ d_taylor_degree = options::nlExtTfTaylorDegree();
+}
+
+TranscendentalSolver::~TranscendentalSolver() {}
+
+void TranscendentalSolver::initLastCall(const std::vector<Node>& assertions,
+ const std::vector<Node>& false_asserts,
+ const std::vector<Node>& xts,
+ std::vector<Node>& lems,
+ std::vector<Node>& lemsPp)
+{
+ d_funcCongClass.clear();
+ d_funcMap.clear();
+ d_tf_region.clear();
+
+ NodeManager* nm = NodeManager::currentNM();
+
+ // register the extended function terms
+ std::vector<Node> trNeedsMaster;
+ bool needPi = false;
+ // for computing congruence
+ std::map<Kind, ArgTrie> argTrie;
+ for (unsigned i = 0, xsize = xts.size(); i < xsize; i++)
+ {
+ Node a = xts[i];
+ Kind ak = a.getKind();
+ bool consider = true;
+ // if it is an unpurified application of SINE, or it is a transcendental
+ // function applied to a transcendental function, purify.
+ if (isTranscendentalKind(ak))
+ {
+ // if we've already computed master for a
+ if (d_trMaster.find(a) != d_trMaster.end())
+ {
+ // a master has at least one slave
+ consider = (d_trSlaves.find(a) != d_trSlaves.end());
+ }
+ else
+ {
+ if (ak == SINE)
+ {
+ // always not a master
+ consider = false;
+ }
+ else
+ {
+ for (const Node& ac : a)
+ {
+ if (isTranscendentalKind(ac.getKind()))
+ {
+ consider = false;
+ break;
+ }
+ }
+ }
+ if (!consider)
+ {
+ // wait to assign a master below
+ trNeedsMaster.push_back(a);
+ }
+ else
+ {
+ d_trMaster[a] = a;
+ d_trSlaves[a].insert(a);
+ }
+ }
+ }
+ if (ak == EXPONENTIAL || ak == SINE)
+ {
+ needPi = needPi || (ak == SINE);
+ // if we didn't indicate that it should be purified above
+ if (consider)
+ {
+ std::vector<Node> repList;
+ for (const Node& ac : a)
+ {
+ Node r = d_model.computeConcreteModelValue(ac);
+ repList.push_back(r);
+ }
+ Node aa = argTrie[ak].add(a, repList);
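+ // aa is the previously registered application (if any) whose arguments
+ // have the same concrete model values; otherwise aa == a and a becomes
+ // the representative of a new congruence class.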
+ if (aa != a)
+ {
+ // apply congruence to pairs of terms that are disequal and congruent
+ Assert(aa.getNumChildren() == a.getNumChildren());
+ Node mvaa = d_model.computeAbstractModelValue(a);
+ Node mvaaa = d_model.computeAbstractModelValue(aa);
+ if (mvaa != mvaaa)
+ {
+ std::vector<Node> exp;
+ for (unsigned j = 0, size = a.getNumChildren(); j < size; j++)
+ {
+ exp.push_back(a[j].eqNode(aa[j]));
+ }
+ Node expn = exp.size() == 1 ? exp[0] : nm->mkNode(AND, exp);
+ Node cong_lemma = nm->mkNode(OR, expn.negate(), a.eqNode(aa));
+ lems.push_back(cong_lemma);
+ }
+ }
+ else
+ {
+ // new representative of congruence class
+ d_funcMap[ak].push_back(a);
+ }
+ // add to congruence class
+ d_funcCongClass[aa].push_back(a);
+ }
+ }
+ else if (ak == PI)
+ {
+ Assert(consider);
+ needPi = true;
+ d_funcMap[ak].push_back(a);
+ d_funcCongClass[a].push_back(a);
+ }
+ }
+ // initialize pi if necessary
+ if (needPi && d_pi.isNull())
+ {
+ mkPi();
+ getCurrentPiBounds(lems);
+ }
+
+ if (!lems.empty())
+ {
+ return;
+ }
+
+ // process SINE phase shifting
+ for (const Node& a : trNeedsMaster)
+ {
+ // should not have processed this already
+ Assert(d_trMaster.find(a) == d_trMaster.end());
+ Kind k = a.getKind();
+ Assert(k == SINE || k == EXPONENTIAL);
+ Node y =
+ nm->mkSkolem("y", nm->realType(), "phase shifted trigonometric arg");
+ Node new_a = nm->mkNode(k, y);
+ d_trSlaves[new_a].insert(new_a);
+ d_trSlaves[new_a].insert(a);
+ d_trMaster[a] = new_a;
+ d_trMaster[new_a] = new_a;
+ Node lem;
+ if (k == SINE)
+ {
+ Trace("nl-ext-tf") << "Basis sine : " << new_a << " for " << a
+ << std::endl;
+ Assert(!d_pi.isNull());
+ Node shift = nm->mkSkolem("s", nm->integerType(), "number of shifts");
+ // TODO : do not introduce shift here, instead needs model-based
+ // refinement for constant shifts (cvc4-projects #1284)
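+ // The purification lemma constructed below has the shape:
+ // -pi <= y <= pi
+ // AND ( if -pi <= t <= pi then t = y else t = y + 2*pi*shift )
+ // AND sin(y) = sin(t)
+ // where t is the original argument a[0].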
+ lem = nm->mkNode(
+ AND,
+ mkValidPhase(y, d_pi),
+ nm->mkNode(
+ ITE,
+ mkValidPhase(a[0], d_pi),
+ a[0].eqNode(y),
+ a[0].eqNode(nm->mkNode(
+ PLUS,
+ y,
+ nm->mkNode(MULT, nm->mkConst(Rational(2)), shift, d_pi)))),
+ new_a.eqNode(a));
+ }
+ else
+ {
+ // do both equalities to ensure that new_a becomes a preregistered term
+ lem = nm->mkNode(AND, a.eqNode(new_a), a[0].eqNode(y));
+ }
+ // note we must preprocess this lemma
+ Trace("nl-ext-lemma") << "NonlinearExtension::Lemma : purify : " << lem
+ << std::endl;
+ lemsPp.push_back(lem);
+ }
+
+ if (Trace.isOn("nl-ext-mv"))
+ {
+ Trace("nl-ext-mv") << "Arguments of trancendental functions : "
+ << std::endl;
+ for (std::pair<const Kind, std::vector<Node> >& tfl : d_funcMap)
+ {
+ Kind k = tfl.first;
+ if (k == SINE || k == EXPONENTIAL)
+ {
+ for (const Node& tf : tfl.second)
+ {
+ Node v = tf[0];
+ d_model.computeConcreteModelValue(v);
+ d_model.computeAbstractModelValue(v);
+ d_model.printModelValue("nl-ext-mv", v);
+ }
+ }
+ }
+ }
+}
+
+bool TranscendentalSolver::preprocessAssertionsCheckModel(
+ std::vector<Node>& assertions)
+{
+ std::vector<Node> pvars;
+ std::vector<Node> psubs;
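+ // Build the substitution that maps each transcendental function
+ // application to its master term (see d_trMaster); the assertions are
+ // rewritten under this substitution below.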
+ for (const std::pair<const Node, Node>& tb : d_trMaster)
+ {
+ pvars.push_back(tb.first);
+ psubs.push_back(tb.second);
+ }
+
+ // initialize representation of assertions
+ std::vector<Node> passertions;
+ for (const Node& a : assertions)
+ {
+ Node pa = a;
+ if (!pvars.empty())
+ {
+ pa = arithSubstitute(pa, pvars, psubs);
+ pa = Rewriter::rewrite(pa);
+ }
+ if (!pa.isConst() || !pa.getConst<bool>())
+ {
+ Trace("nl-ext-cm-assert") << "- assert : " << pa << std::endl;
+ passertions.push_back(pa);
+ }
+ }
+ // get model bounds for all transcendental functions
+ Trace("nl-ext-cm") << "----- Get bounds for transcendental functions..."
+ << std::endl;
+ for (std::pair<const Kind, std::vector<Node> >& tfs : d_funcMap)
+ {
+ Kind k = tfs.first;
+ for (const Node& tf : tfs.second)
+ {
+ Trace("nl-ext-cm") << "- Term: " << tf << std::endl;
+ bool success = true;
+ // tf is Figure 3 : tf( x )
+ Node bl;
+ Node bu;
+ if (k == PI)
+ {
+ bl = d_pi_bound[0];
+ bu = d_pi_bound[1];
+ }
+ else
+ {
+ std::pair<Node, Node> bounds = getTfModelBounds(tf, d_taylor_degree);
+ bl = bounds.first;
+ bu = bounds.second;
+ if (bl != bu)
+ {
+ d_model.setUsedApproximate();
+ }
+ }
+ if (!bl.isNull() && !bu.isNull())
+ {
+ // for each function in the congruence class
+ for (const Node& ctf : d_funcCongClass[tf])
+ {
+ // each term in a congruence class should be a master term
+ Assert(d_trSlaves.find(ctf) != d_trSlaves.end());
+ // we set the bounds for each slave of tf
+ for (const Node& stf : d_trSlaves[ctf])
+ {
+ Trace("nl-ext-cm") << "...bound for " << stf << " : [" << bl << ", "
+ << bu << "]" << std::endl;
+ success = d_model.addCheckModelBound(stf, bl, bu);
+ }
+ }
+ }
+ else
+ {
+ Trace("nl-ext-cm") << "...no bound for " << tf << std::endl;
+ }
+ if (!success)
+ {
+ // a bound was conflicting
+ Trace("nl-ext-cm") << "...failed to set bound for " << tf << std::endl;
+ Trace("nl-ext-cm") << "-----" << std::endl;
+ return false;
+ }
+ }
+ }
+ // replace the assertions
+ assertions = passertions;
+ return true;
+}
+
+void TranscendentalSolver::incrementTaylorDegree() { d_taylor_degree++; }
+unsigned TranscendentalSolver::getTaylorDegree() const
+{
+ return d_taylor_degree;
+}
+
+void TranscendentalSolver::processSideEffect(const NlLemmaSideEffect& se)
+{
+ for (const std::tuple<Node, unsigned, Node>& sp : se.d_secantPoint)
+ {
+ Node tf = std::get<0>(sp);
+ unsigned d = std::get<1>(sp);
+ Node c = std::get<2>(sp);
+ d_secant_points[tf][d].push_back(c);
+ }
+}
+
+void TranscendentalSolver::mkPi()
+{
+ NodeManager* nm = NodeManager::currentNM();
+ if (d_pi.isNull())
+ {
+ d_pi = nm->mkNullaryOperator(nm->realType(), PI);
+ d_pi_2 = Rewriter::rewrite(
+ nm->mkNode(MULT, d_pi, nm->mkConst(Rational(1) / Rational(2))));
+ d_pi_neg_2 = Rewriter::rewrite(
+ nm->mkNode(MULT, d_pi, nm->mkConst(Rational(-1) / Rational(2))));
+ d_pi_neg =
+ Rewriter::rewrite(nm->mkNode(MULT, d_pi, nm->mkConst(Rational(-1))));
+ // initialize bounds
+ d_pi_bound[0] = nm->mkConst(Rational(103993) / Rational(33102));
+ d_pi_bound[1] = nm->mkConst(Rational(104348) / Rational(33215));
+ }
+}
+
+void TranscendentalSolver::getCurrentPiBounds(std::vector<Node>& lemmas)
+{
+ NodeManager* nm = NodeManager::currentNM();
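+ // Assert that PI lies within its current concrete bounds; initially these
+ // are 103993/33102 <= PI <= 104348/33215 (see mkPi).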
+ Node pi_lem = nm->mkNode(AND,
+ nm->mkNode(GEQ, d_pi, d_pi_bound[0]),
+ nm->mkNode(LEQ, d_pi, d_pi_bound[1]));
+ lemmas.push_back(pi_lem);
+}
+
+std::vector<Node> TranscendentalSolver::checkTranscendentalInitialRefine()
+{
+ NodeManager* nm = NodeManager::currentNM();
+ std::vector<Node> lemmas;
+ Trace("nl-ext")
+ << "Get initial refinement lemmas for transcendental functions..."
+ << std::endl;
+ for (std::pair<const Kind, std::vector<Node> >& tfl : d_funcMap)
+ {
+ Kind k = tfl.first;
+ for (const Node& t : tfl.second)
+ {
+ // initial refinements
+ if (d_tf_initial_refine.find(t) == d_tf_initial_refine.end())
+ {
+ d_tf_initial_refine[t] = true;
+ Node lem;
+ if (k == SINE)
+ {
+ Node symn = nm->mkNode(SINE, nm->mkNode(MULT, d_neg_one, t[0]));
+ symn = Rewriter::rewrite(symn);
+ // Can assume it is its own master since phase is split over 0,
+ // hence -pi <= t[0] <= pi implies -pi <= -t[0] <= pi.
+ d_trMaster[symn] = symn;
+ d_trSlaves[symn].insert(symn);
+ Assert(d_trSlaves.find(t) != d_trSlaves.end());
+ std::vector<Node> children;
+
+ lem = nm->mkNode(AND,
+ // bounds
+ nm->mkNode(AND,
+ nm->mkNode(LEQ, t, d_one),
+ nm->mkNode(GEQ, t, d_neg_one)),
+ // symmetry
+ nm->mkNode(PLUS, t, symn).eqNode(d_zero),
+ // sign
+ nm->mkNode(EQUAL,
+ nm->mkNode(LT, t[0], d_zero),
+ nm->mkNode(LT, t, d_zero)),
+ // zero val
+ nm->mkNode(EQUAL,
+ nm->mkNode(GT, t[0], d_zero),
+ nm->mkNode(GT, t, d_zero)));
+ lem = nm->mkNode(
+ AND,
+ lem,
+ // zero tangent
+ nm->mkNode(AND,
+ nm->mkNode(IMPLIES,
+ nm->mkNode(GT, t[0], d_zero),
+ nm->mkNode(LT, t, t[0])),
+ nm->mkNode(IMPLIES,
+ nm->mkNode(LT, t[0], d_zero),
+ nm->mkNode(GT, t, t[0]))),
+ // pi tangent
+ nm->mkNode(
+ AND,
+ nm->mkNode(IMPLIES,
+ nm->mkNode(LT, t[0], d_pi),
+ nm->mkNode(LT, t, nm->mkNode(MINUS, d_pi, t[0]))),
+ nm->mkNode(
+ IMPLIES,
+ nm->mkNode(GT, t[0], d_pi_neg),
+ nm->mkNode(GT, t, nm->mkNode(MINUS, d_pi_neg, t[0])))));
+ }
+ else if (k == EXPONENTIAL)
+ {
+ // ( exp(x) > 0 ) ^ ( x=0 <=> exp( x ) = 1 ) ^ ( x < 0 <=> exp( x ) <
+ // 1 ) ^ ( x <= 0 V exp( x ) > x + 1 )
+ lem = nm->mkNode(
+ AND,
+ nm->mkNode(GT, t, d_zero),
+ nm->mkNode(EQUAL, t[0].eqNode(d_zero), t.eqNode(d_one)),
+ nm->mkNode(EQUAL,
+ nm->mkNode(LT, t[0], d_zero),
+ nm->mkNode(LT, t, d_one)),
+ nm->mkNode(OR,
+ nm->mkNode(LEQ, t[0], d_zero),
+ nm->mkNode(GT, t, nm->mkNode(PLUS, t[0], d_one))));
+ }
+ if (!lem.isNull())
+ {
+ lemmas.push_back(lem);
+ }
+ }
+ }
+ }
+
+ return lemmas;
+}
+
+std::vector<Node> TranscendentalSolver::checkTranscendentalMonotonic()
+{
+ std::vector<Node> lemmas;
+ Trace("nl-ext") << "Get monotonicity lemmas for transcendental functions..."
+ << std::endl;
+
+ // sort arguments of all transcendentals
+ std::map<Kind, std::vector<Node> > sorted_tf_args;
+ std::map<Kind, std::map<Node, Node> > tf_arg_to_term;
+
+ for (std::pair<const Kind, std::vector<Node> >& tfl : d_funcMap)
+ {
+ Kind k = tfl.first;
+ if (k == EXPONENTIAL || k == SINE)
+ {
+ for (const Node& tf : tfl.second)
+ {
+ Node a = tf[0];
+ Node mvaa = d_model.computeAbstractModelValue(a);
+ if (mvaa.isConst())
+ {
+ Trace("nl-ext-tf-mono-debug") << "...tf term : " << a << std::endl;
+ sorted_tf_args[k].push_back(a);
+ tf_arg_to_term[k][a] = tf;
+ }
+ }
+ }
+ }
+
+ SortNlModel smv;
+ smv.d_nlm = &d_model;
+ // sort by concrete values
+ smv.d_isConcrete = true;
+ smv.d_reverse_order = true;
+ for (std::pair<const Kind, std::vector<Node> >& tfl : d_funcMap)
+ {
+ Kind k = tfl.first;
+ if (!sorted_tf_args[k].empty())
+ {
+ std::sort(sorted_tf_args[k].begin(), sorted_tf_args[k].end(), smv);
+ Trace("nl-ext-tf-mono") << "Sorted transcendental function list for " << k
+ << " : " << std::endl;
+ for (unsigned i = 0; i < sorted_tf_args[k].size(); i++)
+ {
+ Node targ = sorted_tf_args[k][i];
+ Node mvatarg = d_model.computeAbstractModelValue(targ);
+ Trace("nl-ext-tf-mono")
+ << " " << targ << " -> " << mvatarg << std::endl;
+ Node t = tf_arg_to_term[k][targ];
+ Node mvat = d_model.computeAbstractModelValue(t);
+ Trace("nl-ext-tf-mono") << " f-val : " << mvat << std::endl;
+ }
+ std::vector<Node> mpoints;
+ std::vector<Node> mpoints_vals;
+ if (k == SINE)
+ {
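+ // boundary points of the monotonicity regions of sine, listed in
+ // decreasing order of value: pi, pi/2, 0, -pi/2, -pi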
+ mpoints.push_back(d_pi);
+ mpoints.push_back(d_pi_2);
+ mpoints.push_back(d_zero);
+ mpoints.push_back(d_pi_neg_2);
+ mpoints.push_back(d_pi_neg);
+ }
+ else if (k == EXPONENTIAL)
+ {
+ mpoints.push_back(Node::null());
+ }
+ if (!mpoints.empty())
+ {
+ // get model values for points
+ for (unsigned i = 0; i < mpoints.size(); i++)
+ {
+ Node mpv;
+ if (!mpoints[i].isNull())
+ {
+ mpv = d_model.computeAbstractModelValue(mpoints[i]);
+ Assert(mpv.isConst());
+ }
+ mpoints_vals.push_back(mpv);
+ }
+
+ unsigned mdir_index = 0;
+ int monotonic_dir = -1;
+ Node mono_bounds[2];
+ Node targ, targval, t, tval;
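+ // Walk the sorted arguments, advancing mdir_index through the
+ // monotonicity region boundaries above; targ/t hold the argument and
+ // function term processed in the previous iteration.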
+ for (unsigned i = 0, size = sorted_tf_args[k].size(); i < size; i++)
+ {
+ Node sarg = sorted_tf_args[k][i];
+ Node sargval = d_model.computeAbstractModelValue(sarg);
+ Assert(sargval.isConst());
+ Node s = tf_arg_to_term[k][sarg];
+ Node sval = d_model.computeAbstractModelValue(s);
+ Assert(sval.isConst());
+
+ // increment to the proper monotonicity region
+ bool increment = true;
+ while (increment && mdir_index < mpoints.size())
+ {
+ increment = false;
+ if (mpoints[mdir_index].isNull())
+ {
+ increment = true;
+ }
+ else
+ {
+ Node pval = mpoints_vals[mdir_index];
+ Assert(pval.isConst());
+ if (sargval.getConst<Rational>() < pval.getConst<Rational>())
+ {
+ increment = true;
+ Trace("nl-ext-tf-mono") << "...increment at " << sarg
+ << " since model value is less than "
+ << mpoints[mdir_index] << std::endl;
+ }
+ }
+ if (increment)
+ {
+ tval = Node::null();
+ mono_bounds[1] = mpoints[mdir_index];
+ mdir_index++;
+ monotonic_dir = regionToMonotonicityDir(k, mdir_index);
+ if (mdir_index < mpoints.size())
+ {
+ mono_bounds[0] = mpoints[mdir_index];
+ }
+ else
+ {
+ mono_bounds[0] = Node::null();
+ }
+ }
+ }
+ // store the concavity region
+ d_tf_region[s] = mdir_index;
+ Trace("nl-ext-concavity") << "Transcendental function " << s
+ << " is in region #" << mdir_index;
+ Trace("nl-ext-concavity")
+ << ", arg model value = " << sargval << std::endl;
+
+ if (!tval.isNull())
+ {
+ NodeManager* nm = NodeManager::currentNM();
+ Node mono_lem;
+ if (monotonic_dir == 1
+ && sval.getConst<Rational>() > tval.getConst<Rational>())
+ {
+ mono_lem = nm->mkNode(
+ IMPLIES, nm->mkNode(GEQ, targ, sarg), nm->mkNode(GEQ, t, s));
+ }
+ else if (monotonic_dir == -1
+ && sval.getConst<Rational>() < tval.getConst<Rational>())
+ {
+ mono_lem = nm->mkNode(
+ IMPLIES, nm->mkNode(LEQ, targ, sarg), nm->mkNode(LEQ, t, s));
+ }
+ if (!mono_lem.isNull())
+ {
+ if (!mono_bounds[0].isNull())
+ {
+ Assert(!mono_bounds[1].isNull());
+ mono_lem = nm->mkNode(
+ IMPLIES,
+ nm->mkNode(AND,
+ mkBounded(mono_bounds[0], targ, mono_bounds[1]),
+ mkBounded(mono_bounds[0], sarg, mono_bounds[1])),
+ mono_lem);
+ }
+ Trace("nl-ext-tf-mono")
+ << "Monotonicity lemma : " << mono_lem << std::endl;
+ lemmas.push_back(mono_lem);
+ }
+ }
+ // store the previous values
+ targ = sarg;
+ targval = sargval;
+ t = s;
+ tval = sval;
+ }
+ }
+ }
+ }
+ return lemmas;
+}
+
+std::vector<Node> TranscendentalSolver::checkTranscendentalTangentPlanes(
+ std::map<Node, NlLemmaSideEffect>& lemSE)
+{
+ std::vector<Node> lemmas;
+ Trace("nl-ext") << "Get tangent plane lemmas for transcendental functions..."
+ << std::endl;
+ // this implements Figure 3 of "Satisfiability Modulo Transcendental Functions
+ // via Incremental Linearization" by Cimatti et al
+ for (std::pair<const Kind, std::vector<Node> >& tfs : d_funcMap)
+ {
+ Kind k = tfs.first;
+ if (k == PI)
+ {
+ // We do not use Taylor approximation for PI currently.
+ // This is because the convergence is extremely slow, and hence an
+ // initial approximation is superior.
+ continue;
+ }
+ Trace("nl-ext-tftp-debug2") << "Taylor variables: " << std::endl;
+ Trace("nl-ext-tftp-debug2")
+ << " taylor_real_fv : " << d_taylor_real_fv << std::endl;
+ Trace("nl-ext-tftp-debug2")
+ << " taylor_real_fv_base : " << d_taylor_real_fv_base << std::endl;
+ Trace("nl-ext-tftp-debug2")
+ << " taylor_real_fv_base_rem : " << d_taylor_real_fv_base_rem
+ << std::endl;
+ Trace("nl-ext-tftp-debug2") << std::endl;
+
+ // we substitute into the Taylor sum P_{n,f(0)}( x )
+
+ for (const Node& tf : tfs.second)
+ {
+ // tf is Figure 3 : tf( x )
+ Trace("nl-ext-tftp") << "Compute tangent planes " << tf << std::endl;
+ // go until max degree is reached, or we don't meet bound criteria
+ for (unsigned d = 1; d <= d_taylor_degree; d++)
+ {
+ Trace("nl-ext-tftp") << "- run at degree " << d << "..." << std::endl;
+ unsigned prev = lemmas.size();
+ if (checkTfTangentPlanesFun(tf, d, lemmas, lemSE))
+ {
+ Trace("nl-ext-tftp")
+ << "...fail, #lemmas = " << (lemmas.size() - prev) << std::endl;
+ break;
+ }
+ else
+ {
+ Trace("nl-ext-tftp") << "...success" << std::endl;
+ }
+ }
+ }
+ }
+
+ return lemmas;
+}
+
+bool TranscendentalSolver::checkTfTangentPlanesFun(
+ Node tf,
+ unsigned d,
+ std::vector<Node>& lemmas,
+ std::map<Node, NlLemmaSideEffect>& lemSE)
+{
+ NodeManager* nm = NodeManager::currentNM();
+ Kind k = tf.getKind();
+ // this should only be run on master applications
+ Assert(d_trSlaves.find(tf) != d_trSlaves.end());
+
+ // Figure 3 : c
+ Node c = d_model.computeAbstractModelValue(tf[0]);
+ int csign = c.getConst<Rational>().sgn();
+ if (csign == 0)
+ {
+ // no secant/tangent plane is necessary
+ return true;
+ }
+ Assert(csign == 1 || csign == -1);
+
+ // Figure 3: P_l, P_u
+ // indexed by (0 = lower, 1 = upper) and by the sign of c
+ std::map<int, Node> poly_approx_bounds[2];
+ std::vector<Node> pbounds;
+ getPolynomialApproximationBoundForArg(k, c, d, pbounds);
+ poly_approx_bounds[0][1] = pbounds[0];
+ poly_approx_bounds[0][-1] = pbounds[1];
+ poly_approx_bounds[1][1] = pbounds[2];
+ poly_approx_bounds[1][-1] = pbounds[3];
+
+ // Figure 3 : v
+ Node v = d_model.computeAbstractModelValue(tf);
+
+ // check value of tf
+ Trace("nl-ext-tftp-debug") << "Process tangent plane refinement for " << tf
+ << ", degree " << d << "..." << std::endl;
+ Trace("nl-ext-tftp-debug") << " value in model : " << v << std::endl;
+ Trace("nl-ext-tftp-debug") << " arg value in model : " << c << std::endl;
+
+ std::vector<Node> taylor_vars;
+ taylor_vars.push_back(d_taylor_real_fv);
+
+ // compute the concavity
+ int region = -1;
+ std::unordered_map<Node, int, NodeHashFunction>::iterator itr =
+ d_tf_region.find(tf);
+ if (itr != d_tf_region.end())
+ {
+ region = itr->second;
+ Trace("nl-ext-tftp-debug") << " region is : " << region << std::endl;
+ }
+ // Figure 3 : conc
+ int concavity = regionToConcavity(k, region);
+ Trace("nl-ext-tftp-debug") << " concavity is : " << concavity << std::endl;
+ if (concavity == 0)
+ {
+ // no secant/tangent plane is necessary
+ return true;
+ }
+ // bounds for which we are this concavity
+ // Figure 3: < l, u >
+ Node bounds[2];
+ if (k == SINE)
+ {
+ bounds[0] = regionToLowerBound(k, region);
+ Assert(!bounds[0].isNull());
+ bounds[1] = regionToUpperBound(k, region);
+ Assert(!bounds[1].isNull());
+ }
+
+ // Figure 3: P
+ Node poly_approx;
+
+ // compute whether this is a tangent refinement or a secant refinement
+ bool is_tangent = false;
+ bool is_secant = false;
+ std::pair<Node, Node> mvb = getTfModelBounds(tf, d);
+ for (unsigned r = 0; r < 2; r++)
+ {
+ Node pab = poly_approx_bounds[r][csign];
+ Node v_pab = r == 0 ? mvb.first : mvb.second;
+ if (!v_pab.isNull())
+ {
+ Trace("nl-ext-tftp-debug2")
+ << "...model value of " << pab << " is " << v_pab << std::endl;
+
+ Assert(v_pab.isConst());
+ Node comp = nm->mkNode(r == 0 ? LT : GT, v, v_pab);
+ Trace("nl-ext-tftp-debug2") << "...compare : " << comp << std::endl;
+ Node compr = Rewriter::rewrite(comp);
+ Trace("nl-ext-tftp-debug2") << "...got : " << compr << std::endl;
+ if (compr == d_true)
+ {
+ // beyond the bounds
+ if (r == 0)
+ {
+ poly_approx = poly_approx_bounds[r][csign];
+ is_tangent = concavity == 1;
+ is_secant = concavity == -1;
+ }
+ else
+ {
+ poly_approx = poly_approx_bounds[r][csign];
+ is_tangent = concavity == -1;
+ is_secant = concavity == 1;
+ }
+ if (Trace.isOn("nl-ext-tftp"))
+ {
+ Trace("nl-ext-tftp") << "*** Outside boundary point (";
+ Trace("nl-ext-tftp") << (r == 0 ? "low" : "high") << ") ";
+ printRationalApprox("nl-ext-tftp", v_pab);
+ Trace("nl-ext-tftp") << ", will refine..." << std::endl;
+ Trace("nl-ext-tftp")
+ << " poly_approx = " << poly_approx << std::endl;
+ Trace("nl-ext-tftp")
+ << " is_tangent = " << is_tangent << std::endl;
+ Trace("nl-ext-tftp") << " is_secant = " << is_secant << std::endl;
+ }
+ break;
+ }
+ else
+ {
+ Trace("nl-ext-tftp")
+ << " ...within " << (r == 0 ? "low" : "high") << " bound : ";
+ printRationalApprox("nl-ext-tftp", v_pab);
+ Trace("nl-ext-tftp") << std::endl;
+ }
+ }
+ }
+
+ // Figure 3: P( c )
+ Node poly_approx_c;
+ if (is_tangent || is_secant)
+ {
+ Assert(!poly_approx.isNull());
+ std::vector<Node> taylor_subs;
+ taylor_subs.push_back(c);
+ Assert(taylor_vars.size() == taylor_subs.size());
+ poly_approx_c = poly_approx.substitute(taylor_vars.begin(),
+ taylor_vars.end(),
+ taylor_subs.begin(),
+ taylor_subs.end());
+ Trace("nl-ext-tftp-debug2")
+ << "...poly approximation at c is " << poly_approx_c << std::endl;
+ }
+ else
+ {
+ // we may want to continue getting better bounds
+ return false;
+ }
+
+ if (is_tangent)
+ {
+ // compute tangent plane
+ // Figure 3: T( x )
+ // We use zero slope tangent planes, since the concavity of the Taylor
+ // approximation cannot be easily established.
+ Node tplane = poly_approx_c;
+
+ Node lem = nm->mkNode(concavity == 1 ? GEQ : LEQ, tf, tplane);
+ std::vector<Node> antec;
+ int mdir = regionToMonotonicityDir(k, region);
+ for (unsigned i = 0; i < 2; i++)
+ {
+ // Tangent plane is valid in the interval [c,u) if the slope of the
+ // function matches its concavity, and is valid in (l, c] otherwise.
+ Node use_bound = (mdir == concavity) == (i == 0) ? c : bounds[i];
+ if (!use_bound.isNull())
+ {
+ Node ant = nm->mkNode(i == 0 ? GEQ : LEQ, tf[0], use_bound);
+ antec.push_back(ant);
+ }
+ }
+ if (!antec.empty())
+ {
+ Node antec_n = antec.size() == 1 ? antec[0] : nm->mkNode(AND, antec);
+ lem = nm->mkNode(IMPLIES, antec_n, lem);
+ }
+ Trace("nl-ext-tftp-debug2")
+ << "*** Tangent plane lemma (pre-rewrite): " << lem << std::endl;
+ lem = Rewriter::rewrite(lem);
+ Trace("nl-ext-tftp-lemma")
+ << "*** Tangent plane lemma : " << lem << std::endl;
+ Assert(d_model.computeAbstractModelValue(lem) == d_false);
+ // Figure 3 : line 9
+ lemmas.push_back(lem);
+ }
+ else if (is_secant)
+ {
+ // bounds are the minimum and maximum previous secant points
+ // should not repeat secant points: secant lemmas should suffice to
+ // rule out previous assignment
+ Assert(std::find(
+ d_secant_points[tf][d].begin(), d_secant_points[tf][d].end(), c)
+ == d_secant_points[tf][d].end());
+ // Insert into the (temporary) vector. We do not update this vector
+ // until we are sure this secant plane lemma has been processed. We do
+ // this by mapping the lemma to a side effect below.
+ std::vector<Node> spoints = d_secant_points[tf][d];
+ spoints.push_back(c);
+
+ // sort
+ SortNlModel smv;
+ smv.d_nlm = &d_model;
+ smv.d_isConcrete = true;
+ std::sort(spoints.begin(), spoints.end(), smv);
+ // get the resulting index of c
+ unsigned index =
+ std::find(spoints.begin(), spoints.end(), c) - spoints.begin();
+ // bounds are the next closest upper/lower bound values
+ if (index > 0)
+ {
+ bounds[0] = spoints[index - 1];
+ }
+ else
+ {
+ // otherwise, we use the lower boundary point for this concavity
+ // region
+ if (k == SINE)
+ {
+ Assert(!bounds[0].isNull());
+ }
+ else if (k == EXPONENTIAL)
+ {
+ // pick c-1
+ bounds[0] = Rewriter::rewrite(nm->mkNode(MINUS, c, d_one));
+ }
+ }
+ if (index < spoints.size() - 1)
+ {
+ bounds[1] = spoints[index + 1];
+ }
+ else
+ {
+ // otherwise, we use the upper boundary point for this concavity
+ // region
+ if (k == SINE)
+ {
+ Assert(!bounds[1].isNull());
+ }
+ else if (k == EXPONENTIAL)
+ {
+ // pick c+1
+ bounds[1] = Rewriter::rewrite(nm->mkNode(PLUS, c, d_one));
+ }
+ }
+ Trace("nl-ext-tftp-debug2") << "...secant bounds are : " << bounds[0]
+ << " ... " << bounds[1] << std::endl;
+
+ // the secant plane may be conjunction of 1-2 guarded inequalities
+ std::vector<Node> lemmaConj;
+ for (unsigned s = 0; s < 2; s++)
+ {
+ // compute secant plane
+ Assert(!poly_approx.isNull());
+ Assert(!bounds[s].isNull());
+ // take the model value of l or u (since they may contain PI)
+ Node b = d_model.computeAbstractModelValue(bounds[s]);
+ Trace("nl-ext-tftp-debug2") << "...model value of bound " << bounds[s]
+ << " is " << b << std::endl;
+ Assert(b.isConst());
+ if (c != b)
+ {
+ // Figure 3 : P(l), P(u), for s = 0,1
+ Node poly_approx_b;
+ std::vector<Node> taylor_subs;
+ taylor_subs.push_back(b);
+ Assert(taylor_vars.size() == taylor_subs.size());
+ poly_approx_b = poly_approx.substitute(taylor_vars.begin(),
+ taylor_vars.end(),
+ taylor_subs.begin(),
+ taylor_subs.end());
+ // Figure 3: S_l( x ), S_u( x ) for s = 0,1
+ Node splane;
+ Node rcoeff_n = Rewriter::rewrite(nm->mkNode(MINUS, b, c));
+ Assert(rcoeff_n.isConst());
+ Rational rcoeff = rcoeff_n.getConst<Rational>();
+ Assert(rcoeff.sgn() != 0);
+ poly_approx_b = Rewriter::rewrite(poly_approx_b);
+ poly_approx_c = Rewriter::rewrite(poly_approx_c);
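+ // The secant plane through ( b, P(b) ) and ( c, P(c) ) constructed below is
+ // S( x ) = P( b ) + (( P( b ) - P( c ) ) / ( b - c )) * ( x - b )
+ // where rcoeff = b - c.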
+ splane = nm->mkNode(
+ PLUS,
+ poly_approx_b,
+ nm->mkNode(MULT,
+ nm->mkNode(MINUS, poly_approx_b, poly_approx_c),
+ nm->mkConst(Rational(1) / rcoeff),
+ nm->mkNode(MINUS, tf[0], b)));
+
+ Node lem = nm->mkNode(concavity == 1 ? LEQ : GEQ, tf, splane);
+ // With respect to Figure 3, this is slightly different.
+ // In particular, we chose b to be the model value of bounds[s],
+ // which is a constant although bounds[s] may not be (e.g. if it
+ // contains PI).
+ // To ensure that c...b does not cross an inflection point,
+ // we guard with the symbolic version of bounds[s].
+ // This leads to lemmas e.g. of this form:
+ // ( c <= x <= PI/2 ) => ( sin(x) < ( P( b ) - P( c ) )*( x -
+ // b ) + P( b ) )
+ // where b = (PI/2)^M, the current value of PI/2 in the model.
+ // This is sound since we are guarded by the symbolic
+ // representation of PI/2.
+ Node antec_n =
+ nm->mkNode(AND,
+ nm->mkNode(GEQ, tf[0], s == 0 ? bounds[s] : c),
+ nm->mkNode(LEQ, tf[0], s == 0 ? c : bounds[s]));
+ lem = nm->mkNode(IMPLIES, antec_n, lem);
+ Trace("nl-ext-tftp-debug2")
+ << "*** Secant plane lemma (pre-rewrite) : " << lem << std::endl;
+ lem = Rewriter::rewrite(lem);
+ Trace("nl-ext-tftp-lemma")
+ << "*** Secant plane lemma : " << lem << std::endl;
+ lemmaConj.push_back(lem);
+ Assert(d_model.computeAbstractModelValue(lem) == d_false);
+ }
+ }
+ // Figure 3 : line 22
+ Assert(!lemmaConj.empty());
+ Node lem =
+ lemmaConj.size() == 1 ? lemmaConj[0] : nm->mkNode(AND, lemmaConj);
+ lemmas.push_back(lem);
+ // The side effect says that if lem is added, then we should add the
+ // secant point c for (tf,d).
+ lemSE[lem].d_secantPoint.push_back(std::make_tuple(tf, d, c));
+ }
+ return true;
+}
+
+int TranscendentalSolver::regionToMonotonicityDir(Kind k, int region)
+{
+ if (k == EXPONENTIAL)
+ {
+ if (region == 1)
+ {
+ return 1;
+ }
+ }
+ else if (k == SINE)
+ {
+ if (region == 1 || region == 4)
+ {
+ return -1;
+ }
+ else if (region == 2 || region == 3)
+ {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+int TranscendentalSolver::regionToConcavity(Kind k, int region)
+{
+ if (k == EXPONENTIAL)
+ {
+ if (region == 1)
+ {
+ return 1;
+ }
+ }
+ else if (k == SINE)
+ {
+ if (region == 1 || region == 2)
+ {
+ return -1;
+ }
+ else if (region == 3 || region == 4)
+ {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+Node TranscendentalSolver::regionToLowerBound(Kind k, int region)
+{
+ if (k == SINE)
+ {
+ if (region == 1)
+ {
+ return d_pi_2;
+ }
+ else if (region == 2)
+ {
+ return d_zero;
+ }
+ else if (region == 3)
+ {
+ return d_pi_neg_2;
+ }
+ else if (region == 4)
+ {
+ return d_pi_neg;
+ }
+ }
+ return Node::null();
+}
+
+Node TranscendentalSolver::regionToUpperBound(Kind k, int region)
+{
+ if (k == SINE)
+ {
+ if (region == 1)
+ {
+ return d_pi;
+ }
+ else if (region == 2)
+ {
+ return d_pi_2;
+ }
+ else if (region == 3)
+ {
+ return d_zero;
+ }
+ else if (region == 4)
+ {
+ return d_pi_neg_2;
+ }
+ }
+ return Node::null();
+}
+
+Node TranscendentalSolver::getDerivative(Node n, Node x)
+{
+ NodeManager* nm = NodeManager::currentNM();
+ Assert(x.isVar());
+ // only handle the node kinds that appear in the Taylor expansions we construct
+ if (n.getKind() == EXPONENTIAL)
+ {
+ if (n[0] == x)
+ {
+ return n;
+ }
+ }
+ else if (n.getKind() == SINE)
+ {
+ if (n[0] == x)
+ {
+ Node na = nm->mkNode(MINUS, d_pi_2, n[0]);
+ Node ret = nm->mkNode(SINE, na);
+ ret = Rewriter::rewrite(ret);
+ return ret;
+ }
+ }
+ else if (n.getKind() == PLUS)
+ {
+ std::vector<Node> dchildren;
+ for (unsigned i = 0; i < n.getNumChildren(); i++)
+ {
+ // PLUS is flattened in rewriter, recursion depth is bounded by 1
+ Node dc = getDerivative(n[i], x);
+ if (dc.isNull())
+ {
+ return dc;
+ }
+ else
+ {
+ dchildren.push_back(dc);
+ }
+ }
+ return nm->mkNode(PLUS, dchildren);
+ }
+ else if (n.getKind() == MULT)
+ {
+ Assert(n[0].isConst());
+ Node dc = getDerivative(n[1], x);
+ if (!dc.isNull())
+ {
+ return nm->mkNode(MULT, n[0], dc);
+ }
+ }
+ else if (n.getKind() == NONLINEAR_MULT)
+ {
+ unsigned xcount = 0;
+ std::vector<Node> children;
+ unsigned xindex = 0;
+ for (unsigned i = 0, size = n.getNumChildren(); i < size; i++)
+ {
+ if (n[i] == x)
+ {
+ xcount++;
+ xindex = i;
+ }
+ children.push_back(n[i]);
+ }
+ if (xcount == 0)
+ {
+ return d_zero;
+ }
+ else
+ {
+ children[xindex] = nm->mkConst(Rational(xcount));
+ }
+ return nm->mkNode(MULT, children);
+ }
+ else if (n.isVar())
+ {
+ return n == x ? d_one : d_zero;
+ }
+ else if (n.isConst())
+ {
+ return d_zero;
+ }
+ Trace("nl-ext-debug") << "No derivative computed for " << n;
+ Trace("nl-ext-debug") << " for d/d{" << x << "}" << std::endl;
+ return Node::null();
+}
+
+std::pair<Node, Node> TranscendentalSolver::getTaylor(Node fa, unsigned n)
+{
+ NodeManager* nm = NodeManager::currentNM();
+ Assert(n > 0);
+ Node fac; // what term we cache for fa
+ if (fa[0] == d_zero)
+ {
+ // optimization : simpler to compute (x-fa[0])^n if we are centered around 0
+ fac = fa;
+ }
+ else
+ {
+ // otherwise we use a standard factor a in (x-a)^n
+ fac = nm->mkNode(fa.getKind(), d_taylor_real_fv_base);
+ }
+ Node taylor_rem;
+ Node taylor_sum;
+ // check if we have already computed this Taylor series
+ std::unordered_map<unsigned, Node>::iterator itt = d_taylor_sum[fac].find(n);
+ if (itt == d_taylor_sum[fac].end())
+ {
+ Node i_exp_base;
+ if (fa[0] == d_zero)
+ {
+ i_exp_base = d_taylor_real_fv;
+ }
+ else
+ {
+ i_exp_base = Rewriter::rewrite(
+ nm->mkNode(MINUS, d_taylor_real_fv, d_taylor_real_fv_base));
+ }
+ Node i_derv = fac;
+ Node i_fact = d_one;
+ Node i_exp = d_one;
+ int i_derv_status = 0;
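+ // For SINE, i_derv_status cycles through 0..3, tracking the derivative
+ // cycle sin(a), cos(a) (written as sin(pi/2 + a)), -sin(a), -cos(a).
+ // For EXPONENTIAL, the derivative is the function itself and i_derv is
+ // left unchanged.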
+ unsigned counter = 0;
+ std::vector<Node> sum;
+ do
+ {
+ counter++;
+ if (fa.getKind() == EXPONENTIAL)
+ {
+ // unchanged
+ }
+ else if (fa.getKind() == SINE)
+ {
+ if (i_derv_status % 2 == 1)
+ {
+ Node arg = nm->mkNode(PLUS, d_pi_2, d_taylor_real_fv_base);
+ i_derv = nm->mkNode(SINE, arg);
+ }
+ else
+ {
+ i_derv = fa;
+ }
+ if (i_derv_status >= 2)
+ {
+ i_derv = nm->mkNode(MINUS, d_zero, i_derv);
+ }
+ i_derv = Rewriter::rewrite(i_derv);
+ i_derv_status = i_derv_status == 3 ? 0 : i_derv_status + 1;
+ }
+ if (counter == (n + 1))
+ {
+ TNode x = d_taylor_real_fv_base;
+ i_derv = i_derv.substitute(x, d_taylor_real_fv_base_rem);
+ }
+ Node curr = nm->mkNode(MULT, nm->mkNode(DIVISION, i_derv, i_fact), i_exp);
+ if (counter == (n + 1))
+ {
+ taylor_rem = curr;
+ }
+ else
+ {
+ sum.push_back(curr);
+ i_fact = Rewriter::rewrite(
+ nm->mkNode(MULT, nm->mkConst(Rational(counter)), i_fact));
+ i_exp = Rewriter::rewrite(nm->mkNode(MULT, i_exp_base, i_exp));
+ }
+ } while (counter <= n);
+ taylor_sum = sum.size() == 1 ? sum[0] : nm->mkNode(PLUS, sum);
+
+ if (fac[0] != d_taylor_real_fv_base)
+ {
+ TNode x = d_taylor_real_fv_base;
+ taylor_sum = taylor_sum.substitute(x, fac[0]);
+ }
+
+ // cache
+ d_taylor_sum[fac][n] = taylor_sum;
+ d_taylor_rem[fac][n] = taylor_rem;
+ }
+ else
+ {
+ taylor_sum = itt->second;
+ Assert(d_taylor_rem[fac].find(n) != d_taylor_rem[fac].end());
+ taylor_rem = d_taylor_rem[fac][n];
+ }
+
+ // must substitute for the argument if we were using a different lookup
+ if (fa[0] != fac[0])
+ {
+ TNode x = d_taylor_real_fv_base;
+ taylor_sum = taylor_sum.substitute(x, fa[0]);
+ }
+ return std::pair<Node, Node>(taylor_sum, taylor_rem);
+}
+
+void TranscendentalSolver::getPolynomialApproximationBounds(
+ Kind k, unsigned d, std::vector<Node>& pbounds)
+{
+ if (d_poly_bounds[k][d].empty())
+ {
+ NodeManager* nm = NodeManager::currentNM();
+ Node tft = nm->mkNode(k, d_zero);
+ // n is the Taylor degree we are currently considering
+ unsigned n = 2 * d;
+ // n must be even
+ std::pair<Node, Node> taylor = getTaylor(tft, n);
+ Trace("nl-ext-tftp-debug2")
+ << "Taylor for " << k << " is : " << taylor.first << std::endl;
+ Node taylor_sum = Rewriter::rewrite(taylor.first);
+ Trace("nl-ext-tftp-debug2")
+ << "Taylor for " << k << " is (post-rewrite) : " << taylor_sum
+ << std::endl;
+ Assert(taylor.second.getKind() == MULT);
+ Assert(taylor.second.getNumChildren() == 2);
+ Assert(taylor.second[0].getKind() == DIVISION);
+ Trace("nl-ext-tftp-debug2")
+ << "Taylor remainder for " << k << " is " << taylor.second << std::endl;
+ // ru is x^{n+1}/(n+1)!
+ Node ru = nm->mkNode(DIVISION, taylor.second[1], taylor.second[0][1]);
+ ru = Rewriter::rewrite(ru);
+ Trace("nl-ext-tftp-debug2")
+ << "Taylor remainder factor is (post-rewrite) : " << ru << std::endl;
+ if (k == EXPONENTIAL)
+ {
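+ // For exp, the Taylor sum is a sound lower bound for both signs of the
+ // argument; the upper bound is taylor_sum * (1 + ru) for positive
+ // arguments and taylor_sum + ru for negative arguments.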
+ pbounds.push_back(taylor_sum);
+ pbounds.push_back(taylor_sum);
+ pbounds.push_back(Rewriter::rewrite(
+ nm->mkNode(MULT, taylor_sum, nm->mkNode(PLUS, d_one, ru))));
+ pbounds.push_back(Rewriter::rewrite(nm->mkNode(PLUS, taylor_sum, ru)));
+ }
+ else
+ {
+ Assert(k == SINE);
+ Node l = Rewriter::rewrite(nm->mkNode(MINUS, taylor_sum, ru));
+ Node u = Rewriter::rewrite(nm->mkNode(PLUS, taylor_sum, ru));
+ pbounds.push_back(l);
+ pbounds.push_back(l);
+ pbounds.push_back(u);
+ pbounds.push_back(u);
+ }
+ Trace("nl-ext-tf-tplanes")
+ << "Polynomial approximation for " << k << " is: " << std::endl;
+ Trace("nl-ext-tf-tplanes") << " Lower (pos): " << pbounds[0] << std::endl;
+ Trace("nl-ext-tf-tplanes") << " Upper (pos): " << pbounds[2] << std::endl;
+ Trace("nl-ext-tf-tplanes") << " Lower (neg): " << pbounds[1] << std::endl;
+ Trace("nl-ext-tf-tplanes") << " Upper (neg): " << pbounds[3] << std::endl;
+ d_poly_bounds[k][d].insert(
+ d_poly_bounds[k][d].end(), pbounds.begin(), pbounds.end());
+ }
+ else
+ {
+ pbounds.insert(
+ pbounds.end(), d_poly_bounds[k][d].begin(), d_poly_bounds[k][d].end());
+ }
+}
+
+void TranscendentalSolver::getPolynomialApproximationBoundForArg(
+ Kind k, Node c, unsigned d, std::vector<Node>& pbounds)
+{
+ getPolynomialApproximationBounds(k, d, pbounds);
+ Assert(c.isConst());
+ if (k == EXPONENTIAL && c.getConst<Rational>().sgn() == 1)
+ {
+ NodeManager* nm = NodeManager::currentNM();
+ Node tft = nm->mkNode(k, d_zero);
+ bool success = false;
+ unsigned ds = d;
+ TNode ttrf = d_taylor_real_fv;
+ TNode tc = c;
+ do
+ {
+ success = true;
+ unsigned n = 2 * ds;
+ std::pair<Node, Node> taylor = getTaylor(tft, n);
+ // check that 1-c^{n+1}/(n+1)! > 0
+ Node ru = nm->mkNode(DIVISION, taylor.second[1], taylor.second[0][1]);
+ Node rus = ru.substitute(ttrf, tc);
+ rus = Rewriter::rewrite(rus);
+ Assert(rus.isConst());
+ if (rus.getConst<Rational>() > d_one.getConst<Rational>())
+ {
+ success = false;
+ ds = ds + 1;
+ }
+ } while (!success);
+ if (ds > d)
+ {
+ Trace("nl-ext-exp-taylor")
+ << "*** Increase Taylor bound to " << ds << " > " << d << " for ("
+ << k << " " << c << ")" << std::endl;
+ // must use sound upper bound
+ std::vector<Node> pboundss;
+ getPolynomialApproximationBounds(k, ds, pboundss);
+ pbounds[2] = pboundss[2];
+ }
+ }
+}
+
+std::pair<Node, Node> TranscendentalSolver::getTfModelBounds(Node tf,
+ unsigned d)
+{
+ // compute the model value of the argument
+ Node c = d_model.computeAbstractModelValue(tf[0]);
+ Assert(c.isConst());
+ int csign = c.getConst<Rational>().sgn();
+ Kind k = tf.getKind();
+ if (csign == 0)
+ {
+ // at zero, it is trivial
+ if (k == SINE)
+ {
+ return std::pair<Node, Node>(d_zero, d_zero);
+ }
+ Assert(k == EXPONENTIAL);
+ return std::pair<Node, Node>(d_one, d_one);
+ }
+ bool isNeg = csign == -1;
+
+ std::vector<Node> pbounds;
+ getPolynomialApproximationBoundForArg(k, c, d, pbounds);
+
+ std::vector<Node> bounds;
+ TNode tfv = d_taylor_real_fv;
+ TNode tfs = tf[0];
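+ // pbounds is ordered as ( P_l for positive argument, P_l for negative
+ // argument, P_u for positive argument, P_u for negative argument ); below
+ // we pick the lower/upper pair matching the sign of c.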
+ for (unsigned d2 = 0; d2 < 2; d2++)
+ {
+ int index = d2 == 0 ? (isNeg ? 1 : 0) : (isNeg ? 3 : 2);
+ Node pab = pbounds[index];
+ if (!pab.isNull())
+ {
+ // { x -> tf[0] }
+ pab = pab.substitute(tfv, tfs);
+ pab = Rewriter::rewrite(pab);
+ Node v_pab = d_model.computeAbstractModelValue(pab);
+ bounds.push_back(v_pab);
+ }
+ else
+ {
+ bounds.push_back(Node::null());
+ }
+ }
+ return std::pair<Node, Node>(bounds[0], bounds[1]);
+}
+
+Node TranscendentalSolver::mkValidPhase(Node a, Node pi)
+{
+ return mkBounded(
+ NodeManager::currentNM()->mkNode(MULT, mkRationalNode(-1), pi), a, pi);
+}
+
+} // namespace nl
+} // namespace arith
+} // namespace theory
+} // namespace CVC4
--- /dev/null
+/********************* */
+/*! \file transcendental_solver.h
+ ** \verbatim
+ ** Top contributors (to current version):
+ ** Andrew Reynolds
+ ** This file is part of the CVC4 project.
+ ** Copyright (c) 2009-2019 by the authors listed in the file AUTHORS
+ ** in the top-level source directory) and their institutional affiliations.
+ ** All rights reserved. See the file COPYING in the top-level source
+ ** directory for licensing information.\endverbatim
+ **
+ ** \brief Solver for handling transcendental functions.
+ **/
+
+#ifndef CVC4__THEORY__ARITH__NL__TRANSCENDENTAL_SOLVER_H
+#define CVC4__THEORY__ARITH__NL__TRANSCENDENTAL_SOLVER_H
+
+#include <map>
+#include <unordered_map>
+#include <unordered_set>
+#include <vector>
+
+#include "expr/node.h"
+#include "theory/arith/nl/nl_lemma_utils.h"
+#include "theory/arith/nl/nl_model.h"
+
+namespace CVC4 {
+namespace theory {
+namespace arith {
+namespace nl {
+
+/** Transcendental solver class
+ *
+ * This class implements model-based refinement schemes
+ * for transcendental functions, described in:
+ *
+ * - "Satisfiability Modulo Transcendental
+ * Functions via Incremental Linearization" by Cimatti
+ * et al., CADE 2017.
+ *
+ * Its main functionality consists of the methods that implement the lemma
+ * schemas below, which return sets of lemmas to be sent on the output channel.
+ */
+class TranscendentalSolver
+{
+ public:
+ TranscendentalSolver(NlModel& m);
+ ~TranscendentalSolver();
+
+ /** init last call
+ *
+ * This is called at the beginning of last call effort check, where
+ * assertions are the set of assertions belonging to arithmetic,
+ * false_asserts is the subset of assertions that are false in the current
+ * model, and xts is the set of extended function terms that are active in
+ * the current context.
+ *
+ * This call may add lemmas to lems/lemsPp based on registering term
+ * information (for example, purification of sine terms).
+ */
+ void initLastCall(const std::vector<Node>& assertions,
+ const std::vector<Node>& false_asserts,
+ const std::vector<Node>& xts,
+ std::vector<Node>& lems,
+ std::vector<Node>& lemsPp);
+ /** increment taylor degree */
+ void incrementTaylorDegree();
+ /** get taylor degree */
+ unsigned getTaylorDegree() const;
+ /** preprocess assertions check model
+ *
+ * This modifies the given assertions in preparation for running a call
+ * to check model.
+ *
+ * This method returns false if a bound for a transcendental function
+ * was conflicting.
+ */
+ bool preprocessAssertionsCheckModel(std::vector<Node>& assertions);
+ /** Process side effect se */
+ void processSideEffect(const NlLemmaSideEffect& se);
+ //-------------------------------------------- lemma schemas
+ /** check transcendental initial refine
+ *
+ * Returns a set of valid theory lemmas, based on
+ * simple facts about transcendental functions.
+ * This mostly follows the initial axioms described in
+ * Section 4 of "Satisfiability
+ * Modulo Transcendental Functions via Incremental
+ * Linearization" by Cimatti et al., CADE 2017.
+ *
+ * Examples:
+ *
+ * sin( x ) = -sin( -x )
+ * ( PI > x > 0 ) => 0 < sin( x ) < 1
+ * exp( x )>0
+ * x<0 => exp( x )<1
+ */
+ std::vector<Node> checkTranscendentalInitialRefine();
+
+ /** check transcendental monotonic
+ *
+ * Returns a set of valid theory lemmas, based on a
+ * lemma scheme that ensures that applications
+ * of transcendental functions respect monotonicity.
+ *
+ * Examples:
+ *
+ * x > y => exp( x ) > exp( y )
+ * PI/2 > x > y > 0 => sin( x ) > sin( y )
+ * PI > x > y > PI/2 => sin( x ) < sin( y )
+ */
+ std::vector<Node> checkTranscendentalMonotonic();
+
+ /** check transcendental tangent planes
+ *
+ * Returns a set of valid theory lemmas, based on
+ * computing an "incremental linearization" of
+ * transcendental functions based on the model values
+ * of transcendental functions and their arguments.
+ * It is based on Figure 3 of "Satisfiability
+ * Modulo Transcendental Functions via Incremental
+ * Linearization" by Cimatti et al., CADE 2017.
+ * This schema is not terminating in general.
+ * It is not enabled by default, and can
+ * be enabled by --nl-ext-tf-tplanes.
+ *
+ * Example:
+ *
+ * Assume we have a term sin(y) where M( y ) = 1, with M the current model.
+ * Note that:
+ * sin(1) ~= .841471
+ *
+ * The Taylor series and remainder of sin(y) of degree 7 are
+ * P_{7,sin(0)}( x ) = x + (-1/6)*x^3 + (1/120)*x^5
+ * R_{7,sin(0),b}( x ) = (-1/5040)*x^7
+ *
+ * This gives us lower and upper bounds :
+ * P_u( x ) = P_{7,sin(0)}( x ) + R_{7,sin(0),b}( x )
+ * ...where note P_u( 1 ) = 4243/5040 ~= .841865
+ * P_l( x ) = P_{7,sin(0)}( x ) - R_{7,sin(0),b}( x )
+ * ...where note P_l( 1 ) = 4241/5040 ~= .841468
+ *
+ * Assume that M( sin(y) ) > P_u( 1 ).
+ * Since the concavity of sine in the region 0 < x < PI/2 is -1,
+ * we add a tangent plane refinement.
+ * The tangent plane at the point 1 in P_u is
+ * given by the formula:
+ * T( x ) = P_u( 1 ) + ((d/dx)(P_u(x)))( 1 )*( x - 1 )
+ * We add the lemma:
+ * ( 0 < y < PI/2 ) => sin( y ) <= T( y )
+ * which is:
+ * ( 0 < y < PI/2 ) => sin( y ) <= (391/720)*(y - 2737/1506)
+ *
+ * Assume that M( sin(y) ) < P_l( 1 ).
+ * Since the concavity of sine in the region 0 < x < PI/2 is -1,
+ * we add a secant plane refinement for some constants ( l, u )
+ * such that 0 <= l < M( y ) < u <= PI/2. Assume we choose
+ * l = 0 and u = M( PI/2 ) = 150517/47912.
+ * The secant planes at point 1 for P_l
+ * are given by the formulas:
+ * S_l( x ) = (x-l)*(P_l( l )-P_l(c))/(l-1) + P_l( l )
+ * S_u( x ) = (x-u)*(P_l( u )-P_l(c))/(u-1) + P_l( u )
+ * We add the lemmas:
+ * ( 0 < y < 1 ) => sin( y ) >= S_l( y )
+ * ( 1 < y < PI/2 ) => sin( y ) >= S_u( y )
+ * which are:
+ * ( 0 < y < 1 ) => (sin y) >= 4241/5040*y
+ * ( 1 < y < PI/2 ) => (sin y) >= c1*(y+c2)
+ * where c1, c2 are rationals (for brevity, omitted here)
+ * such that c1 ~= .277 and c2 ~= 2.032.
+ *
+ * The argument lemSE is the "side effect" of the lemmas in the return
+ * value of this function (for details, see checkLastCall).
+ */
+ std::vector<Node> checkTranscendentalTangentPlanes(
+ std::map<Node, NlLemmaSideEffect>& lemSE);
+ /** check transcendental function refinement for tf
+ *
+ * This method is called by the above method for each "master"
+ * transcendental function application that occurs in an assertion in the
+ * current context. For example, an application like sin(t) is not a master
+ * if we have introduced the constraints:
+ * t=y+2*pi*n ^ -pi <= y <= pi ^ sin(t) = sin(y).
+ * See d_trMaster/d_trSlaves for more detail.
+ *
+ * This runs Figure 3 of Cimatti et al., CADE 2017 for transcendental
+ * function application tf for Taylor degree d. It may add a secant or
+ * tangent plane lemma to lems and its side effect (if one exists)
+ * to lemSE.
+ *
+ * It returns false if the bounds are not precise enough to add a
+ * secant or tangent plane lemma.
+ */
+ bool checkTfTangentPlanesFun(Node tf,
+ unsigned d,
+ std::vector<Node>& lems,
+ std::map<Node, NlLemmaSideEffect>& lemSE);
+ //-------------------------------------------- end lemma schemas
+ private:
+ /** polynomial approximation bounds
+ *
+ * This adds P_l+[x], P_l-[x], P_u+[x], P_u-[x] to pbounds, where x is
+ * d_taylor_real_fv. These are polynomial approximations of the Taylor series
+ * of <k>( 0 ) for degree 2*d where k is SINE or EXPONENTIAL.
+ * These correspond to P_l and P_u from Figure 3 of Cimatti et al., CADE 2017,
+ * for positive/negative (+/-) values of the argument of <k>( 0 ).
+ *
+ * Notice that for certain bounds (e.g. upper bounds for exponential), the
+ * Taylor approximation for a fixed degree is only sound up to a given
+ * upper bound on the argument. To obtain sound lower/upper bounds for a
+ * given <k>( c ), use the function below.
+ */
+ void getPolynomialApproximationBounds(Kind k,
+ unsigned d,
+ std::vector<Node>& pbounds);
+ /** polynomial approximation bounds
+ *
+ * This computes polynomial approximations P_l+[x], P_l-[x], P_u+[x], P_u-[x]
+ * that are sound (lower, upper) bounds for <k>( c ). Notice that these
+ * polynomials may depend on c. In particular, for P_u+[x] for <k>( c ) where
+ * c>0, we return the P_u+[x] from the function above for the minimum degree
+ * d' >= d such that (1-c^{2*d'+1}/(2*d'+1)!) is positive.
+ */
+ void getPolynomialApproximationBoundForArg(Kind k,
+ Node c,
+ unsigned d,
+ std::vector<Node>& pbounds);
+ /** get transcendental function model bounds
+ *
+ * This returns the current lower and upper bounds of transcendental
+ * function application tf based on its Taylor approximation of degree 2*d;
+ * these bounds depend on the model value of its argument.
+ */
+ std::pair<Node, Node> getTfModelBounds(Node tf, unsigned d);
+ /** get monotonicity direction
+ *
+ * Returns whether the slope is positive (+1) or negative (-1)
+ * in region of transcendental function with kind k.
+ * Returns 0 if region is invalid.
+ */
+ int regionToMonotonicityDir(Kind k, int region);
+ /** get concavity
+ *
+ * Returns whether we are concave (+1) or convex (-1)
+ * in region of transcendental function with kind k,
+ * where region is defined above.
+ * Returns 0 if region is invalid.
+ */
+ int regionToConcavity(Kind k, int region);
+ /** region to lower bound
+ *
+ * Returns the term corresponding to the lower
+ * bound of the region of transcendental function
+ * with kind k. Returns Node::null if the region
+ * is invalid, or there is no lower bound for the
+ * region.
+ */
+ Node regionToLowerBound(Kind k, int region);
+ /** region to upper bound
+ *
+ * Returns the term corresponding to the upper
+ * bound of the region of transcendental function
+ * with kind k. Returns Node::null if the region
+ * is invalid, or there is no upper bound for the
+ * region.
+ */
+ Node regionToUpperBound(Kind k, int region);
+ /** get derivative
+ *
+ * Returns d/dx n. Supports cases of n
+ * for transcendental functions applied to x,
+ * multiplication, addition, constants and variables.
+ * Returns Node::null() if derivative is an
+ * unhandled case.
+ */
+ Node getDerivative(Node n, Node x);
+
+ void mkPi();
+ void getCurrentPiBounds(std::vector<Node>& lemmas);
+ /** Make the node -pi <= a <= pi */
+ static Node mkValidPhase(Node a, Node pi);
+
+ /** Reference to the non-linear model object */
+ NlModel& d_model;
+ /** commonly used terms */
+ Node d_zero;
+ Node d_one;
+ Node d_neg_one;
+ Node d_true;
+ Node d_false;
+ /**
+ * Some transcendental functions f(t) are "purified", e.g. we add
+ * t = y ^ f(t) = f(y) where y is a fresh variable. Those that are not
+ * purified we call "master terms".
+ *
+ * The maps below maintain a master/slave relationship over
+ * transcendental functions (SINE, EXPONENTIAL, PI), where above
+ * f(y) is the master of itself and of f(t).
+ *
+ * This is used for ensuring that the argument y of SINE we process is on the
+ * interval [-pi .. pi], and that exponentials are not applied to arguments
+ * that contain transcendental functions.
+ */
+ std::map<Node, Node> d_trMaster;
+ std::map<Node, std::unordered_set<Node, NodeHashFunction>> d_trSlaves;
+ /** The transcendental functions we have done initial refinements on */
+ std::map<Node, bool> d_tf_initial_refine;
+
+ /** concavity region for transcendental functions
+ *
+ * This stores an integer that identifies an interval in
+ * which the current model value for an argument of an
+ * application of a transcendental function resides.
+ *
+ * For exp( x ):
+ * region #1 is -infty < x < infty
+ * For sin( x ):
+ * region #0 is pi < x < infty (this is an invalid region)
+ * region #1 is pi/2 < x <= pi
+ * region #2 is 0 < x <= pi/2
+ * region #3 is -pi/2 < x <= 0
+ * region #4 is -pi < x <= -pi/2
+ * region #5 is -infty < x <= -pi (this is an invalid region)
+ * All regions not listed above, as well as regions 0 and 5
+ * for SINE are "invalid". We only process applications
+ * of transcendental functions whose arguments have model
+ * values that reside in valid regions.
+ */
+ std::unordered_map<Node, int, NodeHashFunction> d_tf_region;
+ /** cache of getPolynomialApproximationBounds, indexed by kind and degree */
+ std::map<Kind, std::map<unsigned, std::vector<Node>>> d_poly_bounds;
+
+ /**
+ * Maps representatives of a congruence class to the members of that class.
+ *
+ * In detail, a congruence class is a set of terms of the form
+ * { f(t1), ..., f(tn) }
+ * such that t1 = ... = tn in the current context. We choose an arbitrary
+ * term among these to be the representative of this congruence class.
+ *
+ * Moreover, notice we compute congruence classes only over terms that
+ * are transcendental function applications that are "master terms",
+ * see d_trMaster/d_trSlave.
+ */
+ std::map<Node, std::vector<Node>> d_funcCongClass;
+ /**
+ * A list of all functions for each kind in { EXPONENTIAL, SINE, POW, PI }
+ * that are representatives of their congruence class.
+ */
+ std::map<Kind, std::vector<Node>> d_funcMap;
+
+ // tangent plane bounds
+ std::map<Node, std::map<Node, Node>> d_tangent_val_bound[4];
+
+ /** secant points (sorted list) for transcendental functions
+ *
+ * This is used for tangent plane refinements for
+ * transcendental functions. This is the set
+ * "get-previous-secant-points" in "Satisfiability
+ * Modulo Transcendental Functions via Incremental
+ * Linearization" by Cimatti et al., CADE 2017, for
+ * each transcendental function application. We store this set for each
+ * Taylor degree.
+ */
+ std::unordered_map<Node,
+ std::map<unsigned, std::vector<Node>>,
+ NodeHashFunction>
+ d_secant_points;
+
+ /** get Taylor series of degree n for function fa centered around point fa[0].
+ *
+ * Return value is ( P_{n,f(a)}( x ), R_{n+1,f(a)}( x ) ) where
+ * the first part of the pair is the Taylor series expansion :
+ * P_{n,f(a)}( x ) = sum_{i=0}^n (f^i( a )/i!)*(x-a)^i
+ * and the second part of the pair is the Taylor series remainder :
+ * R_{n+1,f(a),b}( x ) = (f^{n+1}( b )/(n+1)!)*(x-a)^{n+1}
+ *
+ * The above values are cached for each (f,n) for a fixed variable "a".
+ * To compute the Taylor series for fa, we compute the Taylor series
+ * for ( fa.getKind(), n ) then substitute { a -> fa[0] } if fa[0]!=0.
+ * We compute P_{n,f(0)}( x )/R_{n+1,f(0),b}( x ) for ( fa.getKind(), n )
+ * if fa[0]=0.
+ * In the latter case, note we compute the power x^{n+1}
+ * instead of (x-a)^{n+1}, which can be done faster.
+ */
+ std::pair<Node, Node> getTaylor(Node fa, unsigned n);
+
+ /** internal variables used for constructing (cached) versions of the Taylor
+ * series above.
+ */
+ Node d_taylor_real_fv; // x above
+ Node d_taylor_real_fv_base; // a above
+ Node d_taylor_real_fv_base_rem; // b above
+
+ /** cache of sum and remainder terms for getTaylor */
+ std::unordered_map<Node, std::unordered_map<unsigned, Node>, NodeHashFunction>
+ d_taylor_sum;
+ std::unordered_map<Node, std::unordered_map<unsigned, Node>, NodeHashFunction>
+ d_taylor_rem;
+ /** taylor degree
+ *
+ * Indicates that the degree of the polynomials in the Taylor approximation of
+ * all transcendental functions is 2*d_taylor_degree. This value is set
+ * initially to options::nlExtTfTaylorDegree() and may be incremented
+ * if the option options::nlExtTfIncPrecision() is enabled.
+ */
+ unsigned d_taylor_degree;
+ /** PI
+ *
+ * Note that PI is a (symbolic, non-constant) nullary operator. This is
+ * because its value cannot be computed exactly. We constrain PI to concrete
+ * lower and upper bounds stored in d_pi_bound below.
+ */
+ Node d_pi;
+ /** PI/2 */
+ Node d_pi_2;
+ /** -PI/2 */
+ Node d_pi_neg_2;
+ /** -PI */
+ Node d_pi_neg;
+ /** the concrete lower and upper bounds for PI */
+ Node d_pi_bound[2];
+}; /* class TranscendentalSolver */
+
+} // namespace nl
+} // namespace arith
+} // namespace theory
+} // namespace CVC4
+
+#endif /* CVC4__THEORY__ARITH__NL__TRANSCENDENTAL_SOLVER_H */
+++ /dev/null
-/********************* */
-/*! \file nl_constraint.cpp
- ** \verbatim
- ** Top contributors (to current version):
- ** Andrew Reynolds
- ** This file is part of the CVC4 project.
- ** Copyright (c) 2009-2019 by the authors listed in the file AUTHORS
- ** in the top-level source directory) and their institutional affiliations.
- ** All rights reserved. See the file COPYING in the top-level source
- ** directory for licensing information.\endverbatim
- **
- ** \brief Implementation of utilities for non-linear constraints
- **/
-
-#include "theory/arith/nl_constraint.h"
-
-#include "theory/arith/arith_msum.h"
-#include "theory/arith/arith_utilities.h"
-
-using namespace CVC4::kind;
-
-namespace CVC4 {
-namespace theory {
-namespace arith {
-
-ConstraintDb::ConstraintDb(MonomialDb& mdb) : d_mdb(mdb) {}
-
-void ConstraintDb::registerConstraint(Node atom)
-{
- if (std::find(d_constraints.begin(), d_constraints.end(), atom)
- != d_constraints.end())
- {
- return;
- }
- d_constraints.push_back(atom);
- Trace("nl-ext-debug") << "Register constraint : " << atom << std::endl;
- std::map<Node, Node> msum;
- if (ArithMSum::getMonomialSumLit(atom, msum))
- {
- Trace("nl-ext-debug") << "got monomial sum: " << std::endl;
- if (Trace.isOn("nl-ext-debug"))
- {
- ArithMSum::debugPrintMonomialSum(msum, "nl-ext-debug");
- }
- unsigned max_degree = 0;
- std::vector<Node> all_m;
- std::vector<Node> max_deg_m;
- for (std::map<Node, Node>::iterator itm = msum.begin(); itm != msum.end();
- ++itm)
- {
- if (!itm->first.isNull())
- {
- all_m.push_back(itm->first);
- d_mdb.registerMonomial(itm->first);
- Trace("nl-ext-debug2")
- << "...process monomial " << itm->first << std::endl;
- unsigned d = d_mdb.getDegree(itm->first);
- if (d > max_degree)
- {
- max_degree = d;
- max_deg_m.clear();
- }
- if (d >= max_degree)
- {
- max_deg_m.push_back(itm->first);
- }
- }
- }
- // isolate for each maximal degree monomial
- for (unsigned i = 0; i < all_m.size(); i++)
- {
- Node m = all_m[i];
- Node rhs, coeff;
- int res = ArithMSum::isolate(m, msum, coeff, rhs, atom.getKind());
- if (res != 0)
- {
- Kind type = atom.getKind();
- if (res == -1)
- {
- type = reverseRelationKind(type);
- }
- Trace("nl-ext-constraint") << "Constraint : " << atom << " <=> ";
- if (!coeff.isNull())
- {
- Trace("nl-ext-constraint") << coeff << " * ";
- }
- Trace("nl-ext-constraint")
- << m << " " << type << " " << rhs << std::endl;
- ConstraintInfo& ci = d_c_info[atom][m];
- ci.d_rhs = rhs;
- ci.d_coeff = coeff;
- ci.d_type = type;
- }
- }
- for (unsigned i = 0; i < max_deg_m.size(); i++)
- {
- Node m = max_deg_m[i];
- d_c_info_maxm[atom][m] = true;
- }
- }
- else
- {
- Trace("nl-ext-debug") << "...failed to get monomial sum." << std::endl;
- }
-}
-
-const std::map<Node, std::map<Node, ConstraintInfo> >&
-ConstraintDb::getConstraints()
-{
- return d_c_info;
-}
-
-bool ConstraintDb::isMaximal(Node atom, Node x) const
-{
- std::map<Node, std::map<Node, bool> >::const_iterator itcm =
- d_c_info_maxm.find(atom);
- Assert(itcm != d_c_info_maxm.end());
- return itcm->second.find(x) != itcm->second.end();
-}
-
-} // namespace arith
-} // namespace theory
-} // namespace CVC4
+++ /dev/null
-/********************* */
-/*! \file nl_constraint.h
- ** \verbatim
- ** Top contributors (to current version):
- ** Andrew Reynolds, Tim King
- ** This file is part of the CVC4 project.
- ** Copyright (c) 2009-2019 by the authors listed in the file AUTHORS
- ** in the top-level source directory) and their institutional affiliations.
- ** All rights reserved. See the file COPYING in the top-level source
- ** directory for licensing information.\endverbatim
- **
- ** \brief Utilities for non-linear constraints
- **/
-
-#ifndef CVC4__THEORY__ARITH__NL_CONSTRAINT_H
-#define CVC4__THEORY__ARITH__NL_CONSTRAINT_H
-
-#include <map>
-#include <vector>
-
-#include "expr/kind.h"
-#include "expr/node.h"
-#include "theory/arith/nl_monomial.h"
-
-namespace CVC4 {
-namespace theory {
-namespace arith {
-
-/** constraint information
- *
- * The struct ( d_rhs, d_coeff, d_type ) represents that a literal is of the
- * form (d_coeff * x) <d_type> d_rhs.
- */
-struct ConstraintInfo
-{
- public:
- /** The term on the right hand side of the constraint */
- Node d_rhs;
- /** The coefficent */
- Node d_coeff;
- /** The type (relation) of the constraint */
- Kind d_type;
-}; /* struct ConstraintInfo */
-
-/** A database for constraints */
-class ConstraintDb
-{
- public:
- ConstraintDb(MonomialDb& mdb);
- ~ConstraintDb() {}
- /** register constraint
- *
- * This ensures that atom is in the domain of the constraints maintained by
- * this database.
- */
- void registerConstraint(Node atom);
- /** get constraints
- *
- * Returns a map m such that whenever
- * m[lit][x] = ( r, coeff, k ), then
- * ( lit <=> (coeff * x) <k> r )
- */
- const std::map<Node, std::map<Node, ConstraintInfo> >& getConstraints();
- /** Returns true if m is of maximal degree in atom
- *
- * For example, for atom x^2 + x*y + y >=0, the monomials x^2 and x*y
- * are of maximal degree (2).
- */
- bool isMaximal(Node atom, Node m) const;
-
- private:
- /** Reference to a monomial database */
- MonomialDb& d_mdb;
- /** List of all constraints */
- std::vector<Node> d_constraints;
- /** Is maximal degree */
- std::map<Node, std::map<Node, bool> > d_c_info_maxm;
- /** Constraint information */
- std::map<Node, std::map<Node, ConstraintInfo> > d_c_info;
-};
-
-} // namespace arith
-} // namespace theory
-} // namespace CVC4
-
-#endif /* CVC4__THEORY__ARITH__NL_CONSTRAINT_H */
+++ /dev/null
-/********************* */
-/*! \file nl_lemma_utils.cpp
- ** \verbatim
- ** Top contributors (to current version):
- ** Andrew Reynolds
- ** This file is part of the CVC4 project.
- ** Copyright (c) 2009-2019 by the authors listed in the file AUTHORS
- ** in the top-level source directory) and their institutional affiliations.
- ** All rights reserved. See the file COPYING in the top-level source
- ** directory for licensing information.\endverbatim
- **
- ** \brief Implementation of utilities for the non-linear solver
- **/
-
-#include "theory/arith/nl_lemma_utils.h"
-
-#include "theory/arith/nl_model.h"
-
-namespace CVC4 {
-namespace theory {
-namespace arith {
-
-bool SortNlModel::operator()(Node i, Node j)
-{
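- // order by model value (concrete or abstract, possibly absolute); ties are
- // broken by the node ordering so that the comparison is a strict weak order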
- int cv = d_nlm->compare(i, j, d_isConcrete, d_isAbsolute);
- if (cv == 0)
- {
- return i < j;
- }
- return d_reverse_order ? cv < 0 : cv > 0;
-}
-
-bool SortNonlinearDegree::operator()(Node i, Node j)
-{
- unsigned i_count = getDegree(i);
- unsigned j_count = getDegree(j);
- return i_count == j_count ? (i < j) : (i_count < j_count);
-}
-
-unsigned SortNonlinearDegree::getDegree(Node n) const
-{
- std::map<Node, unsigned>::const_iterator it = d_mdegree.find(n);
- Assert(it != d_mdegree.end());
- return it->second;
-}
-
-Node ArgTrie::add(Node d, const std::vector<Node>& args)
-{
- ArgTrie* at = this;
- for (const Node& a : args)
- {
- at = &(at->d_children[a]);
- }
- if (at->d_data.isNull())
- {
- at->d_data = d;
- }
- return at->d_data;
-}
-
-} // namespace arith
-} // namespace theory
-} // namespace CVC4
+++ /dev/null
-/********************* */
-/*! \file nl_lemma_utils.h
- ** \verbatim
- ** Top contributors (to current version):
- ** Andrew Reynolds
- ** This file is part of the CVC4 project.
- ** Copyright (c) 2009-2019 by the authors listed in the file AUTHORS
- ** in the top-level source directory) and their institutional affiliations.
- ** All rights reserved. See the file COPYING in the top-level source
- ** directory for licensing information.\endverbatim
- **
- ** \brief Utilities for processing lemmas from the non-linear solver
- **/
-
-#ifndef CVC4__THEORY__ARITH__NL_LEMMA_UTILS_H
-#define CVC4__THEORY__ARITH__NL_LEMMA_UTILS_H
-
-#include <tuple>
-#include <vector>
-#include "expr/node.h"
-
-namespace CVC4 {
-namespace theory {
-namespace arith {
-
-class NlModel;
-
-/**
- * A side effect of adding a lemma in the non-linear solver. This is used
- * to specify how the state of the non-linear solver should update. This
- * includes:
- * - A set of secant points to record (for transcendental secant plane
- * inferences).
- */
-struct NlLemmaSideEffect
-{
- NlLemmaSideEffect() {}
- ~NlLemmaSideEffect() {}
- /** secant points to add
- *
- * A member (tf, d, c) in this vector indicates that point c should be added
- * to the list of secant points for an application of a transcendental
- * function tf for Taylor degree d. This is used for incremental linearization
- * for underapproximation (resp. overapproximations) of convex (resp.
- * concave) regions of transcendental functions. For details, see
- * Cimatti et al., CADE 2017.
- */
- std::vector<std::tuple<Node, unsigned, Node> > d_secantPoint;
-};
-
-struct SortNlModel
-{
- SortNlModel()
- : d_nlm(nullptr),
- d_isConcrete(true),
- d_isAbsolute(false),
- d_reverse_order(false)
- {
- }
- /** pointer to the model */
- NlModel* d_nlm;
- /** are we comparing concrete model values? */
- bool d_isConcrete;
- /** are we comparing absolute values? */
- bool d_isAbsolute;
- /** are we in reverse order? */
- bool d_reverse_order;
- /** the comparison */
- bool operator()(Node i, Node j);
-};
-
-struct SortNonlinearDegree
-{
- SortNonlinearDegree(const std::map<Node, unsigned>& m) : d_mdegree(m) {}
- /** pointer to the non-linear extension */
- const std::map<Node, unsigned>& d_mdegree;
- /** Get the degree of n in d_mdegree */
- unsigned getDegree(Node n) const;
- /**
- * Sorts by degree of the monomials, where lower degree monomials come
- * first.
- */
- bool operator()(Node i, Node j);
-};
-
-/** An argument trie, for computing congruent terms */
-class ArgTrie
-{
- public:
- /** children of this node */
- std::map<Node, ArgTrie> d_children;
- /** the data of this node */
- Node d_data;
- /**
- * Set d as the data on the node whose path is [args], return either d if
- * that node has no data, or the data that already occurs there.
- */
- Node add(Node d, const std::vector<Node>& args);
-};
-
-} // namespace arith
-} // namespace theory
-} // namespace CVC4
-
-#endif /* CVC4__THEORY__ARITH__NL_LEMMA_UTILS_H */
+++ /dev/null
-/********************* */
-/*! \file nl_model.cpp
- ** \verbatim
- ** Top contributors (to current version):
- ** Andrew Reynolds
- ** This file is part of the CVC4 project.
- ** Copyright (c) 2009-2019 by the authors listed in the file AUTHORS
- ** in the top-level source directory) and their institutional affiliations.
- ** All rights reserved. See the file COPYING in the top-level source
- ** directory for licensing information.\endverbatim
- **
- ** \brief Model object for the non-linear extension class
- **/
-
-#include "theory/arith/nl_model.h"
-
-#include "expr/node_algorithm.h"
-#include "options/arith_options.h"
-#include "theory/arith/arith_msum.h"
-#include "theory/arith/arith_utilities.h"
-#include "theory/rewriter.h"
-
-using namespace CVC4::kind;
-
-namespace CVC4 {
-namespace theory {
-namespace arith {
-
-NlModel::NlModel(context::Context* c) : d_used_approx(false)
-{
- d_true = NodeManager::currentNM()->mkConst(true);
- d_false = NodeManager::currentNM()->mkConst(false);
- d_zero = NodeManager::currentNM()->mkConst(Rational(0));
- d_one = NodeManager::currentNM()->mkConst(Rational(1));
- d_two = NodeManager::currentNM()->mkConst(Rational(2));
-}
-
-NlModel::~NlModel() {}
-
-void NlModel::reset(TheoryModel* m, std::map<Node, Node>& arithModel)
-{
- d_model = m;
- d_mv[0].clear();
- d_mv[1].clear();
- d_arithVal.clear();
- // process arithModel
- std::map<Node, Node>::iterator it;
- for (const std::pair<const Node, Node>& m2 : arithModel)
- {
- d_arithVal[m2.first] = m2.second;
- }
-}
-
-void NlModel::resetCheck()
-{
- d_used_approx = false;
- d_check_model_solved.clear();
- d_check_model_bounds.clear();
- d_check_model_vars.clear();
- d_check_model_subs.clear();
-}
-
-Node NlModel::computeConcreteModelValue(Node n)
-{
- return computeModelValue(n, true);
-}
-
-Node NlModel::computeAbstractModelValue(Node n)
-{
- return computeModelValue(n, false);
-}
-
-Node NlModel::computeModelValue(Node n, bool isConcrete)
-{
- unsigned index = isConcrete ? 0 : 1;
- std::map<Node, Node>::iterator it = d_mv[index].find(n);
- if (it != d_mv[index].end())
- {
- return it->second;
- }
- Trace("nl-ext-mv-debug") << "computeModelValue " << n << ", index=" << index
- << std::endl;
- Node ret;
- Kind nk = n.getKind();
- if (n.isConst())
- {
- ret = n;
- }
- else if (!isConcrete && hasTerm(n))
- {
- // use model value for abstraction
- ret = getRepresentative(n);
- }
- else if (n.getNumChildren() == 0)
- {
- // we are interested in the exact value of PI, which cannot be computed.
- // hence, we return PI itself when asked for the concrete value.
- if (nk == PI)
- {
- ret = n;
- }
- else
- {
- ret = getValueInternal(n);
- }
- }
- else
- {
- // otherwise, compute true value
- TheoryId ctid = theory::kindToTheoryId(nk);
- if (ctid != THEORY_ARITH && ctid != THEORY_BOOL && ctid != THEORY_BUILTIN)
- {
- // we directly look up terms not belonging to arithmetic
- ret = getValueInternal(n);
- }
- else
- {
- std::vector<Node> children;
- if (n.getMetaKind() == metakind::PARAMETERIZED)
- {
- children.push_back(n.getOperator());
- }
- for (unsigned i = 0, nchild = n.getNumChildren(); i < nchild; i++)
- {
- Node mc = computeModelValue(n[i], isConcrete);
- children.push_back(mc);
- }
- ret = NodeManager::currentNM()->mkNode(nk, children);
- ret = Rewriter::rewrite(ret);
- }
- }
- Trace("nl-ext-mv-debug") << "computed " << (index == 0 ? "M" : "M_A") << "["
- << n << "] = " << ret << std::endl;
- d_mv[index][n] = ret;
- return ret;
-}
-
-bool NlModel::hasTerm(Node n) const
-{
- return d_arithVal.find(n) != d_arithVal.end();
-}
-
-Node NlModel::getRepresentative(Node n) const
-{
- if (n.isConst())
- {
- return n;
- }
- std::map<Node, Node>::const_iterator it = d_arithVal.find(n);
- if (it != d_arithVal.end())
- {
- AlwaysAssert(it->second.isConst());
- return it->second;
- }
- return d_model->getRepresentative(n);
-}
-
-Node NlModel::getValueInternal(Node n) const
-{
- if (n.isConst())
- {
- return n;
- }
- std::map<Node, Node>::const_iterator it = d_arithVal.find(n);
- if (it != d_arithVal.end())
- {
- AlwaysAssert(it->second.isConst());
- return it->second;
- }
- // It is unconstrained in the model, return 0.
- return d_zero;
-}
-
-int NlModel::compare(Node i, Node j, bool isConcrete, bool isAbsolute)
-{
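- // terms with constant model values are considered greater than terms whose
- // model value is not constant; two non-constant values are unordered (0)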
- Node ci = computeModelValue(i, isConcrete);
- Node cj = computeModelValue(j, isConcrete);
- if (ci.isConst())
- {
- if (cj.isConst())
- {
- return compareValue(ci, cj, isAbsolute);
- }
- return 1;
- }
- return cj.isConst() ? -1 : 0;
-}
-
-int NlModel::compareValue(Node i, Node j, bool isAbsolute) const
-{
- Assert(i.isConst() && j.isConst());
- int ret;
- if (i == j)
- {
- ret = 0;
- }
- else if (!isAbsolute)
- {
- ret = i.getConst<Rational>() < j.getConst<Rational>() ? 1 : -1;
- }
- else
- {
- ret = (i.getConst<Rational>().abs() == j.getConst<Rational>().abs()
- ? 0
- : (i.getConst<Rational>().abs() < j.getConst<Rational>().abs()
- ? 1
- : -1));
- }
- return ret;
-}
-
-bool NlModel::checkModel(const std::vector<Node>& assertions,
- const std::vector<Node>& false_asserts,
- unsigned d,
- std::vector<Node>& lemmas,
- std::vector<Node>& gs)
-{
- Trace("nl-ext-cm-debug") << " solve for equalities..." << std::endl;
- for (const Node& atom : false_asserts)
- {
- // see if it corresponds to a univariate polynomial equation of degree two
- if (atom.getKind() == EQUAL)
- {
- if (!solveEqualitySimple(atom, d, lemmas))
- {
- // no chance we will satisfy this equality
- Trace("nl-ext-cm") << "...check-model : failed to solve equality : "
- << atom << std::endl;
- }
- }
- }
-
- // all remaining variables are constrained to their exact model values
- Trace("nl-ext-cm-debug") << " set exact bounds for remaining variables..."
- << std::endl;
- std::unordered_set<TNode, TNodeHashFunction> visited;
- std::vector<TNode> visit;
- TNode cur;
- for (const Node& a : assertions)
- {
- visit.push_back(a);
- do
- {
- cur = visit.back();
- visit.pop_back();
- if (visited.find(cur) == visited.end())
- {
- visited.insert(cur);
- if (cur.getType().isReal() && !cur.isConst())
- {
- Kind k = cur.getKind();
- if (k != MULT && k != PLUS && k != NONLINEAR_MULT
- && !isTranscendentalKind(k))
- {
- // if we have not set an approximate bound for it
- if (!hasCheckModelAssignment(cur))
- {
- // set its exact model value in the substitution
- Node curv = computeConcreteModelValue(cur);
- Trace("nl-ext-cm")
- << "check-model-bound : exact : " << cur << " = ";
- printRationalApprox("nl-ext-cm", curv);
- Trace("nl-ext-cm") << std::endl;
- bool ret = addCheckModelSubstitution(cur, curv);
- AlwaysAssert(ret);
- }
- }
- }
- for (const Node& cn : cur)
- {
- visit.push_back(cn);
- }
- }
- } while (!visit.empty());
- }
-
- Trace("nl-ext-cm-debug") << " check assertions..." << std::endl;
- std::vector<Node> check_assertions;
- for (const Node& a : assertions)
- {
- // don't have to check tautological literals
- if (d_tautology.find(a) != d_tautology.end())
- {
- continue;
- }
- if (d_check_model_solved.find(a) == d_check_model_solved.end())
- {
- Node av = a;
- // apply the substitution to a
- if (!d_check_model_vars.empty())
- {
- av = arithSubstitute(av, d_check_model_vars, d_check_model_subs);
- av = Rewriter::rewrite(av);
- }
- // simple check literal
- if (!simpleCheckModelLit(av))
- {
- Trace("nl-ext-cm") << "...check-model : assertion failed : " << a
- << std::endl;
- check_assertions.push_back(av);
- Trace("nl-ext-cm-debug")
- << "...check-model : failed assertion, value : " << av << std::endl;
- }
- }
- }
-
- if (!check_assertions.empty())
- {
- Trace("nl-ext-cm") << "...simple check failed." << std::endl;
- // TODO (#1450) check model for general case
- return false;
- }
- Trace("nl-ext-cm") << "...simple check succeeded!" << std::endl;
-
- // must assert and re-check if produce models is true
- if (options::produceModels())
- {
- NodeManager* nm = NodeManager::currentNM();
- // model guard whose semantics is "the model we constructed holds"
- Node mg = nm->mkSkolem("model", nm->booleanType());
- gs.push_back(mg);
- // assert the constructed model as assertions
- for (const std::pair<const Node, std::pair<Node, Node> > cb :
- d_check_model_bounds)
- {
- Node l = cb.second.first;
- Node u = cb.second.second;
- Node v = cb.first;
- Node pred = nm->mkNode(AND, nm->mkNode(GEQ, v, l), nm->mkNode(GEQ, u, v));
- pred = nm->mkNode(OR, mg.negate(), pred);
- lemmas.push_back(pred);
- }
- }
- return true;
-}
-
-bool NlModel::addCheckModelSubstitution(TNode v, TNode s)
-{
- // should not substitute the same variable twice
- Trace("nl-ext-model") << "* check model substitution : " << v << " -> " << s
- << std::endl;
- // should not set exact bound more than once
- if (std::find(d_check_model_vars.begin(), d_check_model_vars.end(), v)
- != d_check_model_vars.end())
- {
- Trace("nl-ext-model") << "...ERROR: already has value." << std::endl;
- // this should never happen since substitutions should be applied eagerly
- Assert(false);
- return false;
- }
- // if we previously had an approximate bound, the exact bound should be in its
- // range
- std::map<Node, std::pair<Node, Node> >::iterator itb =
- d_check_model_bounds.find(v);
- if (itb != d_check_model_bounds.end())
- {
- if (s.getConst<Rational>() >= itb->second.first.getConst<Rational>()
- || s.getConst<Rational>() <= itb->second.second.getConst<Rational>())
- {
- Trace("nl-ext-model")
- << "...ERROR: already has bound which is out of range." << std::endl;
- return false;
- }
- }
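- // apply the new substitution v -> s to the previously stored solved forms,
- // so that v no longer occurs in any entry of d_check_model_subs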
- std::vector<Node> varsTmp;
- varsTmp.push_back(v);
- std::vector<Node> subsTmp;
- subsTmp.push_back(s);
- for (unsigned i = 0, size = d_check_model_subs.size(); i < size; i++)
- {
- Node ms = d_check_model_subs[i];
- Node mss = arithSubstitute(ms, varsTmp, subsTmp);
- if (mss != ms)
- {
- mss = Rewriter::rewrite(mss);
- }
- d_check_model_subs[i] = mss;
- }
- d_check_model_vars.push_back(v);
- d_check_model_subs.push_back(s);
- return true;
-}
-
-bool NlModel::addCheckModelBound(TNode v, TNode l, TNode u)
-{
- Trace("nl-ext-model") << "* check model bound : " << v << " -> [" << l << " "
- << u << "]" << std::endl;
- if (l == u)
- {
- // bound is exact, can add as substitution
- return addCheckModelSubstitution(v, l);
- }
- // should not set a bound for a value that is exact
- if (std::find(d_check_model_vars.begin(), d_check_model_vars.end(), v)
- != d_check_model_vars.end())
- {
- Trace("nl-ext-model")
- << "...ERROR: setting bound for variable that already has exact value."
- << std::endl;
- Assert(false);
- return false;
- }
- Assert(l.isConst());
- Assert(u.isConst());
- Assert(l.getConst<Rational>() <= u.getConst<Rational>());
- d_check_model_bounds[v] = std::pair<Node, Node>(l, u);
- if (Trace.isOn("nl-ext-cm"))
- {
- Trace("nl-ext-cm") << "check-model-bound : approximate : ";
- printRationalApprox("nl-ext-cm", l);
- Trace("nl-ext-cm") << " <= " << v << " <= ";
- printRationalApprox("nl-ext-cm", u);
- Trace("nl-ext-cm") << std::endl;
- }
- return true;
-}
-
-bool NlModel::hasCheckModelAssignment(Node v) const
-{
- if (d_check_model_bounds.find(v) != d_check_model_bounds.end())
- {
- return true;
- }
- return std::find(d_check_model_vars.begin(), d_check_model_vars.end(), v)
- != d_check_model_vars.end();
-}
-
-void NlModel::setUsedApproximate() { d_used_approx = true; }
-
-bool NlModel::usedApproximate() const { return d_used_approx; }
-
-void NlModel::addTautology(Node n)
-{
- // ensure rewritten
- n = Rewriter::rewrite(n);
- std::unordered_set<TNode, TNodeHashFunction> visited;
- std::vector<TNode> visit;
- TNode cur;
- visit.push_back(n);
- do
- {
- cur = visit.back();
- visit.pop_back();
- if (visited.find(cur) == visited.end())
- {
- visited.insert(cur);
- if (cur.getKind() == AND)
- {
- // children of AND are also implied
- for (const Node& cn : cur)
- {
- visit.push_back(cn);
- }
- }
- else
- {
- // is this an arithmetic literal?
- Node atom = cur.getKind() == NOT ? cur[0] : cur;
- if ((atom.getKind() == EQUAL && atom[0].getType().isReal())
- || atom.getKind() == LEQ)
- {
- // Add to tautological literals if it does not contain
- // non-linear multiplication. We cannot consider literals
- // with non-linear multiplication to be tautological since this
- // model object is responsible for checking whether they hold.
- // (TODO, cvc4-projects #113: revisit this).
- if (!expr::hasSubtermKind(NONLINEAR_MULT, atom))
- {
- Trace("nl-taut") << "Tautological literal: " << atom << std::endl;
- d_tautology.insert(cur);
- }
- }
- }
- }
- } while (!visit.empty());
-}
-
-bool NlModel::solveEqualitySimple(Node eq,
- unsigned d,
- std::vector<Node>& lemmas)
-{
- Node seq = eq;
- if (!d_check_model_vars.empty())
- {
- seq = arithSubstitute(eq, d_check_model_vars, d_check_model_subs);
- seq = Rewriter::rewrite(seq);
- if (seq.isConst())
- {
- if (seq.getConst<bool>())
- {
- d_check_model_solved[eq] = Node::null();
- return true;
- }
- return false;
- }
- }
- Trace("nl-ext-cms") << "simple solve equality " << seq << "..." << std::endl;
- Assert(seq.getKind() == EQUAL);
- std::map<Node, Node> msum;
- if (!ArithMSum::getMonomialSumLit(seq, msum))
- {
- Trace("nl-ext-cms") << "...fail, could not determine monomial sum."
- << std::endl;
- return false;
- }
- bool is_valid = true;
- // the variable we will solve a quadratic equation for
- Node var;
- Node a = d_zero;
- Node b = d_zero;
- Node c = d_zero;
- NodeManager* nm = NodeManager::currentNM();
- // the list of variables that occur as a monomial in msum, and whose value
- // is so far unconstrained in the model.
- std::unordered_set<Node, NodeHashFunction> unc_vars;
- // the list of variables that occur as a factor in a monomial, and whose
- // value is so far unconstrained in the model.
- std::unordered_set<Node, NodeHashFunction> unc_vars_factor;
- for (std::pair<const Node, Node>& m : msum)
- {
- Node v = m.first;
- Node coeff = m.second.isNull() ? d_one : m.second;
- if (v.isNull())
- {
- c = coeff;
- }
- else if (v.getKind() == NONLINEAR_MULT)
- {
- if (v.getNumChildren() == 2 && v[0].isVar() && v[0] == v[1]
- && (var.isNull() || var == v[0]))
- {
- // may solve quadratic
- a = coeff;
- var = v[0];
- }
- else
- {
- is_valid = false;
- Trace("nl-ext-cms-debug")
- << "...invalid due to non-linear monomial " << v << std::endl;
- // may wish to set an exact bound for a factor and repeat
- for (const Node& vc : v)
- {
- unc_vars_factor.insert(vc);
- }
- }
- }
- else if (!v.isVar() || (!var.isNull() && var != v))
- {
- Trace("nl-ext-cms-debug")
- << "...invalid due to factor " << v << std::endl;
- // cannot solve multivariate
- if (is_valid)
- {
- is_valid = false;
- // if b is non-zero, then var is also an unconstrained variable
- if (b != d_zero)
- {
- unc_vars.insert(var);
- unc_vars_factor.insert(var);
- }
- }
- // if v is unconstrained, we may turn this equality into a substitution
- unc_vars.insert(v);
- unc_vars_factor.insert(v);
- }
- else
- {
- // set the variable to solve for
- b = coeff;
- var = v;
- }
- }
- if (!is_valid)
- {
- // see if we can solve for a variable?
- for (const Node& uv : unc_vars)
- {
- Trace("nl-ext-cm-debug") << "check subs var : " << uv << std::endl;
- // cannot already have a bound
- if (uv.isVar() && !hasCheckModelAssignment(uv))
- {
- Node slv;
- Node veqc;
- if (ArithMSum::isolate(uv, msum, veqc, slv, EQUAL) != 0)
- {
- Assert(!slv.isNull());
- // Currently do not support substitution-with-coefficients.
- // We also ensure types are correct here, which avoids substituting
- // a term of non-integer type for a variable of integer type.
- if (veqc.isNull() && !expr::hasSubterm(slv, uv)
- && slv.getType().isSubtypeOf(uv.getType()))
- {
- Trace("nl-ext-cm")
- << "check-model-subs : " << uv << " -> " << slv << std::endl;
- bool ret = addCheckModelSubstitution(uv, slv);
- if (ret)
- {
- Trace("nl-ext-cms") << "...success, model substitution " << uv
- << " -> " << slv << std::endl;
- d_check_model_solved[eq] = uv;
- }
- return ret;
- }
- }
- }
- }
- // see if we can assign a variable to a constant
- for (const Node& uvf : unc_vars_factor)
- {
- Trace("nl-ext-cm-debug") << "check set var : " << uvf << std::endl;
- // cannot already have a bound
- if (uvf.isVar() && !hasCheckModelAssignment(uvf))
- {
- Node uvfv = computeConcreteModelValue(uvf);
- Trace("nl-ext-cm") << "check-model-bound : exact : " << uvf << " = ";
- printRationalApprox("nl-ext-cm", uvfv);
- Trace("nl-ext-cm") << std::endl;
- bool ret = addCheckModelSubstitution(uvf, uvfv);
- // recurse
- return ret ? solveEqualitySimple(eq, d, lemmas) : false;
- }
- }
- Trace("nl-ext-cms") << "...fail due to constrained invalid terms."
- << std::endl;
- return false;
- }
- else if (var.isNull() || var.getType().isInteger())
- {
- // cannot solve quadratic equations for integer variables
- Trace("nl-ext-cms") << "...fail due to variable to solve for." << std::endl;
- return false;
- }
-
- // we are linear, it is simple
- if (a == d_zero)
- {
- if (b == d_zero)
- {
- Trace("nl-ext-cms") << "...fail due to zero a/b." << std::endl;
- Assert(false);
- return false;
- }
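- // solve the linear equation b * var + c = 0, i.e. var = -c / b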
- Node val = nm->mkConst(-c.getConst<Rational>() / b.getConst<Rational>());
- Trace("nl-ext-cm") << "check-model-bound : exact : " << var << " = ";
- printRationalApprox("nl-ext-cm", val);
- Trace("nl-ext-cm") << std::endl;
- bool ret = addCheckModelSubstitution(var, val);
- if (ret)
- {
- Trace("nl-ext-cms") << "...success, solved linear." << std::endl;
- d_check_model_solved[eq] = var;
- }
- return ret;
- }
- Trace("nl-ext-quad") << "Solve quadratic : " << seq << std::endl;
- Trace("nl-ext-quad") << " a : " << a << std::endl;
- Trace("nl-ext-quad") << " b : " << b << std::endl;
- Trace("nl-ext-quad") << " c : " << c << std::endl;
- Node two_a = nm->mkNode(MULT, d_two, a);
- two_a = Rewriter::rewrite(two_a);
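- // the discriminant b^2 - 4*a*c, written below as b*b - 2*(2*a)*c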
- Node sqrt_val = nm->mkNode(
- MINUS, nm->mkNode(MULT, b, b), nm->mkNode(MULT, d_two, two_a, c));
- sqrt_val = Rewriter::rewrite(sqrt_val);
- Trace("nl-ext-quad") << "Will approximate sqrt " << sqrt_val << std::endl;
- Assert(sqrt_val.isConst());
- // if it is negative, then we are in conflict
- if (sqrt_val.getConst<Rational>().sgn() == -1)
- {
- Node conf = seq.negate();
- Trace("nl-ext-lemma") << "NlModel::Lemma : quadratic no root : " << conf
- << std::endl;
- lemmas.push_back(conf);
- Trace("nl-ext-cms") << "...fail due to negative discriminant." << std::endl;
- return false;
- }
- if (hasCheckModelAssignment(var))
- {
- Trace("nl-ext-cms") << "...fail due to bounds on variable to solve for."
- << std::endl;
- // two quadratic equations for same variable, give up
- return false;
- }
- // approximate the square root of sqrt_val
- Node l, u;
- if (!getApproximateSqrt(sqrt_val, l, u, 15 + d))
- {
- Trace("nl-ext-cms") << "...fail, could not approximate sqrt." << std::endl;
- return false;
- }
- d_used_approx = true;
- Trace("nl-ext-quad") << "...got " << l << " <= sqrt(" << sqrt_val
- << ") <= " << u << std::endl;
- Node negb = nm->mkConst(-b.getConst<Rational>());
- Node coeffa = nm->mkConst(Rational(1) / two_a.getConst<Rational>());
- // two possible bound regions
- Node bounds[2][2];
- Node diff_bound[2];
- Node m_var = computeConcreteModelValue(var);
- Assert(m_var.isConst());
- for (unsigned r = 0; r < 2; r++)
- {
- for (unsigned b2 = 0; b2 < 2; b2++)
- {
- Node val = b2 == 0 ? l : u;
- // (-b +- approx_sqrt( b^2 - 4ac ))/2a
- Node approx = nm->mkNode(
- MULT, coeffa, nm->mkNode(r == 0 ? MINUS : PLUS, negb, val));
- approx = Rewriter::rewrite(approx);
- bounds[r][b2] = approx;
- Assert(approx.isConst());
- }
- if (bounds[r][0].getConst<Rational>() > bounds[r][1].getConst<Rational>())
- {
- // ensure bound is (lower, upper)
- Node tmp = bounds[r][0];
- bounds[r][0] = bounds[r][1];
- bounds[r][1] = tmp;
- }
- Node diff =
- nm->mkNode(MINUS,
- m_var,
- nm->mkNode(MULT,
- nm->mkConst(Rational(1) / Rational(2)),
- nm->mkNode(PLUS, bounds[r][0], bounds[r][1])));
- Trace("nl-ext-cm-debug") << "Bound option #" << r << " : ";
- printRationalApprox("nl-ext-cm-debug", bounds[r][0]);
- Trace("nl-ext-cm-debug") << "...";
- printRationalApprox("nl-ext-cm-debug", bounds[r][1]);
- Trace("nl-ext-cm-debug") << std::endl;
- diff = Rewriter::rewrite(diff);
- Assert(diff.isConst());
- diff = nm->mkConst(diff.getConst<Rational>().abs());
- diff_bound[r] = diff;
- Trace("nl-ext-cm-debug") << "...diff from model value (";
- printRationalApprox("nl-ext-cm-debug", m_var);
- Trace("nl-ext-cm-debug") << ") is ";
- printRationalApprox("nl-ext-cm-debug", diff_bound[r]);
- Trace("nl-ext-cm-debug") << std::endl;
- }
- // take the one that var is closer to in the model
- Node cmp = nm->mkNode(GEQ, diff_bound[0], diff_bound[1]);
- cmp = Rewriter::rewrite(cmp);
- Assert(cmp.isConst());
- unsigned r_use_index = cmp == d_true ? 1 : 0;
- Trace("nl-ext-cm") << "check-model-bound : approximate (sqrt) : ";
- printRationalApprox("nl-ext-cm", bounds[r_use_index][0]);
- Trace("nl-ext-cm") << " <= " << var << " <= ";
- printRationalApprox("nl-ext-cm", bounds[r_use_index][1]);
- Trace("nl-ext-cm") << std::endl;
- bool ret =
- addCheckModelBound(var, bounds[r_use_index][0], bounds[r_use_index][1]);
- if (ret)
- {
- d_check_model_solved[eq] = var;
- Trace("nl-ext-cms") << "...success, solved quadratic." << std::endl;
- }
- return ret;
-}
-
-bool NlModel::simpleCheckModelLit(Node lit)
-{
- Trace("nl-ext-cms") << "*** Simple check-model lit for " << lit << "..."
- << std::endl;
- if (lit.isConst())
- {
- Trace("nl-ext-cms") << " return constant." << std::endl;
- return lit.getConst<bool>();
- }
- NodeManager* nm = NodeManager::currentNM();
- bool pol = lit.getKind() != kind::NOT;
- Node atom = lit.getKind() == kind::NOT ? lit[0] : lit;
-
- if (atom.getKind() == EQUAL)
- {
- // x = a is ( x >= a ^ x <= a )
- for (unsigned i = 0; i < 2; i++)
- {
- Node lit2 = nm->mkNode(GEQ, atom[i], atom[1 - i]);
- if (!pol)
- {
- lit2 = lit2.negate();
- }
- lit2 = Rewriter::rewrite(lit2);
- bool success = simpleCheckModelLit(lit2);
- if (success != pol)
- {
- // false != true -> one conjunct of equality is false, we fail
- // true != false -> one disjunct of disequality is true, we succeed
- return success;
- }
- }
- // both checks passed and polarity is true, or both checks failed and
- // polarity is false
- return pol;
- }
- else if (atom.getKind() != GEQ)
- {
- Trace("nl-ext-cms") << " failed due to unknown literal." << std::endl;
- return false;
- }
- // get the monomial sum
- std::map<Node, Node> msum;
- if (!ArithMSum::getMonomialSumLit(atom, msum))
- {
- Trace("nl-ext-cms") << " failed due to get msum." << std::endl;
- return false;
- }
- // simple interval analysis
- if (simpleCheckModelMsum(msum, pol))
- {
- return true;
- }
- // can also try reasoning about univariate quadratic equations
- Trace("nl-ext-cms-debug")
- << "* Try univariate quadratic analysis..." << std::endl;
- std::vector<Node> vs_invalid;
- std::unordered_set<Node, NodeHashFunction> vs;
- std::map<Node, Node> v_a;
- std::map<Node, Node> v_b;
- // get coefficients...
- for (std::pair<const Node, Node>& m : msum)
- {
- Node v = m.first;
- if (!v.isNull())
- {
- if (v.isVar())
- {
- v_b[v] = m.second.isNull() ? d_one : m.second;
- vs.insert(v);
- }
- else if (v.getKind() == NONLINEAR_MULT && v.getNumChildren() == 2
- && v[0] == v[1] && v[0].isVar())
- {
- v_a[v[0]] = m.second.isNull() ? d_one : m.second;
- vs.insert(v[0]);
- }
- else
- {
- vs_invalid.push_back(v);
- }
- }
- }
- // solve the valid variables...
- Node invalid_vsum = vs_invalid.empty() ? d_zero
- : (vs_invalid.size() == 1
- ? vs_invalid[0]
- : nm->mkNode(PLUS, vs_invalid));
- // substitution to try
- std::vector<Node> qvars;
- std::vector<Node> qsubs;
- for (const Node& v : vs)
- {
- // is it a valid variable?
- std::map<Node, std::pair<Node, Node> >::iterator bit =
- d_check_model_bounds.find(v);
- if (!expr::hasSubterm(invalid_vsum, v) && bit != d_check_model_bounds.end())
- {
- std::map<Node, Node>::iterator it = v_a.find(v);
- if (it != v_a.end())
- {
- Node a = it->second;
- Assert(a.isConst());
- int asgn = a.getConst<Rational>().sgn();
- Assert(asgn != 0);
- Node t = nm->mkNode(MULT, a, v, v);
- Node b = d_zero;
- it = v_b.find(v);
- if (it != v_b.end())
- {
- b = it->second;
- t = nm->mkNode(PLUS, t, nm->mkNode(MULT, b, v));
- }
- t = Rewriter::rewrite(t);
- Trace("nl-ext-cms-debug") << "Trying to find min/max for quadratic "
- << t << "..." << std::endl;
- Trace("nl-ext-cms-debug") << " a = " << a << std::endl;
- Trace("nl-ext-cms-debug") << " b = " << b << std::endl;
- // find maximal/minimal value on the interval
- Node apex = nm->mkNode(
- DIVISION, nm->mkNode(UMINUS, b), nm->mkNode(MULT, d_two, a));
- apex = Rewriter::rewrite(apex);
- Assert(apex.isConst());
- // for lower, upper, whether we are greater than the apex
- bool cmp[2];
- Node boundn[2];
- for (unsigned r = 0; r < 2; r++)
- {
- boundn[r] = r == 0 ? bit->second.first : bit->second.second;
- Node cmpn = nm->mkNode(GT, boundn[r], apex);
- cmpn = Rewriter::rewrite(cmpn);
- Assert(cmpn.isConst());
- cmp[r] = cmpn.getConst<bool>();
- }
- Trace("nl-ext-cms-debug") << " apex " << apex << std::endl;
- Trace("nl-ext-cms-debug")
- << " lower " << boundn[0] << ", cmp: " << cmp[0] << std::endl;
- Trace("nl-ext-cms-debug")
- << " upper " << boundn[1] << ", cmp: " << cmp[1] << std::endl;
- Assert(boundn[0].getConst<Rational>()
- <= boundn[1].getConst<Rational>());
- Node s;
- qvars.push_back(v);
- if (cmp[0] != cmp[1])
- {
- Assert(!cmp[0] && cmp[1]);
- // does the sign match the bound?
- if ((asgn == 1) == pol)
- {
- // the apex is the max/min value
- s = apex;
- Trace("nl-ext-cms-debug") << " ...set to apex." << std::endl;
- }
- else
- {
- // it is one of the endpoints, plug in and compare
- Node tcmpn[2];
- for (unsigned r = 0; r < 2; r++)
- {
- qsubs.push_back(boundn[r]);
- Node ts = arithSubstitute(t, qvars, qsubs);
- tcmpn[r] = Rewriter::rewrite(ts);
- qsubs.pop_back();
- }
- Node tcmp = nm->mkNode(LT, tcmpn[0], tcmpn[1]);
- Trace("nl-ext-cms-debug")
- << " ...both sides of apex, compare " << tcmp << std::endl;
- tcmp = Rewriter::rewrite(tcmp);
- Assert(tcmp.isConst());
- unsigned bindex_use = (tcmp.getConst<bool>() == pol) ? 1 : 0;
- Trace("nl-ext-cms-debug")
- << " ...set to " << (bindex_use == 1 ? "upper" : "lower")
- << std::endl;
- s = boundn[bindex_use];
- }
- }
- else
- {
- // both to one side of the apex
- // we figure out which bound to use (lower or upper) based on
- // three factors:
- // (1) whether a's sign is positive,
- // (2) whether we are greater than the apex of the parabola,
- // (3) the polarity of the constraint, i.e. >= or <=.
- // there are 8 cases of these factors, which we test here.
- unsigned bindex_use = (((asgn == 1) == cmp[0]) == pol) ? 0 : 1;
- Trace("nl-ext-cms-debug")
- << " ...set to " << (bindex_use == 1 ? "upper" : "lower")
- << std::endl;
- s = boundn[bindex_use];
- }
- Assert(!s.isNull());
- qsubs.push_back(s);
- Trace("nl-ext-cms") << "* set bound based on quadratic : " << v
- << " -> " << s << std::endl;
- }
- }
- }
- if (!qvars.empty())
- {
- Assert(qvars.size() == qsubs.size());
- Node slit = arithSubstitute(lit, qvars, qsubs);
- slit = Rewriter::rewrite(slit);
- return simpleCheckModelLit(slit);
- }
- return false;
-}
-
-bool NlModel::simpleCheckModelMsum(const std::map<Node, Node>& msum, bool pol)
-{
- Trace("nl-ext-cms-debug") << "* Try simple interval analysis..." << std::endl;
- NodeManager* nm = NodeManager::currentNM();
- // map from transcendental functions to whether they were set to lower
- // bound
- bool simpleSuccess = true;
- std::map<Node, bool> set_bound;
- std::vector<Node> sum_bound;
- for (const std::pair<const Node, Node>& m : msum)
- {
- Node v = m.first;
- if (v.isNull())
- {
- sum_bound.push_back(m.second.isNull() ? d_one : m.second);
- }
- else
- {
- Trace("nl-ext-cms-debug") << "- monomial : " << v << std::endl;
- // --- whether we should set a lower bound for this monomial
- bool set_lower =
- (m.second.isNull() || m.second.getConst<Rational>().sgn() == 1)
- == pol;
- Trace("nl-ext-cms-debug")
- << "set bound to " << (set_lower ? "lower" : "upper") << std::endl;
-
- // --- Collect variables and factors in v
- std::vector<Node> vars;
- std::vector<unsigned> factors;
- if (v.getKind() == NONLINEAR_MULT)
- {
- unsigned last_start = 0;
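- // count the multiplicity of each distinct factor; equal factors are assumed
- // to be adjacent in the (rewritten) children of v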
- for (unsigned i = 0, nchildren = v.getNumChildren(); i < nchildren; i++)
- {
- // are we at the end?
- if (i + 1 == nchildren || v[i + 1] != v[i])
- {
- unsigned vfact = 1 + (i - last_start);
- last_start = (i + 1);
- vars.push_back(v[i]);
- factors.push_back(vfact);
- }
- }
- }
- else
- {
- vars.push_back(v);
- factors.push_back(1);
- }
-
- // --- Get the lower and upper bounds and sign information.
- // Whether we have an (odd) number of negative factors in vars, apart
- // from the variable at choose_index.
- bool has_neg_factor = false;
- int choose_index = -1;
- std::vector<Node> ls;
- std::vector<Node> us;
- // the relevant sign information for variables with odd exponents:
- // 1: both signs of the interval of this variable are positive,
- // -1: both signs of the interval of this variable are negative.
- std::vector<int> signs;
- Trace("nl-ext-cms-debug") << "get sign information..." << std::endl;
- for (unsigned i = 0, size = vars.size(); i < size; i++)
- {
- Node vc = vars[i];
- unsigned vcfact = factors[i];
- if (Trace.isOn("nl-ext-cms-debug"))
- {
- Trace("nl-ext-cms-debug") << "-- " << vc;
- if (vcfact > 1)
- {
- Trace("nl-ext-cms-debug") << "^" << vcfact;
- }
- Trace("nl-ext-cms-debug") << " ";
- }
- std::map<Node, std::pair<Node, Node> >::iterator bit =
- d_check_model_bounds.find(vc);
- // if there is a model bound for this term
- if (bit != d_check_model_bounds.end())
- {
- Node l = bit->second.first;
- Node u = bit->second.second;
- ls.push_back(l);
- us.push_back(u);
- int vsign = 0;
- if (vcfact % 2 == 1)
- {
- vsign = 1;
- int lsgn = l.getConst<Rational>().sgn();
- int usgn = u.getConst<Rational>().sgn();
- Trace("nl-ext-cms-debug")
- << "bound_sign(" << lsgn << "," << usgn << ") ";
- if (lsgn == -1)
- {
- if (usgn < 1)
- {
- // must have a negative factor
- has_neg_factor = !has_neg_factor;
- vsign = -1;
- }
- else if (choose_index == -1)
- {
- // set the choose index to this
- choose_index = i;
- vsign = 0;
- }
- else
- {
- // ambiguous, can't determine the bound
- Trace("nl-ext-cms")
- << " failed due to ambiguious monomial." << std::endl;
- return false;
- }
- }
- }
- Trace("nl-ext-cms-debug") << " -> " << vsign << std::endl;
- signs.push_back(vsign);
- }
- else
- {
- Trace("nl-ext-cms-debug") << std::endl;
- Trace("nl-ext-cms")
- << " failed due to unknown bound for " << vc << std::endl;
- // should either assign a model bound or eliminate the variable
- // via substitution
- Assert(false);
- return false;
- }
- }
- // whether we will try to minimize/maximize (-1/1) the absolute value
- int setAbs = (set_lower == has_neg_factor) ? 1 : -1;
- Trace("nl-ext-cms-debug")
- << "set absolute value to " << (setAbs == 1 ? "maximal" : "minimal")
- << std::endl;
-
- std::vector<Node> vbs;
- Trace("nl-ext-cms-debug") << "set bounds..." << std::endl;
- for (unsigned i = 0, size = vars.size(); i < size; i++)
- {
- Node vc = vars[i];
- unsigned vcfact = factors[i];
- Node l = ls[i];
- Node u = us[i];
- bool vc_set_lower;
- int vcsign = signs[i];
- Trace("nl-ext-cms-debug")
- << "Bounds for " << vc << " : " << l << ", " << u
- << ", sign : " << vcsign << ", factor : " << vcfact << std::endl;
- if (l == u)
- {
- // by convention, always say it is lower if they are the same
- vc_set_lower = true;
- Trace("nl-ext-cms-debug")
- << "..." << vc << " equal bound, set to lower" << std::endl;
- }
- else
- {
- if (vcfact % 2 == 0)
- {
- // minimize or maximize its absolute value
- Rational la = l.getConst<Rational>().abs();
- Rational ua = u.getConst<Rational>().abs();
- if (la == ua)
- {
- // by convention, always say it is lower if abs are the same
- vc_set_lower = true;
- Trace("nl-ext-cms-debug")
- << "..." << vc << " equal abs, set to lower" << std::endl;
- }
- else
- {
- vc_set_lower = (la > ua) == (setAbs == 1);
- }
- }
- else if (signs[i] == 0)
- {
- // we choose this index to match the overall set_lower
- vc_set_lower = set_lower;
- }
- else
- {
- vc_set_lower = (signs[i] != setAbs);
- }
- Trace("nl-ext-cms-debug")
- << "..." << vc << " set to " << (vc_set_lower ? "lower" : "upper")
- << std::endl;
- }
- // check whether this is a conflicting bound
- std::map<Node, bool>::iterator itsb = set_bound.find(vc);
- if (itsb == set_bound.end())
- {
- set_bound[vc] = vc_set_lower;
- }
- else if (itsb->second != vc_set_lower)
- {
- Trace("nl-ext-cms")
- << " failed due to conflicting bound for " << vc << std::endl;
- return false;
- }
- // must over/under approximate based on vc_set_lower, computed above
- Node vb = vc_set_lower ? l : u;
- for (unsigned i2 = 0; i2 < vcfact; i2++)
- {
- vbs.push_back(vb);
- }
- }
- if (!simpleSuccess)
- {
- break;
- }
- Node vbound = vbs.size() == 1 ? vbs[0] : nm->mkNode(MULT, vbs);
- sum_bound.push_back(ArithMSum::mkCoeffTerm(m.second, vbound));
- }
- }
- // if the exact bound was computed via simple analysis above
- // make the bound
- Node bound;
- if (sum_bound.size() > 1)
- {
- bound = nm->mkNode(kind::PLUS, sum_bound);
- }
- else if (sum_bound.size() == 1)
- {
- bound = sum_bound[0];
- }
- else
- {
- bound = d_zero;
- }
- // make the comparison
- Node comp = nm->mkNode(kind::GEQ, bound, d_zero);
- if (!pol)
- {
- comp = comp.negate();
- }
- Trace("nl-ext-cms") << " comparison is : " << comp << std::endl;
- comp = Rewriter::rewrite(comp);
- Assert(comp.isConst());
- Trace("nl-ext-cms") << " returned : " << comp << std::endl;
- return comp == d_true;
-}
-
-bool NlModel::getApproximateSqrt(Node c, Node& l, Node& u, unsigned iter) const
-{
- Assert(c.isConst());
- if (c == d_one || c == d_zero)
- {
- l = c;
- u = c;
- return true;
- }
- Rational rc = c.getConst<Rational>();
-
- Rational rl = rc < Rational(1) ? rc : Rational(1);
- Rational ru = rc < Rational(1) ? Rational(1) : rc;
- unsigned count = 0;
- Rational half = Rational(1) / Rational(2);
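- // bisection search: the invariant rl <= sqrt(rc) <= ru holds initially and
- // is maintained while the interval is halved on each iteration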
- while (count < iter)
- {
- Rational curr = half * (rl + ru);
- Rational curr_sq = curr * curr;
- if (curr_sq == rc)
- {
- rl = curr;
- ru = curr;
- break;
- }
- else if (curr_sq < rc)
- {
- rl = curr;
- }
- else
- {
- ru = curr;
- }
- count++;
- }
-
- NodeManager* nm = NodeManager::currentNM();
- l = nm->mkConst(rl);
- u = nm->mkConst(ru);
- return true;
-}
-
-void NlModel::printModelValue(const char* c, Node n, unsigned prec) const
-{
- if (Trace.isOn(c))
- {
- Trace(c) << " " << n << " -> ";
- for (int i = 1; i >= 0; --i)
- {
- std::map<Node, Node>::const_iterator it = d_mv[i].find(n);
- Assert(it != d_mv[i].end());
- if (it->second.isConst())
- {
- printRationalApprox(c, it->second, prec);
- }
- else
- {
- Trace(c) << "?";
- }
- Trace(c) << (i == 1 ? " [actual: " : " ]");
- }
- Trace(c) << std::endl;
- }
-}
-
-void NlModel::getModelValueRepair(
- std::map<Node, Node>& arithModel,
- std::map<Node, std::pair<Node, Node>>& approximations)
-{
- Trace("nl-model") << "NlModel::getModelValueRepair:" << std::endl;
-
- // Record the approximations we used. This code calls the
- // recordApproximation method of the model, which overrides the model
- // values for variables that we solved for, using techniques specific to
- // this class.
- NodeManager* nm = NodeManager::currentNM();
- for (const std::pair<const Node, std::pair<Node, Node> >& cb :
- d_check_model_bounds)
- {
- Node l = cb.second.first;
- Node u = cb.second.second;
- Node pred;
- Node v = cb.first;
- if (l != u)
- {
- pred = nm->mkNode(AND, nm->mkNode(GEQ, v, l), nm->mkNode(GEQ, u, v));
- Trace("nl-model") << v << " approximated as " << pred << std::endl;
- Node witness;
- if (options::modelWitnessValue())
- {
- // witness is the midpoint
- witness = nm->mkNode(
- MULT, nm->mkConst(Rational(1, 2)), nm->mkNode(PLUS, l, u));
- witness = Rewriter::rewrite(witness);
- Trace("nl-model") << v << " witness is " << witness << std::endl;
- }
- approximations[v] = std::pair<Node, Node>(pred, witness);
- }
- else
- {
- // overwrite
- arithModel[v] = l;
- Trace("nl-model") << v << " exact approximation is " << l << std::endl;
- }
- }
- // Also record the exact values we used. An exact value can be seen as a
- // special kind of approximation of the form (witness x. x = exact_value).
- // Notice that the above term gets rewritten such that the choice function
- // is eliminated.
- for (size_t i = 0, num = d_check_model_vars.size(); i < num; i++)
- {
- Node v = d_check_model_vars[i];
- Node s = d_check_model_subs[i];
- // overwrite
- arithModel[v] = s;
- Trace("nl-model") << v << " solved is " << s << std::endl;
- }
-
- // multiplication terms should not be given values; their values are
- // implied by the monomials that they consist of
- std::vector<Node> amErase;
- for (const std::pair<const Node, Node>& am : arithModel)
- {
- if (am.first.getKind() == NONLINEAR_MULT)
- {
- amErase.push_back(am.first);
- }
- }
- for (const Node& ae : amErase)
- {
- arithModel.erase(ae);
- }
-}
-
-} // namespace arith
-} // namespace theory
-} // namespace CVC4
+++ /dev/null
-/********************* */
-/*! \file nl_model.h
- ** \verbatim
- ** Top contributors (to current version):
- ** Andrew Reynolds
- ** This file is part of the CVC4 project.
- ** Copyright (c) 2009-2019 by the authors listed in the file AUTHORS
- ** in the top-level source directory) and their institutional affiliations.
- ** All rights reserved. See the file COPYING in the top-level source
- ** directory for licensing information.\endverbatim
- **
- ** \brief Model object for the non-linear extension class
- **/
-
-#ifndef CVC4__THEORY__ARITH__NL_MODEL_H
-#define CVC4__THEORY__ARITH__NL_MODEL_H
-
-#include <map>
-#include <unordered_map>
-#include <vector>
-
-#include "context/cdo.h"
-#include "context/context.h"
-#include "expr/kind.h"
-#include "expr/node.h"
-#include "theory/theory_model.h"
-
-namespace CVC4 {
-namespace theory {
-namespace arith {
-
-class NonlinearExtension;
-
-/** Non-linear model finder
- *
- * This class is responsible for all queries related to the (candidate) model
- * that is being processed by the non-linear arithmetic solver. It further
- * implements techniques for finding modifications to the current candidate
- * model in the case it can determine that a model exists. These include
- * techniques based on solving (quadratic) equations and bound analysis.
- */
-class NlModel
-{
- friend class NonlinearExtension;
-
- public:
- NlModel(context::Context* c);
- ~NlModel();
- /** reset
- *
- * This method is called once at the beginning of a last call effort check,
- * where m is the model of the theory of arithmetic. This method resets the
- * cache of computed model values.
- */
- void reset(TheoryModel* m, std::map<Node, Node>& arithModel);
- /** reset check
- *
- * This method is called when the non-linear arithmetic solver restarts
- * its computation of lemmas and models during a last call effort check.
- */
- void resetCheck();
- /** compute model value
- *
- * This computes model values for terms based on two semantics, a "concrete"
- * semantics and an "abstract" semantics.
- *
- * if isConcrete is true, this means compute the value of n based on its
- * children recursively. (we call this its "concrete" value)
- * if isConcrete is false, this means lookup the value of n in the model.
- * (we call this its "abstract" value)
- * In other words, !isConcrete treats multiplication terms and transcendental
- * function applications as variables, whereas isConcrete computes their
- * actual values based on the semantics of multiplication. This is a key
- * distinction used in the model-based refinement scheme in Cimatti et al.
- * TACAS 2017.
- *
- * For example, if M( a ) = 2, M( b ) = 3, M( a*b ) = 5, i.e. the variable
- * for a*b has been assigned a value 5 by the linear solver, then :
- *
- * computeModelValue( a*b, true ) =
- * computeModelValue( a, true )*computeModelValue( b, true ) = 2*3 = 6
- * whereas:
- * computeModelValue( a*b, false ) = 5
- */
- Node computeConcreteModelValue(Node n);
- Node computeAbstractModelValue(Node n);
- Node computeModelValue(Node n, bool isConcrete);
-
- /** Compare arithmetic terms i and j based on an ordering.
- *
- * This returns:
- * -1 if i < j, 1 if i > j, or 0 if i == j
- *
- * If isConcrete is true, we consider the concrete model values of i and j,
- * otherwise, we consider their abstract model values. For definitions of
- * concrete vs abstract model values, see NlModel::computeModelValue.
- *
- * If isAbsolute is true, we compare the absolute value of the above
- * values.
- */
- int compare(Node i, Node j, bool isConcrete, bool isAbsolute);
- /** Compare arithmetic terms i and j based on an ordering.
- *
- * This returns:
- * -1 if i < j, 1 if i > j, or 0 if i == j
- *
- * If isAbsolute is true, we compare the absolute value of i and j
- */
- int compareValue(Node i, Node j, bool isAbsolute) const;
-
- //------------------------------ recording model substitutions and bounds
- /** add check model substitution
- *
- * Adds the model substitution v -> s. This applies the substitution
- * { v -> s } to each term in d_check_model_subs and adds v,s to
- * d_check_model_vars and d_check_model_subs respectively.
- * If this method returns false, then the substitution v -> s is inconsistent
- * with the current substitution and bounds.
- */
- bool addCheckModelSubstitution(TNode v, TNode s);
- /** add check model bound
- *
- * Adds the bound x -> < l, u > to the map above, and records the
- * approximation ( x, l <= x <= u ) in the model. This method returns false
- * if the bound is inconsistent with the current model substitution or
- * bounds.
- */
- bool addCheckModelBound(TNode v, TNode l, TNode u);
- /** has check model assignment
- *
- * Have we assigned v in the current checkModel(...) call?
- *
- * This method returns true if variable v is in the domain of
- * d_check_model_bounds or if it occurs in d_check_model_vars.
- */
- bool hasCheckModelAssignment(Node v) const;
- /** Check model
- *
- * Checks the current model based on solving for equalities, and using error
- * bounds on the Taylor approximation.
- *
- * If this function returns true, then all assertions in the input argument
- * "assertions" are satisfied for all interpretations of variables within
- * their computed bounds (as stored in d_check_model_bounds).
- *
- * For details, see Section 3 of Cimatti et al CADE 2017 under the heading
- * "Detecting Satisfiable Formulas".
- *
- * d is a degree indicating how precise our computations are.
- */
- bool checkModel(const std::vector<Node>& assertions,
- const std::vector<Node>& false_asserts,
- unsigned d,
- std::vector<Node>& lemmas,
- std::vector<Node>& gs);
- /**
- * Set that we have used an approximation during this check. This flag is
- * reset on a call to resetCheck. It is set when we use reasoning that
- * is limited by a degree of precision we are using. In other words, if we
- * used an approximation, then we might still be able to establish a lemma or
- * determine that the input is SAT if we increased our precision.
- */
- void setUsedApproximate();
- /** Did we use an approximation during this check? */
- bool usedApproximate() const;
- /** Set tautology
- *
- * This states that formula n is a tautology (satisfied in all models).
- * We call this on internally generated lemmas. This method computes a
- * set of literals that are implied by n, that are hence tautological
- * as well, such as:
- * l_pi <= real.pi <= u_pi (pi approximations)
- * sin(x) = -1*sin(-x)
- * where these literals are internally generated for the purposes
- * of guiding the models of the linear solver.
- *
- * TODO (cvc4-projects #113): it would be helpful if we could do this even
- * more aggressively by ignoring all internally generated literals.
- *
- * Tautological literals do not need be checked during checkModel.
- */
- void addTautology(Node n);
- //------------------------------ end recording model substitutions and bounds
-
- /** print model value, for debugging.
- *
- * This prints both the abstract and concrete model values for arithmetic
- * term n on Trace c with precision prec.
- */
- void printModelValue(const char* c, Node n, unsigned prec = 5) const;
- /** get model value repair
- *
- * This gets mappings that indicate how to repair the model generated by the
- * linear arithmetic solver. This method should be called after a successful
- * call to checkModel above.
- *
- * The mapping arithModel is updated by this method to map arithmetic terms v
- * to their (exact) value that was computed during checkModel; the mapping
- * approximations is updated to store approximate values in the form of a
- * pair (P, w), where P is a predicate that describes the possible values of
- * v and w is a witness point that satisfies this predicate.
- */
- void getModelValueRepair(
- std::map<Node, Node>& arithModel,
- std::map<Node, std::pair<Node, Node>>& approximations);
-
- private:
- /** The current model */
- TheoryModel* d_model;
- /** Get the model value of n from the model object above */
- Node getValueInternal(Node n) const;
- /** Does the equality engine of the model have term n? */
- bool hasTerm(Node n) const;
- /** Get the representative of n in the model */
- Node getRepresentative(Node n) const;
-
- //---------------------------check model
- /** solve equality simple
- *
- * This method is used during checkModel(...). It takes as input an
- * equality eq. If it returns true, then eq is correct-by-construction based
- * on the information stored in our model representation (see
- * d_check_model_vars, d_check_model_subs, d_check_model_bounds), and eq
- * is added to d_check_model_solved. The equality eq may involve any
- * number of variables, and monomials of arbitrary degree. If this method
- * returns false, then we did not show that the equality was true in the
- * model. This method uses incomplete techniques based on interval
- * analysis and quadratic equation solving.
- *
- * If it can be shown that the equality must be false in the current
- * model, then we may add a lemma to lemmas explaining why this is the case.
- * For instance, if eq reduces to a univariate quadratic equation with no
- * root, we send a conflict clause of the form a*x^2 + b*x + c != 0.
- */
- bool solveEqualitySimple(Node eq, unsigned d, std::vector<Node>& lemmas);
-
- /** simple check model for transcendental functions for literal
- *
- * This method returns true if literal is true for all interpretations of
- * transcendental functions within their error bounds (as stored
- * in d_check_model_bounds). This is determined by a simple under/over
- * approximation of the value of sum of (linear) monomials. For example,
- * if we determine that .8 < sin( 1 ) < .9, this function will return
- * true for literals like:
- * 2.0*sin( 1 ) > 1.5
- * -1.0*sin( 1 ) < -0.79
- * -1.0*sin( 1 ) > -0.91
- * sin( 1 )*sin( 1 ) + sin( 1 ) > 0.0
- * It will return false for literals like:
- * sin( 1 ) > 0.85
- * It will also return false for literals like:
- * -0.3*sin( 1 )*sin( 2 ) + sin( 2 ) > .7
- * sin( sin( 1 ) ) > .5
- * since the bounds on these terms cannot quickly be determined.
- */
- bool simpleCheckModelLit(Node lit);
- bool simpleCheckModelMsum(const std::map<Node, Node>& msum, bool pol);
- //---------------------------end check model
- /** get approximate sqrt
- *
- * This approximates the square root of positive constant c. If this method
- * returns true, then l and u are updated to constants such that
- * l <= sqrt( c ) <= u
- * The argument iter is the number of iterations in the binary search to
- * perform. By default, this is set to 15, which is usually enough to be
- * precise in the majority of simple cases, whereas not prohibitively
- * expensive to compute.
- */
- bool getApproximateSqrt(Node c, Node& l, Node& u, unsigned iter = 15) const;
-
- /** commonly used terms */
- Node d_zero;
- Node d_one;
- Node d_two;
- Node d_true;
- Node d_false;
- Node d_null;
- /**
- * The values that the arithmetic theory solver assigned in the model. This
- * corresponds to exactly the set of equalities that TheoryArith is currently
- * sending to TheoryModel during collectModelInfo.
- */
- std::map<Node, Node> d_arithVal;
- /** cache of model values
- *
- * Stores the concrete/abstract model values. This is a cache of the
- * computeModelValue method.
- */
- std::map<Node, Node> d_mv[2];
- /**
- * A substitution from variables that appear in assertions to a solved form
- * term. These vectors are ordered in the form:
- * x_1 -> t_1 ... x_n -> t_n
- * where x_i is not in the free variables of t_j for j>=i.
- */
- std::vector<Node> d_check_model_vars;
- std::vector<Node> d_check_model_subs;
- /** lower and upper bounds for check model
- *
- * For each term t in the domain of this map, if this stores the pair
- * (c_l, c_u) then the model M is such that c_l <= M( t ) <= c_u.
- *
- * We add terms whose value is approximated in the model to this map, which
- * includes:
- * (1) applications of transcendental functions, whose value is approximated
- * by the Taylor series,
- * (2) variables we have solved quadratic equations for, whose value
- * involves approximations of square roots.
- */
- std::map<Node, std::pair<Node, Node> > d_check_model_bounds;
- /**
- * The map from literals that our model construction solved, to the variable
- * that was solved for. Examples of such literals are:
- * (1) Equalities x = t, which we turned into a model substitution x -> t,
- * where x not in FV( t ), and
- * (2) Equalities a*x*x + b*x + c = 0, which we turned into a model bound
- * (-b + s*sqrt(b*b-4*a*c))/(2*a) - E <= x <=
- * (-b + s*sqrt(b*b-4*a*c))/(2*a) + E.
- *
- * These literals are exempt from check-model, since they are satisfied by
- * definition of our model construction.
- */
- std::unordered_map<Node, Node, NodeHashFunction> d_check_model_solved;
- /** did we use an approximation on this call to last-call effort? */
- bool d_used_approx;
- /** the set of all tautological literals */
- std::unordered_set<Node, NodeHashFunction> d_tautology;
-}; /* class NlModel */
-
-} // namespace arith
-} // namespace theory
-} // namespace CVC4
-
-#endif /* CVC4__THEORY__ARITH__NL_MODEL_H */
+++ /dev/null
-/********************* */
-/*! \file nl_monomial.cpp
- ** \verbatim
- ** Top contributors (to current version):
- ** Andrew Reynolds
- ** This file is part of the CVC4 project.
- ** Copyright (c) 2009-2019 by the authors listed in the file AUTHORS
- ** in the top-level source directory) and their institutional affiliations.
- ** All rights reserved. See the file COPYING in the top-level source
- ** directory for licensing information.\endverbatim
- **
- ** \brief Implementation of utilities for monomials
- **/
-
-#include "theory/arith/nl_monomial.h"
-
-#include "theory/arith/arith_utilities.h"
-#include "theory/arith/nl_lemma_utils.h"
-#include "theory/rewriter.h"
-
-using namespace CVC4::kind;
-
-namespace CVC4 {
-namespace theory {
-namespace arith {
-
-// Returns a[key] if key is in a or value otherwise.
-unsigned getCountWithDefault(const NodeMultiset& a, Node key, unsigned value)
-{
- NodeMultiset::const_iterator it = a.find(key);
- return (it == a.end()) ? value : it->second;
-}
-// Given two multisets return the multiset difference a \ b.
-NodeMultiset diffMultiset(const NodeMultiset& a, const NodeMultiset& b)
-{
- NodeMultiset difference;
- for (NodeMultiset::const_iterator it_a = a.begin(); it_a != a.end(); ++it_a)
- {
- Node key = it_a->first;
- const unsigned a_value = it_a->second;
- const unsigned b_value = getCountWithDefault(b, key, 0);
- if (a_value > b_value)
- {
- difference[key] = a_value - b_value;
- }
- }
- return difference;
-}
-
-// Return a vector containing a[key] repetitions of key in a multiset a.
-std::vector<Node> ExpandMultiset(const NodeMultiset& a)
-{
- std::vector<Node> expansion;
- for (NodeMultiset::const_iterator it_a = a.begin(); it_a != a.end(); ++it_a)
- {
- expansion.insert(expansion.end(), it_a->second, it_a->first);
- }
- return expansion;
-}
-
-// status 0 : n equal, -1 : n superset, 1 : n subset
-void MonomialIndex::addTerm(Node n,
- const std::vector<Node>& reps,
- MonomialDb* nla,
- int status,
- unsigned argIndex)
-{
- if (status == 0)
- {
- if (argIndex == reps.size())
- {
- d_monos.push_back(n);
- }
- else
- {
- d_data[reps[argIndex]].addTerm(n, reps, nla, status, argIndex + 1);
- }
- }
- for (std::map<Node, MonomialIndex>::iterator it = d_data.begin();
- it != d_data.end();
- ++it)
- {
- if (status != 0 || argIndex == reps.size() || it->first != reps[argIndex])
- {
- // if we do not contain this variable, then if we were a superset,
- // fail (-2), otherwise we are subset. if we do contain this
- // variable, then if we were equal, we are superset since variables
- // are ordered, otherwise we remain the same.
- int new_status =
- std::find(reps.begin(), reps.end(), it->first) == reps.end()
- ? (status >= 0 ? 1 : -2)
- : (status == 0 ? -1 : status);
- if (new_status != -2)
- {
- it->second.addTerm(n, reps, nla, new_status, argIndex);
- }
- }
- }
- // compare for subsets
- for (unsigned i = 0; i < d_monos.size(); i++)
- {
- Node m = d_monos[i];
- if (m != n)
- {
- // we are superset if we are equal and haven't traversed all variables
- int cstatus = status == 0 ? (argIndex == reps.size() ? 0 : -1) : status;
- Trace("nl-ext-mindex-debug") << " compare " << n << " and " << m
- << ", status = " << cstatus << std::endl;
- if (cstatus <= 0 && nla->isMonomialSubset(m, n))
- {
- nla->registerMonomialSubset(m, n);
- Trace("nl-ext-mindex-debug") << "...success" << std::endl;
- }
- else if (cstatus >= 0 && nla->isMonomialSubset(n, m))
- {
- nla->registerMonomialSubset(n, m);
- Trace("nl-ext-mindex-debug") << "...success (rev)" << std::endl;
- }
- }
- }
-}
-
-MonomialDb::MonomialDb()
-{
- d_one = NodeManager::currentNM()->mkConst(Rational(1));
-}
-
-void MonomialDb::registerMonomial(Node n)
-{
- if (std::find(d_monomials.begin(), d_monomials.end(), n) != d_monomials.end())
- {
- return;
- }
- d_monomials.push_back(n);
- Trace("nl-ext-debug") << "Register monomial : " << n << std::endl;
- Kind k = n.getKind();
- if (k == NONLINEAR_MULT)
- {
- // get exponent count
- unsigned nchild = n.getNumChildren();
- for (unsigned i = 0; i < nchild; i++)
- {
- d_m_exp[n][n[i]]++;
- if (i == 0 || n[i] != n[i - 1])
- {
- d_m_vlist[n].push_back(n[i]);
- }
- }
- d_m_degree[n] = nchild;
- }
- else if (n == d_one)
- {
- d_m_exp[n].clear();
- d_m_vlist[n].clear();
- d_m_degree[n] = 0;
- }
- else
- {
- Assert(k != PLUS && k != MULT);
- d_m_exp[n][n] = 1;
- d_m_vlist[n].push_back(n);
- d_m_degree[n] = 1;
- }
- std::sort(d_m_vlist[n].begin(), d_m_vlist[n].end());
- Trace("nl-ext-mindex") << "Add monomial to index : " << n << std::endl;
- d_m_index.addTerm(n, d_m_vlist[n], this);
-}
-
-void MonomialDb::registerMonomialSubset(Node a, Node b)
-{
- Assert(isMonomialSubset(a, b));
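- // For example, if a = x*y and b = x*x*y*z, the difference children are
- // [x, z] and the stored factor is x*z, so that a * (x*z) = b.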
-
- const NodeMultiset& a_exponent_map = getMonomialExponentMap(a);
- const NodeMultiset& b_exponent_map = getMonomialExponentMap(b);
-
- std::vector<Node> diff_children =
- ExpandMultiset(diffMultiset(b_exponent_map, a_exponent_map));
- Assert(!diff_children.empty());
-
- d_m_contain_parent[a].push_back(b);
- d_m_contain_children[b].push_back(a);
-
- Node mult_term = safeConstructNary(MULT, diff_children);
- Node nlmult_term = safeConstructNary(NONLINEAR_MULT, diff_children);
- d_m_contain_mult[a][b] = mult_term;
- d_m_contain_umult[a][b] = nlmult_term;
- Trace("nl-ext-mindex") << "..." << a << " is a subset of " << b
- << ", difference is " << mult_term << std::endl;
-}
-
-bool MonomialDb::isMonomialSubset(Node am, Node bm) const
-{
- const NodeMultiset& a = getMonomialExponentMap(am);
- const NodeMultiset& b = getMonomialExponentMap(bm);
- for (NodeMultiset::const_iterator it_a = a.begin(); it_a != a.end(); ++it_a)
- {
- Node key = it_a->first;
- const unsigned a_value = it_a->second;
- const unsigned b_value = getCountWithDefault(b, key, 0);
- if (a_value > b_value)
- {
- return false;
- }
- }
- return true;
-}
-
-const NodeMultiset& MonomialDb::getMonomialExponentMap(Node monomial) const
-{
- MonomialExponentMap::const_iterator it = d_m_exp.find(monomial);
- Assert(it != d_m_exp.end());
- return it->second;
-}
-
-unsigned MonomialDb::getExponent(Node monomial, Node v) const
-{
- MonomialExponentMap::const_iterator it = d_m_exp.find(monomial);
- if (it == d_m_exp.end())
- {
- return 0;
- }
- std::map<Node, unsigned>::const_iterator itv = it->second.find(v);
- if (itv == it->second.end())
- {
- return 0;
- }
- return itv->second;
-}
-
-const std::vector<Node>& MonomialDb::getVariableList(Node monomial) const
-{
- std::map<Node, std::vector<Node> >::const_iterator itvl =
- d_m_vlist.find(monomial);
- Assert(itvl != d_m_vlist.end());
- return itvl->second;
-}
-
-unsigned MonomialDb::getDegree(Node monomial) const
-{
- std::map<Node, unsigned>::const_iterator it = d_m_degree.find(monomial);
- Assert(it != d_m_degree.end());
- return it->second;
-}
-
-void MonomialDb::sortByDegree(std::vector<Node>& ms) const
-{
- SortNonlinearDegree snlad(d_m_degree);
- std::sort(ms.begin(), ms.end(), snlad);
-}
-
-void MonomialDb::sortVariablesByModel(std::vector<Node>& ms, NlModel& m)
-{
- SortNlModel smv;
- smv.d_nlm = &m;
- smv.d_isConcrete = false;
- smv.d_isAbsolute = true;
- smv.d_reverse_order = true;
- for (const Node& msc : ms)
- {
- std::sort(d_m_vlist[msc].begin(), d_m_vlist[msc].end(), smv);
- }
-}
-
-const std::map<Node, std::vector<Node> >& MonomialDb::getContainsChildrenMap()
-{
- return d_m_contain_children;
-}
-
-const std::map<Node, std::vector<Node> >& MonomialDb::getContainsParentMap()
-{
- return d_m_contain_parent;
-}
-
-Node MonomialDb::getContainsDiff(Node a, Node b) const
-{
- std::map<Node, std::map<Node, Node> >::const_iterator it =
- d_m_contain_mult.find(a);
- if (it == d_m_contain_mult.end())
- {
- return Node::null();
- }
- std::map<Node, Node>::const_iterator it2 = it->second.find(b);
- if (it2 == it->second.end())
- {
- return Node::null();
- }
- return it2->second;
-}
-
-Node MonomialDb::getContainsDiffNl(Node a, Node b) const
-{
- std::map<Node, std::map<Node, Node> >::const_iterator it =
- d_m_contain_umult.find(a);
- if (it == d_m_contain_umult.end())
- {
- return Node::null();
- }
- std::map<Node, Node>::const_iterator it2 = it->second.find(b);
- if (it2 == it->second.end())
- {
- return Node::null();
- }
- return it2->second;
-}
-
-Node MonomialDb::mkMonomialRemFactor(Node n,
- const NodeMultiset& n_exp_rem) const
-{
- std::vector<Node> children;
- const NodeMultiset& exponent_map = getMonomialExponentMap(n);
- for (NodeMultiset::const_iterator itme2 = exponent_map.begin();
- itme2 != exponent_map.end();
- ++itme2)
- {
- Node v = itme2->first;
- unsigned inc = itme2->second;
- Trace("nl-ext-mono-factor")
- << "..." << inc << " factors of " << v << std::endl;
- unsigned count_in_n_exp_rem = getCountWithDefault(n_exp_rem, v, 0);
- Assert(count_in_n_exp_rem <= inc);
- inc -= count_in_n_exp_rem;
- Trace("nl-ext-mono-factor")
- << "......rem, now " << inc << " factors of " << v << std::endl;
- children.insert(children.end(), inc, v);
- }
- Node ret = safeConstructNary(MULT, children);
- ret = Rewriter::rewrite(ret);
- Trace("nl-ext-mono-factor") << "...return : " << ret << std::endl;
- return ret;
-}
-
-} // namespace arith
-} // namespace theory
-} // namespace CVC4
+++ /dev/null
-/********************* */
-/*! \file nl_monomial.h
- ** \verbatim
- ** Top contributors (to current version):
- ** Andrew Reynolds, Tim King
- ** This file is part of the CVC4 project.
- ** Copyright (c) 2009-2019 by the authors listed in the file AUTHORS
- ** in the top-level source directory) and their institutional affiliations.
- ** All rights reserved. See the file COPYING in the top-level source
- ** directory for licensing information.\endverbatim
- **
- ** \brief Utilities for monomials
- **/
-
-#ifndef CVC4__THEORY__ARITH__NL_MONOMIAL_H
-#define CVC4__THEORY__ARITH__NL_MONOMIAL_H
-
-#include <map>
-#include <vector>
-
-#include "expr/node.h"
-
-namespace CVC4 {
-namespace theory {
-namespace arith {
-
-class MonomialDb;
-class NlModel;
-
-typedef std::map<Node, unsigned> NodeMultiset;
-typedef std::map<Node, NodeMultiset> MonomialExponentMap;
-
-/** An index data structure for node multisets (monomials) */
-class MonomialIndex
-{
- public:
- /**
- * Add term to this trie. The argument status indicates what the status
- * of n is with respect to the current node in the trie, where:
- * 0 : n is equal, -1 : n is superset, 1 : n is subset
- * of the node described by the current path in the trie.
- */
- void addTerm(Node n,
- const std::vector<Node>& reps,
- MonomialDb* nla,
- int status = 0,
- unsigned argIndex = 0);
-
- private:
- /** The children of this node */
- std::map<Node, MonomialIndex> d_data;
- /** The monomials at this node */
- std::vector<Node> d_monos;
-}; /* class MonomialIndex */
-
-/** Context-independent database for monomial information */
-class MonomialDb
-{
- public:
- MonomialDb();
- ~MonomialDb() {}
- /** register monomial */
- void registerMonomial(Node n);
- /**
- * Register monomial subset. This method is called when we infer that
- * monomial a is a subset of monomial b, e.g. x*y^2 is a subset of x^3*y^2*z.
- */
- void registerMonomialSubset(Node a, Node b);
- /**
- * returns true if the multiset containing the
- * factors of monomial a is a subset of the multiset
- * containing the factors of monomial b.
- */
- bool isMonomialSubset(Node a, Node b) const;
- /** Returns the NodeMultiset for a registered monomial. */
- const NodeMultiset& getMonomialExponentMap(Node monomial) const;
- /** Returns the exponent of variable v in the given monomial */
- unsigned getExponent(Node monomial, Node v) const;
- /** Get the list of unique variables in the monomial */
- const std::vector<Node>& getVariableList(Node monomial) const;
- /** Get degree of monomial, e.g. the degree of x^2*y^2 = 4 */
- unsigned getDegree(Node monomial) const;
- /** Sort monomials in ms by their degree
- *
- * Updates ms so that degree(ms[i]) <= degree(ms[j]) for i <= j.
- */
- void sortByDegree(std::vector<Node>& ms) const;
- /** Sort the variable lists based on model values
- *
- * This updates the variable lists of monomials in ms based on the absolute
- * value of their current model values in m.
- *
- * In other words, for each i, getVariableList(ms[i]) returns
- * v1, ..., vn where |m(v1)| <= ... <= |m(vn)| after this method is invoked.
- */
- void sortVariablesByModel(std::vector<Node>& ms, NlModel& m);
- /** Get monomial contains children map
- *
- * This maps monomials to other monomials that are contained in them, e.g.
- * x^2 * y may map to { x, x^2, y } if these three terms have been
- * registered with this class.
- */
- const std::map<Node, std::vector<Node> >& getContainsChildrenMap();
- /** Get monomial contains parent map, reverse of above */
- const std::map<Node, std::vector<Node> >& getContainsParentMap();
- /**
- * Get contains difference. Returns the multiplicative difference between b
- * and a, i.e. a term equivalent to b/a that does not contain division, or
- * null if it does not exist.
- */
- Node getContainsDiff(Node a, Node b) const;
- /**
- * Get contains difference non-linear. Same as above, but stores terms of kind
- * NONLINEAR_MULT instead of MULT.
- */
- Node getContainsDiffNl(Node a, Node b) const;
- /** Make monomial remainder factor */
- Node mkMonomialRemFactor(Node n, const NodeMultiset& n_exp_rem) const;
-
- private:
- /** commonly used terms */
- Node d_one;
- /** list of all monomials */
- std::vector<Node> d_monomials;
- /** Map from monomials to var^index. */
- MonomialExponentMap d_m_exp;
- /**
- * Mapping from monomials to the list of variables that occur in it. For
- * example, x*x*y*z -> { x, y, z }.
- */
- std::map<Node, std::vector<Node> > d_m_vlist;
- /** Degree information */
- std::map<Node, unsigned> d_m_degree;
- /** monomial index, by sorted variables */
- MonomialIndex d_m_index;
- /** containment ordering */
- std::map<Node, std::vector<Node> > d_m_contain_children;
- std::map<Node, std::vector<Node> > d_m_contain_parent;
- std::map<Node, std::map<Node, Node> > d_m_contain_mult;
- std::map<Node, std::map<Node, Node> > d_m_contain_umult;
-};
-
-} // namespace arith
-} // namespace theory
-} // namespace CVC4
-
-#endif /* CVC4__THEORY__ARITH__NL_MONOMIAL_H */
+++ /dev/null
-/********************* */
-/*! \file nl_solver.cpp
- ** \verbatim
- ** Top contributors (to current version):
- ** Andrew Reynolds
- ** This file is part of the CVC4 project.
- ** Copyright (c) 2009-2019 by the authors listed in the file AUTHORS
- ** in the top-level source directory) and their institutional affiliations.
- ** All rights reserved. See the file COPYING in the top-level source
- ** directory for licensing information.\endverbatim
- **
- ** \brief Implementation of non-linear solver
- **/
-
-#include "theory/arith/nl_solver.h"
-
-#include "options/arith_options.h"
-#include "theory/arith/arith_msum.h"
-#include "theory/arith/arith_utilities.h"
-#include "theory/arith/theory_arith.h"
-#include "theory/theory_model.h"
-
-using namespace CVC4::kind;
-
-namespace CVC4 {
-namespace theory {
-namespace arith {
-
-void debugPrintBound(const char* c, Node coeff, Node x, Kind type, Node rhs)
-{
- Node t = ArithMSum::mkCoeffTerm(coeff, x);
- Trace(c) << t << " " << type << " " << rhs;
-}
-
-bool hasNewMonomials(Node n, const std::vector<Node>& existing)
-{
- std::set<Node> visited;
-
- std::vector<Node> worklist;
- worklist.push_back(n);
- while (!worklist.empty())
- {
- Node current = worklist.back();
- worklist.pop_back();
- if (visited.find(current) == visited.end())
- {
- visited.insert(current);
- if (current.getKind() == NONLINEAR_MULT)
- {
- if (std::find(existing.begin(), existing.end(), current)
- == existing.end())
- {
- return true;
- }
- }
- else
- {
- worklist.insert(worklist.end(), current.begin(), current.end());
- }
- }
- }
- return false;
-}
-
-NlSolver::NlSolver(TheoryArith& containing, NlModel& model)
- : d_containing(containing),
- d_model(model),
- d_cdb(d_mdb),
- d_zero_split(containing.getUserContext())
-{
- NodeManager* nm = NodeManager::currentNM();
- d_true = nm->mkConst(true);
- d_false = nm->mkConst(false);
- d_zero = nm->mkConst(Rational(0));
- d_one = nm->mkConst(Rational(1));
- d_neg_one = nm->mkConst(Rational(-1));
- d_order_points.push_back(d_neg_one);
- d_order_points.push_back(d_zero);
- d_order_points.push_back(d_one);
-}
-
-NlSolver::~NlSolver() {}
-
-void NlSolver::initLastCall(const std::vector<Node>& assertions,
- const std::vector<Node>& false_asserts,
- const std::vector<Node>& xts)
-{
- d_ms_vars.clear();
- d_ms_proc.clear();
- d_ms.clear();
- d_mterms.clear();
- d_m_nconst_factor.clear();
- d_tplane_refine.clear();
- d_ci.clear();
- d_ci_exp.clear();
- d_ci_max.clear();
-
- Trace("nl-ext-mv") << "Extended terms : " << std::endl;
- // for computing congruence
- std::map<Kind, ArgTrie> argTrie;
- for (unsigned i = 0, xsize = xts.size(); i < xsize; i++)
- {
- Node a = xts[i];
- d_model.computeConcreteModelValue(a);
- d_model.computeAbstractModelValue(a);
- d_model.printModelValue("nl-ext-mv", a);
- Kind ak = a.getKind();
- if (ak == NONLINEAR_MULT)
- {
- d_ms.push_back(a);
-
- // context-independent registration
- d_mdb.registerMonomial(a);
-
- const std::vector<Node>& varList = d_mdb.getVariableList(a);
- for (const Node& v : varList)
- {
- if (std::find(d_ms_vars.begin(), d_ms_vars.end(), v) == d_ms_vars.end())
- {
- d_ms_vars.push_back(v);
- }
- Node mvk = d_model.computeAbstractModelValue(v);
- if (!mvk.isConst())
- {
- d_m_nconst_factor[a] = true;
- }
- }
- // mark processed if it has a "one" factor (will look at reduced monomial)?
- }
- }
-
- // register constants
- d_mdb.registerMonomial(d_one);
- for (unsigned j = 0; j < d_order_points.size(); j++)
- {
- Node c = d_order_points[j];
- d_model.computeConcreteModelValue(c);
- d_model.computeAbstractModelValue(c);
- }
-
- // register variables
- Trace("nl-ext-mv") << "Variables in monomials : " << std::endl;
- for (unsigned i = 0; i < d_ms_vars.size(); i++)
- {
- Node v = d_ms_vars[i];
- d_mdb.registerMonomial(v);
- d_model.computeConcreteModelValue(v);
- d_model.computeAbstractModelValue(v);
- d_model.printModelValue("nl-ext-mv", v);
- }
-
- Trace("nl-ext") << "We have " << d_ms.size() << " monomials." << std::endl;
-}
-
-void NlSolver::setMonomialFactor(Node a, Node b, const NodeMultiset& common)
-{
- // Cache the remainder factor of a (with the common part removed), indexed
- // by b, if it has not been computed already.
- std::map<Node, Node>& mono_diff_a = d_mono_diff[a];
- if (mono_diff_a.find(b) == mono_diff_a.end())
- {
- Trace("nl-ext-mono-factor")
- << "Set monomial factor for " << a << "/" << b << std::endl;
- mono_diff_a[b] = d_mdb.mkMonomialRemFactor(a, common);
- }
-}
-
-std::vector<Node> NlSolver::checkSplitZero()
-{
- std::vector<Node> lemmas;
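- // For each monomial variable v not yet split in this user context, add the
- // tautological split (v = 0) OR NOT (v = 0) and require the SAT solver to
- // try the v = 0 branch first.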
- for (unsigned i = 0; i < d_ms_vars.size(); i++)
- {
- Node v = d_ms_vars[i];
- if (d_zero_split.insert(v))
- {
- Node eq = v.eqNode(d_zero);
- eq = Rewriter::rewrite(eq);
- Node literal = d_containing.getValuation().ensureLiteral(eq);
- d_containing.getOutputChannel().requirePhase(literal, true);
- lemmas.push_back(literal.orNode(literal.negate()));
- }
- }
- return lemmas;
-}
-
-void NlSolver::assignOrderIds(std::vector<Node>& vars,
- NodeMultiset& order,
- bool isConcrete,
- bool isAbsolute)
-{
- SortNlModel smv;
- smv.d_nlm = &d_model;
- smv.d_isConcrete = isConcrete;
- smv.d_isAbsolute = isAbsolute;
- smv.d_reverse_order = false;
- std::sort(vars.begin(), vars.end(), smv);
-
- order.clear();
- // assign ordering id's
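- // Interleave the built-in order points in d_order_points with the sorted
- // variables; values that compare equal in the model share an id.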
- unsigned counter = 0;
- unsigned order_index = isConcrete ? 0 : 1;
- Node prev;
- for (unsigned j = 0; j < vars.size(); j++)
- {
- Node x = vars[j];
- Node v = d_model.computeModelValue(x, isConcrete);
- if (!v.isConst())
- {
- Trace("nl-ext-mvo") << "..do not assign order to " << x << " : " << v
- << std::endl;
- // don't assign for non-constant values (transcendental function apps)
- break;
- }
- Trace("nl-ext-mvo") << " order " << x << " : " << v << std::endl;
- if (v != prev)
- {
- // builtin points
- bool success;
- do
- {
- success = false;
- if (order_index < d_order_points.size())
- {
- Node vv = d_model.computeModelValue(d_order_points[order_index],
- isConcrete);
- if (d_model.compareValue(v, vv, isAbsolute) <= 0)
- {
- counter++;
- Trace("nl-ext-mvo") << "O[" << d_order_points[order_index]
- << "] = " << counter << std::endl;
- order[d_order_points[order_index]] = counter;
- prev = vv;
- order_index++;
- success = true;
- }
- }
- } while (success);
- }
- if (prev.isNull() || d_model.compareValue(v, prev, isAbsolute) != 0)
- {
- counter++;
- }
- Trace("nl-ext-mvo") << "O[" << x << "] = " << counter << std::endl;
- order[x] = counter;
- prev = v;
- }
- while (order_index < d_order_points.size())
- {
- counter++;
- Trace("nl-ext-mvo") << "O[" << d_order_points[order_index]
- << "] = " << counter << std::endl;
- order[d_order_points[order_index]] = counter;
- order_index++;
- }
-}
-
-// show a <> 0 by inequalities between variables in monomial a w.r.t 0
-int NlSolver::compareSign(Node oa,
- Node a,
- unsigned a_index,
- int status,
- std::vector<Node>& exp,
- std::vector<Node>& lem)
-{
- Trace("nl-ext-debug") << "Process " << a << " at index " << a_index
- << ", status is " << status << std::endl;
- NodeManager* nm = NodeManager::currentNM();
- Node mvaoa = d_model.computeAbstractModelValue(oa);
- const std::vector<Node>& vla = d_mdb.getVariableList(a);
- if (a_index == vla.size())
- {
- if (mvaoa.getConst<Rational>().sgn() != status)
- {
- Node lemma =
- safeConstructNary(AND, exp).impNode(mkLit(oa, d_zero, status * 2));
- lem.push_back(lemma);
- }
- return status;
- }
- Assert(a_index < vla.size());
- Node av = vla[a_index];
- unsigned aexp = d_mdb.getExponent(a, av);
- // take current sign in model
- Node mvaav = d_model.computeAbstractModelValue(av);
- int sgn = mvaav.getConst<Rational>().sgn();
- Trace("nl-ext-debug") << "Process var " << av << "^" << aexp
- << ", model sign = " << sgn << std::endl;
- if (sgn == 0)
- {
- if (mvaoa.getConst<Rational>().sgn() != 0)
- {
- Node lemma = av.eqNode(d_zero).impNode(oa.eqNode(d_zero));
- lem.push_back(lemma);
- }
- return 0;
- }
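- // An even exponent of av contributes a positive factor (given av != 0), so
- // it does not affect the sign; an odd exponent multiplies the accumulated
- // sign by sgn(av).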
- if (aexp % 2 == 0)
- {
- exp.push_back(av.eqNode(d_zero).negate());
- return compareSign(oa, a, a_index + 1, status, exp, lem);
- }
- exp.push_back(nm->mkNode(sgn == 1 ? GT : LT, av, d_zero));
- return compareSign(oa, a, a_index + 1, status * sgn, exp, lem);
-}
-
-bool NlSolver::compareMonomial(
- Node oa,
- Node a,
- NodeMultiset& a_exp_proc,
- Node ob,
- Node b,
- NodeMultiset& b_exp_proc,
- std::vector<Node>& exp,
- std::vector<Node>& lem,
- std::map<int, std::map<Node, std::map<Node, Node> > >& cmp_infers)
-{
- Trace("nl-ext-comp-debug")
- << "Check |" << a << "| >= |" << b << "|" << std::endl;
- unsigned pexp_size = exp.size();
- if (compareMonomial(
- oa, a, 0, a_exp_proc, ob, b, 0, b_exp_proc, 0, exp, lem, cmp_infers))
- {
- return true;
- }
- exp.resize(pexp_size);
- Trace("nl-ext-comp-debug")
- << "Check |" << b << "| >= |" << a << "|" << std::endl;
- if (compareMonomial(
- ob, b, 0, b_exp_proc, oa, a, 0, a_exp_proc, 0, exp, lem, cmp_infers))
- {
- return true;
- }
- return false;
-}
-
-Node NlSolver::mkLit(Node a, Node b, int status, bool isAbsolute)
-{
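- // Constructs the literal a <op> b, where status 0 denotes equality, 1
- // denotes >=, 2 denotes >, and a negative status swaps the arguments; if
- // isAbsolute is true, absolute values are compared instead.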
- if (status == 0)
- {
- Node a_eq_b = a.eqNode(b);
- if (!isAbsolute)
- {
- return a_eq_b;
- }
- Node negate_b = NodeManager::currentNM()->mkNode(UMINUS, b);
- return a_eq_b.orNode(a.eqNode(negate_b));
- }
- else if (status < 0)
- {
- return mkLit(b, a, -status);
- }
- Assert(status == 1 || status == 2);
- NodeManager* nm = NodeManager::currentNM();
- Kind greater_op = status == 1 ? GEQ : GT;
- if (!isAbsolute)
- {
- return nm->mkNode(greater_op, a, b);
- }
- // return nm->mkNode( greater_op, mkAbs( a ), mkAbs( b ) );
- Node zero = mkRationalNode(0);
- Node a_is_nonnegative = nm->mkNode(GEQ, a, zero);
- Node b_is_nonnegative = nm->mkNode(GEQ, b, zero);
- Node negate_a = nm->mkNode(UMINUS, a);
- Node negate_b = nm->mkNode(UMINUS, b);
- return a_is_nonnegative.iteNode(
- b_is_nonnegative.iteNode(nm->mkNode(greater_op, a, b),
- nm->mkNode(greater_op, a, negate_b)),
- b_is_nonnegative.iteNode(nm->mkNode(greater_op, negate_a, b),
- nm->mkNode(greater_op, negate_a, negate_b)));
-}
-
-bool NlSolver::cmp_holds(Node x,
- Node y,
- std::map<Node, std::map<Node, Node> >& cmp_infers,
- std::vector<Node>& exp,
- std::map<Node, bool>& visited)
-{
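- // Depth-first search over the inference graph cmp_infers: returns true if y
- // is reachable from x, collecting the explanations of the traversed edges
- // in exp; visited guards against cycles.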
- if (x == y)
- {
- return true;
- }
- else if (visited.find(x) != visited.end())
- {
- return false;
- }
- visited[x] = true;
- std::map<Node, std::map<Node, Node> >::iterator it = cmp_infers.find(x);
- if (it != cmp_infers.end())
- {
- for (std::map<Node, Node>::iterator itc = it->second.begin();
- itc != it->second.end();
- ++itc)
- {
- exp.push_back(itc->second);
- if (cmp_holds(itc->first, y, cmp_infers, exp, visited))
- {
- return true;
- }
- exp.pop_back();
- }
- }
- return false;
-}
-
-// trying to show a ( >, = ) b by inequalities between variables in
-// monomials a,b
-bool NlSolver::compareMonomial(
- Node oa,
- Node a,
- unsigned a_index,
- NodeMultiset& a_exp_proc,
- Node ob,
- Node b,
- unsigned b_index,
- NodeMultiset& b_exp_proc,
- int status,
- std::vector<Node>& exp,
- std::vector<Node>& lem,
- std::map<int, std::map<Node, std::map<Node, Node> > >& cmp_infers)
-{
- Trace("nl-ext-comp-debug")
- << "compareMonomial " << oa << " and " << ob << ", indices = " << a_index
- << " " << b_index << std::endl;
- Assert(status == 0 || status == 2);
- const std::vector<Node>& vla = d_mdb.getVariableList(a);
- const std::vector<Node>& vlb = d_mdb.getVariableList(b);
- if (a_index == vla.size() && b_index == vlb.size())
- {
- // finished, compare absolute value of abstract model values
- int modelStatus = d_model.compare(oa, ob, false, true) * -2;
- Trace("nl-ext-comp") << "...finished comparison with " << oa << " <"
- << status << "> " << ob
- << ", model status = " << modelStatus << std::endl;
- if (status != modelStatus)
- {
- Trace("nl-ext-comp-infer")
- << "infer : " << oa << " <" << status << "> " << ob << std::endl;
- if (status == 2)
- {
- // must state that all variables are non-zero
- for (unsigned j = 0; j < vla.size(); j++)
- {
- exp.push_back(vla[j].eqNode(d_zero).negate());
- }
- }
- NodeManager* nm = NodeManager::currentNM();
- Node clem = nm->mkNode(
- IMPLIES, safeConstructNary(AND, exp), mkLit(oa, ob, status, true));
- Trace("nl-ext-comp-lemma") << "comparison lemma : " << clem << std::endl;
- lem.push_back(clem);
- cmp_infers[status][oa][ob] = clem;
- }
- return true;
- }
- // get a/b variable information
- Node av;
- unsigned aexp = 0;
- unsigned avo = 0;
- if (a_index < vla.size())
- {
- av = vla[a_index];
- unsigned aexpTotal = d_mdb.getExponent(a, av);
- Assert(a_exp_proc[av] <= aexpTotal);
- aexp = aexpTotal - a_exp_proc[av];
- if (aexp == 0)
- {
- return compareMonomial(oa,
- a,
- a_index + 1,
- a_exp_proc,
- ob,
- b,
- b_index,
- b_exp_proc,
- status,
- exp,
- lem,
- cmp_infers);
- }
- Assert(d_order_vars.find(av) != d_order_vars.end());
- avo = d_order_vars[av];
- }
- Node bv;
- unsigned bexp = 0;
- unsigned bvo = 0;
- if (b_index < vlb.size())
- {
- bv = vlb[b_index];
- unsigned bexpTotal = d_mdb.getExponent(b, bv);
- Assert(b_exp_proc[bv] <= bexpTotal);
- bexp = bexpTotal - b_exp_proc[bv];
- if (bexp == 0)
- {
- return compareMonomial(oa,
- a,
- a_index,
- a_exp_proc,
- ob,
- b,
- b_index + 1,
- b_exp_proc,
- status,
- exp,
- lem,
- cmp_infers);
- }
- Assert(d_order_vars.find(bv) != d_order_vars.end());
- bvo = d_order_vars[bv];
- }
- // get "one" information
- Assert(d_order_vars.find(d_one) != d_order_vars.end());
- unsigned ovo = d_order_vars[d_one];
- Trace("nl-ext-comp-debug") << "....vars : " << av << "^" << aexp << " " << bv
- << "^" << bexp << std::endl;
-
- //--- cases
- if (av.isNull())
- {
- if (bvo <= ovo)
- {
- Trace("nl-ext-comp-debug") << "...take leading " << bv << std::endl;
- // can multiply b by <=1
- exp.push_back(mkLit(d_one, bv, bvo == ovo ? 0 : 2, true));
- return compareMonomial(oa,
- a,
- a_index,
- a_exp_proc,
- ob,
- b,
- b_index + 1,
- b_exp_proc,
- bvo == ovo ? status : 2,
- exp,
- lem,
- cmp_infers);
- }
- Trace("nl-ext-comp-debug")
- << "...failure, unmatched |b|>1 component." << std::endl;
- return false;
- }
- else if (bv.isNull())
- {
- if (avo >= ovo)
- {
- Trace("nl-ext-comp-debug") << "...take leading " << av << std::endl;
- // can multiply a by >=1
- exp.push_back(mkLit(av, d_one, avo == ovo ? 0 : 2, true));
- return compareMonomial(oa,
- a,
- a_index + 1,
- a_exp_proc,
- ob,
- b,
- b_index,
- b_exp_proc,
- avo == ovo ? status : 2,
- exp,
- lem,
- cmp_infers);
- }
- Trace("nl-ext-comp-debug")
- << "...failure, unmatched |a|<1 component." << std::endl;
- return false;
- }
- Assert(!av.isNull() && !bv.isNull());
- if (avo >= bvo)
- {
- if (bvo < ovo && avo >= ovo)
- {
- Trace("nl-ext-comp-debug") << "...take leading " << av << std::endl;
- // do avo>=1 instead
- exp.push_back(mkLit(av, d_one, avo == ovo ? 0 : 2, true));
- return compareMonomial(oa,
- a,
- a_index + 1,
- a_exp_proc,
- ob,
- b,
- b_index,
- b_exp_proc,
- avo == ovo ? status : 2,
- exp,
- lem,
- cmp_infers);
- }
- unsigned min_exp = aexp > bexp ? bexp : aexp;
- a_exp_proc[av] += min_exp;
- b_exp_proc[bv] += min_exp;
- Trace("nl-ext-comp-debug") << "...take leading " << min_exp << " from "
- << av << " and " << bv << std::endl;
- exp.push_back(mkLit(av, bv, avo == bvo ? 0 : 2, true));
- bool ret = compareMonomial(oa,
- a,
- a_index,
- a_exp_proc,
- ob,
- b,
- b_index,
- b_exp_proc,
- avo == bvo ? status : 2,
- exp,
- lem,
- cmp_infers);
- a_exp_proc[av] -= min_exp;
- b_exp_proc[bv] -= min_exp;
- return ret;
- }
- if (bvo <= ovo)
- {
- Trace("nl-ext-comp-debug") << "...take leading " << bv << std::endl;
- // try multiply b <= 1
- exp.push_back(mkLit(d_one, bv, bvo == ovo ? 0 : 2, true));
- return compareMonomial(oa,
- a,
- a_index,
- a_exp_proc,
- ob,
- b,
- b_index + 1,
- b_exp_proc,
- bvo == ovo ? status : 2,
- exp,
- lem,
- cmp_infers);
- }
- Trace("nl-ext-comp-debug")
- << "...failure, leading |b|>|a|>1 component." << std::endl;
- return false;
-}
-
-std::vector<Node> NlSolver::checkMonomialSign()
-{
- std::vector<Node> lemmas;
- std::map<Node, int> signs;
- Trace("nl-ext") << "Get monomial sign lemmas..." << std::endl;
- for (unsigned j = 0; j < d_ms.size(); j++)
- {
- Node a = d_ms[j];
- if (d_ms_proc.find(a) == d_ms_proc.end())
- {
- std::vector<Node> exp;
- if (Trace.isOn("nl-ext-debug"))
- {
- Node cmva = d_model.computeConcreteModelValue(a);
- Trace("nl-ext-debug")
- << " process " << a << ", mv=" << cmva << "..." << std::endl;
- }
- if (d_m_nconst_factor.find(a) == d_m_nconst_factor.end())
- {
- signs[a] = compareSign(a, a, 0, 1, exp, lemmas);
- if (signs[a] == 0)
- {
- d_ms_proc[a] = true;
- Trace("nl-ext-debug")
- << "...mark " << a << " reduced since its value is 0."
- << std::endl;
- }
- }
- else
- {
- Trace("nl-ext-debug")
- << "...can't conclude sign lemma for " << a
- << " since model value of a factor is non-constant." << std::endl;
- }
- }
- }
- return lemmas;
-}
-
-std::vector<Node> NlSolver::checkMonomialMagnitude(unsigned c)
-{
- // ensure information is setup
- if (c == 0)
- {
- // sort by absolute values of abstract model values
- assignOrderIds(d_ms_vars, d_order_vars, false, true);
-
- // sort individual variable lists
- Trace("nl-ext-proc") << "Assign order var lists..." << std::endl;
- d_mdb.sortVariablesByModel(d_ms, d_model);
- }
-
- unsigned r = 1;
- std::vector<Node> lemmas;
- // if (x,y,L) in cmp_infers, then x > y inferred as conclusion of L
- // in lemmas
- std::map<int, std::map<Node, std::map<Node, Node> > > cmp_infers;
- Trace("nl-ext") << "Get monomial comparison lemmas (order=" << r
- << ", compare=" << c << ")..." << std::endl;
- for (unsigned j = 0; j < d_ms.size(); j++)
- {
- Node a = d_ms[j];
- if (d_ms_proc.find(a) == d_ms_proc.end()
- && d_m_nconst_factor.find(a) == d_m_nconst_factor.end())
- {
- if (c == 0)
- {
- // compare magnitude against 1
- std::vector<Node> exp;
- NodeMultiset a_exp_proc;
- NodeMultiset b_exp_proc;
- compareMonomial(a,
- a,
- a_exp_proc,
- d_one,
- d_one,
- b_exp_proc,
- exp,
- lemmas,
- cmp_infers);
- }
- else
- {
- const NodeMultiset& mea = d_mdb.getMonomialExponentMap(a);
- if (c == 1)
- {
- // could compare not just against containing variables?
- // compare magnitude against variables
- for (unsigned k = 0; k < d_ms_vars.size(); k++)
- {
- Node v = d_ms_vars[k];
- Node mvcv = d_model.computeConcreteModelValue(v);
- if (mvcv.isConst())
- {
- std::vector<Node> exp;
- NodeMultiset a_exp_proc;
- NodeMultiset b_exp_proc;
- if (mea.find(v) != mea.end())
- {
- a_exp_proc[v] = 1;
- b_exp_proc[v] = 1;
- setMonomialFactor(a, v, a_exp_proc);
- setMonomialFactor(v, a, b_exp_proc);
- compareMonomial(a,
- a,
- a_exp_proc,
- v,
- v,
- b_exp_proc,
- exp,
- lemmas,
- cmp_infers);
- }
- }
- }
- }
- else
- {
- // compare magnitude against other non-linear monomials
- for (unsigned k = (j + 1); k < d_ms.size(); k++)
- {
- Node b = d_ms[k];
- //(signs[a]==signs[b])==(r==0)
- if (d_ms_proc.find(b) == d_ms_proc.end()
- && d_m_nconst_factor.find(b) == d_m_nconst_factor.end())
- {
- const NodeMultiset& meb = d_mdb.getMonomialExponentMap(b);
-
- std::vector<Node> exp;
- // take common factors of monomials, set minimum of
- // common exponents as processed
- NodeMultiset a_exp_proc;
- NodeMultiset b_exp_proc;
- for (NodeMultiset::const_iterator itmea2 = mea.begin();
- itmea2 != mea.end();
- ++itmea2)
- {
- NodeMultiset::const_iterator itmeb2 = meb.find(itmea2->first);
- if (itmeb2 != meb.end())
- {
- unsigned min_exp = itmea2->second > itmeb2->second
- ? itmeb2->second
- : itmea2->second;
- a_exp_proc[itmea2->first] = min_exp;
- b_exp_proc[itmea2->first] = min_exp;
- Trace("nl-ext-comp") << "Common exponent : " << itmea2->first
- << " : " << min_exp << std::endl;
- }
- }
- if (!a_exp_proc.empty())
- {
- setMonomialFactor(a, b, a_exp_proc);
- setMonomialFactor(b, a, b_exp_proc);
- }
- /*
- if (!a_exp_proc.empty()) {
-   // reduction based on common exponents:
-   //   a > 0 => ( a*b <> a*c <=> b <> c ),
-   //   a < 0 => ( a*b <> a*c <=> b !<> c ) ?
- } else {
-   compareMonomial(a, a, a_exp_proc, b, b, b_exp_proc, exp, lemmas);
- }
- */
- compareMonomial(
- a, a, a_exp_proc, b, b, b_exp_proc, exp, lemmas, cmp_infers);
- }
- }
- }
- }
- }
- }
- // remove redundant lemmas, e.g. if a > b, b > c, a > c were
- // inferred, discard lemma with conclusion a > c
- Trace("nl-ext-comp") << "Compute redundancies for " << lemmas.size()
- << " lemmas." << std::endl;
- // naive
- std::vector<Node> r_lemmas;
- for (std::map<int, std::map<Node, std::map<Node, Node> > >::iterator itb =
- cmp_infers.begin();
- itb != cmp_infers.end();
- ++itb)
- {
- for (std::map<Node, std::map<Node, Node> >::iterator itc =
- itb->second.begin();
- itc != itb->second.end();
- ++itc)
- {
- for (std::map<Node, Node>::iterator itc2 = itc->second.begin();
- itc2 != itc->second.end();
- ++itc2)
- {
- std::map<Node, bool> visited;
- for (std::map<Node, Node>::iterator itc3 = itc->second.begin();
- itc3 != itc->second.end();
- ++itc3)
- {
- if (itc3->first != itc2->first)
- {
- std::vector<Node> exp;
- if (cmp_holds(itc3->first, itc2->first, itb->second, exp, visited))
- {
- r_lemmas.push_back(itc2->second);
- Trace("nl-ext-comp")
- << "...inference of " << itc->first << " > " << itc2->first
- << " was redundant." << std::endl;
- break;
- }
- }
- }
- }
- }
- }
- std::vector<Node> nr_lemmas;
- for (unsigned i = 0; i < lemmas.size(); i++)
- {
- if (std::find(r_lemmas.begin(), r_lemmas.end(), lemmas[i])
- == r_lemmas.end())
- {
- nr_lemmas.push_back(lemmas[i]);
- }
- }
- // could only take maximal lower/minimal upper bounds?
-
- Trace("nl-ext-comp") << nr_lemmas.size() << " / " << lemmas.size()
- << " were non-redundant." << std::endl;
- return nr_lemmas;
-}
-
-std::vector<Node> NlSolver::checkTangentPlanes()
-{
- std::vector<Node> lemmas;
- Trace("nl-ext") << "Get monomial tangent plane lemmas..." << std::endl;
- NodeManager* nm = NodeManager::currentNM();
- const std::map<Node, std::vector<Node> >& ccMap =
- d_mdb.getContainsChildrenMap();
- unsigned kstart = d_ms_vars.size();
- for (unsigned k = kstart; k < d_mterms.size(); k++)
- {
- Node t = d_mterms[k];
- // if this term requires a refinement
- if (d_tplane_refine.find(t) == d_tplane_refine.end())
- {
- continue;
- }
- Trace("nl-ext-tplanes")
- << "Look at monomial requiring refinement : " << t << std::endl;
- // get a decomposition
- std::map<Node, std::vector<Node> >::const_iterator it = ccMap.find(t);
- if (it == ccMap.end())
- {
- continue;
- }
- std::map<Node, std::map<Node, bool> > dproc;
- for (unsigned j = 0; j < it->second.size(); j++)
- {
- Node tc = it->second[j];
- if (tc != d_one)
- {
- Node tc_diff = d_mdb.getContainsDiffNl(tc, t);
- Assert(!tc_diff.isNull());
- Node a = tc < tc_diff ? tc : tc_diff;
- Node b = tc < tc_diff ? tc_diff : tc;
- if (dproc[a].find(b) == dproc[a].end())
- {
- dproc[a][b] = true;
- Trace("nl-ext-tplanes")
- << " decomposable into : " << a << " * " << b << std::endl;
- Node a_v_c = d_model.computeAbstractModelValue(a);
- Node b_v_c = d_model.computeAbstractModelValue(b);
- // points we will add tangent planes for
- std::vector<Node> pts[2];
- pts[0].push_back(a_v_c);
- pts[1].push_back(b_v_c);
- // if previously refined
- bool prevRefine = d_tangent_val_bound[0][a].find(b)
- != d_tangent_val_bound[0][a].end();
- // a_min, a_max, b_min, b_max
- for (unsigned p = 0; p < 4; p++)
- {
- Node curr_v = p <= 1 ? a_v_c : b_v_c;
- if (prevRefine)
- {
- Node pt_v = d_tangent_val_bound[p][a][b];
- Assert(!pt_v.isNull());
- if (curr_v != pt_v)
- {
- Node do_extend =
- nm->mkNode((p == 1 || p == 3) ? GT : LT, curr_v, pt_v);
- do_extend = Rewriter::rewrite(do_extend);
- if (do_extend == d_true)
- {
- for (unsigned q = 0; q < 2; q++)
- {
- pts[p <= 1 ? 0 : 1].push_back(curr_v);
- pts[p <= 1 ? 1 : 0].push_back(
- d_tangent_val_bound[p <= 1 ? 2 + q : q][a][b]);
- }
- }
- }
- }
- else
- {
- d_tangent_val_bound[p][a][b] = curr_v;
- }
- }
-
- for (unsigned p = 0; p < pts[0].size(); p++)
- {
- Node a_v = pts[0][p];
- Node b_v = pts[1][p];
-
- // tangent plane
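- // For t = a*b, the tangent plane at (a_v, b_v) is b_v*a + a_v*b - a_v*b_v.
- // Since t - tplane = (a - a_v)*(b - b_v), t is above the plane when a and b
- // are on the same side of their sample values and below it otherwise.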
- Node tplane = nm->mkNode(
- MINUS,
- nm->mkNode(
- PLUS, nm->mkNode(MULT, b_v, a), nm->mkNode(MULT, a_v, b)),
- nm->mkNode(MULT, a_v, b_v));
- for (unsigned d = 0; d < 4; d++)
- {
- Node aa = nm->mkNode(d == 0 || d == 3 ? GEQ : LEQ, a, a_v);
- Node ab = nm->mkNode(d == 1 || d == 3 ? GEQ : LEQ, b, b_v);
- Node conc = nm->mkNode(d <= 1 ? LEQ : GEQ, t, tplane);
- Node tlem = nm->mkNode(OR, aa.negate(), ab.negate(), conc);
- Trace("nl-ext-tplanes")
- << "Tangent plane lemma : " << tlem << std::endl;
- lemmas.push_back(tlem);
- }
-
- // tangent plane reverse implication
-
- // t <= tplane -> ( (a <= a_v ^ b >= b_v) v
- // (a >= a_v ^ b <= b_v) ).
- // in clause form, the above becomes
- // t <= tplane -> a <= a_v v b <= b_v.
- // t <= tplane -> b >= b_v v a >= a_v.
- Node a_leq_av = nm->mkNode(LEQ, a, a_v);
- Node b_leq_bv = nm->mkNode(LEQ, b, b_v);
- Node a_geq_av = nm->mkNode(GEQ, a, a_v);
- Node b_geq_bv = nm->mkNode(GEQ, b, b_v);
-
- Node t_leq_tplane = nm->mkNode(LEQ, t, tplane);
- Node a_leq_av_or_b_leq_bv = nm->mkNode(OR, a_leq_av, b_leq_bv);
- Node b_geq_bv_or_a_geq_av = nm->mkNode(OR, b_geq_bv, a_geq_av);
- Node ub_reverse1 =
- nm->mkNode(OR, t_leq_tplane.negate(), a_leq_av_or_b_leq_bv);
- Trace("nl-ext-tplanes")
- << "Tangent plane lemma (reverse) : " << ub_reverse1
- << std::endl;
- lemmas.push_back(ub_reverse1);
- Node ub_reverse2 =
- nm->mkNode(OR, t_leq_tplane.negate(), b_geq_bv_or_a_geq_av);
- Trace("nl-ext-tplanes")
- << "Tangent plane lemma (reverse) : " << ub_reverse2
- << std::endl;
- lemmas.push_back(ub_reverse2);
-
- // t >= tplane -> ( (a <= a_v ^ b <= b_v) v
- // (a >= a_v ^ b >= b_v) ).
- // in clause form, the above becomes
- // t >= tplane -> a <= a_v v b >= b_v.
- // t >= tplane -> b >= b_v v a <= a_v
- Node t_geq_tplane = nm->mkNode(GEQ, t, tplane);
- Node a_leq_av_or_b_geq_bv = nm->mkNode(OR, a_leq_av, b_geq_bv);
- Node a_geq_av_or_b_leq_bv = nm->mkNode(OR, a_geq_av, b_leq_bv);
- Node lb_reverse1 =
- nm->mkNode(OR, t_geq_tplane.negate(), a_leq_av_or_b_geq_bv);
- Trace("nl-ext-tplanes")
- << "Tangent plane lemma (reverse) : " << lb_reverse1
- << std::endl;
- lemmas.push_back(lb_reverse1);
- Node lb_reverse2 =
- nm->mkNode(OR, t_geq_tplane.negate(), a_geq_av_or_b_leq_bv);
- Trace("nl-ext-tplanes")
- << "Tangent plane lemma (reverse) : " << lb_reverse2
- << std::endl;
- lemmas.push_back(lb_reverse2);
- }
- }
- }
- }
- }
- Trace("nl-ext") << "...trying " << lemmas.size() << " tangent plane lemmas..."
- << std::endl;
- return lemmas;
-}
-
-std::vector<Node> NlSolver::checkMonomialInferBounds(
- std::vector<Node>& nt_lemmas,
- const std::vector<Node>& asserts,
- const std::vector<Node>& false_asserts)
-{
- // sort monomials by degree
- Trace("nl-ext-proc") << "Sort monomials by degree..." << std::endl;
- d_mdb.sortByDegree(d_ms);
- // all monomials
- d_mterms.insert(d_mterms.end(), d_ms_vars.begin(), d_ms_vars.end());
- d_mterms.insert(d_mterms.end(), d_ms.begin(), d_ms.end());
-
- const std::map<Node, std::map<Node, ConstraintInfo> >& cim =
- d_cdb.getConstraints();
-
- std::vector<Node> lemmas;
- NodeManager* nm = NodeManager::currentNM();
- // register constraints
- Trace("nl-ext-debug") << "Register bound constraints..." << std::endl;
- for (const Node& lit : asserts)
- {
- bool polarity = lit.getKind() != NOT;
- Node atom = lit.getKind() == NOT ? lit[0] : lit;
- d_cdb.registerConstraint(atom);
- bool is_false_lit =
- std::find(false_asserts.begin(), false_asserts.end(), lit)
- != false_asserts.end();
- // add information about bounds to variables
- std::map<Node, std::map<Node, ConstraintInfo> >::const_iterator itc =
- cim.find(atom);
- if (itc == cim.end())
- {
- continue;
- }
- for (const std::pair<const Node, ConstraintInfo>& itcc : itc->second)
- {
- Node x = itcc.first;
- Node coeff = itcc.second.d_coeff;
- Node rhs = itcc.second.d_rhs;
- Kind type = itcc.second.d_type;
- Node exp = lit;
- if (!polarity)
- {
- // reverse
- if (type == EQUAL)
- {
- // we will take the strict inequality in the direction of the
- // model
- Node lhs = ArithMSum::mkCoeffTerm(coeff, x);
- Node query = nm->mkNode(GT, lhs, rhs);
- Node query_mv = d_model.computeAbstractModelValue(query);
- if (query_mv == d_true)
- {
- exp = query;
- type = GT;
- }
- else
- {
- Assert(query_mv == d_false);
- exp = nm->mkNode(LT, lhs, rhs);
- type = LT;
- }
- }
- else
- {
- type = negateKind(type);
- }
- }
- // add to status if maximal degree
- d_ci_max[x][coeff][rhs] = d_cdb.isMaximal(atom, x);
- if (Trace.isOn("nl-ext-bound-debug2"))
- {
- Node t = ArithMSum::mkCoeffTerm(coeff, x);
- Trace("nl-ext-bound-debug2") << "Add Bound: " << t << " " << type << " "
- << rhs << " by " << exp << std::endl;
- }
- bool updated = true;
- std::map<Node, Kind>::iterator its = d_ci[x][coeff].find(rhs);
- if (its == d_ci[x][coeff].end())
- {
- d_ci[x][coeff][rhs] = type;
- d_ci_exp[x][coeff][rhs] = exp;
- }
- else if (type != its->second)
- {
- Trace("nl-ext-bound-debug2")
- << "Joining kinds : " << type << " " << its->second << std::endl;
- Kind jk = joinKinds(type, its->second);
- if (jk == UNDEFINED_KIND)
- {
- updated = false;
- }
- else if (jk != its->second)
- {
- if (jk == type)
- {
- d_ci[x][coeff][rhs] = type;
- d_ci_exp[x][coeff][rhs] = exp;
- }
- else
- {
- d_ci[x][coeff][rhs] = jk;
- d_ci_exp[x][coeff][rhs] =
- nm->mkNode(AND, d_ci_exp[x][coeff][rhs], exp);
- }
- }
- else
- {
- updated = false;
- }
- }
- if (Trace.isOn("nl-ext-bound"))
- {
- if (updated)
- {
- Trace("nl-ext-bound") << "Bound: ";
- debugPrintBound("nl-ext-bound", coeff, x, d_ci[x][coeff][rhs], rhs);
- Trace("nl-ext-bound") << " by " << d_ci_exp[x][coeff][rhs];
- if (d_ci_max[x][coeff][rhs])
- {
- Trace("nl-ext-bound") << ", is max degree";
- }
- Trace("nl-ext-bound") << std::endl;
- }
- }
- // compute if bound is not satisfied, and store what is required
- // for a possible refinement
- if (options::nlExtTangentPlanes())
- {
- if (is_false_lit)
- {
- d_tplane_refine.insert(x);
- }
- }
- }
- }
- // reflexive constraints
- Node null_coeff;
- for (unsigned j = 0; j < d_mterms.size(); j++)
- {
- Node n = d_mterms[j];
- d_ci[n][null_coeff][n] = EQUAL;
- d_ci_exp[n][null_coeff][n] = d_true;
- d_ci_max[n][null_coeff][n] = false;
- }
-
- Trace("nl-ext") << "Get inferred bound lemmas..." << std::endl;
- const std::map<Node, std::vector<Node> >& cpMap =
- d_mdb.getContainsParentMap();
- for (unsigned k = 0; k < d_mterms.size(); k++)
- {
- Node x = d_mterms[k];
- Trace("nl-ext-bound-debug")
- << "Process bounds for " << x << " : " << std::endl;
- std::map<Node, std::vector<Node> >::const_iterator itm = cpMap.find(x);
- if (itm == cpMap.end())
- {
- Trace("nl-ext-bound-debug") << "...has no parent monomials." << std::endl;
- continue;
- }
- Trace("nl-ext-bound-debug")
- << "...has " << itm->second.size() << " parent monomials." << std::endl;
- // check derived bounds
- std::map<Node, std::map<Node, std::map<Node, Kind> > >::iterator itc =
- d_ci.find(x);
- if (itc == d_ci.end())
- {
- continue;
- }
- for (std::map<Node, std::map<Node, Kind> >::iterator itcc =
- itc->second.begin();
- itcc != itc->second.end();
- ++itcc)
- {
- Node coeff = itcc->first;
- Node t = ArithMSum::mkCoeffTerm(coeff, x);
- for (std::map<Node, Kind>::iterator itcr = itcc->second.begin();
- itcr != itcc->second.end();
- ++itcr)
- {
- Node rhs = itcr->first;
- // only consider this bound if maximal degree
- if (!d_ci_max[x][coeff][rhs])
- {
- continue;
- }
- Kind type = itcr->second;
- for (unsigned j = 0; j < itm->second.size(); j++)
- {
- Node y = itm->second[j];
- Node mult = d_mdb.getContainsDiff(x, y);
- // x <k> t => m*x <k'> t where y = m*x
- // get the sign of mult
- Node mmv = d_model.computeConcreteModelValue(mult);
- Trace("nl-ext-bound-debug2")
- << "Model value of " << mult << " is " << mmv << std::endl;
- if (!mmv.isConst())
- {
- Trace("nl-ext-bound-debug")
- << " ...coefficient " << mult
- << " is non-constant (probably transcendental)." << std::endl;
- continue;
- }
- int mmv_sign = mmv.getConst<Rational>().sgn();
- Trace("nl-ext-bound-debug2")
- << " sign of " << mmv << " is " << mmv_sign << std::endl;
- if (mmv_sign == 0)
- {
- Trace("nl-ext-bound-debug")
- << " ...coefficient " << mult << " is zero." << std::endl;
- continue;
- }
- Trace("nl-ext-bound-debug")
- << " from " << x << " * " << mult << " = " << y << " and " << t
- << " " << type << " " << rhs << ", infer : " << std::endl;
- Kind infer_type = mmv_sign == -1 ? reverseRelationKind(type) : type;
- Node infer_lhs = nm->mkNode(MULT, mult, t);
- Node infer_rhs = nm->mkNode(MULT, mult, rhs);
- Node infer = nm->mkNode(infer_type, infer_lhs, infer_rhs);
- Trace("nl-ext-bound-debug") << " " << infer << std::endl;
- infer = Rewriter::rewrite(infer);
- Trace("nl-ext-bound-debug2")
- << " ...rewritten : " << infer << std::endl;
- // check whether it is false in model for abstraction
- Node infer_mv = d_model.computeAbstractModelValue(infer);
- Trace("nl-ext-bound-debug")
- << " ...infer model value is " << infer_mv << std::endl;
- if (infer_mv == d_false)
- {
- Node exp =
- nm->mkNode(AND,
- nm->mkNode(mmv_sign == 1 ? GT : LT, mult, d_zero),
- d_ci_exp[x][coeff][rhs]);
- Node iblem = nm->mkNode(IMPLIES, exp, infer);
- Node pr_iblem = iblem;
- iblem = Rewriter::rewrite(iblem);
- bool introNewTerms = hasNewMonomials(iblem, d_ms);
- Trace("nl-ext-bound-lemma")
- << "*** Bound inference lemma : " << iblem
- << " (pre-rewrite : " << pr_iblem << ")" << std::endl;
- // Trace("nl-ext-bound-lemma") << " intro new
- // monomials = " << introNewTerms << std::endl;
- if (!introNewTerms)
- {
- lemmas.push_back(iblem);
- }
- else
- {
- nt_lemmas.push_back(iblem);
- }
- }
- }
- }
- }
- }
- return lemmas;
-}
-
-std::vector<Node> NlSolver::checkFactoring(
- const std::vector<Node>& asserts, const std::vector<Node>& false_asserts)
-{
- std::vector<Node> lemmas;
- NodeManager* nm = NodeManager::currentNM();
- Trace("nl-ext") << "Get factoring lemmas..." << std::endl;
- for (const Node& lit : asserts)
- {
- bool polarity = lit.getKind() != NOT;
- Node atom = lit.getKind() == NOT ? lit[0] : lit;
- Node litv = d_model.computeConcreteModelValue(lit);
- bool considerLit = false;
- // Only consider literals that are in false_asserts.
- considerLit = std::find(false_asserts.begin(), false_asserts.end(), lit)
- != false_asserts.end();
-
- if (considerLit)
- {
- std::map<Node, Node> msum;
- if (ArithMSum::getMonomialSumLit(atom, msum))
- {
- Trace("nl-ext-factor") << "Factoring for literal " << lit
- << ", monomial sum is : " << std::endl;
- if (Trace.isOn("nl-ext-factor"))
- {
- ArithMSum::debugPrintMonomialSum(msum, "nl-ext-factor");
- }
- std::map<Node, std::vector<Node> > factor_to_mono;
- std::map<Node, std::vector<Node> > factor_to_mono_orig;
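- // Map each variable occurring in a non-linear monomial of the sum to the
- // cofactors obtained by factoring it out, and to the monomials it was
- // factored from.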
- for (std::map<Node, Node>::iterator itm = msum.begin();
- itm != msum.end();
- ++itm)
- {
- if (!itm->first.isNull())
- {
- if (itm->first.getKind() == NONLINEAR_MULT)
- {
- std::vector<Node> children;
- for (unsigned i = 0; i < itm->first.getNumChildren(); i++)
- {
- children.push_back(itm->first[i]);
- }
- std::map<Node, bool> processed;
- for (unsigned i = 0; i < itm->first.getNumChildren(); i++)
- {
- if (processed.find(itm->first[i]) == processed.end())
- {
- processed[itm->first[i]] = true;
- children[i] = d_one;
- if (!itm->second.isNull())
- {
- children.push_back(itm->second);
- }
- Node val = nm->mkNode(MULT, children);
- if (!itm->second.isNull())
- {
- children.pop_back();
- }
- children[i] = itm->first[i];
- val = Rewriter::rewrite(val);
- factor_to_mono[itm->first[i]].push_back(val);
- factor_to_mono_orig[itm->first[i]].push_back(itm->first);
- }
- }
- }
- }
- }
- for (std::map<Node, std::vector<Node> >::iterator itf =
- factor_to_mono.begin();
- itf != factor_to_mono.end();
- ++itf)
- {
- Node x = itf->first;
- if (itf->second.size() == 1)
- {
- std::map<Node, Node>::iterator itm = msum.find(x);
- if (itm != msum.end())
- {
- itf->second.push_back(itm->second.isNull() ? d_one : itm->second);
- factor_to_mono_orig[x].push_back(x);
- }
- }
- if (itf->second.size() <= 1)
- {
- continue;
- }
- Node sum = nm->mkNode(PLUS, itf->second);
- sum = Rewriter::rewrite(sum);
- Trace("nl-ext-factor")
- << "* Factored sum for " << x << " : " << sum << std::endl;
- Node kf = getFactorSkolem(sum, lemmas);
- std::vector<Node> poly;
- poly.push_back(nm->mkNode(MULT, x, kf));
- std::map<Node, std::vector<Node> >::iterator itfo =
- factor_to_mono_orig.find(x);
- Assert(itfo != factor_to_mono_orig.end());
- for (std::map<Node, Node>::iterator itm = msum.begin();
- itm != msum.end();
- ++itm)
- {
- if (std::find(itfo->second.begin(), itfo->second.end(), itm->first)
- == itfo->second.end())
- {
- poly.push_back(ArithMSum::mkCoeffTerm(
- itm->second, itm->first.isNull() ? d_one : itm->first));
- }
- }
- Node polyn = poly.size() == 1 ? poly[0] : nm->mkNode(PLUS, poly);
- Trace("nl-ext-factor")
- << "...factored polynomial : " << polyn << std::endl;
- Node conc_lit = nm->mkNode(atom.getKind(), polyn, d_zero);
- conc_lit = Rewriter::rewrite(conc_lit);
- if (!polarity)
- {
- conc_lit = conc_lit.negate();
- }
-
- std::vector<Node> lemma_disj;
- lemma_disj.push_back(lit.negate());
- lemma_disj.push_back(conc_lit);
- Node flem = nm->mkNode(OR, lemma_disj);
- Trace("nl-ext-factor") << "...lemma is " << flem << std::endl;
- lemmas.push_back(flem);
- }
- }
- }
- }
- return lemmas;
-}
-
-Node NlSolver::getFactorSkolem(Node n, std::vector<Node>& lemmas)
-{
- std::map<Node, Node>::iterator itf = d_factor_skolem.find(n);
- if (itf == d_factor_skolem.end())
- {
- NodeManager* nm = NodeManager::currentNM();
- Node k = nm->mkSkolem("kf", n.getType());
- Node k_eq = Rewriter::rewrite(k.eqNode(n));
- lemmas.push_back(k_eq);
- d_factor_skolem[n] = k;
- return k;
- }
- return itf->second;
-}
-
-std::vector<Node> NlSolver::checkMonomialInferResBounds()
-{
- std::vector<Node> lemmas;
- NodeManager* nm = NodeManager::currentNM();
- Trace("nl-ext") << "Get monomial resolution inferred bound lemmas..."
- << std::endl;
- size_t nmterms = d_mterms.size();
- for (unsigned j = 0; j < nmterms; j++)
- {
- Node a = d_mterms[j];
- std::map<Node, std::map<Node, std::map<Node, Kind> > >::iterator itca =
- d_ci.find(a);
- if (itca == d_ci.end())
- {
- continue;
- }
- for (unsigned k = (j + 1); k < nmterms; k++)
- {
- Node b = d_mterms[k];
- std::map<Node, std::map<Node, std::map<Node, Kind> > >::iterator itcb =
- d_ci.find(b);
- if (itcb == d_ci.end())
- {
- continue;
- }
- Trace("nl-ext-rbound-debug") << "resolution inferences : compare " << a
- << " and " << b << std::endl;
- // if they have common factors
- std::map<Node, Node>::iterator ita = d_mono_diff[a].find(b);
- if (ita == d_mono_diff[a].end())
- {
- continue;
- }
- Trace("nl-ext-rbound") << "Get resolution inferences for [a] " << a
- << " vs [b] " << b << std::endl;
- std::map<Node, Node>::iterator itb = d_mono_diff[b].find(a);
- Assert(itb != d_mono_diff[b].end());
- Node mv_a = d_model.computeAbstractModelValue(ita->second);
- Assert(mv_a.isConst());
- int mv_a_sgn = mv_a.getConst<Rational>().sgn();
- if (mv_a_sgn == 0)
- {
- // we don't compare monomials whose current model value is zero
- continue;
- }
- Node mv_b = d_model.computeAbstractModelValue(itb->second);
- Assert(mv_b.isConst());
- int mv_b_sgn = mv_b.getConst<Rational>().sgn();
- if (mv_b_sgn == 0)
- {
- // we don't compare monomials whose current model value is zero
- continue;
- }
- Trace("nl-ext-rbound") << " [a] factor is " << ita->second
- << ", sign in model = " << mv_a_sgn << std::endl;
- Trace("nl-ext-rbound") << " [b] factor is " << itb->second
- << ", sign in model = " << mv_b_sgn << std::endl;
-
- std::vector<Node> exp;
- // bounds of a
- for (std::map<Node, std::map<Node, Kind> >::iterator itcac =
- itca->second.begin();
- itcac != itca->second.end();
- ++itcac)
- {
- Node coeff_a = itcac->first;
- for (std::map<Node, Kind>::iterator itcar = itcac->second.begin();
- itcar != itcac->second.end();
- ++itcar)
- {
- Node rhs_a = itcar->first;
- Node rhs_a_res_base = nm->mkNode(MULT, itb->second, rhs_a);
- rhs_a_res_base = Rewriter::rewrite(rhs_a_res_base);
- if (hasNewMonomials(rhs_a_res_base, d_ms))
- {
- continue;
- }
- Kind type_a = itcar->second;
- exp.push_back(d_ci_exp[a][coeff_a][rhs_a]);
-
- // bounds of b
- for (std::map<Node, std::map<Node, Kind> >::iterator itcbc =
- itcb->second.begin();
- itcbc != itcb->second.end();
- ++itcbc)
- {
- Node coeff_b = itcbc->first;
- Node rhs_a_res = ArithMSum::mkCoeffTerm(coeff_b, rhs_a_res_base);
- for (std::map<Node, Kind>::iterator itcbr = itcbc->second.begin();
- itcbr != itcbc->second.end();
- ++itcbr)
- {
- Node rhs_b = itcbr->first;
- Node rhs_b_res = nm->mkNode(MULT, ita->second, rhs_b);
- rhs_b_res = ArithMSum::mkCoeffTerm(coeff_a, rhs_b_res);
- rhs_b_res = Rewriter::rewrite(rhs_b_res);
- if (hasNewMonomials(rhs_b_res, d_ms))
- {
- continue;
- }
- Kind type_b = itcbr->second;
- exp.push_back(d_ci_exp[b][coeff_b][rhs_b]);
- if (Trace.isOn("nl-ext-rbound"))
- {
- Trace("nl-ext-rbound") << "* try bounds : ";
- debugPrintBound("nl-ext-rbound", coeff_a, a, type_a, rhs_a);
- Trace("nl-ext-rbound") << std::endl;
- Trace("nl-ext-rbound") << " ";
- debugPrintBound("nl-ext-rbound", coeff_b, b, type_b, rhs_b);
- Trace("nl-ext-rbound") << std::endl;
- }
- Kind types[2];
- for (unsigned r = 0; r < 2; r++)
- {
- Node pivot_factor = r == 0 ? itb->second : ita->second;
- int pivot_factor_sign = r == 0 ? mv_b_sgn : mv_a_sgn;
- types[r] = r == 0 ? type_a : type_b;
- if (pivot_factor_sign == (r == 0 ? 1 : -1))
- {
- types[r] = reverseRelationKind(types[r]);
- }
- if (pivot_factor_sign == 1)
- {
- exp.push_back(nm->mkNode(GT, pivot_factor, d_zero));
- }
- else
- {
- exp.push_back(nm->mkNode(LT, pivot_factor, d_zero));
- }
- }
- Kind jk = transKinds(types[0], types[1]);
- Trace("nl-ext-rbound-debug")
- << "trans kind : " << types[0] << " + " << types[1] << " = "
- << jk << std::endl;
- if (jk != UNDEFINED_KIND)
- {
- Node conc = nm->mkNode(jk, rhs_a_res, rhs_b_res);
- Node conc_mv = d_model.computeAbstractModelValue(conc);
- if (conc_mv == d_false)
- {
- Node rblem = nm->mkNode(IMPLIES, nm->mkNode(AND, exp), conc);
- Trace("nl-ext-rbound-lemma-debug")
- << "Resolution bound lemma "
- "(pre-rewrite) "
- ": "
- << rblem << std::endl;
- rblem = Rewriter::rewrite(rblem);
- Trace("nl-ext-rbound-lemma")
- << "Resolution bound lemma : " << rblem << std::endl;
- lemmas.push_back(rblem);
- }
- }
- exp.pop_back();
- exp.pop_back();
- exp.pop_back();
- }
- }
- exp.pop_back();
- }
- }
- }
- }
- return lemmas;
-}
-
-} // namespace arith
-} // namespace theory
-} // namespace CVC4
+++ /dev/null
-/********************* */
-/*! \file nl_solver.h
- ** \verbatim
- ** Top contributors (to current version):
- ** Andrew Reynolds, Tim King
- ** This file is part of the CVC4 project.
- ** Copyright (c) 2009-2019 by the authors listed in the file AUTHORS
- ** in the top-level source directory) and their institutional affiliations.
- ** All rights reserved. See the file COPYING in the top-level source
- ** directory for licensing information.\endverbatim
- **
- ** \brief Solver for standard non-linear constraints
- **/
-
-#ifndef CVC4__THEORY__ARITH__NL_SOLVER_H
-#define CVC4__THEORY__ARITH__NL_SOLVER_H
-
-#include <map>
-#include <unordered_map>
-#include <utility>
-#include <vector>
-
-#include "context/cdhashset.h"
-#include "context/cdinsert_hashmap.h"
-#include "context/cdlist.h"
-#include "context/cdqueue.h"
-#include "context/context.h"
-#include "expr/kind.h"
-#include "expr/node.h"
-#include "theory/arith/nl_constraint.h"
-#include "theory/arith/nl_lemma_utils.h"
-#include "theory/arith/nl_model.h"
-#include "theory/arith/nl_monomial.h"
-#include "theory/arith/theory_arith.h"
-
-namespace CVC4 {
-namespace theory {
-namespace arith {
-
-typedef std::map<Node, unsigned> NodeMultiset;
-
-/** Non-linear solver class
- *
- * This class implements model-based refinement schemes
- * for non-linear arithmetic, described in:
- *
- * - "Invariant Checking of NRA Transition Systems
- * via Incremental Reduction to LRA with EUF" by
- * Cimatti et al., TACAS 2017.
- *
- * - Section 5 of "Desiging Theory Solvers with
- * Extensions" by Reynolds et al., FroCoS 2017.
- */
-class NlSolver
-{
- typedef std::map<Node, NodeMultiset> MonomialExponentMap;
- typedef context::CDHashSet<Node, NodeHashFunction> NodeSet;
-
- public:
- NlSolver(TheoryArith& containing, NlModel& model);
- ~NlSolver();
-
- /** init last call
- *
- * This is called at the beginning of last call effort check, where
- * assertions are the set of assertions belonging to arithmetic,
- * false_asserts is the subset of assertions that are false in the current
- * model, and xts is the set of extended function terms that are active in
- * the current context.
- */
- void initLastCall(const std::vector<Node>& assertions,
- const std::vector<Node>& false_asserts,
- const std::vector<Node>& xts);
- //-------------------------------------------- lemma schemas
- /** check split zero
- *
- * Returns a set of theory lemmas of the form
- * t = 0 V t != 0
- * where t is a term that exists in the current context.
- */
- std::vector<Node> checkSplitZero();
-
- /** check monomial sign
- *
- * Returns a set of valid theory lemmas, based on a
- * lemma schema which ensures that non-linear monomials
- * respect sign information based on their facts.
- * For more details, see Section 5 of "Designing Theory
- * Solvers with Extensions" by Reynolds et al., FroCoS 2017,
- * Figure 5, this is the schema "Sign".
- *
- * Examples:
- *
- * x > 0 ^ y > 0 => x*y > 0
- * x < 0 => x*y*y < 0
- * x = 0 => x*y*z = 0
- */
- std::vector<Node> checkMonomialSign();
-
- /** check monomial magnitude
- *
- * Returns a set of valid theory lemmas, based on a
- * lemma schema which ensures that comparisons between
- * non-linear monomials respect the magnitude of their
- * factors.
- * For more details, see Section 5 of "Designing Theory
- * Solvers with Extensions" by Reynolds et al., FroCoS 2017,
- * Figure 5, this is the schema "Magnitude".
- *
- * Examples:
- *
- * |x|>|y| => |x*z|>|y*z|
- * |x|>|y| ^ |z|>|w| ^ |x|>=1 => |x*x*z|>|y*w|
- *
- * Argument c indicates the class of inferences to perform for the
- * (non-linear) monomials in the vector d_ms:
- *   0 : compare non-linear monomials against 1,
- *   1 : compare non-linear monomials against variables,
- *   2 : compare non-linear monomials against other non-linear monomials.
- */
- std::vector<Node> checkMonomialMagnitude(unsigned c);
-
- /** check monomial inferred bounds
- *
- * Returns a set of valid theory lemmas, based on a
- * lemma schema that infers new constraints about existing
- * terms based on multiplying both sides of an existing
- * constraint by a term.
- * For more details, see Section 5 of "Designing Theory
- * Solvers with Extensions" by Reynolds et al., FroCoS 2017,
- * Figure 5, this is the schema "Multiply".
- *
- * Examples:
- *
- * x > 0 ^ (y > z + w) => x*y > x*(z+w)
- * x < 0 ^ (y > z + w) => x*y < x*(z+w)
- * ...where (y > z + w) and x*y are a constraint and term
- * that occur in the current context.
- */
- std::vector<Node> checkMonomialInferBounds(
- std::vector<Node>& nt_lemmas,
- const std::vector<Node>& asserts,
- const std::vector<Node>& false_asserts);
-
- /** check factoring
- *
- * Returns a set of valid theory lemmas, based on a
- * lemma schema that states a relationship between monomials
- * with common factors that occur in the same constraint.
- *
- * Examples:
- *
- * x*z+y*z > t => ( k = x + y ^ k*z > t )
- * ...where k is fresh and x*z + y*z > t is a
- * constraint that occurs in the current context.
- */
- std::vector<Node> checkFactoring(const std::vector<Node>& asserts,
- const std::vector<Node>& false_asserts);
-
- /** check monomial infer resolution bounds
- *
- * Returns a set of valid theory lemmas, based on a
- * lemma schema which "resolves" upper bounds
- * of one inequality with lower bounds for another.
- * This schema is not enabled by default, and can
- * be enabled by --nl-ext-rbound.
- *
- * Examples:
- *
- * ( y>=0 ^ s <= x*z ^ x*y <= t ) => y*s <= z*t
- * ...where s <= x*z and x*y <= t are constraints
- * that occur in the current context.
- */
- std::vector<Node> checkMonomialInferResBounds();
-
- /** check tangent planes
- *
- * Returns a set of valid theory lemmas, based on an
- * "incremental linearization" of non-linear monomials.
- * This linearization is accomplished by adding constraints
- * corresponding to "tangent planes" at the current
- * model value of each non-linear monomial. In particular
- * consider the definition for constants a,b :
- * T_{a,b}( x*y ) = b*x + a*y - a*b.
- * The lemmas added by this function are of the form :
- * ( ( x>a ^ y<b) V (x<a ^ y>b) ) => x*y < T_{a,b}( x*y )
- * ( ( x>a ^ y>b) V (x<a ^ y<b) ) => x*y > T_{a,b}( x*y )
- * It is inspired by "Invariant Checking of NRA Transition
- * Systems via Incremental Reduction to LRA with EUF" by
- * Cimatti et al., TACAS 2017.
- * This schema is not terminating in general.
- * It is not enabled by default, and can
- * be enabled by --nl-ext-tplanes.
- *
- * Examples:
- *
- * ( ( x>2 ^ y>5) V (x<2 ^ y<5) ) => x*y > 5*x + 2*y - 10
- * ( ( x>2 ^ y<5) V (x<2 ^ y>5) ) => x*y < 5*x + 2*y - 10
- */
- std::vector<Node> checkTangentPlanes();
-
- //-------------------------------------------- end lemma schemas
- private:
- // The theory of arithmetic containing this extension.
- TheoryArith& d_containing;
- /** Reference to the non-linear model object */
- NlModel& d_model;
- /** commonly used terms */
- Node d_zero;
- Node d_one;
- Node d_neg_one;
- Node d_two;
- Node d_true;
- Node d_false;
- /** Context-independent database of monomial information */
- MonomialDb d_mdb;
- /** Context-independent database of constraint information */
- ConstraintDb d_cdb;
-
- // ( x*y, x*z, y ) for each pair of monomials ( x*y, x*z ) with common factors
- std::map<Node, std::map<Node, Node> > d_mono_diff;
-
- /** cache of terms t for which we have added the lemma ( t = 0 V t != 0 ). */
- NodeSet d_zero_split;
-
- // ordering, stores variables and 0,1,-1
- std::map<Node, unsigned> d_order_vars;
- std::vector<Node> d_order_points;
-
- // information about monomials
- std::vector<Node> d_ms;
- std::vector<Node> d_ms_vars;
- std::map<Node, bool> d_ms_proc;
- std::vector<Node> d_mterms;
-
- // list of monomials with factors whose model value is non-constant in model
- // e.g. y*cos( x )
- std::map<Node, bool> d_m_nconst_factor;
- /** the set of monomials we should apply tangent planes to */
- std::unordered_set<Node, NodeHashFunction> d_tplane_refine;
- /** maps nodes to their factor skolems */
- std::map<Node, Node> d_factor_skolem;
- /** tangent plane bounds */
- std::map<Node, std::map<Node, Node> > d_tangent_val_bound[4];
- // term -> coeff -> rhs -> ( status, exp, b ),
- // where we have that : exp => ( coeff * term <status> rhs )
- // b is true if degree( term ) >= degree( rhs )
- std::map<Node, std::map<Node, std::map<Node, Kind> > > d_ci;
- std::map<Node, std::map<Node, std::map<Node, Node> > > d_ci_exp;
- std::map<Node, std::map<Node, std::map<Node, bool> > > d_ci_max;
-
- /** Make literal */
- static Node mkLit(Node a, Node b, int status, bool isAbsolute = false);
- /** register monomial */
- void setMonomialFactor(Node a, Node b, const NodeMultiset& common);
- /** assign order ids */
- void assignOrderIds(std::vector<Node>& vars,
- NodeMultiset& d_order,
- bool isConcrete,
- bool isAbsolute);
-
- /** Check whether we have already inferred a relationship between monomials
- * x and y based on the information in cmp_infers. This computes the
- * transitive closure of the relation stored in cmp_infers.
- */
- bool cmp_holds(Node x,
- Node y,
- std::map<Node, std::map<Node, Node> >& cmp_infers,
- std::vector<Node>& exp,
- std::map<Node, bool>& visited);
- /** In the following functions, status states a relationship
- * between two arithmetic terms, where:
- * 0 : equal
- * 1 : greater than or equal
- * 2 : greater than
- * -X : (greater -> less)
- * TODO (#1287) make this an enum?
- */
- /** compute the sign of a.
- *
- * Calls to this function are such that :
- * exp => ( oa = a ^ a <status> 0 )
- *
- * This function iterates over the factors of a,
- * where a_index is the index of the factor in a
- * we are currently looking at.
- *
- * This function returns a status, which indicates
- * a's relationship to 0.
- * We add lemmas to lem of the form given by the
- * lemma schema checkSign(...).
- */
- int compareSign(Node oa,
- Node a,
- unsigned a_index,
- int status,
- std::vector<Node>& exp,
- std::vector<Node>& lem);
- /** compare monomials a and b
- *
- * Initially, a call to this function is such that :
- * exp => ( oa = a ^ ob = b )
- *
- * This function returns true if we can infer a valid
- * arithmetic lemma of the form :
- * P => abs( a ) >= abs( b )
- * where P is true and abs( a ) >= abs( b ) is false in the
- * current model.
- *
- * This function is implemented by "processing" factors
- * of monomials a and b until an inference of the above
- * form can be made. For example, if :
- * a = x*x*y and b = z*w
- * Assuming we are trying to show abs( a ) >= abs( b ),
- * then if abs( M( x ) ) >= abs( M( z ) ) where M is the current model,
- * then we can add abs( x ) >= abs( z ) to our explanation, and
- * mark one factor of x as processed in a, and
- * one factor of z as processed in b. The number of processed factors of a
- * and b are stored in a_exp_proc and b_exp_proc respectively.
- *
- * cmp_infers stores information that is helpful
- * in discarding redundant inferences. For example,
- * we do not want to infer abs( x ) >= abs( z ) if
- * we have already inferred abs( x ) >= abs( y ) and
- * abs( y ) >= abs( z ).
- * It stores entries of the form (status,t1,t2)->F,
- * which indicates that we constructed a lemma F that
- * showed t1 <status> t2.
- *
- * We add lemmas to lem of the form given by the
- * lemma schema checkMagnitude(...).
- */
- bool compareMonomial(
- Node oa,
- Node a,
- NodeMultiset& a_exp_proc,
- Node ob,
- Node b,
- NodeMultiset& b_exp_proc,
- std::vector<Node>& exp,
- std::vector<Node>& lem,
- std::map<int, std::map<Node, std::map<Node, Node> > >& cmp_infers);
- /** helper function for above
- *
- * The difference is the inputs a_index and b_index, which are the indices of
- * children (factors) in monomials a and b which we are currently looking at.
- */
- bool compareMonomial(
- Node oa,
- Node a,
- unsigned a_index,
- NodeMultiset& a_exp_proc,
- Node ob,
- Node b,
- unsigned b_index,
- NodeMultiset& b_exp_proc,
- int status,
- std::vector<Node>& exp,
- std::vector<Node>& lem,
- std::map<int, std::map<Node, std::map<Node, Node> > >& cmp_infers);
- /** Get factor skolem for n, add resulting lemmas to lemmas */
- Node getFactorSkolem(Node n, std::vector<Node>& lemmas);
-}; /* class NlSolver */
-
-} // namespace arith
-} // namespace theory
-} // namespace CVC4
-
-#endif /* CVC4__THEORY__ARITH__NL_SOLVER_H */
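
The checkTangentPlanes schema documented above relies on the identity x*y - T_{a,b}(x,y) = (x - a)*(y - b), so x*y lies above the tangent plane exactly when x and y are on the same side of the point (a,b) and below it otherwise. A small self-contained sketch (illustrative only, not CVC4 code) checking this with the a=2, b=5 example from the comment:

#include <cassert>

// Tangent plane to f(x,y) = x*y at the point (a,b), as in the documentation
// of checkTangentPlanes above: T_{a,b}(x,y) = b*x + a*y - a*b.
double tangentPlane(double a, double b, double x, double y)
{
  return b * x + a * y - a * b;
}

int main()
{
  const double a = 2.0, b = 5.0;
  // x*y - T_{a,b}(x,y) factors as (x - a)*(y - b), so the product lies above
  // the plane when x and y are on the same side of (a,b) ...
  double x = 3.0, y = 6.0;  // x > a and y > b
  assert(x * y > tangentPlane(a, b, x, y));
  x = 1.0; y = 4.0;         // x < a and y < b
  assert(x * y > tangentPlane(a, b, x, y));
  // ... and below it when they are on opposite sides.
  x = 3.0; y = 4.0;         // x > a and y < b
  assert(x * y < tangentPlane(a, b, x, y));
  return 0;
}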
+++ /dev/null
-/********************* */
-/*! \file nonlinear_extension.cpp
- ** \verbatim
- ** Top contributors (to current version):
- ** Andrew Reynolds, Tim King, Aina Niemetz
- ** This file is part of the CVC4 project.
- ** Copyright (c) 2009-2019 by the authors listed in the file AUTHORS
- ** in the top-level source directory) and their institutional affiliations.
- ** All rights reserved. See the file COPYING in the top-level source
- ** directory for licensing information.\endverbatim
- **
- ** \brief Implementation of the nonlinear extension of the theory of arithmetic
- **
- ** Implements incomplete handling of nonlinear multiplication and
- ** transcendental functions via model-based refinement.
- **/
-
-#include "theory/arith/nonlinear_extension.h"
-
-#include "options/arith_options.h"
-#include "theory/arith/arith_utilities.h"
-#include "theory/arith/theory_arith.h"
-#include "theory/ext_theory.h"
-#include "theory/theory_model.h"
-
-using namespace CVC4::kind;
-
-namespace CVC4 {
-namespace theory {
-namespace arith {
-
-NonlinearExtension::NonlinearExtension(TheoryArith& containing,
- eq::EqualityEngine* ee)
- : d_lemmas(containing.getUserContext()),
- d_containing(containing),
- d_ee(ee),
- d_needsLastCall(false),
- d_model(containing.getSatContext()),
- d_trSlv(d_model),
- d_nlSlv(containing, d_model),
- d_builtModel(containing.getSatContext(), false)
-{
- d_true = NodeManager::currentNM()->mkConst(true);
- d_zero = NodeManager::currentNM()->mkConst(Rational(0));
- d_one = NodeManager::currentNM()->mkConst(Rational(1));
- d_neg_one = NodeManager::currentNM()->mkConst(Rational(-1));
-}
-
-NonlinearExtension::~NonlinearExtension() {}
-
-bool NonlinearExtension::getCurrentSubstitution(
- int effort, const std::vector<Node>& vars, std::vector<Node>& subs,
- std::map<Node, std::vector<Node> >& exp) {
- // get the constant equivalence classes
- std::map<Node, std::vector<int> > rep_to_subs_index;
-
- bool retVal = false;
- for (unsigned i = 0; i < vars.size(); i++) {
- Node n = vars[i];
- if (d_ee->hasTerm(n)) {
- Node nr = d_ee->getRepresentative(n);
- if (nr.isConst()) {
- subs.push_back(nr);
- Trace("nl-subs") << "Basic substitution : " << n << " -> " << nr
- << std::endl;
- exp[n].push_back(n.eqNode(nr));
- retVal = true;
- } else {
- rep_to_subs_index[nr].push_back(i);
- subs.push_back(n);
- }
- } else {
- subs.push_back(n);
- }
- }
-
- // return true if the substitution is non-trivial
- return retVal;
-}
-
-std::pair<bool, Node> NonlinearExtension::isExtfReduced(
- int effort, Node n, Node on, const std::vector<Node>& exp) const {
- if (n != d_zero) {
- Kind k = n.getKind();
- return std::make_pair(k != NONLINEAR_MULT && !isTranscendentalKind(k),
- Node::null());
- }
- Assert(n == d_zero);
- if (on.getKind() == NONLINEAR_MULT)
- {
- Trace("nl-ext-zero-exp") << "Infer zero : " << on << " == " << n
- << std::endl;
- // minimize explanation if a substitution+rewrite results in zero
- const std::set<Node> vars(on.begin(), on.end());
-
- for (unsigned i = 0, size = exp.size(); i < size; i++)
- {
- Trace("nl-ext-zero-exp") << " exp[" << i << "] = " << exp[i]
- << std::endl;
- std::vector<Node> eqs;
- if (exp[i].getKind() == EQUAL)
- {
- eqs.push_back(exp[i]);
- }
- else if (exp[i].getKind() == AND)
- {
- for (const Node& ec : exp[i])
- {
- if (ec.getKind() == EQUAL)
- {
- eqs.push_back(ec);
- }
- }
- }
-
- for (unsigned j = 0; j < eqs.size(); j++)
- {
- for (unsigned r = 0; r < 2; r++)
- {
- if (eqs[j][r] == d_zero && vars.find(eqs[j][1 - r]) != vars.end())
- {
- Trace("nl-ext-zero-exp") << "...single exp : " << eqs[j]
- << std::endl;
- return std::make_pair(true, eqs[j]);
- }
- }
- }
- }
- }
- return std::make_pair(true, Node::null());
-}
-
-void NonlinearExtension::sendLemmas(const std::vector<Node>& out,
- bool preprocess,
- std::map<Node, NlLemmaSideEffect>& lemSE)
-{
- std::map<Node, NlLemmaSideEffect>::iterator its;
- for (const Node& lem : out)
- {
- Trace("nl-ext-lemma") << "NonlinearExtension::Lemma : " << lem << std::endl;
- d_containing.getOutputChannel().lemma(lem, false, preprocess);
- // process the side effect
- its = lemSE.find(lem);
- if (its != lemSE.end())
- {
- processSideEffect(its->second);
- }
- // add to cache if not preprocess
- if (!preprocess)
- {
- d_lemmas.insert(lem);
- }
- // also indicate this is a tautology
- d_model.addTautology(lem);
- }
-}
-
-void NonlinearExtension::processSideEffect(const NlLemmaSideEffect& se)
-{
- d_trSlv.processSideEffect(se);
-}
-
-unsigned NonlinearExtension::filterLemma(Node lem, std::vector<Node>& out)
-{
- Trace("nl-ext-lemma-debug")
- << "NonlinearExtension::Lemma pre-rewrite : " << lem << std::endl;
- lem = Rewriter::rewrite(lem);
- if (d_lemmas.find(lem) != d_lemmas.end()
- || std::find(out.begin(), out.end(), lem) != out.end())
- {
- Trace("nl-ext-lemma-debug")
- << "NonlinearExtension::Lemma duplicate : " << lem << std::endl;
- return 0;
- }
- out.push_back(lem);
- return 1;
-}
-
-unsigned NonlinearExtension::filterLemmas(std::vector<Node>& lemmas,
- std::vector<Node>& out)
-{
- if (options::nlExtEntailConflicts())
- {
- // check if any are entailed to be false
- for (const Node& lem : lemmas)
- {
- Node ch_lemma = lem.negate();
- ch_lemma = Rewriter::rewrite(ch_lemma);
- Trace("nl-ext-et-debug")
- << "Check entailment of " << ch_lemma << "..." << std::endl;
- std::pair<bool, Node> et = d_containing.getValuation().entailmentCheck(
- options::TheoryOfMode::THEORY_OF_TYPE_BASED, ch_lemma);
- Trace("nl-ext-et-debug") << "entailment test result : " << et.first << " "
- << et.second << std::endl;
- if (et.first)
- {
- Trace("nl-ext-et") << "*** Lemma entailed to be in conflict : " << lem
- << std::endl;
- // return just this lemma
- if (filterLemma(lem, out) > 0)
- {
- lemmas.clear();
- return 1;
- }
- }
- }
- }
-
- unsigned sum = 0;
- for (const Node& lem : lemmas)
- {
- sum += filterLemma(lem, out);
- }
- lemmas.clear();
- return sum;
-}
-
-void NonlinearExtension::getAssertions(std::vector<Node>& assertions)
-{
- Trace("nl-ext") << "Getting assertions..." << std::endl;
- NodeManager* nm = NodeManager::currentNM();
- // get the assertions
- std::map<Node, Rational> init_bounds[2];
- std::map<Node, Node> init_bounds_lit[2];
- unsigned nassertions = 0;
- std::unordered_set<Node, NodeHashFunction> init_assertions;
- for (Theory::assertions_iterator it = d_containing.facts_begin();
- it != d_containing.facts_end();
- ++it)
- {
- nassertions++;
- const Assertion& assertion = *it;
- Node lit = assertion.d_assertion;
- init_assertions.insert(lit);
- // check for concrete bounds
- bool pol = lit.getKind() != NOT;
- Node atom_orig = lit.getKind() == NOT ? lit[0] : lit;
-
- std::vector<Node> atoms;
- if (atom_orig.getKind() == EQUAL)
- {
- if (pol)
- {
- // t = s is ( t >= s ^ t <= s )
- for (unsigned i = 0; i < 2; i++)
- {
- Node atom_new = nm->mkNode(GEQ, atom_orig[i], atom_orig[1 - i]);
- atom_new = Rewriter::rewrite(atom_new);
- atoms.push_back(atom_new);
- }
- }
- }
- else
- {
- atoms.push_back(atom_orig);
- }
-
- for (const Node& atom : atoms)
- {
- // non-strict bounds only
- if (atom.getKind() == GEQ || (!pol && atom.getKind() == GT))
- {
- Node p = atom[0];
- Assert(atom[1].isConst());
- Rational bound = atom[1].getConst<Rational>();
- if (!pol)
- {
- if (atom[0].getType().isInteger())
- {
- // ~( p >= c ) ---> ( p <= c-1 )
- bound = bound - Rational(1);
- }
- }
- unsigned bindex = pol ? 0 : 1;
- bool setBound = true;
- std::map<Node, Rational>::iterator itb = init_bounds[bindex].find(p);
- if (itb != init_bounds[bindex].end())
- {
- if (itb->second == bound)
- {
- setBound = atom_orig.getKind() == EQUAL;
- }
- else
- {
- setBound = pol ? itb->second < bound : itb->second > bound;
- }
- if (setBound)
- {
- // the previously recorded bound literal is subsumed
- init_assertions.erase(init_bounds_lit[bindex][p]);
- }
- }
- if (setBound)
- {
- Trace("nl-ext-init") << (pol ? "Lower" : "Upper") << " bound for "
- << p << " : " << bound << std::endl;
- init_bounds[bindex][p] = bound;
- init_bounds_lit[bindex][p] = lit;
- }
- }
- }
- }
- // for each bound that is the same, ensure we've inferred the equality
- for (std::pair<const Node, Rational>& ib : init_bounds[0])
- {
- Node p = ib.first;
- Node lit1 = init_bounds_lit[0][p];
- if (lit1.getKind() != EQUAL)
- {
- std::map<Node, Rational>::iterator itb = init_bounds[1].find(p);
- if (itb != init_bounds[1].end())
- {
- if (ib.second == itb->second)
- {
- Node eq = p.eqNode(nm->mkConst(ib.second));
- eq = Rewriter::rewrite(eq);
- Node lit2 = init_bounds_lit[1][p];
- Assert(lit2.getKind() != EQUAL);
- // use the equality instead, thus these are redundant
- init_assertions.erase(lit1);
- init_assertions.erase(lit2);
- init_assertions.insert(eq);
- }
- }
- }
- }
-
- for (const Node& a : init_assertions)
- {
- assertions.push_back(a);
- }
- Trace("nl-ext") << "...keep " << assertions.size() << " / " << nassertions
- << " assertions." << std::endl;
-}
-
-std::vector<Node> NonlinearExtension::checkModelEval(
- const std::vector<Node>& assertions)
-{
- std::vector<Node> false_asserts;
- for (size_t i = 0; i < assertions.size(); ++i) {
- Node lit = assertions[i];
- Node atom = lit.getKind()==NOT ? lit[0] : lit;
- Node litv = d_model.computeConcreteModelValue(lit);
- Trace("nl-ext-mv-assert") << "M[[ " << lit << " ]] -> " << litv;
- if (litv != d_true)
- {
- Trace("nl-ext-mv-assert") << " [model-false]" << std::endl;
- false_asserts.push_back(lit);
- }
- else
- {
- Trace("nl-ext-mv-assert") << std::endl;
- }
- }
- return false_asserts;
-}
-
-bool NonlinearExtension::checkModel(const std::vector<Node>& assertions,
- const std::vector<Node>& false_asserts,
- std::vector<Node>& lemmas,
- std::vector<Node>& gs)
-{
- Trace("nl-ext-cm") << "--- check-model ---" << std::endl;
-
- // get the presubstitution
- Trace("nl-ext-cm-debug") << " apply pre-substitution..." << std::endl;
- std::vector<Node> passertions = assertions;
-
- // preprocess the assertions with the transcendental solver
- if (!d_trSlv.preprocessAssertionsCheckModel(passertions))
- {
- return false;
- }
-
- Trace("nl-ext-cm") << "-----" << std::endl;
- unsigned tdegree = d_trSlv.getTaylorDegree();
- bool ret =
- d_model.checkModel(passertions, false_asserts, tdegree, lemmas, gs);
- return ret;
-}
-
-int NonlinearExtension::checkLastCall(const std::vector<Node>& assertions,
- const std::vector<Node>& false_asserts,
- const std::vector<Node>& xts,
- std::vector<Node>& lems,
- std::vector<Node>& lemsPp,
- std::vector<Node>& wlems,
- std::map<Node, NlLemmaSideEffect>& lemSE)
-{
- // initialize the non-linear solver
- d_nlSlv.initLastCall(assertions, false_asserts, xts);
- // initialize the transcendental function solver
- std::vector<Node> lemmas;
- d_trSlv.initLastCall(assertions, false_asserts, xts, lemmas, lemsPp);
-
- // process lemmas that may have been generated by the transcendental solver
- filterLemmas(lemmas, lems);
- if (!lems.empty() || !lemsPp.empty())
- {
- Trace("nl-ext") << " ...finished with " << lems.size()
- << " new lemmas during registration." << std::endl;
- return lems.size() + lemsPp.size();
- }
-
- //----------------------------------- possibly split on zero
- if (options::nlExtSplitZero()) {
- Trace("nl-ext") << "Get zero split lemmas..." << std::endl;
- lemmas = d_nlSlv.checkSplitZero();
- filterLemmas(lemmas, lems);
- if (!lems.empty())
- {
- Trace("nl-ext") << " ...finished with " << lems.size() << " new lemmas."
- << std::endl;
- return lems.size();
- }
- }
-
- //-----------------------------------initial lemmas for transcendental functions
- lemmas = d_trSlv.checkTranscendentalInitialRefine();
- filterLemmas(lemmas, lems);
- if (!lems.empty())
- {
- Trace("nl-ext") << " ...finished with " << lems.size() << " new lemmas."
- << std::endl;
- return lems.size();
- }
-
- //-----------------------------------lemmas based on sign (comparison to zero)
- lemmas = d_nlSlv.checkMonomialSign();
- filterLemmas(lemmas, lems);
- if (!lems.empty())
- {
- Trace("nl-ext") << " ...finished with " << lems.size() << " new lemmas."
- << std::endl;
- return lems.size();
- }
-
- //-----------------------------------monotonicity of transcendental functions
- lemmas = d_trSlv.checkTranscendentalMonotonic();
- filterLemmas(lemmas, lems);
- if (!lems.empty())
- {
- Trace("nl-ext") << " ...finished with " << lems.size() << " new lemmas."
- << std::endl;
- return lems.size();
- }
-
- //-----------------------------------lemmas based on magnitude of non-zero monomials
- for (unsigned c = 0; c < 3; c++) {
- // c is effort level
- lemmas = d_nlSlv.checkMonomialMagnitude(c);
- unsigned nlem = lemmas.size();
- filterLemmas(lemmas, lems);
- if (!lems.empty())
- {
- Trace("nl-ext") << " ...finished with " << lems.size()
- << " new lemmas (out of possible " << nlem << ")."
- << std::endl;
- return lems.size();
- }
- }
-
- //-----------------------------------inferred bounds lemmas
- // e.g. x >= t => y*x >= y*t
- std::vector< Node > nt_lemmas;
- lemmas =
- d_nlSlv.checkMonomialInferBounds(nt_lemmas, assertions, false_asserts);
- // Trace("nl-ext") << "Bound lemmas : " << lemmas.size() << ", " <<
- // nt_lemmas.size() << std::endl; prioritize lemmas that do not
- // introduce new monomials
- filterLemmas(lemmas, lems);
-
- if (options::nlExtTangentPlanes() && options::nlExtTangentPlanesInterleave())
- {
- lemmas = d_nlSlv.checkTangentPlanes();
- filterLemmas(lemmas, lems);
- }
-
- if (!lems.empty())
- {
- Trace("nl-ext") << " ...finished with " << lems.size() << " new lemmas."
- << std::endl;
- return lems.size();
- }
-
- // from inferred bound inferences : now do ones that introduce new terms
- filterLemmas(nt_lemmas, lems);
- if (!lems.empty())
- {
- Trace("nl-ext") << " ...finished with " << lems.size()
- << " new (monomial-introducing) lemmas." << std::endl;
- return lems.size();
- }
-
- //------------------------------------factoring lemmas
- // x*y + x*z >= t => exists k. k = y + z ^ x*k >= t
- if( options::nlExtFactor() ){
- lemmas = d_nlSlv.checkFactoring(assertions, false_asserts);
- filterLemmas(lemmas, lems);
- if (!lems.empty())
- {
- Trace("nl-ext") << " ...finished with " << lems.size() << " new lemmas."
- << std::endl;
- return lems.size();
- }
- }
-
- //------------------------------------resolution bound inferences
- // e.g. ( y>=0 ^ s <= x*z ^ x*y <= t ) => y*s <= z*t
- if (options::nlExtResBound()) {
- lemmas = d_nlSlv.checkMonomialInferResBounds();
- filterLemmas(lemmas, lems);
- if (!lems.empty())
- {
- Trace("nl-ext") << " ...finished with " << lems.size() << " new lemmas."
- << std::endl;
- return lems.size();
- }
- }
-
- //------------------------------------tangent planes
- if (options::nlExtTangentPlanes() && !options::nlExtTangentPlanesInterleave())
- {
- lemmas = d_nlSlv.checkTangentPlanes();
- filterLemmas(lemmas, wlems);
- }
- if (options::nlExtTfTangentPlanes())
- {
- lemmas = d_trSlv.checkTranscendentalTangentPlanes(lemSE);
- filterLemmas(lemmas, wlems);
- }
- Trace("nl-ext") << " ...finished with " << wlems.size() << " waiting lemmas."
- << std::endl;
-
- return 0;
-}
-
-void NonlinearExtension::check(Theory::Effort e) {
- Trace("nl-ext") << std::endl;
- Trace("nl-ext") << "NonlinearExtension::check, effort = " << e
- << ", built model = " << d_builtModel.get() << std::endl;
- if (e == Theory::EFFORT_FULL)
- {
- d_containing.getExtTheory()->clearCache();
- d_needsLastCall = true;
- if (options::nlExtRewrites()) {
- std::vector<Node> nred;
- if (!d_containing.getExtTheory()->doInferences(0, nred)) {
- Trace("nl-ext") << "...sent no lemmas, # extf to reduce = "
- << nred.size() << std::endl;
- if (nred.empty()) {
- d_needsLastCall = false;
- }
- } else {
- Trace("nl-ext") << "...sent lemmas." << std::endl;
- }
- }
- }
- else
- {
- // If we computed lemmas during collectModelInfo, send them now.
- if (!d_cmiLemmas.empty() || !d_cmiLemmasPp.empty())
- {
- sendLemmas(d_cmiLemmas, false, d_cmiLemmasSE);
- sendLemmas(d_cmiLemmasPp, true, d_cmiLemmasSE);
- return;
- }
- // Otherwise, we will answer SAT. The values that we approximated are
- // recorded as approximations here.
- TheoryModel* tm = d_containing.getValuation().getModel();
- for (std::pair<const Node, std::pair<Node, Node>>& a : d_approximations)
- {
- if (a.second.second.isNull())
- {
- tm->recordApproximation(a.first, a.second.first);
- }
- else
- {
- tm->recordApproximation(a.first, a.second.first, a.second.second);
- }
- }
- }
-}
-
-bool NonlinearExtension::modelBasedRefinement(
- std::vector<Node>& mlems,
- std::vector<Node>& mlemsPp,
- std::map<Node, NlLemmaSideEffect>& lemSE)
-{
- // get the assertions
- std::vector<Node> assertions;
- getAssertions(assertions);
-
- Trace("nl-ext-mv-assert")
- << "Getting model values... check for [model-false]" << std::endl;
- // get the assertions that are false in the model
- const std::vector<Node> false_asserts = checkModelEval(assertions);
-
- // get the extended terms belonging to this theory
- std::vector<Node> xts;
- d_containing.getExtTheory()->getTerms(xts);
-
- if (Trace.isOn("nl-ext-debug"))
- {
- Trace("nl-ext-debug") << " processing NonlinearExtension::check : "
- << std::endl;
- Trace("nl-ext-debug") << " " << false_asserts.size()
- << " false assertions" << std::endl;
- Trace("nl-ext-debug") << " " << xts.size()
- << " extended terms: " << std::endl;
- Trace("nl-ext-debug") << " ";
- for (unsigned j = 0; j < xts.size(); j++)
- {
- Trace("nl-ext-debug") << xts[j] << " ";
- }
- Trace("nl-ext-debug") << std::endl;
- }
-
- // compute whether shared terms have correct values
- unsigned num_shared_wrong_value = 0;
- std::vector<Node> shared_term_value_splits;
- // must ensure that shared terms are equal to their concrete value
- Trace("nl-ext-mv") << "Shared terms : " << std::endl;
- for (context::CDList<TNode>::const_iterator its =
- d_containing.shared_terms_begin();
- its != d_containing.shared_terms_end();
- ++its)
- {
- TNode shared_term = *its;
- // compute its value in the model, and its evaluation in the model
- Node stv0 = d_model.computeConcreteModelValue(shared_term);
- Node stv1 = d_model.computeAbstractModelValue(shared_term);
- d_model.printModelValue("nl-ext-mv", shared_term);
- if (stv0 != stv1)
- {
- num_shared_wrong_value++;
- Trace("nl-ext-mv") << "Bad shared term value : " << shared_term
- << std::endl;
- if (shared_term != stv0)
- {
- // split on the value, this is non-terminating in general, TODO :
- // improve this
- Node eq = shared_term.eqNode(stv0);
- shared_term_value_splits.push_back(eq);
- }
- else
- {
- // this can happen for transcendental functions
- // the problem is that we cannot evaluate transcendental functions
- // (they don't have a rewriter that returns constants)
- // thus, the actual value in their model can be themselves, hence we
- // have no reference point to rule out the current model. In this
- // case, we may set incomplete below.
- }
- }
- }
- Trace("nl-ext-debug") << " " << num_shared_wrong_value
- << " shared terms with wrong model value." << std::endl;
- bool needsRecheck;
- do
- {
- d_model.resetCheck();
- needsRecheck = false;
- // complete_status:
- // 1 : we may answer SAT, -1 : we may not answer SAT, 0 : unknown
- int complete_status = 1;
- // lemmas that should be sent later
- std::vector<Node> wlems;
- // We require a check either if an assertion is false or a shared term has
- // a wrong value
- if (!false_asserts.empty() || num_shared_wrong_value > 0)
- {
- complete_status = num_shared_wrong_value > 0 ? -1 : 0;
- checkLastCall(
- assertions, false_asserts, xts, mlems, mlemsPp, wlems, lemSE);
- if (!mlems.empty() || !mlemsPp.empty())
- {
- return true;
- }
- }
- Trace("nl-ext") << "Finished check with status : " << complete_status
- << std::endl;
-
- // if we did not add a lemma during check and there is a chance for SAT
- if (complete_status == 0)
- {
- Trace("nl-ext")
- << "Check model based on bounds for irrational-valued functions..."
- << std::endl;
- // check the model based on simple solving of equalities and using
- // error bounds on the Taylor approximation of transcendental functions.
- std::vector<Node> lemmas;
- std::vector<Node> gs;
- if (checkModel(assertions, false_asserts, lemmas, gs))
- {
- complete_status = 1;
- }
- for (const Node& mg : gs)
- {
- Node mgr = Rewriter::rewrite(mg);
- mgr = d_containing.getValuation().ensureLiteral(mgr);
- d_containing.getOutputChannel().requirePhase(mgr, true);
- d_builtModel = true;
- }
- filterLemmas(lemmas, mlems);
- if (!mlems.empty())
- {
- return true;
- }
- }
-
- // if we have not concluded SAT
- if (complete_status != 1)
- {
- // flush the waiting lemmas
- if (!wlems.empty())
- {
- mlems.insert(mlems.end(), wlems.begin(), wlems.end());
- Trace("nl-ext") << "...added " << wlems.size() << " waiting lemmas."
- << std::endl;
- return true;
- }
- // resort to splitting on shared terms with their model value
- // if we did not add any lemmas
- if (num_shared_wrong_value > 0)
- {
- complete_status = -1;
- if (!shared_term_value_splits.empty())
- {
- std::vector<Node> stvLemmas;
- for (const Node& eq : shared_term_value_splits)
- {
- Node req = Rewriter::rewrite(eq);
- Node literal = d_containing.getValuation().ensureLiteral(req);
- d_containing.getOutputChannel().requirePhase(literal, true);
- Trace("nl-ext-debug") << "Split on : " << literal << std::endl;
- Node split = literal.orNode(literal.negate());
- filterLemma(split, stvLemmas);
- }
- if (!stvLemmas.empty())
- {
- mlems.insert(mlems.end(), stvLemmas.begin(), stvLemmas.end());
- Trace("nl-ext") << "...added " << stvLemmas.size()
- << " shared term value split lemmas." << std::endl;
- return true;
- }
- }
- else
- {
- // this can happen if we are trying to do theory combination with
- // transcendental functions
- // since their model value cannot even be computed exactly
- }
- }
-
- // we are incomplete
- if (options::nlExtIncPrecision() && d_model.usedApproximate())
- {
- d_trSlv.incrementTaylorDegree();
- needsRecheck = true;
- // increase precision for PI?
- // Difficult since Taylor series is very slow to converge
- Trace("nl-ext") << "...increment Taylor degree to "
- << d_trSlv.getTaylorDegree() << std::endl;
- }
- else
- {
- Trace("nl-ext") << "...failed to send lemma in "
- "NonLinearExtension, set incomplete"
- << std::endl;
- d_containing.getOutputChannel().setIncomplete();
- }
- }
- } while (needsRecheck);
-
- // did not add lemmas
- return false;
-}
-
-void NonlinearExtension::interceptModel(std::map<Node, Node>& arithModel)
-{
- if (!needsCheckLastEffort())
- {
- // no non-linear constraints, we are done
- return;
- }
- Trace("nl-ext") << "NonlinearExtension::interceptModel begin" << std::endl;
- d_model.reset(d_containing.getValuation().getModel(), arithModel);
- // run a last call effort check
- d_cmiLemmas.clear();
- d_cmiLemmasPp.clear();
- d_cmiLemmasSE.clear();
- if (!d_builtModel.get())
- {
- Trace("nl-ext") << "interceptModel: do model-based refinement" << std::endl;
- modelBasedRefinement(d_cmiLemmas, d_cmiLemmasPp, d_cmiLemmasSE);
- }
- if (d_builtModel.get())
- {
- Trace("nl-ext") << "interceptModel: do model repair" << std::endl;
- d_approximations.clear();
- // modify the model values
- d_model.getModelValueRepair(arithModel, d_approximations);
- }
-}
-
-void NonlinearExtension::presolve()
-{
- Trace("nl-ext") << "NonlinearExtension::presolve" << std::endl;
-}
-
-
-} // namespace arith
-} // namespace theory
-} // namespace CVC4
+++ /dev/null
-/********************* */
-/*! \file nonlinear_extension.h
- ** \verbatim
- ** Top contributors (to current version):
- ** Andrew Reynolds, Tim King
- ** This file is part of the CVC4 project.
- ** Copyright (c) 2009-2019 by the authors listed in the file AUTHORS
- ** in the top-level source directory) and their institutional affiliations.
- ** All rights reserved. See the file COPYING in the top-level source
- ** directory for licensing information.\endverbatim
- **
- ** \brief Extensions for incomplete handling of nonlinear multiplication.
- **
- ** Extensions to the theory of arithmetic for incomplete handling of nonlinear
- ** multiplication via axiom instantiations.
- **/
-
-#ifndef CVC4__THEORY__ARITH__NONLINEAR_EXTENSION_H
-#define CVC4__THEORY__ARITH__NONLINEAR_EXTENSION_H
-
-#include <stdint.h>
-#include <map>
-#include <vector>
-
-#include "context/cdlist.h"
-#include "expr/kind.h"
-#include "expr/node.h"
-#include "theory/arith/nl_lemma_utils.h"
-#include "theory/arith/nl_model.h"
-#include "theory/arith/nl_solver.h"
-#include "theory/arith/theory_arith.h"
-#include "theory/arith/transcendental_solver.h"
-#include "theory/uf/equality_engine.h"
-
-namespace CVC4 {
-namespace theory {
-namespace arith {
-
-/** Non-linear extension class
- *
- * This class implements model-based refinement schemes
- * for non-linear arithmetic, described in:
- *
- * - "Invariant Checking of NRA Transition Systems
- * via Incremental Reduction to LRA with EUF" by
- * Cimatti et al., TACAS 2017.
- *
- * - Section 5 of "Desiging Theory Solvers with
- * Extensions" by Reynolds et al., FroCoS 2017.
- *
- * - "Satisfiability Modulo Transcendental
- * Functions via Incremental Linearization" by Cimatti
- * et al., CADE 2017.
- *
- * Its main functionality is a check(...) method,
- * which is called by TheoryArithPrivate either:
- * (1) at full effort with no conflicts or lemmas emitted, or
- * (2) at last call effort.
- * In this method, this class calls d_out->lemma(...)
- * for valid arithmetic theory lemmas, based on the current set of assertions,
- * where d_out is the output channel of TheoryArith.
- */
-class NonlinearExtension {
- typedef context::CDHashSet<Node, NodeHashFunction> NodeSet;
-
- public:
- NonlinearExtension(TheoryArith& containing, eq::EqualityEngine* ee);
- ~NonlinearExtension();
- /** Get current substitution
- *
- * This function and the one below are
- * used for context-dependent
- * simplification, see Section 3.1 of
- * "Designing Theory Solvers with Extensions"
- * by Reynolds et al. FroCoS 2017.
- *
- * effort : an identifier indicating the stage where
- * we are performing context-dependent simplification,
- * vars : a set of arithmetic variables.
- *
- * This function populates subs and exp, such that for 0 <= i < vars.size():
- * ( exp[vars[i]] ) => vars[i] = subs[i]
- * where exp[vars[i]] is a set of assertions
- * that hold in the current context. We call { vars -> subs } a "derivable
- * substituion" (see Reynolds et al. FroCoS 2017).
- */
- bool getCurrentSubstitution(int effort, const std::vector<Node>& vars,
- std::vector<Node>& subs,
- std::map<Node, std::vector<Node> >& exp);
- /** Is the term n in reduced form?
- *
- * Used for context-dependent simplification.
- *
- * effort : an identifier indicating the stage where
- * we are performing context-dependent simplification,
- * on : the original term that we reduced to n,
- * exp : an explanation such that ( exp => on = n ).
- *
- * We return a pair ( b, exp' ) such that
- * if b is true, then:
- * n is in reduced form
- * if exp' is non-null, then ( exp' => on = n )
- * The second part of the pair is used for constructing
- * minimal explanations for context-dependent simplifications.
- */
- std::pair<bool, Node> isExtfReduced(int effort, Node n, Node on,
- const std::vector<Node>& exp) const;
- /** Check at effort level e.
- *
- * This call may result in (possibly multiple) calls to d_out->lemma(...)
- * where d_out is the output channel of TheoryArith.
- *
- * If e is FULL, then we add lemmas based on context-dependent
- * simplification (see Reynolds et al FroCoS 2017).
- *
- * If e is LAST_CALL, we add lemmas based on model-based refinement
- * (see additionally Cimatti et al., TACAS 2017). The lemmas added at this
- * effort may be computed during a call to interceptModel as described below.
- */
- void check(Theory::Effort e);
- /** intercept model
- *
- * This method is called during TheoryArith::collectModelInfo, which is
- * invoked after the linear arithmetic solver passes a full effort check
- * with no lemmas.
- *
- * The argument arithModel is a map of the form { v1 -> c1, ..., vn -> cn }
- * which represents the linear arithmetic theory solver's contribution to the
- * current candidate model. That is, its collectModelInfo method is requesting
- * that equalities v1 = c1, ..., vn = cn be added to the current model, where
- * v1, ..., vn are arithmetic variables and c1, ..., cn are constants. Notice
- * arithmetic variables may be real-valued terms belonging to other theories,
- * or abstractions of applications of multiplication (kind NONLINEAR_MULT).
- *
- * This method requests that the non-linear solver inspect this model and
- * do any number of the following:
- * (1) Construct lemmas based on a model-based refinement procedure inspired
- * by Cimatti et al., TACAS 2017,
- * (2) In the case that the nonlinear solver finds that the current
- * constraints are satisfiable, it may "repair" the values in the argument
- * arithModel so that it satisfies certain nonlinear constraints. This may
- * involve e.g. solving for variables in nonlinear equations.
- *
- * Notice that in the former case, the lemmas it constructs are not sent out
- * immediately. Instead, they are put in temporary vectors d_cmiLemmas
- * and d_cmiLemmasPp, which are then sent out (if necessary) when a last call
- * effort check is issued to this class.
- */
- void interceptModel(std::map<Node, Node>& arithModel);
- /** Does this class need a call to check(...) at last call effort? */
- bool needsCheckLastEffort() const { return d_needsLastCall; }
- /** presolve
- *
- * This function is called during TheoryArith's presolve command.
- * In this function, we send the lemmas we accumulated during preprocessing.
- * For instance, definitional lemmas from expandDefinitions are sent out
- * on the output channel of TheoryArith here.
- */
- void presolve();
- private:
- /** Model-based refinement
- *
- * This is the main entry point of this class for generating lemmas on the
- * output channel of the theory of arithmetic.
- *
- * It is currently run at last call effort. It applies lemma schemas
- * described in Reynolds et al. FroCoS 2017 that are based on ruling out
- * the current candidate model.
- *
- * This function returns true if a lemma was added to the vector lems/lemsPp.
- * Otherwise, it returns false. In the latter case, the model object d_model
- * may have information regarding how to construct a model, in the case that
- * we determined the problem is satisfiable.
- *
- * The argument lemSE is the "side effect" of the lemmas in mlems and mlemsPp
- * (for details, see checkLastCall).
- */
- bool modelBasedRefinement(std::vector<Node>& mlems,
- std::vector<Node>& mlemsPp,
- std::map<Node, NlLemmaSideEffect>& lemSE);
-
-
- /** check last call
- *
- * Check assertions for consistency in the effort LAST_CALL with a subset of
- * the assertions, false_asserts, that evaluate to false in the current model.
- *
- * xts : the list of (non-reduced) extended terms in the current context.
- *
- * This method adds lemmas to arguments lems, lemsPp, and wlems, each of
- * which are intended to be sent out on the output channel of TheoryArith
- * under certain conditions.
- *
- * If the set lems or lemsPp is non-empty, then no further processing is
- * necessary. The last call effort check should terminate and these
- * lemmas should be sent. The set lemsPp is distinguished from lems since
- * the preprocess flag on the lemma(...) call should be set to true.
- *
- * The "waiting" lemmas wlems contain lemmas that should be sent on the
- * output channel as a last resort. In other words, only if we are not
- * able to establish SAT via a call to checkModel(...) should wlems be
- * considered. This set typically contains tangent plane lemmas.
- *
- * The argument lemSE is the "side effect" of the lemmas from the previous
- * three calls. If a lemma maps to a side effect, it should be
- * processed via a call to processSideEffect(...) immediately after the
- * lemma is sent (if it is indeed sent on this call to check).
- */
- int checkLastCall(const std::vector<Node>& assertions,
- const std::vector<Node>& false_asserts,
- const std::vector<Node>& xts,
- std::vector<Node>& lems,
- std::vector<Node>& lemsPp,
- std::vector<Node>& wlems,
- std::map<Node, NlLemmaSideEffect>& lemSE);
-
- /** get assertions
- *
- * Let M be the set of assertions known by THEORY_ARITH. This function adds a
- * set of literals M' to assertions such that M' and M are equivalent.
- *
- * Examples of how M' differs with M:
- * (1) M' may not include t < c (in M) if t < c' is in M' for c' < c, where
- * c and c' are constants,
- * (2) M' may contain t = c if both t >= c and t <= c are in M.
- */
- void getAssertions(std::vector<Node>& assertions);
- /** check model eval
- *
- * Returns the subset of assertions whose concrete values we cannot show are
- * true in the current model. Notice that we typically cannot compute concrete
- * values for assertions involving transcendental functions. Any assertion
- * whose model value cannot be computed is included in the return value of
- * this function.
- */
- std::vector<Node> checkModelEval(const std::vector<Node>& assertions);
-
- //---------------------------check model
- /** Check model
- *
- * Checks the current model based on solving for equalities, and using error
- * bounds on the Taylor approximation.
- *
- * If this function returns true, then all assertions in the input argument
- * "assertions" are satisfied for all interpretations of variables within
- * their computed bounds (as stored in d_check_model_bounds).
- *
- * For details, see Section 3 of Cimatti et al CADE 2017 under the heading
- * "Detecting Satisfiable Formulas".
- *
- * The arguments lemmas and gs store the lemmas and guard literals to be sent
- * out on the output channel of TheoryArith as lemmas and calls to
- * ensureLiteral respectively.
- */
- bool checkModel(const std::vector<Node>& assertions,
- const std::vector<Node>& false_asserts,
- std::vector<Node>& lemmas,
- std::vector<Node>& gs);
- //---------------------------end check model
-
- /** Is n entailed with polarity pol in the current context? */
- bool isEntailed(Node n, bool pol);
-
- /**
- * Potentially adds lemmas to the set out and clears lemmas. Returns
- * the number of lemmas added to out. We do not add lemmas that have already
- * been sent on the output channel of TheoryArith.
- */
- unsigned filterLemmas(std::vector<Node>& lemmas, std::vector<Node>& out);
- /** singleton version of above */
- unsigned filterLemma(Node lem, std::vector<Node>& out);
-
- /**
- * Send lemmas in out on the output channel of theory of arithmetic.
- */
- void sendLemmas(const std::vector<Node>& out,
- bool preprocess,
- std::map<Node, NlLemmaSideEffect>& lemSE);
- /** Process side effect se */
- void processSideEffect(const NlLemmaSideEffect& se);
-
- /** cache of all lemmas sent on the output channel (user-context-dependent) */
- NodeSet d_lemmas;
- /** commonly used terms */
- Node d_zero;
- Node d_one;
- Node d_neg_one;
- Node d_true;
- // The theory of arithmetic containing this extension.
- TheoryArith& d_containing;
- // pointer to used equality engine
- eq::EqualityEngine* d_ee;
- // needs last call effort
- bool d_needsLastCall;
- /** The non-linear model object
- *
- * This class is responsible for computing model values for arithmetic terms
- * and for establishing when we are able to answer "SAT".
- */
- NlModel d_model;
- /** The transcendental extension object
- *
- * This is the subsolver responsible for running the procedure for
- * transcendental functions.
- */
- TranscendentalSolver d_trSlv;
- /** The nonlinear extension object
- *
- * This is the subsolver responsible for running the procedure for
- * constraints involving nonlinear multiplication.
- */
- NlSolver d_nlSlv;
- /**
- * The lemmas we computed during collectModelInfo. We store two vectors of
- * lemmas to be sent out on the output channel of TheoryArith. The first
- * is not preprocessed, the second is.
- */
- std::vector<Node> d_cmiLemmas;
- std::vector<Node> d_cmiLemmasPp;
- /** the side effects of the above lemmas */
- std::map<Node, NlLemmaSideEffect> d_cmiLemmasSE;
- /**
- * The approximations computed during collectModelInfo. For details, see
- * NlModel::getModelValueRepair.
- */
- std::map<Node, std::pair<Node, Node>> d_approximations;
- /** have we successfully built the model in this SAT context? */
- context::CDO<bool> d_builtModel;
-}; /* class NonlinearExtension */
-
-} // namespace arith
-} // namespace theory
-} // namespace CVC4
-
-#endif /* CVC4__THEORY__ARITH__NONLINEAR_EXTENSION_H */
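
The checkModel documentation above describes establishing SAT by replacing transcendental function values with lower/upper bounds derived from Taylor approximations and their error terms. A minimal self-contained sketch of that idea (illustrative only; not CVC4's actual bound computation, and with a deliberately crude remainder bound that assumes 0 <= c <= 1):

#include <cassert>

// Lower/upper bounds for exp(c), 0 <= c <= 1, from a degree-n Taylor
// polynomial plus a Lagrange remainder bounded via exp(t) <= e < 3.
void expBounds(double c, unsigned n, double& lo, double& hi)
{
  double term = 1.0, sum = 1.0, fact = 1.0;
  for (unsigned k = 1; k <= n; k++)
  {
    term *= c;   // c^k
    fact *= k;   // k!
    sum += term / fact;
  }
  lo = sum;
  // remainder = exp(t) * c^(n+1) / (n+1)! for some t in [0,c]; bound exp(t) < 3
  hi = sum + 3.0 * term * c / (fact * (n + 1));
}

int main()
{
  // With degree 4 at c = 1: lo = 65/24 ~ 2.7083 and hi ~ 2.7333, so an
  // assertion such as exp(1) > 2 is already verified within these bounds.
  double lo, hi;
  expBounds(1.0, 4, lo, hi);
  assert(lo > 2.0 && hi < 2.75);
  return 0;
}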
#include "theory/arith/dio_solver.h"
#include "theory/arith/linear_equality.h"
#include "theory/arith/matrix.h"
-#include "theory/arith/nonlinear_extension.h"
+#include "theory/arith/nl/nonlinear_extension.h"
#include "theory/arith/normal_form.h"
#include "theory/arith/partial_model.h"
#include "theory/arith/simplex.h"
d_nlin_inverse_skolem(u)
{
if( options::nlExt() ){
- d_nonlinearExtension = new NonlinearExtension(
+ d_nonlinearExtension = new nl::NonlinearExtension(
containing, d_congruenceManager.getEqualityEngine());
}
}
}
class InferBoundsResult;
+namespace nl {
class NonlinearExtension;
+}
/**
* Implementation of QF_LRA.
AttemptSolutionSDP d_attemptSolSimplex;
/** non-linear algebraic approach */
- NonlinearExtension * d_nonlinearExtension;
+ nl::NonlinearExtension* d_nonlinearExtension;
bool solveRealRelaxation(Theory::Effort effortLevel);
+++ /dev/null
-/********************* */
-/*! \file transcendental_solver.cpp
- ** \verbatim
- ** Top contributors (to current version):
- ** Andrew Reynolds
- ** This file is part of the CVC4 project.
- ** Copyright (c) 2009-2019 by the authors listed in the file AUTHORS
- ** in the top-level source directory) and their institutional affiliations.
- ** All rights reserved. See the file COPYING in the top-level source
- ** directory for licensing information.\endverbatim
- **
- ** \brief Implementation of solver for handling transcendental functions.
- **/
-
-#include "theory/arith/transcendental_solver.h"
-
-#include <cmath>
-#include <set>
-
-#include "expr/node_algorithm.h"
-#include "expr/node_builder.h"
-#include "options/arith_options.h"
-#include "theory/arith/arith_msum.h"
-#include "theory/arith/arith_utilities.h"
-#include "theory/rewriter.h"
-
-using namespace CVC4::kind;
-
-namespace CVC4 {
-namespace theory {
-namespace arith {
-
-TranscendentalSolver::TranscendentalSolver(NlModel& m) : d_model(m)
-{
- NodeManager* nm = NodeManager::currentNM();
- d_true = nm->mkConst(true);
- d_false = nm->mkConst(false);
- d_zero = nm->mkConst(Rational(0));
- d_one = nm->mkConst(Rational(1));
- d_neg_one = nm->mkConst(Rational(-1));
- d_taylor_real_fv = nm->mkBoundVar("x", nm->realType());
- d_taylor_real_fv_base = nm->mkBoundVar("a", nm->realType());
- d_taylor_real_fv_base_rem = nm->mkBoundVar("b", nm->realType());
- d_taylor_degree = options::nlExtTfTaylorDegree();
-}
-
-TranscendentalSolver::~TranscendentalSolver() {}
-
-void TranscendentalSolver::initLastCall(const std::vector<Node>& assertions,
- const std::vector<Node>& false_asserts,
- const std::vector<Node>& xts,
- std::vector<Node>& lems,
- std::vector<Node>& lemsPp)
-{
- d_funcCongClass.clear();
- d_funcMap.clear();
- d_tf_region.clear();
-
- NodeManager* nm = NodeManager::currentNM();
-
- // register the extended function terms
- std::vector<Node> trNeedsMaster;
- bool needPi = false;
- // for computing congruence
- std::map<Kind, ArgTrie> argTrie;
- for (unsigned i = 0, xsize = xts.size(); i < xsize; i++)
- {
- Node a = xts[i];
- Kind ak = a.getKind();
- bool consider = true;
- // if it is an unpurified application of SINE, or it is a transcendental
- // function applied to a transcendental function, purify.
- if (isTranscendentalKind(ak))
- {
- // if we've already computed master for a
- if (d_trMaster.find(a) != d_trMaster.end())
- {
- // a master has at least one slave
- consider = (d_trSlaves.find(a) != d_trSlaves.end());
- }
- else
- {
- if (ak == SINE)
- {
- // always not a master
- consider = false;
- }
- else
- {
- for (const Node& ac : a)
- {
- if (isTranscendentalKind(ac.getKind()))
- {
- consider = false;
- break;
- }
- }
- }
- if (!consider)
- {
- // wait to assign a master below
- trNeedsMaster.push_back(a);
- }
- else
- {
- d_trMaster[a] = a;
- d_trSlaves[a].insert(a);
- }
- }
- }
- if (ak == EXPONENTIAL || ak == SINE)
- {
- needPi = needPi || (ak == SINE);
- // if we didn't indicate that it should be purified above
- if (consider)
- {
- std::vector<Node> repList;
- for (const Node& ac : a)
- {
- Node r = d_model.computeConcreteModelValue(ac);
- repList.push_back(r);
- }
- Node aa = argTrie[ak].add(a, repList);
- if (aa != a)
- {
- // apply congruence to pairs of terms that are disequal and congruent
- Assert(aa.getNumChildren() == a.getNumChildren());
- Node mvaa = d_model.computeAbstractModelValue(a);
- Node mvaaa = d_model.computeAbstractModelValue(aa);
- if (mvaa != mvaaa)
- {
- std::vector<Node> exp;
- for (unsigned j = 0, size = a.getNumChildren(); j < size; j++)
- {
- exp.push_back(a[j].eqNode(aa[j]));
- }
- Node expn = exp.size() == 1 ? exp[0] : nm->mkNode(AND, exp);
- Node cong_lemma = nm->mkNode(OR, expn.negate(), a.eqNode(aa));
- lems.push_back(cong_lemma);
- }
- }
- else
- {
- // new representative of congruence class
- d_funcMap[ak].push_back(a);
- }
- // add to congruence class
- d_funcCongClass[aa].push_back(a);
- }
- }
- else if (ak == PI)
- {
- Assert(consider);
- needPi = true;
- d_funcMap[ak].push_back(a);
- d_funcCongClass[a].push_back(a);
- }
- }
- // initialize pi if necessary
- if (needPi && d_pi.isNull())
- {
- mkPi();
- getCurrentPiBounds(lems);
- }
-
- if (!lems.empty())
- {
- return;
- }
-
- // process SINE phase shifting
- for (const Node& a : trNeedsMaster)
- {
- // should not have processed this already
- Assert(d_trMaster.find(a) == d_trMaster.end());
- Kind k = a.getKind();
- Assert(k == SINE || k == EXPONENTIAL);
- Node y =
- nm->mkSkolem("y", nm->realType(), "phase shifted trigonometric arg");
- Node new_a = nm->mkNode(k, y);
- d_trSlaves[new_a].insert(new_a);
- d_trSlaves[new_a].insert(a);
- d_trMaster[a] = new_a;
- d_trMaster[new_a] = new_a;
- Node lem;
- if (k == SINE)
- {
- Trace("nl-ext-tf") << "Basis sine : " << new_a << " for " << a
- << std::endl;
- Assert(!d_pi.isNull());
- Node shift = nm->mkSkolem("s", nm->integerType(), "number of shifts");
- // TODO : do not introduce shift here, instead needs model-based
- // refinement for constant shifts (cvc4-projects #1284)
- lem = nm->mkNode(
- AND,
- mkValidPhase(y, d_pi),
- nm->mkNode(
- ITE,
- mkValidPhase(a[0], d_pi),
- a[0].eqNode(y),
- a[0].eqNode(nm->mkNode(
- PLUS,
- y,
- nm->mkNode(MULT, nm->mkConst(Rational(2)), shift, d_pi)))),
- new_a.eqNode(a));
- }
- else
- {
- // do both equalities to ensure that new_a becomes a preregistered term
- lem = nm->mkNode(AND, a.eqNode(new_a), a[0].eqNode(y));
- }
- // note we must do preprocess on this lemma
- Trace("nl-ext-lemma") << "NonlinearExtension::Lemma : purify : " << lem
- << std::endl;
- lemsPp.push_back(lem);
- }
-
- if (Trace.isOn("nl-ext-mv"))
- {
- Trace("nl-ext-mv") << "Arguments of trancendental functions : "
- << std::endl;
- for (std::pair<const Kind, std::vector<Node> >& tfl : d_funcMap)
- {
- Kind k = tfl.first;
- if (k == SINE || k == EXPONENTIAL)
- {
- for (const Node& tf : tfl.second)
- {
- Node v = tf[0];
- d_model.computeConcreteModelValue(v);
- d_model.computeAbstractModelValue(v);
- d_model.printModelValue("nl-ext-mv", v);
- }
- }
- }
- }
-}
-
-bool TranscendentalSolver::preprocessAssertionsCheckModel(
- std::vector<Node>& assertions)
-{
- std::vector<Node> pvars;
- std::vector<Node> psubs;
- for (const std::pair<const Node, Node>& tb : d_trMaster)
- {
- pvars.push_back(tb.first);
- psubs.push_back(tb.second);
- }
-
- // initialize representation of assertions
- std::vector<Node> passertions;
- for (const Node& a : assertions)
- {
- Node pa = a;
- if (!pvars.empty())
- {
- pa = arithSubstitute(pa, pvars, psubs);
- pa = Rewriter::rewrite(pa);
- }
- if (!pa.isConst() || !pa.getConst<bool>())
- {
- Trace("nl-ext-cm-assert") << "- assert : " << pa << std::endl;
- passertions.push_back(pa);
- }
- }
- // get model bounds for all transcendental functions
- Trace("nl-ext-cm") << "----- Get bounds for transcendental functions..."
- << std::endl;
- for (std::pair<const Kind, std::vector<Node> >& tfs : d_funcMap)
- {
- Kind k = tfs.first;
- for (const Node& tf : tfs.second)
- {
- Trace("nl-ext-cm") << "- Term: " << tf << std::endl;
- bool success = true;
- // tf is Figure 3 : tf( x )
- Node bl;
- Node bu;
- if (k == PI)
- {
- bl = d_pi_bound[0];
- bu = d_pi_bound[1];
- }
- else
- {
- std::pair<Node, Node> bounds = getTfModelBounds(tf, d_taylor_degree);
- bl = bounds.first;
- bu = bounds.second;
- if (bl != bu)
- {
- d_model.setUsedApproximate();
- }
- }
- if (!bl.isNull() && !bu.isNull())
- {
- // for each function in the congruence class
- for (const Node& ctf : d_funcCongClass[tf])
- {
- // each term in a congruence class should be a master term
- Assert(d_trSlaves.find(ctf) != d_trSlaves.end());
- // we set the bounds for each slave of tf
- for (const Node& stf : d_trSlaves[ctf])
- {
- Trace("nl-ext-cm") << "...bound for " << stf << " : [" << bl << ", "
- << bu << "]" << std::endl;
- success = d_model.addCheckModelBound(stf, bl, bu);
- }
- }
- }
- else
- {
- Trace("nl-ext-cm") << "...no bound for " << tf << std::endl;
- }
- if (!success)
- {
- // a bound was conflicting
- Trace("nl-ext-cm") << "...failed to set bound for " << tf << std::endl;
- Trace("nl-ext-cm") << "-----" << std::endl;
- return false;
- }
- }
- }
- // replace the assertions
- assertions = passertions;
- return true;
-}
-
-void TranscendentalSolver::incrementTaylorDegree() { d_taylor_degree++; }
-unsigned TranscendentalSolver::getTaylorDegree() const
-{
- return d_taylor_degree;
-}
-
-void TranscendentalSolver::processSideEffect(const NlLemmaSideEffect& se)
-{
- for (const std::tuple<Node, unsigned, Node>& sp : se.d_secantPoint)
- {
- Node tf = std::get<0>(sp);
- unsigned d = std::get<1>(sp);
- Node c = std::get<2>(sp);
- d_secant_points[tf][d].push_back(c);
- }
-}
-
-void TranscendentalSolver::mkPi()
-{
- NodeManager* nm = NodeManager::currentNM();
- if (d_pi.isNull())
- {
- d_pi = nm->mkNullaryOperator(nm->realType(), PI);
- d_pi_2 = Rewriter::rewrite(
- nm->mkNode(MULT, d_pi, nm->mkConst(Rational(1) / Rational(2))));
- d_pi_neg_2 = Rewriter::rewrite(
- nm->mkNode(MULT, d_pi, nm->mkConst(Rational(-1) / Rational(2))));
- d_pi_neg =
- Rewriter::rewrite(nm->mkNode(MULT, d_pi, nm->mkConst(Rational(-1))));
- // initialize bounds
- d_pi_bound[0] = nm->mkConst(Rational(103993) / Rational(33102));
- d_pi_bound[1] = nm->mkConst(Rational(104348) / Rational(33215));
- }
-}
-
-void TranscendentalSolver::getCurrentPiBounds(std::vector<Node>& lemmas)
-{
- NodeManager* nm = NodeManager::currentNM();
- Node pi_lem = nm->mkNode(AND,
- nm->mkNode(GEQ, d_pi, d_pi_bound[0]),
- nm->mkNode(LEQ, d_pi, d_pi_bound[1]));
- lemmas.push_back(pi_lem);
-}
-
-std::vector<Node> TranscendentalSolver::checkTranscendentalInitialRefine()
-{
- NodeManager* nm = NodeManager::currentNM();
- std::vector<Node> lemmas;
- Trace("nl-ext")
- << "Get initial refinement lemmas for transcendental functions..."
- << std::endl;
- for (std::pair<const Kind, std::vector<Node> >& tfl : d_funcMap)
- {
- Kind k = tfl.first;
- for (const Node& t : tfl.second)
- {
- // initial refinements
- if (d_tf_initial_refine.find(t) == d_tf_initial_refine.end())
- {
- d_tf_initial_refine[t] = true;
- Node lem;
- if (k == SINE)
- {
- Node symn = nm->mkNode(SINE, nm->mkNode(MULT, d_neg_one, t[0]));
- symn = Rewriter::rewrite(symn);
- // Can assume it is its own master since phase is split over 0,
- // hence -pi <= t[0] <= pi implies -pi <= -t[0] <= pi.
- d_trMaster[symn] = symn;
- d_trSlaves[symn].insert(symn);
- Assert(d_trSlaves.find(t) != d_trSlaves.end());
- std::vector<Node> children;
-
- lem = nm->mkNode(AND,
- // bounds
- nm->mkNode(AND,
- nm->mkNode(LEQ, t, d_one),
- nm->mkNode(GEQ, t, d_neg_one)),
- // symmetry
- nm->mkNode(PLUS, t, symn).eqNode(d_zero),
- // sign
- nm->mkNode(EQUAL,
- nm->mkNode(LT, t[0], d_zero),
- nm->mkNode(LT, t, d_zero)),
- // zero val
- nm->mkNode(EQUAL,
- nm->mkNode(GT, t[0], d_zero),
- nm->mkNode(GT, t, d_zero)));
- lem = nm->mkNode(
- AND,
- lem,
- // zero tangent
- nm->mkNode(AND,
- nm->mkNode(IMPLIES,
- nm->mkNode(GT, t[0], d_zero),
- nm->mkNode(LT, t, t[0])),
- nm->mkNode(IMPLIES,
- nm->mkNode(LT, t[0], d_zero),
- nm->mkNode(GT, t, t[0]))),
- // pi tangent
- nm->mkNode(
- AND,
- nm->mkNode(IMPLIES,
- nm->mkNode(LT, t[0], d_pi),
- nm->mkNode(LT, t, nm->mkNode(MINUS, d_pi, t[0]))),
- nm->mkNode(
- IMPLIES,
- nm->mkNode(GT, t[0], d_pi_neg),
- nm->mkNode(GT, t, nm->mkNode(MINUS, d_pi_neg, t[0])))));
- }
- else if (k == EXPONENTIAL)
- {
- // ( exp(x) > 0 ) ^ ( x=0 <=> exp( x ) = 1 ) ^ ( x < 0 <=> exp( x ) <
- // 1 ) ^ ( x <= 0 V exp( x ) > x + 1 )
- lem = nm->mkNode(
- AND,
- nm->mkNode(GT, t, d_zero),
- nm->mkNode(EQUAL, t[0].eqNode(d_zero), t.eqNode(d_one)),
- nm->mkNode(EQUAL,
- nm->mkNode(LT, t[0], d_zero),
- nm->mkNode(LT, t, d_one)),
- nm->mkNode(OR,
- nm->mkNode(LEQ, t[0], d_zero),
- nm->mkNode(GT, t, nm->mkNode(PLUS, t[0], d_one))));
- }
- if (!lem.isNull())
- {
- lemmas.push_back(lem);
- }
- }
- }
- }
-
- return lemmas;
-}
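Written out with x = t[0], the SINE lemma assembled above is the following conjunction (a restatement of the code, for readability):

  -1 <= sin(x) <= 1                                         (bounds)
  sin(x) + sin(-x) = 0                                      (symmetry)
  (x < 0) <=> (sin(x) < 0),    (x > 0) <=> (sin(x) > 0)     (sign)
  x > 0 => sin(x) < x,         x < 0 => sin(x) > x          (zero tangent)
  x < pi => sin(x) < pi - x,   x > -pi => sin(x) > -pi - x  (pi tangent)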
-
-std::vector<Node> TranscendentalSolver::checkTranscendentalMonotonic()
-{
- std::vector<Node> lemmas;
- Trace("nl-ext") << "Get monotonicity lemmas for transcendental functions..."
- << std::endl;
-
- // sort arguments of all transcendentals
- std::map<Kind, std::vector<Node> > sorted_tf_args;
- std::map<Kind, std::map<Node, Node> > tf_arg_to_term;
-
- for (std::pair<const Kind, std::vector<Node> >& tfl : d_funcMap)
- {
- Kind k = tfl.first;
- if (k == EXPONENTIAL || k == SINE)
- {
- for (const Node& tf : tfl.second)
- {
- Node a = tf[0];
- Node mvaa = d_model.computeAbstractModelValue(a);
- if (mvaa.isConst())
- {
- Trace("nl-ext-tf-mono-debug") << "...tf term : " << a << std::endl;
- sorted_tf_args[k].push_back(a);
- tf_arg_to_term[k][a] = tf;
- }
- }
- }
- }
-
- SortNlModel smv;
- smv.d_nlm = &d_model;
- // sort by concrete values
- smv.d_isConcrete = true;
- smv.d_reverse_order = true;
- for (std::pair<const Kind, std::vector<Node> >& tfl : d_funcMap)
- {
- Kind k = tfl.first;
- if (!sorted_tf_args[k].empty())
- {
- std::sort(sorted_tf_args[k].begin(), sorted_tf_args[k].end(), smv);
- Trace("nl-ext-tf-mono") << "Sorted transcendental function list for " << k
- << " : " << std::endl;
- for (unsigned i = 0; i < sorted_tf_args[k].size(); i++)
- {
- Node targ = sorted_tf_args[k][i];
- Node mvatarg = d_model.computeAbstractModelValue(targ);
- Trace("nl-ext-tf-mono")
- << " " << targ << " -> " << mvatarg << std::endl;
- Node t = tf_arg_to_term[k][targ];
- Node mvat = d_model.computeAbstractModelValue(t);
- Trace("nl-ext-tf-mono") << " f-val : " << mvat << std::endl;
- }
- std::vector<Node> mpoints;
- std::vector<Node> mpoints_vals;
- if (k == SINE)
- {
- mpoints.push_back(d_pi);
- mpoints.push_back(d_pi_2);
- mpoints.push_back(d_zero);
- mpoints.push_back(d_pi_neg_2);
- mpoints.push_back(d_pi_neg);
- }
- else if (k == EXPONENTIAL)
- {
- mpoints.push_back(Node::null());
- }
- if (!mpoints.empty())
- {
- // get model values for points
- for (unsigned i = 0; i < mpoints.size(); i++)
- {
- Node mpv;
- if (!mpoints[i].isNull())
- {
- mpv = d_model.computeAbstractModelValue(mpoints[i]);
- Assert(mpv.isConst());
- }
- mpoints_vals.push_back(mpv);
- }
-
- unsigned mdir_index = 0;
- int monotonic_dir = -1;
- Node mono_bounds[2];
- Node targ, targval, t, tval;
- for (unsigned i = 0, size = sorted_tf_args[k].size(); i < size; i++)
- {
- Node sarg = sorted_tf_args[k][i];
- Node sargval = d_model.computeAbstractModelValue(sarg);
- Assert(sargval.isConst());
- Node s = tf_arg_to_term[k][sarg];
- Node sval = d_model.computeAbstractModelValue(s);
- Assert(sval.isConst());
-
- // increment to the proper monotonicity region
- bool increment = true;
- while (increment && mdir_index < mpoints.size())
- {
- increment = false;
- if (mpoints[mdir_index].isNull())
- {
- increment = true;
- }
- else
- {
- Node pval = mpoints_vals[mdir_index];
- Assert(pval.isConst());
- if (sargval.getConst<Rational>() < pval.getConst<Rational>())
- {
- increment = true;
- Trace("nl-ext-tf-mono") << "...increment at " << sarg
- << " since model value is less than "
- << mpoints[mdir_index] << std::endl;
- }
- }
- if (increment)
- {
- tval = Node::null();
- mono_bounds[1] = mpoints[mdir_index];
- mdir_index++;
- monotonic_dir = regionToMonotonicityDir(k, mdir_index);
- if (mdir_index < mpoints.size())
- {
- mono_bounds[0] = mpoints[mdir_index];
- }
- else
- {
- mono_bounds[0] = Node::null();
- }
- }
- }
- // store the concavity region
- d_tf_region[s] = mdir_index;
- Trace("nl-ext-concavity") << "Transcendental function " << s
- << " is in region #" << mdir_index;
- Trace("nl-ext-concavity")
- << ", arg model value = " << sargval << std::endl;
-
- if (!tval.isNull())
- {
- NodeManager* nm = NodeManager::currentNM();
- Node mono_lem;
- if (monotonic_dir == 1
- && sval.getConst<Rational>() > tval.getConst<Rational>())
- {
- mono_lem = nm->mkNode(
- IMPLIES, nm->mkNode(GEQ, targ, sarg), nm->mkNode(GEQ, t, s));
- }
- else if (monotonic_dir == -1
- && sval.getConst<Rational>() < tval.getConst<Rational>())
- {
- mono_lem = nm->mkNode(
- IMPLIES, nm->mkNode(LEQ, targ, sarg), nm->mkNode(LEQ, t, s));
- }
- if (!mono_lem.isNull())
- {
- if (!mono_bounds[0].isNull())
- {
- Assert(!mono_bounds[1].isNull());
- mono_lem = nm->mkNode(
- IMPLIES,
- nm->mkNode(AND,
- mkBounded(mono_bounds[0], targ, mono_bounds[1]),
- mkBounded(mono_bounds[0], sarg, mono_bounds[1])),
- mono_lem);
- }
- Trace("nl-ext-tf-mono")
- << "Monotonicity lemma : " << mono_lem << std::endl;
- lemmas.push_back(mono_lem);
- }
- }
- // store the previous values
- targ = sarg;
- targval = sargval;
- t = s;
- tval = sval;
- }
- }
- }
- }
- return lemmas;
-}
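For readability, the guarded lemma assembled in the loop above has the following shape, assuming mkBounded(l, x, u) stands for l <= x <= u (as mkValidPhase below suggests):

  ( l <= targ <= u  ^  l <= sarg <= u )  =>  ( targ >= sarg  =>  f(targ) >= f(sarg) )

for a region where f is increasing (monotonic_dir = 1), and dually with <= for a decreasing region. For EXPONENTIAL the bound guard is dropped, since its single region is unbounded.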
-
-std::vector<Node> TranscendentalSolver::checkTranscendentalTangentPlanes(
- std::map<Node, NlLemmaSideEffect>& lemSE)
-{
- std::vector<Node> lemmas;
- Trace("nl-ext") << "Get tangent plane lemmas for transcendental functions..."
- << std::endl;
-  // this implements Figure 3 of "Satisfiability Modulo Transcendental Functions
-  // via Incremental Linearization" by Cimatti et al.
- for (std::pair<const Kind, std::vector<Node> >& tfs : d_funcMap)
- {
- Kind k = tfs.first;
- if (k == PI)
- {
- // We do not use Taylor approximation for PI currently.
- // This is because the convergence is extremely slow, and hence an
- // initial approximation is superior.
- continue;
- }
- Trace("nl-ext-tftp-debug2") << "Taylor variables: " << std::endl;
- Trace("nl-ext-tftp-debug2")
- << " taylor_real_fv : " << d_taylor_real_fv << std::endl;
- Trace("nl-ext-tftp-debug2")
- << " taylor_real_fv_base : " << d_taylor_real_fv_base << std::endl;
- Trace("nl-ext-tftp-debug2")
- << " taylor_real_fv_base_rem : " << d_taylor_real_fv_base_rem
- << std::endl;
- Trace("nl-ext-tftp-debug2") << std::endl;
-
- // we substitute into the Taylor sum P_{n,f(0)}( x )
-
- for (const Node& tf : tfs.second)
- {
- // tf is Figure 3 : tf( x )
- Trace("nl-ext-tftp") << "Compute tangent planes " << tf << std::endl;
- // go until max degree is reached, or we don't meet bound criteria
- for (unsigned d = 1; d <= d_taylor_degree; d++)
- {
- Trace("nl-ext-tftp") << "- run at degree " << d << "..." << std::endl;
- unsigned prev = lemmas.size();
- if (checkTfTangentPlanesFun(tf, d, lemmas, lemSE))
- {
- Trace("nl-ext-tftp")
- << "...fail, #lemmas = " << (lemmas.size() - prev) << std::endl;
- break;
- }
- else
- {
- Trace("nl-ext-tftp") << "...success" << std::endl;
- }
- }
- }
- }
-
- return lemmas;
-}
-
-bool TranscendentalSolver::checkTfTangentPlanesFun(
- Node tf,
- unsigned d,
- std::vector<Node>& lemmas,
- std::map<Node, NlLemmaSideEffect>& lemSE)
-{
- NodeManager* nm = NodeManager::currentNM();
- Kind k = tf.getKind();
- // this should only be run on master applications
- Assert(d_trSlaves.find(tf) != d_trSlaves.end());
-
- // Figure 3 : c
- Node c = d_model.computeAbstractModelValue(tf[0]);
- int csign = c.getConst<Rational>().sgn();
- if (csign == 0)
- {
- // no secant/tangent plane is necessary
- return true;
- }
- Assert(csign == 1 || csign == -1);
-
- // Figure 3: P_l, P_u
- // mapped to for signs of c
- std::map<int, Node> poly_approx_bounds[2];
- std::vector<Node> pbounds;
- getPolynomialApproximationBoundForArg(k, c, d, pbounds);
- poly_approx_bounds[0][1] = pbounds[0];
- poly_approx_bounds[0][-1] = pbounds[1];
- poly_approx_bounds[1][1] = pbounds[2];
- poly_approx_bounds[1][-1] = pbounds[3];
-
- // Figure 3 : v
- Node v = d_model.computeAbstractModelValue(tf);
-
- // check value of tf
- Trace("nl-ext-tftp-debug") << "Process tangent plane refinement for " << tf
- << ", degree " << d << "..." << std::endl;
- Trace("nl-ext-tftp-debug") << " value in model : " << v << std::endl;
- Trace("nl-ext-tftp-debug") << " arg value in model : " << c << std::endl;
-
- std::vector<Node> taylor_vars;
- taylor_vars.push_back(d_taylor_real_fv);
-
- // compute the concavity
- int region = -1;
- std::unordered_map<Node, int, NodeHashFunction>::iterator itr =
- d_tf_region.find(tf);
- if (itr != d_tf_region.end())
- {
- region = itr->second;
- Trace("nl-ext-tftp-debug") << " region is : " << region << std::endl;
- }
- // Figure 3 : conc
-  int concavity = regionToConcavity(k, region);
- Trace("nl-ext-tftp-debug") << " concavity is : " << concavity << std::endl;
- if (concavity == 0)
- {
- // no secant/tangent plane is necessary
- return true;
- }
- // bounds for which we are this concavity
- // Figure 3: < l, u >
- Node bounds[2];
- if (k == SINE)
- {
- bounds[0] = regionToLowerBound(k, region);
- Assert(!bounds[0].isNull());
- bounds[1] = regionToUpperBound(k, region);
- Assert(!bounds[1].isNull());
- }
-
- // Figure 3: P
- Node poly_approx;
-
- // compute whether this is a tangent refinement or a secant refinement
- bool is_tangent = false;
- bool is_secant = false;
- std::pair<Node, Node> mvb = getTfModelBounds(tf, d);
- for (unsigned r = 0; r < 2; r++)
- {
- Node pab = poly_approx_bounds[r][csign];
- Node v_pab = r == 0 ? mvb.first : mvb.second;
- if (!v_pab.isNull())
- {
- Trace("nl-ext-tftp-debug2")
- << "...model value of " << pab << " is " << v_pab << std::endl;
-
- Assert(v_pab.isConst());
- Node comp = nm->mkNode(r == 0 ? LT : GT, v, v_pab);
- Trace("nl-ext-tftp-debug2") << "...compare : " << comp << std::endl;
- Node compr = Rewriter::rewrite(comp);
- Trace("nl-ext-tftp-debug2") << "...got : " << compr << std::endl;
- if (compr == d_true)
- {
- // beyond the bounds
- if (r == 0)
- {
- poly_approx = poly_approx_bounds[r][csign];
- is_tangent = concavity == 1;
- is_secant = concavity == -1;
- }
- else
- {
- poly_approx = poly_approx_bounds[r][csign];
- is_tangent = concavity == -1;
- is_secant = concavity == 1;
- }
- if (Trace.isOn("nl-ext-tftp"))
- {
- Trace("nl-ext-tftp") << "*** Outside boundary point (";
- Trace("nl-ext-tftp") << (r == 0 ? "low" : "high") << ") ";
- printRationalApprox("nl-ext-tftp", v_pab);
- Trace("nl-ext-tftp") << ", will refine..." << std::endl;
- Trace("nl-ext-tftp")
- << " poly_approx = " << poly_approx << std::endl;
- Trace("nl-ext-tftp")
- << " is_tangent = " << is_tangent << std::endl;
- Trace("nl-ext-tftp") << " is_secant = " << is_secant << std::endl;
- }
- break;
- }
- else
- {
- Trace("nl-ext-tftp")
- << " ...within " << (r == 0 ? "low" : "high") << " bound : ";
- printRationalApprox("nl-ext-tftp", v_pab);
- Trace("nl-ext-tftp") << std::endl;
- }
- }
- }
-
- // Figure 3: P( c )
- Node poly_approx_c;
- if (is_tangent || is_secant)
- {
- Assert(!poly_approx.isNull());
- std::vector<Node> taylor_subs;
- taylor_subs.push_back(c);
- Assert(taylor_vars.size() == taylor_subs.size());
- poly_approx_c = poly_approx.substitute(taylor_vars.begin(),
- taylor_vars.end(),
- taylor_subs.begin(),
- taylor_subs.end());
- Trace("nl-ext-tftp-debug2")
- << "...poly approximation at c is " << poly_approx_c << std::endl;
- }
- else
- {
- // we may want to continue getting better bounds
- return false;
- }
-
- if (is_tangent)
- {
- // compute tangent plane
- // Figure 3: T( x )
- // We use zero slope tangent planes, since the concavity of the Taylor
- // approximation cannot be easily established.
- Node tplane = poly_approx_c;
-
- Node lem = nm->mkNode(concavity == 1 ? GEQ : LEQ, tf, tplane);
- std::vector<Node> antec;
- int mdir = regionToMonotonicityDir(k, region);
- for (unsigned i = 0; i < 2; i++)
- {
- // Tangent plane is valid in the interval [c,u) if the slope of the
- // function matches its concavity, and is valid in (l, c] otherwise.
- Node use_bound = (mdir == concavity) == (i == 0) ? c : bounds[i];
- if (!use_bound.isNull())
- {
- Node ant = nm->mkNode(i == 0 ? GEQ : LEQ, tf[0], use_bound);
- antec.push_back(ant);
- }
- }
- if (!antec.empty())
- {
- Node antec_n = antec.size() == 1 ? antec[0] : nm->mkNode(AND, antec);
- lem = nm->mkNode(IMPLIES, antec_n, lem);
- }
- Trace("nl-ext-tftp-debug2")
- << "*** Tangent plane lemma (pre-rewrite): " << lem << std::endl;
- lem = Rewriter::rewrite(lem);
- Trace("nl-ext-tftp-lemma")
- << "*** Tangent plane lemma : " << lem << std::endl;
- Assert(d_model.computeAbstractModelValue(lem) == d_false);
- // Figure 3 : line 9
- lemmas.push_back(lem);
- }
- else if (is_secant)
- {
- // bounds are the minimum and maximum previous secant points
- // should not repeat secant points: secant lemmas should suffice to
- // rule out previous assignment
- Assert(std::find(
- d_secant_points[tf][d].begin(), d_secant_points[tf][d].end(), c)
- == d_secant_points[tf][d].end());
- // Insert into the (temporary) vector. We do not update this vector
- // until we are sure this secant plane lemma has been processed. We do
- // this by mapping the lemma to a side effect below.
- std::vector<Node> spoints = d_secant_points[tf][d];
- spoints.push_back(c);
-
- // sort
- SortNlModel smv;
- smv.d_nlm = &d_model;
- smv.d_isConcrete = true;
- std::sort(spoints.begin(), spoints.end(), smv);
- // get the resulting index of c
- unsigned index =
- std::find(spoints.begin(), spoints.end(), c) - spoints.begin();
- // bounds are the next closest upper/lower bound values
- if (index > 0)
- {
- bounds[0] = spoints[index - 1];
- }
- else
- {
- // otherwise, we use the lower boundary point for this concavity
- // region
- if (k == SINE)
- {
- Assert(!bounds[0].isNull());
- }
- else if (k == EXPONENTIAL)
- {
- // pick c-1
- bounds[0] = Rewriter::rewrite(nm->mkNode(MINUS, c, d_one));
- }
- }
- if (index < spoints.size() - 1)
- {
- bounds[1] = spoints[index + 1];
- }
- else
- {
- // otherwise, we use the upper boundary point for this concavity
- // region
- if (k == SINE)
- {
- Assert(!bounds[1].isNull());
- }
- else if (k == EXPONENTIAL)
- {
- // pick c+1
- bounds[1] = Rewriter::rewrite(nm->mkNode(PLUS, c, d_one));
- }
- }
- Trace("nl-ext-tftp-debug2") << "...secant bounds are : " << bounds[0]
- << " ... " << bounds[1] << std::endl;
-
- // the secant plane may be conjunction of 1-2 guarded inequalities
- std::vector<Node> lemmaConj;
- for (unsigned s = 0; s < 2; s++)
- {
- // compute secant plane
- Assert(!poly_approx.isNull());
- Assert(!bounds[s].isNull());
- // take the model value of l or u (since may contain PI)
- Node b = d_model.computeAbstractModelValue(bounds[s]);
- Trace("nl-ext-tftp-debug2") << "...model value of bound " << bounds[s]
- << " is " << b << std::endl;
- Assert(b.isConst());
- if (c != b)
- {
- // Figure 3 : P(l), P(u), for s = 0,1
- Node poly_approx_b;
- std::vector<Node> taylor_subs;
- taylor_subs.push_back(b);
- Assert(taylor_vars.size() == taylor_subs.size());
- poly_approx_b = poly_approx.substitute(taylor_vars.begin(),
- taylor_vars.end(),
- taylor_subs.begin(),
- taylor_subs.end());
- // Figure 3: S_l( x ), S_u( x ) for s = 0,1
- Node splane;
- Node rcoeff_n = Rewriter::rewrite(nm->mkNode(MINUS, b, c));
- Assert(rcoeff_n.isConst());
- Rational rcoeff = rcoeff_n.getConst<Rational>();
- Assert(rcoeff.sgn() != 0);
- poly_approx_b = Rewriter::rewrite(poly_approx_b);
- poly_approx_c = Rewriter::rewrite(poly_approx_c);
- splane = nm->mkNode(
- PLUS,
- poly_approx_b,
- nm->mkNode(MULT,
- nm->mkNode(MINUS, poly_approx_b, poly_approx_c),
- nm->mkConst(Rational(1) / rcoeff),
- nm->mkNode(MINUS, tf[0], b)));
-
- Node lem = nm->mkNode(concavity == 1 ? LEQ : GEQ, tf, splane);
- // With respect to Figure 3, this is slightly different.
- // In particular, we chose b to be the model value of bounds[s],
- // which is a constant although bounds[s] may not be (e.g. if it
- // contains PI).
- // To ensure that c...b does not cross an inflection point,
- // we guard with the symbolic version of bounds[s].
- // This leads to lemmas e.g. of this form:
- // ( c <= x <= PI/2 ) => ( sin(x) < ( P( b ) - P( c ) )*( x -
- // b ) + P( b ) )
- // where b = (PI/2)^M, the current value of PI/2 in the model.
- // This is sound since we are guarded by the symbolic
- // representation of PI/2.
- Node antec_n =
- nm->mkNode(AND,
- nm->mkNode(GEQ, tf[0], s == 0 ? bounds[s] : c),
- nm->mkNode(LEQ, tf[0], s == 0 ? c : bounds[s]));
- lem = nm->mkNode(IMPLIES, antec_n, lem);
- Trace("nl-ext-tftp-debug2")
- << "*** Secant plane lemma (pre-rewrite) : " << lem << std::endl;
- lem = Rewriter::rewrite(lem);
- Trace("nl-ext-tftp-lemma")
- << "*** Secant plane lemma : " << lem << std::endl;
- lemmaConj.push_back(lem);
- Assert(d_model.computeAbstractModelValue(lem) == d_false);
- }
- }
- // Figure 3 : line 22
- Assert(!lemmaConj.empty());
- Node lem =
- lemmaConj.size() == 1 ? lemmaConj[0] : nm->mkNode(AND, lemmaConj);
- lemmas.push_back(lem);
- // The side effect says that if lem is added, then we should add the
- // secant point c for (tf,d).
- lemSE[lem].d_secantPoint.push_back(std::make_tuple(tf, d, c));
- }
- return true;
-}
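In symbols, the two lemma families produced by this function are as follows (restating the code, with P the chosen polynomial bound, c the model value of the argument, and l, u the secant endpoints):

  tangent (zero slope):  <region guard>  =>  f(x) >= P(c)     if concavity = +1
                         <region guard>  =>  f(x) <= P(c)     if concavity = -1
  secant:                l <= x <= c     =>  f(x) <= S_l(x),
                         c <= x <= u     =>  f(x) <= S_u(x)   if concavity = +1
      (with >= instead of <= when concavity = -1), where
      S_b(x) = P(b) + ((P(b) - P(c)) / (b - c)) * (x - b)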
-
-int TranscendentalSolver::regionToMonotonicityDir(Kind k, int region)
-{
- if (k == EXPONENTIAL)
- {
- if (region == 1)
- {
- return 1;
- }
- }
- else if (k == SINE)
- {
- if (region == 1 || region == 4)
- {
- return -1;
- }
- else if (region == 2 || region == 3)
- {
- return 1;
- }
- }
- return 0;
-}
-
-int TranscendentalSolver::regionToConcavity(Kind k, int region)
-{
- if (k == EXPONENTIAL)
- {
- if (region == 1)
- {
- return 1;
- }
- }
- else if (k == SINE)
- {
- if (region == 1 || region == 2)
- {
- return -1;
- }
- else if (region == 3 || region == 4)
- {
- return 1;
- }
- }
- return 0;
-}
-
-Node TranscendentalSolver::regionToLowerBound(Kind k, int region)
-{
- if (k == SINE)
- {
- if (region == 1)
- {
- return d_pi_2;
- }
- else if (region == 2)
- {
- return d_zero;
- }
- else if (region == 3)
- {
- return d_pi_neg_2;
- }
- else if (region == 4)
- {
- return d_pi_neg;
- }
- }
- return Node::null();
-}
-
-Node TranscendentalSolver::regionToUpperBound(Kind k, int region)
-{
- if (k == SINE)
- {
- if (region == 1)
- {
- return d_pi;
- }
- else if (region == 2)
- {
- return d_pi_2;
- }
- else if (region == 3)
- {
- return d_zero;
- }
- else if (region == 4)
- {
- return d_pi_neg_2;
- }
- }
- return Node::null();
-}
-
-Node TranscendentalSolver::getDerivative(Node n, Node x)
-{
- NodeManager* nm = NodeManager::currentNM();
- Assert(x.isVar());
- // only handle the cases of the taylor expansion of d
- if (n.getKind() == EXPONENTIAL)
- {
- if (n[0] == x)
- {
- return n;
- }
- }
- else if (n.getKind() == SINE)
- {
- if (n[0] == x)
- {
- Node na = nm->mkNode(MINUS, d_pi_2, n[0]);
- Node ret = nm->mkNode(SINE, na);
- ret = Rewriter::rewrite(ret);
- return ret;
- }
- }
- else if (n.getKind() == PLUS)
- {
- std::vector<Node> dchildren;
- for (unsigned i = 0; i < n.getNumChildren(); i++)
- {
- // PLUS is flattened in rewriter, recursion depth is bounded by 1
- Node dc = getDerivative(n[i], x);
- if (dc.isNull())
- {
- return dc;
- }
- else
- {
- dchildren.push_back(dc);
- }
- }
- return nm->mkNode(PLUS, dchildren);
- }
- else if (n.getKind() == MULT)
- {
- Assert(n[0].isConst());
- Node dc = getDerivative(n[1], x);
- if (!dc.isNull())
- {
- return nm->mkNode(MULT, n[0], dc);
- }
- }
- else if (n.getKind() == NONLINEAR_MULT)
- {
- unsigned xcount = 0;
- std::vector<Node> children;
- unsigned xindex = 0;
- for (unsigned i = 0, size = n.getNumChildren(); i < size; i++)
- {
- if (n[i] == x)
- {
- xcount++;
- xindex = i;
- }
- children.push_back(n[i]);
- }
- if (xcount == 0)
- {
- return d_zero;
- }
- else
- {
- children[xindex] = nm->mkConst(Rational(xcount));
- }
- return nm->mkNode(MULT, children);
- }
- else if (n.isVar())
- {
- return n == x ? d_one : d_zero;
- }
- else if (n.isConst())
- {
- return d_zero;
- }
- Trace("nl-ext-debug") << "No derivative computed for " << n;
- Trace("nl-ext-debug") << " for d/d{" << x << "}" << std::endl;
- return Node::null();
-}
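Two small instances of the cases above, worked by hand for illustration:

  d/dx sin(x)      = sin(pi/2 - x)    (the SINE case; this equals cos(x))
  d/dx (x * x * y) = 2 * x * y        (the NONLINEAR_MULT case: xcount = 2 and one
                                       occurrence of x is replaced by the constant 2)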
-
-std::pair<Node, Node> TranscendentalSolver::getTaylor(Node fa, unsigned n)
-{
- NodeManager* nm = NodeManager::currentNM();
- Assert(n > 0);
- Node fac; // what term we cache for fa
- if (fa[0] == d_zero)
- {
- // optimization : simpler to compute (x-fa[0])^n if we are centered around 0
- fac = fa;
- }
- else
- {
- // otherwise we use a standard factor a in (x-a)^n
- fac = nm->mkNode(fa.getKind(), d_taylor_real_fv_base);
- }
- Node taylor_rem;
- Node taylor_sum;
- // check if we have already computed this Taylor series
- std::unordered_map<unsigned, Node>::iterator itt = d_taylor_sum[fac].find(n);
- if (itt == d_taylor_sum[fac].end())
- {
- Node i_exp_base;
- if (fa[0] == d_zero)
- {
- i_exp_base = d_taylor_real_fv;
- }
- else
- {
- i_exp_base = Rewriter::rewrite(
- nm->mkNode(MINUS, d_taylor_real_fv, d_taylor_real_fv_base));
- }
- Node i_derv = fac;
- Node i_fact = d_one;
- Node i_exp = d_one;
- int i_derv_status = 0;
- unsigned counter = 0;
- std::vector<Node> sum;
- do
- {
- counter++;
- if (fa.getKind() == EXPONENTIAL)
- {
- // unchanged
- }
- else if (fa.getKind() == SINE)
- {
- if (i_derv_status % 2 == 1)
- {
- Node arg = nm->mkNode(PLUS, d_pi_2, d_taylor_real_fv_base);
- i_derv = nm->mkNode(SINE, arg);
- }
- else
- {
- i_derv = fa;
- }
- if (i_derv_status >= 2)
- {
- i_derv = nm->mkNode(MINUS, d_zero, i_derv);
- }
- i_derv = Rewriter::rewrite(i_derv);
- i_derv_status = i_derv_status == 3 ? 0 : i_derv_status + 1;
- }
- if (counter == (n + 1))
- {
- TNode x = d_taylor_real_fv_base;
- i_derv = i_derv.substitute(x, d_taylor_real_fv_base_rem);
- }
- Node curr = nm->mkNode(MULT, nm->mkNode(DIVISION, i_derv, i_fact), i_exp);
- if (counter == (n + 1))
- {
- taylor_rem = curr;
- }
- else
- {
- sum.push_back(curr);
- i_fact = Rewriter::rewrite(
- nm->mkNode(MULT, nm->mkConst(Rational(counter)), i_fact));
- i_exp = Rewriter::rewrite(nm->mkNode(MULT, i_exp_base, i_exp));
- }
- } while (counter <= n);
- taylor_sum = sum.size() == 1 ? sum[0] : nm->mkNode(PLUS, sum);
-
- if (fac[0] != d_taylor_real_fv_base)
- {
- TNode x = d_taylor_real_fv_base;
- taylor_sum = taylor_sum.substitute(x, fac[0]);
- }
-
- // cache
- d_taylor_sum[fac][n] = taylor_sum;
- d_taylor_rem[fac][n] = taylor_rem;
- }
- else
- {
- taylor_sum = itt->second;
- Assert(d_taylor_rem[fac].find(n) != d_taylor_rem[fac].end());
- taylor_rem = d_taylor_rem[fac][n];
- }
-
- // must substitute for the argument if we were using a different lookup
- if (fa[0] != fac[0])
- {
- TNode x = d_taylor_real_fv_base;
- taylor_sum = taylor_sum.substitute(x, fa[0]);
- }
- return std::pair<Node, Node>(taylor_sum, taylor_rem);
-}
-
-void TranscendentalSolver::getPolynomialApproximationBounds(
- Kind k, unsigned d, std::vector<Node>& pbounds)
-{
- if (d_poly_bounds[k][d].empty())
- {
- NodeManager* nm = NodeManager::currentNM();
- Node tft = nm->mkNode(k, d_zero);
- // n is the Taylor degree we are currently considering
- unsigned n = 2 * d;
- // n must be even
- std::pair<Node, Node> taylor = getTaylor(tft, n);
- Trace("nl-ext-tftp-debug2")
- << "Taylor for " << k << " is : " << taylor.first << std::endl;
- Node taylor_sum = Rewriter::rewrite(taylor.first);
- Trace("nl-ext-tftp-debug2")
- << "Taylor for " << k << " is (post-rewrite) : " << taylor_sum
- << std::endl;
- Assert(taylor.second.getKind() == MULT);
- Assert(taylor.second.getNumChildren() == 2);
- Assert(taylor.second[0].getKind() == DIVISION);
- Trace("nl-ext-tftp-debug2")
- << "Taylor remainder for " << k << " is " << taylor.second << std::endl;
- // ru is x^{n+1}/(n+1)!
- Node ru = nm->mkNode(DIVISION, taylor.second[1], taylor.second[0][1]);
- ru = Rewriter::rewrite(ru);
- Trace("nl-ext-tftp-debug2")
- << "Taylor remainder factor is (post-rewrite) : " << ru << std::endl;
- if (k == EXPONENTIAL)
- {
- pbounds.push_back(taylor_sum);
- pbounds.push_back(taylor_sum);
- pbounds.push_back(Rewriter::rewrite(
- nm->mkNode(MULT, taylor_sum, nm->mkNode(PLUS, d_one, ru))));
- pbounds.push_back(Rewriter::rewrite(nm->mkNode(PLUS, taylor_sum, ru)));
- }
- else
- {
- Assert(k == SINE);
- Node l = Rewriter::rewrite(nm->mkNode(MINUS, taylor_sum, ru));
- Node u = Rewriter::rewrite(nm->mkNode(PLUS, taylor_sum, ru));
- pbounds.push_back(l);
- pbounds.push_back(l);
- pbounds.push_back(u);
- pbounds.push_back(u);
- }
- Trace("nl-ext-tf-tplanes")
- << "Polynomial approximation for " << k << " is: " << std::endl;
- Trace("nl-ext-tf-tplanes") << " Lower (pos): " << pbounds[0] << std::endl;
- Trace("nl-ext-tf-tplanes") << " Upper (pos): " << pbounds[2] << std::endl;
- Trace("nl-ext-tf-tplanes") << " Lower (neg): " << pbounds[1] << std::endl;
- Trace("nl-ext-tf-tplanes") << " Upper (neg): " << pbounds[3] << std::endl;
- d_poly_bounds[k][d].insert(
- d_poly_bounds[k][d].end(), pbounds.begin(), pbounds.end());
- }
- else
- {
- pbounds.insert(
- pbounds.end(), d_poly_bounds[k][d].begin(), d_poly_bounds[k][d].end());
- }
-}
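In symbols, the four bounds pushed above are as follows, where P is the rewritten Taylor sum and ru the remainder factor (this merely restates the two branches; pbounds is ordered lower-pos, lower-neg, upper-pos, upper-neg):

  EXPONENTIAL:  P_l+ = P_l- = P,         P_u+ = P * (1 + ru),   P_u- = P + ru
  SINE:         P_l+ = P_l- = P - ru,    P_u+ = P_u- = P + ru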
-
-void TranscendentalSolver::getPolynomialApproximationBoundForArg(
- Kind k, Node c, unsigned d, std::vector<Node>& pbounds)
-{
- getPolynomialApproximationBounds(k, d, pbounds);
- Assert(c.isConst());
- if (k == EXPONENTIAL && c.getConst<Rational>().sgn() == 1)
- {
- NodeManager* nm = NodeManager::currentNM();
- Node tft = nm->mkNode(k, d_zero);
- bool success = false;
- unsigned ds = d;
- TNode ttrf = d_taylor_real_fv;
- TNode tc = c;
- do
- {
- success = true;
- unsigned n = 2 * ds;
- std::pair<Node, Node> taylor = getTaylor(tft, n);
- // check that 1-c^{n+1}/(n+1)! > 0
- Node ru = nm->mkNode(DIVISION, taylor.second[1], taylor.second[0][1]);
- Node rus = ru.substitute(ttrf, tc);
- rus = Rewriter::rewrite(rus);
- Assert(rus.isConst());
- if (rus.getConst<Rational>() > d_one.getConst<Rational>())
- {
- success = false;
- ds = ds + 1;
- }
- } while (!success);
- if (ds > d)
- {
- Trace("nl-ext-exp-taylor")
- << "*** Increase Taylor bound to " << ds << " > " << d << " for ("
- << k << " " << c << ")" << std::endl;
- // must use sound upper bound
- std::vector<Node> pboundss;
- getPolynomialApproximationBounds(k, ds, pboundss);
- pbounds[2] = pboundss[2];
- }
- }
-}
-
-std::pair<Node, Node> TranscendentalSolver::getTfModelBounds(Node tf,
- unsigned d)
-{
- // compute the model value of the argument
- Node c = d_model.computeAbstractModelValue(tf[0]);
- Assert(c.isConst());
- int csign = c.getConst<Rational>().sgn();
- Kind k = tf.getKind();
- if (csign == 0)
- {
-    // at zero, it is trivial
- if (k == SINE)
- {
- return std::pair<Node, Node>(d_zero, d_zero);
- }
- Assert(k == EXPONENTIAL);
- return std::pair<Node, Node>(d_one, d_one);
- }
- bool isNeg = csign == -1;
-
- std::vector<Node> pbounds;
- getPolynomialApproximationBoundForArg(k, c, d, pbounds);
-
- std::vector<Node> bounds;
- TNode tfv = d_taylor_real_fv;
- TNode tfs = tf[0];
- for (unsigned d2 = 0; d2 < 2; d2++)
- {
- int index = d2 == 0 ? (isNeg ? 1 : 0) : (isNeg ? 3 : 2);
- Node pab = pbounds[index];
- if (!pab.isNull())
- {
- // { x -> tf[0] }
- pab = pab.substitute(tfv, tfs);
- pab = Rewriter::rewrite(pab);
- Node v_pab = d_model.computeAbstractModelValue(pab);
- bounds.push_back(v_pab);
- }
- else
- {
- bounds.push_back(Node::null());
- }
- }
- return std::pair<Node, Node>(bounds[0], bounds[1]);
-}
-
-Node TranscendentalSolver::mkValidPhase(Node a, Node pi)
-{
- return mkBounded(
- NodeManager::currentNM()->mkNode(MULT, mkRationalNode(-1), pi), a, pi);
-}
-
-} // namespace arith
-} // namespace theory
-} // namespace CVC4
+++ /dev/null
-/********************* */
-/*! \file transcendental_solver.h
- ** \verbatim
- ** Top contributors (to current version):
- ** Andrew Reynolds
- ** This file is part of the CVC4 project.
- ** Copyright (c) 2009-2019 by the authors listed in the file AUTHORS
- ** in the top-level source directory) and their institutional affiliations.
- ** All rights reserved. See the file COPYING in the top-level source
- ** directory for licensing information.\endverbatim
- **
- ** \brief Solver for handling transcendental functions.
- **/
-
-#ifndef CVC4__THEORY__ARITH__TRANSCENDENTAL_SOLVER_H
-#define CVC4__THEORY__ARITH__TRANSCENDENTAL_SOLVER_H
-
-#include <map>
-#include <unordered_map>
-#include <unordered_set>
-#include <vector>
-
-#include "expr/node.h"
-#include "theory/arith/nl_lemma_utils.h"
-#include "theory/arith/nl_model.h"
-
-namespace CVC4 {
-namespace theory {
-namespace arith {
-
-/** Transcendental solver class
- *
- * This class implements model-based refinement schemes
- * for transcendental functions, described in:
- *
- * - "Satisfiability Modulo Transcendental
- * Functions via Incremental Linearization" by Cimatti
- * et al., CADE 2017.
- *
- * Its main functionality is a set of methods that implement the lemma schemas
- * below, which return lemmas that should be sent on the output channel.
- */
-class TranscendentalSolver
-{
- public:
- TranscendentalSolver(NlModel& m);
- ~TranscendentalSolver();
-
- /** init last call
- *
- * This is called at the beginning of last call effort check, where
- * assertions are the set of assertions belonging to arithmetic,
- * false_asserts is the subset of assertions that are false in the current
- * model, and xts is the set of extended function terms that are active in
- * the current context.
- *
- * This call may add lemmas to lems/lemsPp based on registering term
- * information (for example, purification of sine terms).
- */
- void initLastCall(const std::vector<Node>& assertions,
- const std::vector<Node>& false_asserts,
- const std::vector<Node>& xts,
- std::vector<Node>& lems,
- std::vector<Node>& lemsPp);
- /** increment taylor degree */
- void incrementTaylorDegree();
- /** get taylor degree */
- unsigned getTaylorDegree() const;
- /** preprocess assertions check model
- *
- * This modifies the given assertions in preparation for running a call
- * to check model.
- *
- * This method returns false if a bound for a transcendental function
- * was conflicting.
- */
- bool preprocessAssertionsCheckModel(std::vector<Node>& assertions);
- /** Process side effect se */
- void processSideEffect(const NlLemmaSideEffect& se);
- //-------------------------------------------- lemma schemas
- /** check transcendental initial refine
- *
- * Returns a set of valid theory lemmas, based on
- * simple facts about transcendental functions.
- * This mostly follows the initial axioms described in
- * Section 4 of "Satisfiability
- * Modulo Transcendental Functions via Incremental
- * Linearization" by Cimatti et al., CADE 2017.
- *
- * Examples:
- *
- * sin( x ) = -sin( -x )
- * ( PI > x > 0 ) => 0 < sin( x ) < 1
- * exp( x )>0
- * x<0 => exp( x )<1
- */
- std::vector<Node> checkTranscendentalInitialRefine();
-
- /** check transcendental monotonic
- *
- * Returns a set of valid theory lemmas, based on a
- * lemma scheme that ensures that applications
- * of transcendental functions respect monotonicity.
- *
- * Examples:
- *
- * x > y => exp( x ) > exp( y )
- * PI/2 > x > y > 0 => sin( x ) > sin( y )
- * PI > x > y > PI/2 => sin( x ) < sin( y )
- */
- std::vector<Node> checkTranscendentalMonotonic();
-
- /** check transcendental tangent planes
- *
- * Returns a set of valid theory lemmas, based on
- * computing an "incremental linearization" of
- * transcendental functions based on the model values
- * of transcendental functions and their arguments.
- * It is based on Figure 3 of "Satisfiability
- * Modulo Transcendental Functions via Incremental
- * Linearization" by Cimatti et al., CADE 2017.
- * This schema is not terminating in general.
- * It is not enabled by default, and can
- * be enabled by --nl-ext-tf-tplanes.
- *
- * Example:
- *
-   * Assume we have a term sin(y) where M( y ) = 1, with M the current model.
- * Note that:
- * sin(1) ~= .841471
- *
- * The Taylor series and remainder of sin(y) of degree 7 is
-   *   P_{7,sin(0)}( x ) = x + (-1/6)*x^3 + (1/120)*x^5
- * R_{7,sin(0),b}( x ) = (-1/5040)*x^7
- *
- * This gives us lower and upper bounds :
- * P_u( x ) = P_{7,sin(0)}( x ) + R_{7,sin(0),b}( x )
- * ...where note P_u( 1 ) = 4243/5040 ~= .841865
- * P_l( x ) = P_{7,sin(0)}( x ) - R_{7,sin(0),b}( x )
- * ...where note P_l( 1 ) = 4241/5040 ~= .841468
- *
- * Assume that M( sin(y) ) > P_u( 1 ).
- * Since the concavity of sine in the region 0 < x < PI/2 is -1,
- * we add a tangent plane refinement.
- * The tangent plane at the point 1 in P_u is
- * given by the formula:
- * T( x ) = P_u( 1 ) + ((d/dx)(P_u(x)))( 1 )*( x - 1 )
- * We add the lemma:
- * ( 0 < y < PI/2 ) => sin( y ) <= T( y )
- * which is:
- * ( 0 < y < PI/2 ) => sin( y ) <= (391/720)*(y - 2737/1506)
- *
-   * Assume that M( sin(y) ) < P_l( 1 ).
- * Since the concavity of sine in the region 0 < x < PI/2 is -1,
- * we add a secant plane refinement for some constants ( l, u )
- * such that 0 <= l < M( y ) < u <= PI/2. Assume we choose
- * l = 0 and u = M( PI/2 ) = 150517/47912.
- * The secant planes at point 1 for P_l
- * are given by the formulas:
- * S_l( x ) = (x-l)*(P_l( l )-P_l(c))/(l-1) + P_l( l )
- * S_u( x ) = (x-u)*(P_l( u )-P_l(c))/(u-1) + P_l( u )
- * We add the lemmas:
- * ( 0 < y < 1 ) => sin( y ) >= S_l( y )
- * ( 1 < y < PI/2 ) => sin( y ) >= S_u( y )
- * which are:
-   *   ( 0 < y < 1 ) => (sin y) >= 4241/5040*y
- * ( 1 < y < PI/2 ) => (sin y) >= c1*(y+c2)
- * where c1, c2 are rationals (for brevity, omitted here)
- * such that c1 ~= .277 and c2 ~= 2.032.
- *
- * The argument lemSE is the "side effect" of the lemmas in the return
- * value of this function (for details, see checkLastCall).
- */
- std::vector<Node> checkTranscendentalTangentPlanes(
- std::map<Node, NlLemmaSideEffect>& lemSE);
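A hand-computed check of the numbers quoted in the example above: P_{7,sin(0)}(1) = 1 - 1/6 + 1/120 = 4242/5040 and the remainder at 1 has magnitude 1/5040, so

  P_u( 1 ) = 4243/5040 ~= .841865,   P_l( 1 ) = 4241/5040 ~= .841468,

which bracket sin(1) ~= .841471 as claimed. Likewise the secant through (0, P_l(0) = 0) and (1, P_l(1)) has slope 4241/5040, matching the first secant lemma above.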
- /** check transcendental function refinement for tf
- *
- * This method is called by the above method for each "master"
- * transcendental function application that occurs in an assertion in the
- * current context. For example, an application like sin(t) is not a master
- * if we have introduced the constraints:
- * t=y+2*pi*n ^ -pi <= y <= pi ^ sin(t) = sin(y).
- * See d_trMaster/d_trSlaves for more detail.
- *
- * This runs Figure 3 of Cimatti et al., CADE 2017 for transcendental
- * function application tf for Taylor degree d. It may add a secant or
- * tangent plane lemma to lems and its side effect (if one exists)
- * to lemSE.
- *
- * It returns false if the bounds are not precise enough to add a
- * secant or tangent plane lemma.
- */
- bool checkTfTangentPlanesFun(Node tf,
- unsigned d,
- std::vector<Node>& lems,
- std::map<Node, NlLemmaSideEffect>& lemSE);
- //-------------------------------------------- end lemma schemas
- private:
- /** polynomial approximation bounds
- *
- * This adds P_l+[x], P_l-[x], P_u+[x], P_u-[x] to pbounds, where x is
- * d_taylor_real_fv. These are polynomial approximations of the Taylor series
- * of <k>( 0 ) for degree 2*d where k is SINE or EXPONENTIAL.
- * These correspond to P_l and P_u from Figure 3 of Cimatti et al., CADE 2017,
- * for positive/negative (+/-) values of the argument of <k>( 0 ).
- *
- * Notice that for certain bounds (e.g. upper bounds for exponential), the
- * Taylor approximation for a fixed degree is only sound up to a given
- * upper bound on the argument. To obtain sound lower/upper bounds for a
- * given <k>( c ), use the function below.
- */
- void getPolynomialApproximationBounds(Kind k,
- unsigned d,
- std::vector<Node>& pbounds);
- /** polynomial approximation bounds
- *
- * This computes polynomial approximations P_l+[x], P_l-[x], P_u+[x], P_u-[x]
- * that are sound (lower, upper) bounds for <k>( c ). Notice that these
- * polynomials may depend on c. In particular, for P_u+[x] for <k>( c ) where
- * c>0, we return the P_u+[x] from the function above for the minimum degree
- * d' >= d such that (1-c^{2*d'+1}/(2*d'+1)!) is positive.
- */
- void getPolynomialApproximationBoundForArg(Kind k,
- Node c,
- unsigned d,
- std::vector<Node>& pbounds);
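A worked instance of the degree bump, using the condition exactly as stated above (hand arithmetic, for illustration): for EXPONENTIAL with c = 4 and d = 1, we need the least d' >= 1 with c^{2*d'+1}/(2*d'+1)! < 1; since 4^3/3! ~= 10.7, 4^5/5! ~= 8.5, 4^7/7! ~= 3.3 and 4^9/9! ~= 0.72, the returned upper bound is the one for d' = 4.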
- /** get transcendental function model bounds
- *
- * This returns the current lower and upper bounds of transcendental
- * function application tf based on Taylor of degree 2*d, which is dependent
- * on the model value of its argument.
- */
- std::pair<Node, Node> getTfModelBounds(Node tf, unsigned d);
- /** get monotonicity direction
- *
-   * Returns whether the slope is positive (+1) or negative (-1)
-   * in the given region of the transcendental function with kind k.
-   * Returns 0 if the region is invalid.
- */
- int regionToMonotonicityDir(Kind k, int region);
- /** get concavity
- *
-   * Returns whether the function is convex (+1) or concave (-1)
-   * in the given region of the transcendental function with kind k,
-   * where regions are defined above.
-   * Returns 0 if the region is invalid.
- */
- int regionToConcavity(Kind k, int region);
- /** region to lower bound
- *
- * Returns the term corresponding to the lower
- * bound of the region of transcendental function
- * with kind k. Returns Node::null if the region
- * is invalid, or there is no lower bound for the
- * region.
- */
- Node regionToLowerBound(Kind k, int region);
- /** region to upper bound
- *
- * Returns the term corresponding to the upper
- * bound of the region of transcendental function
- * with kind k. Returns Node::null if the region
- * is invalid, or there is no upper bound for the
- * region.
- */
- Node regionToUpperBound(Kind k, int region);
- /** get derivative
- *
- * Returns d/dx n. Supports cases of n
- * for transcendental functions applied to x,
- * multiplication, addition, constants and variables.
- * Returns Node::null() if derivative is an
- * unhandled case.
- */
- Node getDerivative(Node n, Node x);
-
- void mkPi();
- void getCurrentPiBounds(std::vector<Node>& lemmas);
- /** Make the node -pi <= a <= pi */
- static Node mkValidPhase(Node a, Node pi);
-
- /** Reference to the non-linear model object */
- NlModel& d_model;
- /** commonly used terms */
- Node d_zero;
- Node d_one;
- Node d_neg_one;
- Node d_true;
- Node d_false;
- /**
- * Some transcendental functions f(t) are "purified", e.g. we add
- * t = y ^ f(t) = f(y) where y is a fresh variable. Those that are not
- * purified we call "master terms".
- *
- * The maps below maintain a master/slave relationship over
- * transcendental functions (SINE, EXPONENTIAL, PI), where above
- * f(y) is the master of itself and of f(t).
- *
- * This is used for ensuring that the argument y of SINE we process is on the
- * interval [-pi .. pi], and that exponentials are not applied to arguments
- * that contain transcendental functions.
- */
- std::map<Node, Node> d_trMaster;
- std::map<Node, std::unordered_set<Node, NodeHashFunction>> d_trSlaves;
- /** The transcendental functions we have done initial refinements on */
- std::map<Node, bool> d_tf_initial_refine;
-
- /** concavity region for transcendental functions
- *
- * This stores an integer that identifies an interval in
- * which the current model value for an argument of an
- * application of a transcendental function resides.
- *
- * For exp( x ):
- * region #1 is -infty < x < infty
- * For sin( x ):
- * region #0 is pi < x < infty (this is an invalid region)
- * region #1 is pi/2 < x <= pi
- * region #2 is 0 < x <= pi/2
- * region #3 is -pi/2 < x <= 0
- * region #4 is -pi < x <= -pi/2
- * region #5 is -infty < x <= -pi (this is an invalid region)
- * All regions not listed above, as well as regions 0 and 5
-   * for SINE, are "invalid". We only process applications
- * of transcendental functions whose arguments have model
- * values that reside in valid regions.
- */
- std::unordered_map<Node, int, NodeHashFunction> d_tf_region;
- /** cache of the above function */
- std::map<Kind, std::map<unsigned, std::vector<Node>>> d_poly_bounds;
-
- /**
-   * Maps representatives of a congruence class to the members of that class.
- *
- * In detail, a congruence class is a set of terms of the form
- * { f(t1), ..., f(tn) }
- * such that t1 = ... = tn in the current context. We choose an arbitrary
-   * term among these to be the representative of this congruence class.
- *
- * Moreover, notice we compute congruence classes only over terms that
- * are transcendental function applications that are "master terms",
-   * see d_trMaster/d_trSlaves.
- */
- std::map<Node, std::vector<Node>> d_funcCongClass;
- /**
- * A list of all functions for each kind in { EXPONENTIAL, SINE, POW, PI }
-   * that are representatives of their congruence class.
- */
- std::map<Kind, std::vector<Node>> d_funcMap;
-
- // tangent plane bounds
- std::map<Node, std::map<Node, Node>> d_tangent_val_bound[4];
-
- /** secant points (sorted list) for transcendental functions
- *
- * This is used for tangent plane refinements for
- * transcendental functions. This is the set
- * "get-previous-secant-points" in "Satisfiability
- * Modulo Transcendental Functions via Incremental
- * Linearization" by Cimatti et al., CADE 2017, for
- * each transcendental function application. We store this set for each
- * Taylor degree.
- */
- std::unordered_map<Node,
- std::map<unsigned, std::vector<Node>>,
- NodeHashFunction>
- d_secant_points;
-
- /** get Taylor series of degree n for function fa centered around point fa[0].
- *
- * Return value is ( P_{n,f(a)}( x ), R_{n+1,f(a)}( x ) ) where
- * the first part of the pair is the Taylor series expansion :
- * P_{n,f(a)}( x ) = sum_{i=0}^n (f^i( a )/i!)*(x-a)^i
- * and the second part of the pair is the Taylor series remainder :
- * R_{n+1,f(a),b}( x ) = (f^{n+1}( b )/(n+1)!)*(x-a)^{n+1}
- *
- * The above values are cached for each (f,n) for a fixed variable "a".
- * To compute the Taylor series for fa, we compute the Taylor series
- * for ( fa.getKind(), n ) then substitute { a -> fa[0] } if fa[0]!=0.
- * We compute P_{n,f(0)}( x )/R_{n+1,f(0),b}( x ) for ( fa.getKind(), n )
- * if fa[0]=0.
-   * In the latter case, note we compute the power x^{n+1}
- * instead of (x-a)^{n+1}, which can be done faster.
- */
- std::pair<Node, Node> getTaylor(Node fa, unsigned n);
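Instantiating the formulas above for f = exp, a = 0 and n = 2 (a direct substitution, for illustration):

  P_{2,exp(0)}( x )   = 1 + x + (1/2)*x^2
  R_{3,exp(0),b}( x ) = (exp( b )/6)*x^3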
-
- /** internal variables used for constructing (cached) versions of the Taylor
- * series above.
- */
- Node d_taylor_real_fv; // x above
- Node d_taylor_real_fv_base; // a above
- Node d_taylor_real_fv_base_rem; // b above
-
- /** cache of sum and remainder terms for getTaylor */
- std::unordered_map<Node, std::unordered_map<unsigned, Node>, NodeHashFunction>
- d_taylor_sum;
- std::unordered_map<Node, std::unordered_map<unsigned, Node>, NodeHashFunction>
- d_taylor_rem;
- /** taylor degree
- *
- * Indicates that the degree of the polynomials in the Taylor approximation of
- * all transcendental functions is 2*d_taylor_degree. This value is set
- * initially to options::nlExtTfTaylorDegree() and may be incremented
- * if the option options::nlExtTfIncPrecision() is enabled.
- */
- unsigned d_taylor_degree;
- /** PI
- *
- * Note that PI is a (symbolic, non-constant) nullary operator. This is
-   * because its value cannot be computed exactly. We constrain PI to concrete
- * lower and upper bounds stored in d_pi_bound below.
- */
- Node d_pi;
- /** PI/2 */
- Node d_pi_2;
- /** -PI/2 */
- Node d_pi_neg_2;
- /** -PI */
- Node d_pi_neg;
- /** the concrete lower and upper bounds for PI */
- Node d_pi_bound[2];
-}; /* class TranscendentalSolver */
-
-} // namespace arith
-} // namespace theory
-} // namespace CVC4
-
-#endif /* CVC4__THEORY__ARITH__TRANSCENDENTAL_SOLVER_H */