The transcendental solver has grown over time, and a refactoring was due.
This PR splits the transcendental solver into five components:
a utility to compute Taylor approximations
a common state for transcendental solvers
a solver for the exponential function
a solver for the sine function
a solver that wraps everything into a transcendental solver.
theory/arith/nl/stats.h
theory/arith/nl/strategy.cpp
theory/arith/nl/strategy.h
- theory/arith/nl/transcendental_solver.cpp
- theory/arith/nl/transcendental_solver.h
+ theory/arith/nl/transcendental/exponential_solver.cpp
+ theory/arith/nl/transcendental/exponential_solver.h
+ theory/arith/nl/transcendental/sine_solver.cpp
+ theory/arith/nl/transcendental/sine_solver.h
+ theory/arith/nl/transcendental/taylor_generator.cpp
+ theory/arith/nl/transcendental/taylor_generator.h
+ theory/arith/nl/transcendental/transcendental_solver.cpp
+ theory/arith/nl/transcendental/transcendental_solver.h
+ theory/arith/nl/transcendental/transcendental_state.cpp
+ theory/arith/nl/transcendental/transcendental_state.h
theory/arith/normal_form.cpp
theory/arith/normal_form.h
theory/arith/operator_elim.cpp
bool operator()(Node i, Node j);
};
+/**
+ * Wrapper for std::sort that uses SortNlModel to sort an iterator range.
+ * @param begin Start of the range to sort
+ * @param end End of the range to sort
+ * @param model Model consulted by the comparator (stored in SortNlModel::d_nlm)
+ * @param concrete Forwarded to SortNlModel::d_isConcrete
+ * @param absolute Forwarded to SortNlModel::d_isAbsolute
+ * @param reverse Forwarded to SortNlModel::d_reverse_order
+ */
+template <typename It>
+void sortByNlModel(It begin,
+ It end,
+ NlModel* model,
+ bool concrete = true,
+ bool absolute = false,
+ bool reverse = false)
+{
+ SortNlModel smv;
+ smv.d_nlm = model;
+ smv.d_isConcrete = concrete;
+ smv.d_isAbsolute = absolute;
+ smv.d_reverse_order = reverse;
+ std::sort(begin, end, smv);
+}
+
struct SortNonlinearDegree
{
SortNonlinearDegree(const std::map<Node, unsigned>& m) : d_mdegree(m) {}
#include "theory/arith/nl/nl_model.h"
#include "theory/arith/nl/stats.h"
#include "theory/arith/nl/strategy.h"
-#include "theory/arith/nl/transcendental_solver.h"
+#include "theory/arith/nl/transcendental/transcendental_solver.h"
#include "theory/ext_theory.h"
#include "theory/uf/equality_engine.h"
* This is the subsolver responsible for running the procedure for
* transcendental functions.
*/
- TranscendentalSolver d_trSlv;
+ transcendental::TranscendentalSolver d_trSlv;
/**
* Holds common lookup data for the checks implemented in the "nl-ext"
* solvers (from Cimatti et al., TACAS 2017).
--- /dev/null
+/********************* */
+/*! \file exponential_solver.cpp
+ ** \verbatim
+ ** Top contributors (to current version):
+ ** Andrew Reynolds, Tim King, Gereon Kremer
+ ** This file is part of the CVC4 project.
+ ** Copyright (c) 2009-2020 by the authors listed in the file AUTHORS
+ ** in the top-level source directory and their institutional affiliations.
+ ** All rights reserved. See the file COPYING in the top-level source
+ ** directory for licensing information.\endverbatim
+ **
+ ** \brief Implementation of the solver for handling the exponential function.
+ **/
+
+#include "theory/arith/nl/transcendental/exponential_solver.h"
+
+#include <cmath>
+#include <set>
+
+#include "expr/node_algorithm.h"
+#include "expr/node_builder.h"
+#include "options/arith_options.h"
+#include "theory/arith/arith_msum.h"
+#include "theory/arith/arith_utilities.h"
+#include "theory/rewriter.h"
+
+namespace CVC4 {
+namespace theory {
+namespace arith {
+namespace nl {
+namespace transcendental {
+
+// The constructor only stores a pointer to the shared transcendental state;
+// no lemmas or side effects are produced here.
+ExponentialSolver::ExponentialSolver(TranscendentalState* tstate)
+ : d_data(tstate)
+{
+}
+
+// The solver does not own d_data, so there is nothing to release.
+ExponentialSolver::~ExponentialSolver() {}
+
+// Emits, once per exponential application t = exp(t[0]), a single conjunctive
+// lemma of basic axioms (positivity, value at zero, sign, tangent at zero).
+void ExponentialSolver::checkInitialRefine()
+{
+ NodeManager* nm = NodeManager::currentNM();
+ Trace("nl-ext")
+ << "Get initial refinement lemmas for transcendental functions..."
+ << std::endl;
+ // d_funcMap groups transcendental applications by kind; only the
+ // EXPONENTIAL group is handled by this solver.
+ for (std::pair<const Kind, std::vector<Node> >& tfl : d_data->d_funcMap)
+ {
+ if (tfl.first != Kind::EXPONENTIAL)
+ {
+ continue;
+ }
+ // NOTE(review): this assertion is trivially true after the guard above
+ // and could be removed.
+ Assert(tfl.first == Kind::EXPONENTIAL);
+ for (const Node& t : tfl.second)
+ {
+ // initial refinements
+ if (d_tf_initial_refine.find(t) == d_tf_initial_refine.end())
+ {
+ d_tf_initial_refine[t] = true;
+ Node lem;
+ // ( exp(x) > 0 ) ^ ( x=0 <=> exp( x ) = 1 ) ^ ( x < 0 <=> exp( x ) <
+ // 1 ) ^ ( x <= 0 V exp( x ) > x + 1 )
+ lem = nm->mkNode(
+ Kind::AND,
+ nm->mkNode(Kind::GT, t, d_data->d_zero),
+ nm->mkNode(Kind::EQUAL,
+ t[0].eqNode(d_data->d_zero),
+ t.eqNode(d_data->d_one)),
+ nm->mkNode(Kind::EQUAL,
+ nm->mkNode(Kind::LT, t[0], d_data->d_zero),
+ nm->mkNode(Kind::LT, t, d_data->d_one)),
+ nm->mkNode(
+ Kind::OR,
+ nm->mkNode(Kind::LEQ, t[0], d_data->d_zero),
+ nm->mkNode(
+ Kind::GT, t, nm->mkNode(Kind::PLUS, t[0], d_data->d_one))));
+ // lem is always non-null here; the guard is purely defensive
+ if (!lem.isNull())
+ {
+ d_data->d_im.addPendingArithLemma(lem, InferenceId::NL_T_INIT_REFINE);
+ }
+ }
+ }
+ }
+}
+
+// Emits lemmas of the form (targ >= sarg) => (exp(targ) >= exp(sarg)) for
+// consecutive exponential arguments whose model values violate monotonicity.
+void ExponentialSolver::checkMonotonic()
+{
+ Trace("nl-ext") << "Get monotonicity lemmas for transcendental functions..."
+ << std::endl;
+
+ auto it = d_data->d_funcMap.find(Kind::EXPONENTIAL);
+ if (it == d_data->d_funcMap.end())
+ {
+ Trace("nl-ext-exp") << "No exponential terms" << std::endl;
+ return;
+ }
+
+ // sort arguments of all transcendentals
+ std::vector<Node> tf_args;
+ std::map<Node, Node> tf_arg_to_term;
+
+ // collect only arguments that have a constant abstract model value
+ for (const Node& tf : it->second)
+ {
+ Node a = tf[0];
+ Node mvaa = d_data->d_model.computeAbstractModelValue(a);
+ if (mvaa.isConst())
+ {
+ Trace("nl-ext-exp") << "...tf term : " << a << std::endl;
+ tf_args.push_back(a);
+ tf_arg_to_term[a] = tf;
+ }
+ }
+
+ if (tf_args.empty())
+ {
+ return;
+ }
+
+ // sort by concrete model value; reverse = true, cf. sortByNlModel
+ sortByNlModel(
+ tf_args.begin(), tf_args.end(), &d_data->d_model, true, false, true);
+
+ // targ/t/tval carry the argument/application/value of the previous
+ // iteration; they are null on the first iteration.
+ Node targ, targval, t, tval;
+ for (const auto& sarg : tf_args)
+ {
+ Node sargval = d_data->d_model.computeAbstractModelValue(sarg);
+ Assert(sargval.isConst());
+ Node s = tf_arg_to_term[sarg];
+ Node sval = d_data->d_model.computeAbstractModelValue(s);
+ Assert(sval.isConst());
+
+ // store the concavity region
+ // (the exponential function has a single region, hence the constant 1)
+ d_data->d_tf_region[s] = 1;
+ // NOTE(review): this trace starts with ", " but no leading message is
+ // printed here — compare SineSolver::checkMonotonic.
+ Trace("nl-ext-concavity") << ", arg model value = " << sargval << std::endl;
+
+ if (!tval.isNull() && sval.getConst<Rational>() > tval.getConst<Rational>())
+ {
+ NodeManager* nm = NodeManager::currentNM();
+ Node mono_lem = nm->mkNode(Kind::IMPLIES,
+ nm->mkNode(Kind::GEQ, targ, sarg),
+ nm->mkNode(Kind::GEQ, t, s));
+ Trace("nl-ext-exp") << "Monotonicity lemma : " << mono_lem << std::endl;
+
+ d_data->d_im.addPendingArithLemma(mono_lem,
+ InferenceId::NL_T_MONOTONICITY);
+ }
+ // store the previous values
+ // NOTE(review): targval is written here but never read in this method.
+ targ = sarg;
+ targval = sargval;
+ t = s;
+ tval = sval;
+ }
+}
+
+// Emits the lemma (e[0] >= c) => (e >= poly_approx). The assertion below
+// checks that the lemma refutes the current model before it is sent.
+void ExponentialSolver::doTangentLemma(TNode e, TNode c, TNode poly_approx)
+{
+ NodeManager* nm = NodeManager::currentNM();
+ // compute tangent plane
+ // Figure 3: T( x )
+ // We use zero slope tangent planes, since the concavity of the Taylor
+ // approximation cannot be easily established.
+ // Tangent plane is valid in the interval [c,u).
+ Node lem = nm->mkNode(Kind::IMPLIES,
+ nm->mkNode(Kind::GEQ, e[0], c),
+ nm->mkNode(Kind::GEQ, e, poly_approx));
+ Trace("nl-ext-exp") << "*** Tangent plane lemma (pre-rewrite): " << lem
+ << std::endl;
+ lem = Rewriter::rewrite(lem);
+ Trace("nl-ext-exp") << "*** Tangent plane lemma : " << lem << std::endl;
+ Assert(d_data->d_model.computeAbstractModelValue(lem) == d_data->d_false);
+ // Figure 3 : line 9
+ d_data->d_im.addPendingArithLemma(lem, InferenceId::NL_T_TANGENT, nullptr, true);
+}
+
+// Delegates secant lemma construction to the common state. The final
+// argument (1) plays the role of the concavity parameter; compare
+// SineSolver::doSecantLemmas, which passes regionToConcavity(region).
+void ExponentialSolver::doSecantLemmas(TNode e,
+ TNode c,
+ TNode poly_approx,
+ unsigned d)
+{
+ d_data->doSecantLemmas(getSecantBounds(e, c, d), c, poly_approx, e, d, 1);
+}
+
+// Returns the secant endpoints around point c for term e at Taylor degree d.
+// Previously used secant points are preferred; if none exists on a side, the
+// bound defaults to c-1 (lower) or c+1 (upper).
+std::pair<Node, Node> ExponentialSolver::getSecantBounds(TNode e,
+ TNode c,
+ unsigned d)
+{
+ std::pair<Node, Node> bounds = d_data->getClosestSecantPoints(e, c, d);
+
+ // Check if we already have neighboring secant points
+ if (bounds.first.isNull())
+ {
+ // pick c-1
+ bounds.first = Rewriter::rewrite(
+ NodeManager::currentNM()->mkNode(Kind::MINUS, c, d_data->d_one));
+ }
+ if (bounds.second.isNull())
+ {
+ // pick c+1
+ bounds.second = Rewriter::rewrite(
+ NodeManager::currentNM()->mkNode(Kind::PLUS, c, d_data->d_one));
+ }
+ return bounds;
+}
+
+} // namespace transcendental
+} // namespace nl
+} // namespace arith
+} // namespace theory
+} // namespace CVC4
--- /dev/null
+/********************* */
+/*! \file exponential_solver.h
+ ** \verbatim
+ ** Top contributors (to current version):
+ ** Andrew Reynolds, Tim King
+ ** This file is part of the CVC4 project.
+ ** Copyright (c) 2009-2020 by the authors listed in the file AUTHORS
+ ** in the top-level source directory and their institutional affiliations.
+ ** All rights reserved. See the file COPYING in the top-level source
+ ** directory for licensing information.\endverbatim
+ **
+ ** \brief Solver for handling the exponential function.
+ **/
+
+#ifndef CVC4__THEORY__ARITH__NL__TRANSCENDENTAL__EXPONENTIAL_SOLVER_H
+#define CVC4__THEORY__ARITH__NL__TRANSCENDENTAL__EXPONENTIAL_SOLVER_H
+
+#include <map>
+#include <unordered_map>
+#include <unordered_set>
+#include <vector>
+
+#include "expr/node.h"
+#include "theory/arith/inference_manager.h"
+#include "theory/arith/nl/nl_model.h"
+#include "theory/arith/nl/transcendental/transcendental_state.h"
+
+namespace CVC4 {
+namespace theory {
+namespace arith {
+namespace nl {
+namespace transcendental {
+
+/** Exponential solver class
+ *
+ * This class implements model-based refinement schemes
+ * for the exponential function, described in:
+ *
+ * - "Satisfiability Modulo Transcendental
+ * Functions via Incremental Linearization" by Cimatti
+ * et al., CADE 2017.
+ *
+ * Its main functionality is a set of methods that implement the lemma
+ * schemas below, which return a set of lemmas that should be sent on the
+ * output channel.
+ */
+class ExponentialSolver
+{
+ public:
+ ExponentialSolver(TranscendentalState* tstate);
+ ~ExponentialSolver();
+
+ /** Initialize for the given assertions and extended terms.
+ * NOTE(review): no definition of initLastCall appears in
+ * exponential_solver.cpp in this patch — confirm it is implemented or
+ * remove this declaration.
+ */
+ void initLastCall(const std::vector<Node>& assertions,
+ const std::vector<Node>& false_asserts,
+ const std::vector<Node>& xts);
+
+ /**
+ * check initial refine
+ *
+ * Constructs a set of valid theory lemmas, based on
+ * simple facts about the exponential function.
+ * This mostly follows the initial axioms described in
+ * Section 4 of "Satisfiability
+ * Modulo Transcendental Functions via Incremental
+ * Linearization" by Cimatti et al., CADE 2017.
+ *
+ * Examples:
+ *
+ * exp( x )>0
+ * x<0 => exp( x )<1
+ */
+ void checkInitialRefine();
+
+ /**
+ * check monotonicity
+ *
+ * Constructs a set of valid theory lemmas, based on a
+ * lemma scheme that ensures that applications
+ * of the exponential function respect monotonicity.
+ *
+ * Examples:
+ *
+ * x > y => exp( x ) > exp( y )
+ */
+ void checkMonotonic();
+
+ /** Sends a tangent lemma around point c for term e,
+ * using the polynomial approximation poly_approx. */
+ void doTangentLemma(TNode e, TNode c, TNode poly_approx);
+
+ /** Sends secant lemmas around point c for term e at Taylor degree d. */
+ void doSecantLemmas(TNode e, TNode c, TNode poly_approx, unsigned d);
+
+ private:
+ /** Generate bounds for secant lemmas (previously used secant points,
+ * falling back to c-1 and c+1). */
+ std::pair<Node, Node> getSecantBounds(TNode e, TNode c, unsigned d);
+
+ /** Holds common state for transcendental solvers (not owned) */
+ TranscendentalState* d_data;
+
+ /** The transcendental functions we have done initial refinements on */
+ std::map<Node, bool> d_tf_initial_refine;
+
+}; /* class ExponentialSolver */
+
+} // namespace transcendental
+} // namespace nl
+} // namespace arith
+} // namespace theory
+} // namespace CVC4
+
+#endif /* CVC4__THEORY__ARITH__NL__TRANSCENDENTAL__EXPONENTIAL_SOLVER_H */
--- /dev/null
+/********************* */
+/*! \file sine_solver.cpp
+ ** \verbatim
+ ** Top contributors (to current version):
+ ** Andrew Reynolds, Tim King, Gereon Kremer
+ ** This file is part of the CVC4 project.
+ ** Copyright (c) 2009-2020 by the authors listed in the file AUTHORS
+ ** in the top-level source directory and their institutional affiliations.
+ ** All rights reserved. See the file COPYING in the top-level source
+ ** directory for licensing information.\endverbatim
+ **
+ ** \brief Implementation of the solver for handling the sine function.
+ **/
+
+#include "theory/arith/nl/transcendental/sine_solver.h"
+
+#include <cmath>
+#include <set>
+
+#include "expr/node_algorithm.h"
+#include "expr/node_builder.h"
+#include "options/arith_options.h"
+#include "theory/arith/arith_msum.h"
+#include "theory/arith/arith_utilities.h"
+#include "theory/rewriter.h"
+
+namespace CVC4 {
+namespace theory {
+namespace arith {
+namespace nl {
+namespace transcendental {
+
+// The constructor only stores a pointer to the shared transcendental state.
+SineSolver::SineSolver(TranscendentalState* tstate) : d_data(tstate) {}
+
+// The solver does not own d_data, so there is nothing to release.
+SineSolver::~SineSolver() {}
+
+// Emits, once per sine application t = sin(t[0]), a conjunctive lemma of
+// basic axioms: range bounds, symmetry sin(-x) = -sin(x), sign agreement
+// with the argument, and tangent lines at 0 and at +/- pi.
+void SineSolver::checkInitialRefine()
+{
+ NodeManager* nm = NodeManager::currentNM();
+ Trace("nl-ext")
+ << "Get initial refinement lemmas for transcendental functions..."
+ << std::endl;
+ // d_funcMap groups transcendental applications by kind; only the SINE
+ // group is handled by this solver.
+ for (std::pair<const Kind, std::vector<Node> >& tfl : d_data->d_funcMap)
+ {
+ if (tfl.first != Kind::SINE)
+ {
+ continue;
+ }
+ for (const Node& t : tfl.second)
+ {
+ // initial refinements
+ if (d_tf_initial_refine.find(t) == d_tf_initial_refine.end())
+ {
+ d_tf_initial_refine[t] = true;
+ Node lem;
+ // symn is the symmetric application sin(-t[0])
+ Node symn = nm->mkNode(Kind::SINE,
+ nm->mkNode(Kind::MULT, d_data->d_neg_one, t[0]));
+ symn = Rewriter::rewrite(symn);
+ // Can assume it is its own master since phase is split over 0,
+ // hence -pi <= t[0] <= pi implies -pi <= -t[0] <= pi.
+ d_data->d_trMaster[symn] = symn;
+ d_data->d_trSlaves[symn].insert(symn);
+ Assert(d_data->d_trSlaves.find(t) != d_data->d_trSlaves.end());
+ // NOTE(review): 'children' is unused and can be removed.
+ std::vector<Node> children;
+
+ lem =
+ nm->mkNode(Kind::AND,
+ // bounds
+ nm->mkNode(Kind::AND,
+ nm->mkNode(Kind::LEQ, t, d_data->d_one),
+ nm->mkNode(Kind::GEQ, t, d_data->d_neg_one)),
+ // symmetry
+ nm->mkNode(Kind::PLUS, t, symn).eqNode(d_data->d_zero),
+ // sign
+ nm->mkNode(Kind::EQUAL,
+ nm->mkNode(Kind::LT, t[0], d_data->d_zero),
+ nm->mkNode(Kind::LT, t, d_data->d_zero)),
+ // zero val
+ nm->mkNode(Kind::EQUAL,
+ nm->mkNode(Kind::GT, t[0], d_data->d_zero),
+ nm->mkNode(Kind::GT, t, d_data->d_zero)));
+ lem = nm->mkNode(
+ Kind::AND,
+ lem,
+ // zero tangent
+ nm->mkNode(Kind::AND,
+ nm->mkNode(Kind::IMPLIES,
+ nm->mkNode(Kind::GT, t[0], d_data->d_zero),
+ nm->mkNode(Kind::LT, t, t[0])),
+ nm->mkNode(Kind::IMPLIES,
+ nm->mkNode(Kind::LT, t[0], d_data->d_zero),
+ nm->mkNode(Kind::GT, t, t[0]))),
+ // pi tangent
+ nm->mkNode(
+ Kind::AND,
+ nm->mkNode(
+ Kind::IMPLIES,
+ nm->mkNode(Kind::LT, t[0], d_data->d_pi),
+ nm->mkNode(Kind::LT,
+ t,
+ nm->mkNode(Kind::MINUS, d_data->d_pi, t[0]))),
+ nm->mkNode(
+ Kind::IMPLIES,
+ nm->mkNode(Kind::GT, t[0], d_data->d_pi_neg),
+ nm->mkNode(
+ Kind::GT,
+ t,
+ nm->mkNode(Kind::MINUS, d_data->d_pi_neg, t[0])))));
+ // lem is always non-null here; the guard is purely defensive
+ if (!lem.isNull())
+ {
+ d_data->d_im.addPendingArithLemma(lem, InferenceId::NL_T_INIT_REFINE);
+ }
+ }
+ }
+ }
+}
+
+// Emits monotonicity lemmas for sine applications whose model values violate
+// the monotonicity of sin within one of its monotonic regions (delimited by
+// -pi, -pi/2, 0, pi/2, pi).
+void SineSolver::checkMonotonic()
+{
+ Trace("nl-ext") << "Get monotonicity lemmas for transcendental functions..."
+ << std::endl;
+
+ auto it = d_data->d_funcMap.find(Kind::SINE);
+ if (it == d_data->d_funcMap.end())
+ {
+ // NOTE(review): the "nl-ext-exp" trace tag looks copied from the
+ // exponential solver; a sine-specific tag may be intended here.
+ Trace("nl-ext-exp") << "No sine terms" << std::endl;
+ return;
+ }
+
+ // sort arguments of all transcendentals
+ std::vector<Node> tf_args;
+ std::map<Node, Node> tf_arg_to_term;
+
+ // collect only arguments that have a constant abstract model value
+ for (const Node& tf : it->second)
+ {
+ Node a = tf[0];
+ Node mvaa = d_data->d_model.computeAbstractModelValue(a);
+ if (mvaa.isConst())
+ {
+ Trace("nl-ext-tf-mono-debug") << "...tf term : " << a << std::endl;
+ tf_args.push_back(a);
+ tf_arg_to_term[a] = tf;
+ }
+ }
+
+ if (tf_args.empty())
+ {
+ return;
+ }
+
+ // sort by concrete model value; reverse = true, cf. sortByNlModel
+ sortByNlModel(
+ tf_args.begin(), tf_args.end(), &d_data->d_model, true, false, true);
+
+ // region boundary points, listed from pi down to -pi to match the order
+ // produced by the reverse sort above
+ std::vector<Node> mpoints = {d_data->d_pi,
+ d_data->d_pi_2,
+ d_data->d_zero,
+ d_data->d_pi_neg_2,
+ d_data->d_pi_neg};
+ std::vector<Node> mpoints_vals;
+
+ // get model values for points
+ for (const auto& point : mpoints)
+ {
+ mpoints_vals.emplace_back(d_data->d_model.computeAbstractModelValue(point));
+ Assert(mpoints_vals.back().isConst());
+ }
+
+ unsigned mdir_index = 0;
+ int monotonic_dir = -1;
+ Node mono_bounds[2];
+ // targ/t/tval carry the argument/application/value of the previous
+ // iteration; they are null on the first iteration.
+ Node targ, targval, t, tval;
+ for (const auto& sarg : tf_args)
+ {
+ Node sargval = d_data->d_model.computeAbstractModelValue(sarg);
+ Assert(sargval.isConst());
+ Node s = tf_arg_to_term[sarg];
+ Node sval = d_data->d_model.computeAbstractModelValue(s);
+ Assert(sval.isConst());
+
+ // increment to the proper monotonicity region
+ bool increment = true;
+ while (increment && mdir_index < mpoints.size())
+ {
+ increment = false;
+ Node pval = mpoints_vals[mdir_index];
+ Assert(pval.isConst());
+ if (sargval.getConst<Rational>() < pval.getConst<Rational>())
+ {
+ increment = true;
+ Trace("nl-ext-tf-mono")
+ << "...increment at " << sarg << " since model value is less than "
+ << mpoints[mdir_index] << std::endl;
+ }
+ if (increment)
+ {
+ // crossing a region boundary invalidates the previous value
+ tval = Node::null();
+ mono_bounds[1] = mpoints[mdir_index];
+ mdir_index++;
+ monotonic_dir = regionToMonotonicityDir(mdir_index);
+ if (mdir_index < mpoints.size())
+ {
+ mono_bounds[0] = mpoints[mdir_index];
+ }
+ else
+ {
+ mono_bounds[0] = Node::null();
+ }
+ }
+ }
+ // store the concavity region
+ d_data->d_tf_region[s] = mdir_index;
+ Trace("nl-ext-concavity")
+ << "Transcendental function " << s << " is in region #" << mdir_index;
+ Trace("nl-ext-concavity") << ", arg model value = " << sargval << std::endl;
+
+ if (!tval.isNull())
+ {
+ NodeManager* nm = NodeManager::currentNM();
+ Node mono_lem;
+ if (monotonic_dir == 1
+ && sval.getConst<Rational>() > tval.getConst<Rational>())
+ {
+ mono_lem = nm->mkNode(Kind::IMPLIES,
+ nm->mkNode(Kind::GEQ, targ, sarg),
+ nm->mkNode(Kind::GEQ, t, s));
+ }
+ else if (monotonic_dir == -1
+ && sval.getConst<Rational>() < tval.getConst<Rational>())
+ {
+ mono_lem = nm->mkNode(Kind::IMPLIES,
+ nm->mkNode(Kind::LEQ, targ, sarg),
+ nm->mkNode(Kind::LEQ, t, s));
+ }
+ if (!mono_lem.isNull())
+ {
+ // guard the lemma by both arguments lying in the current region
+ if (!mono_bounds[0].isNull())
+ {
+ Assert(!mono_bounds[1].isNull());
+ mono_lem = nm->mkNode(
+ Kind::IMPLIES,
+ nm->mkNode(Kind::AND,
+ mkBounded(mono_bounds[0], targ, mono_bounds[1]),
+ mkBounded(mono_bounds[0], sarg, mono_bounds[1])),
+ mono_lem);
+ }
+ Trace("nl-ext-tf-mono")
+ << "Monotonicity lemma : " << mono_lem << std::endl;
+
+ d_data->d_im.addPendingArithLemma(mono_lem,
+ InferenceId::NL_T_MONOTONICITY);
+ }
+ }
+ // store the previous values
+ // NOTE(review): targval is written here but never read in this method.
+ targ = sarg;
+ targval = sargval;
+ t = s;
+ tval = sval;
+ }
+}
+
+// Emits a one-sided tangent-plane lemma for sin at point c, restricted to an
+// interval within the given concavity region. The side of the bound (GEQ vs
+// LEQ) and the interval endpoints are chosen from the region's concavity and
+// monotonicity direction.
+void SineSolver::doTangentLemma(TNode e, TNode c, TNode poly_approx, int region)
+{
+ NodeManager* nm = NodeManager::currentNM();
+
+ // compute tangent plane
+ // Figure 3: T( x )
+ // We use zero slope tangent planes, since the concavity of the Taylor
+ // approximation cannot be easily established.
+ int concavity = regionToConcavity(region);
+ int mdir = regionToMonotonicityDir(region);
+ Node lem = nm->mkNode(
+ Kind::IMPLIES,
+ nm->mkNode(
+ Kind::AND,
+ nm->mkNode(Kind::GEQ,
+ e[0],
+ mdir == concavity ? Node(c) : regionToLowerBound(region)),
+ nm->mkNode(Kind::LEQ,
+ e[0],
+ mdir != concavity ? Node(c) : regionToUpperBound(region))),
+ nm->mkNode(concavity == 1 ? Kind::GEQ : Kind::LEQ, e, poly_approx));
+
+ Trace("nl-ext-sine") << "*** Tangent plane lemma (pre-rewrite): " << lem
+ << std::endl;
+ lem = Rewriter::rewrite(lem);
+ Trace("nl-ext-sine") << "*** Tangent plane lemma : " << lem << std::endl;
+ Assert(d_data->d_model.computeAbstractModelValue(lem) == d_data->d_false);
+ // Figure 3 : line 9
+ d_data->d_im.addPendingArithLemma(lem, InferenceId::NL_T_TANGENT, nullptr, true);
+}
+
+// Delegates secant lemma construction to the common state, passing the
+// region-specific secant endpoints and the region's concavity.
+void SineSolver::doSecantLemmas(
+ TNode e, TNode c, TNode poly_approx, unsigned d, int region)
+{
+ d_data->doSecantLemmas(getSecantBounds(e, c, d, region),
+ c,
+ poly_approx,
+ e,
+ d,
+ regionToConcavity(region));
+}
+
+// Returns the secant endpoints around point c for term e at Taylor degree d.
+// Previously used secant points are preferred; missing sides default to the
+// boundary points of the given concavity region.
+std::pair<Node, Node> SineSolver::getSecantBounds(TNode e,
+ TNode c,
+ unsigned d,
+ int region)
+{
+ std::pair<Node, Node> bounds = d_data->getClosestSecantPoints(e, c, d);
+
+ // Check if we already have neighboring secant points
+ if (bounds.first.isNull())
+ {
+ // lower boundary point for this concavity region
+ bounds.first = regionToLowerBound(region);
+ }
+ if (bounds.second.isNull())
+ {
+ // upper boundary point for this concavity region
+ bounds.second = regionToUpperBound(region);
+ }
+ return bounds;
+}
+
+} // namespace transcendental
+} // namespace nl
+} // namespace arith
+} // namespace theory
+} // namespace CVC4
--- /dev/null
+/********************* */
+/*! \file sine_solver.h
+ ** \verbatim
+ ** Top contributors (to current version):
+ ** Andrew Reynolds, Tim King
+ ** This file is part of the CVC4 project.
+ ** Copyright (c) 2009-2020 by the authors listed in the file AUTHORS
+ ** in the top-level source directory and their institutional affiliations.
+ ** All rights reserved. See the file COPYING in the top-level source
+ ** directory for licensing information.\endverbatim
+ **
+ ** \brief Solver for handling the sine function.
+ **/
+
+#ifndef CVC4__THEORY__ARITH__NL__TRANSCENDENTAL__SINE_SOLVER_H
+#define CVC4__THEORY__ARITH__NL__TRANSCENDENTAL__SINE_SOLVER_H
+
+#include <map>
+#include <unordered_map>
+#include <unordered_set>
+#include <vector>
+
+#include "expr/node.h"
+#include "theory/arith/inference_manager.h"
+#include "theory/arith/nl/nl_model.h"
+#include "theory/arith/nl/transcendental/transcendental_state.h"
+
+namespace CVC4 {
+namespace theory {
+namespace arith {
+namespace nl {
+namespace transcendental {
+
+/** Sine solver class
+ *
+ * This class implements model-based refinement schemes
+ * for the sine function, described in:
+ *
+ * - "Satisfiability Modulo Transcendental
+ * Functions via Incremental Linearization" by Cimatti
+ * et al., CADE 2017.
+ *
+ * Its main functionality is a set of methods that implement the lemma
+ * schemas below, which return a set of lemmas that should be sent on the
+ * output channel.
+ */
+class SineSolver
+{
+ public:
+ SineSolver(TranscendentalState* tstate);
+ ~SineSolver();
+
+ /** Initialize for the given assertions and extended terms.
+ * NOTE(review): no definition of initLastCall appears in sine_solver.cpp
+ * in this patch — confirm it is implemented or remove this declaration.
+ */
+ void initLastCall(const std::vector<Node>& assertions,
+ const std::vector<Node>& false_asserts,
+ const std::vector<Node>& xts);
+
+ /**
+ * check initial refine
+ *
+ * Constructs a set of valid theory lemmas, based on
+ * simple facts about the sine function.
+ * This mostly follows the initial axioms described in
+ * Section 4 of "Satisfiability
+ * Modulo Transcendental Functions via Incremental
+ * Linearization" by Cimatti et al., CADE 2017.
+ *
+ * Examples:
+ *
+ * sin( x ) = -sin( -x )
+ * ( PI > x > 0 ) => 0 < sin( x ) < 1
+ */
+ void checkInitialRefine();
+
+ /**
+ * check monotonicity
+ *
+ * Constructs a set of valid theory lemmas, based on a
+ * lemma scheme that ensures that applications
+ * of the sine function respect monotonicity.
+ *
+ * Examples:
+ *
+ * PI/2 > x > y > 0 => sin( x ) > sin( y )
+ * PI > x > y > PI/2 => sin( x ) < sin( y )
+ */
+ void checkMonotonic();
+
+ /** Sends a tangent lemma around point c for term e within the given
+ * concavity region, using the polynomial approximation poly_approx. */
+ void doTangentLemma(TNode e, TNode c, TNode poly_approx, int region);
+
+ /** Sends secant lemmas around point c for term e at Taylor degree d
+ * within the given concavity region. */
+ void doSecantLemmas(
+ TNode e, TNode c, TNode poly_approx, unsigned d, int region);
+
+ private:
+ /** Generate bounds for secant lemmas (previously used secant points,
+ * falling back to the region's boundary points). */
+ std::pair<Node, Node> getSecantBounds(TNode e,
+ TNode c,
+ unsigned d,
+ int region);
+
+ /** region to lower bound
+ *
+ * Returns the term corresponding to the lower
+ * bound of the given concavity region of the sine
+ * function. Returns Node::null if the region
+ * is invalid, or there is no lower bound for the
+ * region.
+ */
+ Node regionToLowerBound(int region)
+ {
+ switch (region)
+ {
+ case 1: return d_data->d_pi_2;
+ case 2: return d_data->d_zero;
+ case 3: return d_data->d_pi_neg_2;
+ case 4: return d_data->d_pi_neg;
+ default: return Node();
+ }
+ }
+
+ /** region to upper bound
+ *
+ * Returns the term corresponding to the upper
+ * bound of the given concavity region of the sine
+ * function. Returns Node::null if the region
+ * is invalid, or there is no upper bound for the
+ * region.
+ */
+ Node regionToUpperBound(int region)
+ {
+ switch (region)
+ {
+ case 1: return d_data->d_pi;
+ case 2: return d_data->d_pi_2;
+ case 3: return d_data->d_zero;
+ case 4: return d_data->d_pi_neg_2;
+ default: return Node();
+ }
+ }
+
+ /** Returns the direction of monotonicity of sin in the given region:
+ * 1 (increasing), -1 (decreasing), or 0 for an invalid region. */
+ int regionToMonotonicityDir(int region)
+ {
+ switch (region)
+ {
+ case 1:
+ case 4: return -1;
+ case 2:
+ case 3: return 1;
+ default: return 0;
+ }
+ }
+ /** Returns the concavity of sin in the given region:
+ * -1 (concave), 1 (convex), or 0 for an invalid region. */
+ int regionToConcavity(int region)
+ {
+ switch (region)
+ {
+ case 1:
+ case 2: return -1;
+ case 3:
+ case 4: return 1;
+ default: return 0;
+ }
+ }
+
+ /** Holds common state for transcendental solvers (not owned) */
+ TranscendentalState* d_data;
+
+ /** The transcendental functions we have done initial refinements on */
+ std::map<Node, bool> d_tf_initial_refine;
+
+}; /* class SineSolver */
+
+} // namespace transcendental
+} // namespace nl
+} // namespace arith
+} // namespace theory
+} // namespace CVC4
+
+#endif /* CVC4__THEORY__ARITH__NL__TRANSCENDENTAL__SINE_SOLVER_H */
--- /dev/null
+/********************* */
+/*! \file taylor_generator.cpp
+ ** \verbatim
+ ** Top contributors (to current version):
+ ** Andrew Reynolds, Tim King
+ ** This file is part of the CVC4 project.
+ ** Copyright (c) 2009-2020 by the authors listed in the file AUTHORS
+ ** in the top-level source directory and their institutional affiliations.
+ ** All rights reserved. See the file COPYING in the top-level source
+ ** directory for licensing information.\endverbatim
+ **
+ ** \brief Generate taylor approximations transcendental lemmas.
+ **/
+
+#include "theory/arith/nl/transcendental/taylor_generator.h"
+
+#include "theory/arith/arith_utilities.h"
+#include "theory/rewriter.h"
+
+namespace CVC4 {
+namespace theory {
+namespace arith {
+namespace nl {
+namespace transcendental {
+
+// Creates the bound variables used to build Taylor series symbolically:
+// "x" is the series variable, "a" the expansion point, and "b" the variable
+// substituted into the remainder term (see getTaylor).
+TaylorGenerator::TaylorGenerator()
+ : d_nm(NodeManager::currentNM()),
+ d_taylor_real_fv(d_nm->mkBoundVar("x", d_nm->realType())),
+ d_taylor_real_fv_base(d_nm->mkBoundVar("a", d_nm->realType())),
+ d_taylor_real_fv_base_rem(d_nm->mkBoundVar("b", d_nm->realType()))
+{
+}
+
+// Accessor for the series variable "x".
+TNode TaylorGenerator::getTaylorVariable() { return d_taylor_real_fv; }
+
+// Returns the pair (Taylor sum of degree n, remainder term) for the
+// application fa (exp or sin), expanded around fa[0]. Results are cached per
+// canonical factor and degree, then specialized to fa's actual argument.
+std::pair<Node, Node> TaylorGenerator::getTaylor(TNode fa, std::uint64_t n)
+{
+ NodeManager* nm = NodeManager::currentNM();
+ // NOTE(review): this local uses the member-naming "d_" prefix although it
+ // is a local variable; consider renaming.
+ Node d_zero = nm->mkConst(Rational(0));
+
+ Assert(n > 0);
+ Node fac; // what term we cache for fa
+ if (fa[0] == d_zero)
+ {
+ // optimization : simpler to compute (x-fa[0])^n if we are centered around 0
+ fac = fa;
+ }
+ else
+ {
+ // otherwise we use a standard factor a in (x-a)^n
+ fac = nm->mkNode(fa.getKind(), d_taylor_real_fv_base);
+ }
+ Node taylor_rem;
+ Node taylor_sum;
+ // check if we have already computed this Taylor series
+ auto itt = s_taylor_sum[fac].find(n);
+ if (itt == s_taylor_sum[fac].end())
+ {
+ // i_exp_base is the (x - a) factor raised to increasing powers below
+ Node i_exp_base;
+ if (fa[0] == d_zero)
+ {
+ i_exp_base = d_taylor_real_fv;
+ }
+ else
+ {
+ i_exp_base = Rewriter::rewrite(
+ nm->mkNode(Kind::MINUS, d_taylor_real_fv, d_taylor_real_fv_base));
+ }
+ // i_derv: current derivative; i_fact: factorial; i_exp: power of (x-a)
+ Node i_derv = fac;
+ Node i_fact = nm->mkConst(Rational(1));
+ Node i_exp = i_fact;
+ int i_derv_status = 0;
+ unsigned counter = 0;
+ std::vector<Node> sum;
+ do
+ {
+ counter++;
+ if (fa.getKind() == Kind::EXPONENTIAL)
+ {
+ // unchanged
+ }
+ else if (fa.getKind() == Kind::SINE)
+ {
+ // i_derv_status cycles 0..3 to realize the derivative cycle
+ // sin -> sin(x + pi/2) -> -sin -> -sin(x + pi/2)
+ if (i_derv_status % 2 == 1)
+ {
+ Node pi = nm->mkNullaryOperator(nm->realType(), Kind::PI);
+ Node pi_2 = Rewriter::rewrite(nm->mkNode(
+ Kind::MULT, pi, nm->mkConst(Rational(1) / Rational(2))));
+
+ Node arg = nm->mkNode(Kind::PLUS, pi_2, d_taylor_real_fv_base);
+ i_derv = nm->mkNode(Kind::SINE, arg);
+ }
+ else
+ {
+ i_derv = fa;
+ }
+ if (i_derv_status >= 2)
+ {
+ i_derv = nm->mkNode(Kind::MINUS, d_zero, i_derv);
+ }
+ i_derv = Rewriter::rewrite(i_derv);
+ i_derv_status = i_derv_status == 3 ? 0 : i_derv_status + 1;
+ }
+ // the (n+1)-th term becomes the remainder, with the expansion point
+ // replaced by the dedicated remainder variable "b"
+ if (counter == (n + 1))
+ {
+ TNode x = d_taylor_real_fv_base;
+ i_derv = i_derv.substitute(x, d_taylor_real_fv_base_rem);
+ }
+ Node curr = nm->mkNode(
+ Kind::MULT, nm->mkNode(Kind::DIVISION, i_derv, i_fact), i_exp);
+ if (counter == (n + 1))
+ {
+ taylor_rem = curr;
+ }
+ else
+ {
+ sum.push_back(curr);
+ i_fact = Rewriter::rewrite(
+ nm->mkNode(Kind::MULT, nm->mkConst(Rational(counter)), i_fact));
+ i_exp = Rewriter::rewrite(nm->mkNode(Kind::MULT, i_exp_base, i_exp));
+ }
+ } while (counter <= n);
+ taylor_sum = sum.size() == 1 ? sum[0] : nm->mkNode(Kind::PLUS, sum);
+
+ if (fac[0] != d_taylor_real_fv_base)
+ {
+ TNode x = d_taylor_real_fv_base;
+ taylor_sum = taylor_sum.substitute(x, fac[0]);
+ }
+
+ // cache
+ s_taylor_sum[fac][n] = taylor_sum;
+ d_taylor_rem[fac][n] = taylor_rem;
+ }
+ else
+ {
+ taylor_sum = itt->second;
+ Assert(d_taylor_rem[fac].find(n) != d_taylor_rem[fac].end());
+ taylor_rem = d_taylor_rem[fac][n];
+ }
+
+ // must substitute for the argument if we were using a different lookup
+ if (fa[0] != fac[0])
+ {
+ TNode x = d_taylor_real_fv_base;
+ taylor_sum = taylor_sum.substitute(x, fa[0]);
+ }
+ return std::pair<Node, Node>(taylor_sum, taylor_rem);
+}
+
+// Fills pbounds with four polynomial bounds for kind k at degree d, in the
+// order: [0] lower (positive args), [1] lower (negative args),
+// [2] upper (positive args), [3] upper (negative args) — see the trace
+// output below. Results are cached in d_poly_bounds per kind and degree.
+void TaylorGenerator::getPolynomialApproximationBounds(
+ Kind k, unsigned d, std::vector<Node>& pbounds)
+{
+ if (d_poly_bounds[k][d].empty())
+ {
+ NodeManager* nm = NodeManager::currentNM();
+ Node tft = nm->mkNode(k, nm->mkConst(Rational(0)));
+ // n is the Taylor degree we are currently considering
+ unsigned n = 2 * d;
+ // n must be even
+ std::pair<Node, Node> taylor = getTaylor(tft, n);
+ Trace("nl-ext-tftp-debug2")
+ << "Taylor for " << k << " is : " << taylor.first << std::endl;
+ Node taylor_sum = Rewriter::rewrite(taylor.first);
+ Trace("nl-ext-tftp-debug2")
+ << "Taylor for " << k << " is (post-rewrite) : " << taylor_sum
+ << std::endl;
+ Assert(taylor.second.getKind() == Kind::MULT);
+ Assert(taylor.second.getNumChildren() == 2);
+ Assert(taylor.second[0].getKind() == Kind::DIVISION);
+ Trace("nl-ext-tftp-debug2")
+ << "Taylor remainder for " << k << " is " << taylor.second << std::endl;
+ // ru is x^{n+1}/(n+1)!
+ Node ru = nm->mkNode(Kind::DIVISION, taylor.second[1], taylor.second[0][1]);
+ ru = Rewriter::rewrite(ru);
+ Trace("nl-ext-tftp-debug2")
+ << "Taylor remainder factor is (post-rewrite) : " << ru << std::endl;
+ if (k == Kind::EXPONENTIAL)
+ {
+ // for exp, the Taylor sum itself is a lower bound on both sides, and
+ // the upper bounds differ for positive and negative arguments
+ pbounds.push_back(taylor_sum);
+ pbounds.push_back(taylor_sum);
+ pbounds.push_back(Rewriter::rewrite(
+ nm->mkNode(Kind::MULT,
+ taylor_sum,
+ nm->mkNode(Kind::PLUS, nm->mkConst(Rational(1)), ru))));
+ pbounds.push_back(
+ Rewriter::rewrite(nm->mkNode(Kind::PLUS, taylor_sum, ru)));
+ }
+ else
+ {
+ // for sine, the same lower/upper pair is used for both signs
+ Assert(k == Kind::SINE);
+ Node l = Rewriter::rewrite(nm->mkNode(Kind::MINUS, taylor_sum, ru));
+ Node u = Rewriter::rewrite(nm->mkNode(Kind::PLUS, taylor_sum, ru));
+ pbounds.push_back(l);
+ pbounds.push_back(l);
+ pbounds.push_back(u);
+ pbounds.push_back(u);
+ }
+ Trace("nl-ext-tf-tplanes")
+ << "Polynomial approximation for " << k << " is: " << std::endl;
+ Trace("nl-ext-tf-tplanes") << " Lower (pos): " << pbounds[0] << std::endl;
+ Trace("nl-ext-tf-tplanes") << " Upper (pos): " << pbounds[2] << std::endl;
+ Trace("nl-ext-tf-tplanes") << " Lower (neg): " << pbounds[1] << std::endl;
+ Trace("nl-ext-tf-tplanes") << " Upper (neg): " << pbounds[3] << std::endl;
+ d_poly_bounds[k][d].insert(
+ d_poly_bounds[k][d].end(), pbounds.begin(), pbounds.end());
+ }
+ else
+ {
+ // cache hit: copy the previously computed bounds
+ pbounds.insert(
+ pbounds.end(), d_poly_bounds[k][d].begin(), d_poly_bounds[k][d].end());
+ }
+}
+
+// Like getPolynomialApproximationBounds, but for exponential with a positive
+// constant argument c it increases the Taylor degree until the remainder
+// factor evaluated at c is at most 1, and replaces only the positive upper
+// bound (pbounds[2]) by the sound one at the increased degree.
+void TaylorGenerator::getPolynomialApproximationBoundForArg(
+ Kind k, Node c, unsigned d, std::vector<Node>& pbounds)
+{
+ getPolynomialApproximationBounds(k, d, pbounds);
+ Assert(c.isConst());
+ if (k == Kind::EXPONENTIAL && c.getConst<Rational>().sgn() == 1)
+ {
+ NodeManager* nm = NodeManager::currentNM();
+ Node tft = nm->mkNode(k, nm->mkConst(Rational(0)));
+ bool success = false;
+ unsigned ds = d;
+ TNode ttrf = getTaylorVariable();
+ TNode tc = c;
+ do
+ {
+ success = true;
+ unsigned n = 2 * ds;
+ std::pair<Node, Node> taylor = getTaylor(tft, n);
+ // check that 1-c^{n+1}/(n+1)! > 0
+ Node ru =
+ nm->mkNode(Kind::DIVISION, taylor.second[1], taylor.second[0][1]);
+ Node rus = ru.substitute(ttrf, tc);
+ rus = Rewriter::rewrite(rus);
+ Assert(rus.isConst());
+ if (rus.getConst<Rational>() > 1)
+ {
+ success = false;
+ ds = ds + 1;
+ }
+ } while (!success);
+ if (ds > d)
+ {
+ Trace("nl-ext-exp-taylor")
+ << "*** Increase Taylor bound to " << ds << " > " << d << " for ("
+ << k << " " << c << ")" << std::endl;
+ // must use sound upper bound
+ std::vector<Node> pboundss;
+ getPolynomialApproximationBounds(k, ds, pboundss);
+ pbounds[2] = pboundss[2];
+ }
+ }
+}
+
+std::pair<Node, Node> TaylorGenerator::getTfModelBounds(Node tf, unsigned d, NlModel& model)
+{
+  // compute the model value of the argument
+  Node c = model.computeAbstractModelValue(tf[0]);
+  Assert(c.isConst());
+  int csign = c.getConst<Rational>().sgn();
+  Kind k = tf.getKind();
+  if (csign == 0)
+  {
+    NodeManager* nm = NodeManager::currentNM();
+    // at zero, its trivial: sin(0) = 0 and exp(0) = 1
+    if (k == Kind::SINE)
+    {
+      Node zero = nm->mkConst(Rational(0));
+      return std::pair<Node, Node>(zero, zero);
+    }
+    Assert(k == Kind::EXPONENTIAL);
+    Node one = nm->mkConst(Rational(1));
+    return std::pair<Node, Node>(one, one);
+  }
+  bool isNeg = csign == -1;
+
+  // get the polynomial approximations that are sound for argument value c
+  std::vector<Node> pbounds;
+  getPolynomialApproximationBoundForArg(k, c, d, pbounds);
+
+  std::vector<Node> bounds;
+  TNode tfv = getTaylorVariable();
+  TNode tfs = tf[0];
+  // d2 == 0 computes the lower bound, d2 == 1 the upper bound; the index
+  // selects the polynomial matching the sign of the argument, following the
+  // layout documented for getPolynomialApproximationBounds
+  for (unsigned d2 = 0; d2 < 2; d2++)
+  {
+    int index = d2 == 0 ? (isNeg ? 1 : 0) : (isNeg ? 3 : 2);
+    Node pab = pbounds[index];
+    if (!pab.isNull())
+    {
+      // { x -> M_A(tf[0]) }
+      // Notice that we compute the model value of tfs first, so that
+      // the call to rewrite below does not modify the term, where notice that
+      // rewrite( x*x { x -> M_A(t) } ) = M_A(t)*M_A(t)
+      // is not equal to
+      // M_A( x*x { x -> t } ) = M_A( t*t )
+      // where M_A denotes the abstract model.
+      Node mtfs = model.computeAbstractModelValue(tfs);
+      pab = pab.substitute(tfv, mtfs);
+      pab = Rewriter::rewrite(pab);
+      Assert(pab.isConst());
+      bounds.push_back(pab);
+    }
+    else
+    {
+      // no sound bound is known on this side
+      bounds.push_back(Node::null());
+    }
+  }
+  return std::pair<Node, Node>(bounds[0], bounds[1]);
+}
+
+} // namespace transcendental
+} // namespace nl
+} // namespace arith
+} // namespace theory
+} // namespace CVC4
--- /dev/null
+/********************* */
+/*! \file taylor_generator.h
+ ** \verbatim
+ ** Top contributors (to current version):
+ ** Andrew Reynolds, Tim King
+ ** This file is part of the CVC4 project.
+ ** Copyright (c) 2009-2020 by the authors listed in the file AUTHORS
+ ** in the top-level source directory and their institutional affiliations.
+ ** All rights reserved. See the file COPYING in the top-level source
+ ** directory for licensing information.\endverbatim
+ **
+ ** \brief Generate Taylor approximations for transcendental lemmas.
+ **/
+
+#ifndef CVC4__THEORY__ARITH__NL__TRANSCENDENTAL__TAYLOR_GENERATOR_H
+#define CVC4__THEORY__ARITH__NL__TRANSCENDENTAL__TAYLOR_GENERATOR_H
+
+#include "expr/node.h"
+#include "theory/arith/nl/nl_model.h"
+
+namespace CVC4 {
+namespace theory {
+namespace arith {
+namespace nl {
+namespace transcendental {
+
+class TaylorGenerator
+{
+ public:
+  TaylorGenerator();
+
+  /**
+   * Return the variable used as x in getTaylor().
+   */
+  TNode getTaylorVariable();
+
+  /**
+   * Get Taylor series of degree n for function fa centered around point fa[0].
+   *
+   * Return value is ( P_{n,f(a)}( x ), R_{n+1,f(a)}( x ) ) where
+   * the first part of the pair is the Taylor series expansion :
+   *    P_{n,f(a)}( x ) = sum_{i=0}^n (f^i( a )/i!)*(x-a)^i
+   * and the second part of the pair is the Taylor series remainder :
+   *    R_{n+1,f(a),b}( x ) = (f^{n+1}( b )/(n+1)!)*(x-a)^{n+1}
+   *
+   * The above values are cached for each (f,n) for a fixed variable "a".
+   * To compute the Taylor series for fa, we compute the Taylor series
+   *   for ( fa.getKind(), n ) then substitute { a -> fa[0] } if fa[0]!=0.
+   * We compute P_{n,f(0)}( x )/R_{n+1,f(0),b}( x ) for ( fa.getKind(), n )
+   *   if fa[0]=0.
+   * In the latter case, note we compute the exponential x^{n+1}
+   * instead of (x-a)^{n+1}, which can be done faster.
+   */
+  std::pair<Node, Node> getTaylor(TNode fa, std::uint64_t n);
+
+  /**
+   * polynomial approximation bounds
+   *
+   * This adds P_l+[x], P_l-[x], P_u+[x], P_u-[x] to pbounds, where x is
+   * d_taylor_real_fv. These are polynomial approximations of the Taylor series
+   * of <k>( 0 ) for degree 2*d where k is SINE or EXPONENTIAL.
+   * These correspond to P_l and P_u from Figure 3 of Cimatti et al., CADE 2017,
+   * for positive/negative (+/-) values of the argument of <k>( 0 ).
+   *
+   * Notice that for certain bounds (e.g. upper bounds for exponential), the
+   * Taylor approximation for a fixed degree is only sound up to a given
+   * upper bound on the argument. To obtain sound lower/upper bounds for a
+   * given <k>( c ), use the function below.
+   */
+  void getPolynomialApproximationBounds(Kind k,
+                                        unsigned d,
+                                        std::vector<Node>& pbounds);
+
+  /**
+   * polynomial approximation bounds
+   *
+   * This computes polynomial approximations P_l+[x], P_l-[x], P_u+[x], P_u-[x]
+   * that are sound (lower, upper) bounds for <k>( c ). Notice that these
+   * polynomials may depend on c. In particular, for P_u+[x] for <k>( c ) where
+   * c>0, we return the P_u+[x] from the function above for the minimum degree
+   * d' >= d such that (1-c^{2*d'+1}/(2*d'+1)!) is positive.
+   */
+  void getPolynomialApproximationBoundForArg(Kind k,
+                                             Node c,
+                                             unsigned d,
+                                             std::vector<Node>& pbounds);
+
+  /** get transcendental function model bounds
+   *
+   * This returns the current lower and upper bounds of transcendental
+   * function application tf based on Taylor of degree 2*d, which is dependent
+   * on the model value of its argument.
+   */
+  std::pair<Node, Node> getTfModelBounds(Node tf, unsigned d, NlModel& model);
+
+ private:
+  /** The node manager (NOTE(review): presumably cached by the constructor) */
+  NodeManager* d_nm;
+  /** The variable x used in the cached Taylor series (see getTaylor()) */
+  const Node d_taylor_real_fv;
+  /** The fixed variable "a" around which the series is expanded */
+  const Node d_taylor_real_fv_base;
+  /** The variable b used in the Taylor series remainder R_{n+1,f(a),b} */
+  const Node d_taylor_real_fv_base_rem;
+  /** Cache of Taylor sums P, mapping f -> n -> P (see getTaylor()) */
+  std::unordered_map<Node,
+                     std::unordered_map<std::uint64_t, Node>,
+                     NodeHashFunction>
+      s_taylor_sum;
+  /** Cache of Taylor remainders R, mapping f -> n -> R (see getTaylor()) */
+  std::unordered_map<Node,
+                     std::unordered_map<std::uint64_t, Node>,
+                     NodeHashFunction>
+      d_taylor_rem;
+  /**
+   * Cache for getPolynomialApproximationBounds, mapping kind -> degree d ->
+   * the four polynomial bounds added for (kind, d).
+   */
+  std::map<Kind, std::map<unsigned, std::vector<Node>>> d_poly_bounds;
+};
+
+} // namespace transcendental
+} // namespace nl
+} // namespace arith
+} // namespace theory
+} // namespace CVC4
+
+#endif /* CVC4__THEORY__ARITH__NL__TRANSCENDENTAL__TAYLOR_GENERATOR_H */
--- /dev/null
+/********************* */
+/*! \file transcendental_solver.cpp
+ ** \verbatim
+ ** Top contributors (to current version):
+ ** Andrew Reynolds, Tim King, Gereon Kremer
+ ** This file is part of the CVC4 project.
+ ** Copyright (c) 2009-2020 by the authors listed in the file AUTHORS
+ ** in the top-level source directory and their institutional affiliations.
+ ** All rights reserved. See the file COPYING in the top-level source
+ ** directory for licensing information.\endverbatim
+ **
+ ** \brief Implementation of solver for handling transcendental functions.
+ **/
+
+#include "theory/arith/nl/transcendental/transcendental_solver.h"
+
+#include <cmath>
+#include <set>
+
+#include "expr/node_algorithm.h"
+#include "expr/node_builder.h"
+#include "options/arith_options.h"
+#include "theory/arith/arith_msum.h"
+#include "theory/arith/arith_utilities.h"
+#include "theory/arith/nl/transcendental/taylor_generator.h"
+#include "theory/rewriter.h"
+
+using namespace CVC4::kind;
+
+namespace CVC4 {
+namespace theory {
+namespace arith {
+namespace nl {
+namespace transcendental {
+
+TranscendentalSolver::TranscendentalSolver(InferenceManager& im, NlModel& m)
+    : d_tstate(im, m), d_expSlv(&d_tstate), d_sineSlv(&d_tstate)
+{
+  // initial Taylor degree; may be incremented via incrementTaylorDegree()
+  d_taylor_degree = options::nlExtTfTaylorDegree();
+}
+
+TranscendentalSolver::~TranscendentalSolver() {}
+
+void TranscendentalSolver::initLastCall(const std::vector<Node>& assertions,
+                                        const std::vector<Node>& false_asserts,
+                                        const std::vector<Node>& xts)
+{
+  // delegate to the shared state, which registers the transcendental terms
+  d_tstate.init(assertions, false_asserts, xts);
+}
+
+bool TranscendentalSolver::preprocessAssertionsCheckModel(
+    std::vector<Node>& assertions)
+{
+  // collect the purification substitution: purified terms for their masters
+  std::vector<Node> pvars;
+  std::vector<Node> psubs;
+  for (const std::pair<const Node, Node>& tb : d_tstate.d_trMaster)
+  {
+    pvars.push_back(tb.first);
+    psubs.push_back(tb.second);
+  }
+
+  // initialize representation of assertions
+  std::vector<Node> passertions;
+  for (const Node& a : assertions)
+  {
+    Node pa = a;
+    if (!pvars.empty())
+    {
+      pa = arithSubstitute(pa, pvars, psubs);
+      pa = Rewriter::rewrite(pa);
+    }
+    // only keep assertions that did not rewrite to true
+    if (!pa.isConst() || !pa.getConst<bool>())
+    {
+      Trace("nl-ext-cm-assert") << "- assert : " << pa << std::endl;
+      passertions.push_back(pa);
+    }
+  }
+  // get model bounds for all transcendental functions
+  Trace("nl-ext-cm") << "----- Get bounds for transcendental functions..."
+                     << std::endl;
+  for (std::pair<const Kind, std::vector<Node> >& tfs : d_tstate.d_funcMap)
+  {
+    for (const Node& tf : tfs.second)
+    {
+      Trace("nl-ext-cm") << "- Term: " << tf << std::endl;
+      bool success = true;
+      // tf is Figure 3 : tf( x )
+      std::pair<Node, Node> bounds;
+      if (tfs.first == Kind::PI)
+      {
+        // pi uses its fixed rational bounds
+        bounds = {d_tstate.d_pi_bound[0], d_tstate.d_pi_bound[1]};
+      }
+      else
+      {
+        bounds = d_tstate.d_taylor.getTfModelBounds(
+            tf, d_taylor_degree, d_tstate.d_model);
+        if (bounds.first != bounds.second)
+        {
+          // a non-point interval means the model is only approximate
+          d_tstate.d_model.setUsedApproximate();
+        }
+      }
+      if (!bounds.first.isNull() && !bounds.second.isNull())
+      {
+        // for each function in the congruence class
+        for (const Node& ctf : d_tstate.d_funcCongClass[tf])
+        {
+          // each term in congruence classes should be master terms
+          Assert(d_tstate.d_trSlaves.find(ctf) != d_tstate.d_trSlaves.end());
+          // we set the bounds for each slave of tf
+          for (const Node& stf : d_tstate.d_trSlaves[ctf])
+          {
+            Trace("nl-ext-cm")
+                << "...bound for " << stf << " : [" << bounds.first << ", "
+                << bounds.second << "]" << std::endl;
+            success = d_tstate.d_model.addCheckModelBound(
+                stf, bounds.first, bounds.second);
+          }
+        }
+      }
+      else
+      {
+        Trace("nl-ext-cm") << "...no bound for " << tf << std::endl;
+      }
+      if (!success)
+      {
+        // a bound was conflicting
+        Trace("nl-ext-cm") << "...failed to set bound for " << tf << std::endl;
+        Trace("nl-ext-cm") << "-----" << std::endl;
+        return false;
+      }
+    }
+  }
+  // replace the assertions
+  assertions = std::move(passertions);
+  return true;
+}
+
+// increases the precision of subsequent Taylor approximations by one degree
+void TranscendentalSolver::incrementTaylorDegree() { d_taylor_degree++; }
+unsigned TranscendentalSolver::getTaylorDegree() const
+{
+  return d_taylor_degree;
+}
+
+void TranscendentalSolver::processSideEffect(const NlLemma& se)
+{
+  // Record every secant point established by this lemma, so that it is not
+  // proposed again for the same function application and degree.
+  for (const std::tuple<Node, unsigned, Node>& secant : se.d_secantPoint)
+  {
+    const Node& app = std::get<0>(secant);
+    unsigned degree = std::get<1>(secant);
+    const Node& point = std::get<2>(secant);
+    d_tstate.d_secant_points[app][degree].push_back(point);
+  }
+}
+
+void TranscendentalSolver::checkTranscendentalInitialRefine()
+{
+  // relay to the function-specific subsolvers
+  d_expSlv.checkInitialRefine();
+  d_sineSlv.checkInitialRefine();
+}
+
+void TranscendentalSolver::checkTranscendentalMonotonic()
+{
+  // relay to the function-specific subsolvers
+  d_expSlv.checkMonotonic();
+  d_sineSlv.checkMonotonic();
+}
+
+void TranscendentalSolver::checkTranscendentalTangentPlanes()
+{
+  Trace("nl-ext") << "Get tangent plane lemmas for transcendental functions..."
+                  << std::endl;
+  // this implements Figure 3 of "Satisfiability Modulo Transcendental
+  // Functions via Incremental Linearization" by Cimatti et al
+  for (const std::pair<const Kind, std::vector<Node> >& tfs :
+       d_tstate.d_funcMap)
+  {
+    Kind k = tfs.first;
+    if (k == PI)
+    {
+      // We do not use Taylor approximation for PI currently.
+      // This is because the convergence is extremely slow, and hence an
+      // initial approximation is superior.
+      continue;
+    }
+
+    // we substitute into the Taylor sum P_{n,f(0)}( x )
+
+    for (const Node& tf : tfs.second)
+    {
+      // tf is Figure 3 : tf( x )
+      Trace("nl-ext-tftp") << "Compute tangent planes " << tf << std::endl;
+      // go until max degree is reached, or we don't meet bound criteria
+      for (unsigned d = 1; d <= d_taylor_degree; d++)
+      {
+        Trace("nl-ext-tftp") << "- run at degree " << d << "..." << std::endl;
+        unsigned prev =
+            d_tstate.d_im.numPendingLemmas() + d_tstate.d_im.numWaitingLemmas();
+        // a true return value indicates the model check failed at this degree
+        // (a refinement lemma may have been added), in which case we stop
+        if (checkTfTangentPlanesFun(tf, d))
+        {
+          Trace("nl-ext-tftp") << "...fail, #lemmas = "
+                               << (d_tstate.d_im.numPendingLemmas()
+                                   + d_tstate.d_im.numWaitingLemmas() - prev)
+                               << std::endl;
+          break;
+        }
+        else
+        {
+          // bounds were not precise enough; retry with a higher degree
+          Trace("nl-ext-tftp") << "...success" << std::endl;
+        }
+      }
+    }
+  }
+}
+
+bool TranscendentalSolver::checkTfTangentPlanesFun(Node tf, unsigned d)
+{
+  NodeManager* nm = NodeManager::currentNM();
+  Kind k = tf.getKind();
+  // this should only be run on master applications
+  Assert(d_tstate.d_trSlaves.find(tf) != d_tstate.d_trSlaves.end());
+
+  // Figure 3 : c
+  Node c = d_tstate.d_model.computeAbstractModelValue(tf[0]);
+  int csign = c.getConst<Rational>().sgn();
+  if (csign == 0)
+  {
+    // no secant/tangent plane is necessary
+    return true;
+  }
+  Assert(csign == 1 || csign == -1);
+
+  // Figure 3: P_l, P_u
+  // indexed by [lower=0/upper=1][sign of c]
+  std::map<int, Node> poly_approx_bounds[2];
+  std::vector<Node> pbounds;
+  d_tstate.d_taylor.getPolynomialApproximationBoundForArg(k, c, d, pbounds);
+  poly_approx_bounds[0][1] = pbounds[0];
+  poly_approx_bounds[0][-1] = pbounds[1];
+  poly_approx_bounds[1][1] = pbounds[2];
+  poly_approx_bounds[1][-1] = pbounds[3];
+
+  // Figure 3 : v
+  Node v = d_tstate.d_model.computeAbstractModelValue(tf);
+
+  // check value of tf
+  Trace("nl-ext-tftp-debug") << "Process tangent plane refinement for " << tf
+                             << ", degree " << d << "..." << std::endl;
+  Trace("nl-ext-tftp-debug") << "  value in model : " << v << std::endl;
+  Trace("nl-ext-tftp-debug") << "  arg value in model : " << c << std::endl;
+
+  // compute the concavity
+  int region = -1;
+  std::unordered_map<Node, int, NodeHashFunction>::iterator itr =
+      d_tstate.d_tf_region.find(tf);
+  if (itr != d_tstate.d_tf_region.end())
+  {
+    region = itr->second;
+    Trace("nl-ext-tftp-debug") << "  region is : " << region << std::endl;
+  }
+  // Figure 3 : conc
+  // Use the guarded local region here: dereferencing itr would be undefined
+  // behavior when tf has no known region (itr == end()). In that case region
+  // remains -1, for which regionToConcavity returns 0 below.
+  int concavity = regionToConcavity(k, region);
+  Trace("nl-ext-tftp-debug") << "  concavity is : " << concavity << std::endl;
+  if (concavity == 0)
+  {
+    // no secant/tangent plane is necessary
+    return true;
+  }
+
+  // Figure 3: P
+  Node poly_approx;
+
+  // compute whether this is a tangent refinement or a secant refinement
+  bool is_tangent = false;
+  bool is_secant = false;
+  std::pair<Node, Node> mvb =
+      d_tstate.d_taylor.getTfModelBounds(tf, d, d_tstate.d_model);
+  // this is the approximated value of tf(c), which is a value such that:
+  //    M_A(tf(c)) >= poly_appox_c >= tf(c) or
+  //    M_A(tf(c)) <= poly_appox_c <= tf(c)
+  // In other words, it is a better approximation of the true value of tf(c)
+  // in the case that we add a refinement lemma. We use this value in the
+  // refinement schemas below.
+  Node poly_approx_c;
+  for (unsigned r = 0; r < 2; r++)
+  {
+    Node pab = poly_approx_bounds[r][csign];
+    Node v_pab = r == 0 ? mvb.first : mvb.second;
+    if (!v_pab.isNull())
+    {
+      Trace("nl-ext-tftp-debug2")
+          << "...model value of " << pab << " is " << v_pab << std::endl;
+
+      Assert(v_pab.isConst());
+      // is the model value below the lower (r==0) / above the upper (r==1)
+      // polynomial bound?
+      Node comp = nm->mkNode(r == 0 ? LT : GT, v, v_pab);
+      Trace("nl-ext-tftp-debug2") << "...compare : " << comp << std::endl;
+      Node compr = Rewriter::rewrite(comp);
+      Trace("nl-ext-tftp-debug2") << "...got : " << compr << std::endl;
+      if (compr == d_tstate.d_true)
+      {
+        poly_approx_c = Rewriter::rewrite(v_pab);
+        // beyond the bounds
+        if (r == 0)
+        {
+          poly_approx = poly_approx_bounds[r][csign];
+          is_tangent = concavity == 1;
+          is_secant = concavity == -1;
+        }
+        else
+        {
+          poly_approx = poly_approx_bounds[r][csign];
+          is_tangent = concavity == -1;
+          is_secant = concavity == 1;
+        }
+        if (Trace.isOn("nl-ext-tftp"))
+        {
+          Trace("nl-ext-tftp") << "*** Outside boundary point (";
+          Trace("nl-ext-tftp") << (r == 0 ? "low" : "high") << ") ";
+          printRationalApprox("nl-ext-tftp", v_pab);
+          Trace("nl-ext-tftp") << ", will refine..." << std::endl;
+          Trace("nl-ext-tftp")
+              << "    poly_approx = " << poly_approx << std::endl;
+          Trace("nl-ext-tftp")
+              << "    is_tangent = " << is_tangent << std::endl;
+          Trace("nl-ext-tftp") << "    is_secant = " << is_secant << std::endl;
+        }
+        break;
+      }
+      else
+      {
+        Trace("nl-ext-tftp")
+            << "  ...within " << (r == 0 ? "low" : "high") << " bound : ";
+        printRationalApprox("nl-ext-tftp", v_pab);
+        Trace("nl-ext-tftp") << std::endl;
+      }
+    }
+  }
+
+  // Figure 3: P( c )
+  if (is_tangent || is_secant)
+  {
+    Trace("nl-ext-tftp-debug2")
+        << "...poly approximation at c is " << poly_approx_c << std::endl;
+  }
+  else
+  {
+    // we may want to continue getting better bounds
+    return false;
+  }
+
+  // dispatch the tangent/secant lemma to the relevant subsolver
+  if (is_tangent)
+  {
+    if (k == Kind::EXPONENTIAL)
+    {
+      d_expSlv.doTangentLemma(tf, c, poly_approx_c);
+    }
+    else if (k == Kind::SINE)
+    {
+      d_sineSlv.doTangentLemma(tf, c, poly_approx_c, region);
+    }
+  }
+  else if (is_secant)
+  {
+    if (k == Kind::EXPONENTIAL)
+    {
+      d_expSlv.doSecantLemmas(tf, c, poly_approx_c, d);
+    }
+    else if (k == Kind::SINE)
+    {
+      d_sineSlv.doSecantLemmas(tf, c, poly_approx_c, d, region);
+    }
+  }
+  return true;
+}
+
+int TranscendentalSolver::regionToConcavity(Kind k, int region)
+{
+  // exponential: only region 1 is known, and yields 1
+  if (k == EXPONENTIAL)
+  {
+    return region == 1 ? 1 : 0;
+  }
+  // sine: regions 1 and 2 yield -1, regions 3 and 4 yield 1
+  if (k == SINE)
+  {
+    if (region == 1 || region == 2)
+    {
+      return -1;
+    }
+    return (region == 3 || region == 4) ? 1 : 0;
+  }
+  // any other kind or invalid region
+  return 0;
+}
+
+} // namespace transcendental
+} // namespace nl
+} // namespace arith
+} // namespace theory
+} // namespace CVC4
--- /dev/null
+
+/********************* */
+/*! \file transcendental_solver.h
+ ** \verbatim
+ ** Top contributors (to current version):
+ ** Andrew Reynolds, Tim King
+ ** This file is part of the CVC4 project.
+ ** Copyright (c) 2009-2020 by the authors listed in the file AUTHORS
+ ** in the top-level source directory and their institutional affiliations.
+ ** All rights reserved. See the file COPYING in the top-level source
+ ** directory for licensing information.\endverbatim
+ **
+ ** \brief Solving for handling transcendental functions.
+ **/
+
+#ifndef CVC4__THEORY__ARITH__NL__TRANSCENDENTAL__TRANSCENDENTAL_SOLVER_H
+#define CVC4__THEORY__ARITH__NL__TRANSCENDENTAL__TRANSCENDENTAL_SOLVER_H
+
+#include <map>
+#include <unordered_map>
+#include <unordered_set>
+#include <vector>
+
+#include "expr/node.h"
+#include "theory/arith/inference_manager.h"
+#include "theory/arith/nl/nl_model.h"
+#include "theory/arith/nl/transcendental/exponential_solver.h"
+#include "theory/arith/nl/transcendental/sine_solver.h"
+#include "theory/arith/nl/transcendental/transcendental_state.h"
+
+namespace CVC4 {
+namespace theory {
+namespace arith {
+namespace nl {
+namespace transcendental {
+
+/** Transcendental solver class
+ *
+ * This class implements model-based refinement schemes
+ * for transcendental functions, described in:
+ *
+ * - "Satisfiability Modulo Transcendental
+ * Functions via Incremental Linearization" by Cimatti
+ * et al., CADE 2017.
+ *
+ * Its main functionality consists of methods that implement lemma schemas
+ * below, which return a set of lemmas that should be sent on the output
+ * channel.
+ */
+class TranscendentalSolver
+{
+ public:
+  TranscendentalSolver(InferenceManager& im, NlModel& m);
+  ~TranscendentalSolver();
+
+  /** init last call
+   *
+   * This is called at the beginning of last call effort check, where
+   * assertions are the set of assertions belonging to arithmetic,
+   * false_asserts is the subset of assertions that are false in the current
+   * model, and xts is the set of extended function terms that are active in
+   * the current context.
+   *
+   * This call may add lemmas to lems based on registering term
+   * information (for example, purification of sine terms).
+   */
+  void initLastCall(const std::vector<Node>& assertions,
+                    const std::vector<Node>& false_asserts,
+                    const std::vector<Node>& xts);
+  /** increment taylor degree */
+  void incrementTaylorDegree();
+  /** get taylor degree */
+  unsigned getTaylorDegree() const;
+  /** preprocess assertions check model
+   *
+   * This modifies the given assertions in preparation for running a call
+   * to check model.
+   *
+   * This method returns false if a bound for a transcendental function
+   * was conflicting.
+   */
+  bool preprocessAssertionsCheckModel(std::vector<Node>& assertions);
+  /** Process side effects in lemma se */
+  void processSideEffect(const NlLemma& se);
+  //-------------------------------------------- lemma schemas
+  /** Relays to the initial refinement of the exp and sine solvers. */
+  void checkTranscendentalInitialRefine();
+
+  /** Relays to the monotonicity checks of the exp and sine solvers. */
+  void checkTranscendentalMonotonic();
+
+  /** check transcendental tangent planes
+   *
+   * Constructs a set of valid theory lemmas, based on
+   * computing an "incremental linearization" of
+   * transcendental functions based on the model values
+   * of transcendental functions and their arguments.
+   * It is based on Figure 3 of "Satisfiability
+   * Modulo Transcendental Functions via Incremental
+   * Linearization" by Cimatti et al., CADE 2017.
+   * This schema is not terminating in general.
+   * It is not enabled by default, and can
+   * be enabled by --nl-ext-tf-tplanes.
+   *
+   * Example:
+   *
+   * Assume we have a term sin(y) where M( y ) = 1 where M is the current model.
+   * Note that:
+   *   sin(1) ~= .841471
+   *
+   * The Taylor series and remainder of sin(y) of degree 7 is
+   *   P_{7,sin(0)}( x ) = x + (-1/6)*x^3 + (1/20)*x^5
+   *   R_{7,sin(0),b}( x ) = (-1/5040)*x^7
+   *
+   * This gives us lower and upper bounds :
+   *   P_u( x ) = P_{7,sin(0)}( x ) + R_{7,sin(0),b}( x )
+   *     ...where note P_u( 1 ) = 4243/5040 ~= .841865
+   *   P_l( x ) = P_{7,sin(0)}( x ) - R_{7,sin(0),b}( x )
+   *     ...where note P_l( 1 ) = 4241/5040 ~= .841468
+   *
+   * Assume that M( sin(y) ) > P_u( 1 ).
+   * Since the concavity of sine in the region 0 < x < PI/2 is -1,
+   * we add a tangent plane refinement.
+   * The tangent plane at the point 1 in P_u is
+   * given by the formula:
+   *   T( x ) = P_u( 1 ) + ((d/dx)(P_u(x)))( 1 )*( x - 1 )
+   * We add the lemma:
+   *   ( 0 < y < PI/2 ) => sin( y ) <= T( y )
+   * which is:
+   *   ( 0 < y < PI/2 ) => sin( y ) <= (391/720)*(y - 2737/1506)
+   *
+   * Assume that M( sin(y) ) < P_u( 1 ).
+   * Since the concavity of sine in the region 0 < x < PI/2 is -1,
+   * we add a secant plane refinement for some constants ( l, u )
+   * such that 0 <= l < M( y ) < u <= PI/2. Assume we choose
+   * l = 0 and u = M( PI/2 ) = 150517/47912.
+   * The secant planes at point 1 for P_l
+   * are given by the formulas:
+   *   S_l( x ) = (x-l)*(P_l( l )-P_l(c))/(l-1) + P_l( l )
+   *   S_u( x ) = (x-u)*(P_l( u )-P_l(c))/(u-1) + P_l( u )
+   * We add the lemmas:
+   *   ( 0 < y < 1 ) => sin( y ) >= S_l( y )
+   *   ( 1 < y < PI/2 ) => sin( y ) >= S_u( y )
+   * which are:
+   *   ( 0 < y < 1 ) => (sin y) >= 4251/5040*y
+   *   ( 1 < y < PI/2 ) => (sin y) >= c1*(y+c2)
+   * where c1, c2 are rationals (for brevity, omitted here)
+   * such that c1 ~= .277 and c2 ~= 2.032.
+   */
+  void checkTranscendentalTangentPlanes();
+
+ private:
+  /** check transcendental function refinement for tf
+   *
+   * This method is called by the above method for each "master"
+   * transcendental function application that occurs in an assertion in the
+   * current context. For example, an application like sin(t) is not a master
+   * if we have introduced the constraints:
+   *   t=y+2*pi*n ^ -pi <= y <= pi ^ sin(t) = sin(y).
+   * See d_trMaster/d_trSlaves for more detail.
+   *
+   * This runs Figure 3 of Cimatti et al., CADE 2017 for transcendental
+   * function application tf for Taylor degree d. It may add a secant or
+   * tangent plane lemma to lems, which includes the side effect of the lemma
+   * (if one exists).
+   *
+   * It returns false if the bounds are not precise enough to add a
+   * secant or tangent plane lemma.
+   */
+  bool checkTfTangentPlanesFun(Node tf, unsigned d);
+  //-------------------------------------------- end lemma schemas
+
+  /** get concavity
+   *
+   * Returns whether we are concave (+1) or convex (-1)
+   * in region of transcendental function with kind k,
+   * where region is defined above.
+   * Returns 0 if region is invalid.
+   */
+  int regionToConcavity(Kind k, int region);
+
+  /** taylor degree
+   *
+   * Indicates that the degree of the polynomials in the Taylor approximation of
+   * all transcendental functions is 2*d_taylor_degree. This value is set
+   * initially to options::nlExtTfTaylorDegree() and may be incremented
+   * if the option options::nlExtTfIncPrecision() is enabled.
+   */
+  unsigned d_taylor_degree;
+
+  /** Common state for transcendental solver */
+  transcendental::TranscendentalState d_tstate;
+  /** The solver responsible for the exponential function */
+  transcendental::ExponentialSolver d_expSlv;
+  /** The solver responsible for the sine function */
+  transcendental::SineSolver d_sineSlv;
+}; /* class TranscendentalSolver */
+
+} // namespace transcendental
+} // namespace nl
+} // namespace arith
+} // namespace theory
+} // namespace CVC4
+
+#endif /* CVC4__THEORY__ARITH__NL__TRANSCENDENTAL__TRANSCENDENTAL_SOLVER_H */
--- /dev/null
+/********************* */
+/*! \file transcendental_state.cpp
+ ** \verbatim
+ ** Top contributors (to current version):
+ ** Andrew Reynolds, Tim King, Gereon Kremer
+ ** This file is part of the CVC4 project.
+ ** Copyright (c) 2009-2020 by the authors listed in the file AUTHORS
+ ** in the top-level source directory and their institutional affiliations.
+ ** All rights reserved. See the file COPYING in the top-level source
+ ** directory for licensing information.\endverbatim
+ **
+ ** \brief Implementation of solver for handling transcendental functions.
+ **/
+
+#include "theory/arith/nl/transcendental/transcendental_state.h"
+
+#include "theory/arith/arith_utilities.h"
+#include "theory/arith/nl/transcendental/taylor_generator.h"
+
+namespace CVC4 {
+namespace theory {
+namespace arith {
+namespace nl {
+namespace transcendental {
+
+namespace {
+
+/**
+ * Construct the predicate stating that a is in the main phase:
+ *   -pi <= a <= pi
+ */
+inline Node mkValidPhase(TNode a, TNode pi)
+{
+  NodeManager* nm = NodeManager::currentNM();
+  Node neg_pi = nm->mkNode(Kind::MULT, mkRationalNode(-1), pi);
+  return mkBounded(neg_pi, a, pi);
+}
+}  // namespace
+
+TranscendentalState::TranscendentalState(InferenceManager& im, NlModel& model)
+    : d_im(im), d_model(model)
+{
+  // cache commonly used constants
+  NodeManager* nm = NodeManager::currentNM();
+  d_true = nm->mkConst(true);
+  d_false = nm->mkConst(false);
+  d_zero = nm->mkConst(Rational(0));
+  d_one = nm->mkConst(Rational(1));
+  d_neg_one = nm->mkConst(Rational(-1));
+}
+
+void TranscendentalState::init(const std::vector<Node>& assertions,
+                               const std::vector<Node>& false_asserts,
+                               const std::vector<Node>& xts)
+{
+  // reset per-check data
+  d_funcCongClass.clear();
+  d_funcMap.clear();
+  d_tf_region.clear();
+
+  NodeManager* nm = NodeManager::currentNM();
+
+  // register the extended function terms
+  std::vector<Node> trNeedsMaster;
+  bool needPi = false;
+  // for computing congruence
+  std::map<Kind, ArgTrie> argTrie;
+  for (unsigned i = 0, xsize = xts.size(); i < xsize; i++)
+  {
+    Node a = xts[i];
+    Kind ak = a.getKind();
+    bool consider = true;
+    // if is an unpurified application of SINE, or it is a transcendental
+    // applied to a transcendental, purify.
+    if (isTranscendentalKind(ak))
+    {
+      // if we've already computed master for a
+      if (d_trMaster.find(a) != d_trMaster.end())
+      {
+        // a master has at least one slave
+        consider = (d_trSlaves.find(a) != d_trSlaves.end());
+      }
+      else
+      {
+        if (ak == Kind::SINE)
+        {
+          // always not a master
+          consider = false;
+        }
+        else
+        {
+          for (const Node& ac : a)
+          {
+            if (isTranscendentalKind(ac.getKind()))
+            {
+              consider = false;
+              break;
+            }
+          }
+        }
+        if (!consider)
+        {
+          // wait to assign a master below
+          trNeedsMaster.push_back(a);
+        }
+        else
+        {
+          // a is its own master
+          d_trMaster[a] = a;
+          d_trSlaves[a].insert(a);
+        }
+      }
+    }
+    if (ak == Kind::EXPONENTIAL || ak == Kind::SINE)
+    {
+      needPi = needPi || (ak == Kind::SINE);
+      // if we didn't indicate that it should be purified above
+      if (consider)
+      {
+        // compute the representative (model values of arguments) for a
+        std::vector<Node> repList;
+        for (const Node& ac : a)
+        {
+          Node r = d_model.computeConcreteModelValue(ac);
+          repList.push_back(r);
+        }
+        Node aa = argTrie[ak].add(a, repList);
+        if (aa != a)
+        {
+          // apply congruence to pairs of terms that are disequal and congruent
+          Assert(aa.getNumChildren() == a.getNumChildren());
+          Node mvaa = d_model.computeAbstractModelValue(a);
+          Node mvaaa = d_model.computeAbstractModelValue(aa);
+          if (mvaa != mvaaa)
+          {
+            std::vector<Node> exp;
+            for (unsigned j = 0, size = a.getNumChildren(); j < size; j++)
+            {
+              exp.push_back(a[j].eqNode(aa[j]));
+            }
+            Node expn = exp.size() == 1 ? exp[0] : nm->mkNode(Kind::AND, exp);
+            Node cong_lemma = nm->mkNode(Kind::OR, expn.negate(), a.eqNode(aa));
+            d_im.addPendingArithLemma(cong_lemma, InferenceId::NL_CONGRUENCE);
+          }
+        }
+        else
+        {
+          // new representative of congruence class
+          d_funcMap[ak].push_back(a);
+        }
+        // add to congruence class
+        d_funcCongClass[aa].push_back(a);
+      }
+    }
+    else if (ak == Kind::PI)
+    {
+      Assert(consider);
+      needPi = true;
+      d_funcMap[ak].push_back(a);
+      d_funcCongClass[a].push_back(a);
+    }
+  }
+  // initialize pi if necessary
+  if (needPi && d_pi.isNull())
+  {
+    mkPi();
+    getCurrentPiBounds();
+  }
+
+  // stop if lemmas were already generated (e.g. congruence or pi bounds)
+  if (d_im.hasUsed())
+  {
+    return;
+  }
+
+  // process SINE phase shifting
+  for (const Node& a : trNeedsMaster)
+  {
+    // should not have processed this already
+    Assert(d_trMaster.find(a) == d_trMaster.end());
+    Kind k = a.getKind();
+    Assert(k == Kind::SINE || k == Kind::EXPONENTIAL);
+    Node y =
+        nm->mkSkolem("y", nm->realType(), "phase shifted trigonometric arg");
+    Node new_a = nm->mkNode(k, y);
+    d_trSlaves[new_a].insert(new_a);
+    d_trSlaves[new_a].insert(a);
+    d_trMaster[a] = new_a;
+    d_trMaster[new_a] = new_a;
+    Node lem;
+    if (k == Kind::SINE)
+    {
+      Trace("nl-ext-tf") << "Basis sine : " << new_a << " for " << a
+                         << std::endl;
+      Assert(!d_pi.isNull());
+      Node shift = nm->mkSkolem("s", nm->integerType(), "number of shifts");
+      // TODO : do not introduce shift here, instead needs model-based
+      // refinement for constant shifts (cvc4-projects #1284)
+      lem = nm->mkNode(
+          Kind::AND,
+          transcendental::mkValidPhase(y, d_pi),
+          nm->mkNode(
+              Kind::ITE,
+              transcendental::mkValidPhase(a[0], d_pi),
+              a[0].eqNode(y),
+              a[0].eqNode(nm->mkNode(
+                  Kind::PLUS,
+                  y,
+                  nm->mkNode(
+                      Kind::MULT, nm->mkConst(Rational(2)), shift, d_pi)))),
+          new_a.eqNode(a));
+    }
+    else
+    {
+      // do both equalities to ensure that new_a becomes a preregistered term
+      lem = nm->mkNode(Kind::AND, a.eqNode(new_a), a[0].eqNode(y));
+    }
+    // note we must do preprocess on this lemma
+    Trace("nl-ext-lemma") << "NonlinearExtension::Lemma : purify : " << lem
+                          << std::endl;
+    NlLemma nlem(
+        lem, LemmaProperty::PREPROCESS, nullptr, InferenceId::NL_T_PURIFY_ARG);
+    d_im.addPendingArithLemma(nlem);
+  }
+
+  if (Trace.isOn("nl-ext-mv"))
+  {
+    Trace("nl-ext-mv") << "Arguments of trancendental functions : "
+                       << std::endl;
+    for (std::pair<const Kind, std::vector<Node> >& tfl : d_funcMap)
+    {
+      Kind k = tfl.first;
+      if (k == Kind::SINE || k == Kind::EXPONENTIAL)
+      {
+        for (const Node& tf : tfl.second)
+        {
+          Node v = tf[0];
+          d_model.computeConcreteModelValue(v);
+          d_model.computeAbstractModelValue(v);
+          d_model.printModelValue("nl-ext-mv", v);
+        }
+      }
+    }
+  }
+}
+
+void TranscendentalState::mkPi()
+{
+  if (!d_pi.isNull())
+  {
+    // already initialized
+    return;
+  }
+  NodeManager* nm = NodeManager::currentNM();
+  d_pi = nm->mkNullaryOperator(nm->realType(), Kind::PI);
+  // cache rewritten forms of pi/2, -pi/2 and -pi
+  d_pi_2 = Rewriter::rewrite(
+      nm->mkNode(Kind::MULT, d_pi, nm->mkConst(Rational(1) / Rational(2))));
+  d_pi_neg_2 = Rewriter::rewrite(
+      nm->mkNode(Kind::MULT, d_pi, nm->mkConst(Rational(-1) / Rational(2))));
+  d_pi_neg = Rewriter::rewrite(
+      nm->mkNode(Kind::MULT, d_pi, nm->mkConst(Rational(-1))));
+  // initialize rational bounds: 103993/33102 <= pi <= 104348/33215
+  d_pi_bound[0] = nm->mkConst(Rational(103993) / Rational(33102));
+  d_pi_bound[1] = nm->mkConst(Rational(104348) / Rational(33215));
+}
+
+void TranscendentalState::getCurrentPiBounds()
+{
+  NodeManager* nm = NodeManager::currentNM();
+  // assert the currently known rational bounds on pi
+  Node lower = nm->mkNode(Kind::GEQ, d_pi, d_pi_bound[0]);
+  Node upper = nm->mkNode(Kind::LEQ, d_pi, d_pi_bound[1]);
+  Node pi_lem = nm->mkNode(Kind::AND, lower, upper);
+  d_im.addPendingArithLemma(pi_lem, InferenceId::NL_T_PI_BOUND);
+}
+
+std::pair<Node, Node> TranscendentalState::getClosestSecantPoints(TNode e,
+                                                                  TNode c,
+                                                                  unsigned d)
+{
+  // We should never propose the same secant point twice: the secant lemmas
+  // previously generated for it already rule out the current assignment.
+  Assert(
+      std::find(d_secant_points[e][d].begin(), d_secant_points[e][d].end(), c)
+      == d_secant_points[e][d].end());
+  // Work on a copy of the stored points. The stored vector is only updated
+  // once the corresponding secant plane lemma has been processed (via a lemma
+  // side effect), so it must not be modified here.
+  std::vector<Node> points = d_secant_points[e][d];
+  points.push_back(c);
+
+  // order the points by their model value
+  sortByNlModel(points.begin(), points.end(), &d_model);
+  // locate c within the sorted sequence
+  std::size_t pos = std::find(points.begin(), points.end(), c) - points.begin();
+
+  // the neighbors of c (if any) are the closest lower/upper secant points
+  Node below = pos > 0 ? points[pos - 1] : Node();
+  Node above = pos + 1 < points.size() ? points[pos + 1] : Node();
+  return {below, above};
+}
+
+// Builds the secant line through (b, approx(b)) and (c, approx_c) as a
+// function of arg:
+//   approx_b + (approx_b - approx_c) / (b - c) * (arg - b)
+Node TranscendentalState::mkSecantPlane(
+ TNode arg, TNode b, TNode c, TNode approx, TNode approx_c)
+{
+ NodeManager* nm = NodeManager::currentNM();
+ // Figure 3 : P(l), P(u), for s = 0,1
+ // evaluate the approximation at b by substituting the taylor variable
+ Node approx_b =
+ Rewriter::rewrite(approx.substitute(d_taylor.getTaylorVariable(), b));
+ // Figure 3: S_l( x ), S_u( x ) for s = 0,1
+ // rcoeff is the denominator b - c of the secant slope; b and c must be
+ // distinct constants, otherwise the slope is undefined
+ Node rcoeff_n = Rewriter::rewrite(nm->mkNode(Kind::MINUS, b, c));
+ Assert(rcoeff_n.isConst());
+ Rational rcoeff = rcoeff_n.getConst<Rational>();
+ Assert(rcoeff.sgn() != 0);
+ return nm->mkNode(Kind::PLUS,
+ approx_b,
+ nm->mkNode(Kind::MULT,
+ nm->mkNode(Kind::MINUS, approx_b, approx_c),
+ nm->mkConst(rcoeff.inverse()),
+ nm->mkNode(Kind::MINUS, arg, b)));
+}
+
+// Builds the secant refinement lemma for tf on the interval
+// [lower, upper], bounding tf by the secant plane splane.
+NlLemma TranscendentalState::mkSecantLemma(
+ TNode lower, TNode upper, int concavity, TNode tf, TNode splane)
+{
+ NodeManager* nm = NodeManager::currentNM();
+ // With respect to Figure 3, this is slightly different.
+ // In particular, we chose b to be the model value of bounds[s],
+ // which is a constant although bounds[s] may not be (e.g. if it
+ // contains PI).
+ // To ensure that c...b does not cross an inflection point,
+ // we guard with the symbolic version of bounds[s].
+ // This leads to lemmas e.g. of this form:
+ // ( c <= x <= PI/2 ) => ( sin(x) < ( P( b ) - P( c ) )*( x -
+ // b ) + P( b ) )
+ // where b = (PI/2)^M, the current value of PI/2 in the model.
+ // This is sound since we are guarded by the symbolic
+ // representation of PI/2.
+ // antecedent: the function argument lies within [lower, upper]
+ Node antec_n = nm->mkNode(Kind::AND,
+ nm->mkNode(Kind::GEQ, tf[0], lower),
+ nm->mkNode(Kind::LEQ, tf[0], upper));
+ // concavity == 1: the secant plane is an upper bound (tf <= splane);
+ // otherwise it is a lower bound (tf >= splane)
+ Node lem = nm->mkNode(
+ Kind::IMPLIES,
+ antec_n,
+ nm->mkNode(concavity == 1 ? Kind::LEQ : Kind::GEQ, tf, splane));
+ Trace("nl-ext-tftp-debug2")
+ << "*** Secant plane lemma (pre-rewrite) : " << lem << std::endl;
+ lem = Rewriter::rewrite(lem);
+ Trace("nl-ext-tftp-lemma") << "*** Secant plane lemma : " << lem << std::endl;
+ // the lemma should be false in the current model, i.e. it rules out the
+ // current candidate assignment
+ Assert(d_model.computeAbstractModelValue(lem) == d_false);
+ return NlLemma(lem, LemmaProperty::NONE, nullptr, InferenceId::NL_T_SECANT);
+}
+
+/**
+ * Construct and send secant lemmas for tf around c, one towards each of the
+ * two given bounds. A bound may be a null node (getClosestSecantPoints
+ * returns Node() when c has no neighbor on a side, and region bounds are
+ * only populated for SINE); such sides are skipped.
+ */
+void TranscendentalState::doSecantLemmas(const std::pair<Node, Node>& bounds,
+                                         TNode c,
+                                         TNode approx_c,
+                                         TNode tf,
+                                         unsigned d,
+                                         int concavity)
+{
+  // Make secant from bounds.first to c
+  if (!bounds.first.isNull())
+  {
+    // take the model value of the bound (since it may contain PI)
+    Node lval = d_model.computeAbstractModelValue(bounds.first);
+    if (lval != c)
+    {
+      Node splane = mkSecantPlane(tf[0], lval, c, bounds.first, approx_c);
+      NlLemma nlem = mkSecantLemma(lval, c, concavity, tf, splane);
+      // The side effect says that if lem is added, then we should add the
+      // secant point c for (tf,d).
+      nlem.d_secantPoint.push_back(std::make_tuple(tf, d, c));
+      d_im.addPendingArithLemma(nlem, true);
+    }
+  }
+
+  // Make secant from c to bounds.second
+  if (!bounds.second.isNull())
+  {
+    // take the model value of the bound (since it may contain PI)
+    Node uval = d_model.computeAbstractModelValue(bounds.second);
+    if (c != uval)
+    {
+      Node splane = mkSecantPlane(tf[0], c, uval, approx_c, bounds.second);
+      NlLemma nlem = mkSecantLemma(c, uval, concavity, tf, splane);
+      // The side effect says that if lem is added, then we should add the
+      // secant point c for (tf,d).
+      nlem.d_secantPoint.push_back(std::make_tuple(tf, d, c));
+      d_im.addPendingArithLemma(nlem, true);
+    }
+  }
+}
+
+} // namespace transcendental
+} // namespace nl
+} // namespace arith
+} // namespace theory
+} // namespace CVC4
--- /dev/null
+/********************* */
+/*! \file transcendental_state.h
+ ** \verbatim
+ ** Top contributors (to current version):
+ ** Andrew Reynolds, Tim King
+ ** This file is part of the CVC4 project.
+ ** Copyright (c) 2009-2020 by the authors listed in the file AUTHORS
+ ** in the top-level source directory and their institutional affiliations.
+ ** All rights reserved. See the file COPYING in the top-level source
+ ** directory for licensing information.\endverbatim
+ **
+ ** \brief Utilities for transcendental lemmas.
+ **/
+
+#ifndef CVC4__THEORY__ARITH__NL__TRANSCENDENTAL__TRANSCENDENTAL_STATE_H
+#define CVC4__THEORY__ARITH__NL__TRANSCENDENTAL__TRANSCENDENTAL_STATE_H
+
+#include "expr/node.h"
+#include "theory/arith/inference_manager.h"
+#include "theory/arith/nl/nl_model.h"
+#include "theory/arith/nl/transcendental/taylor_generator.h"
+
+namespace CVC4 {
+namespace theory {
+namespace arith {
+namespace nl {
+namespace transcendental {
+
+/**
+ * Holds common state and utilities for transcendental solvers.
+ *
+ * This includes common lookups and caches as well as generic utilities for
+ * secant plane lemmas and taylor approximations.
+ */
+struct TranscendentalState
+{
+  TranscendentalState(InferenceManager& im, NlModel& model);
+
+  /** init last call
+   *
+   * This is called at the beginning of last call effort check, where
+   * assertions are the set of assertions belonging to arithmetic,
+   * false_asserts is the subset of assertions that are false in the current
+   * model, and xts is the set of extended function terms that are active in
+   * the current context.
+   *
+   * This call may add lemmas to lems based on registering term
+   * information (for example, purification of sine terms).
+   */
+  void init(const std::vector<Node>& assertions,
+            const std::vector<Node>& false_asserts,
+            const std::vector<Node>& xts);
+
+  /** Initialize members for pi-related values */
+  void mkPi();
+  /** Get current bounds for pi as a lemma */
+  void getCurrentPiBounds();
+
+  /**
+   * Get the two closest secant points from the ones stored already.
+   * "closest" is determined according to the current model.
+   * @param e The transcendental term (like (exp t))
+   * @param c The point currently under consideration (probably the model of t)
+   * @param d The taylor degree.
+   * @return The closest secant points below and above c; either may be a
+   * null node if no secant point exists on that side of c yet.
+   */
+  std::pair<Node, Node> getClosestSecantPoints(TNode e, TNode c, unsigned d);
+
+  /**
+   * Construct a secant plane between b and c
+   * @param arg The argument of the transcendental term
+   * @param b Left secant point
+   * @param c Right secant point
+   * @param approx Approximation for b (not yet substituted)
+   * @param approx_c Approximation for c (already substituted)
+   */
+  Node mkSecantPlane(TNode arg, TNode b, TNode c, TNode approx, TNode approx_c);
+
+  /**
+   * Construct a secant lemma between lower and upper for tf.
+   * @param lower Left secant point
+   * @param upper Right secant point
+   * @param concavity Concavity of the current regions
+   * @param tf Current transcendental term
+   * @param splane Secant plane as computed by mkSecantPlane()
+   */
+  NlLemma mkSecantLemma(
+      TNode lower, TNode upper, int concavity, TNode tf, TNode splane);
+
+  /**
+   * Construct and send secant lemmas (if appropriate)
+   * @param bounds Secant bounds
+   * @param c Current point
+   * @param approx_c Approximation for c
+   * @param tf Current transcendental term
+   * @param d Current taylor degree
+   * @param concavity Concavity in region of c
+   */
+  void doSecantLemmas(const std::pair<Node, Node>& bounds,
+                      TNode c,
+                      TNode approx_c,
+                      TNode tf,
+                      unsigned d,
+                      int concavity);
+
+  /** Commonly used terms: true, false, 0, 1, -1 */
+  Node d_true;
+  Node d_false;
+  Node d_zero;
+  Node d_one;
+  Node d_neg_one;
+
+  /** The inference manager that we push conflicts and lemmas to. */
+  InferenceManager& d_im;
+  /** Reference to the non-linear model object */
+  NlModel& d_model;
+  /** Utility to compute taylor approximations */
+  TaylorGenerator d_taylor;
+
+  /**
+   * Some transcendental functions f(t) are "purified", e.g. we add
+   * t = y ^ f(t) = f(y) where y is a fresh variable. Those that are not
+   * purified we call "master terms".
+   *
+   * The maps below maintain a master/slave relationship over
+   * transcendental functions (SINE, EXPONENTIAL, PI), where above
+   * f(y) is the master of itself and of f(t).
+   *
+   * This is used for ensuring that the argument y of SINE we process is on
+   * the interval [-pi .. pi], and that exponentials are not applied to
+   * arguments that contain transcendental functions.
+   */
+  std::map<Node, Node> d_trMaster;
+  std::map<Node, std::unordered_set<Node, NodeHashFunction>> d_trSlaves;
+
+  /** concavity region for transcendental functions
+   *
+   * This stores an integer that identifies an interval in
+   * which the current model value for an argument of an
+   * application of a transcendental function resides.
+   *
+   * For exp( x ):
+   *   region #1 is -infty < x < infty
+   * For sin( x ):
+   *   region #0 is pi < x < infty (this is an invalid region)
+   *   region #1 is pi/2 < x <= pi
+   *   region #2 is 0 < x <= pi/2
+   *   region #3 is -pi/2 < x <= 0
+   *   region #4 is -pi < x <= -pi/2
+   *   region #5 is -infty < x <= -pi (this is an invalid region)
+   * All regions not listed above, as well as regions 0 and 5
+   * for SINE are "invalid". We only process applications
+   * of transcendental functions whose arguments have model
+   * values that reside in valid regions.
+   */
+  std::unordered_map<Node, int, NodeHashFunction> d_tf_region;
+  /**
+   * Maps representatives of a congruence class to the members of that class.
+   *
+   * In detail, a congruence class is a set of terms of the form
+   *   { f(t1), ..., f(tn) }
+   * such that t1 = ... = tn in the current context. We choose an arbitrary
+   * term among these to be the representative of this congruence class.
+   *
+   * Moreover, notice we compute congruence classes only over terms that
+   * are transcendental function applications that are "master terms",
+   * see d_trMaster/d_trSlave.
+   */
+  std::map<Node, std::vector<Node>> d_funcCongClass;
+  /**
+   * A list of all functions for each kind in { EXPONENTIAL, SINE, POW, PI }
+   * that are representatives of their congruence class.
+   */
+  std::map<Kind, std::vector<Node>> d_funcMap;
+
+  /** secant points (sorted list) for transcendental functions
+   *
+   * This is used for tangent plane refinements for
+   * transcendental functions. This is the set
+   * "get-previous-secant-points" in "Satisfiability
+   * Modulo Transcendental Functions via Incremental
+   * Linearization" by Cimatti et al., CADE 2017, for
+   * each transcendental function application. We store this set for each
+   * Taylor degree.
+   */
+  std::unordered_map<Node,
+                     std::map<unsigned, std::vector<Node>>,
+                     NodeHashFunction>
+      d_secant_points;
+
+  /** PI
+   *
+   * Note that PI is a (symbolic, non-constant) nullary operator. This is
+   * because its value cannot be computed exactly. We constrain PI to
+   * concrete lower and upper bounds stored in d_pi_bound below.
+   */
+  Node d_pi;
+  /** PI/2 */
+  Node d_pi_2;
+  /** -PI/2 */
+  Node d_pi_neg_2;
+  /** -PI */
+  Node d_pi_neg;
+  /** the concrete lower and upper bounds for PI */
+  Node d_pi_bound[2];
+};
+
+} // namespace transcendental
+} // namespace nl
+} // namespace arith
+} // namespace theory
+} // namespace CVC4
+
+#endif /* CVC4__THEORY__ARITH__NL__TRANSCENDENTAL__TRANSCENDENTAL_STATE_H */
+++ /dev/null
-/********************* */
-/*! \file transcendental_solver.cpp
- ** \verbatim
- ** Top contributors (to current version):
- ** Andrew Reynolds, Tim King, Gereon Kremer
- ** This file is part of the CVC4 project.
- ** Copyright (c) 2009-2020 by the authors listed in the file AUTHORS
- ** in the top-level source directory and their institutional affiliations.
- ** All rights reserved. See the file COPYING in the top-level source
- ** directory for licensing information.\endverbatim
- **
- ** \brief Implementation of solver for handling transcendental functions.
- **/
-
-#include "theory/arith/nl/transcendental_solver.h"
-
-#include <cmath>
-#include <set>
-
-#include "expr/node_algorithm.h"
-#include "expr/node_builder.h"
-#include "options/arith_options.h"
-#include "theory/arith/arith_msum.h"
-#include "theory/arith/arith_utilities.h"
-#include "theory/rewriter.h"
-
-using namespace CVC4::kind;
-
-namespace CVC4 {
-namespace theory {
-namespace arith {
-namespace nl {
-
-TranscendentalSolver::TranscendentalSolver(InferenceManager& im, NlModel& m) : d_im(im), d_model(m)
-{
- NodeManager* nm = NodeManager::currentNM();
- d_true = nm->mkConst(true);
- d_false = nm->mkConst(false);
- d_zero = nm->mkConst(Rational(0));
- d_one = nm->mkConst(Rational(1));
- d_neg_one = nm->mkConst(Rational(-1));
- d_taylor_real_fv = nm->mkBoundVar("x", nm->realType());
- d_taylor_real_fv_base = nm->mkBoundVar("a", nm->realType());
- d_taylor_real_fv_base_rem = nm->mkBoundVar("b", nm->realType());
- d_taylor_degree = options::nlExtTfTaylorDegree();
-}
-
-TranscendentalSolver::~TranscendentalSolver() {}
-
-void TranscendentalSolver::initLastCall(const std::vector<Node>& assertions,
- const std::vector<Node>& false_asserts,
- const std::vector<Node>& xts)
-{
- d_funcCongClass.clear();
- d_funcMap.clear();
- d_tf_region.clear();
-
- NodeManager* nm = NodeManager::currentNM();
-
- // register the extended function terms
- std::vector<Node> trNeedsMaster;
- bool needPi = false;
- // for computing congruence
- std::map<Kind, ArgTrie> argTrie;
- for (unsigned i = 0, xsize = xts.size(); i < xsize; i++)
- {
- Node a = xts[i];
- Kind ak = a.getKind();
- bool consider = true;
- // if is an unpurified application of SINE, or it is a transcendental
- // applied to a trancendental, purify.
- if (isTranscendentalKind(ak))
- {
- // if we've already computed master for a
- if (d_trMaster.find(a) != d_trMaster.end())
- {
- // a master has at least one slave
- consider = (d_trSlaves.find(a) != d_trSlaves.end());
- }
- else
- {
- if (ak == SINE)
- {
- // always not a master
- consider = false;
- }
- else
- {
- for (const Node& ac : a)
- {
- if (isTranscendentalKind(ac.getKind()))
- {
- consider = false;
- break;
- }
- }
- }
- if (!consider)
- {
- // wait to assign a master below
- trNeedsMaster.push_back(a);
- }
- else
- {
- d_trMaster[a] = a;
- d_trSlaves[a].insert(a);
- }
- }
- }
- if (ak == EXPONENTIAL || ak == SINE)
- {
- needPi = needPi || (ak == SINE);
- // if we didn't indicate that it should be purified above
- if (consider)
- {
- std::vector<Node> repList;
- for (const Node& ac : a)
- {
- Node r = d_model.computeConcreteModelValue(ac);
- repList.push_back(r);
- }
- Node aa = argTrie[ak].add(a, repList);
- if (aa != a)
- {
- // apply congruence to pairs of terms that are disequal and congruent
- Assert(aa.getNumChildren() == a.getNumChildren());
- Node mvaa = d_model.computeAbstractModelValue(a);
- Node mvaaa = d_model.computeAbstractModelValue(aa);
- if (mvaa != mvaaa)
- {
- std::vector<Node> exp;
- for (unsigned j = 0, size = a.getNumChildren(); j < size; j++)
- {
- exp.push_back(a[j].eqNode(aa[j]));
- }
- Node expn = exp.size() == 1 ? exp[0] : nm->mkNode(AND, exp);
- Node cong_lemma = nm->mkNode(OR, expn.negate(), a.eqNode(aa));
- d_im.addPendingArithLemma(cong_lemma, InferenceId::NL_CONGRUENCE);
- }
- }
- else
- {
- // new representative of congruence class
- d_funcMap[ak].push_back(a);
- }
- // add to congruence class
- d_funcCongClass[aa].push_back(a);
- }
- }
- else if (ak == PI)
- {
- Assert(consider);
- needPi = true;
- d_funcMap[ak].push_back(a);
- d_funcCongClass[a].push_back(a);
- }
- }
- // initialize pi if necessary
- if (needPi && d_pi.isNull())
- {
- mkPi();
- getCurrentPiBounds();
- }
-
- if (d_im.hasUsed())
- {
- return;
- }
-
- // process SINE phase shifting
- for (const Node& a : trNeedsMaster)
- {
- // should not have processed this already
- Assert(d_trMaster.find(a) == d_trMaster.end());
- Kind k = a.getKind();
- Assert(k == SINE || k == EXPONENTIAL);
- Node y =
- nm->mkSkolem("y", nm->realType(), "phase shifted trigonometric arg");
- Node new_a = nm->mkNode(k, y);
- d_trSlaves[new_a].insert(new_a);
- d_trSlaves[new_a].insert(a);
- d_trMaster[a] = new_a;
- d_trMaster[new_a] = new_a;
- Node lem;
- if (k == SINE)
- {
- Trace("nl-ext-tf") << "Basis sine : " << new_a << " for " << a
- << std::endl;
- Assert(!d_pi.isNull());
- Node shift = nm->mkSkolem("s", nm->integerType(), "number of shifts");
- // TODO : do not introduce shift here, instead needs model-based
- // refinement for constant shifts (cvc4-projects #1284)
- lem = nm->mkNode(
- AND,
- mkValidPhase(y, d_pi),
- nm->mkNode(
- ITE,
- mkValidPhase(a[0], d_pi),
- a[0].eqNode(y),
- a[0].eqNode(nm->mkNode(
- PLUS,
- y,
- nm->mkNode(MULT, nm->mkConst(Rational(2)), shift, d_pi)))),
- new_a.eqNode(a));
- }
- else
- {
- // do both equalities to ensure that new_a becomes a preregistered term
- lem = nm->mkNode(AND, a.eqNode(new_a), a[0].eqNode(y));
- }
- // note we must do preprocess on this lemma
- Trace("nl-ext-lemma") << "NonlinearExtension::Lemma : purify : " << lem
- << std::endl;
- NlLemma nlem(lem, LemmaProperty::PREPROCESS, nullptr, InferenceId::NL_T_PURIFY_ARG);
- d_im.addPendingArithLemma(nlem);
- }
-
- if (Trace.isOn("nl-ext-mv"))
- {
- Trace("nl-ext-mv") << "Arguments of trancendental functions : "
- << std::endl;
- for (std::pair<const Kind, std::vector<Node> >& tfl : d_funcMap)
- {
- Kind k = tfl.first;
- if (k == SINE || k == EXPONENTIAL)
- {
- for (const Node& tf : tfl.second)
- {
- Node v = tf[0];
- d_model.computeConcreteModelValue(v);
- d_model.computeAbstractModelValue(v);
- d_model.printModelValue("nl-ext-mv", v);
- }
- }
- }
- }
-}
-
-bool TranscendentalSolver::preprocessAssertionsCheckModel(
- std::vector<Node>& assertions)
-{
- std::vector<Node> pvars;
- std::vector<Node> psubs;
- for (const std::pair<const Node, Node>& tb : d_trMaster)
- {
- pvars.push_back(tb.first);
- psubs.push_back(tb.second);
- }
-
- // initialize representation of assertions
- std::vector<Node> passertions;
- for (const Node& a : assertions)
-
- {
- Node pa = a;
- if (!pvars.empty())
- {
- pa = arithSubstitute(pa, pvars, psubs);
- pa = Rewriter::rewrite(pa);
- }
- if (!pa.isConst() || !pa.getConst<bool>())
- {
- Trace("nl-ext-cm-assert") << "- assert : " << pa << std::endl;
- passertions.push_back(pa);
- }
- }
- // get model bounds for all transcendental functions
- Trace("nl-ext-cm") << "----- Get bounds for transcendental functions..."
- << std::endl;
- for (std::pair<const Kind, std::vector<Node> >& tfs : d_funcMap)
- {
- Kind k = tfs.first;
- for (const Node& tf : tfs.second)
- {
- Trace("nl-ext-cm") << "- Term: " << tf << std::endl;
- bool success = true;
- // tf is Figure 3 : tf( x )
- Node bl;
- Node bu;
- if (k == PI)
- {
- bl = d_pi_bound[0];
- bu = d_pi_bound[1];
- }
- else
- {
- std::pair<Node, Node> bounds = getTfModelBounds(tf, d_taylor_degree);
- bl = bounds.first;
- bu = bounds.second;
- if (bl != bu)
- {
- d_model.setUsedApproximate();
- }
- }
- if (!bl.isNull() && !bu.isNull())
- {
- // for each function in the congruence classe
- for (const Node& ctf : d_funcCongClass[tf])
- {
- // each term in congruence classes should be master terms
- Assert(d_trSlaves.find(ctf) != d_trSlaves.end());
- // we set the bounds for each slave of tf
- for (const Node& stf : d_trSlaves[ctf])
- {
- Trace("nl-ext-cm") << "...bound for " << stf << " : [" << bl << ", "
- << bu << "]" << std::endl;
- success = d_model.addCheckModelBound(stf, bl, bu);
- }
- }
- }
- else
- {
- Trace("nl-ext-cm") << "...no bound for " << tf << std::endl;
- }
- if (!success)
- {
- // a bound was conflicting
- Trace("nl-ext-cm") << "...failed to set bound for " << tf << std::endl;
- Trace("nl-ext-cm") << "-----" << std::endl;
- return false;
- }
- }
- }
- // replace the assertions
- assertions = passertions;
- return true;
-}
-
-void TranscendentalSolver::incrementTaylorDegree() { d_taylor_degree++; }
-unsigned TranscendentalSolver::getTaylorDegree() const
-{
- return d_taylor_degree;
-}
-
-void TranscendentalSolver::processSideEffect(const NlLemma& se)
-{
- for (const std::tuple<Node, unsigned, Node>& sp : se.d_secantPoint)
- {
- Node tf = std::get<0>(sp);
- unsigned d = std::get<1>(sp);
- Node c = std::get<2>(sp);
- d_secant_points[tf][d].push_back(c);
- }
-}
-
-void TranscendentalSolver::mkPi()
-{
- NodeManager* nm = NodeManager::currentNM();
- if (d_pi.isNull())
- {
- d_pi = nm->mkNullaryOperator(nm->realType(), PI);
- d_pi_2 = Rewriter::rewrite(
- nm->mkNode(MULT, d_pi, nm->mkConst(Rational(1) / Rational(2))));
- d_pi_neg_2 = Rewriter::rewrite(
- nm->mkNode(MULT, d_pi, nm->mkConst(Rational(-1) / Rational(2))));
- d_pi_neg =
- Rewriter::rewrite(nm->mkNode(MULT, d_pi, nm->mkConst(Rational(-1))));
- // initialize bounds
- d_pi_bound[0] = nm->mkConst(Rational(103993) / Rational(33102));
- d_pi_bound[1] = nm->mkConst(Rational(104348) / Rational(33215));
- }
-}
-
-void TranscendentalSolver::getCurrentPiBounds()
-{
- NodeManager* nm = NodeManager::currentNM();
- Node pi_lem = nm->mkNode(AND,
- nm->mkNode(GEQ, d_pi, d_pi_bound[0]),
- nm->mkNode(LEQ, d_pi, d_pi_bound[1]));
- d_im.addPendingArithLemma(pi_lem, InferenceId::NL_T_PI_BOUND);
-}
-
-void TranscendentalSolver::checkTranscendentalInitialRefine()
-{
- NodeManager* nm = NodeManager::currentNM();
- Trace("nl-ext")
- << "Get initial refinement lemmas for transcendental functions..."
- << std::endl;
- for (std::pair<const Kind, std::vector<Node> >& tfl : d_funcMap)
- {
- Kind k = tfl.first;
- for (const Node& t : tfl.second)
- {
- // initial refinements
- if (d_tf_initial_refine.find(t) == d_tf_initial_refine.end())
- {
- d_tf_initial_refine[t] = true;
- Node lem;
- if (k == SINE)
- {
- Node symn = nm->mkNode(SINE, nm->mkNode(MULT, d_neg_one, t[0]));
- symn = Rewriter::rewrite(symn);
- // Can assume it is its own master since phase is split over 0,
- // hence -pi <= t[0] <= pi implies -pi <= -t[0] <= pi.
- d_trMaster[symn] = symn;
- d_trSlaves[symn].insert(symn);
- Assert(d_trSlaves.find(t) != d_trSlaves.end());
- std::vector<Node> children;
-
- lem = nm->mkNode(AND,
- // bounds
- nm->mkNode(AND,
- nm->mkNode(LEQ, t, d_one),
- nm->mkNode(GEQ, t, d_neg_one)),
- // symmetry
- nm->mkNode(PLUS, t, symn).eqNode(d_zero),
- // sign
- nm->mkNode(EQUAL,
- nm->mkNode(LT, t[0], d_zero),
- nm->mkNode(LT, t, d_zero)),
- // zero val
- nm->mkNode(EQUAL,
- nm->mkNode(GT, t[0], d_zero),
- nm->mkNode(GT, t, d_zero)));
- lem = nm->mkNode(
- AND,
- lem,
- // zero tangent
- nm->mkNode(AND,
- nm->mkNode(IMPLIES,
- nm->mkNode(GT, t[0], d_zero),
- nm->mkNode(LT, t, t[0])),
- nm->mkNode(IMPLIES,
- nm->mkNode(LT, t[0], d_zero),
- nm->mkNode(GT, t, t[0]))),
- // pi tangent
- nm->mkNode(
- AND,
- nm->mkNode(IMPLIES,
- nm->mkNode(LT, t[0], d_pi),
- nm->mkNode(LT, t, nm->mkNode(MINUS, d_pi, t[0]))),
- nm->mkNode(
- IMPLIES,
- nm->mkNode(GT, t[0], d_pi_neg),
- nm->mkNode(GT, t, nm->mkNode(MINUS, d_pi_neg, t[0])))));
- }
- else if (k == EXPONENTIAL)
- {
- // ( exp(x) > 0 ) ^ ( x=0 <=> exp( x ) = 1 ) ^ ( x < 0 <=> exp( x ) <
- // 1 ) ^ ( x <= 0 V exp( x ) > x + 1 )
- lem = nm->mkNode(
- AND,
- nm->mkNode(GT, t, d_zero),
- nm->mkNode(EQUAL, t[0].eqNode(d_zero), t.eqNode(d_one)),
- nm->mkNode(EQUAL,
- nm->mkNode(LT, t[0], d_zero),
- nm->mkNode(LT, t, d_one)),
- nm->mkNode(OR,
- nm->mkNode(LEQ, t[0], d_zero),
- nm->mkNode(GT, t, nm->mkNode(PLUS, t[0], d_one))));
- }
- if (!lem.isNull())
- {
- d_im.addPendingArithLemma(lem, InferenceId::NL_T_INIT_REFINE);
- }
- }
- }
- }
-}
-
-void TranscendentalSolver::checkTranscendentalMonotonic()
-{
- Trace("nl-ext") << "Get monotonicity lemmas for transcendental functions..."
- << std::endl;
-
- // sort arguments of all transcendentals
- std::map<Kind, std::vector<Node> > sorted_tf_args;
- std::map<Kind, std::map<Node, Node> > tf_arg_to_term;
-
- for (std::pair<const Kind, std::vector<Node> >& tfl : d_funcMap)
- {
- Kind k = tfl.first;
- if (k == EXPONENTIAL || k == SINE)
- {
- for (const Node& tf : tfl.second)
- {
- Node a = tf[0];
- Node mvaa = d_model.computeAbstractModelValue(a);
- if (mvaa.isConst())
- {
- Trace("nl-ext-tf-mono-debug") << "...tf term : " << a << std::endl;
- sorted_tf_args[k].push_back(a);
- tf_arg_to_term[k][a] = tf;
- }
- }
- }
- }
-
- SortNlModel smv;
- smv.d_nlm = &d_model;
- // sort by concrete values
- smv.d_isConcrete = true;
- smv.d_reverse_order = true;
- for (std::pair<const Kind, std::vector<Node> >& tfl : d_funcMap)
- {
- Kind k = tfl.first;
- if (!sorted_tf_args[k].empty())
- {
- std::sort(sorted_tf_args[k].begin(), sorted_tf_args[k].end(), smv);
- Trace("nl-ext-tf-mono") << "Sorted transcendental function list for " << k
- << " : " << std::endl;
- for (unsigned i = 0; i < sorted_tf_args[k].size(); i++)
- {
- Node targ = sorted_tf_args[k][i];
- Node mvatarg = d_model.computeAbstractModelValue(targ);
- Trace("nl-ext-tf-mono")
- << " " << targ << " -> " << mvatarg << std::endl;
- Node t = tf_arg_to_term[k][targ];
- Node mvat = d_model.computeAbstractModelValue(t);
- Trace("nl-ext-tf-mono") << " f-val : " << mvat << std::endl;
- }
- std::vector<Node> mpoints;
- std::vector<Node> mpoints_vals;
- if (k == SINE)
- {
- mpoints.push_back(d_pi);
- mpoints.push_back(d_pi_2);
- mpoints.push_back(d_zero);
- mpoints.push_back(d_pi_neg_2);
- mpoints.push_back(d_pi_neg);
- }
- else if (k == EXPONENTIAL)
- {
- mpoints.push_back(Node::null());
- }
- if (!mpoints.empty())
- {
- // get model values for points
- for (unsigned i = 0; i < mpoints.size(); i++)
- {
- Node mpv;
- if (!mpoints[i].isNull())
- {
- mpv = d_model.computeAbstractModelValue(mpoints[i]);
- Assert(mpv.isConst());
- }
- mpoints_vals.push_back(mpv);
- }
-
- unsigned mdir_index = 0;
- int monotonic_dir = -1;
- Node mono_bounds[2];
- Node targ, targval, t, tval;
- for (unsigned i = 0, size = sorted_tf_args[k].size(); i < size; i++)
- {
- Node sarg = sorted_tf_args[k][i];
- Node sargval = d_model.computeAbstractModelValue(sarg);
- Assert(sargval.isConst());
- Node s = tf_arg_to_term[k][sarg];
- Node sval = d_model.computeAbstractModelValue(s);
- Assert(sval.isConst());
-
- // increment to the proper monotonicity region
- bool increment = true;
- while (increment && mdir_index < mpoints.size())
- {
- increment = false;
- if (mpoints[mdir_index].isNull())
- {
- increment = true;
- }
- else
- {
- Node pval = mpoints_vals[mdir_index];
- Assert(pval.isConst());
- if (sargval.getConst<Rational>() < pval.getConst<Rational>())
- {
- increment = true;
- Trace("nl-ext-tf-mono") << "...increment at " << sarg
- << " since model value is less than "
- << mpoints[mdir_index] << std::endl;
- }
- }
- if (increment)
- {
- tval = Node::null();
- mono_bounds[1] = mpoints[mdir_index];
- mdir_index++;
- monotonic_dir = regionToMonotonicityDir(k, mdir_index);
- if (mdir_index < mpoints.size())
- {
- mono_bounds[0] = mpoints[mdir_index];
- }
- else
- {
- mono_bounds[0] = Node::null();
- }
- }
- }
- // store the concavity region
- d_tf_region[s] = mdir_index;
- Trace("nl-ext-concavity") << "Transcendental function " << s
- << " is in region #" << mdir_index;
- Trace("nl-ext-concavity")
- << ", arg model value = " << sargval << std::endl;
-
- if (!tval.isNull())
- {
- NodeManager* nm = NodeManager::currentNM();
- Node mono_lem;
- if (monotonic_dir == 1
- && sval.getConst<Rational>() > tval.getConst<Rational>())
- {
- mono_lem = nm->mkNode(
- IMPLIES, nm->mkNode(GEQ, targ, sarg), nm->mkNode(GEQ, t, s));
- }
- else if (monotonic_dir == -1
- && sval.getConst<Rational>() < tval.getConst<Rational>())
- {
- mono_lem = nm->mkNode(
- IMPLIES, nm->mkNode(LEQ, targ, sarg), nm->mkNode(LEQ, t, s));
- }
- if (!mono_lem.isNull())
- {
- if (!mono_bounds[0].isNull())
- {
- Assert(!mono_bounds[1].isNull());
- mono_lem = nm->mkNode(
- IMPLIES,
- nm->mkNode(AND,
- mkBounded(mono_bounds[0], targ, mono_bounds[1]),
- mkBounded(mono_bounds[0], sarg, mono_bounds[1])),
- mono_lem);
- }
- Trace("nl-ext-tf-mono")
- << "Monotonicity lemma : " << mono_lem << std::endl;
-
- d_im.addPendingArithLemma(mono_lem, InferenceId::NL_T_MONOTONICITY);
- }
- }
- // store the previous values
- targ = sarg;
- targval = sargval;
- t = s;
- tval = sval;
- }
- }
- }
- }
-}
-
-void TranscendentalSolver::checkTranscendentalTangentPlanes()
-{
- Trace("nl-ext") << "Get tangent plane lemmas for transcendental functions..."
- << std::endl;
- // this implements Figure 3 of "Satisfiaility Modulo Transcendental Functions
- // via Incremental Linearization" by Cimatti et al
- for (std::pair<const Kind, std::vector<Node> >& tfs : d_funcMap)
- {
- Kind k = tfs.first;
- if (k == PI)
- {
- // We do not use Taylor approximation for PI currently.
- // This is because the convergence is extremely slow, and hence an
- // initial approximation is superior.
- continue;
- }
- Trace("nl-ext-tftp-debug2") << "Taylor variables: " << std::endl;
- Trace("nl-ext-tftp-debug2")
- << " taylor_real_fv : " << d_taylor_real_fv << std::endl;
- Trace("nl-ext-tftp-debug2")
- << " taylor_real_fv_base : " << d_taylor_real_fv_base << std::endl;
- Trace("nl-ext-tftp-debug2")
- << " taylor_real_fv_base_rem : " << d_taylor_real_fv_base_rem
- << std::endl;
- Trace("nl-ext-tftp-debug2") << std::endl;
-
- // we substitute into the Taylor sum P_{n,f(0)}( x )
-
- for (const Node& tf : tfs.second)
- {
- // tf is Figure 3 : tf( x )
- Trace("nl-ext-tftp") << "Compute tangent planes " << tf << std::endl;
- // go until max degree is reached, or we don't meet bound criteria
- for (unsigned d = 1; d <= d_taylor_degree; d++)
- {
- Trace("nl-ext-tftp") << "- run at degree " << d << "..." << std::endl;
- unsigned prev = d_im.numPendingLemmas() + d_im.numWaitingLemmas();
- if (checkTfTangentPlanesFun(tf, d))
- {
- Trace("nl-ext-tftp")
- << "...fail, #lemmas = "
- << (d_im.numPendingLemmas() + d_im.numWaitingLemmas() - prev)
- << std::endl;
- break;
- }
- else
- {
- Trace("nl-ext-tftp") << "...success" << std::endl;
- }
- }
- }
- }
-}
-
-bool TranscendentalSolver::checkTfTangentPlanesFun(Node tf,
- unsigned d)
-{
- NodeManager* nm = NodeManager::currentNM();
- Kind k = tf.getKind();
- // this should only be run on master applications
- Assert(d_trSlaves.find(tf) != d_trSlaves.end());
-
- // Figure 3 : c
- Node c = d_model.computeAbstractModelValue(tf[0]);
- int csign = c.getConst<Rational>().sgn();
- if (csign == 0)
- {
- // no secant/tangent plane is necessary
- return true;
- }
- Assert(csign == 1 || csign == -1);
-
- // Figure 3: P_l, P_u
- // mapped to for signs of c
- std::map<int, Node> poly_approx_bounds[2];
- std::vector<Node> pbounds;
- getPolynomialApproximationBoundForArg(k, c, d, pbounds);
- poly_approx_bounds[0][1] = pbounds[0];
- poly_approx_bounds[0][-1] = pbounds[1];
- poly_approx_bounds[1][1] = pbounds[2];
- poly_approx_bounds[1][-1] = pbounds[3];
-
- // Figure 3 : v
- Node v = d_model.computeAbstractModelValue(tf);
-
- // check value of tf
- Trace("nl-ext-tftp-debug") << "Process tangent plane refinement for " << tf
- << ", degree " << d << "..." << std::endl;
- Trace("nl-ext-tftp-debug") << " value in model : " << v << std::endl;
- Trace("nl-ext-tftp-debug") << " arg value in model : " << c << std::endl;
-
- std::vector<Node> taylor_vars;
- taylor_vars.push_back(d_taylor_real_fv);
-
- // compute the concavity
- int region = -1;
- std::unordered_map<Node, int, NodeHashFunction>::iterator itr =
- d_tf_region.find(tf);
- if (itr != d_tf_region.end())
- {
- region = itr->second;
- Trace("nl-ext-tftp-debug") << " region is : " << region << std::endl;
- }
- // Figure 3 : conc
- int concavity = regionToConcavity(k, itr->second);
- Trace("nl-ext-tftp-debug") << " concavity is : " << concavity << std::endl;
- if (concavity == 0)
- {
- // no secant/tangent plane is necessary
- return true;
- }
- // bounds for which we are this concavity
- // Figure 3: < l, u >
- Node bounds[2];
- if (k == SINE)
- {
- bounds[0] = regionToLowerBound(k, region);
- Assert(!bounds[0].isNull());
- bounds[1] = regionToUpperBound(k, region);
- Assert(!bounds[1].isNull());
- }
-
- // Figure 3: P
- Node poly_approx;
-
- // compute whether this is a tangent refinement or a secant refinement
- bool is_tangent = false;
- bool is_secant = false;
- std::pair<Node, Node> mvb = getTfModelBounds(tf, d);
- // this is the approximated value of tf(c), which is a value such that:
- // M_A(tf(c)) >= poly_appox_c >= tf(c) or
- // M_A(tf(c)) <= poly_appox_c <= tf(c)
- // In other words, it is a better approximation of the true value of tf(c)
- // in the case that we add a refinement lemma. We use this value in the
- // refinement schemas below.
- Node poly_approx_c;
- for (unsigned r = 0; r < 2; r++)
- {
- Node pab = poly_approx_bounds[r][csign];
- Node v_pab = r == 0 ? mvb.first : mvb.second;
- if (!v_pab.isNull())
- {
- Trace("nl-ext-tftp-debug2")
- << "...model value of " << pab << " is " << v_pab << std::endl;
-
- Assert(v_pab.isConst());
- Node comp = nm->mkNode(r == 0 ? LT : GT, v, v_pab);
- Trace("nl-ext-tftp-debug2") << "...compare : " << comp << std::endl;
- Node compr = Rewriter::rewrite(comp);
- Trace("nl-ext-tftp-debug2") << "...got : " << compr << std::endl;
- if (compr == d_true)
- {
- poly_approx_c = v_pab;
- // beyond the bounds
- if (r == 0)
- {
- poly_approx = poly_approx_bounds[r][csign];
- is_tangent = concavity == 1;
- is_secant = concavity == -1;
- }
- else
- {
- poly_approx = poly_approx_bounds[r][csign];
- is_tangent = concavity == -1;
- is_secant = concavity == 1;
- }
- if (Trace.isOn("nl-ext-tftp"))
- {
- Trace("nl-ext-tftp") << "*** Outside boundary point (";
- Trace("nl-ext-tftp") << (r == 0 ? "low" : "high") << ") ";
- printRationalApprox("nl-ext-tftp", v_pab);
- Trace("nl-ext-tftp") << ", will refine..." << std::endl;
- Trace("nl-ext-tftp")
- << " poly_approx = " << poly_approx << std::endl;
- Trace("nl-ext-tftp")
- << " is_tangent = " << is_tangent << std::endl;
- Trace("nl-ext-tftp") << " is_secant = " << is_secant << std::endl;
- }
- break;
- }
- else
- {
- Trace("nl-ext-tftp")
- << " ...within " << (r == 0 ? "low" : "high") << " bound : ";
- printRationalApprox("nl-ext-tftp", v_pab);
- Trace("nl-ext-tftp") << std::endl;
- }
- }
- }
-
- // Figure 3: P( c )
- if (is_tangent || is_secant)
- {
- Trace("nl-ext-tftp-debug2")
- << "...poly approximation at c is " << poly_approx_c << std::endl;
- }
- else
- {
- // we may want to continue getting better bounds
- return false;
- }
-
- if (is_tangent)
- {
- // compute tangent plane
- // Figure 3: T( x )
- // We use zero slope tangent planes, since the concavity of the Taylor
- // approximation cannot be easily established.
- Node tplane = poly_approx_c;
-
- Node lem = nm->mkNode(concavity == 1 ? GEQ : LEQ, tf, tplane);
- std::vector<Node> antec;
- int mdir = regionToMonotonicityDir(k, region);
- for (unsigned i = 0; i < 2; i++)
- {
- // Tangent plane is valid in the interval [c,u) if the slope of the
- // function matches its concavity, and is valid in (l, c] otherwise.
- Node use_bound = (mdir == concavity) == (i == 0) ? c : bounds[i];
- if (!use_bound.isNull())
- {
- Node ant = nm->mkNode(i == 0 ? GEQ : LEQ, tf[0], use_bound);
- antec.push_back(ant);
- }
- }
- if (!antec.empty())
- {
- Node antec_n = antec.size() == 1 ? antec[0] : nm->mkNode(AND, antec);
- lem = nm->mkNode(IMPLIES, antec_n, lem);
- }
- Trace("nl-ext-tftp-debug2")
- << "*** Tangent plane lemma (pre-rewrite): " << lem << std::endl;
- lem = Rewriter::rewrite(lem);
- Trace("nl-ext-tftp-lemma")
- << "*** Tangent plane lemma : " << lem << std::endl;
- Assert(d_model.computeAbstractModelValue(lem) == d_false);
- // Figure 3 : line 9
- d_im.addPendingArithLemma(lem, InferenceId::NL_T_TANGENT, nullptr, true);
- }
- else if (is_secant)
- {
- // bounds are the minimum and maximum previous secant points
- // should not repeat secant points: secant lemmas should suffice to
- // rule out previous assignment
- Assert(std::find(
- d_secant_points[tf][d].begin(), d_secant_points[tf][d].end(), c)
- == d_secant_points[tf][d].end());
- // Insert into the (temporary) vector. We do not update this vector
- // until we are sure this secant plane lemma has been processed. We do
- // this by mapping the lemma to a side effect below.
- std::vector<Node> spoints = d_secant_points[tf][d];
- spoints.push_back(c);
-
- // sort
- SortNlModel smv;
- smv.d_nlm = &d_model;
- smv.d_isConcrete = true;
- std::sort(spoints.begin(), spoints.end(), smv);
- // get the resulting index of c
- unsigned index =
- std::find(spoints.begin(), spoints.end(), c) - spoints.begin();
- // bounds are the next closest upper/lower bound values
- if (index > 0)
- {
- bounds[0] = spoints[index - 1];
- }
- else
- {
- // otherwise, we use the lower boundary point for this concavity
- // region
- if (k == SINE)
- {
- Assert(!bounds[0].isNull());
- }
- else if (k == EXPONENTIAL)
- {
- // pick c-1
- bounds[0] = Rewriter::rewrite(nm->mkNode(MINUS, c, d_one));
- }
- }
- if (index < spoints.size() - 1)
- {
- bounds[1] = spoints[index + 1];
- }
- else
- {
- // otherwise, we use the upper boundary point for this concavity
- // region
- if (k == SINE)
- {
- Assert(!bounds[1].isNull());
- }
- else if (k == EXPONENTIAL)
- {
- // pick c+1
- bounds[1] = Rewriter::rewrite(nm->mkNode(PLUS, c, d_one));
- }
- }
- Trace("nl-ext-tftp-debug2") << "...secant bounds are : " << bounds[0]
- << " ... " << bounds[1] << std::endl;
-
- // the secant plane may be conjunction of 1-2 guarded inequalities
- std::vector<Node> lemmaConj;
- for (unsigned s = 0; s < 2; s++)
- {
- // compute secant plane
- Assert(!poly_approx.isNull());
- Assert(!bounds[s].isNull());
- // take the model value of l or u (since may contain PI)
- Node b = d_model.computeAbstractModelValue(bounds[s]);
- Trace("nl-ext-tftp-debug2") << "...model value of bound " << bounds[s]
- << " is " << b << std::endl;
- Assert(b.isConst());
- if (c != b)
- {
- // Figure 3 : P(l), P(u), for s = 0,1
- Node poly_approx_b;
- std::vector<Node> taylor_subs;
- taylor_subs.push_back(b);
- Assert(taylor_vars.size() == taylor_subs.size());
- poly_approx_b = poly_approx.substitute(taylor_vars.begin(),
- taylor_vars.end(),
- taylor_subs.begin(),
- taylor_subs.end());
- // Figure 3: S_l( x ), S_u( x ) for s = 0,1
- Node splane;
- Node rcoeff_n = Rewriter::rewrite(nm->mkNode(MINUS, b, c));
- Assert(rcoeff_n.isConst());
- Rational rcoeff = rcoeff_n.getConst<Rational>();
- Assert(rcoeff.sgn() != 0);
- poly_approx_b = Rewriter::rewrite(poly_approx_b);
- poly_approx_c = Rewriter::rewrite(poly_approx_c);
- splane = nm->mkNode(
- PLUS,
- poly_approx_b,
- nm->mkNode(MULT,
- nm->mkNode(MINUS, poly_approx_b, poly_approx_c),
- nm->mkConst(Rational(1) / rcoeff),
- nm->mkNode(MINUS, tf[0], b)));
-
- Node lem = nm->mkNode(concavity == 1 ? LEQ : GEQ, tf, splane);
- // With respect to Figure 3, this is slightly different.
- // In particular, we chose b to be the model value of bounds[s],
- // which is a constant although bounds[s] may not be (e.g. if it
- // contains PI).
- // To ensure that c...b does not cross an inflection point,
- // we guard with the symbolic version of bounds[s].
- // This leads to lemmas e.g. of this form:
- // ( c <= x <= PI/2 ) => ( sin(x) < ( P( b ) - P( c ) )*( x -
- // b ) + P( b ) )
- // where b = (PI/2)^M, the current value of PI/2 in the model.
- // This is sound since we are guarded by the symbolic
- // representation of PI/2.
- Node antec_n =
- nm->mkNode(AND,
- nm->mkNode(GEQ, tf[0], s == 0 ? bounds[s] : c),
- nm->mkNode(LEQ, tf[0], s == 0 ? c : bounds[s]));
- lem = nm->mkNode(IMPLIES, antec_n, lem);
- Trace("nl-ext-tftp-debug2")
- << "*** Secant plane lemma (pre-rewrite) : " << lem << std::endl;
- lem = Rewriter::rewrite(lem);
- Trace("nl-ext-tftp-lemma")
- << "*** Secant plane lemma : " << lem << std::endl;
- lemmaConj.push_back(lem);
- Assert(d_model.computeAbstractModelValue(lem) == d_false);
- }
- }
- // Figure 3 : line 22
- Assert(!lemmaConj.empty());
- Node lem =
- lemmaConj.size() == 1 ? lemmaConj[0] : nm->mkNode(AND, lemmaConj);
- NlLemma nlem(lem, LemmaProperty::NONE, nullptr, InferenceId::NL_T_SECANT);
- // The side effect says that if lem is added, then we should add the
- // secant point c for (tf,d).
- nlem.d_secantPoint.push_back(std::make_tuple(tf, d, c));
- d_im.addPendingArithLemma(nlem, true);
- }
- return true;
-}
-
-int TranscendentalSolver::regionToMonotonicityDir(Kind k, int region)
-{
- if (k == EXPONENTIAL)
- {
- if (region == 1)
- {
- return 1;
- }
- }
- else if (k == SINE)
- {
- if (region == 1 || region == 4)
- {
- return -1;
- }
- else if (region == 2 || region == 3)
- {
- return 1;
- }
- }
- return 0;
-}
-
-int TranscendentalSolver::regionToConcavity(Kind k, int region)
-{
- if (k == EXPONENTIAL)
- {
- if (region == 1)
- {
- return 1;
- }
- }
- else if (k == SINE)
- {
- if (region == 1 || region == 2)
- {
- return -1;
- }
- else if (region == 3 || region == 4)
- {
- return 1;
- }
- }
- return 0;
-}
-
-Node TranscendentalSolver::regionToLowerBound(Kind k, int region)
-{
- if (k == SINE)
- {
- if (region == 1)
- {
- return d_pi_2;
- }
- else if (region == 2)
- {
- return d_zero;
- }
- else if (region == 3)
- {
- return d_pi_neg_2;
- }
- else if (region == 4)
- {
- return d_pi_neg;
- }
- }
- return Node::null();
-}
-
-Node TranscendentalSolver::regionToUpperBound(Kind k, int region)
-{
- if (k == SINE)
- {
- if (region == 1)
- {
- return d_pi;
- }
- else if (region == 2)
- {
- return d_pi_2;
- }
- else if (region == 3)
- {
- return d_zero;
- }
- else if (region == 4)
- {
- return d_pi_neg_2;
- }
- }
- return Node::null();
-}
-
-Node TranscendentalSolver::getDerivative(Node n, Node x)
-{
- NodeManager* nm = NodeManager::currentNM();
- Assert(x.isVar());
- // only handle the cases of the taylor expansion of d
- if (n.getKind() == EXPONENTIAL)
- {
- if (n[0] == x)
- {
- return n;
- }
- }
- else if (n.getKind() == SINE)
- {
- if (n[0] == x)
- {
- Node na = nm->mkNode(MINUS, d_pi_2, n[0]);
- Node ret = nm->mkNode(SINE, na);
- ret = Rewriter::rewrite(ret);
- return ret;
- }
- }
- else if (n.getKind() == PLUS)
- {
- std::vector<Node> dchildren;
- for (unsigned i = 0; i < n.getNumChildren(); i++)
- {
- // PLUS is flattened in rewriter, recursion depth is bounded by 1
- Node dc = getDerivative(n[i], x);
- if (dc.isNull())
- {
- return dc;
- }
- else
- {
- dchildren.push_back(dc);
- }
- }
- return nm->mkNode(PLUS, dchildren);
- }
- else if (n.getKind() == MULT)
- {
- Assert(n[0].isConst());
- Node dc = getDerivative(n[1], x);
- if (!dc.isNull())
- {
- return nm->mkNode(MULT, n[0], dc);
- }
- }
- else if (n.getKind() == NONLINEAR_MULT)
- {
- unsigned xcount = 0;
- std::vector<Node> children;
- unsigned xindex = 0;
- for (unsigned i = 0, size = n.getNumChildren(); i < size; i++)
- {
- if (n[i] == x)
- {
- xcount++;
- xindex = i;
- }
- children.push_back(n[i]);
- }
- if (xcount == 0)
- {
- return d_zero;
- }
- else
- {
- children[xindex] = nm->mkConst(Rational(xcount));
- }
- return nm->mkNode(MULT, children);
- }
- else if (n.isVar())
- {
- return n == x ? d_one : d_zero;
- }
- else if (n.isConst())
- {
- return d_zero;
- }
- Trace("nl-ext-debug") << "No derivative computed for " << n;
- Trace("nl-ext-debug") << " for d/d{" << x << "}" << std::endl;
- return Node::null();
-}
-
-std::pair<Node, Node> TranscendentalSolver::getTaylor(Node fa, unsigned n)
-{
- NodeManager* nm = NodeManager::currentNM();
- Assert(n > 0);
- Node fac; // what term we cache for fa
- if (fa[0] == d_zero)
- {
- // optimization : simpler to compute (x-fa[0])^n if we are centered around 0
- fac = fa;
- }
- else
- {
- // otherwise we use a standard factor a in (x-a)^n
- fac = nm->mkNode(fa.getKind(), d_taylor_real_fv_base);
- }
- Node taylor_rem;
- Node taylor_sum;
- // check if we have already computed this Taylor series
- std::unordered_map<unsigned, Node>::iterator itt = d_taylor_sum[fac].find(n);
- if (itt == d_taylor_sum[fac].end())
- {
- Node i_exp_base;
- if (fa[0] == d_zero)
- {
- i_exp_base = d_taylor_real_fv;
- }
- else
- {
- i_exp_base = Rewriter::rewrite(
- nm->mkNode(MINUS, d_taylor_real_fv, d_taylor_real_fv_base));
- }
- Node i_derv = fac;
- Node i_fact = d_one;
- Node i_exp = d_one;
- int i_derv_status = 0;
- unsigned counter = 0;
- std::vector<Node> sum;
- do
- {
- counter++;
- if (fa.getKind() == EXPONENTIAL)
- {
- // unchanged
- }
- else if (fa.getKind() == SINE)
- {
- if (i_derv_status % 2 == 1)
- {
- Node arg = nm->mkNode(PLUS, d_pi_2, d_taylor_real_fv_base);
- i_derv = nm->mkNode(SINE, arg);
- }
- else
- {
- i_derv = fa;
- }
- if (i_derv_status >= 2)
- {
- i_derv = nm->mkNode(MINUS, d_zero, i_derv);
- }
- i_derv = Rewriter::rewrite(i_derv);
- i_derv_status = i_derv_status == 3 ? 0 : i_derv_status + 1;
- }
- if (counter == (n + 1))
- {
- TNode x = d_taylor_real_fv_base;
- i_derv = i_derv.substitute(x, d_taylor_real_fv_base_rem);
- }
- Node curr = nm->mkNode(MULT, nm->mkNode(DIVISION, i_derv, i_fact), i_exp);
- if (counter == (n + 1))
- {
- taylor_rem = curr;
- }
- else
- {
- sum.push_back(curr);
- i_fact = Rewriter::rewrite(
- nm->mkNode(MULT, nm->mkConst(Rational(counter)), i_fact));
- i_exp = Rewriter::rewrite(nm->mkNode(MULT, i_exp_base, i_exp));
- }
- } while (counter <= n);
- taylor_sum = sum.size() == 1 ? sum[0] : nm->mkNode(PLUS, sum);
-
- if (fac[0] != d_taylor_real_fv_base)
- {
- TNode x = d_taylor_real_fv_base;
- taylor_sum = taylor_sum.substitute(x, fac[0]);
- }
-
- // cache
- d_taylor_sum[fac][n] = taylor_sum;
- d_taylor_rem[fac][n] = taylor_rem;
- }
- else
- {
- taylor_sum = itt->second;
- Assert(d_taylor_rem[fac].find(n) != d_taylor_rem[fac].end());
- taylor_rem = d_taylor_rem[fac][n];
- }
-
- // must substitute for the argument if we were using a different lookup
- if (fa[0] != fac[0])
- {
- TNode x = d_taylor_real_fv_base;
- taylor_sum = taylor_sum.substitute(x, fa[0]);
- }
- return std::pair<Node, Node>(taylor_sum, taylor_rem);
-}
-
-void TranscendentalSolver::getPolynomialApproximationBounds(
- Kind k, unsigned d, std::vector<Node>& pbounds)
-{
- if (d_poly_bounds[k][d].empty())
- {
- NodeManager* nm = NodeManager::currentNM();
- Node tft = nm->mkNode(k, d_zero);
- // n is the Taylor degree we are currently considering
- unsigned n = 2 * d;
- // n must be even
- std::pair<Node, Node> taylor = getTaylor(tft, n);
- Trace("nl-ext-tftp-debug2")
- << "Taylor for " << k << " is : " << taylor.first << std::endl;
- Node taylor_sum = Rewriter::rewrite(taylor.first);
- Trace("nl-ext-tftp-debug2")
- << "Taylor for " << k << " is (post-rewrite) : " << taylor_sum
- << std::endl;
- Assert(taylor.second.getKind() == MULT);
- Assert(taylor.second.getNumChildren() == 2);
- Assert(taylor.second[0].getKind() == DIVISION);
- Trace("nl-ext-tftp-debug2")
- << "Taylor remainder for " << k << " is " << taylor.second << std::endl;
- // ru is x^{n+1}/(n+1)!
- Node ru = nm->mkNode(DIVISION, taylor.second[1], taylor.second[0][1]);
- ru = Rewriter::rewrite(ru);
- Trace("nl-ext-tftp-debug2")
- << "Taylor remainder factor is (post-rewrite) : " << ru << std::endl;
- if (k == EXPONENTIAL)
- {
- pbounds.push_back(taylor_sum);
- pbounds.push_back(taylor_sum);
- pbounds.push_back(Rewriter::rewrite(
- nm->mkNode(MULT, taylor_sum, nm->mkNode(PLUS, d_one, ru))));
- pbounds.push_back(Rewriter::rewrite(nm->mkNode(PLUS, taylor_sum, ru)));
- }
- else
- {
- Assert(k == SINE);
- Node l = Rewriter::rewrite(nm->mkNode(MINUS, taylor_sum, ru));
- Node u = Rewriter::rewrite(nm->mkNode(PLUS, taylor_sum, ru));
- pbounds.push_back(l);
- pbounds.push_back(l);
- pbounds.push_back(u);
- pbounds.push_back(u);
- }
- Trace("nl-ext-tf-tplanes")
- << "Polynomial approximation for " << k << " is: " << std::endl;
- Trace("nl-ext-tf-tplanes") << " Lower (pos): " << pbounds[0] << std::endl;
- Trace("nl-ext-tf-tplanes") << " Upper (pos): " << pbounds[2] << std::endl;
- Trace("nl-ext-tf-tplanes") << " Lower (neg): " << pbounds[1] << std::endl;
- Trace("nl-ext-tf-tplanes") << " Upper (neg): " << pbounds[3] << std::endl;
- d_poly_bounds[k][d].insert(
- d_poly_bounds[k][d].end(), pbounds.begin(), pbounds.end());
- }
- else
- {
- pbounds.insert(
- pbounds.end(), d_poly_bounds[k][d].begin(), d_poly_bounds[k][d].end());
- }
-}
-
-void TranscendentalSolver::getPolynomialApproximationBoundForArg(
- Kind k, Node c, unsigned d, std::vector<Node>& pbounds)
-{
- getPolynomialApproximationBounds(k, d, pbounds);
- Assert(c.isConst());
- if (k == EXPONENTIAL && c.getConst<Rational>().sgn() == 1)
- {
- NodeManager* nm = NodeManager::currentNM();
- Node tft = nm->mkNode(k, d_zero);
- bool success = false;
- unsigned ds = d;
- TNode ttrf = d_taylor_real_fv;
- TNode tc = c;
- do
- {
- success = true;
- unsigned n = 2 * ds;
- std::pair<Node, Node> taylor = getTaylor(tft, n);
- // check that 1-c^{n+1}/(n+1)! > 0
- Node ru = nm->mkNode(DIVISION, taylor.second[1], taylor.second[0][1]);
- Node rus = ru.substitute(ttrf, tc);
- rus = Rewriter::rewrite(rus);
- Assert(rus.isConst());
- if (rus.getConst<Rational>() > d_one.getConst<Rational>())
- {
- success = false;
- ds = ds + 1;
- }
- } while (!success);
- if (ds > d)
- {
- Trace("nl-ext-exp-taylor")
- << "*** Increase Taylor bound to " << ds << " > " << d << " for ("
- << k << " " << c << ")" << std::endl;
- // must use sound upper bound
- std::vector<Node> pboundss;
- getPolynomialApproximationBounds(k, ds, pboundss);
- pbounds[2] = pboundss[2];
- }
- }
-}
-
-std::pair<Node, Node> TranscendentalSolver::getTfModelBounds(Node tf,
- unsigned d)
-{
- // compute the model value of the argument
- Node c = d_model.computeAbstractModelValue(tf[0]);
- Assert(c.isConst());
- int csign = c.getConst<Rational>().sgn();
- Kind k = tf.getKind();
- if (csign == 0)
- {
- // at zero, its trivial
- if (k == SINE)
- {
- return std::pair<Node, Node>(d_zero, d_zero);
- }
- Assert(k == EXPONENTIAL);
- return std::pair<Node, Node>(d_one, d_one);
- }
- bool isNeg = csign == -1;
-
- std::vector<Node> pbounds;
- getPolynomialApproximationBoundForArg(k, c, d, pbounds);
-
- std::vector<Node> bounds;
- TNode tfv = d_taylor_real_fv;
- TNode tfs = tf[0];
- for (unsigned d2 = 0; d2 < 2; d2++)
- {
- int index = d2 == 0 ? (isNeg ? 1 : 0) : (isNeg ? 3 : 2);
- Node pab = pbounds[index];
- if (!pab.isNull())
- {
- // { x -> M_A(tf[0]) }
- // Notice that we compute the model value of tfs first, so that
- // the call to rewrite below does not modify the term, where notice that
- // rewrite( x*x { x -> M_A(t) } ) = M_A(t)*M_A(t)
- // is not equal to
- // M_A( x*x { x -> t } ) = M_A( t*t )
- // where M_A denotes the abstract model.
- Node mtfs = d_model.computeAbstractModelValue(tfs);
- pab = pab.substitute(tfv, mtfs);
- pab = Rewriter::rewrite(pab);
- Assert (pab.isConst());
- bounds.push_back(pab);
- }
- else
- {
- bounds.push_back(Node::null());
- }
- }
- return std::pair<Node, Node>(bounds[0], bounds[1]);
-}
-
-Node TranscendentalSolver::mkValidPhase(Node a, Node pi)
-{
- return mkBounded(
- NodeManager::currentNM()->mkNode(MULT, mkRationalNode(-1), pi), a, pi);
-}
-
-} // namespace nl
-} // namespace arith
-} // namespace theory
-} // namespace CVC4
+++ /dev/null
-/********************* */
-/*! \file transcendental_solver.h
- ** \verbatim
- ** Top contributors (to current version):
- ** Andrew Reynolds, Tim King
- ** This file is part of the CVC4 project.
- ** Copyright (c) 2009-2020 by the authors listed in the file AUTHORS
- ** in the top-level source directory and their institutional affiliations.
- ** All rights reserved. See the file COPYING in the top-level source
- ** directory for licensing information.\endverbatim
- **
- ** \brief Solving for handling transcendental functions.
- **/
-
-#ifndef CVC4__THEORY__ARITH__NL__TRANSCENDENTAL_SOLVER_H
-#define CVC4__THEORY__ARITH__NL__TRANSCENDENTAL_SOLVER_H
-
-#include <map>
-#include <unordered_map>
-#include <unordered_set>
-#include <vector>
-
-#include "expr/node.h"
-#include "theory/arith/inference_manager.h"
-#include "theory/arith/nl/nl_model.h"
-
-namespace CVC4 {
-namespace theory {
-namespace arith {
-namespace nl {
-
-/** Transcendental solver class
- *
- * This class implements model-based refinement schemes
- * for transcendental functions, described in:
- *
- * - "Satisfiability Modulo Transcendental
- * Functions via Incremental Linearization" by Cimatti
- * et al., CADE 2017.
- *
- * It's main functionality are methods that implement lemma schemas below,
- * which return a set of lemmas that should be sent on the output channel.
- */
-class TranscendentalSolver
-{
- public:
- TranscendentalSolver(InferenceManager& im, NlModel& m);
- ~TranscendentalSolver();
-
- /** init last call
- *
- * This is called at the beginning of last call effort check, where
- * assertions are the set of assertions belonging to arithmetic,
- * false_asserts is the subset of assertions that are false in the current
- * model, and xts is the set of extended function terms that are active in
- * the current context.
- *
- * This call may add lemmas to lems based on registering term
- * information (for example, purification of sine terms).
- */
- void initLastCall(const std::vector<Node>& assertions,
- const std::vector<Node>& false_asserts,
- const std::vector<Node>& xts);
- /** increment taylor degree */
- void incrementTaylorDegree();
- /** get taylor degree */
- unsigned getTaylorDegree() const;
- /** preprocess assertions check model
- *
- * This modifies the given assertions in preparation for running a call
- * to check model.
- *
- * This method returns false if a bound for a transcendental function
- * was conflicting.
- */
- bool preprocessAssertionsCheckModel(std::vector<Node>& assertions);
- /** Process side effects in lemma se */
- void processSideEffect(const NlLemma& se);
- //-------------------------------------------- lemma schemas
- /** check transcendental initial refine
- *
- * Constructs a set of valid theory lemmas, based on
- * simple facts about transcendental functions.
- * This mostly follows the initial axioms described in
- * Section 4 of "Satisfiability
- * Modulo Transcendental Functions via Incremental
- * Linearization" by Cimatti et al., CADE 2017.
- *
- * Examples:
- *
- * sin( x ) = -sin( -x )
- * ( PI > x > 0 ) => 0 < sin( x ) < 1
- * exp( x )>0
- * x<0 => exp( x )<1
- */
- void checkTranscendentalInitialRefine();
-
- /** check transcendental monotonic
- *
- * Constructs a set of valid theory lemmas, based on a
- * lemma scheme that ensures that applications
- * of transcendental functions respect monotonicity.
- *
- * Examples:
- *
- * x > y => exp( x ) > exp( y )
- * PI/2 > x > y > 0 => sin( x ) > sin( y )
- * PI > x > y > PI/2 => sin( x ) < sin( y )
- */
- void checkTranscendentalMonotonic();
-
- /** check transcendental tangent planes
- *
- * Constructs a set of valid theory lemmas, based on
- * computing an "incremental linearization" of
- * transcendental functions based on the model values
- * of transcendental functions and their arguments.
- * It is based on Figure 3 of "Satisfiability
- * Modulo Transcendental Functions via Incremental
- * Linearization" by Cimatti et al., CADE 2017.
- * This schema is not terminating in general.
- * It is not enabled by default, and can
- * be enabled by --nl-ext-tf-tplanes.
- *
- * Example:
- *
- * Assume we have a term sin(y) where M( y ) = 1 where M is the current model.
- * Note that:
- * sin(1) ~= .841471
- *
- * The Taylor series and remainder of sin(y) of degree 7 is
- * P_{7,sin(0)}( x ) = x + (-1/6)*x^3 + (1/20)*x^5
- * R_{7,sin(0),b}( x ) = (-1/5040)*x^7
- *
- * This gives us lower and upper bounds :
- * P_u( x ) = P_{7,sin(0)}( x ) + R_{7,sin(0),b}( x )
- * ...where note P_u( 1 ) = 4243/5040 ~= .841865
- * P_l( x ) = P_{7,sin(0)}( x ) - R_{7,sin(0),b}( x )
- * ...where note P_l( 1 ) = 4241/5040 ~= .841468
- *
- * Assume that M( sin(y) ) > P_u( 1 ).
- * Since the concavity of sine in the region 0 < x < PI/2 is -1,
- * we add a tangent plane refinement.
- * The tangent plane at the point 1 in P_u is
- * given by the formula:
- * T( x ) = P_u( 1 ) + ((d/dx)(P_u(x)))( 1 )*( x - 1 )
- * We add the lemma:
- * ( 0 < y < PI/2 ) => sin( y ) <= T( y )
- * which is:
- * ( 0 < y < PI/2 ) => sin( y ) <= (391/720)*(y - 2737/1506)
- *
- * Assume that M( sin(y) ) < P_u( 1 ).
- * Since the concavity of sine in the region 0 < x < PI/2 is -1,
- * we add a secant plane refinement for some constants ( l, u )
- * such that 0 <= l < M( y ) < u <= PI/2. Assume we choose
- * l = 0 and u = M( PI/2 ) = 150517/47912.
- * The secant planes at point 1 for P_l
- * are given by the formulas:
- * S_l( x ) = (x-l)*(P_l( l )-P_l(c))/(l-1) + P_l( l )
- * S_u( x ) = (x-u)*(P_l( u )-P_l(c))/(u-1) + P_l( u )
- * We add the lemmas:
- * ( 0 < y < 1 ) => sin( y ) >= S_l( y )
- * ( 1 < y < PI/2 ) => sin( y ) >= S_u( y )
- * which are:
- * ( 0 < y < 1 ) => (sin y) >= 4251/5040*y
- * ( 1 < y < PI/2 ) => (sin y) >= c1*(y+c2)
- * where c1, c2 are rationals (for brevity, omitted here)
- * such that c1 ~= .277 and c2 ~= 2.032.
- */
- void checkTranscendentalTangentPlanes();
- private:
- /** check transcendental function refinement for tf
- *
- * This method is called by the above method for each "master"
- * transcendental function application that occurs in an assertion in the
- * current context. For example, an application like sin(t) is not a master
- * if we have introduced the constraints:
- * t=y+2*pi*n ^ -pi <= y <= pi ^ sin(t) = sin(y).
- * See d_trMaster/d_trSlaves for more detail.
- *
- * This runs Figure 3 of Cimatti et al., CADE 2017 for transcendental
- * function application tf for Taylor degree d. It may add a secant or
- * tangent plane lemma to lems, which includes the side effect of the lemma
- * (if one exists).
- *
- * It returns false if the bounds are not precise enough to add a
- * secant or tangent plane lemma.
- */
- bool checkTfTangentPlanesFun(Node tf, unsigned d);
- //-------------------------------------------- end lemma schemas
- /** polynomial approximation bounds
- *
- * This adds P_l+[x], P_l-[x], P_u+[x], P_u-[x] to pbounds, where x is
- * d_taylor_real_fv. These are polynomial approximations of the Taylor series
- * of <k>( 0 ) for degree 2*d where k is SINE or EXPONENTIAL.
- * These correspond to P_l and P_u from Figure 3 of Cimatti et al., CADE 2017,
- * for positive/negative (+/-) values of the argument of <k>( 0 ).
- *
- * Notice that for certain bounds (e.g. upper bounds for exponential), the
- * Taylor approximation for a fixed degree is only sound up to a given
- * upper bound on the argument. To obtain sound lower/upper bounds for a
- * given <k>( c ), use the function below.
- */
- void getPolynomialApproximationBounds(Kind k,
- unsigned d,
- std::vector<Node>& pbounds);
- /** polynomial approximation bounds
- *
- * This computes polynomial approximations P_l+[x], P_l-[x], P_u+[x], P_u-[x]
- * that are sound (lower, upper) bounds for <k>( c ). Notice that these
- * polynomials may depend on c. In particular, for P_u+[x] for <k>( c ) where
- * c>0, we return the P_u+[x] from the function above for the minimum degree
- * d' >= d such that (1-c^{2*d'+1}/(2*d'+1)!) is positive.
- */
- void getPolynomialApproximationBoundForArg(Kind k,
- Node c,
- unsigned d,
- std::vector<Node>& pbounds);
- /** get transcendental function model bounds
- *
- * This returns the current lower and upper bounds of transcendental
- * function application tf based on Taylor of degree 2*d, which is dependent
- * on the model value of its argument.
- */
- std::pair<Node, Node> getTfModelBounds(Node tf, unsigned d);
- /** get monotonicity direction
- *
- * Returns whether the slope is positive (+1) or negative(-1)
- * in region of transcendental function with kind k.
- * Returns 0 if region is invalid.
- */
- int regionToMonotonicityDir(Kind k, int region);
- /** get concavity
- *
- * Returns whether we are concave (+1) or convex (-1)
- * in region of transcendental function with kind k,
- * where region is defined above.
- * Returns 0 if region is invalid.
- */
- int regionToConcavity(Kind k, int region);
- /** region to lower bound
- *
- * Returns the term corresponding to the lower
- * bound of the region of transcendental function
- * with kind k. Returns Node::null if the region
- * is invalid, or there is no lower bound for the
- * region.
- */
- Node regionToLowerBound(Kind k, int region);
- /** region to upper bound
- *
- * Returns the term corresponding to the upper
- * bound of the region of transcendental function
- * with kind k. Returns Node::null if the region
- * is invalid, or there is no upper bound for the
- * region.
- */
- Node regionToUpperBound(Kind k, int region);
- /** get derivative
- *
- * Returns d/dx n. Supports cases of n
- * for transcendental functions applied to x,
- * multiplication, addition, constants and variables.
- * Returns Node::null() if derivative is an
- * unhandled case.
- */
- Node getDerivative(Node n, Node x);
-
- void mkPi();
- void getCurrentPiBounds();
- /** Make the node -pi <= a <= pi */
- static Node mkValidPhase(Node a, Node pi);
-
- /** The inference manager that we push conflicts and lemmas to. */
- InferenceManager& d_im;
- /** Reference to the non-linear model object */
- NlModel& d_model;
- /** commonly used terms */
- Node d_zero;
- Node d_one;
- Node d_neg_one;
- Node d_true;
- Node d_false;
- /**
- * Some transcendental functions f(t) are "purified", e.g. we add
- * t = y ^ f(t) = f(y) where y is a fresh variable. Those that are not
- * purified we call "master terms".
- *
- * The maps below maintain a master/slave relationship over
- * transcendental functions (SINE, EXPONENTIAL, PI), where above
- * f(y) is the master of itself and of f(t).
- *
- * This is used for ensuring that the argument y of SINE we process is on the
- * interval [-pi .. pi], and that exponentials are not applied to arguments
- * that contain transcendental functions.
- */
- std::map<Node, Node> d_trMaster;
- std::map<Node, std::unordered_set<Node, NodeHashFunction>> d_trSlaves;
- /** The transcendental functions we have done initial refinements on */
- std::map<Node, bool> d_tf_initial_refine;
-
- /** concavity region for transcendental functions
- *
- * This stores an integer that identifies an interval in
- * which the current model value for an argument of an
- * application of a transcendental function resides.
- *
- * For exp( x ):
- * region #1 is -infty < x < infty
- * For sin( x ):
- * region #0 is pi < x < infty (this is an invalid region)
- * region #1 is pi/2 < x <= pi
- * region #2 is 0 < x <= pi/2
- * region #3 is -pi/2 < x <= 0
- * region #4 is -pi < x <= -pi/2
- * region #5 is -infty < x <= -pi (this is an invalid region)
- * All regions not listed above, as well as regions 0 and 5
- * for SINE are "invalid". We only process applications
- * of transcendental functions whose arguments have model
- * values that reside in valid regions.
- */
- std::unordered_map<Node, int, NodeHashFunction> d_tf_region;
- /** cache of the above function */
- std::map<Kind, std::map<unsigned, std::vector<Node>>> d_poly_bounds;
-
- /**
- * Maps representives of a congruence class to the members of that class.
- *
- * In detail, a congruence class is a set of terms of the form
- * { f(t1), ..., f(tn) }
- * such that t1 = ... = tn in the current context. We choose an arbitrary
- * term among these to be the repesentative of this congruence class.
- *
- * Moreover, notice we compute congruence classes only over terms that
- * are transcendental function applications that are "master terms",
- * see d_trMaster/d_trSlave.
- */
- std::map<Node, std::vector<Node>> d_funcCongClass;
- /**
- * A list of all functions for each kind in { EXPONENTIAL, SINE, POW, PI }
- * that are representives of their congruence class.
- */
- std::map<Kind, std::vector<Node>> d_funcMap;
-
- // tangent plane bounds
- std::map<Node, std::map<Node, Node>> d_tangent_val_bound[4];
-
- /** secant points (sorted list) for transcendental functions
- *
- * This is used for tangent plane refinements for
- * transcendental functions. This is the set
- * "get-previous-secant-points" in "Satisfiability
- * Modulo Transcendental Functions via Incremental
- * Linearization" by Cimatti et al., CADE 2017, for
- * each transcendental function application. We store this set for each
- * Taylor degree.
- */
- std::unordered_map<Node,
- std::map<unsigned, std::vector<Node>>,
- NodeHashFunction>
- d_secant_points;
-
- /** get Taylor series of degree n for function fa centered around point fa[0].
- *
- * Return value is ( P_{n,f(a)}( x ), R_{n+1,f(a)}( x ) ) where
- * the first part of the pair is the Taylor series expansion :
- * P_{n,f(a)}( x ) = sum_{i=0}^n (f^i( a )/i!)*(x-a)^i
- * and the second part of the pair is the Taylor series remainder :
- * R_{n+1,f(a),b}( x ) = (f^{n+1}( b )/(n+1)!)*(x-a)^{n+1}
- *
- * The above values are cached for each (f,n) for a fixed variable "a".
- * To compute the Taylor series for fa, we compute the Taylor series
- * for ( fa.getKind(), n ) then substitute { a -> fa[0] } if fa[0]!=0.
- * We compute P_{n,f(0)}( x )/R_{n+1,f(0),b}( x ) for ( fa.getKind(), n )
- * if fa[0]=0.
- * In the latter case, note we compute the exponential x^{n+1}
- * instead of (x-a)^{n+1}, which can be done faster.
- */
- std::pair<Node, Node> getTaylor(Node fa, unsigned n);
-
- /** internal variables used for constructing (cached) versions of the Taylor
- * series above.
- */
- Node d_taylor_real_fv; // x above
- Node d_taylor_real_fv_base; // a above
- Node d_taylor_real_fv_base_rem; // b above
-
- /** cache of sum and remainder terms for getTaylor */
- std::unordered_map<Node, std::unordered_map<unsigned, Node>, NodeHashFunction>
- d_taylor_sum;
- std::unordered_map<Node, std::unordered_map<unsigned, Node>, NodeHashFunction>
- d_taylor_rem;
- /** taylor degree
- *
- * Indicates that the degree of the polynomials in the Taylor approximation of
- * all transcendental functions is 2*d_taylor_degree. This value is set
- * initially to options::nlExtTfTaylorDegree() and may be incremented
- * if the option options::nlExtTfIncPrecision() is enabled.
- */
- unsigned d_taylor_degree;
- /** PI
- *
- * Note that PI is a (symbolic, non-constant) nullary operator. This is
- * because its value cannot be computed exactly. We constraint PI to concrete
- * lower and upper bounds stored in d_pi_bound below.
- */
- Node d_pi;
- /** PI/2 */
- Node d_pi_2;
- /** -PI/2 */
- Node d_pi_neg_2;
- /** -PI */
- Node d_pi_neg;
- /** the concrete lower and upper bounds for PI */
- Node d_pi_bound[2];
-}; /* class TranscendentalSolver */
-
-} // namespace nl
-} // namespace arith
-} // namespace theory
-} // namespace CVC4
-
-#endif /* CVC4__THEORY__ARITH__TRANSCENDENTAL_SOLVER_H */