d_order_points.push_back(d_neg_one);
d_order_points.push_back(d_zero);
d_order_points.push_back(d_one);
+ d_taylor_real_fv = NodeManager::currentNM()->mkBoundVar(
+ "x", NodeManager::currentNM()->realType());
+ d_taylor_real_fv_base = NodeManager::currentNM()->mkBoundVar(
+ "a", NodeManager::currentNM()->realType());
+ d_taylor_real_fv_base_rem = NodeManager::currentNM()->mkBoundVar(
+ "b", NodeManager::currentNM()->realType());
}
NonlinearExtension::~NonlinearExtension() {}
std::pair<bool, Node> NonlinearExtension::isExtfReduced(
int effort, Node n, Node on, const std::vector<Node>& exp) const {
if (n != d_zero) {
- return std::make_pair(n.getKind() != kind::NONLINEAR_MULT, Node::null());
+ Kind k = n.getKind();
+ return std::make_pair(k != kind::NONLINEAR_MULT && !isTranscendentalKind(k),
+ Node::null());
}
Assert(n == d_zero);
- Trace("nl-ext-zero-exp") << "Infer zero : " << on << " == " << n << std::endl;
- // minimize explanation
- const std::set<Node> vars(on.begin(), on.end());
-
- for (unsigned i = 0; i < exp.size(); i++) {
- Trace("nl-ext-zero-exp") << " exp[" << i << "] = " << exp[i] << std::endl;
- std::vector<Node> eqs;
- if (exp[i].getKind() == kind::EQUAL) {
- eqs.push_back(exp[i]);
- } else if (exp[i].getKind() == kind::AND) {
- for (unsigned j = 0; j < exp[i].getNumChildren(); j++) {
- if (exp[i][j].getKind() == kind::EQUAL) {
- eqs.push_back(exp[i][j]);
+ if (on.getKind() == kind::NONLINEAR_MULT)
+ {
+ Trace("nl-ext-zero-exp") << "Infer zero : " << on << " == " << n
+ << std::endl;
+ // minimize explanation if a substitution+rewrite results in zero
+ const std::set<Node> vars(on.begin(), on.end());
+
+ for (unsigned i = 0, size = exp.size(); i < size; i++)
+ {
+ Trace("nl-ext-zero-exp") << " exp[" << i << "] = " << exp[i]
+ << std::endl;
+ std::vector<Node> eqs;
+ if (exp[i].getKind() == kind::EQUAL)
+ {
+ eqs.push_back(exp[i]);
+ }
+ else if (exp[i].getKind() == kind::AND)
+ {
+ for (const Node& ec : exp[i])
+ {
+ if (ec.getKind() == kind::EQUAL)
+ {
+ eqs.push_back(ec);
+ }
}
}
- }
- for (unsigned j = 0; j < eqs.size(); j++) {
- for (unsigned r = 0; r < 2; r++) {
- if (eqs[j][r] == d_zero && vars.find(eqs[j][1 - r]) != vars.end()) {
- Trace("nl-ext-zero-exp") << "...single exp : " << eqs[j] << std::endl;
- return std::make_pair(true, eqs[j]);
+ for (unsigned j = 0; j < eqs.size(); j++)
+ {
+ for (unsigned r = 0; r < 2; r++)
+ {
+ if (eqs[j][r] == d_zero && vars.find(eqs[j][1 - r]) != vars.end())
+ {
+ Trace("nl-ext-zero-exp") << "...single exp : " << eqs[j]
+ << std::endl;
+ return std::make_pair(true, eqs[j]);
+ }
}
}
}
//Assert( ret.isConst() );
} else if (n.getNumChildren() == 0) {
if( n.getKind()==kind::PI ){
+ // we are interested in the exact value of PI, which cannot be computed.
+ // hence, we return PI itself when asked for the concrete value.
ret = n;
}else{
ret = d_containing.getValuation().getModel()->getValue(n);
d_ci_exp.clear();
d_ci_max.clear();
d_tf_rep_map.clear();
-
+ d_tf_region.clear();
+
int lemmas_proc = 0;
std::vector<Node> lemmas;
getCurrentPiBounds( lemmas );
}
Node shift = NodeManager::currentNM()->mkSkolem( "s", NodeManager::currentNM()->integerType(), "number of shifts" );
+ // FIXME : do not introduce shift here; instead this needs model-based
+ // refinement for constant shifts (#1284)
Node shift_lem = NodeManager::currentNM()->mkNode( kind::AND, mkValidPhase( y, d_pi ),
a[0].eqNode( NodeManager::currentNM()->mkNode( kind::PLUS, y,
NodeManager::currentNM()->mkNode( kind::MULT, NodeManager::currentNM()->mkConst( Rational(2) ), shift, d_pi ) ) ),
if( itrm!=d_tf_rep_map[a.getKind()].end() ){
//verify they have the same model value
if( d_mv[1][a]!=d_mv[1][itrm->second] ){
- //congruence lemma
+ // if not, add congruence lemma
Node cong_lemma = NodeManager::currentNM()->mkNode( kind::IMPLIES, a[0].eqNode( itrm->second[0] ), a.eqNode( itrm->second ) );
lemmas.push_back( cong_lemma );
//Assert( false );
}
Node NonlinearExtension::get_compare_value(Node i, unsigned orderType) const {
+ if (i.isConst())
+ {
+ return i;
+ }
Trace("nl-ext-debug") << "Compare variable " << i << " " << orderType
<< std::endl;
Assert(orderType >= 0 && orderType <= 3);
std::vector< Node > lemmas;
Trace("nl-ext") << "Get initial refinement lemmas for transcendental functions..." << std::endl;
for( std::map< Kind, std::map< Node, Node > >::iterator it = d_tf_rep_map.begin(); it != d_tf_rep_map.end(); ++it ){
- std::map< Node, Node > mv_to_term;
for( std::map< Node, Node >::iterator itt = it->second.begin(); itt != it->second.end(); ++itt ){
Node t = itt->second;
Assert( d_mv[1].find( t )!=d_mv[1].end() );
- Node tv = d_mv[1][t];
- mv_to_term[tv] = t;
- // easy model-based bounds (TODO: make these unconditional?)
- if( it->first==kind::SINE ){
- Assert( tv.isConst() );
- /*
- if( tv.getConst<Rational>() > d_one.getConst<Rational>() ){
- lemmas.push_back( NodeManager::currentNM()->mkNode( kind::LEQ, t, d_one ) );
- }else if( tv.getConst<Rational>() < d_neg_one.getConst<Rational>() ){
- lemmas.push_back( NodeManager::currentNM()->mkNode( kind::GEQ, t, d_neg_one ) );
- }
- */
- /*
- if( tv.getConst<Rational>().sgn()!=0 ){
- //symmetry (model-based)
- Node neg_tv = NodeManager::currentNM()->mkConst( -tv.getConst<Rational>() );
- if( mv_to_term.find( neg_tv )!=mv_to_term.end() ){
- Node sum = NodeManager::currentNM()->mkNode( kind::PLUS, mv_to_term[neg_tv][0], t[0] );
- Node res = computeModelValue( sum, 0 );
- if( !res.isConst() || res.getConst<Rational>().sgn()!=0 ){
- Node tsum = NodeManager::currentNM()->mkNode( kind::PLUS, mv_to_term[neg_tv], t );
- Node sym_lem = NodeManager::currentNM()->mkNode( kind::IMPLIES, tsum.eqNode( d_zero ), sum.eqNode( d_zero ) );
- lemmas.push_back( sym_lem );
- }
- }
- }
- */
- }else if( it->first==kind::EXPONENTIAL ){
- if( tv.getConst<Rational>().sgn()==-1 ){
- lemmas.push_back( NodeManager::currentNM()->mkNode( kind::GT, t, d_zero ) );
- }
- }
//initial refinements
if( d_tf_initial_refine.find( t )==d_tf_initial_refine.end() ){
d_tf_initial_refine[t] = true;
NodeManager::currentNM()->mkNode( kind::GT, t[0], d_pi_neg ),
NodeManager::currentNM()->mkNode( kind::GT, t, NodeManager::currentNM()->mkNode( kind::MINUS, d_pi_neg, t[0] ) ) ) ) );
}else if( it->first==kind::EXPONENTIAL ){
- // exp(x)>0 ^ x < 0 <=> exp( x ) < 1 ^ ( x = 0 V exp( x ) > x + 1 )
- lem = NodeManager::currentNM()->mkNode( kind::AND, NodeManager::currentNM()->mkNode( kind::EQUAL, t[0].eqNode(d_zero), t.eqNode(d_one)),
- NodeManager::currentNM()->mkNode( kind::EQUAL,
- NodeManager::currentNM()->mkNode( kind::LT, t[0], d_zero ),
- NodeManager::currentNM()->mkNode( kind::LT, t, d_one ) ),
- NodeManager::currentNM()->mkNode( kind::OR,
- NodeManager::currentNM()->mkNode( kind::LEQ, t[0], d_zero ),
- NodeManager::currentNM()->mkNode( kind::GT, t, NodeManager::currentNM()->mkNode( kind::PLUS, t[0], d_one ) ) ) );
-
+ // ( exp(x) > 0 ) ^ ( x=0 <=> exp( x ) = 1 ) ^ ( x < 0 <=> exp( x ) <
+ // 1 ) ^ ( x <= 0 V exp( x ) > x + 1 )
+ lem = NodeManager::currentNM()->mkNode(
+ kind::AND,
+ NodeManager::currentNM()->mkNode(kind::GT, t, d_zero),
+ NodeManager::currentNM()->mkNode(
+ kind::EQUAL, t[0].eqNode(d_zero), t.eqNode(d_one)),
+ NodeManager::currentNM()->mkNode(
+ kind::EQUAL,
+ NodeManager::currentNM()->mkNode(kind::LT, t[0], d_zero),
+ NodeManager::currentNM()->mkNode(kind::LT, t, d_one)),
+ NodeManager::currentNM()->mkNode(
+ kind::OR,
+ NodeManager::currentNM()->mkNode(kind::LEQ, t[0], d_zero),
+ NodeManager::currentNM()->mkNode(
+ kind::GT,
+ t,
+ NodeManager::currentNM()->mkNode(
+ kind::PLUS, t[0], d_one))));
}
if( !lem.isNull() ){
lemmas.push_back( lem );
Assert( d_mv[1].find( t )!=d_mv[1].end() );
Trace("nl-ext-tf-mono") << " f-val : " << d_mv[1][t] << std::endl;
}
- std::vector< int > mdirs;
std::vector< Node > mpoints;
std::vector< Node > mpoints_vals;
if( it->first==kind::SINE ){
- mdirs.push_back( -1 );
mpoints.push_back( d_pi );
- mdirs.push_back( 1 );
mpoints.push_back( d_pi_2 );
- mdirs.push_back( -1 );
+ mpoints.push_back(d_zero);
mpoints.push_back( d_pi_neg_2 );
- mdirs.push_back( 0 );
mpoints.push_back( d_pi_neg );
}else if( it->first==kind::EXPONENTIAL ){
- mdirs.push_back( 1 );
mpoints.push_back( Node::null() );
}
if( !mpoints.empty() ){
//increment to the proper monotonicity region
bool increment = true;
- while( increment && mdir_index<mdirs.size() ){
+ while (increment && mdir_index < mpoints.size())
+ {
increment = false;
if( mpoints[mdir_index].isNull() ){
increment = true;
}
if( increment ){
tval = Node::null();
- monotonic_dir = mdirs[mdir_index];
mono_bounds[1] = mpoints[mdir_index];
mdir_index++;
- if( mdir_index<mdirs.size() ){
+ monotonic_dir = regionToMonotonicityDir(it->first, mdir_index);
+ if (mdir_index < mpoints.size())
+ {
mono_bounds[0] = mpoints[mdir_index];
}else{
mono_bounds[0] = Node::null();
}
}
}
-
-
+ // store the concavity region
+ d_tf_region[s] = mdir_index;
+ Trace("nl-ext-concavity") << "Transcendental function " << s
+ << " is in region #" << mdir_index;
+ Trace("nl-ext-concavity") << ", arg model value = " << sargval
+ << std::endl;
+
if( !tval.isNull() ){
Node mono_lem;
if( monotonic_dir==1 && sval.getConst<Rational>() > tval.getConst<Rational>() ){
lemmas.push_back( mono_lem );
}
}
+ // store the previous values
targ = sarg;
targval = sargval;
t = s;
}
}
}
-
-
-
-
return lemmas;
}
-
-
-Node NonlinearExtension::getTaylor( Node tf, Node x, unsigned n, std::vector< Node >& lemmas ) {
- Node i_exp_base_term = Rewriter::rewrite( NodeManager::currentNM()->mkNode( kind::MINUS, x, tf[0] ) );
- Node i_exp_base = getFactorSkolem( i_exp_base_term, lemmas );
- Node i_derv = tf;
- Node i_fact = d_one;
- Node i_exp = d_one;
- int i_derv_status = 0;
- unsigned counter = 0;
- std::vector< Node > sum;
- do {
- counter++;
- if( tf.getKind()==kind::EXPONENTIAL ){
- //unchanged
- }else if( tf.getKind()==kind::SINE ){
- if( i_derv_status%2==1 ){
- Node arg = NodeManager::currentNM()->mkNode( kind::MINUS,
- NodeManager::currentNM()->mkNode( kind::MULT,
- NodeManager::currentNM()->mkConst( Rational(1)/Rational(2) ),
- NodeManager::currentNM()->mkNullaryOperator( NodeManager::currentNM()->realType(), kind::PI ) ),
- tf[0] );
- i_derv = NodeManager::currentNM()->mkNode( kind::SINE, arg );
+
+int NonlinearExtension::regionToMonotonicityDir(Kind k, int region)
+{
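+ // Region indices follow the documentation of d_tf_region in the header:
+ // for EXPONENTIAL, region #1 is the entire real line; for SINE, region #1
+ // is ( PI/2, PI ], #2 is ( 0, PI/2 ], #3 is ( -PI/2, 0 ], and #4 is
+ // ( -PI, -PI/2 ]. All other regions are invalid and map to 0 below.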
+ if (k == kind::EXPONENTIAL)
+ {
+ if (region == 1)
+ {
+ return 1;
+ }
+ }
+ else if (k == kind::SINE)
+ {
+ if (region == 1 || region == 4)
+ {
+ return -1;
+ }
+ else if (region == 2 || region == 3)
+ {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+int NonlinearExtension::regionToConcavity(Kind k, int region)
+{
+ if (k == kind::EXPONENTIAL)
+ {
+ if (region == 1)
+ {
+ return 1;
+ }
+ }
+ else if (k == kind::SINE)
+ {
+ if (region == 1 || region == 2)
+ {
+ return -1;
+ }
+ else if (region == 3 || region == 4)
+ {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+Node NonlinearExtension::regionToLowerBound(Kind k, int region)
+{
+ if (k == kind::SINE)
+ {
+ if (region == 1)
+ {
+ return d_pi_2;
+ }
+ else if (region == 2)
+ {
+ return d_zero;
+ }
+ else if (region == 3)
+ {
+ return d_pi_neg_2;
+ }
+ else if (region == 4)
+ {
+ return d_pi_neg;
+ }
+ }
+ return Node::null();
+}
+
+Node NonlinearExtension::regionToUpperBound(Kind k, int region)
+{
+ if (k == kind::SINE)
+ {
+ if (region == 1)
+ {
+ return d_pi;
+ }
+ else if (region == 2)
+ {
+ return d_pi_2;
+ }
+ else if (region == 3)
+ {
+ return d_zero;
+ }
+ else if (region == 4)
+ {
+ return d_pi_neg_2;
+ }
+ }
+ return Node::null();
+}
+
+Node NonlinearExtension::getDerivative(Node n, Node x)
+{
+ Assert(x.isVar());
+ // only handle the cases that appear in Taylor expansions of transcendental
+ // functions
+ if (n.getKind() == kind::EXPONENTIAL)
+ {
+ if (n[0] == x)
+ {
+ return n;
+ }
+ }
+ else if (n.getKind() == kind::SINE)
+ {
+ if (n[0] == x)
+ {
+ Node na = NodeManager::currentNM()->mkNode(kind::MINUS, d_pi_2, n[0]);
+ Node ret = NodeManager::currentNM()->mkNode(kind::SINE, na);
+ ret = Rewriter::rewrite(ret);
+ return ret;
+ }
+ }
+ else if (n.getKind() == kind::PLUS)
+ {
+ std::vector<Node> dchildren;
+ for (unsigned i = 0; i < n.getNumChildren(); i++)
+ {
+ // PLUS is flattened in rewriter, recursion depth is bounded by 1
+ Node dc = getDerivative(n[i], x);
+ if (dc.isNull())
+ {
+ return dc;
}else{
- i_derv = tf;
+ dchildren.push_back(dc);
}
- if( i_derv_status>=2 ){
- i_derv = NodeManager::currentNM()->mkNode( kind::MINUS, d_zero, i_derv );
+ }
+ return NodeManager::currentNM()->mkNode(kind::PLUS, dchildren);
+ }
+ else if (n.getKind() == kind::MULT)
+ {
+ Assert(n[0].isConst());
+ Node dc = getDerivative(n[1], x);
+ if (!dc.isNull())
+ {
+ return NodeManager::currentNM()->mkNode(kind::MULT, n[0], dc);
+ }
+ }
+ else if (n.getKind() == kind::NONLINEAR_MULT)
+ {
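+ // For a monomial in which x occurs xcount times, i.e. c * x^xcount with c
+ // free of x, the derivative is xcount * c * x^(xcount-1). We obtain this
+ // below by replacing a single occurrence of x with the constant xcount.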
+ unsigned xcount = 0;
+ std::vector<Node> children;
+ unsigned xindex = 0;
+ for (unsigned i = 0, size = n.getNumChildren(); i < size; i++)
+ {
+ if (n[i] == x)
+ {
+ xcount++;
+ xindex = i;
}
- i_derv = Rewriter::rewrite( i_derv );
- i_derv_status = i_derv_status==3 ? 0 : i_derv_status+1;
+ children.push_back(n[i]);
}
- Node curr = NodeManager::currentNM()->mkNode( kind::MULT,
- NodeManager::currentNM()->mkNode( kind::DIVISION, i_derv, i_fact ), i_exp );
- sum.push_back( curr );
- i_fact = Rewriter::rewrite( NodeManager::currentNM()->mkNode( kind::MULT, NodeManager::currentNM()->mkConst( Rational( counter ) ) ) );
- i_exp = Rewriter::rewrite( NodeManager::currentNM()->mkNode( kind::MULT, i_exp_base, i_exp ) );
- }while( counter<n );
- return sum.size()==1 ? sum[0] : NodeManager::currentNM()->mkNode( kind::PLUS, sum );
+ if (xcount == 0)
+ {
+ return d_zero;
+ }
+ else
+ {
+ children[xindex] = NodeManager::currentNM()->mkConst(Rational(xcount));
+ }
+ return NodeManager::currentNM()->mkNode(kind::MULT, children);
+ }
+ else if (n.isVar())
+ {
+ return n == x ? d_one : d_zero;
+ }
+ else if (n.isConst())
+ {
+ return d_zero;
+ }
+ Trace("nl-ext-debug") << "No derivative computed for " << n;
+ Trace("nl-ext-debug") << " for d/d{" << x << "}" << std::endl;
+ return Node::null();
}
-
+
+std::pair<Node, Node> NonlinearExtension::getTaylor(TNode fa, unsigned n)
+{
+ Node fac; // what term we cache for fa
+ if (fa[0] == d_zero)
+ {
+ // optimization : simpler to compute (x-fa[0])^n if we are centered around
+ // 0.
+ fac = fa;
+ }
+ else
+ {
+ // otherwise we use a standard factor a in (x-a)^n
+ fac = NodeManager::currentNM()->mkNode(fa.getKind(), d_taylor_real_fv_base);
+ }
+ Node taylor_rem;
+ Node taylor_sum;
+ // check if we have already computed this Taylor series
+ std::unordered_map<unsigned, Node>::iterator itt = d_taylor_sum[fac].find(n);
+ if (itt == d_taylor_sum[fac].end())
+ {
+ Node i_exp_base;
+ if (fa[0] == d_zero)
+ {
+ i_exp_base = d_taylor_real_fv;
+ }
+ else
+ {
+ i_exp_base = Rewriter::rewrite(NodeManager::currentNM()->mkNode(
+ kind::MINUS, d_taylor_real_fv, d_taylor_real_fv_base));
+ }
+ Node i_derv = fac;
+ Node i_fact = d_one;
+ Node i_exp = d_one;
+ int i_derv_status = 0;
+ unsigned counter = 0;
+ std::vector<Node> sum;
+ do
+ {
+ counter++;
+ if (fa.getKind() == kind::EXPONENTIAL)
+ {
+ // unchanged
+ }
+ else if (fa.getKind() == kind::SINE)
+ {
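+ // The derivatives of sin cycle with period four : sin, cos, -sin, -cos.
+ // We track the position in this cycle with i_derv_status and represent
+ // cos( a ) as sin( PI/2 + a ).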
+ if (i_derv_status % 2 == 1)
+ {
+ Node arg = NodeManager::currentNM()->mkNode(
+ kind::PLUS, d_pi_2, d_taylor_real_fv_base);
+ i_derv = NodeManager::currentNM()->mkNode(kind::SINE, arg);
+ }
+ else
+ {
+ i_derv = fa;
+ }
+ if (i_derv_status >= 2)
+ {
+ i_derv =
+ NodeManager::currentNM()->mkNode(kind::MINUS, d_zero, i_derv);
+ }
+ i_derv = Rewriter::rewrite(i_derv);
+ i_derv_status = i_derv_status == 3 ? 0 : i_derv_status + 1;
+ }
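+ // The term generated when counter == n + 1 is the Taylor remainder; its
+ // derivative factor is evaluated at the remainder variable b rather than
+ // at the base point a.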
+ if (counter == (n + 1))
+ {
+ TNode x = d_taylor_real_fv_base;
+ i_derv = i_derv.substitute(x, d_taylor_real_fv_base_rem);
+ }
+ Node curr = NodeManager::currentNM()->mkNode(
+ kind::MULT,
+ NodeManager::currentNM()->mkNode(kind::DIVISION, i_derv, i_fact),
+ i_exp);
+ if (counter == (n + 1))
+ {
+ taylor_rem = curr;
+ }
+ else
+ {
+ sum.push_back(curr);
+ i_fact = Rewriter::rewrite(NodeManager::currentNM()->mkNode(
+ kind::MULT,
+ NodeManager::currentNM()->mkConst(Rational(counter)),
+ i_fact));
+ i_exp = Rewriter::rewrite(
+ NodeManager::currentNM()->mkNode(kind::MULT, i_exp_base, i_exp));
+ }
+ } while (counter <= n);
+ taylor_sum = sum.size() == 1
+ ? sum[0]
+ : NodeManager::currentNM()->mkNode(kind::PLUS, sum);
+
+ if (fac[0] != d_taylor_real_fv_base)
+ {
+ TNode x = d_taylor_real_fv_base;
+ taylor_sum = taylor_sum.substitute(x, fac[0]);
+ }
+
+ // cache
+ d_taylor_sum[fac][n] = taylor_sum;
+ d_taylor_rem[fac][n] = taylor_rem;
+ }
+ else
+ {
+ taylor_sum = itt->second;
+ Assert(d_taylor_rem[fac].find(n) != d_taylor_rem[fac].end());
+ taylor_rem = d_taylor_rem[fac][n];
+ }
+
+ // must substitute for the argument if we were using a different lookup
+ if (fa[0] != fac[0])
+ {
+ TNode x = d_taylor_real_fv_base;
+ taylor_sum = taylor_sum.substitute(x, fa[0]);
+ }
+ return std::pair<Node, Node>(taylor_sum, taylor_rem);
+}
+
} // namespace arith
} // namespace theory
} // namespace CVC4
#include <map>
#include <queue>
#include <set>
+#include <unordered_map>
#include <utility>
#include <vector>
typedef std::map<Node, unsigned> NodeMultiset;
+// TODO : refactor/document this class (#1287)
+/** Non-linear extension class
+ *
+ * This class implements model-based refinement schemes
+ * for non-linear arithmetic, described in:
+ *
+ * - "Invariant Checking of NRA Transition Systems
+ * via Incremental Reduction to LRA with EUF" by
+ * Cimatti et al., TACAS 2017.
+ *
+ * - Section 5 of "Desiging Theory Solvers with
+ * Extensions" by Reynolds et al., FroCoS 2017.
+ *
+ * - "Satisfiability Modulo Transcendental
+ * Functions via Incremental Linearization" by Cimatti
+ * et al., CADE 2017.
+ *
+ * Its main functionality is a check(...) method,
+ * which is called by TheoryArithPrivate either:
+ * (1) at full effort, when no conflicts or lemmas have been emitted, or
+ * (2) at last call effort.
+ * In this method, this class calls d_out->lemma(...)
+ * for valid arithmetic theory lemmas, based on the current set of assertions,
+ * where d_out is the output channel of TheoryArith.
+ */
class NonlinearExtension {
public:
NonlinearExtension(TheoryArith& containing, eq::EqualityEngine* ee);
~NonlinearExtension();
+ /** Get current substitution
+ *
+ * This function and the one below are
+ * used for context-dependent
+ * simplification, see Section 3.1 of
+ * "Designing Theory Solvers with Extensions"
+ * by Reynolds et al. FroCoS 2017.
+ *
+ * effort : an identifier indicating the stage where
+ * we are performing context-dependent simplification,
+ * vars : a set of arithmetic variables.
+ *
+ * This function populates subs and exp, such that for 0 <= i < vars.size():
+ * ( exp[vars[i]] ) => vars[i] = subs[i]
+ * where exp[vars[i]] is a set of assertions
+ * that hold in the current context. We call { vars -> subs } a "derivable
+ * substitution" (see Reynolds et al. FroCoS 2017).
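+ *
+ * For example (illustrative), if vars = { x } and the assertion x = 5
+ * holds in the current context, then we may set subs = { 5 } and
+ * exp[x] = { x = 5 }.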
+ */
bool getCurrentSubstitution(int effort, const std::vector<Node>& vars,
std::vector<Node>& subs,
std::map<Node, std::vector<Node> >& exp);
-
+ /** Is the term n in reduced form?
+ *
+ * Used for context-dependent simplification.
+ *
+ * effort : an identifier indicating the stage where
+ * we are performing context-dependent simplification,
+ * on : the original term that we reduced to n,
+ * exp : an explanation such that ( exp => on = n ).
+ *
+ * We return a pair ( b, exp' ) such that
+ * if b is true, then:
+ * n is in reduced form
+ * if exp' is non-null, then ( exp' => on = n )
+ * The second part of the pair is used for constructing
+ * minimal explanations for context-dependent simplifications.
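+ *
+ * For example (illustrative), if on = x*y, the explanation exp contains
+ * x = 0, and n is the rewritten result 0, then we may return the pair
+ * ( true, x = 0 ), since x = 0 alone entails on = n.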
+ */
std::pair<bool, Node> isExtfReduced(int effort, Node n, Node on,
const std::vector<Node>& exp) const;
+ /** Check at effort level e.
+ *
+ * This call may result in (possibly multiple)
+ * calls to d_out->lemma(...) where d_out
+ * is the output channel of TheoryArith.
+ */
void check(Theory::Effort e);
+ /** Does this class need a call to check(...) at last call effort? */
bool needsCheckLastEffort() const { return d_needsLastCall; }
+ /** Compare arithmetic terms i and j based on an ordering.
+ *
+ * orderType = 0 : compare concrete model values
+ * orderType = 1 : compare abstract model values
+ * orderType = 2 : compare abs of concrete model values
+ * orderType = 3 : compare abs of abstract model values
+ * TODO (#1287) make this an enum?
+ *
+ * For definitions of concrete vs abstract model values,
+ * see computeModelValue below.
+ */
int compare(Node i, Node j, unsigned orderType) const;
+ /** Compare constant rationals i and j based on an ordering.
+ * orderType is the same as above.
+ */
int compare_value(Node i, Node j, unsigned orderType) const;
+ private:
+ /** returns true if the multiset containing the
+ * factors of monomial a is a subset of the multiset
+ * containing the factors of monomial b.
+ */
bool isMonomialSubset(Node a, Node b) const;
void registerMonomialSubset(Node a, Node b);
- private:
typedef context::CDHashSet<Node, NodeHashFunction> NodeSet;
// monomial information (context-independent)
Kind d_type;
}; /* struct ConstraintInfo */
- // Check assertions for consistency in the effort LAST_CALL with a subset of
- // the assertions, false_asserts, evaluate to false in the current model.
- // Returns the number of lemmas added on the output channel.
+ /** check last call
+ *
+ * Check assertions for consistency in the effort LAST_CALL, where
+ * false_asserts is the subset of the assertions that evaluate to false in
+ * the current model.
+ *
+ * xts : the list of (non-reduced) extended terms in the current context.
+ *
+ * This method returns the number of lemmas added on the output channel of
+ * TheoryArith.
+ */
int checkLastCall(const std::vector<Node>& assertions,
const std::set<Node>& false_asserts,
const std::vector<Node>& xts);
-
+ //---------------------------------------term utilities
static bool isArithKind(Kind k);
static Node mkLit(Node a, Node b, int status, int orderType = 0);
static Node mkAbs(Node a);
static Kind transKinds(Kind k1, Kind k2);
static bool isTranscendentalKind(Kind k);
Node mkMonomialRemFactor(Node n, const NodeMultiset& n_exp_rem) const;
+ //---------------------------------------end term utilities
- // register monomial
+ /** register monomial */
void registerMonomial(Node n);
void setMonomialFactor(Node a, Node b, const NodeMultiset& common);
void registerConstraint(Node atom);
- // index = 0 : concrete, 1 : abstract
+ /** compute model value
+ *
+ * This computes model values for terms based on two semantics, a "concrete"
+ * semantics and an "abstract" semantics.
+ *
+ * index = 0 means compute the value of n based on its children recursively.
+ * (we call this its "concrete" value)
+ * index = 1 means lookup the value of n in the model.
+ * (we call this its "abstract" value)
+ * In other words, index = 1 treats multiplication terms and transcendental
+ * function applications as variables, whereas index = 0 computes their
+ * actual values. This is a key distinction used in the model-based
+ * refinement scheme in Cimatti et al. TACAS 2017.
+ *
+ * For example, if M( a ) = 2, M( b ) = 3, M( a * b ) = 5, then :
+ *
+ * computeModelValue( a*b, 0 ) =
+ * computeModelValue( a, 0 )*computeModelValue( b, 0 ) = 2*3 = 6
+ * whereas:
+ * computeModelValue( a*b, 1 ) = 5
+ */
Node computeModelValue(Node n, unsigned index = 0);
-
+ /** Returns the Node corresponding to the value of i for the given
+ * orderType, which is one of the values
+ * described above for ::compare(...).
+ */
Node get_compare_value(Node i, unsigned orderType) const;
void assignOrderIds(std::vector<Node>& vars, NodeMultiset& d_order,
unsigned orderType);
- // Returns the subset of assertions that evaluate to false in the model.
+ /** Returns the subset of assertions whose concrete values are
+ * false in the model.
+ */
std::set<Node> getFalseInModel(const std::vector<Node>& assertions);
- // status
- // 0 : equal
- // 1 : greater than or equal
- // 2 : greater than
- // -X : (less)
- // in these functions we are iterating over variables of monomials
- // initially : exp => ( oa = a ^ ob = b )
+ /** In the following functions, status states a relationship
+ * between two arithmetic terms, where:
+ * 0 : equal
+ * 1 : greater than or equal
+ * 2 : greater than
+ * -X : the corresponding less-than relationship (the reverse of X above)
+ * TODO (#1287) make this an enum?
+ */
+ /** compute the sign of a.
+ *
+ * Calls to this function are such that :
+ * exp => ( oa = a ^ a <status> 0 )
+ *
+ * This function iterates over the factors of a,
+ * where a_index is the index of the factor in a
+ * we are currently looking at.
+ *
+ * This function returns a status, which indicates
+ * a's relationship to 0.
+ * We add lemmas to lem of the form given by the
+ * lemma schema checkSign(...).
+ */
int compareSign(Node oa, Node a, unsigned a_index, int status,
std::vector<Node>& exp, std::vector<Node>& lem);
+ /** compare monomials a and b
+ *
+ * Initially, a call to this function is such that :
+ * exp => ( oa = a ^ ob = b )
+ *
+ * This function returns true if we can infer a valid
+ * arithmetic lemma of the form :
+ * P => abs( a ) >= abs( b )
+ * where P is true and abs( a ) >= abs( b ) is false in the
+ * current model.
+ *
+ * This function is implemented by "processing" factors
+ * of monomials a and b until an inference of the above
+ * form can be made. For example, if :
+ * a = x*x*y and b = z*w
+ * Assuming we are trying to show abs( a ) >= abs( b ),
+ * then if abs( M( x ) ) >= abs( M( z ) ) where M is the current model,
+ * then we can add abs( x ) >= abs( z ) to our explanation, and
+ * mark one factor of x as processed in a, and
+ * one factor of z as processed in b. The number of processed factors of a
+ * and b are stored in a_exp_proc and b_exp_proc respectively.
+ *
+ * cmp_infers stores information that is helpful
+ * in discarding redundant inferences. For example,
+ * we do not want to infer abs( x ) >= abs( z ) if
+ * we have already inferred abs( x ) >= abs( y ) and
+ * abs( y ) >= abs( z ).
+ * It stores entries of the form (status,t1,t2)->F,
+ * which indicates that we constructed a lemma F that
+ * showed t1 <status> t2.
+ *
+ * We add lemmas to lem of the form given by the
+ * lemma schema checkMagnitude(...).
+ */
bool compareMonomial(
Node oa, Node a, NodeMultiset& a_exp_proc, Node ob, Node b,
NodeMultiset& b_exp_proc, std::vector<Node>& exp, std::vector<Node>& lem,
std::map<int, std::map<Node, std::map<Node, Node> > >& cmp_infers);
+ /** helper function for above
+ *
+ * The difference is the inputs a_index and b_index, which are the indices of
+ * children (factors) in monomials a and b which we are currently looking at.
+ */
bool compareMonomial(
Node oa, Node a, unsigned a_index, NodeMultiset& a_exp_proc, Node ob,
Node b, unsigned b_index, NodeMultiset& b_exp_proc, int status,
std::vector<Node>& exp, std::vector<Node>& lem,
std::map<int, std::map<Node, std::map<Node, Node> > >& cmp_infers);
+ /** Check whether we have already inferred a relationship between monomials
+ * x and y based on the information in cmp_infers. This computes the
+ * transitive closure of the relation stored in cmp_infers.
+ */
bool cmp_holds(Node x, Node y,
std::map<Node, std::map<Node, Node> >& cmp_infers,
std::vector<Node>& exp, std::map<Node, bool>& visited);
-
+ /** Is n entailed with polarity pol in the current context? */
bool isEntailed(Node n, bool pol);
- // Potentially sends lem on the output channel if lem has not been sent on the
- // output channel in this context. Returns the number of lemmas sent on the
- // output channel.
+ /** flush lemmas
+ *
+ * Potentially sends lem on the output channel if lem has not been sent on the
+ * output channel in this context. Returns the number of lemmas sent on the
+ * output channel of TheoryArith.
+ */
int flushLemma(Node lem);
- // Potentially sends lemmas to the output channel and clears lemmas. Returns
- // the number of lemmas sent to the output channel.
+ /** Potentially sends lemmas to the output channel and clears lemmas. Returns
+ * the number of lemmas sent to the output channel.
+ */
int flushLemmas(std::vector<Node>& lemmas);
// Returns the NodeMultiset for an existing monomial.
// literals with Skolems (need not be satisfied by model)
NodeSet d_skolem_atoms;
- // utilities
+ /** commonly used terms */
Node d_zero;
Node d_one;
Node d_neg_one;
Node d_true;
Node d_false;
+ /** PI
+ *
+ * Note that PI is a (symbolic, non-constant) nullary operator. This is
+ * because its value cannot be computed exactly. We constrain PI to concrete
+ * lower and upper bounds stored in d_pi_bound below.
+ */
+ Node d_pi;
+ /** PI/2 */
+ Node d_pi_2;
+ /** -PI/2 */
+ Node d_pi_neg_2;
+ /** -PI */
+ Node d_pi_neg;
+ /** the concrete lower and upper bounds for PI */
+ Node d_pi_bound[2];
// The theory of arithmetic containing this extension.
TheoryArith& d_containing;
std::vector<Node> d_constraints;
// model values/orderings
- // model values
+ /** cache of model values
+ *
+ * Stores the concrete/abstract model values
+ * at indices 0 and 1 respectively.
+ */
std::map<Node, Node> d_mv[2];
// ordering, stores variables and 0,1,-1
std::map<Node, Node> d_trig_base;
std::map<Node, bool> d_trig_is_base;
std::map< Node, bool > d_tf_initial_refine;
- Node d_pi;
- Node d_pi_2;
- Node d_pi_neg_2;
- Node d_pi_neg;
- Node d_pi_bound[2];
void mkPi();
void getCurrentPiBounds( std::vector< Node >& lemmas );
std::map<Node, std::map<Node, std::map<Node, Kind> > > d_ci;
std::map<Node, std::map<Node, std::map<Node, Node> > > d_ci_exp;
std::map<Node, std::map<Node, std::map<Node, bool> > > d_ci_max;
-
- //information about transcendental functions
+
+ /** transcendental function representative map
+ *
+ * For each transcendental function n = tf( x ),
+ * this stores ( n.getKind(), r ) -> n
+ * where r is the current representative of x
+ * in the equality engine associated with this class.
+ */
std::map< Kind, std::map< Node, Node > > d_tf_rep_map;
// factor skolems
// tangent plane bounds
std::map< Node, std::map< Node, Node > > d_tangent_val_bound[4];
-
- Node getTaylor( Node tf, Node x, unsigned n, std::vector< Node >& lemmas );
-private:
- // Returns a vector containing a split on whether each term is 0 or not for
- // those terms that have not been split on in the current context.
+
+ /** secant points (sorted list) for transcendental functions
+ *
+ * This is used for tangent plane refinements for
+ * transcendental functions. This is the set
+ * "get-previous-secant-points" in "Satisfiability
+ * Modulo Transcendental Functions via Incremental
+ * Linearization" by Cimatti et al., CADE 2017, for
+ * each transcendental function application.
+ */
+ std::unordered_map<Node, std::vector<Node>, NodeHashFunction> d_secant_points;
+
+ /** get Taylor series of degree n for function fa centered around point fa[0].
+ *
+ * Return value is ( P_{n,f(a)}( x ), R_{n+1,f(a)}( x ) ) where
+ * the first part of the pair is the Taylor series expansion :
+ * P_{n,f(a)}( x ) = sum_{i=0}^n (f^i( a )/i!)*(x-a)^i
+ * and the second part of the pair is the Taylor series remainder :
+ * R_{n+1,f(a),b}( x ) = (f^{n+1}( b )/(n+1)!)*(x-a)^{n+1}
+ *
+ * The above values are cached for each (f,n) for a fixed variable "a".
+ * To compute the Taylor series for fa, we compute the Taylor series
+ * for ( fa.getKind(), n ) then substitute { a -> fa[0] } if fa[0]!=0.
+ * We compute P_{n,f(0)}( x )/R_{n+1,f(0),b}( x ) for ( fa.getKind(), n )
+ * if fa[0]=0.
+ * In the latter case, note we compute the power x^{n+1}
+ * instead of (x-a)^{n+1}, which can be done faster.
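+ *
+ * For example (illustrative), for f = exp, a = 0 and n = 2, the summation
+ * above is P_{2,exp(0)}( x ) = 1 + x + (1/2)*x^2 and the remainder is
+ * R_{3,exp(0),b}( x ) = (exp( b )/6)*x^3.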
+ */
+ std::pair<Node, Node> getTaylor(TNode fa, unsigned n);
+
+ /** internal variables used for constructing (cached) versions of the Taylor
+ * series above.
+ */
+ Node d_taylor_real_fv; // x above
+ Node d_taylor_real_fv_base; // a above
+ Node d_taylor_real_fv_base_rem; // b above
+
+ /** cache of sum and remainder terms for getTaylor */
+ std::unordered_map<Node, std::unordered_map<unsigned, Node>, NodeHashFunction>
+ d_taylor_sum;
+ std::unordered_map<Node, std::unordered_map<unsigned, Node>, NodeHashFunction>
+ d_taylor_rem;
+
+ /** concavity region for transcendental functions
+ *
+ * This stores an integer that identifies an interval in
+ * which the current model value for an argument of an
+ * application of a transcendental function resides.
+ *
+ * For exp( x ):
+ * region #1 is -infty < x < infty
+ * For sin( x ):
+ * region #0 is pi < x < infty (this is an invalid region)
+ * region #1 is pi/2 < x <= pi
+ * region #2 is 0 < x <= pi/2
+ * region #3 is -pi/2 < x <= 0
+ * region #4 is -pi < x <= -pi/2
+ * region #5 is -infty < x <= -pi (this is an invalid region)
+ * All regions not listed above, as well as regions 0 and 5
+ * for SINE are "invalid". We only process applications
+ * of transcendental functions whose arguments have model
+ * values that reside in valid regions.
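+ *
+ * For example (illustrative), if the current model value of x is 1, which
+ * lies in ( 0, PI/2 ], then an application sin( x ) is assigned region #2.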
+ */
+ std::unordered_map<Node, int, NodeHashFunction> d_tf_region;
+ /** get monotonicity direction
+ *
+ * Returns whether the slope is positive (+1) or negative (-1)
+ * in the given region of the transcendental function with kind k.
+ * Returns 0 if region is invalid.
+ */
+ int regionToMonotonicityDir(Kind k, int region);
+ /** get concavity
+ *
+ * Returns whether we are concave (+1) or convex (-1)
+ * in the given region of the transcendental function with kind k,
+ * where region is defined above.
+ * Returns 0 if region is invalid.
+ */
+ int regionToConcavity(Kind k, int region);
+ /** region to lower bound
+ *
+ * Returns the term corresponding to the lower
+ * bound of the region of transcendental function
+ * with kind k. Returns Node::null() if the region
+ * is invalid, or if there is no lower bound for the
+ * region.
+ */
+ Node regionToLowerBound(Kind k, int region);
+ /** region to upper bound
+ *
+ * Returns the term corresponding to the upper
+ * bound of the region of transcendental function
+ * with kind k. Returns Node::null() if the region
+ * is invalid, or if there is no upper bound for the
+ * region.
+ */
+ Node regionToUpperBound(Kind k, int region);
+ /** get derivative
+ *
+ * Returns d/dx n. Supports the cases where n is
+ * a transcendental function applied to x, a multiplication,
+ * an addition, a constant, or a variable.
+ * Returns Node::null() if the derivative is an
+ * unhandled case.
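+ *
+ * For example (illustrative), getDerivative( x*x*y + 2*x + sin( x ), x )
+ * returns a term equivalent to 2*x*y + 2 + sin( PI/2 - x ).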
+ */
+ Node getDerivative(Node n, Node x);
+
+ private:
+ //-------------------------------------------- lemma schemas
+ /** check split zero
+ *
+ * Returns a set of theory lemmas of the form
+ * t = 0 V t != 0
+ * where t is a term that exists in the current context.
+ */
std::vector<Node> checkSplitZero();
-
+
+ /** check monomial sign
+ *
+ * Returns a set of valid theory lemmas, based on a
+ * lemma schema which ensures that non-linear monomials
+ * respect sign information entailed by facts about their factors.
+ * For more details, see Section 5 of "Design Theory
+ * Solvers with Extensions" by Reynolds et al., FroCoS 2017,
+ * Figure 5, this is the schema "Sign".
+ *
+ * Examples:
+ *
+ * x > 0 ^ y > 0 => x*y > 0
+ * x < 0 ^ y != 0 => x*y*y < 0
+ * x = 0 => x*y*z = 0
+ */
std::vector<Node> checkMonomialSign();
-
+
+ /** check monomial magnitude
+ *
+ * Returns a set of valid theory lemmas, based on a
+ * lemma schema which ensures that comparisons between
+ * non-linear monomials respect the magnitude of their
+ * factors.
+ * For more details, see Section 5 of "Design Theory
+ * Solvers with Extensions" by Reynolds et al., FroCoS 2017,
+ * Figure 5, this is the schema "Magnitude".
+ *
+ * Examples:
+ *
+ * |x|>|y| => |x*z|>|y*z|
+ * |x|>|y| ^ |z|>|w| ^ |x|>=1 ^ |u|>=1 => |x*x*z*u|>|y*w|
+ */
std::vector<Node> checkMonomialMagnitude( unsigned c );
-
+
+ /** check monomial inferred bounds
+ *
+ * Returns a set of valid theory lemmas, based on a
+ * lemma schema that infers new constraints about existing
+ * terms based on multiplying both sides of an existing
+ * constraint by a term.
+ * For more details, see Section 5 of "Design Theory
+ * Solvers with Extensions" by Reynolds et al., FroCoS 2017,
+ * Figure 5, this is the schema "Multiply".
+ *
+ * Examples:
+ *
+ * x > 0 ^ (y > z + w) => x*y > x*(z+w)
+ * x < 0 ^ (y > z + w) => x*y < x*(z+w)
+ * ...where (y > z + w) and x*y are a constraint and term
+ * that occur in the current context.
+ */
std::vector<Node> checkMonomialInferBounds( std::vector<Node>& nt_lemmas,
const std::set<Node>& false_asserts );
-
+
+ /** check factoring
+ *
+ * Returns a set of valid theory lemmas, based on a
+ * lemma schema that states a relationship between monomials
+ * with common factors that occur in the same constraint.
+ *
+ * Examples:
+ *
+ * x*z+y*z > t => ( k = x + y ^ k*z > t )
+ * ...where k is fresh and x*z + y*z > t is a
+ * constraint that occurs in the current context.
+ */
std::vector<Node> checkFactoring( const std::set<Node>& false_asserts );
-
+
+ /** check monomial infer resolution bounds
+ *
+ * Returns a set of valid theory lemmas, based on a
+ * lemma schema which "resolves" upper bounds
+ * of one inequality with lower bounds for another.
+ * This schema is not enabled by default, and can
+ * be enabled by --nl-ext-rbound.
+ *
+ * Examples:
+ *
+ * ( y>=0 ^ z>=0 ^ s <= x*z ^ x*y <= t ) => y*s <= z*t
+ * ...where s <= x*z and x*y <= t are constraints
+ * that occur in the current context.
+ */
std::vector<Node> checkMonomialInferResBounds();
-
+
+ /** check tangent planes
+ *
+ * Returns a set of valid theory lemmas, based on an
+ * "incremental linearization" of non-linear monomials.
+ * This linearization is accomplished by adding constraints
+ * corresponding to "tangent planes" at the current
+ * model value of each non-linear monomial. In particular
+ * consider the definition for constants a,b :
+ * T_{a,b}( x*y ) = b*x + a*y - a*b.
+ * The lemmas added by this function are of the form :
+ * ( ( x>a ^ y<b ) V ( x<a ^ y>b ) ) => x*y < T_{a,b}( x*y )
+ * ( ( x>a ^ y>b ) V ( x<a ^ y<b ) ) => x*y > T_{a,b}( x*y )
+ * It is inspired by "Invariant Checking of NRA Transition
+ * Systems via Incremental Reduction to LRA with EUF" by
+ * Cimatti et al., TACAS 2017.
+ * This schema is not terminating in general.
+ * It is not enabled by default, and can
+ * be enabled by --nl-ext-tplanes.
+ *
+ * Examples:
+ *
+ * ( ( x>2 ^ y>5 ) V ( x<2 ^ y<5 ) ) => x*y > 5*x + 2*y - 10
+ * ( ( x>2 ^ y<5 ) V ( x<2 ^ y>5 ) ) => x*y < 5*x + 2*y - 10
+ */
std::vector<Node> checkTangentPlanes();
-
+
+ /** check transcendental initial refine
+ *
+ * Returns a set of valid theory lemmas, based on
+ * simple facts about transcendental functions.
+ * This mostly follows the initial axioms described in
+ * Section 4 of "Satisfiability
+ * Modulo Transcendental Functions via Incremental
+ * Linearization" by Cimatti et al., CADE 2017.
+ *
+ * Examples:
+ *
+ * sin( x ) = -sin( -x )
+ * ( PI > x > 0 ) => 0 < sin( x ) <= 1
+ * exp( x )>0
+ * x<0 => exp( x )<1
+ */
std::vector<Node> checkTranscendentalInitialRefine();
+ /** check transcendental monotonic
+ *
+ * Returns a set of valid theory lemmas, based on a
+ * lemma scheme that ensures that applications
+ * of transcendental functions respect monotonicity.
+ *
+ * Examples:
+ *
+ * x > y => exp( x ) > exp( y )
+ * PI/2 > x > y > 0 => sin( x ) > sin( y )
+ * PI > x > y > PI/2 => sin( x ) < sin( y )
+ */
std::vector<Node> checkTranscendentalMonotonic();
+
+ //-------------------------------------------- end lemma schemas
}; /* class NonlinearExtension */
} // namespace arith