d_tangentPlaneSlv(&d_extState),
d_cadSlv(d_im, d_model, state.getUserContext(), pnm),
d_icpSlv(d_im),
- d_iandSlv(d_im, state, d_model),
- d_builtModel(containing.getSatContext(), false)
+ d_iandSlv(d_im, state, d_model)
{
d_extTheory.addFunctionKind(kind::NONLINEAR_MULT);
d_extTheory.addFunctionKind(kind::EXPONENTIAL);
{
d_im.reset();
Trace("nl-ext") << std::endl;
- Trace("nl-ext") << "NonlinearExtension::check, effort = " << e
- << ", built model = " << d_builtModel.get() << std::endl;
+ Trace("nl-ext") << "NonlinearExtension::check, effort = " << e << std::endl;
if (e == Theory::EFFORT_FULL)
{
d_extTheory.clearCache();
}
}
-bool NonlinearExtension::modelBasedRefinement(const std::set<Node>& termSet)
+Result::Sat NonlinearExtension::modelBasedRefinement(const std::set<Node>& termSet)
{
++(d_stats.d_mbrRuns);
d_checkCounter++;
if (d_im.hasSentLemma() || d_im.hasPendingLemma())
{
d_im.clearWaitingLemmas();
- return true;
+ return Result::Sat::UNSAT;
}
}
Trace("nl-ext") << "Finished check with status : " << complete_status
if (d_im.hasUsed())
{
d_im.clearWaitingLemmas();
- return true;
+ return Result::Sat::UNSAT;
}
}
d_im.flushWaitingLemmas();
Trace("nl-ext") << "...added " << count << " waiting lemmas."
<< std::endl;
- return true;
+ return Result::Sat::UNSAT;
}
// resort to splitting on shared terms with their model value
// if we did not add any lemmas
d_im.flushWaitingLemmas();
Trace("nl-ext") << "...added " << d_im.numPendingLemmas()
<< " shared term value split lemmas." << std::endl;
- return true;
+ return Result::Sat::UNSAT;
}
}
else
d_containing.getOutputChannel().setIncomplete();
}
}
- else
- {
- // we have built a model
- d_builtModel = true;
- }
d_im.clearWaitingLemmas();
} while (needsRecheck);
// did not add lemmas
- return false;
+ return Result::Sat::SAT;
}
void NonlinearExtension::interceptModel(std::map<Node, Node>& arithModel,
Trace("nl-ext") << "NonlinearExtension::interceptModel begin" << std::endl;
d_model.reset(d_containing.getValuation().getModel(), arithModel);
// run a last call effort check
- if (!d_builtModel.get())
- {
- Trace("nl-ext") << "interceptModel: do model-based refinement" << std::endl;
- modelBasedRefinement(termSet);
- }
- if (d_builtModel.get())
+ Trace("nl-ext") << "interceptModel: do model-based refinement" << std::endl;
+ Result::Sat res = modelBasedRefinement(termSet);
+ if (res == Result::Sat::SAT)
{
Trace("nl-ext") << "interceptModel: do model repair" << std::endl;
d_approximations.clear();
void NonlinearExtension::presolve()
{
Trace("nl-ext") << "NonlinearExtension::presolve" << std::endl;
- d_builtModel = false;
}
void NonlinearExtension::runStrategy(Theory::Effort effort,
#include "theory/arith/nl/transcendental/transcendental_solver.h"
#include "theory/ext_theory.h"
#include "theory/theory.h"
+#include "util/result.h"
namespace CVC4 {
namespace theory {
* described in Reynolds et al. FroCoS 2017 that are based on ruling out
* the current candidate model.
*
- * This function returns true if a lemma was added to the inference manager.
- * Otherwise, it returns false. In the latter case, the model object d_model
- * may have information regarding how to construct a model, in the case that
- * we determined the problem is satisfiable.
+ * This function returns whether we found a satisfying assignment
+ * (Result::Sat::SAT), or not (Result::Sat::UNSAT). Note that UNSAT does not
* necessarily mean the whole query is UNSAT, but that the linear model was
+ * refuted by a lemma.
*/
- bool modelBasedRefinement(const std::set<Node>& termSet);
+ Result::Sat modelBasedRefinement(const std::set<Node>& termSet);
/** get assertions
*
* NlModel::getModelValueRepair.
*/
std::map<Node, Node> d_witnesses;
- /** have we successfully built the model in this SAT context? */
- context::CDO<bool> d_builtModel;
}; /* class NonlinearExtension */
} // namespace nl