This PR is the first step in consolidating the various variants of Inferences, InferenceIds and InferenceManagers of the theories.
It merges all InferenceIds into one global enum and makes all theories use it.
theory/arith/fc_simplex.h
theory/arith/infer_bounds.cpp
theory/arith/infer_bounds.h
- theory/arith/inference_id.cpp
- theory/arith/inference_id.h
theory/arith/inference_manager.cpp
theory/arith/inference_manager.h
theory/arith/linear_equality.cpp
theory/fp/theory_fp_type_rules.h
theory/fp/type_enumerator.h
theory/interrupted.h
+ theory/inference_id.cpp
+ theory/inference_id.h
theory/inference_manager_buffered.cpp
theory/inference_manager_buffered.h
theory/logic_info.cpp
#include <vector>
#include "expr/node.h"
-#include "theory/arith/inference_id.h"
+#include "theory/inference_id.h"
#include "theory/inference_manager_buffered.h"
#include "theory/output_channel.h"
#include "theory/theory_inference.h"
+++ /dev/null
-/********************* */
-/*! \file inference_id.cpp
- ** \verbatim
- ** Top contributors (to current version):
- ** Gereon Kremer, Yoni Zohar
- ** This file is part of the CVC4 project.
- ** Copyright (c) 2009-2020 by the authors listed in the file AUTHORS
- ** in the top-level source directory and their institutional affiliations.
- ** All rights reserved. See the file COPYING in the top-level source
- ** directory for licensing information.\endverbatim
- **
- ** \brief Implementation of inference enumeration.
- **/
-
-#include "theory/arith/inference_id.h"
-
-namespace CVC4 {
-namespace theory {
-namespace arith {
-
-const char* toString(InferenceId i)
-{
- switch (i)
- {
- case InferenceId::NL_CONGRUENCE: return "CONGRUENCE";
- case InferenceId::NL_SHARED_TERM_VALUE_SPLIT: return "SHARED_TERM_VALUE_SPLIT";
- case InferenceId::NL_SPLIT_ZERO: return "SPLIT_ZERO";
- case InferenceId::NL_SIGN: return "SIGN";
- case InferenceId::NL_COMPARISON: return "COMPARISON";
- case InferenceId::NL_INFER_BOUNDS: return "INFER_BOUNDS";
- case InferenceId::NL_INFER_BOUNDS_NT: return "INFER_BOUNDS_NT";
- case InferenceId::NL_FACTOR: return "FACTOR";
- case InferenceId::NL_RES_INFER_BOUNDS: return "RES_INFER_BOUNDS";
- case InferenceId::NL_TANGENT_PLANE: return "TANGENT_PLANE";
- case InferenceId::NL_T_PURIFY_ARG: return "T_PURIFY_ARG";
- case InferenceId::NL_T_INIT_REFINE: return "T_INIT_REFINE";
- case InferenceId::NL_T_PI_BOUND: return "T_PI_BOUND";
- case InferenceId::NL_T_MONOTONICITY: return "T_MONOTONICITY";
- case InferenceId::NL_T_SECANT: return "T_SECANT";
- case InferenceId::NL_T_TANGENT: return "T_TANGENT";
- case InferenceId::NL_IAND_INIT_REFINE: return "IAND_INIT_REFINE";
- case InferenceId::NL_IAND_VALUE_REFINE: return "IAND_VALUE_REFINE";
- case InferenceId::NL_IAND_SUM_REFINE: return "IAND_SUM_REFINE";
- case InferenceId::NL_IAND_BITWISE_REFINE: return "IAND_BITWISE_REFINE";
- case InferenceId::NL_CAD_CONFLICT: return "CAD_CONFLICT";
- case InferenceId::NL_CAD_EXCLUDED_INTERVAL: return "CAD_EXCLUDED_INTERVAL";
- case InferenceId::NL_ICP_CONFLICT: return "ICP_CONFLICT";
- case InferenceId::NL_ICP_PROPAGATION: return "ICP_PROPAGATION";
- default: return "?";
- }
-}
-
-std::ostream& operator<<(std::ostream& out, InferenceId i)
-{
- out << toString(i);
- return out;
-}
-
-} // namespace arith
-} // namespace theory
-} // namespace CVC4
+++ /dev/null
-/********************* */
-/*! \file inference_id.h
- ** \verbatim
- ** Top contributors (to current version):
- ** Andrew Reynolds, Gereon Kremer, Yoni Zohar
- ** This file is part of the CVC4 project.
- ** Copyright (c) 2009-2020 by the authors listed in the file AUTHORS
- ** in the top-level source directory and their institutional affiliations.
- ** All rights reserved. See the file COPYING in the top-level source
- ** directory for licensing information.\endverbatim
- **
- ** \brief Inference enumeration for arithmetic.
- **/
-
-#include "cvc4_private.h"
-
-#ifndef CVC4__THEORY__ARITH__INFERENCE_ID_H
-#define CVC4__THEORY__ARITH__INFERENCE_ID_H
-
-#include <map>
-#include <vector>
-
-#include "util/safe_print.h"
-
-namespace CVC4 {
-namespace theory {
-namespace arith {
-
-/**
- * Types of inferences used in the procedure
- */
-enum class InferenceId : uint32_t
-{
- //-------------------- core
- // simple congruence x=y => f(x)=f(y)
- NL_CONGRUENCE,
- // shared term value split (for naive theory combination)
- NL_SHARED_TERM_VALUE_SPLIT,
- //-------------------- incremental linearization solver
- // splitting on zero (NlSolver::checkSplitZero)
- NL_SPLIT_ZERO,
- // based on sign (NlSolver::checkMonomialSign)
- NL_SIGN,
- // based on comparing (abs) model values (NlSolver::checkMonomialMagnitude)
- NL_COMPARISON,
- // based on inferring bounds (NlSolver::checkMonomialInferBounds)
- NL_INFER_BOUNDS,
- // same as above, for inferences that introduce new terms
- NL_INFER_BOUNDS_NT,
- // factoring (NlSolver::checkFactoring)
- NL_FACTOR,
- // resolution bound inferences (NlSolver::checkMonomialInferResBounds)
- NL_RES_INFER_BOUNDS,
- // tangent planes (NlSolver::checkTangentPlanes)
- NL_TANGENT_PLANE,
- //-------------------- transcendental solver
- // purification of arguments to transcendental functions
- NL_T_PURIFY_ARG,
- // initial refinement (TranscendentalSolver::checkTranscendentalInitialRefine)
- NL_T_INIT_REFINE,
- // pi bounds
- NL_T_PI_BOUND,
- // monotonicity (TranscendentalSolver::checkTranscendentalMonotonic)
- NL_T_MONOTONICITY,
- // tangent refinement (TranscendentalSolver::checkTranscendentalTangentPlanes)
- NL_T_TANGENT,
- // secant refinement, the dual of the above inference
- NL_T_SECANT,
- //-------------------- iand solver
- // initial refinements (IAndSolver::checkInitialRefine)
- NL_IAND_INIT_REFINE,
- // value refinements (IAndSolver::checkFullRefine)
- NL_IAND_VALUE_REFINE,
- // sum refinements (IAndSolver::checkFullRefine)
- NL_IAND_SUM_REFINE,
- // bitwise refinements (IAndSolver::checkFullRefine)
- NL_IAND_BITWISE_REFINE,
- //-------------------- cad solver
- // conflict / infeasible subset obtained from cad
- NL_CAD_CONFLICT,
- // excludes an interval for a single variable
- NL_CAD_EXCLUDED_INTERVAL,
- //-------------------- icp solver
- // conflict obtained from icp
- NL_ICP_CONFLICT,
- // propagation / contraction of variable bounds from icp
- NL_ICP_PROPAGATION,
- //-------------------- unknown
- UNKNOWN,
-};
-
-/**
- * Converts an inference to a string. Note: This function is also used in
- * `safe_print()`. Changing this functions name or signature will result in
- * `safe_print()` printing "<unsupported>" instead of the proper strings for
- * the enum values.
- *
- * @param i The inference
- * @return The name of the inference
- */
-const char* toString(InferenceId i);
-
-/**
- * Writes an inference name to a stream.
- *
- * @param out The stream to write to
- * @param i The inference to write to the stream
- * @return The stream
- */
-std::ostream& operator<<(std::ostream& out, InferenceId i);
-
-} // namespace arith
-} // namespace theory
-} // namespace CVC4
-
-#endif /* CVC4__THEORY__ARITH__INFERENCE_H */
#include "theory/arith/arith_lemma.h"
#include "theory/arith/arith_state.h"
-#include "theory/arith/inference_id.h"
+#include "theory/inference_id.h"
#include "theory/arith/nl/nl_lemma_utils.h"
#include "theory/inference_manager_buffered.h"
#endif
#include "options/arith_options.h"
-#include "theory/arith/inference_id.h"
+#include "theory/inference_id.h"
#include "theory/arith/nl/cad/cdcac.h"
#include "theory/arith/nl/poly_conversion.h"
#include "util/poly_util.h"
n = n.negate();
}
d_im.addPendingArithLemma(NodeManager::currentNM()->mkOr(mis),
- InferenceId::NL_CAD_CONFLICT);
+ InferenceId::ARITH_NL_CAD_CONFLICT);
}
#else
Warning() << "Tried to use CadSolver but libpoly is not available. Compile "
Trace("nl-cad") << "Excluding " << first_var << " -> "
<< interval.d_interval << " using " << lemma
<< std::endl;
- d_im.addPendingArithLemma(lemma, InferenceId::NL_CAD_EXCLUDED_INTERVAL);
+ d_im.addPendingArithLemma(lemma, InferenceId::ARITH_NL_CAD_EXCLUDED_INTERVAL);
}
}
}
flem, PfRule::MACRO_SR_PRED_TRANSFORM, {split, k_eq}, {flem});
}
d_data->d_im.addPendingArithLemma(
- flem, InferenceId::NL_FACTOR, proof);
+ flem, InferenceId::ARITH_NL_FACTOR, proof);
}
}
}
Node k_eq = k.eqNode(n);
Trace("nl-ext-factor") << "...adding factor skolem " << k << " == " << n
<< std::endl;
- d_data->d_im.addPendingArithLemma(k_eq, InferenceId::NL_FACTOR, proof);
+ d_data->d_im.addPendingArithLemma(k_eq, InferenceId::ARITH_NL_FACTOR, proof);
d_factor_skolem[n] = k;
}
else
// Trace("nl-ext-bound-lemma") << " intro new
// monomials = " << introNewTerms << std::endl;
d_data->d_im.addPendingArithLemma(
- iblem, InferenceId::NL_INFER_BOUNDS_NT, nullptr, introNewTerms);
+ iblem, InferenceId::ARITH_NL_INFER_BOUNDS_NT, nullptr, introNewTerms);
}
}
}
Trace("nl-ext-rbound-lemma")
<< "Resolution bound lemma : " << rblem << std::endl;
d_data->d_im.addPendingArithLemma(
- rblem, InferenceId::NL_RES_INFER_BOUNDS);
+ rblem, InferenceId::ARITH_NL_RES_INFER_BOUNDS);
}
}
exp.pop_back();
{
Node lemma =
nm->mkAnd(exp).impNode(mkLit(oa, d_data->d_zero, status * 2));
- d_data->d_im.addPendingArithLemma(lemma, InferenceId::NL_SIGN);
+ d_data->d_im.addPendingArithLemma(lemma, InferenceId::ARITH_NL_SIGN);
}
return status;
}
proof->addStep(conc, PfRule::MACRO_SR_PRED_INTRO, {prem}, {conc});
proof->addStep(lemma, PfRule::SCOPE, {conc}, {prem});
}
- d_data->d_im.addPendingArithLemma(lemma, InferenceId::NL_SIGN, proof);
+ d_data->d_im.addPendingArithLemma(lemma, InferenceId::ARITH_NL_SIGN, proof);
}
return 0;
}
Kind::IMPLIES, nm->mkAnd(exp), mkLit(oa, ob, status, true));
Trace("nl-ext-comp-lemma") << "comparison lemma : " << clem << std::endl;
lem.emplace_back(
- clem, LemmaProperty::NONE, nullptr, InferenceId::NL_COMPARISON);
+ clem, LemmaProperty::NONE, nullptr, InferenceId::ARITH_NL_COMPARISON);
cmp_infers[status][oa][ob] = clem;
}
return true;
proof->addStep(lem, PfRule::SPLIT, {}, {eq});
}
d_data->d_im.addPendingPhaseRequirement(eq, true);
- d_data->d_im.addPendingArithLemma(lem, InferenceId::NL_SPLIT_ZERO, proof);
+ d_data->d_im.addPendingArithLemma(lem, InferenceId::ARITH_NL_SPLIT_ZERO, proof);
}
}
}
nm->mkConst(Rational(d == 0 ? -1 : 1))});
}
d_data->d_im.addPendingArithLemma(
- tlem, InferenceId::NL_TANGENT_PLANE, proof, asWaitingLemmas);
+ tlem, InferenceId::ARITH_NL_TANGENT_PLANE, proof, asWaitingLemmas);
}
}
}
Node lem = conj.size() == 1 ? conj[0] : nm->mkNode(AND, conj);
Trace("iand-lemma") << "IAndSolver::Lemma: " << lem << " ; INIT_REFINE"
<< std::endl;
- d_im.addPendingArithLemma(lem, InferenceId::NL_IAND_INIT_REFINE);
+ d_im.addPendingArithLemma(lem, InferenceId::ARITH_NL_IAND_INIT_REFINE);
}
}
}
// note that lemma can contain div/mod, and will be preprocessed in the
// prop engine
d_im.addPendingArithLemma(
- lem, InferenceId::NL_IAND_SUM_REFINE, nullptr, true);
+ lem, InferenceId::ARITH_NL_IAND_SUM_REFINE, nullptr, true);
}
else if (options::iandMode() == options::IandMode::BITWISE)
{
// note that lemma can contain div/mod, and will be preprocessed in the
// prop engine
d_im.addPendingArithLemma(
- lem, InferenceId::NL_IAND_BITWISE_REFINE, nullptr, true);
+ lem, InferenceId::ARITH_NL_IAND_BITWISE_REFINE, nullptr, true);
}
else
{
<< "IAndSolver::Lemma: " << lem << " ; VALUE_REFINE" << std::endl;
// value lemmas should not contain div/mod so we don't need to tag it with PREPROCESS
d_im.addPendingArithLemma(lem,
- InferenceId::NL_IAND_VALUE_REFINE,
+ InferenceId::ARITH_NL_IAND_VALUE_REFINE,
nullptr,
true,
LemmaProperty::NONE);
mis.emplace_back(n.negate());
}
d_im.addPendingArithLemma(NodeManager::currentNM()->mkOr(mis),
- InferenceId::NL_ICP_CONFLICT);
+ InferenceId::ARITH_NL_ICP_CONFLICT);
did_progress = true;
progress = false;
break;
std::vector<Node> lemmas = generateLemmas();
for (const auto& l : lemmas)
{
- d_im.addPendingArithLemma(l, InferenceId::NL_ICP_PROPAGATION);
+ d_im.addPendingArithLemma(l, InferenceId::ARITH_NL_ICP_PROPAGATION);
}
}
}
d_containing.getOutputChannel().requirePhase(literal, true);
Trace("nl-ext-debug") << "Split on : " << literal << std::endl;
Node split = literal.orNode(literal.negate());
- NlLemma nsplit(split, InferenceId::NL_SHARED_TERM_VALUE_SPLIT);
+ NlLemma nsplit(split, InferenceId::ARITH_NL_SHARED_TERM_VALUE_SPLIT);
d_im.addPendingArithLemma(nsplit, true);
}
if (d_im.hasWaitingLemma())
#define CVC4__THEORY__ARITH__NL__STATS_H
#include "expr/kind.h"
-#include "theory/arith/inference_id.h"
+#include "theory/inference_id.h"
#include "util/statistics_registry.h"
namespace CVC4 {
// note we must do preprocess on this lemma
Trace("nl-ext-lemma") << "NonlinearExtension::Lemma : purify : " << lem
<< std::endl;
- NlLemma nlem(lem, LemmaProperty::NONE, nullptr, InferenceId::NL_T_PURIFY_ARG);
+ NlLemma nlem(lem, LemmaProperty::NONE, nullptr, InferenceId::ARITH_NL_T_PURIFY_ARG);
d_data->d_im.addPendingArithLemma(nlem);
}
// exp is always positive: exp(t) > 0
Node lem = nm->mkNode(Kind::GT, t, d_data->d_zero);
d_data->d_im.addPendingArithLemma(
- lem, InferenceId::NL_T_INIT_REFINE, nullptr);
+ lem, InferenceId::ARITH_NL_T_INIT_REFINE, nullptr);
}
{
// exp at zero: (t = 0) <=> (exp(t) = 1)
t[0].eqNode(d_data->d_zero),
t.eqNode(d_data->d_one));
d_data->d_im.addPendingArithLemma(
- lem, InferenceId::NL_T_INIT_REFINE, nullptr);
+ lem, InferenceId::ARITH_NL_T_INIT_REFINE, nullptr);
}
{
// exp on negative values: (t < 0) <=> (exp(t) < 1)
nm->mkNode(Kind::LT, t[0], d_data->d_zero),
nm->mkNode(Kind::LT, t, d_data->d_one));
d_data->d_im.addPendingArithLemma(
- lem, InferenceId::NL_T_INIT_REFINE, nullptr);
+ lem, InferenceId::ARITH_NL_T_INIT_REFINE, nullptr);
}
{
// exp on positive values: (t <= 0) or (exp(t) > t+1)
nm->mkNode(
Kind::GT, t, nm->mkNode(Kind::PLUS, t[0], d_data->d_one)));
d_data->d_im.addPendingArithLemma(
- lem, InferenceId::NL_T_INIT_REFINE, nullptr);
+ lem, InferenceId::ARITH_NL_T_INIT_REFINE, nullptr);
}
}
}
Trace("nl-ext-exp") << "Monotonicity lemma : " << mono_lem << std::endl;
d_data->d_im.addPendingArithLemma(mono_lem,
- InferenceId::NL_T_MONOTONICITY);
+ InferenceId::ARITH_NL_T_MONOTONICITY);
}
// store the previous values
targ = sarg;
Trace("nl-ext-exp") << "*** Tangent plane lemma : " << lem << std::endl;
Assert(d_data->d_model.computeAbstractModelValue(lem) == d_data->d_false);
// Figure 3 : line 9
- d_data->d_im.addPendingArithLemma(lem, InferenceId::NL_T_TANGENT, nullptr, true);
+ d_data->d_im.addPendingArithLemma(lem, InferenceId::ARITH_NL_T_TANGENT, nullptr, true);
}
void ExponentialSolver::doSecantLemmas(TNode e,
// note we must do preprocess on this lemma
Trace("nl-ext-lemma") << "NonlinearExtension::Lemma : purify : " << lem
<< std::endl;
- NlLemma nlem(lem, LemmaProperty::NONE, proof, InferenceId::NL_T_PURIFY_ARG);
+ NlLemma nlem(lem, LemmaProperty::NONE, proof, InferenceId::ARITH_NL_T_PURIFY_ARG);
d_data->d_im.addPendingArithLemma(nlem);
}
nm->mkNode(Kind::LEQ, t, d_data->d_one),
nm->mkNode(Kind::GEQ, t, d_data->d_neg_one));
d_data->d_im.addPendingArithLemma(
- lem, InferenceId::NL_T_INIT_REFINE);
+ lem, InferenceId::ARITH_NL_T_INIT_REFINE);
}
{
// sine symmetry: sin(t) - sin(-t) = 0
Node lem = nm->mkNode(Kind::PLUS, t, symn).eqNode(d_data->d_zero);
d_data->d_im.addPendingArithLemma(
- lem, InferenceId::NL_T_INIT_REFINE);
+ lem, InferenceId::ARITH_NL_T_INIT_REFINE);
}
{
// sine zero tangent:
nm->mkNode(Kind::LT, t[0], d_data->d_zero),
nm->mkNode(Kind::GT, t, t[0])));
d_data->d_im.addPendingArithLemma(
- lem, InferenceId::NL_T_INIT_REFINE);
+ lem, InferenceId::ARITH_NL_T_INIT_REFINE);
}
{
// sine pi tangent:
t,
nm->mkNode(Kind::MINUS, d_data->d_pi, t[0]))));
d_data->d_im.addPendingArithLemma(
- lem, InferenceId::NL_T_INIT_REFINE);
+ lem, InferenceId::ARITH_NL_T_INIT_REFINE);
}
{
Node lem =
nm->mkNode(Kind::GT, t[0], d_data->d_zero),
nm->mkNode(Kind::GT, t, d_data->d_zero)));
d_data->d_im.addPendingArithLemma(
- lem, InferenceId::NL_T_INIT_REFINE);
+ lem, InferenceId::ARITH_NL_T_INIT_REFINE);
}
}
}
<< "Monotonicity lemma : " << mono_lem << std::endl;
d_data->d_im.addPendingArithLemma(mono_lem,
- InferenceId::NL_T_MONOTONICITY);
+ InferenceId::ARITH_NL_T_MONOTONICITY);
}
}
// store the previous values
Assert(d_data->d_model.computeAbstractModelValue(lem) == d_data->d_false);
// Figure 3 : line 9
d_data->d_im.addPendingArithLemma(
- lem, InferenceId::NL_T_TANGENT, nullptr, true);
+ lem, InferenceId::ARITH_NL_T_TANGENT, nullptr, true);
}
void SineSolver::doSecantLemmas(TNode e,
}
Node expn = exp.size() == 1 ? exp[0] : nm->mkNode(Kind::AND, exp);
Node cong_lemma = expn.impNode(a.eqNode(aa));
- d_im.addPendingArithLemma(cong_lemma, InferenceId::NL_CONGRUENCE);
+ d_im.addPendingArithLemma(cong_lemma, InferenceId::ARITH_NL_CONGRUENCE);
}
}
else
proof->addStep(
pi_lem, PfRule::ARITH_TRANS_PI, {}, {d_pi_bound[0], d_pi_bound[1]});
}
- d_im.addPendingArithLemma(pi_lem, InferenceId::NL_T_PI_BOUND, proof);
+ d_im.addPendingArithLemma(pi_lem, InferenceId::ARITH_NL_T_PI_BOUND, proof);
}
std::pair<Node, Node> TranscendentalState::getClosestSecantPoints(TNode e,
lem = Rewriter::rewrite(lem);
Trace("nl-trans-lemma") << "*** Secant plane lemma : " << lem << std::endl;
Assert(d_model.computeAbstractModelValue(lem) == d_false);
- return NlLemma(lem, LemmaProperty::NONE, nullptr, InferenceId::NL_T_SECANT);
+ return NlLemma(lem, LemmaProperty::NONE, nullptr, InferenceId::ARITH_NL_T_SECANT);
}
void TranscendentalState::doSecantLemmas(const std::pair<Node, Node>& bounds,
namespace theory {
namespace bags {
-const char* toString(Inference i)
-{
- switch (i)
- {
- case Inference::NONE: return "NONE";
- case Inference::BAG_NON_NEGATIVE_COUNT: return "BAG_NON_NEGATIVE_COUNT";
- case Inference::BAG_MK_BAG_SAME_ELEMENT: return "BAG_MK_BAG_SAME_ELEMENT";
- case Inference::BAG_MK_BAG: return "BAG_MK_BAG";
- case Inference::BAG_EQUALITY: return "BAG_EQUALITY";
- case Inference::BAG_DISEQUALITY: return "BAG_DISEQUALITY";
- case Inference::BAG_EMPTY: return "BAG_EMPTY";
- case Inference::BAG_UNION_DISJOINT: return "BAG_UNION_DISJOINT";
- case Inference::BAG_UNION_MAX: return "BAG_UNION_MAX";
- case Inference::BAG_INTERSECTION_MIN: return "BAG_INTERSECTION_MIN";
- case Inference::BAG_DIFFERENCE_SUBTRACT: return "BAG_DIFFERENCE_SUBTRACT";
- case Inference::BAG_DIFFERENCE_REMOVE: return "BAG_DIFFERENCE_REMOVE";
- case Inference::BAG_DUPLICATE_REMOVAL: return "BAG_DUPLICATE_REMOVAL";
- default: return "?";
- }
-}
-
-std::ostream& operator<<(std::ostream& out, Inference i)
-{
- out << toString(i);
- return out;
-}
-
-InferInfo::InferInfo() : d_id(Inference::NONE) {}
+InferInfo::InferInfo() : d_id(InferenceId::UNKNOWN) {}
bool InferInfo::process(TheoryInferenceManager* im, bool asLemma)
{
#include <vector>
#include "expr/node.h"
+#include "theory/inference_id.h"
#include "theory/theory_inference.h"
namespace CVC4 {
namespace theory {
namespace bags {
-/**
- * Types of inferences used in the procedure
- */
-enum class Inference : uint32_t
-{
- NONE,
- BAG_NON_NEGATIVE_COUNT,
- BAG_MK_BAG_SAME_ELEMENT,
- BAG_MK_BAG,
- BAG_EQUALITY,
- BAG_DISEQUALITY,
- BAG_EMPTY,
- BAG_UNION_DISJOINT,
- BAG_UNION_MAX,
- BAG_INTERSECTION_MIN,
- BAG_DIFFERENCE_SUBTRACT,
- BAG_DIFFERENCE_REMOVE,
- BAG_DUPLICATE_REMOVAL
-};
-
-/**
- * Converts an inference to a string. Note: This function is also used in
- * `safe_print()`. Changing this functions name or signature will result in
- * `safe_print()` printing "<unsupported>" instead of the proper strings for
- * the enum values.
- *
- * @param i The inference
- * @return The name of the inference
- */
-const char* toString(Inference i);
-
-/**
- * Writes an inference name to a stream.
- *
- * @param out The stream to write to
- * @param i The inference to write to the stream
- * @return The stream
- */
-std::ostream& operator<<(std::ostream& out, Inference i);
-
class InferenceManager;
/**
/** Process this inference */
bool process(TheoryInferenceManager* im, bool asLemma) override;
/** The inference identifier */
- Inference d_id;
- /** The conclusions */
+ InferenceId d_id;
+ /** The conclusion */
Node d_conclusion;
/**
* The premise(s) of the inference, interpreted conjunctively. These are
Assert(e.getType() == n.getType().getBagElementType());
InferInfo inferInfo;
- inferInfo.d_id = Inference::BAG_NON_NEGATIVE_COUNT;
+ inferInfo.d_id = InferenceId::BAG_NON_NEGATIVE_COUNT;
Node count = d_nm->mkNode(kind::BAG_COUNT, e, n);
Node gte = d_nm->mkNode(kind::GEQ, count, d_zero);
{
// TODO issue #78: refactor this with BagRewriter
// (=> true (= (bag.count e (bag e c)) c))
- inferInfo.d_id = Inference::BAG_MK_BAG_SAME_ELEMENT;
+ inferInfo.d_id = InferenceId::BAG_MK_BAG_SAME_ELEMENT;
inferInfo.d_conclusion = count.eqNode(n[1]);
}
else
// (=>
// true
// (= (bag.count e (bag x c)) (ite (= e x) c 0)))
- inferInfo.d_id = Inference::BAG_MK_BAG;
+ inferInfo.d_id = InferenceId::BAG_MK_BAG;
Node same = d_nm->mkNode(kind::EQUAL, n[0], e);
Node ite = d_nm->mkNode(kind::ITE, same, n[1], d_zero);
Node equal = count.eqNode(ite);
Node B = n[1];
InferInfo inferInfo;
- inferInfo.d_id = Inference::BAG_DISEQUALITY;
+ inferInfo.d_id = InferenceId::BAG_DISEQUALITY;
TypeNode elementType = A.getType().getBagElementType();
BoundVarManager* bvm = d_nm->getBoundVarManager();
InferInfo inferInfo;
Node skolem = getSkolem(n, inferInfo);
- inferInfo.d_id = Inference::BAG_EMPTY;
+ inferInfo.d_id = InferenceId::BAG_EMPTY;
Node count = getMultiplicityTerm(e, skolem);
Node equal = count.eqNode(d_zero);
Node A = n[0];
Node B = n[1];
InferInfo inferInfo;
- inferInfo.d_id = Inference::BAG_UNION_DISJOINT;
+ inferInfo.d_id = InferenceId::BAG_UNION_DISJOINT;
Node countA = getMultiplicityTerm(e, A);
Node countB = getMultiplicityTerm(e, B);
Node A = n[0];
Node B = n[1];
InferInfo inferInfo;
- inferInfo.d_id = Inference::BAG_UNION_MAX;
+ inferInfo.d_id = InferenceId::BAG_UNION_MAX;
Node countA = getMultiplicityTerm(e, A);
Node countB = getMultiplicityTerm(e, B);
Node A = n[0];
Node B = n[1];
InferInfo inferInfo;
- inferInfo.d_id = Inference::BAG_INTERSECTION_MIN;
+ inferInfo.d_id = InferenceId::BAG_INTERSECTION_MIN;
Node countA = getMultiplicityTerm(e, A);
Node countB = getMultiplicityTerm(e, B);
Node A = n[0];
Node B = n[1];
InferInfo inferInfo;
- inferInfo.d_id = Inference::BAG_DIFFERENCE_SUBTRACT;
+ inferInfo.d_id = InferenceId::BAG_DIFFERENCE_SUBTRACT;
Node countA = getMultiplicityTerm(e, A);
Node countB = getMultiplicityTerm(e, B);
Node A = n[0];
Node B = n[1];
InferInfo inferInfo;
- inferInfo.d_id = Inference::BAG_DIFFERENCE_REMOVE;
+ inferInfo.d_id = InferenceId::BAG_DIFFERENCE_REMOVE;
Node countA = getMultiplicityTerm(e, A);
Node countB = getMultiplicityTerm(e, B);
Node A = n[0];
InferInfo inferInfo;
- inferInfo.d_id = Inference::BAG_DUPLICATE_REMOVAL;
+ inferInfo.d_id = InferenceId::BAG_DUPLICATE_REMOVAL;
Node countA = getMultiplicityTerm(e, A);
Node skolem = getSkolem(n, inferInfo);
d_lazyFactMap.insert(fact, di);
}
-void InferProofCons::convert(InferId infer, TNode conc, TNode exp, CDProof* cdp)
+void InferProofCons::convert(InferenceId infer, TNode conc, TNode exp, CDProof* cdp)
{
Trace("dt-ipc") << "convert: " << infer << ": " << conc << " by " << exp
<< std::endl;
bool success = false;
switch (infer)
{
- case InferId::UNIF:
+ case InferenceId::DATATYPES_UNIF:
{
Assert(expv.size() == 1);
Assert(exp.getKind() == EQUAL && exp[0].getKind() == APPLY_CONSTRUCTOR
}
}
break;
- case InferId::INST:
+ case InferenceId::DATATYPES_INST:
{
if (expv.size() == 1)
{
}
}
break;
- case InferId::SPLIT:
+ case InferenceId::DATATYPES_SPLIT:
{
Assert(expv.empty());
Node t = conc.getKind() == OR ? conc[0][0] : conc[0];
success = true;
}
break;
- case InferId::COLLAPSE_SEL:
+ case InferenceId::DATATYPES_COLLAPSE_SEL:
{
Assert(exp.getKind() == EQUAL);
Node concEq = conc;
success = true;
}
break;
- case InferId::CLASH_CONFLICT:
+ case InferenceId::DATATYPES_CLASH_CONFLICT:
{
cdp->addStep(conc, PfRule::MACRO_SR_PRED_ELIM, {exp}, {});
success = true;
}
break;
- case InferId::TESTER_CONFLICT:
+ case InferenceId::DATATYPES_TESTER_CONFLICT:
{
// rewrites to false under substitution
Node fn = nm->mkConst(false);
success = true;
}
break;
- case InferId::TESTER_MERGE_CONFLICT:
+ case InferenceId::DATATYPES_TESTER_MERGE_CONFLICT:
{
Assert(expv.size() == 3);
Node tester1 = expv[0];
}
break;
// inferences currently not supported
- case InferId::LABEL_EXH:
- case InferId::BISIMILAR:
- case InferId::CYCLE:
+ case InferenceId::DATATYPES_LABEL_EXH:
+ case InferenceId::DATATYPES_BISIMILAR:
+ case InferenceId::DATATYPES_CYCLE:
default:
Trace("dt-ipc") << "...no conversion for inference " << infer
<< std::endl;
* step(s) are for concluding the conclusion of the inference. This
* information is stored in cdp.
*/
- void convert(InferId infer, TNode conc, TNode exp, CDProof* cdp);
+ void convert(InferenceId infer, TNode conc, TNode exp, CDProof* cdp);
/** A dummy context used by this class if none is provided */
context::Context d_context;
/** the proof node manager */
namespace theory {
namespace datatypes {
-const char* toString(InferId i)
-{
- switch (i)
- {
- case InferId::NONE: return "NONE";
- case InferId::UNIF: return "UNIF";
- case InferId::INST: return "INST";
- case InferId::SPLIT: return "SPLIT";
- case InferId::LABEL_EXH: return "LABEL_EXH";
- case InferId::COLLAPSE_SEL: return "COLLAPSE_SEL";
- case InferId::CLASH_CONFLICT: return "CLASH_CONFLICT";
- case InferId::TESTER_CONFLICT: return "TESTER_CONFLICT";
- case InferId::TESTER_MERGE_CONFLICT: return "TESTER_MERGE_CONFLICT";
- case InferId::BISIMILAR: return "BISIMILAR";
- case InferId::CYCLE: return "CYCLE";
- default: return "?";
- }
-}
-
-std::ostream& operator<<(std::ostream& out, InferId i)
-{
- out << toString(i);
- return out;
-}
-
DatatypesInference::DatatypesInference(InferenceManager* im,
Node conc,
Node exp,
- InferId i)
+ InferenceId i)
: SimpleTheoryInternalFact(conc, exp, nullptr), d_im(im), d_id(i)
{
// false is not a valid explanation
return d_im->processDtFact(d_conc, d_exp, d_id);
}
-InferId DatatypesInference::getInferId() const { return d_id; }
+InferenceId DatatypesInference::getInferId() const { return d_id; }
} // namespace datatypes
} // namespace theory
#include "context/cdhashmap.h"
#include "expr/node.h"
#include "theory/inference_manager_buffered.h"
+#include "theory/inference_id.h"
namespace CVC4 {
namespace theory {
namespace datatypes {
-enum class InferId : uint32_t
-{
- NONE,
- // (= (C t1 ... tn) (C s1 .. sn)) => (= ti si)
- UNIF,
- // ((_ is Ci) t) => (= t (Ci (sel_1 t) ... (sel_n t)))
- INST,
- // (or ((_ is C1) t) V ... V ((_ is Cn) t))
- SPLIT,
- // (not ((_ is C1) t)) ^ ... [j] ... ^ (not ((_ is Cn) t)) => ((_ is Cj) t)
- LABEL_EXH,
- // (= t (Ci t1 ... tn)) => (= (sel_j t) rewrite((sel_j (Ci t1 ... tn))))
- COLLAPSE_SEL,
- // (= (Ci t1...tn) (Cj t1...tn)) => false
- CLASH_CONFLICT,
- // ((_ is Ci) t) ^ (= t (Cj t1 ... tn)) => false
- TESTER_CONFLICT,
- // ((_ is Ci) t) ^ ((_ is Cj) s) ^ (= t s) => false
- TESTER_MERGE_CONFLICT,
- // bisimilarity for codatatypes
- BISIMILAR,
- // cycle conflict for datatypes
- CYCLE,
-};
-
-/**
- * Converts an inference to a string. Note: This function is also used in
- * `safe_print()`. Changing this functions name or signature will result in
- * `safe_print()` printing "<unsupported>" instead of the proper strings for
- * the enum values.
- *
- * @param i The inference
- * @return The name of the inference
- */
-const char* toString(InferId i);
-
-/**
- * Writes an inference name to a stream.
- *
- * @param out The stream to write to
- * @param i The inference to write to the stream
- * @return The stream
- */
-std::ostream& operator<<(std::ostream& out, InferId i);
-
class InferenceManager;
/**
DatatypesInference(InferenceManager* im,
Node conc,
Node exp,
- InferId i = InferId::NONE);
+ InferenceId i = InferenceId::UNKNOWN);
/**
* Must communicate fact method.
* The datatypes decision procedure makes "internal" inferences :
*/
bool process(TheoryInferenceManager* im, bool asLemma) override;
/** Get the inference identifier */
- InferId getInferId() const;
+ InferenceId getInferId() const;
private:
/** Pointer to the inference manager */
InferenceManager* d_im;
/** The inference */
- InferId d_id;
+ InferenceId d_id;
};
} // namespace datatypes
void InferenceManager::addPendingInference(Node conc,
Node exp,
bool forceLemma,
- InferId i)
+ InferenceId i)
{
if (forceLemma)
{
}
void InferenceManager::sendDtLemma(Node lem,
- InferId id,
+ InferenceId id,
LemmaProperty p,
bool doCache)
{
}
}
-void InferenceManager::sendDtConflict(const std::vector<Node>& conf, InferId id)
+void InferenceManager::sendDtConflict(const std::vector<Node>& conf, InferenceId id)
{
if (isProofEnabled())
{
bool InferenceManager::isProofEnabled() const { return d_ipc != nullptr; }
bool InferenceManager::processDtLemma(
- Node conc, Node exp, InferId id, LemmaProperty p, bool doCache)
+ Node conc, Node exp, InferenceId id, LemmaProperty p, bool doCache)
{
// set up a proof constructor
std::shared_ptr<InferProofCons> ipcl;
return true;
}
-bool InferenceManager::processDtFact(Node conc, Node exp, InferId id)
+bool InferenceManager::processDtFact(Node conc, Node exp, InferenceId id)
{
conc = prepareDtInference(conc, exp, id, d_ipc.get());
// assert the internal fact, which has the same issue as above
Node InferenceManager::prepareDtInference(Node conc,
Node exp,
- InferId id,
+ InferenceId id,
InferProofCons* ipc)
{
Trace("dt-lemma-debug") << "prepareDtInference : " << conc << " via " << exp
void addPendingInference(Node conc,
Node exp,
bool forceLemma = false,
- InferId i = InferId::NONE);
+ InferenceId i = InferenceId::UNKNOWN);
/**
* Process the current lemmas and facts. This is a custom method that can
* be seen as overriding the behavior of calling both doPendingLemmas and
* Send lemma immediately on the output channel
*/
void sendDtLemma(Node lem,
- InferId i = InferId::NONE,
+ InferenceId i = InferenceId::UNKNOWN,
LemmaProperty p = LemmaProperty::NONE,
bool doCache = true);
/**
* Send conflict immediately on the output channel
*/
- void sendDtConflict(const std::vector<Node>& conf, InferId i = InferId::NONE);
+ void sendDtConflict(const std::vector<Node>& conf, InferenceId i = InferenceId::UNKNOWN);
/**
* Send lemmas with property NONE on the output channel immediately.
* Returns true if any lemma was sent.
*/
bool processDtLemma(Node conc,
Node exp,
- InferId id,
+ InferenceId id,
LemmaProperty p = LemmaProperty::NONE,
bool doCache = true);
/**
* Process datatype inference as a fact
*/
- bool processDtFact(Node conc, Node exp, InferId id);
+ bool processDtFact(Node conc, Node exp, InferenceId id);
/**
* Helper function for the above methods. Returns the conclusion, which
* may be modified so that it is compatible with proofs. If proofs are
* status for proof generation. If this is not done, then it is possible
* to have proofs with missing connections and hence free assumptions.
*/
- Node prepareDtInference(Node conc, Node exp, InferId id, InferProofCons* ipc);
+ Node prepareDtInference(Node conc, Node exp, InferenceId id, InferProofCons* ipc);
/** The false node */
Node d_false;
/**
* Counts the number of applications of each type of inference processed by
* the above method as facts, lemmas and conflicts.
*/
- HistogramStat<InferId> d_inferenceLemmas;
- HistogramStat<InferId> d_inferenceFacts;
- HistogramStat<InferId> d_inferenceConflicts;
+ HistogramStat<InferenceId> d_inferenceLemmas;
+ HistogramStat<InferenceId> d_inferenceFacts;
+ HistogramStat<InferenceId> d_inferenceConflicts;
/** Pointer to the proof node manager */
ProofNodeManager* d_pnm;
/** The inference to proof converter */
//this may not be necessary?
//if only one constructor, then this term must be this constructor
Node t = utils::mkTester(n, 0, dt);
- d_im.addPendingInference(t, d_true, false, InferId::SPLIT);
+ d_im.addPendingInference(t, d_true, false, InferenceId::DATATYPES_SPLIT);
Trace("datatypes-infer") << "DtInfer : 1-cons (full) : " << t << std::endl;
}else{
Assert(consIndex != -1 || dt.isSygus());
Node lemma = utils::mkSplit(n, dt);
Trace("dt-split-debug") << "Split lemma is : " << lemma << std::endl;
d_im.sendDtLemma(lemma,
- InferId::SPLIT,
+ InferenceId::DATATYPES_SPLIT,
LemmaProperty::SEND_ATOMS,
false);
}
conf.push_back(unifEq);
Trace("dt-conflict")
<< "CONFLICT: Clash conflict : " << conf << std::endl;
- d_im.sendDtConflict(conf, InferId::CLASH_CONFLICT);
+ d_im.sendDtConflict(conf, InferenceId::DATATYPES_CLASH_CONFLICT);
return;
}
else
for( int i=0; i<(int)cons1.getNumChildren(); i++ ) {
if( !areEqual( cons1[i], cons2[i] ) ){
Node eq = cons1[i].eqNode( cons2[i] );
- d_im.addPendingInference(eq, unifEq, false, InferId::UNIF);
+ d_im.addPendingInference(eq, unifEq, false, InferenceId::DATATYPES_UNIF);
Trace("datatypes-infer") << "DtInfer : cons-inj : " << eq << " by " << unifEq << std::endl;
}
}
conf.push_back(t_arg.eqNode(eqc->d_constructor.get()));
Trace("dt-conflict")
<< "CONFLICT: Tester eq conflict " << conf << std::endl;
- d_im.sendDtConflict(conf, InferId::TESTER_CONFLICT);
+ d_im.sendDtConflict(conf, InferenceId::DATATYPES_TESTER_CONFLICT);
return;
}else{
makeConflict = true;
: utils::mkTester(t_arg, testerIndex, dt);
Node t_concl_exp = ( nb.getNumChildren() == 1 ) ? nb.getChild( 0 ) : nb;
d_im.addPendingInference(
- t_concl, t_concl_exp, false, InferId::LABEL_EXH);
+ t_concl, t_concl_exp, false, InferenceId::DATATYPES_LABEL_EXH);
Trace("datatypes-infer") << "DtInfer : label : " << t_concl << " by " << t_concl_exp << std::endl;
return;
}
conf.push_back(t);
conf.push_back(jt[0].eqNode(t_arg));
Trace("dt-conflict") << "CONFLICT: Tester conflict : " << conf << std::endl;
- d_im.sendDtConflict(conf, InferId::TESTER_MERGE_CONFLICT);
+ d_im.sendDtConflict(conf, InferenceId::DATATYPES_TESTER_MERGE_CONFLICT);
}
}
conf.push_back(t[0][0].eqNode(c));
Trace("dt-conflict")
<< "CONFLICT: Tester merge eq conflict : " << conf << std::endl;
- d_im.sendDtConflict(conf, InferId::TESTER_CONFLICT);
+ d_im.sendDtConflict(conf, InferenceId::DATATYPES_TESTER_CONFLICT);
return;
}
}
bool forceLemma = !s.getType().isDatatype();
Trace("datatypes-infer") << "DtInfer : collapse sel";
Trace("datatypes-infer") << " : " << eq << " by " << eq_exp << std::endl;
- d_im.addPendingInference(eq, eq_exp, forceLemma, InferId::COLLAPSE_SEL);
+ d_im.addPendingInference(eq, eq_exp, forceLemma, InferenceId::DATATYPES_COLLAPSE_SEL);
}
}
}
}
Trace("datatypes-infer-debug") << "DtInstantiate : " << eqc << " " << eq
<< " forceLemma = " << forceLemma << std::endl;
- d_im.addPendingInference(eq, exp, forceLemma, InferId::INST);
+ d_im.addPendingInference(eq, exp, forceLemma, InferenceId::DATATYPES_INST);
Trace("datatypes-infer") << "DtInfer : instantiate : " << eq << " by " << exp
<< std::endl;
}
Assert(expl.size() > 0);
Trace("dt-conflict")
<< "CONFLICT: Cycle conflict : " << expl << std::endl;
- d_im.sendDtConflict(expl, InferId::CYCLE);
+ d_im.sendDtConflict(expl, InferenceId::DATATYPES_CYCLE);
return;
}
}
Trace("dt-cdt") << std::endl;
Node eq = part_out[i][0].eqNode( part_out[i][j] );
Node eqExp = NodeManager::currentNM()->mkAnd(exp);
- d_im.addPendingInference(eq, eqExp, false, InferId::BISIMILAR);
+ d_im.addPendingInference(eq, eqExp, false, InferenceId::DATATYPES_BISIMILAR);
Trace("datatypes-infer") << "DtInfer : cdt-bisimilar : " << eq << " by " << eqExp << std::endl;
}
}
--- /dev/null
+/********************* */
+/*! \file inference_id.cpp
+ ** \verbatim
+ ** Top contributors (to current version):
+ ** Gereon Kremer, Yoni Zohar
+ ** This file is part of the CVC4 project.
+ ** Copyright (c) 2009-2020 by the authors listed in the file AUTHORS
+ ** in the top-level source directory and their institutional affiliations.
+ ** All rights reserved. See the file COPYING in the top-level source
+ ** directory for licensing information.\endverbatim
+ **
+ ** \brief Implementation of inference enumeration.
+ **/
+
+#include "theory/inference_id.h"
+
+namespace CVC4 {
+namespace theory {
+
+const char* toString(InferenceId i)
+{
+ switch (i)
+ {
+ case InferenceId::ARITH_NL_CONGRUENCE: return "CONGRUENCE";
+ case InferenceId::ARITH_NL_SHARED_TERM_VALUE_SPLIT: return "SHARED_TERM_VALUE_SPLIT";
+ case InferenceId::ARITH_NL_SPLIT_ZERO: return "SPLIT_ZERO";
+ case InferenceId::ARITH_NL_SIGN: return "SIGN";
+ case InferenceId::ARITH_NL_COMPARISON: return "COMPARISON";
+ case InferenceId::ARITH_NL_INFER_BOUNDS: return "INFER_BOUNDS";
+ case InferenceId::ARITH_NL_INFER_BOUNDS_NT: return "INFER_BOUNDS_NT";
+ case InferenceId::ARITH_NL_FACTOR: return "FACTOR";
+ case InferenceId::ARITH_NL_RES_INFER_BOUNDS: return "RES_INFER_BOUNDS";
+ case InferenceId::ARITH_NL_TANGENT_PLANE: return "TANGENT_PLANE";
+ case InferenceId::ARITH_NL_T_PURIFY_ARG: return "T_PURIFY_ARG";
+ case InferenceId::ARITH_NL_T_INIT_REFINE: return "T_INIT_REFINE";
+ case InferenceId::ARITH_NL_T_PI_BOUND: return "T_PI_BOUND";
+ case InferenceId::ARITH_NL_T_MONOTONICITY: return "T_MONOTONICITY";
+ case InferenceId::ARITH_NL_T_SECANT: return "T_SECANT";
+ case InferenceId::ARITH_NL_T_TANGENT: return "T_TANGENT";
+ case InferenceId::ARITH_NL_IAND_INIT_REFINE: return "IAND_INIT_REFINE";
+ case InferenceId::ARITH_NL_IAND_VALUE_REFINE: return "IAND_VALUE_REFINE";
+ case InferenceId::ARITH_NL_IAND_SUM_REFINE: return "IAND_SUM_REFINE";
+ case InferenceId::ARITH_NL_IAND_BITWISE_REFINE: return "IAND_BITWISE_REFINE";
+ case InferenceId::ARITH_NL_CAD_CONFLICT: return "CAD_CONFLICT";
+ case InferenceId::ARITH_NL_CAD_EXCLUDED_INTERVAL: return "CAD_EXCLUDED_INTERVAL";
+ case InferenceId::ARITH_NL_ICP_CONFLICT: return "ICP_CONFLICT";
+ case InferenceId::ARITH_NL_ICP_PROPAGATION: return "ICP_PROPAGATION";
+
+ case InferenceId::BAG_NON_NEGATIVE_COUNT: return "BAG_NON_NEGATIVE_COUNT";
+ case InferenceId::BAG_MK_BAG_SAME_ELEMENT: return "BAG_MK_BAG_SAME_ELEMENT";
+ case InferenceId::BAG_MK_BAG: return "BAG_MK_BAG";
+ case InferenceId::BAG_EQUALITY: return "BAG_EQUALITY";
+ case InferenceId::BAG_DISEQUALITY: return "BAG_DISEQUALITY";
+ case InferenceId::BAG_EMPTY: return "BAG_EMPTY";
+ case InferenceId::BAG_UNION_DISJOINT: return "BAG_UNION_DISJOINT";
+ case InferenceId::BAG_UNION_MAX: return "BAG_UNION_MAX";
+ case InferenceId::BAG_INTERSECTION_MIN: return "BAG_INTERSECTION_MIN";
+ case InferenceId::BAG_DIFFERENCE_SUBTRACT: return "BAG_DIFFERENCE_SUBTRACT";
+ case InferenceId::BAG_DIFFERENCE_REMOVE: return "BAG_DIFFERENCE_REMOVE";
+ case InferenceId::BAG_DUPLICATE_REMOVAL: return "BAG_DUPLICATE_REMOVAL";
+
+ case InferenceId::DATATYPES_UNIF: return "UNIF";
+ case InferenceId::DATATYPES_INST: return "INST";
+ case InferenceId::DATATYPES_SPLIT: return "SPLIT";
+ case InferenceId::DATATYPES_LABEL_EXH: return "LABEL_EXH";
+ case InferenceId::DATATYPES_COLLAPSE_SEL: return "COLLAPSE_SEL";
+ case InferenceId::DATATYPES_CLASH_CONFLICT: return "CLASH_CONFLICT";
+ case InferenceId::DATATYPES_TESTER_CONFLICT: return "TESTER_CONFLICT";
+ case InferenceId::DATATYPES_TESTER_MERGE_CONFLICT: return "TESTER_MERGE_CONFLICT";
+ case InferenceId::DATATYPES_BISIMILAR: return "BISIMILAR";
+ case InferenceId::DATATYPES_CYCLE: return "CYCLE";
+
+ case InferenceId::STRINGS_I_NORM_S: return "I_NORM_S";
+ case InferenceId::STRINGS_I_CONST_MERGE: return "I_CONST_MERGE";
+ case InferenceId::STRINGS_I_CONST_CONFLICT: return "I_CONST_CONFLICT";
+ case InferenceId::STRINGS_I_NORM: return "I_NORM";
+ case InferenceId::STRINGS_UNIT_INJ: return "UNIT_INJ";
+ case InferenceId::STRINGS_UNIT_CONST_CONFLICT: return "UNIT_CONST_CONFLICT";
+ case InferenceId::STRINGS_UNIT_INJ_DEQ: return "UNIT_INJ_DEQ";
+ case InferenceId::STRINGS_CARD_SP: return "CARD_SP";
+ case InferenceId::STRINGS_CARDINALITY: return "CARDINALITY";
+ case InferenceId::STRINGS_I_CYCLE_E: return "I_CYCLE_E";
+ case InferenceId::STRINGS_I_CYCLE: return "I_CYCLE";
+ case InferenceId::STRINGS_F_CONST: return "F_CONST";
+ case InferenceId::STRINGS_F_UNIFY: return "F_UNIFY";
+ case InferenceId::STRINGS_F_ENDPOINT_EMP: return "F_ENDPOINT_EMP";
+ case InferenceId::STRINGS_F_ENDPOINT_EQ: return "F_ENDPOINT_EQ";
+ case InferenceId::STRINGS_F_NCTN: return "F_NCTN";
+ case InferenceId::STRINGS_N_EQ_CONF: return "N_EQ_CONF";
+ case InferenceId::STRINGS_N_ENDPOINT_EMP: return "N_ENDPOINT_EMP";
+ case InferenceId::STRINGS_N_UNIFY: return "N_UNIFY";
+ case InferenceId::STRINGS_N_ENDPOINT_EQ: return "N_ENDPOINT_EQ";
+ case InferenceId::STRINGS_N_CONST: return "N_CONST";
+ case InferenceId::STRINGS_INFER_EMP: return "INFER_EMP";
+ case InferenceId::STRINGS_SSPLIT_CST_PROP: return "SSPLIT_CST_PROP";
+ case InferenceId::STRINGS_SSPLIT_VAR_PROP: return "SSPLIT_VAR_PROP";
+ case InferenceId::STRINGS_LEN_SPLIT: return "LEN_SPLIT";
+ case InferenceId::STRINGS_LEN_SPLIT_EMP: return "LEN_SPLIT_EMP";
+ case InferenceId::STRINGS_SSPLIT_CST: return "SSPLIT_CST";
+ case InferenceId::STRINGS_SSPLIT_VAR: return "SSPLIT_VAR";
+ case InferenceId::STRINGS_FLOOP: return "FLOOP";
+ case InferenceId::STRINGS_FLOOP_CONFLICT: return "FLOOP_CONFLICT";
+ case InferenceId::STRINGS_NORMAL_FORM: return "NORMAL_FORM";
+ case InferenceId::STRINGS_N_NCTN: return "N_NCTN";
+ case InferenceId::STRINGS_LEN_NORM: return "LEN_NORM";
+ case InferenceId::STRINGS_DEQ_DISL_EMP_SPLIT: return "DEQ_DISL_EMP_SPLIT";
+ case InferenceId::STRINGS_DEQ_DISL_FIRST_CHAR_EQ_SPLIT:
+ return "DEQ_DISL_FIRST_CHAR_EQ_SPLIT";
+ case InferenceId::STRINGS_DEQ_DISL_FIRST_CHAR_STRING_SPLIT:
+ return "DEQ_DISL_FIRST_CHAR_STRING_SPLIT";
+ case InferenceId::STRINGS_DEQ_STRINGS_EQ: return "DEQ_STRINGS_EQ";
+ case InferenceId::STRINGS_DEQ_DISL_STRINGS_SPLIT: return "DEQ_DISL_STRINGS_SPLIT";
+ case InferenceId::STRINGS_DEQ_LENS_EQ: return "DEQ_LENS_EQ";
+ case InferenceId::STRINGS_DEQ_NORM_EMP: return "DEQ_NORM_EMP";
+ case InferenceId::STRINGS_DEQ_LENGTH_SP: return "DEQ_LENGTH_SP";
+ case InferenceId::STRINGS_CODE_PROXY: return "CODE_PROXY";
+ case InferenceId::STRINGS_CODE_INJ: return "CODE_INJ";
+ case InferenceId::STRINGS_RE_NF_CONFLICT: return "RE_NF_CONFLICT";
+ case InferenceId::STRINGS_RE_UNFOLD_POS: return "RE_UNFOLD_POS";
+ case InferenceId::STRINGS_RE_UNFOLD_NEG: return "RE_UNFOLD_NEG";
+ case InferenceId::STRINGS_RE_INTER_INCLUDE: return "RE_INTER_INCLUDE";
+ case InferenceId::STRINGS_RE_INTER_CONF: return "RE_INTER_CONF";
+ case InferenceId::STRINGS_RE_INTER_INFER: return "RE_INTER_INFER";
+ case InferenceId::STRINGS_RE_DELTA: return "RE_DELTA";
+ case InferenceId::STRINGS_RE_DELTA_CONF: return "RE_DELTA_CONF";
+ case InferenceId::STRINGS_RE_DERIVE: return "RE_DERIVE";
+ case InferenceId::STRINGS_EXTF: return "EXTF";
+ case InferenceId::STRINGS_EXTF_N: return "EXTF_N";
+ case InferenceId::STRINGS_EXTF_D: return "EXTF_D";
+ case InferenceId::STRINGS_EXTF_D_N: return "EXTF_D_N";
+ case InferenceId::STRINGS_EXTF_EQ_REW: return "EXTF_EQ_REW";
+ case InferenceId::STRINGS_CTN_TRANS: return "CTN_TRANS";
+ case InferenceId::STRINGS_CTN_DECOMPOSE: return "CTN_DECOMPOSE";
+ case InferenceId::STRINGS_CTN_NEG_EQUAL: return "CTN_NEG_EQUAL";
+ case InferenceId::STRINGS_CTN_POS: return "CTN_POS";
+ case InferenceId::STRINGS_REDUCTION: return "REDUCTION";
+ case InferenceId::STRINGS_PREFIX_CONFLICT: return "PREFIX_CONFLICT";
+
+ default: return "?";
+ }
+}
+
+std::ostream& operator<<(std::ostream& out, InferenceId i)
+{
+ out << toString(i);
+ return out;
+}
+
+} // namespace theory
+} // namespace CVC4
--- /dev/null
+/********************* */
+/*! \file inference_id.h
+ ** \verbatim
+ ** Top contributors (to current version):
+ ** Andrew Reynolds, Gereon Kremer, Yoni Zohar
+ ** This file is part of the CVC4 project.
+ ** Copyright (c) 2009-2020 by the authors listed in the file AUTHORS
+ ** in the top-level source directory and their institutional affiliations.
+ ** All rights reserved. See the file COPYING in the top-level source
+ ** directory for licensing information.\endverbatim
+ **
+ ** \brief Inference enumeration.
+ **/
+
+#include "cvc4_private.h"
+
+#ifndef CVC4__THEORY__INFERENCE_ID_H
+#define CVC4__THEORY__INFERENCE_ID_H
+
+#include <map>
+#include <vector>
+
+#include "util/safe_print.h"
+
+namespace CVC4 {
+namespace theory {
+
+/** Types of inferences used in the procedure
+ *
+ * Note: The order in this enum matters in certain cases (e.g. inferences
+ * related to normal forms in strings), where inferences that come first are
+ * generally preferred.
+ *
+ * Notice that an inference is intentionally distinct from PfRule. An
+ * inference captures *why* we performed a reasoning step, and a PfRule
+ * rule captures *what* reasoning step was used. For instance, the inference
+ * STRINGS_LEN_SPLIT translates to PfRule::SPLIT. The use of stats on
+ * inferences allows us to know that we performed N splits (PfRule::SPLIT)
+ * because we wanted to split on lengths (InferenceId::STRINGS_LEN_SPLIT).
+ */
+enum class InferenceId
+{
+ //-------------------- core
+ // simple congruence x=y => f(x)=f(y)
+ ARITH_NL_CONGRUENCE,
+ // shared term value split (for naive theory combination)
+ ARITH_NL_SHARED_TERM_VALUE_SPLIT,
+ //-------------------- incremental linearization solver
+ // splitting on zero (NlSolver::checkSplitZero)
+ ARITH_NL_SPLIT_ZERO,
+ // based on sign (NlSolver::checkMonomialSign)
+ ARITH_NL_SIGN,
+ // based on comparing (abs) model values (NlSolver::checkMonomialMagnitude)
+ ARITH_NL_COMPARISON,
+ // based on inferring bounds (NlSolver::checkMonomialInferBounds)
+ ARITH_NL_INFER_BOUNDS,
+ // same as above, for inferences that introduce new terms
+ ARITH_NL_INFER_BOUNDS_NT,
+ // factoring (NlSolver::checkFactoring)
+ ARITH_NL_FACTOR,
+ // resolution bound inferences (NlSolver::checkMonomialInferResBounds)
+ ARITH_NL_RES_INFER_BOUNDS,
+ // tangent planes (NlSolver::checkTangentPlanes)
+ ARITH_NL_TANGENT_PLANE,
+ //-------------------- transcendental solver
+ // purification of arguments to transcendental functions
+ ARITH_NL_T_PURIFY_ARG,
+ // initial refinement (TranscendentalSolver::checkTranscendentalInitialRefine)
+ ARITH_NL_T_INIT_REFINE,
+ // pi bounds
+ ARITH_NL_T_PI_BOUND,
+ // monotonicity (TranscendentalSolver::checkTranscendentalMonotonic)
+ ARITH_NL_T_MONOTONICITY,
+ // tangent refinement (TranscendentalSolver::checkTranscendentalTangentPlanes)
+ ARITH_NL_T_TANGENT,
+ // secant refinement, the dual of the above inference
+ ARITH_NL_T_SECANT,
+ //-------------------- iand solver
+ // initial refinements (IAndSolver::checkInitialRefine)
+ ARITH_NL_IAND_INIT_REFINE,
+ // value refinements (IAndSolver::checkFullRefine)
+ ARITH_NL_IAND_VALUE_REFINE,
+ // sum refinements (IAndSolver::checkFullRefine)
+ ARITH_NL_IAND_SUM_REFINE,
+ // bitwise refinements (IAndSolver::checkFullRefine)
+ ARITH_NL_IAND_BITWISE_REFINE,
+ //-------------------- cad solver
+ // conflict / infeasible subset obtained from cad
+ ARITH_NL_CAD_CONFLICT,
+ // excludes an interval for a single variable
+ ARITH_NL_CAD_EXCLUDED_INTERVAL,
+ //-------------------- icp solver
+ // conflict obtained from icp
+ ARITH_NL_ICP_CONFLICT,
+ // propagation / contraction of variable bounds from icp
+ ARITH_NL_ICP_PROPAGATION,
+ //-------------------- unknown
+
+
+ BAG_NON_NEGATIVE_COUNT,
+ BAG_MK_BAG_SAME_ELEMENT,
+ BAG_MK_BAG,
+ BAG_EQUALITY,
+ BAG_DISEQUALITY,
+ BAG_EMPTY,
+ BAG_UNION_DISJOINT,
+ BAG_UNION_MAX,
+ BAG_INTERSECTION_MIN,
+ BAG_DIFFERENCE_SUBTRACT,
+ BAG_DIFFERENCE_REMOVE,
+ BAG_DUPLICATE_REMOVAL,
+
+ // (= (C t1 ... tn) (C s1 .. sn)) => (= ti si)
+ DATATYPES_UNIF,
+ // ((_ is Ci) t) => (= t (Ci (sel_1 t) ... (sel_n t)))
+ DATATYPES_INST,
+ // (or ((_ is C1) t) V ... V ((_ is Cn) t))
+ DATATYPES_SPLIT,
+ // (not ((_ is C1) t)) ^ ... [j] ... ^ (not ((_ is Cn) t)) => ((_ is Cj) t)
+ DATATYPES_LABEL_EXH,
+ // (= t (Ci t1 ... tn)) => (= (sel_j t) rewrite((sel_j (Ci t1 ... tn))))
+ DATATYPES_COLLAPSE_SEL,
+ // (= (Ci t1...tn) (Cj t1...tn)) => false
+ DATATYPES_CLASH_CONFLICT,
+ // ((_ is Ci) t) ^ (= t (Cj t1 ... tn)) => false
+ DATATYPES_TESTER_CONFLICT,
+ // ((_ is Ci) t) ^ ((_ is Cj) s) ^ (= t s) => false
+ DATATYPES_TESTER_MERGE_CONFLICT,
+ // bisimilarity for codatatypes
+ DATATYPES_BISIMILAR,
+ // cycle conflict for datatypes
+ DATATYPES_CYCLE,
+
+ //-------------------------------------- base solver
+ // initial normalize singular
+ // x1 = "" ^ ... ^ x_{i-1} = "" ^ x_{i+1} = "" ^ ... ^ xn = "" =>
+ // x1 ++ ... ++ xn = xi
+ STRINGS_I_NORM_S,
+ // initial constant merge
+ // explain_constant(x, c) => x = c
+ // Above, explain_constant(x,c) is a basic explanation of why x must be equal
+ // to string constant c, which is computed by taking arguments of
+ // concatenation terms that are entailed to be constants. For example:
+ // ( y = "AB" ^ z = "C" ) => y ++ z = "ABC"
+ STRINGS_I_CONST_MERGE,
+ // initial constant conflict
+ // ( explain_constant(x, c1) ^ explain_constant(x, c2) ^ x = y) => false
+ // where c1 != c2.
+ STRINGS_I_CONST_CONFLICT,
+ // initial normalize
+ // Given two concatenation terms, this is applied when we find that they are
+ // equal after e.g. removing strings that are currently empty. For example:
+ // y = "" ^ z = "" => x ++ y = z ++ x
+ STRINGS_I_NORM,
+ // injectivity of seq.unit
+ // (seq.unit x) = (seq.unit y) => x=y, or
+ // (seq.unit x) = (seq.unit c) => x=c
+ STRINGS_UNIT_INJ,
+ // unit constant conflict
+ // (seq.unit x) = C => false if |C| != 1.
+ STRINGS_UNIT_CONST_CONFLICT,
+ // injectivity of seq.unit for disequality
+ // (seq.unit x) != (seq.unit y) => x != y, or
+ // (seq.unit x) != (seq.unit c) => x != c
+ STRINGS_UNIT_INJ_DEQ,
+ // A split due to cardinality
+ STRINGS_CARD_SP,
+ // The cardinality inference for strings, see Liang et al CAV 2014.
+ STRINGS_CARDINALITY,
+ //-------------------------------------- end base solver
+ //-------------------------------------- core solver
+ // A cycle in the empty string equivalence class, e.g.:
+ // x ++ y = "" => x = ""
+ // This is typically not applied due to length constraints implying emptiness.
+ STRINGS_I_CYCLE_E,
+ // A cycle in the containment ordering.
+ // x = y ++ x => y = "" or
+ // x = y ++ z ^ y = x ++ w => z = "" ^ w = ""
+ // This is typically not applied due to length constraints implying emptiness.
+ STRINGS_I_CYCLE,
+ // Flat form constant
+ // x = y ^ x = z ++ c ... ^ y = z ++ d => false
+ // where c and d are distinct constants.
+ STRINGS_F_CONST,
+ // Flat form unify
+ // x = y ^ x = z ++ x' ... ^ y = z ++ y' ^ len(x') = len(y') => x' = y'
+ // Notice flat form instances are similar to normal form inferences but do
+ // not involve recursive explanations.
+ STRINGS_F_UNIFY,
+ // Flat form endpoint empty
+ // x = y ^ x = z ^ y = z ++ y' => y' = ""
+ STRINGS_F_ENDPOINT_EMP,
+ // Flat form endpoint equal
+ // x = y ^ x = z ++ x' ^ y = z ++ y' => x' = y'
+ STRINGS_F_ENDPOINT_EQ,
+ // Flat form not contained
+ // x = c ^ x = y => false when rewrite( contains( y, c ) ) = false
+ STRINGS_F_NCTN,
+ // Normal form equality conflict
+ // x = N[x] ^ y = N[y] ^ x=y => false
+ // where Rewriter::rewrite(N[x]=N[y]) = false.
+ STRINGS_N_EQ_CONF,
+ // Given two normal forms, infers that the remainder one of them has to be
+ // empty. For example:
+ // If x1 ++ x2 = y1 and x1 = y1, then x2 = ""
+ STRINGS_N_ENDPOINT_EMP,
+ // Given two normal forms, infers that two components have to be the same if
+ // they have the same length. For example:
+ // If x1 ++ x2 = x3 ++ x4 and len(x1) = len(x3) then x1 = x3
+ STRINGS_N_UNIFY,
+ // Given two normal forms, infers that the endpoints have to be the same. For
+ // example:
+ // If x1 ++ x2 = x3 ++ x4 ++ x5 and x1 = x3 then x2 = x4 ++ x5
+ STRINGS_N_ENDPOINT_EQ,
+ // Given two normal forms with constant endpoints, infers a conflict if the
+ // endpoints do not agree. For example:
+ // If "abc" ++ ... = "bc" ++ ... then conflict
+ STRINGS_N_CONST,
+ // infer empty, for example:
+ // (~) x = ""
+ // This is inferred when we encounter an x such that x = "" rewrites to a
+ // constant. This inference is used for instance when we otherwise would have
+ // split on the emptiness of x but the rewriter tells us the emptiness of x
+ // can be inferred.
+ STRINGS_INFER_EMP,
+ // string split constant propagation, for example:
+ // x = y, x = "abc", y = y1 ++ "b" ++ y2
+ // implies y1 = "a" ++ y1'
+ STRINGS_SSPLIT_CST_PROP,
+ // string split variable propagation, for example:
+ // x = y, x = x1 ++ x2, y = y1 ++ y2, len( x1 ) >= len( y1 )
+ // implies x1 = y1 ++ x1'
+ // This is inspired by Zheng et al CAV 2015.
+ STRINGS_SSPLIT_VAR_PROP,
+ // length split, for example:
+ // len( x1 ) = len( y1 ) V len( x1 ) != len( y1 )
+ // This is inferred when e.g. x = y, x = x1 ++ x2, y = y1 ++ y2.
+ STRINGS_LEN_SPLIT,
+ // length split empty, for example:
+ // z = "" V z != ""
+ // This is inferred when, e.g. x = y, x = z ++ x1, y = y1 ++ z
+ STRINGS_LEN_SPLIT_EMP,
+ // string split constant
+ // x = y, x = "c" ++ x2, y = y1 ++ y2, y1 != ""
+ // implies y1 = "c" ++ y1'
+ // This is a special case of F-Split in Figure 5 of Liang et al CAV 2014.
+ STRINGS_SSPLIT_CST,
+ // string split variable, for example:
+ // x = y, x = x1 ++ x2, y = y1 ++ y2
+ // implies x1 = y1 ++ x1' V y1 = x1 ++ y1'
+ // This is rule F-Split in Figure 5 of Liang et al CAV 2014.
+ STRINGS_SSPLIT_VAR,
+ // flat form loop, for example:
+ // x = y, x = x1 ++ z, y = z ++ y2
+ // implies z = u2 ++ u1, u in ( u1 ++ u2 )*, x1 = u2 ++ u, y2 = u ++ u1
+ // for fresh u, u1, u2.
+ // This is the rule F-Loop from Figure 5 of Liang et al CAV 2014.
+ STRINGS_FLOOP,
+ // loop conflict ???
+ STRINGS_FLOOP_CONFLICT,
+ // Normal form inference
+ // x = y ^ z = y => x = z
+ // This is applied when y is the normal form of both x and z.
+ STRINGS_NORMAL_FORM,
+ // Normal form not contained, same as STRINGS_F_NCTN but for normal forms
+ STRINGS_N_NCTN,
+ // Length normalization
+ // x = y => len( x ) = len( y )
+ // Typically applied when y is the normal form of x.
+ STRINGS_LEN_NORM,
+ // When x ++ x' ++ ... != "abc" ++ y' ++ ... ^ len(x) != len(y), we apply the
+ // inference:
+ // x = "" v x != ""
+ STRINGS_DEQ_DISL_EMP_SPLIT,
+ // When x ++ x' ++ ... != "abc" ++ y' ++ ... ^ len(x) = 1, we apply the
+ // inference:
+ // x = "a" v x != "a"
+ STRINGS_DEQ_DISL_FIRST_CHAR_EQ_SPLIT,
+ // When x ++ x' ++ ... != "abc" ++ y' ++ ... ^ len(x) != "", we apply the
+ // inference:
+ // ni = x ++ x' ++ ... ^ nj = "abc" ++ y' ++ ... ^ x != "" --->
+ // x = k1 ++ k2 ^ len(k1) = 1 ^ (k1 != "a" v x = "a" ++ k2)
+ STRINGS_DEQ_DISL_FIRST_CHAR_STRING_SPLIT,
+ // When x ++ x' ++ ... != y ++ y' ++ ... ^ len(x) != len(y), we apply the
+ // inference:
+ // ni = x ++ x' ++ ... ^ nj = y ++ y' ++ ... ^ ni != nj ^ len(x) != len(y)
+ // --->
+ // len(k1) = len(x) ^ len(k2) = len(y) ^ (y = k1 ++ k3 v x = k1 ++ k2)
+ STRINGS_DEQ_DISL_STRINGS_SPLIT,
+ // When x ++ x' ++ ... != y ++ y' ++ ... ^ len(x) = len(y), we apply the
+ // inference:
+ // x = y v x != y
+ STRINGS_DEQ_STRINGS_EQ,
+ // When x ++ x' ++ ... != y ++ y' ++ ... and we do not know how the lengths
+ // of x and y compare, we apply the inference:
+ // len(x) = len(y) v len(x) != len(y)
+ STRINGS_DEQ_LENS_EQ,
+ // When px ++ x ++ ... != py ^ len(px ++ x ++ ...) = len(py), we apply the
+ // following inference that infers that the remainder of the longer normal
+ // form must be empty:
+ // ni = px ++ x ++ ... ^ nj = py ^ len(ni) = len(nj) --->
+ // x = "" ^ ...
+ STRINGS_DEQ_NORM_EMP,
+ // When two strings are disequal s != t and the comparison of their lengths
+ // is unknown, we apply the inference:
+ // len(s) != len(t) V len(s) = len(t)
+ STRINGS_DEQ_LENGTH_SP,
+ //-------------------------------------- end core solver
+ //-------------------------------------- codes solver
+ // str.to_code( v ) = rewrite( str.to_code(c) )
+ // where v is the proxy variable for c.
+ STRINGS_CODE_PROXY,
+ // str.code(x) = -1 V str.code(x) != str.code(y) V x = y
+ STRINGS_CODE_INJ,
+ //-------------------------------------- end codes solver
+ //-------------------------------------- regexp solver
+ // regular expression normal form conflict
+ // ( x in R ^ x = y ^ rewrite((str.in_re y R)) = false ) => false
+ // where y is the normal form computed for x.
+ STRINGS_RE_NF_CONFLICT,
+ // regular expression unfolding
+ // This is a general class of inferences of the form:
+ // (x in R) => F
+ // where F is formula expressing the next step of checking whether x is in
+ // R. For example:
+ // (x in (R)*) =>
+ // x = "" V x in R V ( x = x1 ++ x2 ++ x3 ^ x1 in R ^ x2 in (R)* ^ x3 in R)
+ STRINGS_RE_UNFOLD_POS,
+ // Same as above, for negative memberships
+ STRINGS_RE_UNFOLD_NEG,
+ // intersection inclusion conflict
+ // (x in R1 ^ ~ x in R2) => false where [[includes(R2,R1)]]
+ // Where includes(R2,R1) is a heuristic check for whether R2 includes R1.
+ STRINGS_RE_INTER_INCLUDE,
+ // intersection conflict, using regexp intersection computation
+ // (x in R1 ^ x in R2) => false where [[intersect(R1, R2) = empty]]
+ STRINGS_RE_INTER_CONF,
+ // intersection inference
+ // (x in R1 ^ y in R2 ^ x = y) => (x in re.inter(R1,R2))
+ STRINGS_RE_INTER_INFER,
+ // regular expression delta
+ // (x = "" ^ x in R) => C
+ // where "" in R holds if and only if C holds.
+ STRINGS_RE_DELTA,
+ // regular expression delta conflict
+ // (x = "" ^ x in R) => false
+ // where R does not accept the empty string.
+ STRINGS_RE_DELTA_CONF,
+ // regular expression derive ???
+ STRINGS_RE_DERIVE,
+ //-------------------------------------- end regexp solver
+ //-------------------------------------- extended function solver
+ // Standard extended function inferences from context-dependent rewriting
+ // produced by constant substitutions. See Reynolds et al CAV 2017. These are
+ // inferences of the form:
+ // X = Y => f(X) = t when rewrite( f(Y) ) = t
+ // where X = Y is a vector of equalities, where some of Y may be constants.
+ STRINGS_EXTF,
+ // Same as above, for normal form substitutions.
+ STRINGS_EXTF_N,
+ // Decompositions based on extended function inferences from context-dependent
+ // rewriting produced by constant substitutions. This is like the above, but
+ // handles cases where the inferred predicate is not necessarily an equality
+ // involving f(X). For example:
+ // x = "A" ^ contains( y ++ x, "B" ) => contains( y, "B" )
+ // This is generally only inferred if contains( y, "B" ) is a known term in
+ // the current context.
+ STRINGS_EXTF_D,
+ // Same as above, for normal form substitutions.
+ STRINGS_EXTF_D_N,
+ // Extended function equality rewrite. This is an inference of the form:
+ // t = s => P
+ // where P is a predicate implied by rewrite( t = s ).
+ // Typically, t is an application of an extended function and s is a constant.
+ // It is generally only inferred if P is a predicate over known terms.
+ STRINGS_EXTF_EQ_REW,
+ // contain transitive
+ // ( str.contains( s, t ) ^ ~contains( s, r ) ) => ~contains( t, r ).
+ STRINGS_CTN_TRANS,
+ // contain decompose
+ // str.contains( x, str.++( y1, ..., yn ) ) => str.contains( x, yi ) or
+ // ~str.contains( str.++( x1, ..., xn ), y ) => ~str.contains( xi, y )
+ STRINGS_CTN_DECOMPOSE,
+ // contain neg equal
+ // ( len( x ) = len( s ) ^ ~contains( x, s ) ) => x != s
+ STRINGS_CTN_NEG_EQUAL,
+ // contain positive
+ // str.contains( x, y ) => x = w1 ++ y ++ w2
+ // where w1 and w2 are skolem variables.
+ STRINGS_CTN_POS,
+ // All reduction inferences of the form:
+ // f(x1, .., xn) = y ^ P(x1, ..., xn, y)
+ // where f is an extended function, y is the purification variable for
+ // f(x1, .., xn) and P is the reduction predicate for f
+ // (see theory_strings_preprocess).
+ STRINGS_REDUCTION,
+ //-------------------------------------- end extended function solver
+ //-------------------------------------- prefix conflict
+ // prefix conflict (coarse-grained)
+ STRINGS_PREFIX_CONFLICT,
+ //-------------------------------------- end prefix conflict
+
+ UNKNOWN,
+};
+
+/**
+ * Converts an inference to a string. Note: This function is also used in
+ * `safe_print()`. Changing this functions name or signature will result in
+ * `safe_print()` printing "<unsupported>" instead of the proper strings for
+ * the enum values.
+ *
+ * @param i The inference
+ * @return The name of the inference
+ */
+const char* toString(InferenceId i);
+
+/**
+ * Writes an inference name to a stream.
+ *
+ * @param out The stream to write to
+ * @param i The inference to write to the stream
+ * @return The stream
+ */
+std::ostream& operator<<(std::ostream& out, InferenceId i);
+
+} // namespace theory
+} // namespace CVC4
+
+#endif /* CVC4__THEORY__INFERENCE_ID_H */
{
// (seq.unit x) = C => false if |C| != 1.
d_im.sendInference(
- exp, d_false, Inference::UNIT_CONST_CONFLICT);
+ exp, d_false, InferenceId::STRINGS_UNIT_CONST_CONFLICT);
return;
}
}
// (seq.unit x) = (seq.unit y) => x=y, or
// (seq.unit x) = (seq.unit c) => x=c
Assert(s.getType() == t.getType());
- d_im.sendInference(exp, s.eqNode(t), Inference::UNIT_INJ);
+ d_im.sendInference(exp, s.eqNode(t), InferenceId::STRINGS_UNIT_INJ);
}
}
// update best content
}
}
// infer the equality
- d_im.sendInference(exp, n.eqNode(nc), Inference::I_NORM);
+ d_im.sendInference(exp, n.eqNode(nc), InferenceId::STRINGS_I_NORM);
}
else
{
}
AlwaysAssert(foundNEmpty);
// infer the equality
- d_im.sendInference(exp, n.eqNode(ns), Inference::I_NORM_S);
+ d_im.sendInference(exp, n.eqNode(ns), InferenceId::STRINGS_I_NORM_S);
}
d_congruent.insert(n);
congruentCount[k].first++;
}
else if (d_state.hasTerm(c))
{
- d_im.sendInference(exp, n.eqNode(c), Inference::I_CONST_MERGE);
+ d_im.sendInference(exp, n.eqNode(c), InferenceId::STRINGS_I_CONST_MERGE);
return;
}
else if (!d_im.hasProcessed())
exp.push_back(bei.d_exp);
d_im.addToExplanation(n, bei.d_base, exp);
}
- d_im.sendInference(exp, d_false, Inference::I_CONST_CONFLICT);
+ d_im.sendInference(exp, d_false, InferenceId::STRINGS_I_CONST_CONFLICT);
return;
}
else
if (!d_state.areDisequal(*itr1, *itr2))
{
// add split lemma
- if (d_im.sendSplit(*itr1, *itr2, Inference::CARD_SP))
+ if (d_im.sendSplit(*itr1, *itr2, InferenceId::STRINGS_CARD_SP))
{
return;
}
if (!cons.isConst() || !cons.getConst<bool>())
{
d_im.sendInference(
- expn, expn, cons, Inference::CARDINALITY, false, true);
+ expn, expn, cons, InferenceId::STRINGS_CARDINALITY, false, true);
return;
}
}
}
d_bsolver.explainConstantEqc(n, eqc, exp);
Node conc = d_false;
- d_im.sendInference(exp, conc, Inference::F_NCTN);
+ d_im.sendInference(exp, conc, InferenceId::STRINGS_F_NCTN);
return;
}
}
{
std::vector<Node> exp;
Node conc;
- Inference infType = Inference::NONE;
+ InferenceId infType = InferenceId::UNKNOWN;
size_t eqc_size = eqc.size();
size_t asize = d_flat_form[a].size();
if (count == asize)
}
Assert(!conc_c.empty());
conc = utils::mkAnd(conc_c);
- infType = Inference::F_ENDPOINT_EMP;
+ infType = InferenceId::STRINGS_F_ENDPOINT_EMP;
Assert(count > 0);
// swap, will enforce is empty past current
a = eqc[i];
}
Assert(!conc_c.empty());
conc = utils::mkAnd(conc_c);
- infType = Inference::F_ENDPOINT_EMP;
+ infType = InferenceId::STRINGS_F_ENDPOINT_EMP;
Assert(count > 0);
break;
}
d_bsolver.explainConstantEqc(ac,curr,exp);
d_bsolver.explainConstantEqc(bc,cc,exp);
conc = d_false;
- infType = Inference::F_CONST;
+ infType = InferenceId::STRINGS_F_CONST;
break;
}
}
&& (d_flat_form[b].size() - 1) == count)
{
conc = ac.eqNode(bc);
- infType = Inference::F_ENDPOINT_EQ;
+ infType = InferenceId::STRINGS_F_ENDPOINT_EQ;
break;
}
else
d_im.addToExplanation(lcurr, lcc, lexpc);
lant = utils::mkAnd(lexpc);
conc = ac.eqNode(bc);
- infType = Inference::F_UNIFY;
+ infType = InferenceId::STRINGS_F_UNIFY;
break;
}
}
{
Node c = t == 0 ? a : b;
ssize_t jj;
- if (infType == Inference::F_ENDPOINT_EQ
- || (t == 1 && infType == Inference::F_ENDPOINT_EMP))
+ if (infType == InferenceId::STRINGS_F_ENDPOINT_EQ
+ || (t == 1 && infType == InferenceId::STRINGS_F_ENDPOINT_EMP))
{
// explain all the empty components for F_EndpointEq, all for
// the short end for F_EndpointEmp
std::vector<Node> exps;
exps.push_back(n.eqNode(emp));
d_im.sendInference(
- exps, n[i].eqNode(emp), Inference::I_CYCLE_E);
+ exps, n[i].eqNode(emp), InferenceId::STRINGS_I_CYCLE_E);
return Node::null();
}
}else{
if (j != i && !d_state.areEqual(n[j], emp))
{
d_im.sendInference(
- exp, n[j].eqNode(emp), Inference::I_CYCLE);
+ exp, n[j].eqNode(emp), InferenceId::STRINGS_I_CYCLE);
return Node::null();
}
}
nf_exp.push_back(eexp);
}
Node eq = nfe.d_base.eqNode(nfe_eq.d_base);
- d_im.sendInference(nf_exp, eq, Inference::NORMAL_FORM);
+ d_im.sendInference(nf_exp, eq, InferenceId::STRINGS_NORMAL_FORM);
if (d_im.hasProcessed())
{
return;
// Notice although not implemented, this can be minimized based on
// firstc/lastc, normal_forms_exp_depend.
d_bsolver.explainConstantEqc(n, eqc, exp);
- d_im.sendInference(exp, d_false, Inference::N_NCTN);
+ d_im.sendInference(exp, d_false, InferenceId::STRINGS_N_NCTN);
// conflict, finished
return;
}
exp.insert(exp.end(), nfi.d_exp.begin(), nfi.d_exp.end());
exp.insert(exp.end(), nfj.d_exp.begin(), nfj.d_exp.end());
exp.push_back(nfi.d_base.eqNode(nfj.d_base));
- d_im.sendInference(exp, d_false, Inference::N_EQ_CONF);
+ d_im.sendInference(exp, d_false, InferenceId::STRINGS_N_EQ_CONF);
return;
}
}
bool set_use_index = false;
Trace("strings-solve") << "Possible inferences (" << pinfer.size()
<< ") : " << std::endl;
- Inference min_id = Inference::NONE;
+ InferenceId min_id = InferenceId::UNKNOWN;
unsigned max_index = 0;
for (unsigned i = 0, psize = pinfer.size(); i < psize; i++)
{
// can infer that this string must be empty
Node eq = nfkv[index_k].eqNode(emp);
Assert(!d_state.areEqual(emp, nfkv[index_k]));
- d_im.sendInference(curr_exp, eq, Inference::N_ENDPOINT_EMP, isRev);
+ d_im.sendInference(curr_exp, eq, InferenceId::STRINGS_N_ENDPOINT_EMP, isRev);
index_k++;
}
break;
if (x.isConst() && y.isConst())
{
// if both are constant, it's just a constant conflict
- d_im.sendInference(ant, d_false, Inference::N_CONST, isRev, true);
+ d_im.sendInference(ant, d_false, InferenceId::STRINGS_N_CONST, isRev, true);
return;
}
// `x` and `y` have the same length. We infer that the two components
// set the explanation for length
Node lant = utils::mkAnd(lenExp);
ant.push_back(lant);
- d_im.sendInference(ant, eq, Inference::N_UNIFY, isRev);
+ d_im.sendInference(ant, eq, InferenceId::STRINGS_N_UNIFY, isRev);
break;
}
else if ((!x.isConst() && index == nfiv.size() - rproc - 1)
NormalForm::getExplanationForPrefixEq(nfi, nfj, -1, -1, antec);
d_im.sendInference(antec,
eqn[0].eqNode(eqn[1]),
- Inference::N_ENDPOINT_EQ,
+ InferenceId::STRINGS_N_ENDPOINT_EQ,
isRev,
true);
}
// E.g. "abc" ++ ... = "bc" ++ ... ---> conflict
std::vector<Node> antec;
NormalForm::getExplanationForPrefixEq(nfi, nfj, index, index, antec);
- d_im.sendInference(antec, d_false, Inference::N_CONST, isRev, true);
+ d_im.sendInference(antec, d_false, InferenceId::STRINGS_N_CONST, isRev, true);
break;
}
}
Node lenEq = nm->mkNode(EQUAL, xLenTerm, yLenTerm);
lenEq = Rewriter::rewrite(lenEq);
iinfo.d_conc = nm->mkNode(OR, lenEq, lenEq.negate());
- iinfo.d_id = Inference::LEN_SPLIT;
+ iinfo.d_id = InferenceId::STRINGS_LEN_SPLIT;
info.d_pendingPhase[lenEq] = true;
pinfer.push_back(info);
break;
// inferred
iinfo.d_conc = nm->mkNode(
AND, p.eqNode(nc), !eq.getConst<bool>() ? pEq.negate() : pEq);
- iinfo.d_id = Inference::INFER_EMP;
+ iinfo.d_id = InferenceId::STRINGS_INFER_EMP;
}
else
{
iinfo.d_conc = nm->mkNode(OR, eq, eq.negate());
- iinfo.d_id = Inference::LEN_SPLIT_EMP;
+ iinfo.d_id = InferenceId::STRINGS_LEN_SPLIT_EMP;
}
pinfer.push_back(info);
break;
xcv, stra, PfRule::CONCAT_CPROP, isRev, skc, newSkolems);
Assert(newSkolems.size() == 1);
iinfo.d_skolems[LENGTH_SPLIT].push_back(newSkolems[0]);
- iinfo.d_id = Inference::SSPLIT_CST_PROP;
+ iinfo.d_id = InferenceId::STRINGS_SSPLIT_CST_PROP;
iinfo.d_idRev = isRev;
pinfer.push_back(info);
break;
iinfo.d_premises.push_back(expNonEmpty);
Assert(newSkolems.size() == 1);
iinfo.d_skolems[LENGTH_SPLIT].push_back(newSkolems[0]);
- iinfo.d_id = Inference::SSPLIT_CST;
+ iinfo.d_id = InferenceId::STRINGS_SSPLIT_CST;
iinfo.d_idRev = isRev;
pinfer.push_back(info);
break;
// make the conclusion
if (lentTestSuccess == -1)
{
- iinfo.d_id = Inference::SSPLIT_VAR;
+ iinfo.d_id = InferenceId::STRINGS_SSPLIT_VAR;
iinfo.d_conc =
getConclusion(x, y, PfRule::CONCAT_SPLIT, isRev, skc, newSkolems);
if (options::stringUnifiedVSpt() && !options::stringLenConc())
}
else if (lentTestSuccess == 0)
{
- iinfo.d_id = Inference::SSPLIT_VAR_PROP;
+ iinfo.d_id = InferenceId::STRINGS_SSPLIT_VAR_PROP;
iinfo.d_conc =
getConclusion(x, y, PfRule::CONCAT_LPROP, isRev, skc, newSkolems);
}
else
{
Assert(lentTestSuccess == 1);
- iinfo.d_id = Inference::SSPLIT_VAR_PROP;
+ iinfo.d_id = InferenceId::STRINGS_SSPLIT_VAR_PROP;
iinfo.d_conc =
getConclusion(y, x, PfRule::CONCAT_LPROP, isRev, skc, newSkolems);
}
Trace("strings-loop") << "Strings::Loop: tails are different."
<< std::endl;
d_im.sendInference(
- iinfo.d_premises, conc, Inference::FLOOP_CONFLICT, false, true);
+ iinfo.d_premises, conc, InferenceId::STRINGS_FLOOP_CONFLICT, false, true);
return ProcessLoopResult::CONFLICT;
}
}
iinfo.d_premises.clear();
// try to make t equal to empty to avoid loop
iinfo.d_conc = nm->mkNode(kind::OR, split_eq, split_eq.negate());
- iinfo.d_id = Inference::LEN_SPLIT_EMP;
+ iinfo.d_id = InferenceId::STRINGS_LEN_SPLIT_EMP;
return ProcessLoopResult::INFERENCE;
}
else
// we will be done
iinfo.d_conc = conc;
- iinfo.d_id = Inference::FLOOP;
+ iinfo.d_id = InferenceId::STRINGS_FLOOP;
info.d_nfPair[0] = nfi.d_base;
info.d_nfPair[1] = nfj.d_base;
return ProcessLoopResult::INFERENCE;
std::vector<Node> premises;
premises.push_back(deq);
Node conc = u[0].eqNode(vc).notNode();
- d_im.sendInference(premises, conc, Inference::UNIT_INJ_DEQ, false, true);
+ d_im.sendInference(premises, conc, InferenceId::STRINGS_UNIT_INJ_DEQ, false, true);
return;
}
Trace("strings-solve-debug") << "...trivial" << std::endl;
// E.g. x ++ x' ++ ... != "abc" ++ y' ++ ... ^ len(x) != len(y) --->
// x = "" v x != ""
Node emp = Word::mkEmptyWord(nck.getType());
- d_im.sendSplit(nck, emp, Inference::DEQ_DISL_EMP_SPLIT);
+ d_im.sendSplit(nck, emp, InferenceId::STRINGS_DEQ_DISL_EMP_SPLIT);
return;
}
// x = "a" v x != "a"
if (d_im.sendSplit(firstChar,
nck,
- Inference::DEQ_DISL_FIRST_CHAR_EQ_SPLIT,
+ InferenceId::STRINGS_DEQ_DISL_FIRST_CHAR_EQ_SPLIT,
false))
{
return;
d_im.sendInference(antecLen,
antecLen,
conc,
- Inference::DEQ_DISL_FIRST_CHAR_STRING_SPLIT,
+ InferenceId::STRINGS_DEQ_DISL_FIRST_CHAR_STRING_SPLIT,
false,
true);
return;
d_im.sendInference(antecLen,
antecLen,
conc,
- Inference::DEQ_DISL_STRINGS_SPLIT,
+ InferenceId::STRINGS_DEQ_DISL_STRINGS_SPLIT,
false,
true);
}
// E.g. x ++ x' ++ ... != y ++ y' ++ ... ^ len(x) = len(y) --->
// x = y v x != y
Assert(!d_state.areDisequal(x, y));
- if (d_im.sendSplit(x, y, Inference::DEQ_STRINGS_EQ, false))
+ if (d_im.sendSplit(x, y, InferenceId::STRINGS_DEQ_STRINGS_EQ, false))
{
return;
}
//
// E.g. x ++ x' ++ ... != y ++ y' ++ ... --->
// len(x) = len(y) v len(x) != len(y)
- if (d_im.sendSplit(xLenTerm, yLenTerm, Inference::DEQ_LENS_EQ))
+ if (d_im.sendSplit(xLenTerm, yLenTerm, InferenceId::STRINGS_DEQ_LENS_EQ))
{
return;
}
Node conc = cc.size() == 1
? cc[0]
: NodeManager::currentNM()->mkNode(kind::AND, cc);
- d_im.sendInference(ant, antn, conc, Inference::DEQ_NORM_EMP, isRev, true);
+ d_im.sendInference(ant, antn, conc, InferenceId::STRINGS_DEQ_NORM_EMP, isRev, true);
return true;
}
}
if (!d_state.areEqual(lt[0], lt[1]) && !d_state.areDisequal(lt[0], lt[1]))
{
- d_im.sendSplit(lt[0], lt[1], Inference::DEQ_LENGTH_SP);
+ d_im.sendSplit(lt[0], lt[1], InferenceId::STRINGS_DEQ_LENGTH_SP);
}
}
}
{
Node eq = llt.eqNode(lc);
ei->d_normalizedLength.set(eq);
- d_im.sendInference(ant, eq, Inference::LEN_NORM, false, true);
+ d_im.sendInference(ant, eq, InferenceId::STRINGS_LEN_NORM, false, true);
}
}
}
lexp.push_back(n.negate());
Node xneqs = x.eqNode(s).negate();
d_im.sendInference(
- lexp, xneqs, Inference::CTN_NEG_EQUAL, false, true);
+ lexp, xneqs, InferenceId::STRINGS_CTN_NEG_EQUAL, false, true);
}
// this depends on the current assertions, so this
// inference is context-dependent
eq = eq[1];
std::vector<Node> expn;
expn.push_back(n);
- d_im.sendInference(expn, expn, eq, Inference::CTN_POS, false, true);
+ d_im.sendInference(expn, expn, eq, InferenceId::STRINGS_CTN_POS, false, true);
Trace("strings-extf-debug")
<< " resolve extf : " << n << " based on positive contain reduction."
<< std::endl;
Trace("strings-red-lemma") << "...from " << n << std::endl;
Trace("strings-red-lemma")
<< "Reduction_" << effort << " rewritten : " << Rewriter::rewrite(nnlem) << std::endl;
- d_im.sendInference(d_emptyVec, nnlem, Inference::REDUCTION, false, true);
+ d_im.sendInference(d_emptyVec, nnlem, InferenceId::STRINGS_REDUCTION, false, true);
Trace("strings-extf-debug")
<< " resolve extf : " << n << " based on reduction." << std::endl;
// add as reduction lemma
{
Trace("strings-extf")
<< " resolve extf : " << sn << " -> " << nrc << std::endl;
- Inference inf = effort == 0 ? Inference::EXTF : Inference::EXTF_N;
+ InferenceId inf = effort == 0 ? InferenceId::STRINGS_EXTF : InferenceId::STRINGS_EXTF_N;
d_im.sendInference(einfo.d_exp, conc, inf, false, true);
d_statistics.d_cdSimplifications << n.getKind();
}
// reduced since this argument may be circular: we may infer than n
// can be reduced to something else, but that thing may argue that it
// can be reduced to n, in theory.
- Inference infer =
- effort == 0 ? Inference::EXTF_D : Inference::EXTF_D_N;
+ InferenceId infer =
+ effort == 0 ? InferenceId::STRINGS_EXTF_D : InferenceId::STRINGS_EXTF_D_N;
d_im.sendInternalInference(einfo.d_exp, nrcAssert, infer);
}
to_reduce = nrc;
if (d_state.areEqual(conc, d_false))
{
// we are in conflict
- d_im.sendInference(in.d_exp, conc, Inference::CTN_DECOMPOSE);
+ d_im.sendInference(in.d_exp, conc, InferenceId::STRINGS_CTN_DECOMPOSE);
}
else if (d_extt.hasFunctionKind(conc.getKind()))
{
exp_c.insert(exp_c.end(),
d_extfInfoTmp[ofrom].d_exp.begin(),
d_extfInfoTmp[ofrom].d_exp.end());
- d_im.sendInference(exp_c, conc, Inference::CTN_TRANS);
+ d_im.sendInference(exp_c, conc, InferenceId::STRINGS_CTN_TRANS);
}
}
}
inferEqrr = Rewriter::rewrite(inferEqrr);
Trace("strings-extf-infer") << "checkExtfInference: " << inferEq
<< " ...reduces to " << inferEqrr << std::endl;
- d_im.sendInternalInference(in.d_exp, inferEqrr, Inference::EXTF_EQ_REW);
+ d_im.sendInternalInference(in.d_exp, inferEqrr, InferenceId::STRINGS_EXTF_EQ_REW);
}
}
namespace theory {
namespace strings {
-const char* toString(Inference i)
-{
- switch (i)
- {
- case Inference::I_NORM_S: return "I_NORM_S";
- case Inference::I_CONST_MERGE: return "I_CONST_MERGE";
- case Inference::I_CONST_CONFLICT: return "I_CONST_CONFLICT";
- case Inference::I_NORM: return "I_NORM";
- case Inference::UNIT_INJ: return "UNIT_INJ";
- case Inference::UNIT_CONST_CONFLICT: return "UNIT_CONST_CONFLICT";
- case Inference::UNIT_INJ_DEQ: return "UNIT_INJ_DEQ";
- case Inference::CARD_SP: return "CARD_SP";
- case Inference::CARDINALITY: return "CARDINALITY";
- case Inference::I_CYCLE_E: return "I_CYCLE_E";
- case Inference::I_CYCLE: return "I_CYCLE";
- case Inference::F_CONST: return "F_CONST";
- case Inference::F_UNIFY: return "F_UNIFY";
- case Inference::F_ENDPOINT_EMP: return "F_ENDPOINT_EMP";
- case Inference::F_ENDPOINT_EQ: return "F_ENDPOINT_EQ";
- case Inference::F_NCTN: return "F_NCTN";
- case Inference::N_EQ_CONF: return "N_EQ_CONF";
- case Inference::N_ENDPOINT_EMP: return "N_ENDPOINT_EMP";
- case Inference::N_UNIFY: return "N_UNIFY";
- case Inference::N_ENDPOINT_EQ: return "N_ENDPOINT_EQ";
- case Inference::N_CONST: return "N_CONST";
- case Inference::INFER_EMP: return "INFER_EMP";
- case Inference::SSPLIT_CST_PROP: return "SSPLIT_CST_PROP";
- case Inference::SSPLIT_VAR_PROP: return "SSPLIT_VAR_PROP";
- case Inference::LEN_SPLIT: return "LEN_SPLIT";
- case Inference::LEN_SPLIT_EMP: return "LEN_SPLIT_EMP";
- case Inference::SSPLIT_CST: return "SSPLIT_CST";
- case Inference::SSPLIT_VAR: return "SSPLIT_VAR";
- case Inference::FLOOP: return "FLOOP";
- case Inference::FLOOP_CONFLICT: return "FLOOP_CONFLICT";
- case Inference::NORMAL_FORM: return "NORMAL_FORM";
- case Inference::N_NCTN: return "N_NCTN";
- case Inference::LEN_NORM: return "LEN_NORM";
- case Inference::DEQ_DISL_EMP_SPLIT: return "DEQ_DISL_EMP_SPLIT";
- case Inference::DEQ_DISL_FIRST_CHAR_EQ_SPLIT:
- return "DEQ_DISL_FIRST_CHAR_EQ_SPLIT";
- case Inference::DEQ_DISL_FIRST_CHAR_STRING_SPLIT:
- return "DEQ_DISL_FIRST_CHAR_STRING_SPLIT";
- case Inference::DEQ_STRINGS_EQ: return "DEQ_STRINGS_EQ";
- case Inference::DEQ_DISL_STRINGS_SPLIT: return "DEQ_DISL_STRINGS_SPLIT";
- case Inference::DEQ_LENS_EQ: return "DEQ_LENS_EQ";
- case Inference::DEQ_NORM_EMP: return "DEQ_NORM_EMP";
- case Inference::DEQ_LENGTH_SP: return "DEQ_LENGTH_SP";
- case Inference::CODE_PROXY: return "CODE_PROXY";
- case Inference::CODE_INJ: return "CODE_INJ";
- case Inference::RE_NF_CONFLICT: return "RE_NF_CONFLICT";
- case Inference::RE_UNFOLD_POS: return "RE_UNFOLD_POS";
- case Inference::RE_UNFOLD_NEG: return "RE_UNFOLD_NEG";
- case Inference::RE_INTER_INCLUDE: return "RE_INTER_INCLUDE";
- case Inference::RE_INTER_CONF: return "RE_INTER_CONF";
- case Inference::RE_INTER_INFER: return "RE_INTER_INFER";
- case Inference::RE_DELTA: return "RE_DELTA";
- case Inference::RE_DELTA_CONF: return "RE_DELTA_CONF";
- case Inference::RE_DERIVE: return "RE_DERIVE";
- case Inference::EXTF: return "EXTF";
- case Inference::EXTF_N: return "EXTF_N";
- case Inference::EXTF_D: return "EXTF_D";
- case Inference::EXTF_D_N: return "EXTF_D_N";
- case Inference::EXTF_EQ_REW: return "EXTF_EQ_REW";
- case Inference::CTN_TRANS: return "CTN_TRANS";
- case Inference::CTN_DECOMPOSE: return "CTN_DECOMPOSE";
- case Inference::CTN_NEG_EQUAL: return "CTN_NEG_EQUAL";
- case Inference::CTN_POS: return "CTN_POS";
- case Inference::REDUCTION: return "REDUCTION";
- case Inference::PREFIX_CONFLICT: return "PREFIX_CONFLICT";
- default: return "?";
- }
-}
-
-std::ostream& operator<<(std::ostream& out, Inference i)
-{
- out << toString(i);
- return out;
-}
-
-InferInfo::InferInfo() : d_sim(nullptr), d_id(Inference::NONE), d_idRev(false)
+InferInfo::InferInfo() : d_sim(nullptr), d_id(InferenceId::UNKNOWN), d_idRev(false)
{
}
#include <vector>
#include "expr/node.h"
+#include "theory/inference_id.h"
#include "theory/theory_inference.h"
#include "util/safe_print.h"
namespace theory {
namespace strings {
-/** Types of inferences used in the procedure
- *
- * These are variants of the inference rules in Figures 3-5 of Liang et al.
- * "A DPLL(T) Solver for a Theory of Strings and Regular Expressions", CAV 2014.
- *
- * Note: The order in this enum matters in certain cases (e.g. inferences
- * related to normal forms), inferences that come first are generally
- * preferred.
- *
- * Notice that an inference is intentionally distinct from PfRule. An
- * inference captures *why* we performed a reasoning step, and a PfRule
- * rule captures *what* reasoning step was used. For instance, the inference
- * LEN_SPLIT translates to PfRule::SPLIT. The use of stats on inferences allows
- * us to know that we performed N splits (PfRule::SPLIT) because we wanted
- * to split on lengths for string equalities (Inference::LEN_SPLIT).
- */
-enum class Inference : uint32_t
-{
- BEGIN,
- //-------------------------------------- base solver
- // initial normalize singular
- // x1 = "" ^ ... ^ x_{i-1} = "" ^ x_{i+1} = "" ^ ... ^ xn = "" =>
- // x1 ++ ... ++ xn = xi
- I_NORM_S,
- // initial constant merge
- // explain_constant(x, c) => x = c
- // Above, explain_constant(x,c) is a basic explanation of why x must be equal
- // to string constant c, which is computed by taking arguments of
- // concatenation terms that are entailed to be constants. For example:
- // ( y = "AB" ^ z = "C" ) => y ++ z = "ABC"
- I_CONST_MERGE,
- // initial constant conflict
- // ( explain_constant(x, c1) ^ explain_constant(x, c2) ^ x = y) => false
- // where c1 != c2.
- I_CONST_CONFLICT,
- // initial normalize
- // Given two concatenation terms, this is applied when we find that they are
- // equal after e.g. removing strings that are currently empty. For example:
- // y = "" ^ z = "" => x ++ y = z ++ x
- I_NORM,
- // injectivity of seq.unit
- // (seq.unit x) = (seq.unit y) => x=y, or
- // (seq.unit x) = (seq.unit c) => x=c
- UNIT_INJ,
- // unit constant conflict
- // (seq.unit x) = C => false if |C| != 1.
- UNIT_CONST_CONFLICT,
- // injectivity of seq.unit for disequality
- // (seq.unit x) != (seq.unit y) => x != y, or
- // (seq.unit x) != (seq.unit c) => x != c
- UNIT_INJ_DEQ,
- // A split due to cardinality
- CARD_SP,
- // The cardinality inference for strings, see Liang et al CAV 2014.
- CARDINALITY,
- //-------------------------------------- end base solver
- //-------------------------------------- core solver
- // A cycle in the empty string equivalence class, e.g.:
- // x ++ y = "" => x = ""
- // This is typically not applied due to length constraints implying emptiness.
- I_CYCLE_E,
- // A cycle in the containment ordering.
- // x = y ++ x => y = "" or
- // x = y ++ z ^ y = x ++ w => z = "" ^ w = ""
- // This is typically not applied due to length constraints implying emptiness.
- I_CYCLE,
- // Flat form constant
- // x = y ^ x = z ++ c ... ^ y = z ++ d => false
- // where c and d are distinct constants.
- F_CONST,
- // Flat form unify
- // x = y ^ x = z ++ x' ... ^ y = z ++ y' ^ len(x') = len(y') => x' = y'
- // Notice flat form instances are similar to normal form inferences but do
- // not involve recursive explanations.
- F_UNIFY,
- // Flat form endpoint empty
- // x = y ^ x = z ^ y = z ++ y' => y' = ""
- F_ENDPOINT_EMP,
- // Flat form endpoint equal
- // x = y ^ x = z ++ x' ^ y = z ++ y' => x' = y'
- F_ENDPOINT_EQ,
- // Flat form not contained
- // x = c ^ x = y => false when rewrite( contains( y, c ) ) = false
- F_NCTN,
- // Normal form equality conflict
- // x = N[x] ^ y = N[y] ^ x=y => false
- // where Rewriter::rewrite(N[x]=N[y]) = false.
- N_EQ_CONF,
- // Given two normal forms, infers that the remainder one of them has to be
- // empty. For example:
- // If x1 ++ x2 = y1 and x1 = y1, then x2 = ""
- N_ENDPOINT_EMP,
- // Given two normal forms, infers that two components have to be the same if
- // they have the same length. For example:
- // If x1 ++ x2 = x3 ++ x4 and len(x1) = len(x3) then x1 = x3
- N_UNIFY,
- // Given two normal forms, infers that the endpoints have to be the same. For
- // example:
- // If x1 ++ x2 = x3 ++ x4 ++ x5 and x1 = x3 then x2 = x4 ++ x5
- N_ENDPOINT_EQ,
- // Given two normal forms with constant endpoints, infers a conflict if the
- // endpoints do not agree. For example:
- // If "abc" ++ ... = "bc" ++ ... then conflict
- N_CONST,
- // infer empty, for example:
- // (~) x = ""
- // This is inferred when we encounter an x such that x = "" rewrites to a
- // constant. This inference is used for instance when we otherwise would have
- // split on the emptiness of x but the rewriter tells us the emptiness of x
- // can be inferred.
- INFER_EMP,
- // string split constant propagation, for example:
- // x = y, x = "abc", y = y1 ++ "b" ++ y2
- // implies y1 = "a" ++ y1'
- SSPLIT_CST_PROP,
- // string split variable propagation, for example:
- // x = y, x = x1 ++ x2, y = y1 ++ y2, len( x1 ) >= len( y1 )
- // implies x1 = y1 ++ x1'
- // This is inspired by Zheng et al CAV 2015.
- SSPLIT_VAR_PROP,
- // length split, for example:
- // len( x1 ) = len( y1 ) V len( x1 ) != len( y1 )
- // This is inferred when e.g. x = y, x = x1 ++ x2, y = y1 ++ y2.
- LEN_SPLIT,
- // length split empty, for example:
- // z = "" V z != ""
- // This is inferred when, e.g. x = y, x = z ++ x1, y = y1 ++ z
- LEN_SPLIT_EMP,
- // string split constant
- // x = y, x = "c" ++ x2, y = y1 ++ y2, y1 != ""
- // implies y1 = "c" ++ y1'
- // This is a special case of F-Split in Figure 5 of Liang et al CAV 2014.
- SSPLIT_CST,
- // string split variable, for example:
- // x = y, x = x1 ++ x2, y = y1 ++ y2
- // implies x1 = y1 ++ x1' V y1 = x1 ++ y1'
- // This is rule F-Split in Figure 5 of Liang et al CAV 2014.
- SSPLIT_VAR,
- // flat form loop, for example:
- // x = y, x = x1 ++ z, y = z ++ y2
- // implies z = u2 ++ u1, u in ( u1 ++ u2 )*, x1 = u2 ++ u, y2 = u ++ u1
- // for fresh u, u1, u2.
- // This is the rule F-Loop from Figure 5 of Liang et al CAV 2014.
- FLOOP,
- // loop conflict ???
- FLOOP_CONFLICT,
- // Normal form inference
- // x = y ^ z = y => x = z
- // This is applied when y is the normal form of both x and z.
- NORMAL_FORM,
- // Normal form not contained, same as FFROM_NCTN but for normal forms
- N_NCTN,
- // Length normalization
- // x = y => len( x ) = len( y )
- // Typically applied when y is the normal form of x.
- LEN_NORM,
- // When x ++ x' ++ ... != "abc" ++ y' ++ ... ^ len(x) != len(y), we apply the
- // inference:
- // x = "" v x != ""
- DEQ_DISL_EMP_SPLIT,
- // When x ++ x' ++ ... != "abc" ++ y' ++ ... ^ len(x) = 1, we apply the
- // inference:
- // x = "a" v x != "a"
- DEQ_DISL_FIRST_CHAR_EQ_SPLIT,
- // When x ++ x' ++ ... != "abc" ++ y' ++ ... ^ len(x) != "", we apply the
- // inference:
- // ni = x ++ x' ++ ... ^ nj = "abc" ++ y' ++ ... ^ x != "" --->
- // x = k1 ++ k2 ^ len(k1) = 1 ^ (k1 != "a" v x = "a" ++ k2)
- DEQ_DISL_FIRST_CHAR_STRING_SPLIT,
- // When x ++ x' ++ ... != y ++ y' ++ ... ^ len(x) != len(y), we apply the
- // inference:
- // ni = x ++ x' ++ ... ^ nj = y ++ y' ++ ... ^ ni != nj ^ len(x) != len(y)
- // --->
- // len(k1) = len(x) ^ len(k2) = len(y) ^ (y = k1 ++ k3 v x = k1 ++ k2)
- DEQ_DISL_STRINGS_SPLIT,
- // When x ++ x' ++ ... != y ++ y' ++ ... ^ len(x) = len(y), we apply the
- // inference:
- // x = y v x != y
- DEQ_STRINGS_EQ,
- // When x ++ x' ++ ... != y ++ y' ++ ... and we do not know how the lengths
- // of x and y compare, we apply the inference:
- // len(x) = len(y) v len(x) != len(y)
- DEQ_LENS_EQ,
- // When px ++ x ++ ... != py ^ len(px ++ x ++ ...) = len(py), we apply the
- // following inference that infers that the remainder of the longer normal
- // form must be empty:
- // ni = px ++ x ++ ... ^ nj = py ^ len(ni) = len(nj) --->
- // x = "" ^ ...
- DEQ_NORM_EMP,
- // When two strings are disequal s != t and the comparison of their lengths
- // is unknown, we apply the inference:
- // len(s) != len(t) V len(s) = len(t)
- DEQ_LENGTH_SP,
- //-------------------------------------- end core solver
- //-------------------------------------- codes solver
- // str.to_code( v ) = rewrite( str.to_code(c) )
- // where v is the proxy variable for c.
- CODE_PROXY,
- // str.code(x) = -1 V str.code(x) != str.code(y) V x = y
- CODE_INJ,
- //-------------------------------------- end codes solver
- //-------------------------------------- regexp solver
- // regular expression normal form conflict
- // ( x in R ^ x = y ^ rewrite((str.in_re y R)) = false ) => false
- // where y is the normal form computed for x.
- RE_NF_CONFLICT,
- // regular expression unfolding
- // This is a general class of inferences of the form:
- // (x in R) => F
- // where F is formula expressing the next step of checking whether x is in
- // R. For example:
- // (x in (R)*) =>
- // x = "" V x in R V ( x = x1 ++ x2 ++ x3 ^ x1 in R ^ x2 in (R)* ^ x3 in R)
- RE_UNFOLD_POS,
- // Same as above, for negative memberships
- RE_UNFOLD_NEG,
- // intersection inclusion conflict
- // (x in R1 ^ ~ x in R2) => false where [[includes(R2,R1)]]
- // Where includes(R2,R1) is a heuristic check for whether R2 includes R1.
- RE_INTER_INCLUDE,
- // intersection conflict, using regexp intersection computation
- // (x in R1 ^ x in R2) => false where [[intersect(R1, R2) = empty]]
- RE_INTER_CONF,
- // intersection inference
- // (x in R1 ^ y in R2 ^ x = y) => (x in re.inter(R1,R2))
- RE_INTER_INFER,
- // regular expression delta
- // (x = "" ^ x in R) => C
- // where "" in R holds if and only if C holds.
- RE_DELTA,
- // regular expression delta conflict
- // (x = "" ^ x in R) => false
- // where R does not accept the empty string.
- RE_DELTA_CONF,
- // regular expression derive ???
- RE_DERIVE,
- //-------------------------------------- end regexp solver
- //-------------------------------------- extended function solver
- // Standard extended function inferences from context-dependent rewriting
- // produced by constant substitutions. See Reynolds et al CAV 2017. These are
- // inferences of the form:
- // X = Y => f(X) = t when rewrite( f(Y) ) = t
- // where X = Y is a vector of equalities, where some of Y may be constants.
- EXTF,
- // Same as above, for normal form substitutions.
- EXTF_N,
- // Decompositions based on extended function inferences from context-dependent
- // rewriting produced by constant substitutions. This is like the above, but
- // handles cases where the inferred predicate is not necessarily an equality
- // involving f(X). For example:
- // x = "A" ^ contains( y ++ x, "B" ) => contains( y, "B" )
- // This is generally only inferred if contains( y, "B" ) is a known term in
- // the current context.
- EXTF_D,
- // Same as above, for normal form substitutions.
- EXTF_D_N,
- // Extended function equality rewrite. This is an inference of the form:
- // t = s => P
- // where P is a predicate implied by rewrite( t = s ).
- // Typically, t is an application of an extended function and s is a constant.
- // It is generally only inferred if P is a predicate over known terms.
- EXTF_EQ_REW,
- // contain transitive
- // ( str.contains( s, t ) ^ ~contains( s, r ) ) => ~contains( t, r ).
- CTN_TRANS,
- // contain decompose
- // str.contains( x, str.++( y1, ..., yn ) ) => str.contains( x, yi ) or
- // ~str.contains( str.++( x1, ..., xn ), y ) => ~str.contains( xi, y )
- CTN_DECOMPOSE,
- // contain neg equal
- // ( len( x ) = len( s ) ^ ~contains( x, s ) ) => x != s
- CTN_NEG_EQUAL,
- // contain positive
- // str.contains( x, y ) => x = w1 ++ y ++ w2
- // where w1 and w2 are skolem variables.
- CTN_POS,
- // All reduction inferences of the form:
- // f(x1, .., xn) = y ^ P(x1, ..., xn, y)
- // where f is an extended function, y is the purification variable for
- // f(x1, .., xn) and P is the reduction predicate for f
- // (see theory_strings_preprocess).
- REDUCTION,
- //-------------------------------------- end extended function solver
- //-------------------------------------- prefix conflict
- // prefix conflict (coarse-grained)
- PREFIX_CONFLICT,
- //-------------------------------------- end prefix conflict
- NONE,
-};
-
-/**
- * Converts an inference to a string. Note: This function is also used in
- * `safe_print()`. Changing this functions name or signature will result in
- * `safe_print()` printing "<unsupported>" instead of the proper strings for
- * the enum values.
- *
- * @param i The inference
- * @return The name of the inference
- */
-const char* toString(Inference i);
-
-/**
- * Writes an inference name to a stream.
- *
- * @param out The stream to write to
- * @param i The inference to write to the stream
- * @return The stream
- */
-std::ostream& operator<<(std::ostream& out, Inference i);
-
/**
* Length status, used for indicating the length constraints for Skolems
* introduced by the theory of strings.
/** Pointer to the class used for processing this info */
InferenceManager* d_sim;
/** The inference identifier */
- Inference d_id;
+ InferenceId d_id;
/** Whether it is the reverse form of the above id */
bool d_idRev;
/** The conclusion */
d_lazyFactMap.insert(ii.d_conc, iic);
}
-void InferProofCons::convert(Inference infer,
+void InferProofCons::convert(InferenceId infer,
bool isRev,
Node conc,
const std::vector<Node>& exp,
switch (infer)
{
// ========================== equal by substitution+rewriting
- case Inference::I_NORM_S:
- case Inference::I_CONST_MERGE:
- case Inference::I_NORM:
- case Inference::LEN_NORM:
- case Inference::NORMAL_FORM:
- case Inference::CODE_PROXY:
+ case InferenceId::STRINGS_I_NORM_S:
+ case InferenceId::STRINGS_I_CONST_MERGE:
+ case InferenceId::STRINGS_I_NORM:
+ case InferenceId::STRINGS_LEN_NORM:
+ case InferenceId::STRINGS_NORMAL_FORM:
+ case InferenceId::STRINGS_CODE_PROXY:
{
ps.d_args.push_back(conc);
// will attempt this rule
}
break;
// ========================== substitution + rewriting
- case Inference::RE_NF_CONFLICT:
- case Inference::EXTF:
- case Inference::EXTF_N:
- case Inference::EXTF_D:
- case Inference::EXTF_D_N:
- case Inference::I_CONST_CONFLICT:
- case Inference::UNIT_CONST_CONFLICT:
+ case InferenceId::STRINGS_RE_NF_CONFLICT:
+ case InferenceId::STRINGS_EXTF:
+ case InferenceId::STRINGS_EXTF_N:
+ case InferenceId::STRINGS_EXTF_D:
+ case InferenceId::STRINGS_EXTF_D_N:
+ case InferenceId::STRINGS_I_CONST_CONFLICT:
+ case InferenceId::STRINGS_UNIT_CONST_CONFLICT:
{
if (!ps.d_children.empty())
{
}
break;
// ========================== rewrite pred
- case Inference::EXTF_EQ_REW:
- case Inference::INFER_EMP:
+ case InferenceId::STRINGS_EXTF_EQ_REW:
+ case InferenceId::STRINGS_INFER_EMP:
{
// the last child is the predicate we are operating on, move to front
Node src = ps.d_children[ps.d_children.size() - 1];
}
break;
// ========================== substitution+rewriting, CONCAT_EQ, ...
- case Inference::F_CONST:
- case Inference::F_UNIFY:
- case Inference::F_ENDPOINT_EMP:
- case Inference::F_ENDPOINT_EQ:
- case Inference::F_NCTN:
- case Inference::N_EQ_CONF:
- case Inference::N_CONST:
- case Inference::N_UNIFY:
- case Inference::N_ENDPOINT_EMP:
- case Inference::N_ENDPOINT_EQ:
- case Inference::N_NCTN:
- case Inference::SSPLIT_CST_PROP:
- case Inference::SSPLIT_VAR_PROP:
- case Inference::SSPLIT_CST:
- case Inference::SSPLIT_VAR:
+ case InferenceId::STRINGS_F_CONST:
+ case InferenceId::STRINGS_F_UNIFY:
+ case InferenceId::STRINGS_F_ENDPOINT_EMP:
+ case InferenceId::STRINGS_F_ENDPOINT_EQ:
+ case InferenceId::STRINGS_F_NCTN:
+ case InferenceId::STRINGS_N_EQ_CONF:
+ case InferenceId::STRINGS_N_CONST:
+ case InferenceId::STRINGS_N_UNIFY:
+ case InferenceId::STRINGS_N_ENDPOINT_EMP:
+ case InferenceId::STRINGS_N_ENDPOINT_EQ:
+ case InferenceId::STRINGS_N_NCTN:
+ case InferenceId::STRINGS_SSPLIT_CST_PROP:
+ case InferenceId::STRINGS_SSPLIT_VAR_PROP:
+ case InferenceId::STRINGS_SSPLIT_CST:
+ case InferenceId::STRINGS_SSPLIT_VAR:
{
Trace("strings-ipc-core") << "Generate core rule for " << infer
<< " (rev=" << isRev << ")" << std::endl;
// the length constraint
std::vector<Node> lenConstraint;
// these inferences have a length constraint as the last explain
- if (infer == Inference::N_UNIFY || infer == Inference::F_UNIFY
- || infer == Inference::SSPLIT_CST || infer == Inference::SSPLIT_VAR
- || infer == Inference::SSPLIT_VAR_PROP
- || infer == Inference::SSPLIT_CST_PROP)
+ if (infer == InferenceId::STRINGS_N_UNIFY || infer == InferenceId::STRINGS_F_UNIFY
+ || infer == InferenceId::STRINGS_SSPLIT_CST || infer == InferenceId::STRINGS_SSPLIT_VAR
+ || infer == InferenceId::STRINGS_SSPLIT_VAR_PROP
+ || infer == InferenceId::STRINGS_SSPLIT_CST_PROP)
{
if (exp.size() >= 2)
{
}
// Now, mainEqCeq is an equality t ++ ... == s ++ ... where the
// inference involved t and s.
- if (infer == Inference::N_ENDPOINT_EQ
- || infer == Inference::N_ENDPOINT_EMP
- || infer == Inference::F_ENDPOINT_EQ
- || infer == Inference::F_ENDPOINT_EMP)
+ if (infer == InferenceId::STRINGS_N_ENDPOINT_EQ
+ || infer == InferenceId::STRINGS_N_ENDPOINT_EMP
+ || infer == InferenceId::STRINGS_F_ENDPOINT_EQ
+ || infer == InferenceId::STRINGS_F_ENDPOINT_EMP)
{
// Should be equal to conclusion already, or rewrite to it.
// Notice that this step is necessary to handle the "rproc"
// t1 ++ ... ++ tn == "". However, these are very rarely applied, let
// alone for 2+ children. This case is intentionally unhandled here.
}
- else if (infer == Inference::N_CONST || infer == Inference::F_CONST
- || infer == Inference::N_EQ_CONF)
+ else if (infer == InferenceId::STRINGS_N_CONST || infer == InferenceId::STRINGS_F_CONST
+ || infer == InferenceId::STRINGS_N_EQ_CONF)
{
// should be a constant conflict
std::vector<Node> childrenC;
Node s0 = svec[isRev ? svec.size() - 1 : 0];
bool applySym = false;
// may need to apply symmetry
- if ((infer == Inference::SSPLIT_CST
- || infer == Inference::SSPLIT_CST_PROP)
+ if ((infer == InferenceId::STRINGS_SSPLIT_CST
+ || infer == InferenceId::STRINGS_SSPLIT_CST_PROP)
&& t0.isConst())
{
Assert(!s0.isConst());
applySym = true;
std::swap(t0, s0);
}
- if (infer == Inference::N_UNIFY || infer == Inference::F_UNIFY)
+ if (infer == InferenceId::STRINGS_N_UNIFY || infer == InferenceId::STRINGS_F_UNIFY)
{
if (conc.getKind() != EQUAL)
{
// the form of the required length constraint expected by the proof
Node lenReq;
bool lenSuccess = false;
- if (infer == Inference::N_UNIFY || infer == Inference::F_UNIFY)
+ if (infer == InferenceId::STRINGS_N_UNIFY || infer == InferenceId::STRINGS_F_UNIFY)
{
// the required premise for unify is always len(x) = len(y),
// however the explanation may not be literally this. Thus, we
lenSuccess = convertLengthPf(lenReq, lenConstraint, psb);
rule = PfRule::CONCAT_UNIFY;
}
- else if (infer == Inference::SSPLIT_VAR)
+ else if (infer == InferenceId::STRINGS_SSPLIT_VAR)
{
// it should be the case that lenConstraint => lenReq
lenReq = nm->mkNode(STRING_LENGTH, t0)
lenSuccess = convertLengthPf(lenReq, lenConstraint, psb);
rule = PfRule::CONCAT_SPLIT;
}
- else if (infer == Inference::SSPLIT_CST)
+ else if (infer == InferenceId::STRINGS_SSPLIT_CST)
{
// it should be the case that lenConstraint => lenReq
lenReq = nm->mkNode(STRING_LENGTH, t0)
lenSuccess = convertLengthPf(lenReq, lenConstraint, psb);
rule = PfRule::CONCAT_CSPLIT;
}
- else if (infer == Inference::SSPLIT_VAR_PROP)
+ else if (infer == InferenceId::STRINGS_SSPLIT_VAR_PROP)
{
// it should be the case that lenConstraint => lenReq
for (unsigned r = 0; r < 2; r++)
}
rule = PfRule::CONCAT_LPROP;
}
- else if (infer == Inference::SSPLIT_CST_PROP)
+ else if (infer == InferenceId::STRINGS_SSPLIT_CST_PROP)
{
// it should be the case that lenConstraint => lenReq
lenReq = nm->mkNode(STRING_LENGTH, t0)
}
break;
// ========================== Disequalities
- case Inference::DEQ_DISL_FIRST_CHAR_STRING_SPLIT:
- case Inference::DEQ_DISL_STRINGS_SPLIT:
+ case InferenceId::STRINGS_DEQ_DISL_FIRST_CHAR_STRING_SPLIT:
+ case InferenceId::STRINGS_DEQ_DISL_STRINGS_SPLIT:
{
if (conc.getKind() != AND || conc.getNumChildren() != 2
|| conc[0].getKind() != EQUAL || !conc[0][0].getType().isStringLike()
}
break;
// ========================== Boolean split
- case Inference::CARD_SP:
- case Inference::LEN_SPLIT:
- case Inference::LEN_SPLIT_EMP:
- case Inference::DEQ_DISL_EMP_SPLIT:
- case Inference::DEQ_DISL_FIRST_CHAR_EQ_SPLIT:
- case Inference::DEQ_STRINGS_EQ:
- case Inference::DEQ_LENS_EQ:
- case Inference::DEQ_LENGTH_SP:
+ case InferenceId::STRINGS_CARD_SP:
+ case InferenceId::STRINGS_LEN_SPLIT:
+ case InferenceId::STRINGS_LEN_SPLIT_EMP:
+ case InferenceId::STRINGS_DEQ_DISL_EMP_SPLIT:
+ case InferenceId::STRINGS_DEQ_DISL_FIRST_CHAR_EQ_SPLIT:
+ case InferenceId::STRINGS_DEQ_STRINGS_EQ:
+ case InferenceId::STRINGS_DEQ_LENS_EQ:
+ case InferenceId::STRINGS_DEQ_LENGTH_SP:
{
if (conc.getKind() != OR)
{
}
break;
// ========================== Regular expression unfolding
- case Inference::RE_UNFOLD_POS:
- case Inference::RE_UNFOLD_NEG:
+ case InferenceId::STRINGS_RE_UNFOLD_POS:
+ case InferenceId::STRINGS_RE_UNFOLD_NEG:
{
- if (infer == Inference::RE_UNFOLD_POS)
+ if (infer == InferenceId::STRINGS_RE_UNFOLD_POS)
{
ps.d_rule = PfRule::RE_UNFOLD_POS;
}
}
break;
// ========================== Reduction
- case Inference::CTN_POS:
- case Inference::CTN_NEG_EQUAL:
+ case InferenceId::STRINGS_CTN_POS:
+ case InferenceId::STRINGS_CTN_NEG_EQUAL:
{
if (ps.d_children.size() != 1)
{
}
break;
- case Inference::REDUCTION:
+ case InferenceId::STRINGS_REDUCTION:
{
size_t nchild = conc.getNumChildren();
Node mainEq;
}
break;
// ========================== code injectivity
- case Inference::CODE_INJ:
+ case InferenceId::STRINGS_CODE_INJ:
{
ps.d_rule = PfRule::STRING_CODE_INJ;
Assert(conc.getKind() == OR && conc.getNumChildren() == 3
}
break;
// ========================== unit injectivity
- case Inference::UNIT_INJ: { ps.d_rule = PfRule::STRING_SEQ_UNIT_INJ;
+ case InferenceId::STRINGS_UNIT_INJ: { ps.d_rule = PfRule::STRING_SEQ_UNIT_INJ;
}
break;
// ========================== prefix conflict
- case Inference::PREFIX_CONFLICT:
+ case InferenceId::STRINGS_PREFIX_CONFLICT:
{
Trace("strings-ipc-prefix") << "Prefix conflict..." << std::endl;
std::vector<Node> eqs;
}
break;
// ========================== regular expressions
- case Inference::RE_INTER_INCLUDE:
- case Inference::RE_INTER_CONF:
- case Inference::RE_INTER_INFER:
+ case InferenceId::STRINGS_RE_INTER_INCLUDE:
+ case InferenceId::STRINGS_RE_INTER_CONF:
+ case InferenceId::STRINGS_RE_INTER_INFER:
{
std::vector<Node> reiExp;
std::vector<Node> reis;
}
break;
// ========================== unknown and currently unsupported
- case Inference::CARDINALITY:
- case Inference::I_CYCLE_E:
- case Inference::I_CYCLE:
- case Inference::RE_DELTA:
- case Inference::RE_DELTA_CONF:
- case Inference::RE_DERIVE:
- case Inference::FLOOP:
- case Inference::FLOOP_CONFLICT:
- case Inference::DEQ_NORM_EMP:
- case Inference::CTN_TRANS:
- case Inference::CTN_DECOMPOSE:
+ case InferenceId::STRINGS_CARDINALITY:
+ case InferenceId::STRINGS_I_CYCLE_E:
+ case InferenceId::STRINGS_I_CYCLE:
+ case InferenceId::STRINGS_RE_DELTA:
+ case InferenceId::STRINGS_RE_DELTA_CONF:
+ case InferenceId::STRINGS_RE_DERIVE:
+ case InferenceId::STRINGS_FLOOP:
+ case InferenceId::STRINGS_FLOOP_CONFLICT:
+ case InferenceId::STRINGS_DEQ_NORM_EMP:
+ case InferenceId::STRINGS_CTN_TRANS:
+ case InferenceId::STRINGS_CTN_DECOMPOSE:
default:
// do nothing, these will be converted to STRING_TRUST below since the
// rule is unknown.
* In particular, psb will contain a set of steps that form a proof
* whose conclusion is ii.d_conc and whose free assumptions are ii.d_ant.
*/
- void convert(Inference infer,
+ void convert(InferenceId infer,
bool isRev,
Node conc,
const std::vector<Node>& exp,
bool InferenceManager::sendInternalInference(std::vector<Node>& exp,
Node conc,
- Inference infer)
+ InferenceId infer)
{
if (conc.getKind() == AND
|| (conc.getKind() == NOT && conc[0].getKind() == OR))
bool InferenceManager::sendInference(const std::vector<Node>& exp,
const std::vector<Node>& noExplain,
Node eq,
- Inference infer,
+ InferenceId infer,
bool isRev,
bool asLemma)
{
bool InferenceManager::sendInference(const std::vector<Node>& exp,
Node eq,
- Inference infer,
+ InferenceId infer,
bool isRev,
bool asLemma)
{
addPendingFact(std::unique_ptr<InferInfo>(new InferInfo(ii)));
}
-bool InferenceManager::sendSplit(Node a, Node b, Inference infer, bool preq)
+bool InferenceManager::sendSplit(Node a, Node b, InferenceId infer, bool preq)
{
Node eq = a.eqNode(b);
eq = Rewriter::rewrite(eq);
}
}
LemmaProperty p = LemmaProperty::NONE;
- if (ii.d_id == Inference::REDUCTION)
+ if (ii.d_id == InferenceId::STRINGS_REDUCTION)
{
p |= LemmaProperty::NEEDS_JUSTIFY;
}
*/
bool sendInternalInference(std::vector<Node>& exp,
Node conc,
- Inference infer);
+ InferenceId infer);
/** send inference
*
bool sendInference(const std::vector<Node>& exp,
const std::vector<Node>& noExplain,
Node eq,
- Inference infer,
+ InferenceId infer,
bool isRev = false,
bool asLemma = false);
/** same as above, but where noExplain is empty */
bool sendInference(const std::vector<Node>& exp,
Node eq,
- Inference infer,
+ InferenceId infer,
bool isRev = false,
bool asLemma = false);
* This method returns true if the split was non-trivial, and false
* otherwise. A split is trivial if a=b rewrites to a constant.
*/
- bool sendSplit(Node a, Node b, Inference infer, bool preq = true);
+ bool sendSplit(Node a, Node b, InferenceId infer, bool preq = true);
/**
* Set that we are incomplete for the current set of assertions (in other
* words, we must answer "unknown" instead of "sat"); this calls the output
noExplain.push_back(assertion);
Node conc = Node::null();
d_im.sendInference(
- iexp, noExplain, conc, Inference::RE_NF_CONFLICT);
+ iexp, noExplain, conc, InferenceId::STRINGS_RE_NF_CONFLICT);
addedLemma = true;
break;
}
{
d_statistics.d_regexpUnfoldingsNeg << atom[1].getKind();
}
- Inference inf =
- polarity ? Inference::RE_UNFOLD_POS : Inference::RE_UNFOLD_NEG;
+ InferenceId inf =
+ polarity ? InferenceId::STRINGS_RE_UNFOLD_POS : InferenceId::STRINGS_RE_UNFOLD_NEG;
d_im.sendInference(iexp, noExplain, conc, inf);
addedLemma = true;
if (changed)
Node conc;
d_im.sendInference(
- vec_nodes, conc, Inference::RE_INTER_INCLUDE, false, true);
+ vec_nodes, conc, InferenceId::STRINGS_RE_INTER_INCLUDE, false, true);
return false;
}
}
}
Node conc;
d_im.sendInference(
- vec_nodes, conc, Inference::RE_INTER_CONF, false, true);
+ vec_nodes, conc, InferenceId::STRINGS_RE_INTER_CONF, false, true);
// conflict, return
return false;
}
vec_nodes.push_back(mi[0].eqNode(m[0]));
}
d_im.sendInference(
- vec_nodes, mres, Inference::RE_INTER_INFER, false, true);
+ vec_nodes, mres, InferenceId::STRINGS_RE_INTER_INFER, false, true);
// both are reduced
d_im.markReduced(m);
d_im.markReduced(mi);
noExplain.push_back(x.eqNode(d_emptyString));
std::vector<Node> iexp = nf_exp;
iexp.insert(iexp.end(), noExplain.begin(), noExplain.end());
- d_im.sendInference(iexp, noExplain, exp, Inference::RE_DELTA);
+ d_im.sendInference(iexp, noExplain, exp, InferenceId::STRINGS_RE_DELTA);
addedLemma = true;
d_regexp_ccached.insert(atom);
return false;
noExplain.push_back(x.eqNode(d_emptyString));
std::vector<Node> iexp = nf_exp;
iexp.insert(iexp.end(), noExplain.begin(), noExplain.end());
- d_im.sendInference(iexp, noExplain, d_false, Inference::RE_DELTA_CONF);
+ d_im.sendInference(iexp, noExplain, d_false, InferenceId::STRINGS_RE_DELTA_CONF);
addedLemma = true;
d_regexp_ccached.insert(atom);
return false;
std::vector<Node> noExplain;
noExplain.push_back(atom);
iexp.push_back(atom);
- d_im.sendInference(iexp, noExplain, conc, Inference::RE_DERIVE);
+ d_im.sendInference(iexp, noExplain, conc, InferenceId::STRINGS_RE_DERIVE);
return true;
}
return false;
IntStat d_strategyRuns;
//--------------- inferences
/** Counts the number of applications of each type of inference */
- HistogramStat<Inference> d_inferences;
+ HistogramStat<InferenceId> d_inferences;
/**
* Counts the number of applications of each type of inference that were not
* processed as a proof step. This is a subset of d_inferences.
*/
- HistogramStat<Inference> d_inferencesNoPf;
+ HistogramStat<InferenceId> d_inferencesNoPf;
/**
* Counts the number of applications of each type of context-dependent
* simplification. The sum of this map is equal to the number of EXTF or
return;
}
InferInfo iiPrefixConf;
- iiPrefixConf.d_id = Inference::PREFIX_CONFLICT;
+ iiPrefixConf.d_id = InferenceId::STRINGS_PREFIX_CONFLICT;
iiPrefixConf.d_conc = d_false;
utils::flattenOp(AND, conf, iiPrefixConf.d_premises);
setPendingConflict(iiPrefixConf);
if (!d_state.areEqual(cc, vc))
{
std::vector<Node> emptyVec;
- d_im.sendInference(emptyVec, cc.eqNode(vc), Inference::CODE_PROXY);
+ d_im.sendInference(emptyVec, cc.eqNode(vc), InferenceId::STRINGS_CODE_PROXY);
}
const_codes.push_back(vc);
}
deq = Rewriter::rewrite(deq);
d_im.addPendingPhaseRequirement(deq, false);
std::vector<Node> emptyVec;
- d_im.sendInference(emptyVec, inj_lem, Inference::CODE_INJ);
+ d_im.sendInference(emptyVec, inj_lem, InferenceId::STRINGS_CODE_INJ);
}
}
}