return false;
}
-bool DatatypesInference::process(TheoryInferenceManager* im)
+bool DatatypesInference::process(TheoryInferenceManager* im, bool asLemma)
{
// check to see if we have to communicate it to the rest of the system
if (mustCommunicateFact(d_conc, d_exp))
InferenceManager::InferenceManager(Theory& t,
TheoryState& state,
ProofNodeManager* pnm)
    : InferenceManagerBuffered(t, state, pnm)
{
}
{
}
-bool SimpleTheoryLemma::process(TheoryInferenceManager* im)
+bool SimpleTheoryLemma::process(TheoryInferenceManager* im, bool asLemma)
{
Assert(!d_node.isNull());
+ Assert(asLemma);
// send (trusted) lemma on the output channel with property p
return im->trustedLemma(TrustNode::mkTrustLemma(d_node, d_pg), d_property);
}
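// Illustration (not part of this change): a simplified sketch of how a
// buffered inference manager is expected to invoke process on its queues.
// The member names d_pendingLem and d_pendingFact are assumptions made for
// this sketch; the point is only the asLemma calling convention.
void InferenceManagerBuffered::doPendingLemmas()
{
  for (std::unique_ptr<TheoryInference>& inf : d_pendingLem)
  {
    // inferences buffered as lemmas are processed with asLemma = true
    inf->process(this, true);
  }
  d_pendingLem.clear();
}
void InferenceManagerBuffered::doPendingFacts()
{
  for (std::unique_ptr<TheoryInference>& inf : d_pendingFact)
  {
    // inferences buffered as facts are processed with asLemma = false
    inf->process(this, false);
  }
  d_pendingFact.clear();
}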
{
}
-bool SimpleTheoryInternalFact::process(TheoryInferenceManager* im)
+bool SimpleTheoryInternalFact::process(TheoryInferenceManager* im, bool asLemma)
{
+ Assert(!asLemma);
bool polarity = d_conc.getKind() != NOT;
TNode atom = polarity ? d_conc : d_conc[0];
// no double negation or conjunctive conclusions
* method should make call(s) to inference manager to process this
* inference, as well as processing any specific side effects. This method
* typically makes a single call to the provided inference manager e.g.
- * d_im->trustedLemma or d_im->assertFactInternal. Notice it is the sole
+ * d_im->trustedLemma or d_im->assertInternalFact. Notice it is the sole
* responsibility of this class to make this call; the inference manager
* does not call itself otherwise when processing pending inferences.
*
+ * @param im The inference manager to use
+ * @param asLemma Whether this inference is being processed as a lemma
+ * during doPendingLemmas (otherwise it is being processed in doPendingFacts).
+ * Typically, this method calls lemma or conflict when asLemma is
+ * true, and assertInternalFact when this flag is false, although this
+ * behavior is (intentionally) not strictly enforced. In particular, the
+ * choice to send a conflict, lemma or fact may depend on local state of the
+ * theory, which may change while the inference is pending. Hence the
+ * implementation of this method may choose to make any call to the
+ * inference manager. This flag simply serves to track whether the inference
+ * was initially added as a pending lemma or not.
* @return true if the inference was successfully processed by the inference
* manager. This method for instance returns false if it corresponds to a
* lemma that was already cached by im and hence was discarded.
*/
- virtual bool process(TheoryInferenceManager* im) = 0;
+ virtual bool process(TheoryInferenceManager* im, bool asLemma) = 0;
};
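// Illustration (not part of this change): a hypothetical TheoryInference
// subclass that branches on asLemma, as permitted by the contract documented
// above. The assertInternalFact(atom, polarity, exp) overload used below is
// an assumption for this sketch; only trustedLemma appears verbatim in this
// patch.
class ExampleTheoryInference : public TheoryInference
{
 public:
  ExampleTheoryInference(Node conc, Node exp) : d_conc(conc), d_exp(exp) {}
  bool process(TheoryInferenceManager* im, bool asLemma) override
  {
    if (asLemma)
    {
      // processed from doPendingLemmas: send (exp => conc) as a trusted lemma
      Node lem = NodeManager::currentNM()->mkNode(IMPLIES, d_exp, d_conc);
      return im->trustedLemma(TrustNode::mkTrustLemma(lem, nullptr),
                              LemmaProperty::NONE);
    }
    // processed from doPendingFacts: assert the (possibly negated) atom as an
    // internal fact, mirroring SimpleTheoryInternalFact above
    bool polarity = d_conc.getKind() != NOT;
    TNode atom = polarity ? d_conc : d_conc[0];
    im->assertInternalFact(atom, polarity, d_exp);
    return true;
  }

 private:
  /** The conclusion */
  Node d_conc;
  /** The explanation */
  Node d_exp;
};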
/**
virtual ~SimpleTheoryLemma() {}
/**
* Send the lemma using inference manager im, return true if the lemma was
- * sent.
+ * sent. It should be the case that asLemma = true or an assertion failure
+ * is thrown.
*/
- virtual bool process(TheoryInferenceManager* im) override;
+ virtual bool process(TheoryInferenceManager* im, bool asLemma) override;
/** The lemma to send */
Node d_node;
/** The lemma property (see OutputChannel::lemma) */
/**
* A simple internal fact with no side effects. Makes a single call to
- * assertFactInternal in its process method.
+ * assertInternalFact in its process method.
*/
class SimpleTheoryInternalFact : public TheoryInference
{
virtual ~SimpleTheoryInternalFact() {}
/**
 * Send the fact using inference manager im, return true if the fact was
- * sent.
+ * sent. It should be the case that asLemma = false or an assertion failure
+ * is thrown.
*/
- virtual bool process(TheoryInferenceManager* im) override;
+ virtual bool process(TheoryInferenceManager* im, bool asLemma) override;
/** The fact to send */
Node d_conc;
/** The explanation */