Properly cache assertions in static learning preprocessing pass. (#7242)
authorMathias Preiner <mathias.preiner@gmail.com>
Thu, 30 Sep 2021 17:20:04 +0000 (10:20 -0700)
committerGitHub <noreply@github.com>
Thu, 30 Sep 2021 17:20:04 +0000 (17:20 +0000)
The cache is user-context-dependent and guarantees that ppStaticLearn is called only once per assertion in a given context — if assertions are popped from the context and added again, ppStaticLearn is called again on that assertion. Further, the preprocessing pass now also handles the splitting of top-level AND terms. Previously, each theory was responsible for splitting and caching; refactoring the corresponding ppStaticLearn theory methods will be part of future PRs.

Marking as do-not-merge until cluster runs are finished.

src/preprocessing/passes/static_learning.cpp
src/preprocessing/passes/static_learning.h

index 24d25e354ca28717ade377d89bde7941a46b4524..e55a8c001438daa097804c4cbb51d4d7558938aa 100644 (file)
@@ -28,19 +28,37 @@ namespace preprocessing {
 namespace passes {
 
 StaticLearning::StaticLearning(PreprocessingPassContext* preprocContext)
-    : PreprocessingPass(preprocContext, "static-learning"){};
+    : PreprocessingPass(preprocContext, "static-learning"),
+      d_cache(userContext()){};
 
 PreprocessingPassResult StaticLearning::applyInternal(
     AssertionPipeline* assertionsToPreprocess)
 {
   d_preprocContext->spendResource(Resource::PreprocessStep);
 
-  for (unsigned i = 0; i < assertionsToPreprocess->size(); ++i)
+  std::vector<TNode> toProcess;
+
+  for (size_t i = 0, size = assertionsToPreprocess->size(); i < size; ++i)
   {
+    const Node& n = (*assertionsToPreprocess)[i];
+
+    /* Already processed in this context. */
+    if (d_cache.find(n) != d_cache.end())
+    {
+      continue;
+    }
+
     NodeBuilder learned(kind::AND);
-    learned << (*assertionsToPreprocess)[i];
-    d_preprocContext->getTheoryEngine()->ppStaticLearn(
-        (*assertionsToPreprocess)[i], learned);
+    learned << n;
+
+    /* Process all assertions in nested AND terms. */
+    std::vector<TNode> assertions;
+    flattenAnd(n, assertions);
+    for (TNode a : assertions)
+    {
+      d_preprocContext->getTheoryEngine()->ppStaticLearn(a, learned);
+    }
+
     if (learned.getNumChildren() == 1)
     {
       learned.clear();
@@ -53,6 +71,30 @@ PreprocessingPassResult StaticLearning::applyInternal(
   return PreprocessingPassResult::NO_CONFLICT;
 }
 
+void StaticLearning::flattenAnd(TNode node, std::vector<TNode>& children)
+{
+  std::vector<TNode> visit = {node};
+  do
+  {
+    TNode cur = visit.back();
+    visit.pop_back();
+
+    if (d_cache.find(cur) != d_cache.end())
+    {
+      continue;
+    }
+    d_cache.insert(cur);
+
+    if (cur.getKind() == kind::AND)
+    {
+      visit.insert(visit.end(), cur.begin(), cur.end());
+    }
+    else
+    {
+      children.push_back(cur);
+    }
+  } while (!visit.empty());
+}
 
 }  // namespace passes
 }  // namespace preprocessing
index ac9ab6251cd521e758cfc66df640c876779b8f79..b44c2c86ff464c2c16e1ea92d56352e38e039086 100644 (file)
@@ -18,6 +18,7 @@
 #ifndef CVC5__PREPROCESSING__PASSES__STATIC_LEARNING_H
 #define CVC5__PREPROCESSING__PASSES__STATIC_LEARNING_H
 
+#include "context/cdhashset.h"
 #include "preprocessing/preprocessing_pass.h"
 
 namespace cvc5 {
@@ -32,6 +33,13 @@ class StaticLearning : public PreprocessingPass
  protected:
   PreprocessingPassResult applyInternal(
       AssertionPipeline* assertionsToPreprocess) override;
+
+ private:
+  /** Collect children of flattened AND term. */
+  void flattenAnd(TNode node, std::vector<TNode>& children);
+
+  /** CD-cache for visiting nodes used by `flattenAnd`. */
+  context::CDHashSet<Node> d_cache;
 };
 
 }  // namespace passes