anv: Force a full re-compile when CAPTURE_INTERNAL_REPRESENTATION_TEXT is set
author: Jason Ekstrand <jason@jlekstrand.net>
Thu, 25 Apr 2019 00:56:39 +0000 (19:56 -0500)
committer: Jason Ekstrand <jason@jlekstrand.net>
Mon, 12 Aug 2019 22:56:07 +0000 (22:56 +0000)
Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
src/intel/vulkan/anv_pipeline.c
src/intel/vulkan/anv_private.h
src/intel/vulkan/genX_pipeline.c

index c1941ecfe04d7f7495774ffe2923c551fa565c7d..1d0226676f6edaaef00b3385d6bdf4715df673c6 100644 (file)
@@ -1143,65 +1143,70 @@ anv_pipeline_compile_graphics(struct anv_pipeline *pipeline,
       memcpy(stages[s].cache_key.sha1, sha1, sizeof(sha1));
    }
 
-   unsigned found = 0;
-   unsigned cache_hits = 0;
-   for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
-      if (!stages[s].entrypoint)
-         continue;
+   const bool skip_cache_lookup =
+      (pipeline->flags & VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR);
 
-      int64_t stage_start = os_time_get_nano();
+   if (!skip_cache_lookup) {
+      unsigned found = 0;
+      unsigned cache_hits = 0;
+      for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
+         if (!stages[s].entrypoint)
+            continue;
 
-      bool cache_hit;
-      struct anv_shader_bin *bin =
-         anv_device_search_for_kernel(pipeline->device, cache,
-                                      &stages[s].cache_key,
-                                      sizeof(stages[s].cache_key), &cache_hit);
-      if (bin) {
-         found++;
-         pipeline->shaders[s] = bin;
-      }
+         int64_t stage_start = os_time_get_nano();
 
-      if (cache_hit) {
-         cache_hits++;
-         stages[s].feedback.flags |=
-            VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
-      }
-      stages[s].feedback.duration += os_time_get_nano() - stage_start;
-   }
+         bool cache_hit;
+         struct anv_shader_bin *bin =
+            anv_device_search_for_kernel(pipeline->device, cache,
+                                         &stages[s].cache_key,
+                                         sizeof(stages[s].cache_key), &cache_hit);
+         if (bin) {
+            found++;
+            pipeline->shaders[s] = bin;
+         }
 
-   if (found == __builtin_popcount(pipeline->active_stages)) {
-      if (cache_hits == found) {
-         pipeline_feedback.flags |=
-            VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
+         if (cache_hit) {
+            cache_hits++;
+            stages[s].feedback.flags |=
+               VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
+         }
+         stages[s].feedback.duration += os_time_get_nano() - stage_start;
       }
-      /* We found all our shaders in the cache.  We're done. */
-      goto done;
-   } else if (found > 0) {
-      /* We found some but not all of our shaders.  This shouldn't happen
-       * most of the time but it can if we have a partially populated
-       * pipeline cache.
-       */
-      assert(found < __builtin_popcount(pipeline->active_stages));
-
-      vk_debug_report(&pipeline->device->instance->debug_report_callbacks,
-                      VK_DEBUG_REPORT_WARNING_BIT_EXT |
-                      VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
-                      VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT,
-                      (uint64_t)(uintptr_t)cache,
-                      0, 0, "anv",
-                      "Found a partial pipeline in the cache.  This is "
-                      "most likely caused by an incomplete pipeline cache "
-                      "import or export");
-
-      /* We're going to have to recompile anyway, so just throw away our
-       * references to the shaders in the cache.  We'll get them out of the
-       * cache again as part of the compilation process.
-       */
-      for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
-         stages[s].feedback.flags = 0;
-         if (pipeline->shaders[s]) {
-            anv_shader_bin_unref(pipeline->device, pipeline->shaders[s]);
-            pipeline->shaders[s] = NULL;
+
+      if (found == __builtin_popcount(pipeline->active_stages)) {
+         if (cache_hits == found) {
+            pipeline_feedback.flags |=
+               VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT;
+         }
+         /* We found all our shaders in the cache.  We're done. */
+         goto done;
+      } else if (found > 0) {
+         /* We found some but not all of our shaders.  This shouldn't happen
+          * most of the time but it can if we have a partially populated
+          * pipeline cache.
+          */
+         assert(found < __builtin_popcount(pipeline->active_stages));
+
+         vk_debug_report(&pipeline->device->instance->debug_report_callbacks,
+                         VK_DEBUG_REPORT_WARNING_BIT_EXT |
+                         VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
+                         VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT,
+                         (uint64_t)(uintptr_t)cache,
+                         0, 0, "anv",
+                         "Found a partial pipeline in the cache.  This is "
+                         "most likely caused by an incomplete pipeline cache "
+                         "import or export");
+
+         /* We're going to have to recompile anyway, so just throw away our
+          * references to the shaders in the cache.  We'll get them out of the
+          * cache again as part of the compilation process.
+          */
+         for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
+            stages[s].feedback.flags = 0;
+            if (pipeline->shaders[s]) {
+               anv_shader_bin_unref(pipeline->device, pipeline->shaders[s]);
+               pipeline->shaders[s] = NULL;
+            }
          }
       }
    }
@@ -1434,10 +1439,18 @@ anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
 
    ANV_FROM_HANDLE(anv_pipeline_layout, layout, info->layout);
 
+   const bool skip_cache_lookup =
+      (pipeline->flags & VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR);
+
    anv_pipeline_hash_compute(pipeline, layout, &stage, stage.cache_key.sha1);
-   bool cache_hit;
-   bin = anv_device_search_for_kernel(pipeline->device, cache, &stage.cache_key,
-                                      sizeof(stage.cache_key), &cache_hit);
+
+   bool cache_hit = false;
+   if (!skip_cache_lookup) {
+      bin = anv_device_search_for_kernel(pipeline->device, cache,
+                                         &stage.cache_key,
+                                         sizeof(stage.cache_key),
+                                         &cache_hit);
+   }
 
    if (bin == NULL) {
       int64_t stage_start = os_time_get_nano();
@@ -1781,6 +1794,8 @@ anv_pipeline_init(struct anv_pipeline *pipeline,
    pipeline->batch.relocs = &pipeline->batch_relocs;
    pipeline->batch.status = VK_SUCCESS;
 
+   pipeline->flags = pCreateInfo->flags;
+
    copy_non_dynamic_state(pipeline, pCreateInfo);
    pipeline->depth_clamp_enable = pCreateInfo->pRasterizationState &&
                                   pCreateInfo->pRasterizationState->depthClampEnable;
index 6f839db94d7d1610d70cb58fd689362a314c3700..ad2de7fae76133c503be84dd9376b10627e45f54 100644 (file)
@@ -2802,6 +2802,7 @@ struct anv_pipeline {
    anv_cmd_dirty_mask_t                         dynamic_state_mask;
    struct anv_dynamic_state                     dynamic_state;
 
+   VkPipelineCreateFlags                        flags;
    struct anv_subpass *                         subpass;
 
    bool                                         needs_data_cache;
index efe18cc3ae19a18b62243e01671912a4db0ae155..0fc25f8f9ff89f659b34327286fc0f3b3acc4947 100644 (file)
@@ -2226,6 +2226,8 @@ compute_pipeline_create(
    pipeline->batch.relocs = &pipeline->batch_relocs;
    pipeline->batch.status = VK_SUCCESS;
 
+   pipeline->flags = pCreateInfo->flags;
+
    /* When we free the pipeline, we detect stages based on the NULL status
     * of various prog_data pointers.  Make them NULL by default.
     */