anv: support externally synchronized pipeline caches
author: Iván Briano <ivan.briano@intel.com>
Wed, 20 May 2020 19:30:46 +0000 (12:30 -0700)
committer: Marge Bot <eric+marge@anholt.net>
Wed, 17 Jun 2020 00:48:39 +0000 (00:48 +0000)
Implement the VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT
bits of the VK_EXT_pipeline_creation_cache_control extension.

Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/5136>
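
For context: an application that sets this flag promises to serialize all access to the
pipeline cache itself, which is what lets the driver skip its internal mutex (the
anv_cache_lock()/anv_cache_unlock() helpers added below). A minimal application-side
sketch, assuming `device` is a VkDevice created with VK_EXT_pipeline_creation_cache_control
enabled (illustrative only, not part of this change):

   VkPipelineCacheCreateInfo cache_info = {
      .sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO,
      /* Promise the driver that the application serializes access to this
       * cache, so the implementation may omit its own locking. */
      .flags = VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT,
      .initialDataSize = 0,
      .pInitialData = NULL,
   };
   VkPipelineCache cache;
   VkResult result = vkCreatePipelineCache(device, &cache_info, NULL, &cache);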

src/intel/vulkan/anv_pipeline_cache.c
src/intel/vulkan/anv_private.h

src/intel/vulkan/anv_pipeline_cache.c
index bdbca77bc0833a1c5fba508ae040518b5d902bc9..2c6ecb7fbc9ce0519716543c1a92f5667036d036 100644
@@ -340,6 +340,20 @@ anv_pipeline_cache_search_locked(struct anv_pipeline_cache *cache,
       return NULL;
 }
 
+static inline void
+anv_cache_lock(struct anv_pipeline_cache *cache)
+{
+   if (!cache->external_sync)
+      pthread_mutex_lock(&cache->mutex);
+}
+
+static inline void
+anv_cache_unlock(struct anv_pipeline_cache *cache)
+{
+   if (!cache->external_sync)
+      pthread_mutex_unlock(&cache->mutex);
+}
+
 struct anv_shader_bin *
 anv_pipeline_cache_search(struct anv_pipeline_cache *cache,
                           const void *key_data, uint32_t key_size)
@@ -347,12 +361,12 @@ anv_pipeline_cache_search(struct anv_pipeline_cache *cache,
    if (!cache->cache)
       return NULL;
 
-   pthread_mutex_lock(&cache->mutex);
+   anv_cache_lock(cache);
 
    struct anv_shader_bin *shader =
       anv_pipeline_cache_search_locked(cache, key_data, key_size);
 
-   pthread_mutex_unlock(&cache->mutex);
+   anv_cache_unlock(cache);
 
    /* We increment refcount before handing it to the caller */
    if (shader)
@@ -368,7 +382,7 @@ anv_pipeline_cache_add_shader_bin(struct anv_pipeline_cache *cache,
    if (!cache->cache)
       return;
 
-   pthread_mutex_lock(&cache->mutex);
+   anv_cache_lock(cache);
 
    struct hash_entry *entry = _mesa_hash_table_search(cache->cache, bin->key);
    if (entry == NULL) {
@@ -377,7 +391,7 @@ anv_pipeline_cache_add_shader_bin(struct anv_pipeline_cache *cache,
       _mesa_hash_table_insert(cache->cache, bin->key, bin);
    }
 
-   pthread_mutex_unlock(&cache->mutex);
+   anv_cache_unlock(cache);
 }
 
 static struct anv_shader_bin *
@@ -430,7 +444,7 @@ anv_pipeline_cache_upload_kernel(struct anv_pipeline_cache *cache,
                                  const struct anv_pipeline_bind_map *bind_map)
 {
    if (cache->cache) {
-      pthread_mutex_lock(&cache->mutex);
+      anv_cache_lock(cache);
 
       struct anv_shader_bin *bin =
          anv_pipeline_cache_add_shader_locked(cache, stage, key_data, key_size,
@@ -440,7 +454,7 @@ anv_pipeline_cache_upload_kernel(struct anv_pipeline_cache *cache,
                                               stats, num_stats,
                                               xfb_info, bind_map);
 
-      pthread_mutex_unlock(&cache->mutex);
+      anv_cache_unlock(cache);
 
       /* We increment refcount before handing it to the caller */
       if (bin)
@@ -524,6 +538,9 @@ VkResult anv_CreatePipelineCache(
    if (cache == NULL)
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 
+   cache->external_sync =
+      (pCreateInfo->flags & VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT);
+
    anv_pipeline_cache_init(cache, device,
                            device->physical->instance->pipeline_cache_enabled);
 
@@ -757,12 +774,12 @@ anv_device_search_for_nir(struct anv_device *device,
    if (cache && cache->nir_cache) {
       const struct serialized_nir *snir = NULL;
 
-      pthread_mutex_lock(&cache->mutex);
+      anv_cache_lock(cache);
       struct hash_entry *entry =
          _mesa_hash_table_search(cache->nir_cache, sha1_key);
       if (entry)
          snir = entry->data;
-      pthread_mutex_unlock(&cache->mutex);
+      anv_cache_unlock(cache);
 
       if (snir) {
          struct blob_reader blob;
@@ -787,10 +804,10 @@ anv_device_upload_nir(struct anv_device *device,
                       unsigned char sha1_key[20])
 {
    if (cache && cache->nir_cache) {
-      pthread_mutex_lock(&cache->mutex);
+      anv_cache_lock(cache);
       struct hash_entry *entry =
          _mesa_hash_table_search(cache->nir_cache, sha1_key);
-      pthread_mutex_unlock(&cache->mutex);
+      anv_cache_unlock(cache);
       if (entry)
          return;
 
@@ -803,7 +820,7 @@ anv_device_upload_nir(struct anv_device *device,
          return;
       }
 
-      pthread_mutex_lock(&cache->mutex);
+      anv_cache_lock(cache);
       /* Because ralloc isn't thread-safe, we have to do all this inside the
        * lock.  We could unlock for the big memcpy but it's probably not worth
        * the hassle.
@@ -811,7 +828,7 @@ anv_device_upload_nir(struct anv_device *device,
       entry = _mesa_hash_table_search(cache->nir_cache, sha1_key);
       if (entry) {
          blob_finish(&blob);
-         pthread_mutex_unlock(&cache->mutex);
+         anv_cache_unlock(cache);
          return;
       }
 
@@ -825,6 +842,6 @@ anv_device_upload_nir(struct anv_device *device,
 
       _mesa_hash_table_insert(cache->nir_cache, snir->sha1_key, snir);
 
-      pthread_mutex_unlock(&cache->mutex);
+      anv_cache_unlock(cache);
    }
 }
src/intel/vulkan/anv_private.h
index 56fadcd8e0a5ea5256654025f23ee088a169d55f..884396089ee831698d90ea8057173af1ba7a26ca 100644
@@ -1241,6 +1241,8 @@ struct anv_pipeline_cache {
    struct hash_table *                          nir_cache;
 
    struct hash_table *                          cache;
+
+   bool                                         external_sync;
 };
 
 struct nir_xfb_info;