From 13f44596d72d1d0e0651e6ccd71831dcc2b8a3e5 Mon Sep 17 00:00:00 2001
From: =?utf8?q?Iv=C3=A1n=20Briano?=
Date: Wed, 20 May 2020 12:30:46 -0700
Subject: [PATCH] anv: support externally synchronized pipeline caches

Implement the VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT
bits of the VK_EXT_pipeline_creation_cache_control extension.

Reviewed-by: Lionel Landwerlin
Part-of:
---
 src/intel/vulkan/anv_pipeline_cache.c | 43 +++++++++++++++++++--------
 src/intel/vulkan/anv_private.h        |  2 ++
 2 files changed, 32 insertions(+), 13 deletions(-)

diff --git a/src/intel/vulkan/anv_pipeline_cache.c b/src/intel/vulkan/anv_pipeline_cache.c
index bdbca77bc08..2c6ecb7fbc9 100644
--- a/src/intel/vulkan/anv_pipeline_cache.c
+++ b/src/intel/vulkan/anv_pipeline_cache.c
@@ -340,6 +340,20 @@ anv_pipeline_cache_search_locked(struct anv_pipeline_cache *cache,
    return NULL;
 }
 
+static inline void
+anv_cache_lock(struct anv_pipeline_cache *cache)
+{
+   if (!cache->external_sync)
+      pthread_mutex_lock(&cache->mutex);
+}
+
+static inline void
+anv_cache_unlock(struct anv_pipeline_cache *cache)
+{
+   if (!cache->external_sync)
+      pthread_mutex_unlock(&cache->mutex);
+}
+
 struct anv_shader_bin *
 anv_pipeline_cache_search(struct anv_pipeline_cache *cache,
                           const void *key_data, uint32_t key_size)
@@ -347,12 +361,12 @@ anv_pipeline_cache_search(struct anv_pipeline_cache *cache,
    if (!cache->cache)
       return NULL;
 
-   pthread_mutex_lock(&cache->mutex);
+   anv_cache_lock(cache);
 
    struct anv_shader_bin *shader =
       anv_pipeline_cache_search_locked(cache, key_data, key_size);
 
-   pthread_mutex_unlock(&cache->mutex);
+   anv_cache_unlock(cache);
 
    /* We increment refcount before handing it to the caller */
    if (shader)
@@ -368,7 +382,7 @@ anv_pipeline_cache_add_shader_bin(struct anv_pipeline_cache *cache,
    if (!cache->cache)
       return;
 
-   pthread_mutex_lock(&cache->mutex);
+   anv_cache_lock(cache);
 
    struct hash_entry *entry = _mesa_hash_table_search(cache->cache, bin->key);
    if (entry == NULL) {
@@ -377,7 +391,7 @@ anv_pipeline_cache_add_shader_bin(struct anv_pipeline_cache *cache,
       _mesa_hash_table_insert(cache->cache, bin->key, bin);
    }
 
-   pthread_mutex_unlock(&cache->mutex);
+   anv_cache_unlock(cache);
 }
 
 static struct anv_shader_bin *
@@ -430,7 +444,7 @@ anv_pipeline_cache_upload_kernel(struct anv_pipeline_cache *cache,
                                  const struct anv_pipeline_bind_map *bind_map)
 {
    if (cache->cache) {
-      pthread_mutex_lock(&cache->mutex);
+      anv_cache_lock(cache);
 
       struct anv_shader_bin *bin =
          anv_pipeline_cache_add_shader_locked(cache, stage, key_data, key_size,
@@ -440,7 +454,7 @@ anv_pipeline_cache_upload_kernel(struct anv_pipeline_cache *cache,
                                               stats, num_stats, xfb_info,
                                               bind_map);
 
-      pthread_mutex_unlock(&cache->mutex);
+      anv_cache_unlock(cache);
 
       /* We increment refcount before handing it to the caller */
       if (bin)
@@ -524,6 +538,9 @@ VkResult anv_CreatePipelineCache(
    if (cache == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 
+   cache->external_sync =
+      (pCreateInfo->flags & VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT);
+
    anv_pipeline_cache_init(cache, device,
                            device->physical->instance->pipeline_cache_enabled);
 
@@ -757,12 +774,12 @@ anv_device_search_for_nir(struct anv_device *device,
    if (cache && cache->nir_cache) {
       const struct serialized_nir *snir = NULL;
 
-      pthread_mutex_lock(&cache->mutex);
+      anv_cache_lock(cache);
       struct hash_entry *entry =
          _mesa_hash_table_search(cache->nir_cache, sha1_key);
       if (entry)
          snir = entry->data;
-      pthread_mutex_unlock(&cache->mutex);
+      anv_cache_unlock(cache);
 
       if (snir) {
          struct blob_reader blob;
@@ -787,10 +804,10 @@ anv_device_upload_nir(struct anv_device *device,
                       unsigned char sha1_key[20])
 {
    if (cache && cache->nir_cache) {
-      pthread_mutex_lock(&cache->mutex);
+      anv_cache_lock(cache);
       struct hash_entry *entry =
          _mesa_hash_table_search(cache->nir_cache, sha1_key);
-      pthread_mutex_unlock(&cache->mutex);
+      anv_cache_unlock(cache);
       if (entry)
          return;
 
@@ -803,7 +820,7 @@ anv_device_upload_nir(struct anv_device *device,
          return;
       }
 
-      pthread_mutex_lock(&cache->mutex);
+      anv_cache_lock(cache);
       /* Because ralloc isn't thread-safe, we have to do all this inside the
        * lock. We could unlock for the big memcpy but it's probably not worth
        * the hassle.
@@ -811,7 +828,7 @@ anv_device_upload_nir(struct anv_device *device,
       entry = _mesa_hash_table_search(cache->nir_cache, sha1_key);
       if (entry) {
          blob_finish(&blob);
-         pthread_mutex_unlock(&cache->mutex);
+         anv_cache_unlock(cache);
          return;
       }
 
@@ -825,6 +842,6 @@ anv_device_upload_nir(struct anv_device *device,
 
       _mesa_hash_table_insert(cache->nir_cache, snir->sha1_key, snir);
 
-      pthread_mutex_unlock(&cache->mutex);
+      anv_cache_unlock(cache);
    }
 }
diff --git a/src/intel/vulkan/anv_private.h b/src/intel/vulkan/anv_private.h
index 56fadcd8e0a..884396089ee 100644
--- a/src/intel/vulkan/anv_private.h
+++ b/src/intel/vulkan/anv_private.h
@@ -1241,6 +1241,8 @@ struct anv_pipeline_cache {
    struct hash_table *                          nir_cache;
 
    struct hash_table *                          cache;
+
+   bool                                         external_sync;
 };
 
 struct nir_xfb_info;
-- 
2.30.2
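
For readers of this patch, here is a minimal, hypothetical sketch of the application side: creating a VkPipelineCache with the externally-synchronized flag so that the driver path above (anv_cache_lock/anv_cache_unlock) skips its internal mutex. This is illustrative C against the public Vulkan API, not part of the change; it assumes the device was created with the pipelineCreationCacheControl feature from VK_EXT_pipeline_creation_cache_control enabled, and the helper name is made up for the example.

#include <vulkan/vulkan.h>

/* Sketch: create a pipeline cache that the application promises to
 * synchronize itself. With the flag set, anv leaves external_sync true
 * and never takes cache->mutex on this cache. Assumes `device` has the
 * pipelineCreationCacheControl feature enabled. */
static VkPipelineCache
create_externally_synced_cache(VkDevice device)
{
   const VkPipelineCacheCreateInfo info = {
      .sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO,
      .flags = VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT,
   };

   VkPipelineCache cache = VK_NULL_HANDLE;
   if (vkCreatePipelineCache(device, &info, NULL, &cache) != VK_SUCCESS)
      return VK_NULL_HANDLE;

   /* From here on the application must guarantee the cache is never
    * accessed from two threads at once, e.g. one cache per thread or
    * an application-level lock around pipeline creation. */
   return cache;
}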