      return NULL;
}
+static inline void
+anv_cache_lock(struct anv_pipeline_cache *cache)
+{
+   if (!cache->external_sync)
+      pthread_mutex_lock(&cache->mutex);
+}
+
+static inline void
+anv_cache_unlock(struct anv_pipeline_cache *cache)
+{
+   if (!cache->external_sync)
+      pthread_mutex_unlock(&cache->mutex);
+}
+
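(The matching header change isn't part of this excerpt; presumably struct anv_pipeline_cache in anv_private.h grows a flag along these lines. This is a sketch under that assumption, not the actual hunk, and only the members referenced elsewhere in the patch are listed:)

struct anv_pipeline_cache {
   pthread_mutex_t mutex;          /* taken/released by the helpers above */
   struct hash_table *cache;       /* shader-bin cache used below */
   struct hash_table *nir_cache;   /* serialized-NIR cache used below */
   /* Assumed new field: set from
    * VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT at creation;
    * when true, anv_cache_lock()/anv_cache_unlock() skip the mutex.
    */
   bool external_sync;
};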
struct anv_shader_bin *
anv_pipeline_cache_search(struct anv_pipeline_cache *cache,
                          const void *key_data, uint32_t key_size)
   if (!cache->cache)
      return NULL;
-   pthread_mutex_lock(&cache->mutex);
+   anv_cache_lock(cache);
   struct anv_shader_bin *shader =
      anv_pipeline_cache_search_locked(cache, key_data, key_size);
-   pthread_mutex_unlock(&cache->mutex);
+   anv_cache_unlock(cache);
   /* We increment refcount before handing it to the caller */
   if (shader)
   if (!cache->cache)
      return;
-   pthread_mutex_lock(&cache->mutex);
+   anv_cache_lock(cache);
   struct hash_entry *entry = _mesa_hash_table_search(cache->cache, bin->key);
   if (entry == NULL) {
      _mesa_hash_table_insert(cache->cache, bin->key, bin);
   }
-   pthread_mutex_unlock(&cache->mutex);
+   anv_cache_unlock(cache);
}
static struct anv_shader_bin *
const struct anv_pipeline_bind_map *bind_map)
{
   if (cache->cache) {
-      pthread_mutex_lock(&cache->mutex);
+      anv_cache_lock(cache);
      struct anv_shader_bin *bin =
         anv_pipeline_cache_add_shader_locked(cache, stage, key_data, key_size,
                                              stats, num_stats,
                                              xfb_info, bind_map);
-      pthread_mutex_unlock(&cache->mutex);
+      anv_cache_unlock(cache);
      /* We increment refcount before handing it to the caller */
      if (bin)
   if (cache == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+   cache->external_sync =
+      (pCreateInfo->flags & VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT);
+
   anv_pipeline_cache_init(cache, device,
                           device->physical->instance->pipeline_cache_enabled);
   if (cache && cache->nir_cache) {
      const struct serialized_nir *snir = NULL;
-      pthread_mutex_lock(&cache->mutex);
+      anv_cache_lock(cache);
      struct hash_entry *entry =
         _mesa_hash_table_search(cache->nir_cache, sha1_key);
      if (entry)
         snir = entry->data;
-      pthread_mutex_unlock(&cache->mutex);
+      anv_cache_unlock(cache);
      if (snir) {
         struct blob_reader blob;
unsigned char sha1_key[20])
{
   if (cache && cache->nir_cache) {
-      pthread_mutex_lock(&cache->mutex);
+      anv_cache_lock(cache);
      struct hash_entry *entry =
         _mesa_hash_table_search(cache->nir_cache, sha1_key);
-      pthread_mutex_unlock(&cache->mutex);
+      anv_cache_unlock(cache);
      if (entry)
         return;
         return;
      }
-      pthread_mutex_lock(&cache->mutex);
+      anv_cache_lock(cache);
      /* Because ralloc isn't thread-safe, we have to do all this inside the
       * lock. We could unlock for the big memcpy but it's probably not worth
       * the hassle.
      entry = _mesa_hash_table_search(cache->nir_cache, sha1_key);
      if (entry) {
         blob_finish(&blob);
-         pthread_mutex_unlock(&cache->mutex);
+         anv_cache_unlock(cache);
         return;
      }
      _mesa_hash_table_insert(cache->nir_cache, snir->sha1_key, snir);
-      pthread_mutex_unlock(&cache->mutex);
+      anv_cache_unlock(cache);
   }
}
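For context (not part of the patch), here is a minimal application-side sketch of how a client opts into this path. The helper name is hypothetical; the flag comes from VK_EXT_pipeline_creation_cache_control, whose pipelineCreationCacheControl feature must be enabled on the device. By setting the bit, the application promises never to use the cache from two threads at the same time, which is what lets the driver take the anv_cache_lock()/anv_cache_unlock() fast path and skip the mutex:

#include <vulkan/vulkan.h>

/* Hypothetical helper: create a pipeline cache the driver will not lock
 * internally because the application guarantees external synchronization.
 */
static VkPipelineCache
create_externally_synced_cache(VkDevice device)
{
   const VkPipelineCacheCreateInfo info = {
      .sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO,
      .flags = VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT,
      .initialDataSize = 0,
      .pInitialData = NULL,
   };

   VkPipelineCache cache = VK_NULL_HANDLE;
   if (vkCreatePipelineCache(device, &info, NULL, &cache) != VK_SUCCESS)
      return VK_NULL_HANDLE;
   return cache;
}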