#include "compiler/blob.h"
#include "util/hash_table.h"
#include "util/debug.h"
+#include "util/disk_cache.h"
+#include "util/mesa-sha1.h"
+#include "nir/nir_serialize.h"
#include "anv_private.h"
struct anv_shader_bin *
anv_shader_bin_create(struct anv_device *device,
const void *key_data, uint32_t key_size,
const void *kernel_data, uint32_t kernel_size,
+ const void *constant_data, uint32_t constant_data_size,
const struct brw_stage_prog_data *prog_data_in,
uint32_t prog_data_size, const void *prog_data_param_in,
const struct anv_pipeline_bind_map *bind_map)
memcpy(shader->kernel.map, kernel_data, kernel_size);
shader->kernel_size = kernel_size;
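+ /* Copy any compiler-provided constant data into the dynamic state pool;
+ * ANV_STATE_NULL marks a shader with no constant data.
+ */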
+ if (constant_data_size) {
+ shader->constant_data =
+ anv_state_pool_alloc(&device->dynamic_state_pool,
+ constant_data_size, 32);
+ memcpy(shader->constant_data.map, constant_data, constant_data_size);
+ } else {
+ shader->constant_data = ANV_STATE_NULL;
+ }
+ shader->constant_data_size = constant_data_size;
+
memcpy(prog_data, prog_data_in, prog_data_size);
memcpy(prog_data_param, prog_data_param_in,
prog_data->nr_params * sizeof(*prog_data_param));
{
assert(shader->ref_cnt == 0);
anv_state_pool_free(&device->instruction_state_pool, shader->kernel);
+ anv_state_pool_free(&device->dynamic_state_pool, shader->constant_data);
vk_free(&device->alloc, shader);
}
ok = blob_write_uint32(blob, shader->kernel_size);
ok = blob_write_bytes(blob, shader->kernel.map, shader->kernel_size);
+ ok = blob_write_uint32(blob, shader->constant_data_size);
+ ok = blob_write_bytes(blob, shader->constant_data.map,
+ shader->constant_data_size);
+
ok = blob_write_uint32(blob, shader->prog_data_size);
ok = blob_write_bytes(blob, shader->prog_data, shader->prog_data_size);
ok = blob_write_bytes(blob, shader->prog_data->param,
uint32_t kernel_size = blob_read_uint32(blob);
const void *kernel_data = blob_read_bytes(blob, kernel_size);
+ uint32_t constant_data_size = blob_read_uint32(blob);
+ const void *constant_data = blob_read_bytes(blob, constant_data_size);
+
uint32_t prog_data_size = blob_read_uint32(blob);
const struct brw_stage_prog_data *prog_data =
blob_read_bytes(blob, prog_data_size);
return anv_shader_bin_create(device,
key_data, key_size,
kernel_data, kernel_size,
+ constant_data, constant_data_size,
prog_data, prog_data_size, prog_data_param,
&bind_map);
}
return memcmp(a->data, b->data, a->size) == 0;
}
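+/* NIR cache keys are raw 20-byte SHA-1 digests, so hashing and comparison
+ * operate directly on the digest bytes.
+ */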
+static uint32_t
+sha1_hash_func(const void *sha1)
+{
+ return _mesa_hash_data(sha1, 20);
+}
+
+static bool
+sha1_compare_func(const void *sha1_a, const void *sha1_b)
+{
+ return memcmp(sha1_a, sha1_b, 20) == 0;
+}
+
void
anv_pipeline_cache_init(struct anv_pipeline_cache *cache,
struct anv_device *device,
if (cache_enabled) {
cache->cache = _mesa_hash_table_create(NULL, shader_bin_key_hash_func,
shader_bin_key_compare_func);
+ cache->nir_cache = _mesa_hash_table_create(NULL, sha1_hash_func,
+ sha1_compare_func);
} else {
cache->cache = NULL;
+ cache->nir_cache = NULL;
}
}
* going away, the shader cache has to hold a reference to all shader
* binaries it contains. We unref them when we destroy the cache.
*/
- struct hash_entry *entry;
hash_table_foreach(cache->cache, entry)
anv_shader_bin_unref(cache->device, entry->data);
return shader;
}
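+/* Insert an already-created shader binary into the in-memory cache. If an
+ * entry with the same key already exists, the existing binary wins and no
+ * new reference is taken.
+ */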
+static void
+anv_pipeline_cache_add_shader_bin(struct anv_pipeline_cache *cache,
+ struct anv_shader_bin *bin)
+{
+ if (!cache->cache)
+ return;
+
+ pthread_mutex_lock(&cache->mutex);
+
+ struct hash_entry *entry = _mesa_hash_table_search(cache->cache, bin->key);
+ if (entry == NULL) {
+ /* Take a reference for the cache */
+ anv_shader_bin_ref(bin);
+ _mesa_hash_table_insert(cache->cache, bin->key, bin);
+ }
+
+ pthread_mutex_unlock(&cache->mutex);
+}
+
static struct anv_shader_bin *
-anv_pipeline_cache_add_shader(struct anv_pipeline_cache *cache,
- const void *key_data, uint32_t key_size,
- const void *kernel_data, uint32_t kernel_size,
- const struct brw_stage_prog_data *prog_data,
- uint32_t prog_data_size,
- const void *prog_data_param,
- const struct anv_pipeline_bind_map *bind_map)
+anv_pipeline_cache_add_shader_locked(struct anv_pipeline_cache *cache,
+ const void *key_data, uint32_t key_size,
+ const void *kernel_data,
+ uint32_t kernel_size,
+ const void *constant_data,
+ uint32_t constant_data_size,
+ const struct brw_stage_prog_data *prog_data,
+ uint32_t prog_data_size,
+ const void *prog_data_param,
+ const struct anv_pipeline_bind_map *bind_map)
{
struct anv_shader_bin *shader =
anv_pipeline_cache_search_locked(cache, key_data, key_size);
struct anv_shader_bin *bin =
anv_shader_bin_create(cache->device, key_data, key_size,
kernel_data, kernel_size,
+ constant_data, constant_data_size,
prog_data, prog_data_size, prog_data_param,
bind_map);
if (!bin)
anv_pipeline_cache_upload_kernel(struct anv_pipeline_cache *cache,
const void *key_data, uint32_t key_size,
const void *kernel_data, uint32_t kernel_size,
+ const void *constant_data,
+ uint32_t constant_data_size,
const struct brw_stage_prog_data *prog_data,
uint32_t prog_data_size,
const struct anv_pipeline_bind_map *bind_map)
pthread_mutex_lock(&cache->mutex);
struct anv_shader_bin *bin =
- anv_pipeline_cache_add_shader(cache, key_data, key_size,
- kernel_data, kernel_size,
- prog_data, prog_data_size,
- prog_data->param, bind_map);
+ anv_pipeline_cache_add_shader_locked(cache, key_data, key_size,
+ kernel_data, kernel_size,
+ constant_data, constant_data_size,
+ prog_data, prog_data_size,
+ prog_data->param, bind_map);
pthread_mutex_unlock(&cache->mutex);
/* In this case, we're not caching it so the caller owns it entirely */
return anv_shader_bin_create(cache->device, key_data, key_size,
kernel_data, kernel_size,
+ constant_data, constant_data_size,
prog_data, prog_data_size,
prog_data->param, bind_map);
}
}
}
-static bool
-pipeline_cache_enabled()
-{
- static int enabled = -1;
- if (enabled < 0)
- enabled = env_var_as_boolean("ANV_ENABLE_PIPELINE_CACHE", true);
- return enabled;
-}
-
VkResult anv_CreatePipelineCache(
VkDevice _device,
const VkPipelineCacheCreateInfo* pCreateInfo,
if (cache == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
- anv_pipeline_cache_init(cache, device, pipeline_cache_enabled());
+ anv_pipeline_cache_init(cache, device,
+ device->instance->pipeline_cache_enabled);
if (pCreateInfo->initialDataSize > 0)
anv_pipeline_cache_load(cache,
blob_write_bytes(&blob, &header, sizeof(header));
uint32_t count = 0;
- ssize_t count_offset = blob_reserve_uint32(&blob);
+ intptr_t count_offset = blob_reserve_uint32(&blob);
if (count_offset < 0) {
*pDataSize = 0;
blob_finish(&blob);
VkResult result = VK_SUCCESS;
if (cache->cache) {
- struct hash_entry *entry;
hash_table_foreach(cache->cache, entry) {
struct anv_shader_bin *shader = entry->data;
if (!src->cache)
continue;
- struct hash_entry *entry;
hash_table_foreach(src->cache, entry) {
struct anv_shader_bin *bin = entry->data;
assert(bin);
return VK_SUCCESS;
}
+
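+/* Look for a compiled kernel, first in the app-supplied pipeline cache (if
+ * any) and then in the on-disk shader cache. A disk hit is deserialized and
+ * added back to the in-memory cache so later lookups stay cheap.
+ */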
+struct anv_shader_bin *
+anv_device_search_for_kernel(struct anv_device *device,
+ struct anv_pipeline_cache *cache,
+ const void *key_data, uint32_t key_size)
+{
+ struct anv_shader_bin *bin;
+
+ if (cache) {
+ bin = anv_pipeline_cache_search(cache, key_data, key_size);
+ if (bin)
+ return bin;
+ }
+
+#ifdef ENABLE_SHADER_CACHE
+ struct disk_cache *disk_cache = device->instance->physicalDevice.disk_cache;
+ if (disk_cache && device->instance->pipeline_cache_enabled) {
+ cache_key cache_key;
+ disk_cache_compute_key(disk_cache, key_data, key_size, cache_key);
+
+ size_t buffer_size;
+ uint8_t *buffer = disk_cache_get(disk_cache, cache_key, &buffer_size);
+ if (buffer) {
+ struct blob_reader blob;
+ blob_reader_init(&blob, buffer, buffer_size);
+ bin = anv_shader_bin_create_from_blob(device, &blob);
+ free(buffer);
+
+ if (bin) {
+ if (cache)
+ anv_pipeline_cache_add_shader_bin(cache, bin);
+ return bin;
+ }
+ }
+ }
+#endif
+
+ return NULL;
+}
+
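+/* Create a shader binary, adding it to the in-memory pipeline cache when one
+ * is provided, and mirror the serialized form out to the on-disk cache.
+ */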
+struct anv_shader_bin *
+anv_device_upload_kernel(struct anv_device *device,
+ struct anv_pipeline_cache *cache,
+ const void *key_data, uint32_t key_size,
+ const void *kernel_data, uint32_t kernel_size,
+ const void *constant_data,
+ uint32_t constant_data_size,
+ const struct brw_stage_prog_data *prog_data,
+ uint32_t prog_data_size,
+ const struct anv_pipeline_bind_map *bind_map)
+{
+ struct anv_shader_bin *bin;
+ if (cache) {
+ bin = anv_pipeline_cache_upload_kernel(cache, key_data, key_size,
+ kernel_data, kernel_size,
+ constant_data, constant_data_size,
+ prog_data, prog_data_size,
+ bind_map);
+ } else {
+ bin = anv_shader_bin_create(device, key_data, key_size,
+ kernel_data, kernel_size,
+ constant_data, constant_data_size,
+ prog_data, prog_data_size,
+ prog_data->param, bind_map);
+ }
+
+ if (bin == NULL)
+ return NULL;
+
+#ifdef ENABLE_SHADER_CACHE
+ struct disk_cache *disk_cache = device->instance->physicalDevice.disk_cache;
+ if (disk_cache) {
+ struct blob binary;
+ blob_init(&binary);
+ anv_shader_bin_write_to_blob(bin, &binary);
+
+ if (!binary.out_of_memory) {
+ cache_key cache_key;
+ disk_cache_compute_key(disk_cache, key_data, key_size, cache_key);
+
+ disk_cache_put(disk_cache, cache_key, binary.data, binary.size, NULL);
+ }
+
+ blob_finish(&binary);
+ }
+#endif
+
+ return bin;
+}
+
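+/* NIR cache entries are stored as a single ralloc'd block: the SHA-1 key
+ * and size, followed inline by the nir_serialize()'d bytes.
+ */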
+struct serialized_nir {
+ unsigned char sha1_key[20];
+ size_t size;
+ char data[0];
+};
+
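+/* Look up previously-serialized NIR by SHA-1 and deserialize it into
+ * mem_ctx. Returns NULL on a cache miss or if the blob is corrupt.
+ */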
+struct nir_shader *
+anv_device_search_for_nir(struct anv_device *device,
+ struct anv_pipeline_cache *cache,
+ const nir_shader_compiler_options *nir_options,
+ unsigned char sha1_key[20],
+ void *mem_ctx)
+{
+ if (cache && cache->nir_cache) {
+ const struct serialized_nir *snir = NULL;
+
+ pthread_mutex_lock(&cache->mutex);
+ struct hash_entry *entry =
+ _mesa_hash_table_search(cache->nir_cache, sha1_key);
+ if (entry)
+ snir = entry->data;
+ pthread_mutex_unlock(&cache->mutex);
+
+ if (snir) {
+ struct blob_reader blob;
+ blob_reader_init(&blob, snir->data, snir->size);
+
+ nir_shader *nir = nir_deserialize(mem_ctx, nir_options, &blob);
+ if (blob.overrun) {
+ ralloc_free(nir);
+ } else {
+ return nir;
+ }
+ }
+ }
+
+ return NULL;
+}
+
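+/* Serialize NIR into the cache, keyed by SHA-1. The hash table is checked
+ * again after serializing because the mutex is dropped in between; the
+ * first copy inserted wins.
+ */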
+void
+anv_device_upload_nir(struct anv_device *device,
+ struct anv_pipeline_cache *cache,
+ const struct nir_shader *nir,
+ unsigned char sha1_key[20])
+{
+ if (cache && cache->nir_cache) {
+ pthread_mutex_lock(&cache->mutex);
+ struct hash_entry *entry =
+ _mesa_hash_table_search(cache->nir_cache, sha1_key);
+ pthread_mutex_unlock(&cache->mutex);
+ if (entry)
+ return;
+
+ struct blob blob;
+ blob_init(&blob);
+
+ nir_serialize(&blob, nir);
+ if (blob.out_of_memory) {
+ blob_finish(&blob);
+ return;
+ }
+
+ pthread_mutex_lock(&cache->mutex);
+ /* Because ralloc isn't thread-safe, we have to do all this inside the
+ * lock. We could unlock for the big memcpy but it's probably not worth
+ * the hassle.
+ */
+ entry = _mesa_hash_table_search(cache->nir_cache, sha1_key);
+ if (entry) {
+ /* Someone else inserted this NIR while we were serializing; don't
+ * leak the blob's heap allocation.
+ */
+ blob_finish(&blob);
+ pthread_mutex_unlock(&cache->mutex);
+ return;
+ }
+
+ struct serialized_nir *snir =
+ ralloc_size(cache->nir_cache, sizeof(*snir) + blob.size);
+ memcpy(snir->sha1_key, sha1_key, 20);
+ snir->size = blob.size;
+ memcpy(snir->data, blob.data, blob.size);
+
+ /* The serialized bytes now live in snir, so the blob can be freed */
+ blob_finish(&blob);
+
+ _mesa_hash_table_insert(cache->nir_cache, snir->sha1_key, snir);
+
+ pthread_mutex_unlock(&cache->mutex);
+ }
+}