diff --git a/src/intel/vulkan/anv_pipeline_cache.c b/src/intel/vulkan/anv_pipeline_cache.c
index 0b260528f81..e1d48b879b0 100644
--- a/src/intel/vulkan/anv_pipeline_cache.c
+++ b/src/intel/vulkan/anv_pipeline_cache.c
@@ -21,9 +21,208 @@
  * IN THE SOFTWARE.
  */
 
-#include "util/mesa-sha1.h"
+#include "util/blob.h"
+#include "util/hash_table.h"
 #include "util/debug.h"
+#include "util/disk_cache.h"
+#include "util/mesa-sha1.h"
+#include "nir/nir_serialize.h"
 #include "anv_private.h"
+#include "nir/nir_xfb_info.h"
+
+struct anv_shader_bin *
+anv_shader_bin_create(struct anv_device *device,
+                      const void *key_data, uint32_t key_size,
+                      const void *kernel_data, uint32_t kernel_size,
+                      const void *constant_data, uint32_t constant_data_size,
+                      const struct brw_stage_prog_data *prog_data_in,
+                      uint32_t prog_data_size, const void *prog_data_param_in,
+                      const struct brw_compile_stats *stats, uint32_t num_stats,
+                      const nir_xfb_info *xfb_info_in,
+                      const struct anv_pipeline_bind_map *bind_map)
+{
+   struct anv_shader_bin *shader;
+   struct anv_shader_bin_key *key;
+   struct brw_stage_prog_data *prog_data;
+   uint32_t *prog_data_param;
+   nir_xfb_info *xfb_info;
+   struct anv_pipeline_binding *surface_to_descriptor, *sampler_to_descriptor;
+
+   ANV_MULTIALLOC(ma);
+   anv_multialloc_add(&ma, &shader, 1);
+   anv_multialloc_add_size(&ma, &key, sizeof(*key) + key_size);
+   anv_multialloc_add_size(&ma, &prog_data, prog_data_size);
+   anv_multialloc_add(&ma, &prog_data_param, prog_data_in->nr_params);
+   if (xfb_info_in) {
+      uint32_t xfb_info_size = nir_xfb_info_size(xfb_info_in->output_count);
+      anv_multialloc_add_size(&ma, &xfb_info, xfb_info_size);
+   }
+   anv_multialloc_add(&ma, &surface_to_descriptor,
+                      bind_map->surface_count);
+   anv_multialloc_add(&ma, &sampler_to_descriptor,
+                      bind_map->sampler_count);
+
+   if (!anv_multialloc_alloc(&ma, &device->alloc,
+                             VK_SYSTEM_ALLOCATION_SCOPE_DEVICE))
+      return NULL;
+
+   shader->ref_cnt = 1;
+
+   key->size = key_size;
+   memcpy(key->data, key_data, key_size);
+   shader->key = key;
+
+   shader->kernel =
+      anv_state_pool_alloc(&device->instruction_state_pool, kernel_size, 64);
+   memcpy(shader->kernel.map, kernel_data, kernel_size);
+   shader->kernel_size = kernel_size;
+
+   if (constant_data_size) {
+      shader->constant_data =
+         anv_state_pool_alloc(&device->dynamic_state_pool,
+                              constant_data_size, 32);
+      memcpy(shader->constant_data.map, constant_data, constant_data_size);
+   } else {
+      shader->constant_data = ANV_STATE_NULL;
+   }
+   shader->constant_data_size = constant_data_size;
+
+   memcpy(prog_data, prog_data_in, prog_data_size);
+   memcpy(prog_data_param, prog_data_param_in,
+          prog_data->nr_params * sizeof(*prog_data_param));
+   prog_data->param = prog_data_param;
+   shader->prog_data = prog_data;
+   shader->prog_data_size = prog_data_size;
+
+   assert(num_stats <= ARRAY_SIZE(shader->stats));
+   typed_memcpy(shader->stats, stats, num_stats);
+   shader->num_stats = num_stats;
+
+   if (xfb_info_in) {
+      *xfb_info = *xfb_info_in;
+      typed_memcpy(xfb_info->outputs, xfb_info_in->outputs,
+                   xfb_info_in->output_count);
+      shader->xfb_info = xfb_info;
+   } else {
+      shader->xfb_info = NULL;
+   }
+
+   shader->bind_map = *bind_map;
+   typed_memcpy(surface_to_descriptor, bind_map->surface_to_descriptor,
+                bind_map->surface_count);
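+   /* Note: shader->bind_map was copied by value above, so its table
+    * pointers still reference the caller's (possibly transient) arrays;
+    * the assignments below repoint them at the copies that live inside
+    * this multialloc'd block.
+    */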
+ shader->bind_map.surface_to_descriptor = surface_to_descriptor; + typed_memcpy(sampler_to_descriptor, bind_map->sampler_to_descriptor, + bind_map->sampler_count); + shader->bind_map.sampler_to_descriptor = sampler_to_descriptor; + + return shader; +} + +void +anv_shader_bin_destroy(struct anv_device *device, + struct anv_shader_bin *shader) +{ + assert(shader->ref_cnt == 0); + anv_state_pool_free(&device->instruction_state_pool, shader->kernel); + anv_state_pool_free(&device->dynamic_state_pool, shader->constant_data); + vk_free(&device->alloc, shader); +} + +static bool +anv_shader_bin_write_to_blob(const struct anv_shader_bin *shader, + struct blob *blob) +{ + blob_write_uint32(blob, shader->key->size); + blob_write_bytes(blob, shader->key->data, shader->key->size); + + blob_write_uint32(blob, shader->kernel_size); + blob_write_bytes(blob, shader->kernel.map, shader->kernel_size); + + blob_write_uint32(blob, shader->constant_data_size); + blob_write_bytes(blob, shader->constant_data.map, + shader->constant_data_size); + + blob_write_uint32(blob, shader->prog_data_size); + blob_write_bytes(blob, shader->prog_data, shader->prog_data_size); + blob_write_bytes(blob, shader->prog_data->param, + shader->prog_data->nr_params * + sizeof(*shader->prog_data->param)); + + blob_write_uint32(blob, shader->num_stats); + blob_write_bytes(blob, shader->stats, + shader->num_stats * sizeof(shader->stats[0])); + + if (shader->xfb_info) { + uint32_t xfb_info_size = + nir_xfb_info_size(shader->xfb_info->output_count); + blob_write_uint32(blob, xfb_info_size); + blob_write_bytes(blob, shader->xfb_info, xfb_info_size); + } else { + blob_write_uint32(blob, 0); + } + + blob_write_uint32(blob, shader->bind_map.surface_count); + blob_write_uint32(blob, shader->bind_map.sampler_count); + blob_write_bytes(blob, shader->bind_map.surface_to_descriptor, + shader->bind_map.surface_count * + sizeof(*shader->bind_map.surface_to_descriptor)); + blob_write_bytes(blob, shader->bind_map.sampler_to_descriptor, + shader->bind_map.sampler_count * + sizeof(*shader->bind_map.sampler_to_descriptor)); + + return !blob->out_of_memory; +} + +static struct anv_shader_bin * +anv_shader_bin_create_from_blob(struct anv_device *device, + struct blob_reader *blob) +{ + uint32_t key_size = blob_read_uint32(blob); + const void *key_data = blob_read_bytes(blob, key_size); + + uint32_t kernel_size = blob_read_uint32(blob); + const void *kernel_data = blob_read_bytes(blob, kernel_size); + + uint32_t constant_data_size = blob_read_uint32(blob); + const void *constant_data = blob_read_bytes(blob, constant_data_size); + + uint32_t prog_data_size = blob_read_uint32(blob); + const struct brw_stage_prog_data *prog_data = + blob_read_bytes(blob, prog_data_size); + if (blob->overrun) + return NULL; + const void *prog_data_param = + blob_read_bytes(blob, prog_data->nr_params * sizeof(*prog_data->param)); + + uint32_t num_stats = blob_read_uint32(blob); + const struct brw_compile_stats *stats = + blob_read_bytes(blob, num_stats * sizeof(stats[0])); + + const nir_xfb_info *xfb_info = NULL; + uint32_t xfb_size = blob_read_uint32(blob); + if (xfb_size) + xfb_info = blob_read_bytes(blob, xfb_size); + + struct anv_pipeline_bind_map bind_map; + bind_map.surface_count = blob_read_uint32(blob); + bind_map.sampler_count = blob_read_uint32(blob); + bind_map.surface_to_descriptor = (void *) + blob_read_bytes(blob, bind_map.surface_count * + sizeof(*bind_map.surface_to_descriptor)); + bind_map.sampler_to_descriptor = (void *) + blob_read_bytes(blob, 
bind_map.sampler_count * + sizeof(*bind_map.sampler_to_descriptor)); + + if (blob->overrun) + return NULL; + + return anv_shader_bin_create(device, + key_data, key_size, + kernel_data, kernel_size, + constant_data, constant_data_size, + prog_data, prog_data_size, prog_data_param, + stats, num_stats, xfb_info, &bind_map); +} /* Remaining work: * @@ -32,216 +231,213 @@ * * - Review prog_data struct for size and cacheability: struct * brw_stage_prog_data has binding_table which uses a lot of uint32_t for 8 - * bit quantities etc; param, pull_param, and image_params are pointers, we - * just need the compation map. use bit fields for all bools, eg - * dual_src_blend. + * bit quantities etc; use bit fields for all bools, eg dual_src_blend. */ +static uint32_t +shader_bin_key_hash_func(const void *void_key) +{ + const struct anv_shader_bin_key *key = void_key; + return _mesa_hash_data(key->data, key->size); +} + +static bool +shader_bin_key_compare_func(const void *void_a, const void *void_b) +{ + const struct anv_shader_bin_key *a = void_a, *b = void_b; + if (a->size != b->size) + return false; + + return memcmp(a->data, b->data, a->size) == 0; +} + +static uint32_t +sha1_hash_func(const void *sha1) +{ + return _mesa_hash_data(sha1, 20); +} + +static bool +sha1_compare_func(const void *sha1_a, const void *sha1_b) +{ + return memcmp(sha1_a, sha1_b, 20) == 0; +} + void anv_pipeline_cache_init(struct anv_pipeline_cache *cache, - struct anv_device *device) + struct anv_device *device, + bool cache_enabled) { cache->device = device; - anv_state_stream_init(&cache->program_stream, - &device->instruction_block_pool); pthread_mutex_init(&cache->mutex, NULL); - cache->kernel_count = 0; - cache->total_size = 0; - cache->table_size = 1024; - const size_t byte_size = cache->table_size * sizeof(cache->hash_table[0]); - cache->hash_table = malloc(byte_size); - - /* We don't consider allocation failure fatal, we just start with a 0-sized - * cache. */ - if (cache->hash_table == NULL) - cache->table_size = 0; - else - memset(cache->hash_table, 0xff, byte_size); + if (cache_enabled) { + cache->cache = _mesa_hash_table_create(NULL, shader_bin_key_hash_func, + shader_bin_key_compare_func); + cache->nir_cache = _mesa_hash_table_create(NULL, sha1_hash_func, + sha1_compare_func); + } else { + cache->cache = NULL; + cache->nir_cache = NULL; + } } void anv_pipeline_cache_finish(struct anv_pipeline_cache *cache) { - anv_state_stream_finish(&cache->program_stream); pthread_mutex_destroy(&cache->mutex); - free(cache->hash_table); -} -struct cache_entry { - unsigned char sha1[20]; - uint32_t prog_data_size; - uint32_t kernel_size; - char prog_data[0]; + if (cache->cache) { + /* This is a bit unfortunate. In order to keep things from randomly + * going away, the shader cache has to hold a reference to all shader + * binaries it contains. We unref them when we destroy the cache. + */ + hash_table_foreach(cache->cache, entry) + anv_shader_bin_unref(cache->device, entry->data); - /* kernel follows prog_data at next 64 byte aligned address */ -}; + _mesa_hash_table_destroy(cache->cache, NULL); + } -static uint32_t -entry_size(struct cache_entry *entry) -{ - /* This returns the number of bytes needed to serialize an entry, which - * doesn't include the alignment padding bytes. 
- */ + if (cache->nir_cache) { + hash_table_foreach(cache->nir_cache, entry) + ralloc_free(entry->data); - return sizeof(*entry) + entry->prog_data_size + entry->kernel_size; + _mesa_hash_table_destroy(cache->nir_cache, NULL); + } } -void -anv_hash_shader(unsigned char *hash, const void *key, size_t key_size, - struct anv_shader_module *module, - const char *entrypoint, - const VkSpecializationInfo *spec_info) +static struct anv_shader_bin * +anv_pipeline_cache_search_locked(struct anv_pipeline_cache *cache, + const void *key_data, uint32_t key_size) { - struct mesa_sha1 *ctx; - - ctx = _mesa_sha1_init(); - _mesa_sha1_update(ctx, key, key_size); - _mesa_sha1_update(ctx, module->sha1, sizeof(module->sha1)); - _mesa_sha1_update(ctx, entrypoint, strlen(entrypoint)); - /* hash in shader stage, pipeline layout? */ - if (spec_info) { - _mesa_sha1_update(ctx, spec_info->pMapEntries, - spec_info->mapEntryCount * sizeof spec_info->pMapEntries[0]); - _mesa_sha1_update(ctx, spec_info->pData, spec_info->dataSize); - } - _mesa_sha1_final(ctx, hash); + uint32_t vla[1 + DIV_ROUND_UP(key_size, sizeof(uint32_t))]; + struct anv_shader_bin_key *key = (void *)vla; + key->size = key_size; + memcpy(key->data, key_data, key_size); + + struct hash_entry *entry = _mesa_hash_table_search(cache->cache, key); + if (entry) + return entry->data; + else + return NULL; } -uint32_t +struct anv_shader_bin * anv_pipeline_cache_search(struct anv_pipeline_cache *cache, - const unsigned char *sha1, - const struct brw_stage_prog_data **prog_data) + const void *key_data, uint32_t key_size) { - const uint32_t mask = cache->table_size - 1; - const uint32_t start = (*(uint32_t *) sha1); + if (!cache->cache) + return NULL; - for (uint32_t i = 0; i < cache->table_size; i++) { - const uint32_t index = (start + i) & mask; - const uint32_t offset = cache->hash_table[index]; - - if (offset == ~0) - return NO_KERNEL; + pthread_mutex_lock(&cache->mutex); - struct cache_entry *entry = - cache->program_stream.block_pool->map + offset; - if (memcmp(entry->sha1, sha1, sizeof(entry->sha1)) == 0) { - if (prog_data) - *prog_data = (const struct brw_stage_prog_data *) entry->prog_data; + struct anv_shader_bin *shader = + anv_pipeline_cache_search_locked(cache, key_data, key_size); - const uint32_t preamble_size = - align_u32(sizeof(*entry) + entry->prog_data_size, 64); + pthread_mutex_unlock(&cache->mutex); - return offset + preamble_size; - } - } + /* We increment refcount before handing it to the caller */ + if (shader) + anv_shader_bin_ref(shader); - return NO_KERNEL; + return shader; } static void -anv_pipeline_cache_set_entry(struct anv_pipeline_cache *cache, - struct cache_entry *entry, uint32_t entry_offset) +anv_pipeline_cache_add_shader_bin(struct anv_pipeline_cache *cache, + struct anv_shader_bin *bin) { - const uint32_t mask = cache->table_size - 1; - const uint32_t start = (*(uint32_t *) entry->sha1); + if (!cache->cache) + return; - /* We'll always be able to insert when we get here. 
*/ - assert(cache->kernel_count < cache->table_size / 2); + pthread_mutex_lock(&cache->mutex); - for (uint32_t i = 0; i < cache->table_size; i++) { - const uint32_t index = (start + i) & mask; - if (cache->hash_table[index] == ~0) { - cache->hash_table[index] = entry_offset; - break; - } + struct hash_entry *entry = _mesa_hash_table_search(cache->cache, bin->key); + if (entry == NULL) { + /* Take a reference for the cache */ + anv_shader_bin_ref(bin); + _mesa_hash_table_insert(cache->cache, bin->key, bin); } - cache->total_size += entry_size(entry); - cache->kernel_count++; + pthread_mutex_unlock(&cache->mutex); } -static VkResult -anv_pipeline_cache_grow(struct anv_pipeline_cache *cache) +static struct anv_shader_bin * +anv_pipeline_cache_add_shader_locked(struct anv_pipeline_cache *cache, + const void *key_data, uint32_t key_size, + const void *kernel_data, + uint32_t kernel_size, + const void *constant_data, + uint32_t constant_data_size, + const struct brw_stage_prog_data *prog_data, + uint32_t prog_data_size, + const void *prog_data_param, + const struct brw_compile_stats *stats, + uint32_t num_stats, + const nir_xfb_info *xfb_info, + const struct anv_pipeline_bind_map *bind_map) { - const uint32_t table_size = cache->table_size * 2; - const uint32_t old_table_size = cache->table_size; - const size_t byte_size = table_size * sizeof(cache->hash_table[0]); - uint32_t *table; - uint32_t *old_table = cache->hash_table; - - table = malloc(byte_size); - if (table == NULL) - return VK_ERROR_OUT_OF_HOST_MEMORY; - - cache->hash_table = table; - cache->table_size = table_size; - cache->kernel_count = 0; - cache->total_size = 0; - - memset(cache->hash_table, 0xff, byte_size); - for (uint32_t i = 0; i < old_table_size; i++) { - const uint32_t offset = old_table[i]; - if (offset == ~0) - continue; - - struct cache_entry *entry = - cache->program_stream.block_pool->map + offset; - anv_pipeline_cache_set_entry(cache, entry, offset); - } - - free(old_table); - - return VK_SUCCESS; + struct anv_shader_bin *shader = + anv_pipeline_cache_search_locked(cache, key_data, key_size); + if (shader) + return shader; + + struct anv_shader_bin *bin = + anv_shader_bin_create(cache->device, key_data, key_size, + kernel_data, kernel_size, + constant_data, constant_data_size, + prog_data, prog_data_size, prog_data_param, + stats, num_stats, xfb_info, bind_map); + if (!bin) + return NULL; + + _mesa_hash_table_insert(cache->cache, bin->key, bin); + + return bin; } -uint32_t +struct anv_shader_bin * anv_pipeline_cache_upload_kernel(struct anv_pipeline_cache *cache, - const unsigned char *sha1, - const void *kernel, size_t kernel_size, - const struct brw_stage_prog_data **prog_data, - size_t prog_data_size) + const void *key_data, uint32_t key_size, + const void *kernel_data, uint32_t kernel_size, + const void *constant_data, + uint32_t constant_data_size, + const struct brw_stage_prog_data *prog_data, + uint32_t prog_data_size, + const struct brw_compile_stats *stats, + uint32_t num_stats, + const nir_xfb_info *xfb_info, + const struct anv_pipeline_bind_map *bind_map) { - pthread_mutex_lock(&cache->mutex); - struct cache_entry *entry; - - const uint32_t preamble_size = - align_u32(sizeof(*entry) + prog_data_size, 64); - - const uint32_t size = preamble_size + kernel_size; - - assert(size < cache->program_stream.block_pool->block_size); - const struct anv_state state = - anv_state_stream_alloc(&cache->program_stream, size, 64); - - entry = state.map; - entry->prog_data_size = prog_data_size; - memcpy(entry->prog_data, 
*prog_data, prog_data_size); - *prog_data = (const struct brw_stage_prog_data *) entry->prog_data; - entry->kernel_size = kernel_size; - - if (sha1 && env_var_as_boolean("ANV_ENABLE_PIPELINE_CACHE", false)) { - assert(anv_pipeline_cache_search(cache, sha1, NULL) == NO_KERNEL); - - memcpy(entry->sha1, sha1, sizeof(entry->sha1)); - if (cache->kernel_count == cache->table_size / 2) - anv_pipeline_cache_grow(cache); - - /* Failing to grow that hash table isn't fatal, but may mean we don't - * have enough space to add this new kernel. Only add it if there's room. - */ - if (cache->kernel_count < cache->table_size / 2) - anv_pipeline_cache_set_entry(cache, entry, state.offset); + if (cache->cache) { + pthread_mutex_lock(&cache->mutex); + + struct anv_shader_bin *bin = + anv_pipeline_cache_add_shader_locked(cache, key_data, key_size, + kernel_data, kernel_size, + constant_data, constant_data_size, + prog_data, prog_data_size, + prog_data->param, + stats, num_stats, + xfb_info, bind_map); + + pthread_mutex_unlock(&cache->mutex); + + /* We increment refcount before handing it to the caller */ + if (bin) + anv_shader_bin_ref(bin); + + return bin; + } else { + /* In this case, we're not caching it so the caller owns it entirely */ + return anv_shader_bin_create(cache->device, key_data, key_size, + kernel_data, kernel_size, + constant_data, constant_data_size, + prog_data, prog_data_size, + prog_data->param, + stats, num_stats, + xfb_info, bind_map); } - - pthread_mutex_unlock(&cache->mutex); - - memcpy(state.map + preamble_size, kernel, kernel_size); - - if (!cache->device->info.has_llc) - anv_state_clflush(state); - - return state.offset + preamble_size; } struct cache_header { @@ -257,12 +453,20 @@ anv_pipeline_cache_load(struct anv_pipeline_cache *cache, const void *data, size_t size) { struct anv_device *device = cache->device; - struct cache_header header; - uint8_t uuid[VK_UUID_SIZE]; + struct anv_physical_device *pdevice = &device->instance->physicalDevice; + + if (cache->cache == NULL) + return; - if (size < sizeof(header)) + struct blob_reader blob; + blob_reader_init(&blob, data, size); + + struct cache_header header; + blob_copy_bytes(&blob, &header, sizeof(header)); + uint32_t count = blob_read_uint32(&blob); + if (blob.overrun) return; - memcpy(&header, data, sizeof(header)); + if (header.header_size < sizeof(header)) return; if (header.header_version != VK_PIPELINE_CACHE_HEADER_VERSION_ONE) @@ -271,28 +475,15 @@ anv_pipeline_cache_load(struct anv_pipeline_cache *cache, return; if (header.device_id != device->chipset_id) return; - anv_device_get_cache_uuid(uuid); - if (memcmp(header.uuid, uuid, VK_UUID_SIZE) != 0) + if (memcmp(header.uuid, pdevice->pipeline_cache_uuid, VK_UUID_SIZE) != 0) return; - const void *end = data + size; - const void *p = data + header.header_size; - - while (p < end) { - /* The kernels aren't 64 byte aligned in the serialized format so - * they're always right after the prog_data. 
- */ - const struct cache_entry *entry = p; - const void *kernel = &entry->prog_data[entry->prog_data_size]; - - const struct brw_stage_prog_data *prog_data = - (const struct brw_stage_prog_data *) entry->prog_data; - - anv_pipeline_cache_upload_kernel(cache, entry->sha1, - kernel, entry->kernel_size, - &prog_data, - entry->prog_data_size); - p = kernel + entry->kernel_size; + for (uint32_t i = 0; i < count; i++) { + struct anv_shader_bin *bin = + anv_shader_bin_create_from_blob(device, &blob); + if (!bin) + break; + _mesa_hash_table_insert(cache->cache, bin->key, bin); } } @@ -308,13 +499,14 @@ VkResult anv_CreatePipelineCache( assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO); assert(pCreateInfo->flags == 0); - cache = anv_alloc2(&device->alloc, pAllocator, + cache = vk_alloc2(&device->alloc, pAllocator, sizeof(*cache), 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); if (cache == NULL) return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY); - anv_pipeline_cache_init(cache, device); + anv_pipeline_cache_init(cache, device, + device->instance->pipeline_cache_enabled); if (pCreateInfo->initialDataSize > 0) anv_pipeline_cache_load(cache, @@ -334,9 +526,12 @@ void anv_DestroyPipelineCache( ANV_FROM_HANDLE(anv_device, device, _device); ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache); + if (!cache) + return; + anv_pipeline_cache_finish(cache); - anv_free2(&device->alloc, pAllocator, cache); + vk_free2(&device->alloc, pAllocator, cache); } VkResult anv_GetPipelineCacheData( @@ -347,76 +542,56 @@ VkResult anv_GetPipelineCacheData( { ANV_FROM_HANDLE(anv_device, device, _device); ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache); - struct cache_header *header; + struct anv_physical_device *pdevice = &device->instance->physicalDevice; - const size_t size = sizeof(*header) + cache->total_size; - - if (pData == NULL) { - *pDataSize = size; - return VK_SUCCESS; + struct blob blob; + if (pData) { + blob_init_fixed(&blob, pData, *pDataSize); + } else { + blob_init_fixed(&blob, NULL, SIZE_MAX); } - if (*pDataSize < sizeof(*header)) { + struct cache_header header = { + .header_size = sizeof(struct cache_header), + .header_version = VK_PIPELINE_CACHE_HEADER_VERSION_ONE, + .vendor_id = 0x8086, + .device_id = device->chipset_id, + }; + memcpy(header.uuid, pdevice->pipeline_cache_uuid, VK_UUID_SIZE); + blob_write_bytes(&blob, &header, sizeof(header)); + + uint32_t count = 0; + intptr_t count_offset = blob_reserve_uint32(&blob); + if (count_offset < 0) { *pDataSize = 0; + blob_finish(&blob); return VK_INCOMPLETE; } - void *p = pData, *end = pData + *pDataSize; - header = p; - header->header_size = sizeof(*header); - header->header_version = VK_PIPELINE_CACHE_HEADER_VERSION_ONE; - header->vendor_id = 0x8086; - header->device_id = device->chipset_id; - anv_device_get_cache_uuid(header->uuid); - p += header->header_size; - - struct cache_entry *entry; - for (uint32_t i = 0; i < cache->table_size; i++) { - if (cache->hash_table[i] == ~0) - continue; - - entry = cache->program_stream.block_pool->map + cache->hash_table[i]; - if (end < p + entry_size(entry)) - break; + VkResult result = VK_SUCCESS; + if (cache->cache) { + hash_table_foreach(cache->cache, entry) { + struct anv_shader_bin *shader = entry->data; - memcpy(p, entry, sizeof(*entry) + entry->prog_data_size); - p += sizeof(*entry) + entry->prog_data_size; + size_t save_size = blob.size; + if (!anv_shader_bin_write_to_blob(shader, &blob)) { + /* If it fails reset to the previous size and bail */ + blob.size = save_size; + result = VK_INCOMPLETE; + 
break; + } - void *kernel = (void *) entry + - align_u32(sizeof(*entry) + entry->prog_data_size, 64); - - memcpy(p, kernel, entry->kernel_size); - p += entry->kernel_size; + count++; + } } - *pDataSize = p - pData; + blob_overwrite_uint32(&blob, count_offset, count); - return VK_SUCCESS; -} - -static void -anv_pipeline_cache_merge(struct anv_pipeline_cache *dst, - struct anv_pipeline_cache *src) -{ - for (uint32_t i = 0; i < src->table_size; i++) { - if (src->hash_table[i] == ~0) - continue; - - struct cache_entry *entry = - src->program_stream.block_pool->map + src->hash_table[i]; + *pDataSize = blob.size; - if (anv_pipeline_cache_search(dst, entry->sha1, NULL) != NO_KERNEL) - continue; + blob_finish(&blob); - const void *kernel = (void *) entry + - align_u32(sizeof(*entry) + entry->prog_data_size, 64); - const struct brw_stage_prog_data *prog_data = - (const struct brw_stage_prog_data *) entry->prog_data; - - anv_pipeline_cache_upload_kernel(dst, entry->sha1, - kernel, entry->kernel_size, - &prog_data, entry->prog_data_size); - } + return result; } VkResult anv_MergePipelineCaches( @@ -427,11 +602,211 @@ VkResult anv_MergePipelineCaches( { ANV_FROM_HANDLE(anv_pipeline_cache, dst, destCache); + if (!dst->cache) + return VK_SUCCESS; + for (uint32_t i = 0; i < srcCacheCount; i++) { ANV_FROM_HANDLE(anv_pipeline_cache, src, pSrcCaches[i]); + if (!src->cache) + continue; + + hash_table_foreach(src->cache, entry) { + struct anv_shader_bin *bin = entry->data; + assert(bin); + + if (_mesa_hash_table_search(dst->cache, bin->key)) + continue; - anv_pipeline_cache_merge(dst, src); + anv_shader_bin_ref(bin); + _mesa_hash_table_insert(dst->cache, bin->key, bin); + } } return VK_SUCCESS; } + +struct anv_shader_bin * +anv_device_search_for_kernel(struct anv_device *device, + struct anv_pipeline_cache *cache, + const void *key_data, uint32_t key_size, + bool *user_cache_hit) +{ + struct anv_shader_bin *bin; + + *user_cache_hit = false; + + if (cache) { + bin = anv_pipeline_cache_search(cache, key_data, key_size); + if (bin) { + *user_cache_hit = cache != &device->default_pipeline_cache; + return bin; + } + } + +#ifdef ENABLE_SHADER_CACHE + struct disk_cache *disk_cache = device->instance->physicalDevice.disk_cache; + if (disk_cache && device->instance->pipeline_cache_enabled) { + cache_key cache_key; + disk_cache_compute_key(disk_cache, key_data, key_size, cache_key); + + size_t buffer_size; + uint8_t *buffer = disk_cache_get(disk_cache, cache_key, &buffer_size); + if (buffer) { + struct blob_reader blob; + blob_reader_init(&blob, buffer, buffer_size); + bin = anv_shader_bin_create_from_blob(device, &blob); + free(buffer); + + if (bin) { + if (cache) + anv_pipeline_cache_add_shader_bin(cache, bin); + return bin; + } + } + } +#endif + + return NULL; +} + +struct anv_shader_bin * +anv_device_upload_kernel(struct anv_device *device, + struct anv_pipeline_cache *cache, + const void *key_data, uint32_t key_size, + const void *kernel_data, uint32_t kernel_size, + const void *constant_data, + uint32_t constant_data_size, + const struct brw_stage_prog_data *prog_data, + uint32_t prog_data_size, + const struct brw_compile_stats *stats, + uint32_t num_stats, + const nir_xfb_info *xfb_info, + const struct anv_pipeline_bind_map *bind_map) +{ + struct anv_shader_bin *bin; + if (cache) { + bin = anv_pipeline_cache_upload_kernel(cache, key_data, key_size, + kernel_data, kernel_size, + constant_data, constant_data_size, + prog_data, prog_data_size, + stats, num_stats, + xfb_info, bind_map); + } else { + bin = 
anv_shader_bin_create(device, key_data, key_size, + kernel_data, kernel_size, + constant_data, constant_data_size, + prog_data, prog_data_size, + prog_data->param, + stats, num_stats, + xfb_info, bind_map); + } + + if (bin == NULL) + return NULL; + +#ifdef ENABLE_SHADER_CACHE + struct disk_cache *disk_cache = device->instance->physicalDevice.disk_cache; + if (disk_cache) { + struct blob binary; + blob_init(&binary); + if (anv_shader_bin_write_to_blob(bin, &binary)) { + cache_key cache_key; + disk_cache_compute_key(disk_cache, key_data, key_size, cache_key); + + disk_cache_put(disk_cache, cache_key, binary.data, binary.size, NULL); + } + + blob_finish(&binary); + } +#endif + + return bin; +} + +struct serialized_nir { + unsigned char sha1_key[20]; + size_t size; + char data[0]; +}; + +struct nir_shader * +anv_device_search_for_nir(struct anv_device *device, + struct anv_pipeline_cache *cache, + const nir_shader_compiler_options *nir_options, + unsigned char sha1_key[20], + void *mem_ctx) +{ + if (cache && cache->nir_cache) { + const struct serialized_nir *snir = NULL; + + pthread_mutex_lock(&cache->mutex); + struct hash_entry *entry = + _mesa_hash_table_search(cache->nir_cache, sha1_key); + if (entry) + snir = entry->data; + pthread_mutex_unlock(&cache->mutex); + + if (snir) { + struct blob_reader blob; + blob_reader_init(&blob, snir->data, snir->size); + + nir_shader *nir = nir_deserialize(mem_ctx, nir_options, &blob); + if (blob.overrun) { + ralloc_free(nir); + } else { + return nir; + } + } + } + + return NULL; +} + +void +anv_device_upload_nir(struct anv_device *device, + struct anv_pipeline_cache *cache, + const struct nir_shader *nir, + unsigned char sha1_key[20]) +{ + if (cache && cache->nir_cache) { + pthread_mutex_lock(&cache->mutex); + struct hash_entry *entry = + _mesa_hash_table_search(cache->nir_cache, sha1_key); + pthread_mutex_unlock(&cache->mutex); + if (entry) + return; + + struct blob blob; + blob_init(&blob); + + nir_serialize(&blob, nir, false); + if (blob.out_of_memory) { + blob_finish(&blob); + return; + } + + pthread_mutex_lock(&cache->mutex); + /* Because ralloc isn't thread-safe, we have to do all this inside the + * lock. We could unlock for the big memcpy but it's probably not worth + * the hassle. + */ + entry = _mesa_hash_table_search(cache->nir_cache, sha1_key); + if (entry) { + blob_finish(&blob); + pthread_mutex_unlock(&cache->mutex); + return; + } + + struct serialized_nir *snir = + ralloc_size(cache->nir_cache, sizeof(*snir) + blob.size); + memcpy(snir->sha1_key, sha1_key, 20); + snir->size = blob.size; + memcpy(snir->data, blob.data, blob.size); + + blob_finish(&blob); + + _mesa_hash_table_insert(cache->nir_cache, snir->sha1_key, snir); + + pthread_mutex_unlock(&cache->mutex); + } +}
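
For reference, the cache blob emitted by the new anv_GetPipelineCacheData is laid out as [struct cache_header][uint32_t entry count][serialized anv_shader_bin entries], with the count slot reserved up front by blob_reserve_uint32() and backpatched by blob_overwrite_uint32() once the hash table has been walked. Below is a minimal standalone C sketch of that reserve-and-backpatch pattern; the grow-buffer type and its helpers are hypothetical stand-ins for Mesa's util/blob API, and the device id is a placeholder.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical append-only grow buffer standing in for struct blob. */
struct buf { uint8_t *data; size_t size, cap; };

static void buf_write(struct buf *b, const void *src, size_t len)
{
   if (b->size + len > b->cap) {
      b->cap = b->cap ? b->cap : 64;
      while (b->cap < b->size + len)
         b->cap *= 2;
      b->data = realloc(b->data, b->cap);   /* error handling elided */
   }
   memcpy(b->data + b->size, src, len);
   b->size += len;
}

/* Reserve room for a uint32_t whose value is not yet known and return
 * its offset, mirroring blob_reserve_uint32(). */
static size_t buf_reserve_u32(struct buf *b)
{
   uint32_t zero = 0;
   size_t off = b->size;
   buf_write(b, &zero, sizeof(zero));
   return off;
}

/* Same layout as the cache_header struct in the diff. */
struct cache_header {
   uint32_t header_size;
   uint32_t header_version;
   uint32_t vendor_id;
   uint32_t device_id;
   uint8_t  uuid[16];               /* VK_UUID_SIZE */
};

int main(void)
{
   struct buf blob = { NULL, 0, 0 };

   struct cache_header header = {
      .header_size = sizeof(header),
      .header_version = 1,          /* VK_PIPELINE_CACHE_HEADER_VERSION_ONE */
      .vendor_id = 0x8086,
      .device_id = 0x1234,          /* placeholder for device->chipset_id */
   };
   buf_write(&blob, &header, sizeof(header));

   /* The entry count is unknown until every shader has been written, so
    * reserve its slot now and backpatch it afterwards, as the diff does
    * with blob_reserve_uint32()/blob_overwrite_uint32(). */
   size_t count_offset = buf_reserve_u32(&blob);

   uint32_t count = 0;
   for (uint32_t i = 0; i < 3; i++) {    /* stand-in for hash_table_foreach */
      buf_write(&blob, &i, sizeof(i));   /* stand-in for a serialized entry */
      count++;
   }

   memcpy(blob.data + count_offset, &count, sizeof(count));   /* backpatch */

   printf("blob: %zu bytes, %u entries\n", blob.size, count);
   free(blob.data);
   return 0;
}

The same reservation trick is why a failed blob_reserve_uint32() maps directly to VK_INCOMPLETE in the diff: if there is no room even for the count slot, there is no valid layout to return.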
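
The locking scheme in anv_device_upload_nir above — search under the mutex, drop it for the expensive nir_serialize(), then re-search under the mutex before inserting (the ralloc copy also stays inside the lock because ralloc is not thread-safe) — is a recheck-under-lock pattern. Here is a self-contained pthread sketch of it; the upload()/lookup_locked() helpers and the toy one-slot cache are hypothetical stand-ins for the nir_cache hash table.

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t cache_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Toy one-slot "cache"; the real code uses a _mesa_hash_table keyed by
 * a SHA-1 of the shader. */
static char *cached_entry;

static bool lookup_locked(const char *key)
{
   return cached_entry && strcmp(cached_entry, key) == 0;
}

void upload(const char *key)
{
   /* First search: cheap early-out if the entry already exists. */
   pthread_mutex_lock(&cache_mutex);
   bool present = lookup_locked(key);
   pthread_mutex_unlock(&cache_mutex);
   if (present)
      return;

   /* Expensive work (nir_serialize() in the diff) runs unlocked. */
   char *entry = strdup(key);

   /* Recheck under the lock: another thread may have inserted the same
    * key while we were serializing. */
   pthread_mutex_lock(&cache_mutex);
   if (lookup_locked(key)) {
      free(entry);                  /* lost the race; drop our copy */
   } else {
      cached_entry = entry;
   }
   pthread_mutex_unlock(&cache_mutex);
}

Dropping the lock around serialization keeps other lookups from stalling behind it; the price is the duplicate-insert race, which the second search resolves by discarding the loser's copy.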