anv: Properly cache brw_stage_prog_data::relocs
[mesa.git] / src / intel / vulkan / anv_pipeline_cache.c
index c8ff7e52996039f9b021e59e513b5df95dcbba8d..739b76ce04ff97974b06d99de802a0d321690f8e 100644 (file)
  * IN THE SOFTWARE.
  */
 
-#include "util/mesa-sha1.h"
+#include "util/blob.h"
+#include "util/hash_table.h"
 #include "util/debug.h"
+#include "util/disk_cache.h"
+#include "util/mesa-sha1.h"
+#include "nir/nir_serialize.h"
 #include "anv_private.h"
+#include "nir/nir_xfb_info.h"
+#include "vulkan/util/vk_util.h"
+
+struct anv_shader_bin *
+anv_shader_bin_create(struct anv_device *device,
+                      gl_shader_stage stage,
+                      const void *key_data, uint32_t key_size,
+                      const void *kernel_data, uint32_t kernel_size,
+                      const struct brw_stage_prog_data *prog_data_in,
+                      uint32_t prog_data_size,
+                      const struct brw_compile_stats *stats, uint32_t num_stats,
+                      const nir_xfb_info *xfb_info_in,
+                      const struct anv_pipeline_bind_map *bind_map)
+{
+   struct anv_shader_bin *shader;
+   struct anv_shader_bin_key *key;
+   struct brw_stage_prog_data *prog_data;
+   struct brw_shader_reloc *prog_data_relocs;
+   uint32_t *prog_data_param;
+   nir_xfb_info *xfb_info;
+   struct anv_pipeline_binding *surface_to_descriptor, *sampler_to_descriptor;
+
+   ANV_MULTIALLOC(ma);
+   anv_multialloc_add(&ma, &shader, 1);
+   anv_multialloc_add_size(&ma, &key, sizeof(*key) + key_size);
+   anv_multialloc_add_size(&ma, &prog_data, prog_data_size);
+   anv_multialloc_add(&ma, &prog_data_relocs, prog_data_in->num_relocs);
+   anv_multialloc_add(&ma, &prog_data_param, prog_data_in->nr_params);
+   if (xfb_info_in) {
+      uint32_t xfb_info_size = nir_xfb_info_size(xfb_info_in->output_count);
+      anv_multialloc_add_size(&ma, &xfb_info, xfb_info_size);
+   }
+   anv_multialloc_add(&ma, &surface_to_descriptor,
+                           bind_map->surface_count);
+   anv_multialloc_add(&ma, &sampler_to_descriptor,
+                           bind_map->sampler_count);
+
+   if (!anv_multialloc_alloc(&ma, &device->vk.alloc,
+                             VK_SYSTEM_ALLOCATION_SCOPE_DEVICE))
+      return NULL;
+
+   shader->ref_cnt = 1;
+
+   shader->stage = stage;
+
+   key->size = key_size;
+   memcpy(key->data, key_data, key_size);
+   shader->key = key;
+
+   shader->kernel =
+      anv_state_pool_alloc(&device->instruction_state_pool, kernel_size, 64);
+   memcpy(shader->kernel.map, kernel_data, kernel_size);
+   shader->kernel_size = kernel_size;
+
+   memcpy(prog_data, prog_data_in, prog_data_size);
+   typed_memcpy(prog_data_relocs, prog_data_in->relocs,
+                prog_data_in->num_relocs);
+   prog_data->relocs = prog_data_relocs;
+   memset(prog_data_param, 0,
+          prog_data->nr_params * sizeof(*prog_data_param));
+   prog_data->param = prog_data_param;
+   shader->prog_data = prog_data;
+   shader->prog_data_size = prog_data_size;
+
+   assert(num_stats <= ARRAY_SIZE(shader->stats));
+   typed_memcpy(shader->stats, stats, num_stats);
+   shader->num_stats = num_stats;
+
+   if (xfb_info_in) {
+      *xfb_info = *xfb_info_in;
+      typed_memcpy(xfb_info->outputs, xfb_info_in->outputs,
+                   xfb_info_in->output_count);
+      shader->xfb_info = xfb_info;
+   } else {
+      shader->xfb_info = NULL;
+   }
+
+   shader->bind_map = *bind_map;
+   typed_memcpy(surface_to_descriptor, bind_map->surface_to_descriptor,
+                bind_map->surface_count);
+   shader->bind_map.surface_to_descriptor = surface_to_descriptor;
+   typed_memcpy(sampler_to_descriptor, bind_map->sampler_to_descriptor,
+                bind_map->sampler_count);
+   shader->bind_map.sampler_to_descriptor = sampler_to_descriptor;
+
+   return shader;
+}
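
The multialloc calls above pack the shader struct and every variable-length payload (key, prog_data, relocs, params, xfb info, bind-map tables) into a single allocation, so the one vk_free() in anv_shader_bin_destroy releases everything. A minimal sketch of the same trailing-data idea using plain malloc (simplified stand-in types, not the driver's actual helpers):

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    struct example_key {
       uint32_t size;
       uint8_t data[0];           /* flexible trailing bytes */
    };

    struct example_bin {
       struct example_key *key;   /* points into the same allocation */
    };

    static struct example_bin *
    example_bin_create(const void *key_data, uint32_t key_size)
    {
       /* One malloc covers the struct plus its trailing key. */
       struct example_bin *bin =
          malloc(sizeof(*bin) + sizeof(struct example_key) + key_size);
       if (bin == NULL)
          return NULL;

       bin->key = (struct example_key *)(bin + 1);
       bin->key->size = key_size;
       memcpy(bin->key->data, key_data, key_size);
       return bin;
    }

anv_multialloc additionally handles per-member alignment, which this hand-rolled version glosses over.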
+
+void
+anv_shader_bin_destroy(struct anv_device *device,
+                       struct anv_shader_bin *shader)
+{
+   assert(shader->ref_cnt == 0);
+   anv_state_pool_free(&device->instruction_state_pool, shader->kernel);
+   vk_free(&device->vk.alloc, shader);
+}
+
+static bool
+anv_shader_bin_write_to_blob(const struct anv_shader_bin *shader,
+                             struct blob *blob)
+{
+   blob_write_uint32(blob, shader->stage);
+
+   blob_write_uint32(blob, shader->key->size);
+   blob_write_bytes(blob, shader->key->data, shader->key->size);
+
+   blob_write_uint32(blob, shader->kernel_size);
+   blob_write_bytes(blob, shader->kernel.map, shader->kernel_size);
+
+   blob_write_uint32(blob, shader->prog_data_size);
+   blob_write_bytes(blob, shader->prog_data, shader->prog_data_size);
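+   /* prog_data::relocs is a pointer, so the bytes just written contain a
+    * stale host address.  Serialize the reloc array itself right after;
+    * the reader reconstructs the pointer.
+    */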
+   blob_write_bytes(blob, shader->prog_data->relocs,
+                    shader->prog_data->num_relocs *
+                    sizeof(shader->prog_data->relocs[0]));
+
+   blob_write_uint32(blob, shader->num_stats);
+   blob_write_bytes(blob, shader->stats,
+                    shader->num_stats * sizeof(shader->stats[0]));
+
+   if (shader->xfb_info) {
+      uint32_t xfb_info_size =
+         nir_xfb_info_size(shader->xfb_info->output_count);
+      blob_write_uint32(blob, xfb_info_size);
+      blob_write_bytes(blob, shader->xfb_info, xfb_info_size);
+   } else {
+      blob_write_uint32(blob, 0);
+   }
+
+   blob_write_bytes(blob, shader->bind_map.surface_sha1,
+                    sizeof(shader->bind_map.surface_sha1));
+   blob_write_bytes(blob, shader->bind_map.sampler_sha1,
+                    sizeof(shader->bind_map.sampler_sha1));
+   blob_write_bytes(blob, shader->bind_map.push_sha1,
+                    sizeof(shader->bind_map.push_sha1));
+   blob_write_uint32(blob, shader->bind_map.surface_count);
+   blob_write_uint32(blob, shader->bind_map.sampler_count);
+   blob_write_bytes(blob, shader->bind_map.surface_to_descriptor,
+                    shader->bind_map.surface_count *
+                    sizeof(*shader->bind_map.surface_to_descriptor));
+   blob_write_bytes(blob, shader->bind_map.sampler_to_descriptor,
+                    shader->bind_map.sampler_count *
+                    sizeof(*shader->bind_map.sampler_to_descriptor));
+   blob_write_bytes(blob, shader->bind_map.push_ranges,
+                    sizeof(shader->bind_map.push_ranges));
+
+   return !blob->out_of_memory;
+}
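
For reference, the serialized layout written above (and consumed in the same order by anv_shader_bin_create_from_blob below) is: stage; key size plus key bytes; kernel size plus kernel bytes; prog_data size plus prog_data bytes, immediately followed by the reloc array (the piece this commit adds); stat count plus stats; xfb_info size (0 when absent) plus xfb_info bytes; then the bind map (three SHA-1s, the surface and sampler counts, the two descriptor tables, and the push ranges).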
+
+static struct anv_shader_bin *
+anv_shader_bin_create_from_blob(struct anv_device *device,
+                                struct blob_reader *blob)
+{
+   gl_shader_stage stage = blob_read_uint32(blob);
+
+   uint32_t key_size = blob_read_uint32(blob);
+   const void *key_data = blob_read_bytes(blob, key_size);
+
+   uint32_t kernel_size = blob_read_uint32(blob);
+   const void *kernel_data = blob_read_bytes(blob, kernel_size);
+
+   uint32_t prog_data_size = blob_read_uint32(blob);
+   const void *prog_data_bytes = blob_read_bytes(blob, prog_data_size);
+   if (blob->overrun)
+      return NULL;
+
+   union brw_any_prog_data prog_data;
+   memcpy(&prog_data, prog_data_bytes,
+          MIN2(sizeof(prog_data), prog_data_size));
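+   /* Point relocs at the copy inside the blob for now; anv_shader_bin_create
+    * will deep-copy it into the shader's own allocation below.
+    */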
+   prog_data.base.relocs =
+      blob_read_bytes(blob, prog_data.base.num_relocs *
+                            sizeof(prog_data.base.relocs[0]));
+
+   uint32_t num_stats = blob_read_uint32(blob);
+   const struct brw_compile_stats *stats =
+      blob_read_bytes(blob, num_stats * sizeof(stats[0]));
+
+   const nir_xfb_info *xfb_info = NULL;
+   uint32_t xfb_size = blob_read_uint32(blob);
+   if (xfb_size)
+      xfb_info = blob_read_bytes(blob, xfb_size);
+
+   struct anv_pipeline_bind_map bind_map;
+   blob_copy_bytes(blob, bind_map.surface_sha1, sizeof(bind_map.surface_sha1));
+   blob_copy_bytes(blob, bind_map.sampler_sha1, sizeof(bind_map.sampler_sha1));
+   blob_copy_bytes(blob, bind_map.push_sha1, sizeof(bind_map.push_sha1));
+   bind_map.surface_count = blob_read_uint32(blob);
+   bind_map.sampler_count = blob_read_uint32(blob);
+   bind_map.surface_to_descriptor = (void *)
+      blob_read_bytes(blob, bind_map.surface_count *
+                            sizeof(*bind_map.surface_to_descriptor));
+   bind_map.sampler_to_descriptor = (void *)
+      blob_read_bytes(blob, bind_map.sampler_count *
+                            sizeof(*bind_map.sampler_to_descriptor));
+   blob_copy_bytes(blob, bind_map.push_ranges, sizeof(bind_map.push_ranges));
+
+   if (blob->overrun)
+      return NULL;
+
+   return anv_shader_bin_create(device, stage,
+                                key_data, key_size,
+                                kernel_data, kernel_size,
+                                &prog_data.base, prog_data_size,
+                                stats, num_stats, xfb_info, &bind_map);
+}
 
 /* Remaining work:
  *
  *
  * - Review prog_data struct for size and cacheability: struct
  *   brw_stage_prog_data has binding_table which uses a lot of uint32_t for 8
- *   bit quantities etc; param, pull_param, and image_params are pointers, we
- *   just need the compation map. use bit fields for all bools, eg
- *   dual_src_blend.
+ *   bit quantities etc.; use bit fields for all bools, e.g. dual_src_blend.
  */
 
+static uint32_t
+shader_bin_key_hash_func(const void *void_key)
+{
+   const struct anv_shader_bin_key *key = void_key;
+   return _mesa_hash_data(key->data, key->size);
+}
+
+static bool
+shader_bin_key_compare_func(const void *void_a, const void *void_b)
+{
+   const struct anv_shader_bin_key *a = void_a, *b = void_b;
+   if (a->size != b->size)
+      return false;
+
+   return memcmp(a->data, b->data, a->size) == 0;
+}
+
+static uint32_t
+sha1_hash_func(const void *sha1)
+{
+   return _mesa_hash_data(sha1, 20);
+}
+
+static bool
+sha1_compare_func(const void *sha1_a, const void *sha1_b)
+{
+   return memcmp(sha1_a, sha1_b, 20) == 0;
+}
+
 void
 anv_pipeline_cache_init(struct anv_pipeline_cache *cache,
-                        struct anv_device *device)
+                        struct anv_device *device,
+                        bool cache_enabled,
+                        bool external_sync)
 {
+   vk_object_base_init(&device->vk, &cache->base,
+                       VK_OBJECT_TYPE_PIPELINE_CACHE);
    cache->device = device;
-   anv_state_stream_init(&cache->program_stream,
-                         &device->instruction_block_pool);
+   cache->external_sync = external_sync;
    pthread_mutex_init(&cache->mutex, NULL);
 
-   cache->kernel_count = 0;
-   cache->total_size = 0;
-   cache->table_size = 1024;
-   const size_t byte_size = cache->table_size * sizeof(cache->table[0]);
-   cache->table = malloc(byte_size);
-
-   /* We don't consider allocation failure fatal, we just start with a 0-sized
-    * cache. */
-   if (cache->table == NULL)
-      cache->table_size = 0;
-   else
-      memset(cache->table, 0xff, byte_size);
+   if (cache_enabled) {
+      cache->cache = _mesa_hash_table_create(NULL, shader_bin_key_hash_func,
+                                             shader_bin_key_compare_func);
+      cache->nir_cache = _mesa_hash_table_create(NULL, sha1_hash_func,
+                                                 sha1_compare_func);
+   } else {
+      cache->cache = NULL;
+      cache->nir_cache = NULL;
+   }
 }
 
 void
 anv_pipeline_cache_finish(struct anv_pipeline_cache *cache)
 {
-   anv_state_stream_finish(&cache->program_stream);
    pthread_mutex_destroy(&cache->mutex);
-   free(cache->table);
+
+   if (cache->cache) {
+      /* This is a bit unfortunate.  In order to keep things from randomly
+       * going away, the shader cache has to hold a reference to all shader
+       * binaries it contains.  We unref them when we destroy the cache.
+       */
+      hash_table_foreach(cache->cache, entry)
+         anv_shader_bin_unref(cache->device, entry->data);
+
+      _mesa_hash_table_destroy(cache->cache, NULL);
+   }
+
+   if (cache->nir_cache) {
+      hash_table_foreach(cache->nir_cache, entry)
+         ralloc_free(entry->data);
+
+      _mesa_hash_table_destroy(cache->nir_cache, NULL);
+   }
+
+   vk_object_base_finish(&cache->base);
 }
 
-struct cache_entry {
-   unsigned char sha1[20];
-   uint32_t prog_data_size;
-   uint32_t kernel_size;
-   char prog_data[0];
+static struct anv_shader_bin *
+anv_pipeline_cache_search_locked(struct anv_pipeline_cache *cache,
+                                 const void *key_data, uint32_t key_size)
+{
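+   /* Build a temporary anv_shader_bin_key on the stack.  Declaring the
+    * backing store as a uint32_t VLA keeps the leading size field aligned.
+    */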
+   uint32_t vla[1 + DIV_ROUND_UP(key_size, sizeof(uint32_t))];
+   struct anv_shader_bin_key *key = (void *)vla;
+   key->size = key_size;
+   memcpy(key->data, key_data, key_size);
+
+   struct hash_entry *entry = _mesa_hash_table_search(cache->cache, key);
+   if (entry)
+      return entry->data;
+   else
+      return NULL;
+}
 
-   /* kernel follows prog_data at next 64 byte aligned address */
-};
+static inline void
+anv_cache_lock(struct anv_pipeline_cache *cache)
+{
+   if (!cache->external_sync)
+      pthread_mutex_lock(&cache->mutex);
+}
 
-void
-anv_hash_shader(unsigned char *hash, const void *key, size_t key_size,
-                struct anv_shader_module *module,
-                const char *entrypoint,
-                const VkSpecializationInfo *spec_info)
+static inline void
+anv_cache_unlock(struct anv_pipeline_cache *cache)
 {
-   struct mesa_sha1 *ctx;
-
-   ctx = _mesa_sha1_init();
-   _mesa_sha1_update(ctx, &key, sizeof(key));
-   _mesa_sha1_update(ctx, module->sha1, sizeof(module->sha1));
-   _mesa_sha1_update(ctx, entrypoint, strlen(entrypoint));
-   /* hash in shader stage, pipeline layout? */
-   if (spec_info) {
-      _mesa_sha1_update(ctx, spec_info->pMapEntries,
-                        spec_info->mapEntryCount * sizeof spec_info->pMapEntries[0]);
-      _mesa_sha1_update(ctx, spec_info->pData, spec_info->dataSize);
-   }
-   _mesa_sha1_final(ctx, hash);
+   if (!cache->external_sync)
+      pthread_mutex_unlock(&cache->mutex);
 }
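
These lock helpers make the mutex optional: when the application creates the cache with VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT (see anv_CreatePipelineCache below), it promises to serialize access itself, so the driver skips the pthread mutex. On the application side that looks roughly like this (standard Vulkan usage, sketched):

    VkPipelineCacheCreateInfo info = {
       .sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO,
       .flags = VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT,
    };
    VkPipelineCache pcache;
    vkCreatePipelineCache(device, &info, NULL, &pcache);
    /* The app must now guarantee no concurrent use of pcache. */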
 
-uint32_t
+struct anv_shader_bin *
 anv_pipeline_cache_search(struct anv_pipeline_cache *cache,
-                          const unsigned char *sha1, void *prog_data)
+                          const void *key_data, uint32_t key_size)
 {
-   const uint32_t mask = cache->table_size - 1;
-   const uint32_t start = (*(uint32_t *) sha1);
+   if (!cache->cache)
+      return NULL;
 
-   for (uint32_t i = 0; i < cache->table_size; i++) {
-      const uint32_t index = (start + i) & mask;
-      const uint32_t offset = cache->table[index];
+   anv_cache_lock(cache);
 
-      if (offset == ~0)
-         return NO_KERNEL;
+   struct anv_shader_bin *shader =
+      anv_pipeline_cache_search_locked(cache, key_data, key_size);
 
-      struct cache_entry *entry =
-         cache->program_stream.block_pool->map + offset;
-      if (memcmp(entry->sha1, sha1, sizeof(entry->sha1)) == 0) {
-         if (prog_data)
-            memcpy(prog_data, entry->prog_data, entry->prog_data_size);
+   anv_cache_unlock(cache);
 
-         const uint32_t preamble_size =
-            align_u32(sizeof(*entry) + entry->prog_data_size, 64);
+   /* We increment the refcount before handing the shader to the caller */
+   if (shader)
+      anv_shader_bin_ref(shader);
 
-         return offset + preamble_size;
-      }
-   }
-
-   return NO_KERNEL;
+   return shader;
 }
 
 static void
-anv_pipeline_cache_add_entry(struct anv_pipeline_cache *cache,
-                             struct cache_entry *entry, uint32_t entry_offset)
+anv_pipeline_cache_add_shader_bin(struct anv_pipeline_cache *cache,
+                                  struct anv_shader_bin *bin)
 {
-   const uint32_t mask = cache->table_size - 1;
-   const uint32_t start = (*(uint32_t *) entry->sha1);
+   if (!cache->cache)
+      return;
 
-   /* We'll always be able to insert when we get here. */
-   assert(cache->kernel_count < cache->table_size / 2);
+   anv_cache_lock(cache);
 
-   for (uint32_t i = 0; i < cache->table_size; i++) {
-      const uint32_t index = (start + i) & mask;
-      if (cache->table[index] == ~0) {
-         cache->table[index] = entry_offset;
-         break;
-      }
+   struct hash_entry *entry = _mesa_hash_table_search(cache->cache, bin->key);
+   if (entry == NULL) {
+      /* Take a reference for the cache */
+      anv_shader_bin_ref(bin);
+      _mesa_hash_table_insert(cache->cache, bin->key, bin);
    }
 
-   /* We don't include the alignment padding bytes when we serialize, so
-    * don't include taht in the the total size. */
-   cache->total_size +=
-      sizeof(*entry) + entry->prog_data_size + entry->kernel_size;
-   cache->kernel_count++;
+   anv_cache_unlock(cache);
 }
 
-static VkResult
-anv_pipeline_cache_grow(struct anv_pipeline_cache *cache)
+static struct anv_shader_bin *
+anv_pipeline_cache_add_shader_locked(struct anv_pipeline_cache *cache,
+                                     gl_shader_stage stage,
+                                     const void *key_data, uint32_t key_size,
+                                     const void *kernel_data,
+                                     uint32_t kernel_size,
+                                     const struct brw_stage_prog_data *prog_data,
+                                     uint32_t prog_data_size,
+                                     const struct brw_compile_stats *stats,
+                                     uint32_t num_stats,
+                                     const nir_xfb_info *xfb_info,
+                                     const struct anv_pipeline_bind_map *bind_map)
 {
-   const uint32_t table_size = cache->table_size * 2;
-   const uint32_t old_table_size = cache->table_size;
-   const size_t byte_size = table_size * sizeof(cache->table[0]);
-   uint32_t *table;
-   uint32_t *old_table = cache->table;
-
-   table = malloc(byte_size);
-   if (table == NULL)
-      return VK_ERROR_OUT_OF_HOST_MEMORY;
-
-   cache->table = table;
-   cache->table_size = table_size;
-   cache->kernel_count = 0;
-   cache->total_size = 0;
-
-   memset(cache->table, 0xff, byte_size);
-   for (uint32_t i = 0; i < old_table_size; i++) {
-      const uint32_t offset = old_table[i];
-      if (offset == ~0)
-         continue;
-
-      struct cache_entry *entry =
-         cache->program_stream.block_pool->map + offset;
-      anv_pipeline_cache_add_entry(cache, entry, offset);
-   }
-
-   free(old_table);
-
-   return VK_SUCCESS;
+   struct anv_shader_bin *shader =
+      anv_pipeline_cache_search_locked(cache, key_data, key_size);
+   if (shader)
+      return shader;
+
+   struct anv_shader_bin *bin =
+      anv_shader_bin_create(cache->device, stage,
+                            key_data, key_size,
+                            kernel_data, kernel_size,
+                            prog_data, prog_data_size,
+                            stats, num_stats, xfb_info, bind_map);
+   if (!bin)
+      return NULL;
+
+   _mesa_hash_table_insert(cache->cache, bin->key, bin);
+
+   return bin;
 }
 
-uint32_t
+struct anv_shader_bin *
 anv_pipeline_cache_upload_kernel(struct anv_pipeline_cache *cache,
-                                 const unsigned char *sha1,
-                                 const void *kernel, size_t kernel_size,
-                                 const void *prog_data, size_t prog_data_size)
+                                 gl_shader_stage stage,
+                                 const void *key_data, uint32_t key_size,
+                                 const void *kernel_data, uint32_t kernel_size,
+                                 const struct brw_stage_prog_data *prog_data,
+                                 uint32_t prog_data_size,
+                                 const struct brw_compile_stats *stats,
+                                 uint32_t num_stats,
+                                 const nir_xfb_info *xfb_info,
+                                 const struct anv_pipeline_bind_map *bind_map)
 {
-   pthread_mutex_lock(&cache->mutex);
-   struct cache_entry *entry;
-
-   /* Meta pipelines don't have SPIR-V, so we can't hash them.
-    * Consequentally, they just don't get cached.
-    */
-   const uint32_t preamble_size = sha1 ?
-      align_u32(sizeof(*entry) + prog_data_size, 64) :
-      0;
-
-   const uint32_t size = preamble_size + kernel_size;
-
-   assert(size < cache->program_stream.block_pool->block_size);
-   const struct anv_state state =
-      anv_state_stream_alloc(&cache->program_stream, size, 64);
-
-   if (sha1 && env_var_as_boolean("ANV_ENABLE_PIPELINE_CACHE", false)) {
-      assert(anv_pipeline_cache_search(cache, sha1, NULL) == NO_KERNEL);
-      entry = state.map;
-      memcpy(entry->sha1, sha1, sizeof(entry->sha1));
-      entry->prog_data_size = prog_data_size;
-      memcpy(entry->prog_data, prog_data, prog_data_size);
-      entry->kernel_size = kernel_size;
-
-      if (cache->kernel_count == cache->table_size / 2)
-         anv_pipeline_cache_grow(cache);
-
-      /* Failing to grow that hash table isn't fatal, but may mean we don't
-       * have enough space to add this new kernel. Only add it if there's room.
-       */
-      if (cache->kernel_count < cache->table_size / 2)
-         anv_pipeline_cache_add_entry(cache, entry, state.offset);
+   if (cache->cache) {
+      anv_cache_lock(cache);
+
+      struct anv_shader_bin *bin =
+         anv_pipeline_cache_add_shader_locked(cache, stage, key_data, key_size,
+                                              kernel_data, kernel_size,
+                                              prog_data, prog_data_size,
+                                              stats, num_stats,
+                                              xfb_info, bind_map);
+
+      anv_cache_unlock(cache);
+
+      /* We increment the refcount before handing the shader to the caller */
+      if (bin)
+         anv_shader_bin_ref(bin);
+
+      return bin;
+   } else {
+      /* In this case we're not caching it, so the caller owns it entirely */
+      return anv_shader_bin_create(cache->device, stage,
+                                   key_data, key_size,
+                                   kernel_data, kernel_size,
+                                   prog_data, prog_data_size,
+                                   stats, num_stats,
+                                   xfb_info, bind_map);
    }
-
-   pthread_mutex_unlock(&cache->mutex);
-
-   memcpy(state.map + preamble_size, kernel, kernel_size);
-
-   if (!cache->device->info.has_llc)
-      anv_state_clflush(state);
-
-   return state.offset + preamble_size;
 }
 
 static void
@@ -242,35 +468,37 @@ anv_pipeline_cache_load(struct anv_pipeline_cache *cache,
                         const void *data, size_t size)
 {
    struct anv_device *device = cache->device;
-   uint8_t uuid[VK_UUID_SIZE];
-   struct {
-      uint32_t device_id;
-      uint8_t uuid[VK_UUID_SIZE];
-   } header;
+   struct anv_physical_device *pdevice = device->physical;
 
-   if (size < sizeof(header))
-      return;
-   memcpy(&header, data, sizeof(header));
-   if (header.device_id != device->chipset_id)
-      return;
-   anv_device_get_cache_uuid(uuid);
-   if (memcmp(header.uuid, uuid, VK_UUID_SIZE) != 0)
+   if (cache->cache == NULL)
       return;
 
-   const void *end = data + size;
-   const void *p = data + sizeof(header);
+   struct blob_reader blob;
+   blob_reader_init(&blob, data, size);
 
-   while (p < end) {
-      /* The kernels aren't 64 byte aligned in the serialized format so
-       * they're always right after the prog_data.
-       */
-      const struct cache_entry *entry = p;
-      const void *kernel = &entry->prog_data[entry->prog_data_size];
+   struct vk_pipeline_cache_header header;
+   blob_copy_bytes(&blob, &header, sizeof(header));
+   uint32_t count = blob_read_uint32(&blob);
+   if (blob.overrun)
+      return;
 
-      anv_pipeline_cache_upload_kernel(cache, entry->sha1,
-                                       kernel, entry->kernel_size,
-                                       entry->prog_data, entry->prog_data_size);
-      p = kernel + entry->kernel_size;
+   if (header.header_size < sizeof(header))
+      return;
+   if (header.header_version != VK_PIPELINE_CACHE_HEADER_VERSION_ONE)
+      return;
+   if (header.vendor_id != 0x8086)
+      return;
+   if (header.device_id != device->info.chipset_id)
+      return;
+   if (memcmp(header.uuid, pdevice->pipeline_cache_uuid, VK_UUID_SIZE) != 0)
+      return;
+
+   for (uint32_t i = 0; i < count; i++) {
+      struct anv_shader_bin *bin =
+         anv_shader_bin_create_from_blob(device, &blob);
+      if (!bin)
+         break;
+      _mesa_hash_table_insert(cache->cache, bin->key, bin);
    }
 }
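
The blob consumed here matches what anv_GetPipelineCacheData emits below, roughly:

    /* Pipeline cache data layout (sketch):
     *
     *   struct vk_pipeline_cache_header header;  // size, version,
     *                                            // vendor/device id, UUID
     *   uint32_t count;                          // number of shader binaries
     *   ...count serialized anv_shader_bin entries...
     */

Any mismatch in the header fields makes the load a silent no-op, so stale caches from other devices or driver versions are simply ignored.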
 
@@ -286,13 +514,15 @@ VkResult anv_CreatePipelineCache(
    assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO);
    assert(pCreateInfo->flags == 0);
 
-   cache = anv_alloc2(&device->alloc, pAllocator,
+   cache = vk_alloc2(&device->vk.alloc, pAllocator,
                        sizeof(*cache), 8,
                        VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    if (cache == NULL)
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 
-   anv_pipeline_cache_init(cache, device);
+   anv_pipeline_cache_init(cache, device,
+                           device->physical->instance->pipeline_cache_enabled,
+                           pCreateInfo->flags & VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT);
 
    if (pCreateInfo->initialDataSize > 0)
       anv_pipeline_cache_load(cache,
@@ -312,9 +542,12 @@ void anv_DestroyPipelineCache(
    ANV_FROM_HANDLE(anv_device, device, _device);
    ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache);
 
+   if (!cache)
+      return;
+
    anv_pipeline_cache_finish(cache);
 
-   anv_free2(&device->alloc, pAllocator, cache);
+   vk_free2(&device->vk.alloc, pAllocator, cache);
 }
 
 VkResult anv_GetPipelineCacheData(
@@ -326,65 +559,54 @@ VkResult anv_GetPipelineCacheData(
    ANV_FROM_HANDLE(anv_device, device, _device);
    ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache);
 
-   const size_t size = 4 + VK_UUID_SIZE + cache->total_size;
-
-   if (pData == NULL) {
-      *pDataSize = size;
-      return VK_SUCCESS;
+   struct blob blob;
+   if (pData) {
+      blob_init_fixed(&blob, pData, *pDataSize);
+   } else {
+      blob_init_fixed(&blob, NULL, SIZE_MAX);
    }
 
-   if (*pDataSize < size) {
+   struct vk_pipeline_cache_header header = {
+      .header_size = sizeof(struct vk_pipeline_cache_header),
+      .header_version = VK_PIPELINE_CACHE_HEADER_VERSION_ONE,
+      .vendor_id = 0x8086,
+      .device_id = device->info.chipset_id,
+   };
+   memcpy(header.uuid, device->physical->pipeline_cache_uuid, VK_UUID_SIZE);
+   blob_write_bytes(&blob, &header, sizeof(header));
+
+   uint32_t count = 0;
+   intptr_t count_offset = blob_reserve_uint32(&blob);
+   if (count_offset < 0) {
       *pDataSize = 0;
+      blob_finish(&blob);
       return VK_INCOMPLETE;
    }
 
-   void *p = pData;
-   memcpy(p, &device->chipset_id, sizeof(device->chipset_id));
-   p += sizeof(device->chipset_id);
+   VkResult result = VK_SUCCESS;
+   if (cache->cache) {
+      hash_table_foreach(cache->cache, entry) {
+         struct anv_shader_bin *shader = entry->data;
 
-   anv_device_get_cache_uuid(p);
-   p += VK_UUID_SIZE;
+         size_t save_size = blob.size;
+         if (!anv_shader_bin_write_to_blob(shader, &blob)) {
+            /* If the write fails, reset to the previous size and bail */
+            blob.size = save_size;
+            result = VK_INCOMPLETE;
+            break;
+         }
 
-   struct cache_entry *entry;
-   for (uint32_t i = 0; i < cache->table_size; i++) {
-      if (cache->table[i] == ~0)
-         continue;
-
-      entry = cache->program_stream.block_pool->map + cache->table[i];
-
-      memcpy(p, entry, sizeof(*entry) + entry->prog_data_size);
-      p += sizeof(*entry) + entry->prog_data_size;
-
-      void *kernel = (void *) entry +
-         align_u32(sizeof(*entry) + entry->prog_data_size, 64);
-
-      memcpy(p, kernel, entry->kernel_size);
-      p += entry->kernel_size;
+         count++;
+      }
    }
 
-   return VK_SUCCESS;
-}
+   blob_overwrite_uint32(&blob, count_offset, count);
 
-static void
-anv_pipeline_cache_merge(struct anv_pipeline_cache *dst,
-                         struct anv_pipeline_cache *src)
-{
-   for (uint32_t i = 0; i < src->table_size; i++) {
-      if (src->table[i] == ~0)
-         continue;
+   *pDataSize = blob.size;
 
-      struct cache_entry *entry =
-         src->program_stream.block_pool->map + src->table[i];
+   blob_finish(&blob);
 
-      if (anv_pipeline_cache_search(dst, entry->sha1, NULL) != NO_KERNEL)
-         continue;
-
-      const void *kernel = (void *) entry +
-         align_u32(sizeof(*entry) + entry->prog_data_size, 64);
-      anv_pipeline_cache_upload_kernel(dst, entry->sha1,
-                                       kernel, entry->kernel_size,
-                                       entry->prog_data, entry->prog_data_size);
-   }
+   return result;
 }
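
blob_init_fixed(&blob, NULL, SIZE_MAX) above yields a blob that counts bytes without storing them, which implements vkGetPipelineCacheData's size-query mode. The standard two-call pattern on the application side:

    size_t size = 0;
    vkGetPipelineCacheData(device, pcache, &size, NULL);   /* query size */
    void *data = malloc(size);
    vkGetPipelineCacheData(device, pcache, &size, data);   /* fill data */
    /* persist `data` to disk, then free(data) */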
 
 VkResult anv_MergePipelineCaches(
@@ -395,11 +617,207 @@ VkResult anv_MergePipelineCaches(
 {
    ANV_FROM_HANDLE(anv_pipeline_cache, dst, destCache);
 
+   if (!dst->cache)
+      return VK_SUCCESS;
+
    for (uint32_t i = 0; i < srcCacheCount; i++) {
       ANV_FROM_HANDLE(anv_pipeline_cache, src, pSrcCaches[i]);
+      if (!src->cache)
+         continue;
+
+      hash_table_foreach(src->cache, entry) {
+         struct anv_shader_bin *bin = entry->data;
+         assert(bin);
 
-      anv_pipeline_cache_merge(dst, src);
+         if (_mesa_hash_table_search(dst->cache, bin->key))
+            continue;
+
+         anv_shader_bin_ref(bin);
+         _mesa_hash_table_insert(dst->cache, bin->key, bin);
+      }
    }
 
    return VK_SUCCESS;
 }
+
+struct anv_shader_bin *
+anv_device_search_for_kernel(struct anv_device *device,
+                             struct anv_pipeline_cache *cache,
+                             const void *key_data, uint32_t key_size,
+                             bool *user_cache_hit)
+{
+   struct anv_shader_bin *bin;
+
+   *user_cache_hit = false;
+
+   if (cache) {
+      bin = anv_pipeline_cache_search(cache, key_data, key_size);
+      if (bin) {
+         *user_cache_hit = cache != &device->default_pipeline_cache;
+         return bin;
+      }
+   }
+
+#ifdef ENABLE_SHADER_CACHE
+   struct disk_cache *disk_cache = device->physical->disk_cache;
+   if (disk_cache && device->physical->instance->pipeline_cache_enabled) {
+      cache_key cache_key;
+      disk_cache_compute_key(disk_cache, key_data, key_size, cache_key);
+
+      size_t buffer_size;
+      uint8_t *buffer = disk_cache_get(disk_cache, cache_key, &buffer_size);
+      if (buffer) {
+         struct blob_reader blob;
+         blob_reader_init(&blob, buffer, buffer_size);
+         bin = anv_shader_bin_create_from_blob(device, &blob);
+         free(buffer);
+
+         if (bin) {
+            if (cache)
+               anv_pipeline_cache_add_shader_bin(cache, bin);
+            return bin;
+         }
+      }
+   }
+#endif
+
+   return NULL;
+}
+
+struct anv_shader_bin *
+anv_device_upload_kernel(struct anv_device *device,
+                         struct anv_pipeline_cache *cache,
+                         gl_shader_stage stage,
+                         const void *key_data, uint32_t key_size,
+                         const void *kernel_data, uint32_t kernel_size,
+                         const struct brw_stage_prog_data *prog_data,
+                         uint32_t prog_data_size,
+                         const struct brw_compile_stats *stats,
+                         uint32_t num_stats,
+                         const nir_xfb_info *xfb_info,
+                         const struct anv_pipeline_bind_map *bind_map)
+{
+   struct anv_shader_bin *bin;
+   if (cache) {
+      bin = anv_pipeline_cache_upload_kernel(cache, stage, key_data, key_size,
+                                             kernel_data, kernel_size,
+                                             prog_data, prog_data_size,
+                                             stats, num_stats,
+                                             xfb_info, bind_map);
+   } else {
+      bin = anv_shader_bin_create(device, stage, key_data, key_size,
+                                  kernel_data, kernel_size,
+                                  prog_data, prog_data_size,
+                                  stats, num_stats,
+                                  xfb_info, bind_map);
+   }
+
+   if (bin == NULL)
+      return NULL;
+
+#ifdef ENABLE_SHADER_CACHE
+   struct disk_cache *disk_cache = device->physical->disk_cache;
+   if (disk_cache) {
+      struct blob binary;
+      blob_init(&binary);
+      if (anv_shader_bin_write_to_blob(bin, &binary)) {
+         cache_key cache_key;
+         disk_cache_compute_key(disk_cache, key_data, key_size, cache_key);
+
+         disk_cache_put(disk_cache, cache_key, binary.data, binary.size, NULL);
+      }
+
+      blob_finish(&binary);
+   }
+#endif
+
+   return bin;
+}
+
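+/* NIR cache entries are ralloc'd off cache->nir_cache, keyed by the SHA-1
+ * stored inline, with the serialized NIR appended as a flexible array.
+ */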
+struct serialized_nir {
+   unsigned char sha1_key[20];
+   size_t size;
+   char data[0];
+};
+
+struct nir_shader *
+anv_device_search_for_nir(struct anv_device *device,
+                          struct anv_pipeline_cache *cache,
+                          const nir_shader_compiler_options *nir_options,
+                          unsigned char sha1_key[20],
+                          void *mem_ctx)
+{
+   if (cache && cache->nir_cache) {
+      const struct serialized_nir *snir = NULL;
+
+      anv_cache_lock(cache);
+      struct hash_entry *entry =
+         _mesa_hash_table_search(cache->nir_cache, sha1_key);
+      if (entry)
+         snir = entry->data;
+      anv_cache_unlock(cache);
+
+      if (snir) {
+         struct blob_reader blob;
+         blob_reader_init(&blob, snir->data, snir->size);
+
+         nir_shader *nir = nir_deserialize(mem_ctx, nir_options, &blob);
+         if (blob.overrun) {
+            ralloc_free(nir);
+         } else {
+            return nir;
+         }
+      }
+   }
+
+   return NULL;
+}
+
+void
+anv_device_upload_nir(struct anv_device *device,
+                      struct anv_pipeline_cache *cache,
+                      const struct nir_shader *nir,
+                      unsigned char sha1_key[20])
+{
+   if (cache && cache->nir_cache) {
+      anv_cache_lock(cache);
+      struct hash_entry *entry =
+         _mesa_hash_table_search(cache->nir_cache, sha1_key);
+      anv_cache_unlock(cache);
+      if (entry)
+         return;
+
+      struct blob blob;
+      blob_init(&blob);
+
+      nir_serialize(&blob, nir, false);
+      if (blob.out_of_memory) {
+         blob_finish(&blob);
+         return;
+      }
+
+      anv_cache_lock(cache);
+      /* Because ralloc isn't thread-safe, we have to do all this inside the
+       * lock.  We could unlock for the big memcpy but it's probably not worth
+       * the hassle.
+       */
+      entry = _mesa_hash_table_search(cache->nir_cache, sha1_key);
+      if (entry) {
+         blob_finish(&blob);
+         anv_cache_unlock(cache);
+         return;
+      }
+
+      struct serialized_nir *snir =
+         ralloc_size(cache->nir_cache, sizeof(*snir) + blob.size);
+      if (snir == NULL) {
+         blob_finish(&blob);
+         anv_cache_unlock(cache);
+         return;
+      }
+      memcpy(snir->sha1_key, sha1_key, 20);
+      snir->size = blob.size;
+      memcpy(snir->data, blob.data, blob.size);
+
+      blob_finish(&blob);
+
+      _mesa_hash_table_insert(cache->nir_cache, snir->sha1_key, snir);
+
+      anv_cache_unlock(cache);
+   }
+}
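
Taken together, the two NIR-cache entry points pair up around the front-end compile. A hedged sketch of the intended call pattern, where compile_to_nir() is a hypothetical stand-in for the SPIR-V front end:

    nir_shader *nir =
       anv_device_search_for_nir(device, cache, nir_options, sha1, mem_ctx);
    if (nir == NULL) {
       /* Cache miss: run the SPIR-V front end and early lowering. */
       nir = compile_to_nir(device, mem_ctx);
       anv_device_upload_nir(device, cache, nir, sha1);
    }
    /* `nir` is owned by mem_ctx either way; the cache keeps only the
     * serialized copy. */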