vulkan/util: add struct vk_pipeline_cache_header
[mesa.git] src/intel/vulkan/anv_pipeline_cache.c
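This file is touched by the commit above: the pipeline cache header that anv previously open-coded is replaced by the shared struct vk_pipeline_cache_header from vulkan/util/vk_util.h (see the new include below and its uses in anv_pipeline_cache_load() and anv_GetPipelineCacheData()). A minimal sketch of that struct, assuming it simply mirrors the Vulkan pipeline-cache-data header layout those functions read and write:

struct vk_pipeline_cache_header {
   uint32_t header_size;    /* sizeof(struct vk_pipeline_cache_header) */
   uint32_t header_version; /* VK_PIPELINE_CACHE_HEADER_VERSION_ONE */
   uint32_t vendor_id;      /* PCI vendor ID; 0x8086 for Intel */
   uint32_t device_id;      /* PCI device ID */
   uint8_t  uuid[VK_UUID_SIZE];
};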
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/blob.h"
#include "util/hash_table.h"
#include "util/debug.h"
#include "util/disk_cache.h"
#include "util/mesa-sha1.h"
#include "nir/nir_serialize.h"
#include "anv_private.h"
#include "nir/nir_xfb_info.h"
#include "vulkan/util/vk_util.h"

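/* Create an anv_shader_bin from a compiled kernel.  The bin, its key, the
 * prog_data, the optional xfb info and the bind map tables all live in one
 * multialloc allocation; the kernel itself is copied into the device's
 * instruction state pool and any constant data into the dynamic state pool.
 * The returned bin starts out with a reference count of 1.
 */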
struct anv_shader_bin *
anv_shader_bin_create(struct anv_device *device,
                      gl_shader_stage stage,
                      const void *key_data, uint32_t key_size,
                      const void *kernel_data, uint32_t kernel_size,
                      const void *constant_data, uint32_t constant_data_size,
                      const struct brw_stage_prog_data *prog_data_in,
                      uint32_t prog_data_size,
                      const struct brw_compile_stats *stats, uint32_t num_stats,
                      const nir_xfb_info *xfb_info_in,
                      const struct anv_pipeline_bind_map *bind_map)
{
   struct anv_shader_bin *shader;
   struct anv_shader_bin_key *key;
   struct brw_stage_prog_data *prog_data;
   uint32_t *prog_data_param;
   nir_xfb_info *xfb_info;
   struct anv_pipeline_binding *surface_to_descriptor, *sampler_to_descriptor;

   ANV_MULTIALLOC(ma);
   anv_multialloc_add(&ma, &shader, 1);
   anv_multialloc_add_size(&ma, &key, sizeof(*key) + key_size);
   anv_multialloc_add_size(&ma, &prog_data, prog_data_size);
   anv_multialloc_add(&ma, &prog_data_param, prog_data_in->nr_params);
   if (xfb_info_in) {
      uint32_t xfb_info_size = nir_xfb_info_size(xfb_info_in->output_count);
      anv_multialloc_add_size(&ma, &xfb_info, xfb_info_size);
   }
   anv_multialloc_add(&ma, &surface_to_descriptor,
                      bind_map->surface_count);
   anv_multialloc_add(&ma, &sampler_to_descriptor,
                      bind_map->sampler_count);

   if (!anv_multialloc_alloc(&ma, &device->vk.alloc,
                             VK_SYSTEM_ALLOCATION_SCOPE_DEVICE))
      return NULL;

   shader->ref_cnt = 1;

   shader->stage = stage;

   key->size = key_size;
   memcpy(key->data, key_data, key_size);
   shader->key = key;

   shader->kernel =
      anv_state_pool_alloc(&device->instruction_state_pool, kernel_size, 64);
   memcpy(shader->kernel.map, kernel_data, kernel_size);
   shader->kernel_size = kernel_size;

   if (constant_data_size) {
      shader->constant_data =
         anv_state_pool_alloc(&device->dynamic_state_pool,
                              constant_data_size, 32);
      memcpy(shader->constant_data.map, constant_data, constant_data_size);
   } else {
      shader->constant_data = ANV_STATE_NULL;
   }
   shader->constant_data_size = constant_data_size;

   memcpy(prog_data, prog_data_in, prog_data_size);
   memset(prog_data_param, 0,
          prog_data->nr_params * sizeof(*prog_data_param));
   prog_data->param = prog_data_param;
   shader->prog_data = prog_data;
   shader->prog_data_size = prog_data_size;

   assert(num_stats <= ARRAY_SIZE(shader->stats));
   typed_memcpy(shader->stats, stats, num_stats);
   shader->num_stats = num_stats;

   if (xfb_info_in) {
      *xfb_info = *xfb_info_in;
      typed_memcpy(xfb_info->outputs, xfb_info_in->outputs,
                   xfb_info_in->output_count);
      shader->xfb_info = xfb_info;
   } else {
      shader->xfb_info = NULL;
   }

   shader->bind_map = *bind_map;
   typed_memcpy(surface_to_descriptor, bind_map->surface_to_descriptor,
                bind_map->surface_count);
   shader->bind_map.surface_to_descriptor = surface_to_descriptor;
   typed_memcpy(sampler_to_descriptor, bind_map->sampler_to_descriptor,
                bind_map->sampler_count);
   shader->bind_map.sampler_to_descriptor = sampler_to_descriptor;

   return shader;
}

void
anv_shader_bin_destroy(struct anv_device *device,
                       struct anv_shader_bin *shader)
{
   assert(shader->ref_cnt == 0);
   anv_state_pool_free(&device->instruction_state_pool, shader->kernel);
   anv_state_pool_free(&device->dynamic_state_pool, shader->constant_data);
   vk_free(&device->vk.alloc, shader);
}

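/* Serialize a shader bin into a blob.  The layout must stay in sync with
 * anv_shader_bin_create_from_blob() below: stage, key, kernel, constant
 * data, prog_data, compile stats, optional xfb info, then the bind map.
 * Returns false if the blob ran out of memory.
 */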
static bool
anv_shader_bin_write_to_blob(const struct anv_shader_bin *shader,
                             struct blob *blob)
{
   blob_write_uint32(blob, shader->stage);

   blob_write_uint32(blob, shader->key->size);
   blob_write_bytes(blob, shader->key->data, shader->key->size);

   blob_write_uint32(blob, shader->kernel_size);
   blob_write_bytes(blob, shader->kernel.map, shader->kernel_size);

   blob_write_uint32(blob, shader->constant_data_size);
   blob_write_bytes(blob, shader->constant_data.map,
                    shader->constant_data_size);

   blob_write_uint32(blob, shader->prog_data_size);
   blob_write_bytes(blob, shader->prog_data, shader->prog_data_size);

   blob_write_uint32(blob, shader->num_stats);
   blob_write_bytes(blob, shader->stats,
                    shader->num_stats * sizeof(shader->stats[0]));

   if (shader->xfb_info) {
      uint32_t xfb_info_size =
         nir_xfb_info_size(shader->xfb_info->output_count);
      blob_write_uint32(blob, xfb_info_size);
      blob_write_bytes(blob, shader->xfb_info, xfb_info_size);
   } else {
      blob_write_uint32(blob, 0);
   }

   blob_write_bytes(blob, shader->bind_map.surface_sha1,
                    sizeof(shader->bind_map.surface_sha1));
   blob_write_bytes(blob, shader->bind_map.sampler_sha1,
                    sizeof(shader->bind_map.sampler_sha1));
   blob_write_bytes(blob, shader->bind_map.push_sha1,
                    sizeof(shader->bind_map.push_sha1));
   blob_write_uint32(blob, shader->bind_map.surface_count);
   blob_write_uint32(blob, shader->bind_map.sampler_count);
   blob_write_bytes(blob, shader->bind_map.surface_to_descriptor,
                    shader->bind_map.surface_count *
                    sizeof(*shader->bind_map.surface_to_descriptor));
   blob_write_bytes(blob, shader->bind_map.sampler_to_descriptor,
                    shader->bind_map.sampler_count *
                    sizeof(*shader->bind_map.sampler_to_descriptor));
   blob_write_bytes(blob, shader->bind_map.push_ranges,
                    sizeof(shader->bind_map.push_ranges));

   return !blob->out_of_memory;
}

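/* Deserialize a shader bin written by anv_shader_bin_write_to_blob().  The
 * blob_read_bytes() calls return pointers into the blob rather than copies,
 * which is fine because anv_shader_bin_create() copies everything it keeps.
 * Returns NULL on overrun or allocation failure.
 */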
static struct anv_shader_bin *
anv_shader_bin_create_from_blob(struct anv_device *device,
                                struct blob_reader *blob)
{
   gl_shader_stage stage = blob_read_uint32(blob);

   uint32_t key_size = blob_read_uint32(blob);
   const void *key_data = blob_read_bytes(blob, key_size);

   uint32_t kernel_size = blob_read_uint32(blob);
   const void *kernel_data = blob_read_bytes(blob, kernel_size);

   uint32_t constant_data_size = blob_read_uint32(blob);
   const void *constant_data = blob_read_bytes(blob, constant_data_size);

   uint32_t prog_data_size = blob_read_uint32(blob);
   const struct brw_stage_prog_data *prog_data =
      blob_read_bytes(blob, prog_data_size);
   if (blob->overrun)
      return NULL;

   uint32_t num_stats = blob_read_uint32(blob);
   const struct brw_compile_stats *stats =
      blob_read_bytes(blob, num_stats * sizeof(stats[0]));

   const nir_xfb_info *xfb_info = NULL;
   uint32_t xfb_size = blob_read_uint32(blob);
   if (xfb_size)
      xfb_info = blob_read_bytes(blob, xfb_size);

   struct anv_pipeline_bind_map bind_map;
   blob_copy_bytes(blob, bind_map.surface_sha1, sizeof(bind_map.surface_sha1));
   blob_copy_bytes(blob, bind_map.sampler_sha1, sizeof(bind_map.sampler_sha1));
   blob_copy_bytes(blob, bind_map.push_sha1, sizeof(bind_map.push_sha1));
   bind_map.surface_count = blob_read_uint32(blob);
   bind_map.sampler_count = blob_read_uint32(blob);
   bind_map.surface_to_descriptor = (void *)
      blob_read_bytes(blob, bind_map.surface_count *
                            sizeof(*bind_map.surface_to_descriptor));
   bind_map.sampler_to_descriptor = (void *)
      blob_read_bytes(blob, bind_map.sampler_count *
                            sizeof(*bind_map.sampler_to_descriptor));
   blob_copy_bytes(blob, bind_map.push_ranges, sizeof(bind_map.push_ranges));

   if (blob->overrun)
      return NULL;

   return anv_shader_bin_create(device, stage,
                                key_data, key_size,
                                kernel_data, kernel_size,
                                constant_data, constant_data_size,
                                prog_data, prog_data_size,
                                stats, num_stats, xfb_info, &bind_map);
}

/* Remaining work:
 *
 * - Compact binding table layout so it's tight and not dependent on
 *   descriptor set layout.
 *
 * - Review prog_data struct for size and cacheability: struct
 *   brw_stage_prog_data has binding_table which uses a lot of uint32_t for 8
 *   bit quantities etc; use bit fields for all bools, eg dual_src_blend.
 */

static uint32_t
shader_bin_key_hash_func(const void *void_key)
{
   const struct anv_shader_bin_key *key = void_key;
   return _mesa_hash_data(key->data, key->size);
}

static bool
shader_bin_key_compare_func(const void *void_a, const void *void_b)
{
   const struct anv_shader_bin_key *a = void_a, *b = void_b;
   if (a->size != b->size)
      return false;

   return memcmp(a->data, b->data, a->size) == 0;
}

static uint32_t
sha1_hash_func(const void *sha1)
{
   return _mesa_hash_data(sha1, 20);
}

static bool
sha1_compare_func(const void *sha1_a, const void *sha1_b)
{
   return memcmp(sha1_a, sha1_b, 20) == 0;
}

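/* Initialize a pipeline cache.  When cache_enabled is false (the
 * instance-level pipeline_cache_enabled toggle), both hash tables stay NULL
 * and every lookup or upload falls through to creating a fresh, uncached
 * shader bin.
 */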
void
anv_pipeline_cache_init(struct anv_pipeline_cache *cache,
                        struct anv_device *device,
                        bool cache_enabled,
                        bool external_sync)
{
   vk_object_base_init(&device->vk, &cache->base,
                       VK_OBJECT_TYPE_PIPELINE_CACHE);
   cache->device = device;
   cache->external_sync = external_sync;
   pthread_mutex_init(&cache->mutex, NULL);

   if (cache_enabled) {
      cache->cache = _mesa_hash_table_create(NULL, shader_bin_key_hash_func,
                                             shader_bin_key_compare_func);
      cache->nir_cache = _mesa_hash_table_create(NULL, sha1_hash_func,
                                                 sha1_compare_func);
   } else {
      cache->cache = NULL;
      cache->nir_cache = NULL;
   }
}

void
anv_pipeline_cache_finish(struct anv_pipeline_cache *cache)
{
   pthread_mutex_destroy(&cache->mutex);

   if (cache->cache) {
      /* This is a bit unfortunate.  In order to keep things from randomly
       * going away, the shader cache has to hold a reference to all shader
       * binaries it contains.  We unref them when we destroy the cache.
       */
      hash_table_foreach(cache->cache, entry)
         anv_shader_bin_unref(cache->device, entry->data);

      _mesa_hash_table_destroy(cache->cache, NULL);
   }

   if (cache->nir_cache) {
      hash_table_foreach(cache->nir_cache, entry)
         ralloc_free(entry->data);

      _mesa_hash_table_destroy(cache->nir_cache, NULL);
   }

   vk_object_base_finish(&cache->base);
}

static struct anv_shader_bin *
anv_pipeline_cache_search_locked(struct anv_pipeline_cache *cache,
                                 const void *key_data, uint32_t key_size)
{
   uint32_t vla[1 + DIV_ROUND_UP(key_size, sizeof(uint32_t))];
   struct anv_shader_bin_key *key = (void *)vla;
   key->size = key_size;
   memcpy(key->data, key_data, key_size);

   struct hash_entry *entry = _mesa_hash_table_search(cache->cache, key);
   if (entry)
      return entry->data;
   else
      return NULL;
}

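/* A cache created with VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT
 * promises that the application synchronizes access itself, so these helpers
 * skip the internal mutex in that case.
 */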
static inline void
anv_cache_lock(struct anv_pipeline_cache *cache)
{
   if (!cache->external_sync)
      pthread_mutex_lock(&cache->mutex);
}

static inline void
anv_cache_unlock(struct anv_pipeline_cache *cache)
{
   if (!cache->external_sync)
      pthread_mutex_unlock(&cache->mutex);
}

struct anv_shader_bin *
anv_pipeline_cache_search(struct anv_pipeline_cache *cache,
                          const void *key_data, uint32_t key_size)
{
   if (!cache->cache)
      return NULL;

   anv_cache_lock(cache);

   struct anv_shader_bin *shader =
      anv_pipeline_cache_search_locked(cache, key_data, key_size);

   anv_cache_unlock(cache);

   /* We increment refcount before handing it to the caller */
   if (shader)
      anv_shader_bin_ref(shader);

   return shader;
}

static void
anv_pipeline_cache_add_shader_bin(struct anv_pipeline_cache *cache,
                                  struct anv_shader_bin *bin)
{
   if (!cache->cache)
      return;

   anv_cache_lock(cache);

   struct hash_entry *entry = _mesa_hash_table_search(cache->cache, bin->key);
   if (entry == NULL) {
      /* Take a reference for the cache */
      anv_shader_bin_ref(bin);
      _mesa_hash_table_insert(cache->cache, bin->key, bin);
   }

   anv_cache_unlock(cache);
}

static struct anv_shader_bin *
anv_pipeline_cache_add_shader_locked(struct anv_pipeline_cache *cache,
                                     gl_shader_stage stage,
                                     const void *key_data, uint32_t key_size,
                                     const void *kernel_data,
                                     uint32_t kernel_size,
                                     const void *constant_data,
                                     uint32_t constant_data_size,
                                     const struct brw_stage_prog_data *prog_data,
                                     uint32_t prog_data_size,
                                     const struct brw_compile_stats *stats,
                                     uint32_t num_stats,
                                     const nir_xfb_info *xfb_info,
                                     const struct anv_pipeline_bind_map *bind_map)
{
   struct anv_shader_bin *shader =
      anv_pipeline_cache_search_locked(cache, key_data, key_size);
   if (shader)
      return shader;

   struct anv_shader_bin *bin =
      anv_shader_bin_create(cache->device, stage,
                            key_data, key_size,
                            kernel_data, kernel_size,
                            constant_data, constant_data_size,
                            prog_data, prog_data_size,
                            stats, num_stats, xfb_info, bind_map);
   if (!bin)
      return NULL;

   _mesa_hash_table_insert(cache->cache, bin->key, bin);

   return bin;
}

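/* Add a freshly compiled kernel to the cache, or create a stand-alone bin if
 * caching is disabled.  Either way the caller gets its own reference and is
 * responsible for unref'ing it; the cache (when enabled) keeps a separate
 * reference of its own.
 */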
struct anv_shader_bin *
anv_pipeline_cache_upload_kernel(struct anv_pipeline_cache *cache,
                                 gl_shader_stage stage,
                                 const void *key_data, uint32_t key_size,
                                 const void *kernel_data, uint32_t kernel_size,
                                 const void *constant_data,
                                 uint32_t constant_data_size,
                                 const struct brw_stage_prog_data *prog_data,
                                 uint32_t prog_data_size,
                                 const struct brw_compile_stats *stats,
                                 uint32_t num_stats,
                                 const nir_xfb_info *xfb_info,
                                 const struct anv_pipeline_bind_map *bind_map)
{
   if (cache->cache) {
      anv_cache_lock(cache);

      struct anv_shader_bin *bin =
         anv_pipeline_cache_add_shader_locked(cache, stage, key_data, key_size,
                                              kernel_data, kernel_size,
                                              constant_data, constant_data_size,
                                              prog_data, prog_data_size,
                                              stats, num_stats,
                                              xfb_info, bind_map);

      anv_cache_unlock(cache);

      /* We increment refcount before handing it to the caller */
      if (bin)
         anv_shader_bin_ref(bin);

      return bin;
   } else {
      /* In this case, we're not caching it so the caller owns it entirely */
      return anv_shader_bin_create(cache->device, stage,
                                   key_data, key_size,
                                   kernel_data, kernel_size,
                                   constant_data, constant_data_size,
                                   prog_data, prog_data_size,
                                   stats, num_stats,
                                   xfb_info, bind_map);
   }
}

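/* Import serialized cache data (VkPipelineCacheCreateInfo::pInitialData).
 * The leading vk_pipeline_cache_header is validated against this device's
 * vendor/device ID and pipeline cache UUID; malformed or mismatched data is
 * silently ignored, as the Vulkan spec permits.
 */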
static void
anv_pipeline_cache_load(struct anv_pipeline_cache *cache,
                        const void *data, size_t size)
{
   struct anv_device *device = cache->device;
   struct anv_physical_device *pdevice = device->physical;

   if (cache->cache == NULL)
      return;

   struct blob_reader blob;
   blob_reader_init(&blob, data, size);

   struct vk_pipeline_cache_header header;
   blob_copy_bytes(&blob, &header, sizeof(header));
   uint32_t count = blob_read_uint32(&blob);
   if (blob.overrun)
      return;

   if (header.header_size < sizeof(header))
      return;
   if (header.header_version != VK_PIPELINE_CACHE_HEADER_VERSION_ONE)
      return;
   if (header.vendor_id != 0x8086)
      return;
   if (header.device_id != device->info.chipset_id)
      return;
   if (memcmp(header.uuid, pdevice->pipeline_cache_uuid, VK_UUID_SIZE) != 0)
      return;

   for (uint32_t i = 0; i < count; i++) {
      struct anv_shader_bin *bin =
         anv_shader_bin_create_from_blob(device, &blob);
      if (!bin)
         break;
      _mesa_hash_table_insert(cache->cache, bin->key, bin);
   }
}

VkResult anv_CreatePipelineCache(
    VkDevice                                    _device,
    const VkPipelineCacheCreateInfo*            pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkPipelineCache*                            pPipelineCache)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_pipeline_cache *cache;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO);

   cache = vk_alloc2(&device->vk.alloc, pAllocator,
                     sizeof(*cache), 8,
                     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (cache == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   anv_pipeline_cache_init(cache, device,
                           device->physical->instance->pipeline_cache_enabled,
                           pCreateInfo->flags & VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT);

   if (pCreateInfo->initialDataSize > 0)
      anv_pipeline_cache_load(cache,
                              pCreateInfo->pInitialData,
                              pCreateInfo->initialDataSize);

   *pPipelineCache = anv_pipeline_cache_to_handle(cache);

   return VK_SUCCESS;
}

void anv_DestroyPipelineCache(
    VkDevice                                    _device,
    VkPipelineCache                             _cache,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache);

   if (!cache)
      return;

   anv_pipeline_cache_finish(cache);

   vk_free2(&device->vk.alloc, pAllocator, cache);
}

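/* vkGetPipelineCacheData follows the usual Vulkan two-call idiom: with pData
 * set to NULL we serialize into a fixed NULL/SIZE_MAX blob purely to compute
 * the required size, otherwise we write as many whole shader bins as fit and
 * return VK_INCOMPLETE when the buffer is too small.  A typical call sequence
 * from an application looks roughly like (hypothetical buffer handling):
 *
 *    size_t size = 0;
 *    vkGetPipelineCacheData(device, cache, &size, NULL);
 *    void *data = malloc(size);
 *    vkGetPipelineCacheData(device, cache, &size, data);
 *    ... persist data to disk, then free(data) ...
 */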
VkResult anv_GetPipelineCacheData(
    VkDevice                                    _device,
    VkPipelineCache                             _cache,
    size_t*                                     pDataSize,
    void*                                       pData)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache);

   struct blob blob;
   if (pData) {
      blob_init_fixed(&blob, pData, *pDataSize);
   } else {
      blob_init_fixed(&blob, NULL, SIZE_MAX);
   }

   struct vk_pipeline_cache_header header = {
      .header_size = sizeof(struct vk_pipeline_cache_header),
      .header_version = VK_PIPELINE_CACHE_HEADER_VERSION_ONE,
      .vendor_id = 0x8086,
      .device_id = device->info.chipset_id,
   };
   memcpy(header.uuid, device->physical->pipeline_cache_uuid, VK_UUID_SIZE);
   blob_write_bytes(&blob, &header, sizeof(header));

   uint32_t count = 0;
   intptr_t count_offset = blob_reserve_uint32(&blob);
   if (count_offset < 0) {
      *pDataSize = 0;
      blob_finish(&blob);
      return VK_INCOMPLETE;
   }

   VkResult result = VK_SUCCESS;
   if (cache->cache) {
      hash_table_foreach(cache->cache, entry) {
         struct anv_shader_bin *shader = entry->data;

         size_t save_size = blob.size;
         if (!anv_shader_bin_write_to_blob(shader, &blob)) {
            /* If it fails reset to the previous size and bail */
            blob.size = save_size;
            result = VK_INCOMPLETE;
            break;
         }

         count++;
      }
   }

   blob_overwrite_uint32(&blob, count_offset, count);

   *pDataSize = blob.size;

   blob_finish(&blob);

   return result;
}

VkResult anv_MergePipelineCaches(
    VkDevice                                    _device,
    VkPipelineCache                             destCache,
    uint32_t                                    srcCacheCount,
    const VkPipelineCache*                      pSrcCaches)
{
   ANV_FROM_HANDLE(anv_pipeline_cache, dst, destCache);

   if (!dst->cache)
      return VK_SUCCESS;

   for (uint32_t i = 0; i < srcCacheCount; i++) {
      ANV_FROM_HANDLE(anv_pipeline_cache, src, pSrcCaches[i]);
      if (!src->cache)
         continue;

      hash_table_foreach(src->cache, entry) {
         struct anv_shader_bin *bin = entry->data;
         assert(bin);

         if (_mesa_hash_table_search(dst->cache, bin->key))
            continue;

         anv_shader_bin_ref(bin);
         _mesa_hash_table_insert(dst->cache, bin->key, bin);
      }
   }

   return VK_SUCCESS;
}

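/* Look up a compiled kernel, first in the given pipeline cache (which may be
 * the device's internal default cache) and then in the on-disk shader cache.
 * *user_cache_hit is set only when the hit came from an application-provided
 * cache rather than the default one.  Disk-cache hits are added back into the
 * in-memory cache (when one was given) so later lookups stay in memory.
 */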
struct anv_shader_bin *
anv_device_search_for_kernel(struct anv_device *device,
                             struct anv_pipeline_cache *cache,
                             const void *key_data, uint32_t key_size,
                             bool *user_cache_hit)
{
   struct anv_shader_bin *bin;

   *user_cache_hit = false;

   if (cache) {
      bin = anv_pipeline_cache_search(cache, key_data, key_size);
      if (bin) {
         *user_cache_hit = cache != &device->default_pipeline_cache;
         return bin;
      }
   }

#ifdef ENABLE_SHADER_CACHE
   struct disk_cache *disk_cache = device->physical->disk_cache;
   if (disk_cache && device->physical->instance->pipeline_cache_enabled) {
      cache_key cache_key;
      disk_cache_compute_key(disk_cache, key_data, key_size, cache_key);

      size_t buffer_size;
      uint8_t *buffer = disk_cache_get(disk_cache, cache_key, &buffer_size);
      if (buffer) {
         struct blob_reader blob;
         blob_reader_init(&blob, buffer, buffer_size);
         bin = anv_shader_bin_create_from_blob(device, &blob);
         free(buffer);

         if (bin) {
            if (cache)
               anv_pipeline_cache_add_shader_bin(cache, bin);
            return bin;
         }
      }
   }
#endif

   return NULL;
}

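/* Upload a freshly compiled kernel.  The bin goes through the pipeline cache
 * when one is provided (otherwise the caller owns it outright), and a
 * serialized copy is also written to the on-disk shader cache when that is
 * available.
 */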
struct anv_shader_bin *
anv_device_upload_kernel(struct anv_device *device,
                         struct anv_pipeline_cache *cache,
                         gl_shader_stage stage,
                         const void *key_data, uint32_t key_size,
                         const void *kernel_data, uint32_t kernel_size,
                         const void *constant_data,
                         uint32_t constant_data_size,
                         const struct brw_stage_prog_data *prog_data,
                         uint32_t prog_data_size,
                         const struct brw_compile_stats *stats,
                         uint32_t num_stats,
                         const nir_xfb_info *xfb_info,
                         const struct anv_pipeline_bind_map *bind_map)
{
   struct anv_shader_bin *bin;
   if (cache) {
      bin = anv_pipeline_cache_upload_kernel(cache, stage, key_data, key_size,
                                             kernel_data, kernel_size,
                                             constant_data, constant_data_size,
                                             prog_data, prog_data_size,
                                             stats, num_stats,
                                             xfb_info, bind_map);
   } else {
      bin = anv_shader_bin_create(device, stage, key_data, key_size,
                                  kernel_data, kernel_size,
                                  constant_data, constant_data_size,
                                  prog_data, prog_data_size,
                                  stats, num_stats,
                                  xfb_info, bind_map);
   }

   if (bin == NULL)
      return NULL;

#ifdef ENABLE_SHADER_CACHE
   struct disk_cache *disk_cache = device->physical->disk_cache;
   if (disk_cache) {
      struct blob binary;
      blob_init(&binary);
      if (anv_shader_bin_write_to_blob(bin, &binary)) {
         cache_key cache_key;
         disk_cache_compute_key(disk_cache, key_data, key_size, cache_key);

         disk_cache_put(disk_cache, cache_key, binary.data, binary.size, NULL);
      }

      blob_finish(&binary);
   }
#endif

   return bin;
}

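/* The NIR cache stores serialized NIR keyed by a caller-provided SHA-1 of the
 * shader source and compile parameters, so repeated pipeline compiles can
 * skip SPIR-V -> NIR translation.  Entries are ralloc'ed off the nir_cache
 * hash table and freed in anv_pipeline_cache_finish().
 */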
struct serialized_nir {
   unsigned char sha1_key[20];
   size_t size;
   char data[0];
};

struct nir_shader *
anv_device_search_for_nir(struct anv_device *device,
                          struct anv_pipeline_cache *cache,
                          const nir_shader_compiler_options *nir_options,
                          unsigned char sha1_key[20],
                          void *mem_ctx)
{
   if (cache && cache->nir_cache) {
      const struct serialized_nir *snir = NULL;

      anv_cache_lock(cache);
      struct hash_entry *entry =
         _mesa_hash_table_search(cache->nir_cache, sha1_key);
      if (entry)
         snir = entry->data;
      anv_cache_unlock(cache);

      if (snir) {
         struct blob_reader blob;
         blob_reader_init(&blob, snir->data, snir->size);

         nir_shader *nir = nir_deserialize(mem_ctx, nir_options, &blob);
         if (blob.overrun) {
            ralloc_free(nir);
         } else {
            return nir;
         }
      }
   }

   return NULL;
}

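/* Insert serialized NIR into the cache.  Serialization happens outside the
 * lock; we then re-check for a racing insert before committing, since ralloc
 * on the cache's context is not thread-safe.
 */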
void
anv_device_upload_nir(struct anv_device *device,
                      struct anv_pipeline_cache *cache,
                      const struct nir_shader *nir,
                      unsigned char sha1_key[20])
{
   if (cache && cache->nir_cache) {
      anv_cache_lock(cache);
      struct hash_entry *entry =
         _mesa_hash_table_search(cache->nir_cache, sha1_key);
      anv_cache_unlock(cache);
      if (entry)
         return;

      struct blob blob;
      blob_init(&blob);

      nir_serialize(&blob, nir, false);
      if (blob.out_of_memory) {
         blob_finish(&blob);
         return;
      }

      anv_cache_lock(cache);
      /* Because ralloc isn't thread-safe, we have to do all this inside the
       * lock.  We could unlock for the big memcpy but it's probably not worth
       * the hassle.
       */
      entry = _mesa_hash_table_search(cache->nir_cache, sha1_key);
      if (entry) {
         blob_finish(&blob);
         anv_cache_unlock(cache);
         return;
      }

      struct serialized_nir *snir =
         ralloc_size(cache->nir_cache, sizeof(*snir) + blob.size);
      memcpy(snir->sha1_key, sha1_key, 20);
      snir->size = blob.size;
      memcpy(snir->data, blob.data, blob.size);

      blob_finish(&blob);

      _mesa_hash_table_insert(cache->nir_cache, snir->sha1_key, snir);

      anv_cache_unlock(cache);
   }
}