/* mesa.git: src/intel/vulkan/anv_pipeline_cache.c */
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/blob.h"
#include "util/hash_table.h"
#include "util/debug.h"
#include "util/disk_cache.h"
#include "util/mesa-sha1.h"
#include "nir/nir_serialize.h"
#include "anv_private.h"
#include "nir/nir_xfb_info.h"

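/* Create a ref-counted shader binary.  The anv_shader_bin itself, its key,
 * the copied prog_data, the xfb_info and the bind-map arrays are all packed
 * into a single allocation (see the ANV_MULTIALLOC block below), so the
 * whole thing is released with one vk_free() in anv_shader_bin_destroy().
 * The kernel and constant data are copied into device state pools instead.
 */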
struct anv_shader_bin *
anv_shader_bin_create(struct anv_device *device,
                      gl_shader_stage stage,
                      const void *key_data, uint32_t key_size,
                      const void *kernel_data, uint32_t kernel_size,
                      const void *constant_data, uint32_t constant_data_size,
                      const struct brw_stage_prog_data *prog_data_in,
                      uint32_t prog_data_size,
                      const struct brw_compile_stats *stats, uint32_t num_stats,
                      const nir_xfb_info *xfb_info_in,
                      const struct anv_pipeline_bind_map *bind_map)
{
   struct anv_shader_bin *shader;
   struct anv_shader_bin_key *key;
   struct brw_stage_prog_data *prog_data;
   uint32_t *prog_data_param;
   nir_xfb_info *xfb_info;
   struct anv_pipeline_binding *surface_to_descriptor, *sampler_to_descriptor;

   ANV_MULTIALLOC(ma);
   anv_multialloc_add(&ma, &shader, 1);
   anv_multialloc_add_size(&ma, &key, sizeof(*key) + key_size);
   anv_multialloc_add_size(&ma, &prog_data, prog_data_size);
   anv_multialloc_add(&ma, &prog_data_param, prog_data_in->nr_params);
   if (xfb_info_in) {
      uint32_t xfb_info_size = nir_xfb_info_size(xfb_info_in->output_count);
      anv_multialloc_add_size(&ma, &xfb_info, xfb_info_size);
   }
   anv_multialloc_add(&ma, &surface_to_descriptor,
                      bind_map->surface_count);
   anv_multialloc_add(&ma, &sampler_to_descriptor,
                      bind_map->sampler_count);

   if (!anv_multialloc_alloc(&ma, &device->vk.alloc,
                             VK_SYSTEM_ALLOCATION_SCOPE_DEVICE))
      return NULL;

   shader->ref_cnt = 1;

   shader->stage = stage;

   key->size = key_size;
   memcpy(key->data, key_data, key_size);
   shader->key = key;

   shader->kernel =
      anv_state_pool_alloc(&device->instruction_state_pool, kernel_size, 64);
   memcpy(shader->kernel.map, kernel_data, kernel_size);
   shader->kernel_size = kernel_size;

   if (constant_data_size) {
      shader->constant_data =
         anv_state_pool_alloc(&device->dynamic_state_pool,
                              constant_data_size, 32);
      memcpy(shader->constant_data.map, constant_data, constant_data_size);
   } else {
      shader->constant_data = ANV_STATE_NULL;
   }
   shader->constant_data_size = constant_data_size;

   memcpy(prog_data, prog_data_in, prog_data_size);
   memset(prog_data_param, 0,
          prog_data->nr_params * sizeof(*prog_data_param));
   prog_data->param = prog_data_param;
   shader->prog_data = prog_data;
   shader->prog_data_size = prog_data_size;

   assert(num_stats <= ARRAY_SIZE(shader->stats));
   typed_memcpy(shader->stats, stats, num_stats);
   shader->num_stats = num_stats;

   if (xfb_info_in) {
      *xfb_info = *xfb_info_in;
      typed_memcpy(xfb_info->outputs, xfb_info_in->outputs,
                   xfb_info_in->output_count);
      shader->xfb_info = xfb_info;
   } else {
      shader->xfb_info = NULL;
   }

   shader->bind_map = *bind_map;
   typed_memcpy(surface_to_descriptor, bind_map->surface_to_descriptor,
                bind_map->surface_count);
   shader->bind_map.surface_to_descriptor = surface_to_descriptor;
   typed_memcpy(sampler_to_descriptor, bind_map->sampler_to_descriptor,
                bind_map->sampler_count);
   shader->bind_map.sampler_to_descriptor = sampler_to_descriptor;

   return shader;
}

void
anv_shader_bin_destroy(struct anv_device *device,
                       struct anv_shader_bin *shader)
{
   assert(shader->ref_cnt == 0);
   anv_state_pool_free(&device->instruction_state_pool, shader->kernel);
   anv_state_pool_free(&device->dynamic_state_pool, shader->constant_data);
   vk_free(&device->vk.alloc, shader);
}

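/* Serialization of an anv_shader_bin.  anv_shader_bin_write_to_blob() and
 * anv_shader_bin_create_from_blob() must stay in sync: the reader consumes
 * fields in exactly the order the writer emits them (stage, key, kernel,
 * constant data, prog_data, stats, xfb_info, bind map).  The same format is
 * used both for VkPipelineCache data and for the on-disk shader cache.
 */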
static bool
anv_shader_bin_write_to_blob(const struct anv_shader_bin *shader,
                             struct blob *blob)
{
   blob_write_uint32(blob, shader->stage);

   blob_write_uint32(blob, shader->key->size);
   blob_write_bytes(blob, shader->key->data, shader->key->size);

   blob_write_uint32(blob, shader->kernel_size);
   blob_write_bytes(blob, shader->kernel.map, shader->kernel_size);

   blob_write_uint32(blob, shader->constant_data_size);
   blob_write_bytes(blob, shader->constant_data.map,
                    shader->constant_data_size);

   blob_write_uint32(blob, shader->prog_data_size);
   blob_write_bytes(blob, shader->prog_data, shader->prog_data_size);

   blob_write_uint32(blob, shader->num_stats);
   blob_write_bytes(blob, shader->stats,
                    shader->num_stats * sizeof(shader->stats[0]));

   if (shader->xfb_info) {
      uint32_t xfb_info_size =
         nir_xfb_info_size(shader->xfb_info->output_count);
      blob_write_uint32(blob, xfb_info_size);
      blob_write_bytes(blob, shader->xfb_info, xfb_info_size);
   } else {
      blob_write_uint32(blob, 0);
   }

   blob_write_bytes(blob, shader->bind_map.surface_sha1,
                    sizeof(shader->bind_map.surface_sha1));
   blob_write_bytes(blob, shader->bind_map.sampler_sha1,
                    sizeof(shader->bind_map.sampler_sha1));
   blob_write_bytes(blob, shader->bind_map.push_sha1,
                    sizeof(shader->bind_map.push_sha1));
   blob_write_uint32(blob, shader->bind_map.surface_count);
   blob_write_uint32(blob, shader->bind_map.sampler_count);
   blob_write_bytes(blob, shader->bind_map.surface_to_descriptor,
                    shader->bind_map.surface_count *
                    sizeof(*shader->bind_map.surface_to_descriptor));
   blob_write_bytes(blob, shader->bind_map.sampler_to_descriptor,
                    shader->bind_map.sampler_count *
                    sizeof(*shader->bind_map.sampler_to_descriptor));
   blob_write_bytes(blob, shader->bind_map.push_ranges,
                    sizeof(shader->bind_map.push_ranges));

   return !blob->out_of_memory;
}

static struct anv_shader_bin *
anv_shader_bin_create_from_blob(struct anv_device *device,
                                struct blob_reader *blob)
{
   gl_shader_stage stage = blob_read_uint32(blob);

   uint32_t key_size = blob_read_uint32(blob);
   const void *key_data = blob_read_bytes(blob, key_size);

   uint32_t kernel_size = blob_read_uint32(blob);
   const void *kernel_data = blob_read_bytes(blob, kernel_size);

   uint32_t constant_data_size = blob_read_uint32(blob);
   const void *constant_data = blob_read_bytes(blob, constant_data_size);

   uint32_t prog_data_size = blob_read_uint32(blob);
   const struct brw_stage_prog_data *prog_data =
      blob_read_bytes(blob, prog_data_size);
   if (blob->overrun)
      return NULL;

   uint32_t num_stats = blob_read_uint32(blob);
   const struct brw_compile_stats *stats =
      blob_read_bytes(blob, num_stats * sizeof(stats[0]));

   const nir_xfb_info *xfb_info = NULL;
   uint32_t xfb_size = blob_read_uint32(blob);
   if (xfb_size)
      xfb_info = blob_read_bytes(blob, xfb_size);

   struct anv_pipeline_bind_map bind_map;
   blob_copy_bytes(blob, bind_map.surface_sha1, sizeof(bind_map.surface_sha1));
   blob_copy_bytes(blob, bind_map.sampler_sha1, sizeof(bind_map.sampler_sha1));
   blob_copy_bytes(blob, bind_map.push_sha1, sizeof(bind_map.push_sha1));
   bind_map.surface_count = blob_read_uint32(blob);
   bind_map.sampler_count = blob_read_uint32(blob);
   bind_map.surface_to_descriptor = (void *)
      blob_read_bytes(blob, bind_map.surface_count *
                            sizeof(*bind_map.surface_to_descriptor));
   bind_map.sampler_to_descriptor = (void *)
      blob_read_bytes(blob, bind_map.sampler_count *
                            sizeof(*bind_map.sampler_to_descriptor));
   blob_copy_bytes(blob, bind_map.push_ranges, sizeof(bind_map.push_ranges));

   if (blob->overrun)
      return NULL;

   return anv_shader_bin_create(device, stage,
                                key_data, key_size,
                                kernel_data, kernel_size,
                                constant_data, constant_data_size,
                                prog_data, prog_data_size,
                                stats, num_stats, xfb_info, &bind_map);
}

/* Remaining work:
 *
 * - Compact the binding table layout so that it is tight and not dependent
 *   on the descriptor set layout.
 *
 * - Review the prog_data structs for size and cacheability: struct
 *   brw_stage_prog_data has a binding_table which uses a lot of uint32_t
 *   fields for 8-bit quantities, etc.; use bit fields for all booleans,
 *   e.g. dual_src_blend.
 */

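/* The in-memory pipeline cache.  cache->cache maps an anv_shader_bin_key
 * (an opaque blob of key bytes, hashed in full) to a referenced
 * anv_shader_bin.  cache->nir_cache maps a 20-byte SHA-1 to serialized NIR
 * so that callers can reuse NIR they have already generated for the same
 * shader.  Both tables are protected by cache->mutex unless the cache was
 * created externally synchronized.
 */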
static uint32_t
shader_bin_key_hash_func(const void *void_key)
{
   const struct anv_shader_bin_key *key = void_key;
   return _mesa_hash_data(key->data, key->size);
}

static bool
shader_bin_key_compare_func(const void *void_a, const void *void_b)
{
   const struct anv_shader_bin_key *a = void_a, *b = void_b;
   if (a->size != b->size)
      return false;

   return memcmp(a->data, b->data, a->size) == 0;
}

static uint32_t
sha1_hash_func(const void *sha1)
{
   return _mesa_hash_data(sha1, 20);
}

static bool
sha1_compare_func(const void *sha1_a, const void *sha1_b)
{
   return memcmp(sha1_a, sha1_b, 20) == 0;
}

void
anv_pipeline_cache_init(struct anv_pipeline_cache *cache,
                        struct anv_device *device,
                        bool cache_enabled)
{
   vk_object_base_init(&device->vk, &cache->base,
                       VK_OBJECT_TYPE_PIPELINE_CACHE);
   cache->device = device;
   pthread_mutex_init(&cache->mutex, NULL);

   if (cache_enabled) {
      cache->cache = _mesa_hash_table_create(NULL, shader_bin_key_hash_func,
                                             shader_bin_key_compare_func);
      cache->nir_cache = _mesa_hash_table_create(NULL, sha1_hash_func,
                                                 sha1_compare_func);
   } else {
      cache->cache = NULL;
      cache->nir_cache = NULL;
   }
}

void
anv_pipeline_cache_finish(struct anv_pipeline_cache *cache)
{
   pthread_mutex_destroy(&cache->mutex);

   if (cache->cache) {
      /* This is a bit unfortunate.  In order to keep things from randomly
       * going away, the shader cache has to hold a reference to all shader
       * binaries it contains.  We unref them when we destroy the cache.
       */
      hash_table_foreach(cache->cache, entry)
         anv_shader_bin_unref(cache->device, entry->data);

      _mesa_hash_table_destroy(cache->cache, NULL);
   }

   if (cache->nir_cache) {
      hash_table_foreach(cache->nir_cache, entry)
         ralloc_free(entry->data);

      _mesa_hash_table_destroy(cache->nir_cache, NULL);
   }

   vk_object_base_finish(&cache->base);
}

static struct anv_shader_bin *
anv_pipeline_cache_search_locked(struct anv_pipeline_cache *cache,
                                 const void *key_data, uint32_t key_size)
{
   uint32_t vla[1 + DIV_ROUND_UP(key_size, sizeof(uint32_t))];
   struct anv_shader_bin_key *key = (void *)vla;
   key->size = key_size;
   memcpy(key->data, key_data, key_size);

   struct hash_entry *entry = _mesa_hash_table_search(cache->cache, key);
   if (entry)
      return entry->data;
   else
      return NULL;
}

static inline void
anv_cache_lock(struct anv_pipeline_cache *cache)
{
   if (!cache->external_sync)
      pthread_mutex_lock(&cache->mutex);
}

static inline void
anv_cache_unlock(struct anv_pipeline_cache *cache)
{
   if (!cache->external_sync)
      pthread_mutex_unlock(&cache->mutex);
}

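/* Reference-counting convention: the hash table owns one reference to each
 * anv_shader_bin it stores, taken when the bin is inserted and dropped in
 * anv_pipeline_cache_finish().  The lookup and upload entry points below
 * take an extra reference before returning, so the caller always ends up
 * owning a reference of its own and must anv_shader_bin_unref() it when
 * done.
 */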
struct anv_shader_bin *
anv_pipeline_cache_search(struct anv_pipeline_cache *cache,
                          const void *key_data, uint32_t key_size)
{
   if (!cache->cache)
      return NULL;

   anv_cache_lock(cache);

   struct anv_shader_bin *shader =
      anv_pipeline_cache_search_locked(cache, key_data, key_size);

   anv_cache_unlock(cache);

   /* We increment refcount before handing it to the caller */
   if (shader)
      anv_shader_bin_ref(shader);

   return shader;
}

static void
anv_pipeline_cache_add_shader_bin(struct anv_pipeline_cache *cache,
                                  struct anv_shader_bin *bin)
{
   if (!cache->cache)
      return;

   anv_cache_lock(cache);

   struct hash_entry *entry = _mesa_hash_table_search(cache->cache, bin->key);
   if (entry == NULL) {
      /* Take a reference for the cache */
      anv_shader_bin_ref(bin);
      _mesa_hash_table_insert(cache->cache, bin->key, bin);
   }

   anv_cache_unlock(cache);
}

static struct anv_shader_bin *
anv_pipeline_cache_add_shader_locked(struct anv_pipeline_cache *cache,
                                     gl_shader_stage stage,
                                     const void *key_data, uint32_t key_size,
                                     const void *kernel_data,
                                     uint32_t kernel_size,
                                     const void *constant_data,
                                     uint32_t constant_data_size,
                                     const struct brw_stage_prog_data *prog_data,
                                     uint32_t prog_data_size,
                                     const struct brw_compile_stats *stats,
                                     uint32_t num_stats,
                                     const nir_xfb_info *xfb_info,
                                     const struct anv_pipeline_bind_map *bind_map)
{
   struct anv_shader_bin *shader =
      anv_pipeline_cache_search_locked(cache, key_data, key_size);
   if (shader)
      return shader;

   struct anv_shader_bin *bin =
      anv_shader_bin_create(cache->device, stage,
                            key_data, key_size,
                            kernel_data, kernel_size,
                            constant_data, constant_data_size,
                            prog_data, prog_data_size,
                            stats, num_stats, xfb_info, bind_map);
   if (!bin)
      return NULL;

   _mesa_hash_table_insert(cache->cache, bin->key, bin);

   return bin;
}

struct anv_shader_bin *
anv_pipeline_cache_upload_kernel(struct anv_pipeline_cache *cache,
                                 gl_shader_stage stage,
                                 const void *key_data, uint32_t key_size,
                                 const void *kernel_data, uint32_t kernel_size,
                                 const void *constant_data,
                                 uint32_t constant_data_size,
                                 const struct brw_stage_prog_data *prog_data,
                                 uint32_t prog_data_size,
                                 const struct brw_compile_stats *stats,
                                 uint32_t num_stats,
                                 const nir_xfb_info *xfb_info,
                                 const struct anv_pipeline_bind_map *bind_map)
{
   if (cache->cache) {
      anv_cache_lock(cache);

      struct anv_shader_bin *bin =
         anv_pipeline_cache_add_shader_locked(cache, stage, key_data, key_size,
                                              kernel_data, kernel_size,
                                              constant_data, constant_data_size,
                                              prog_data, prog_data_size,
                                              stats, num_stats,
                                              xfb_info, bind_map);

      anv_cache_unlock(cache);

      /* We increment refcount before handing it to the caller */
      if (bin)
         anv_shader_bin_ref(bin);

      return bin;
   } else {
      /* In this case, we're not caching it so the caller owns it entirely */
      return anv_shader_bin_create(cache->device, stage,
                                   key_data, key_size,
                                   kernel_data, kernel_size,
                                   constant_data, constant_data_size,
                                   prog_data, prog_data_size,
                                   stats, num_stats,
                                   xfb_info, bind_map);
   }
}

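/* Header for the data returned by vkGetPipelineCacheData() and accepted
 * through VkPipelineCacheCreateInfo::pInitialData.  The layout of these
 * first five fields is dictated by the Vulkan spec; everything after the
 * header is the driver-private stream of serialized anv_shader_bins.
 */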
struct cache_header {
   uint32_t header_size;
   uint32_t header_version;
   uint32_t vendor_id;
   uint32_t device_id;
   uint8_t uuid[VK_UUID_SIZE];
};

static void
anv_pipeline_cache_load(struct anv_pipeline_cache *cache,
                        const void *data, size_t size)
{
   struct anv_device *device = cache->device;
   struct anv_physical_device *pdevice = device->physical;

   if (cache->cache == NULL)
      return;

   struct blob_reader blob;
   blob_reader_init(&blob, data, size);

   struct cache_header header;
   blob_copy_bytes(&blob, &header, sizeof(header));
   uint32_t count = blob_read_uint32(&blob);
   if (blob.overrun)
      return;

   if (header.header_size < sizeof(header))
      return;
   if (header.header_version != VK_PIPELINE_CACHE_HEADER_VERSION_ONE)
      return;
   if (header.vendor_id != 0x8086)
      return;
   if (header.device_id != device->info.chipset_id)
      return;
   if (memcmp(header.uuid, pdevice->pipeline_cache_uuid, VK_UUID_SIZE) != 0)
      return;

   for (uint32_t i = 0; i < count; i++) {
      struct anv_shader_bin *bin =
         anv_shader_bin_create_from_blob(device, &blob);
      if (!bin)
         break;
      _mesa_hash_table_insert(cache->cache, bin->key, bin);
   }
}

VkResult anv_CreatePipelineCache(
    VkDevice                                    _device,
    const VkPipelineCacheCreateInfo*            pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkPipelineCache*                            pPipelineCache)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_pipeline_cache *cache;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO);
   assert((pCreateInfo->flags &
           ~VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT) == 0);

   cache = vk_alloc2(&device->vk.alloc, pAllocator,
                     sizeof(*cache), 8,
                     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (cache == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   cache->external_sync =
      (pCreateInfo->flags & VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT);

   anv_pipeline_cache_init(cache, device,
                           device->physical->instance->pipeline_cache_enabled);

   if (pCreateInfo->initialDataSize > 0)
      anv_pipeline_cache_load(cache,
                              pCreateInfo->pInitialData,
                              pCreateInfo->initialDataSize);

   *pPipelineCache = anv_pipeline_cache_to_handle(cache);

   return VK_SUCCESS;
}

void anv_DestroyPipelineCache(
    VkDevice                                    _device,
    VkPipelineCache                             _cache,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache);

   if (!cache)
      return;

   anv_pipeline_cache_finish(cache);

   vk_free2(&device->vk.alloc, pAllocator, cache);
}

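/* vkGetPipelineCacheData() follows the usual Vulkan two-call idiom: when
 * pData is NULL we serialize into a NULL fixed-size blob of SIZE_MAX bytes
 * just to measure the required size, otherwise we serialize into the
 * caller's buffer and return VK_INCOMPLETE if not everything fits.  A rough
 * sketch of how an application typically drives this (hypothetical
 * application code, not part of the driver):
 *
 *    size_t size = 0;
 *    vkGetPipelineCacheData(device, cache, &size, NULL);
 *    void *data = malloc(size);
 *    vkGetPipelineCacheData(device, cache, &size, data);
 *    (write 'data' to disk and feed it back later through
 *     VkPipelineCacheCreateInfo::pInitialData)
 */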
VkResult anv_GetPipelineCacheData(
    VkDevice                                    _device,
    VkPipelineCache                             _cache,
    size_t*                                     pDataSize,
    void*                                       pData)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache);

   struct blob blob;
   if (pData) {
      blob_init_fixed(&blob, pData, *pDataSize);
   } else {
      blob_init_fixed(&blob, NULL, SIZE_MAX);
   }

   struct cache_header header = {
      .header_size = sizeof(struct cache_header),
      .header_version = VK_PIPELINE_CACHE_HEADER_VERSION_ONE,
      .vendor_id = 0x8086,
      .device_id = device->info.chipset_id,
   };
   memcpy(header.uuid, device->physical->pipeline_cache_uuid, VK_UUID_SIZE);
   blob_write_bytes(&blob, &header, sizeof(header));

   uint32_t count = 0;
   intptr_t count_offset = blob_reserve_uint32(&blob);
   if (count_offset < 0) {
      *pDataSize = 0;
      blob_finish(&blob);
      return VK_INCOMPLETE;
   }

   VkResult result = VK_SUCCESS;
   if (cache->cache) {
      hash_table_foreach(cache->cache, entry) {
         struct anv_shader_bin *shader = entry->data;

         size_t save_size = blob.size;
         if (!anv_shader_bin_write_to_blob(shader, &blob)) {
            /* If it fails reset to the previous size and bail */
            blob.size = save_size;
            result = VK_INCOMPLETE;
            break;
         }

         count++;
      }
   }

   blob_overwrite_uint32(&blob, count_offset, count);

   *pDataSize = blob.size;

   blob_finish(&blob);

   return result;
}

VkResult anv_MergePipelineCaches(
    VkDevice                                    _device,
    VkPipelineCache                             destCache,
    uint32_t                                    srcCacheCount,
    const VkPipelineCache*                      pSrcCaches)
{
   ANV_FROM_HANDLE(anv_pipeline_cache, dst, destCache);

   if (!dst->cache)
      return VK_SUCCESS;

   for (uint32_t i = 0; i < srcCacheCount; i++) {
      ANV_FROM_HANDLE(anv_pipeline_cache, src, pSrcCaches[i]);
      if (!src->cache)
         continue;

      hash_table_foreach(src->cache, entry) {
         struct anv_shader_bin *bin = entry->data;
         assert(bin);

         if (_mesa_hash_table_search(dst->cache, bin->key))
            continue;

         anv_shader_bin_ref(bin);
         _mesa_hash_table_insert(dst->cache, bin->key, bin);
      }
   }

   return VK_SUCCESS;
}

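/* Two-level lookup used by the compile paths: first the VkPipelineCache the
 * application supplied (or the device's default internal cache), then the
 * on-disk shader cache when Mesa is built with ENABLE_SHADER_CACHE.  A disk
 * hit is promoted into the in-memory cache so subsequent lookups are cheap.
 * *user_cache_hit reports whether the hit came from an application-provided
 * cache rather than the device's default one.
 */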
struct anv_shader_bin *
anv_device_search_for_kernel(struct anv_device *device,
                             struct anv_pipeline_cache *cache,
                             const void *key_data, uint32_t key_size,
                             bool *user_cache_hit)
{
   struct anv_shader_bin *bin;

   *user_cache_hit = false;

   if (cache) {
      bin = anv_pipeline_cache_search(cache, key_data, key_size);
      if (bin) {
         *user_cache_hit = cache != &device->default_pipeline_cache;
         return bin;
      }
   }

#ifdef ENABLE_SHADER_CACHE
   struct disk_cache *disk_cache = device->physical->disk_cache;
   if (disk_cache && device->physical->instance->pipeline_cache_enabled) {
      cache_key cache_key;
      disk_cache_compute_key(disk_cache, key_data, key_size, cache_key);

      size_t buffer_size;
      uint8_t *buffer = disk_cache_get(disk_cache, cache_key, &buffer_size);
      if (buffer) {
         struct blob_reader blob;
         blob_reader_init(&blob, buffer, buffer_size);
         bin = anv_shader_bin_create_from_blob(device, &blob);
         free(buffer);

         if (bin) {
            if (cache)
               anv_pipeline_cache_add_shader_bin(cache, bin);
            return bin;
         }
      }
   }
#endif

   return NULL;
}

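/* Counterpart to anv_device_search_for_kernel(): after a fresh compile, the
 * binary is added to the in-memory pipeline cache (if any) and also written
 * through to the on-disk shader cache when that is enabled, so future runs
 * can skip the compile entirely.
 */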
struct anv_shader_bin *
anv_device_upload_kernel(struct anv_device *device,
                         struct anv_pipeline_cache *cache,
                         gl_shader_stage stage,
                         const void *key_data, uint32_t key_size,
                         const void *kernel_data, uint32_t kernel_size,
                         const void *constant_data,
                         uint32_t constant_data_size,
                         const struct brw_stage_prog_data *prog_data,
                         uint32_t prog_data_size,
                         const struct brw_compile_stats *stats,
                         uint32_t num_stats,
                         const nir_xfb_info *xfb_info,
                         const struct anv_pipeline_bind_map *bind_map)
{
   struct anv_shader_bin *bin;
   if (cache) {
      bin = anv_pipeline_cache_upload_kernel(cache, stage, key_data, key_size,
                                             kernel_data, kernel_size,
                                             constant_data, constant_data_size,
                                             prog_data, prog_data_size,
                                             stats, num_stats,
                                             xfb_info, bind_map);
   } else {
      bin = anv_shader_bin_create(device, stage, key_data, key_size,
                                  kernel_data, kernel_size,
                                  constant_data, constant_data_size,
                                  prog_data, prog_data_size,
                                  stats, num_stats,
                                  xfb_info, bind_map);
   }

   if (bin == NULL)
      return NULL;

#ifdef ENABLE_SHADER_CACHE
   struct disk_cache *disk_cache = device->physical->disk_cache;
   if (disk_cache) {
      struct blob binary;
      blob_init(&binary);
      if (anv_shader_bin_write_to_blob(bin, &binary)) {
         cache_key cache_key;
         disk_cache_compute_key(disk_cache, key_data, key_size, cache_key);

         disk_cache_put(disk_cache, cache_key, binary.data, binary.size, NULL);
      }

      blob_finish(&binary);
   }
#endif

   return bin;
}

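/* The NIR cache stores serialized NIR keyed by a 20-byte SHA-1 that the
 * callers compute from the shader module and related compile state.  A hit
 * lets the pipeline code reuse NIR it has already generated instead of
 * redoing SPIR-V translation and the early lowering passes.  Entries are
 * ralloc'ed off cache->nir_cache and freed along with the cache.
 */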
struct serialized_nir {
   unsigned char sha1_key[20];
   size_t size;
   char data[0];
};

struct nir_shader *
anv_device_search_for_nir(struct anv_device *device,
                          struct anv_pipeline_cache *cache,
                          const nir_shader_compiler_options *nir_options,
                          unsigned char sha1_key[20],
                          void *mem_ctx)
{
   if (cache && cache->nir_cache) {
      const struct serialized_nir *snir = NULL;

      anv_cache_lock(cache);
      struct hash_entry *entry =
         _mesa_hash_table_search(cache->nir_cache, sha1_key);
      if (entry)
         snir = entry->data;
      anv_cache_unlock(cache);

      if (snir) {
         struct blob_reader blob;
         blob_reader_init(&blob, snir->data, snir->size);

         nir_shader *nir = nir_deserialize(mem_ctx, nir_options, &blob);
         if (blob.overrun) {
            ralloc_free(nir);
         } else {
            return nir;
         }
      }
   }

   return NULL;
}

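/* Insert freshly generated NIR into the NIR cache.  The NIR is serialized
 * outside the lock (nir_serialize() can be slow), then the table is checked
 * again under the lock before inserting, so a racing thread that added the
 * same SHA-1 in the meantime wins and the duplicate is discarded.
 */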
void
anv_device_upload_nir(struct anv_device *device,
                      struct anv_pipeline_cache *cache,
                      const struct nir_shader *nir,
                      unsigned char sha1_key[20])
{
   if (cache && cache->nir_cache) {
      anv_cache_lock(cache);
      struct hash_entry *entry =
         _mesa_hash_table_search(cache->nir_cache, sha1_key);
      anv_cache_unlock(cache);
      if (entry)
         return;

      struct blob blob;
      blob_init(&blob);

      nir_serialize(&blob, nir, false);
      if (blob.out_of_memory) {
         blob_finish(&blob);
         return;
      }

      anv_cache_lock(cache);
      /* Because ralloc isn't thread-safe, we have to do all this inside the
       * lock.  We could unlock for the big memcpy but it's probably not worth
       * the hassle.
       */
      entry = _mesa_hash_table_search(cache->nir_cache, sha1_key);
      if (entry) {
         blob_finish(&blob);
         anv_cache_unlock(cache);
         return;
      }

      struct serialized_nir *snir =
         ralloc_size(cache->nir_cache, sizeof(*snir) + blob.size);
      memcpy(snir->sha1_key, sha1_key, 20);
      snir->size = blob.size;
      memcpy(snir->data, blob.data, blob.size);

      blob_finish(&blob);

      _mesa_hash_table_insert(cache->nir_cache, snir->sha1_key, snir);

      anv_cache_unlock(cache);
   }
}