Move blob from compiler/ to util/
mesa.git: src/intel/vulkan/anv_pipeline_cache.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/blob.h"
#include "util/hash_table.h"
#include "util/debug.h"
#include "util/disk_cache.h"
#include "util/mesa-sha1.h"
#include "nir/nir_serialize.h"
#include "anv_private.h"
#include "nir/nir_xfb_info.h"

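/* Create an anv_shader_bin that owns copies of everything it needs: the
 * cache key, compiled kernel, constant data, prog_data, compile stats,
 * transform feedback info, and bind map.  CPU-side storage comes from a
 * single multialloc; the kernel and constant data are uploaded into the
 * device's instruction and dynamic state pools.
 */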
struct anv_shader_bin *
anv_shader_bin_create(struct anv_device *device,
                      const void *key_data, uint32_t key_size,
                      const void *kernel_data, uint32_t kernel_size,
                      const void *constant_data, uint32_t constant_data_size,
                      const struct brw_stage_prog_data *prog_data_in,
                      uint32_t prog_data_size, const void *prog_data_param_in,
                      const struct brw_compile_stats *stats, uint32_t num_stats,
                      const nir_xfb_info *xfb_info_in,
                      const struct anv_pipeline_bind_map *bind_map)
{
   struct anv_shader_bin *shader;
   struct anv_shader_bin_key *key;
   struct brw_stage_prog_data *prog_data;
   uint32_t *prog_data_param;
   nir_xfb_info *xfb_info;
   struct anv_pipeline_binding *surface_to_descriptor, *sampler_to_descriptor;

   ANV_MULTIALLOC(ma);
   anv_multialloc_add(&ma, &shader, 1);
   anv_multialloc_add_size(&ma, &key, sizeof(*key) + key_size);
   anv_multialloc_add_size(&ma, &prog_data, prog_data_size);
   anv_multialloc_add(&ma, &prog_data_param, prog_data_in->nr_params);
   if (xfb_info_in) {
      uint32_t xfb_info_size = nir_xfb_info_size(xfb_info_in->output_count);
      anv_multialloc_add_size(&ma, &xfb_info, xfb_info_size);
   }
   anv_multialloc_add(&ma, &surface_to_descriptor,
                      bind_map->surface_count);
   anv_multialloc_add(&ma, &sampler_to_descriptor,
                      bind_map->sampler_count);

   if (!anv_multialloc_alloc(&ma, &device->alloc,
                             VK_SYSTEM_ALLOCATION_SCOPE_DEVICE))
      return NULL;

   shader->ref_cnt = 1;

   key->size = key_size;
   memcpy(key->data, key_data, key_size);
   shader->key = key;

   shader->kernel =
      anv_state_pool_alloc(&device->instruction_state_pool, kernel_size, 64);
   memcpy(shader->kernel.map, kernel_data, kernel_size);
   shader->kernel_size = kernel_size;

   if (constant_data_size) {
      shader->constant_data =
         anv_state_pool_alloc(&device->dynamic_state_pool,
                              constant_data_size, 32);
      memcpy(shader->constant_data.map, constant_data, constant_data_size);
   } else {
      shader->constant_data = ANV_STATE_NULL;
   }
   shader->constant_data_size = constant_data_size;

   memcpy(prog_data, prog_data_in, prog_data_size);
   memcpy(prog_data_param, prog_data_param_in,
          prog_data->nr_params * sizeof(*prog_data_param));
   prog_data->param = prog_data_param;
   shader->prog_data = prog_data;
   shader->prog_data_size = prog_data_size;

   assert(num_stats <= ARRAY_SIZE(shader->stats));
   typed_memcpy(shader->stats, stats, num_stats);
   shader->num_stats = num_stats;

   if (xfb_info_in) {
      *xfb_info = *xfb_info_in;
      typed_memcpy(xfb_info->outputs, xfb_info_in->outputs,
                   xfb_info_in->output_count);
      shader->xfb_info = xfb_info;
   } else {
      shader->xfb_info = NULL;
   }

   shader->bind_map = *bind_map;
   typed_memcpy(surface_to_descriptor, bind_map->surface_to_descriptor,
                bind_map->surface_count);
   shader->bind_map.surface_to_descriptor = surface_to_descriptor;
   typed_memcpy(sampler_to_descriptor, bind_map->sampler_to_descriptor,
                bind_map->sampler_count);
   shader->bind_map.sampler_to_descriptor = sampler_to_descriptor;

   return shader;
}

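/* Free a shader bin.  Callers go through anv_shader_bin_unref(); by the
 * time we get here the reference count must have dropped to zero.
 */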
void
anv_shader_bin_destroy(struct anv_device *device,
                       struct anv_shader_bin *shader)
{
   assert(shader->ref_cnt == 0);
   anv_state_pool_free(&device->instruction_state_pool, shader->kernel);
   anv_state_pool_free(&device->dynamic_state_pool, shader->constant_data);
   vk_free(&device->alloc, shader);
}

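/* Serialize a shader bin in the same field order that
 * anv_shader_bin_create_from_blob() reads it back.  Returns false if the
 * blob ran out of memory (e.g. when writing into a fixed-size buffer).
 */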
static bool
anv_shader_bin_write_to_blob(const struct anv_shader_bin *shader,
                             struct blob *blob)
{
   blob_write_uint32(blob, shader->key->size);
   blob_write_bytes(blob, shader->key->data, shader->key->size);

   blob_write_uint32(blob, shader->kernel_size);
   blob_write_bytes(blob, shader->kernel.map, shader->kernel_size);

   blob_write_uint32(blob, shader->constant_data_size);
   blob_write_bytes(blob, shader->constant_data.map,
                    shader->constant_data_size);

   blob_write_uint32(blob, shader->prog_data_size);
   blob_write_bytes(blob, shader->prog_data, shader->prog_data_size);
   blob_write_bytes(blob, shader->prog_data->param,
                    shader->prog_data->nr_params *
                    sizeof(*shader->prog_data->param));

   blob_write_uint32(blob, shader->num_stats);
   blob_write_bytes(blob, shader->stats,
                    shader->num_stats * sizeof(shader->stats[0]));

   if (shader->xfb_info) {
      uint32_t xfb_info_size =
         nir_xfb_info_size(shader->xfb_info->output_count);
      blob_write_uint32(blob, xfb_info_size);
      blob_write_bytes(blob, shader->xfb_info, xfb_info_size);
   } else {
      blob_write_uint32(blob, 0);
   }

   blob_write_uint32(blob, shader->bind_map.surface_count);
   blob_write_uint32(blob, shader->bind_map.sampler_count);
   blob_write_bytes(blob, shader->bind_map.surface_to_descriptor,
                    shader->bind_map.surface_count *
                    sizeof(*shader->bind_map.surface_to_descriptor));
   blob_write_bytes(blob, shader->bind_map.sampler_to_descriptor,
                    shader->bind_map.sampler_count *
                    sizeof(*shader->bind_map.sampler_to_descriptor));

   return !blob->out_of_memory;
}

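/* Inverse of anv_shader_bin_write_to_blob().  Reads point directly into
 * the blob's buffer (no copies); anv_shader_bin_create() then copies what
 * it keeps.  Returns NULL on overrun, i.e. truncated or corrupt data.
 */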
static struct anv_shader_bin *
anv_shader_bin_create_from_blob(struct anv_device *device,
                                struct blob_reader *blob)
{
   uint32_t key_size = blob_read_uint32(blob);
   const void *key_data = blob_read_bytes(blob, key_size);

   uint32_t kernel_size = blob_read_uint32(blob);
   const void *kernel_data = blob_read_bytes(blob, kernel_size);

   uint32_t constant_data_size = blob_read_uint32(blob);
   const void *constant_data = blob_read_bytes(blob, constant_data_size);

   uint32_t prog_data_size = blob_read_uint32(blob);
   const struct brw_stage_prog_data *prog_data =
      blob_read_bytes(blob, prog_data_size);
   if (blob->overrun)
      return NULL;
   const void *prog_data_param =
      blob_read_bytes(blob, prog_data->nr_params * sizeof(*prog_data->param));

   uint32_t num_stats = blob_read_uint32(blob);
   const struct brw_compile_stats *stats =
      blob_read_bytes(blob, num_stats * sizeof(stats[0]));

   const nir_xfb_info *xfb_info = NULL;
   uint32_t xfb_size = blob_read_uint32(blob);
   if (xfb_size)
      xfb_info = blob_read_bytes(blob, xfb_size);

   struct anv_pipeline_bind_map bind_map;
   bind_map.surface_count = blob_read_uint32(blob);
   bind_map.sampler_count = blob_read_uint32(blob);
   bind_map.surface_to_descriptor = (void *)
      blob_read_bytes(blob, bind_map.surface_count *
                            sizeof(*bind_map.surface_to_descriptor));
   bind_map.sampler_to_descriptor = (void *)
      blob_read_bytes(blob, bind_map.sampler_count *
                            sizeof(*bind_map.sampler_to_descriptor));

   if (blob->overrun)
      return NULL;

   return anv_shader_bin_create(device,
                                key_data, key_size,
                                kernel_data, kernel_size,
                                constant_data, constant_data_size,
                                prog_data, prog_data_size, prog_data_param,
                                stats, num_stats, xfb_info, &bind_map);
}

/* Remaining work:
 *
 * - Compact binding table layout so it's tight and not dependent on
 *   descriptor set layout.
 *
 * - Review prog_data struct for size and cacheability: struct
 *   brw_stage_prog_data has binding_table which uses a lot of uint32_t
 *   for 8-bit quantities, etc.; use bit fields for all bools, e.g.
 *   dual_src_blend.
 */

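/* The in-memory caches use two key types: variable-length
 * anv_shader_bin_key blobs for compiled kernels and raw 20-byte SHA-1
 * digests for serialized NIR.  Each gets its own hash/compare pair.
 */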
static uint32_t
shader_bin_key_hash_func(const void *void_key)
{
   const struct anv_shader_bin_key *key = void_key;
   return _mesa_hash_data(key->data, key->size);
}

static bool
shader_bin_key_compare_func(const void *void_a, const void *void_b)
{
   const struct anv_shader_bin_key *a = void_a, *b = void_b;
   if (a->size != b->size)
      return false;

   return memcmp(a->data, b->data, a->size) == 0;
}

static uint32_t
sha1_hash_func(const void *sha1)
{
   return _mesa_hash_data(sha1, 20);
}

static bool
sha1_compare_func(const void *sha1_a, const void *sha1_b)
{
   return memcmp(sha1_a, sha1_b, 20) == 0;
}

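/* If cache_enabled is false, both hash tables are left NULL and every
 * entry point below degrades to a no-op or a straight allocation.
 */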
void
anv_pipeline_cache_init(struct anv_pipeline_cache *cache,
                        struct anv_device *device,
                        bool cache_enabled)
{
   cache->device = device;
   pthread_mutex_init(&cache->mutex, NULL);

   if (cache_enabled) {
      cache->cache = _mesa_hash_table_create(NULL, shader_bin_key_hash_func,
                                             shader_bin_key_compare_func);
      cache->nir_cache = _mesa_hash_table_create(NULL, sha1_hash_func,
                                                 sha1_compare_func);
   } else {
      cache->cache = NULL;
      cache->nir_cache = NULL;
   }
}

void
anv_pipeline_cache_finish(struct anv_pipeline_cache *cache)
{
   pthread_mutex_destroy(&cache->mutex);

   if (cache->cache) {
      /* This is a bit unfortunate.  In order to keep things from randomly
       * going away, the shader cache has to hold a reference to all shader
       * binaries it contains.  We unref them when we destroy the cache.
       */
      hash_table_foreach(cache->cache, entry)
         anv_shader_bin_unref(cache->device, entry->data);

      _mesa_hash_table_destroy(cache->cache, NULL);
   }

   if (cache->nir_cache) {
      hash_table_foreach(cache->nir_cache, entry)
         ralloc_free(entry->data);

      _mesa_hash_table_destroy(cache->nir_cache, NULL);
   }
}

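/* Build a temporary key on the stack so we can probe the hash table
 * without allocating.  The uint32_t VLA keeps the key suitably aligned
 * for the anv_shader_bin_key header.
 */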
static struct anv_shader_bin *
anv_pipeline_cache_search_locked(struct anv_pipeline_cache *cache,
                                 const void *key_data, uint32_t key_size)
{
   uint32_t vla[1 + DIV_ROUND_UP(key_size, sizeof(uint32_t))];
   struct anv_shader_bin_key *key = (void *)vla;
   key->size = key_size;
   memcpy(key->data, key_data, key_size);

   struct hash_entry *entry = _mesa_hash_table_search(cache->cache, key);
   if (entry)
      return entry->data;
   else
      return NULL;
}

struct anv_shader_bin *
anv_pipeline_cache_search(struct anv_pipeline_cache *cache,
                          const void *key_data, uint32_t key_size)
{
   if (!cache->cache)
      return NULL;

   pthread_mutex_lock(&cache->mutex);

   struct anv_shader_bin *shader =
      anv_pipeline_cache_search_locked(cache, key_data, key_size);

   pthread_mutex_unlock(&cache->mutex);

   /* We increment refcount before handing it to the caller */
   if (shader)
      anv_shader_bin_ref(shader);

   return shader;
}

static void
anv_pipeline_cache_add_shader_bin(struct anv_pipeline_cache *cache,
                                  struct anv_shader_bin *bin)
{
   if (!cache->cache)
      return;

   pthread_mutex_lock(&cache->mutex);

   struct hash_entry *entry = _mesa_hash_table_search(cache->cache, bin->key);
   if (entry == NULL) {
      /* Take a reference for the cache */
      anv_shader_bin_ref(bin);
      _mesa_hash_table_insert(cache->cache, bin->key, bin);
   }

   pthread_mutex_unlock(&cache->mutex);
}

static struct anv_shader_bin *
anv_pipeline_cache_add_shader_locked(struct anv_pipeline_cache *cache,
                                     const void *key_data, uint32_t key_size,
                                     const void *kernel_data,
                                     uint32_t kernel_size,
                                     const void *constant_data,
                                     uint32_t constant_data_size,
                                     const struct brw_stage_prog_data *prog_data,
                                     uint32_t prog_data_size,
                                     const void *prog_data_param,
                                     const struct brw_compile_stats *stats,
                                     uint32_t num_stats,
                                     const nir_xfb_info *xfb_info,
                                     const struct anv_pipeline_bind_map *bind_map)
{
   struct anv_shader_bin *shader =
      anv_pipeline_cache_search_locked(cache, key_data, key_size);
   if (shader)
      return shader;

   struct anv_shader_bin *bin =
      anv_shader_bin_create(cache->device, key_data, key_size,
                            kernel_data, kernel_size,
                            constant_data, constant_data_size,
                            prog_data, prog_data_size, prog_data_param,
                            stats, num_stats, xfb_info, bind_map);
   if (!bin)
      return NULL;

   _mesa_hash_table_insert(cache->cache, bin->key, bin);

   return bin;
}

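/* Add a freshly compiled kernel to the cache, or return the existing
 * entry if someone else added the same key first.  Either way the caller
 * gets its own reference; with caching disabled the caller owns the bin
 * outright.
 */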
struct anv_shader_bin *
anv_pipeline_cache_upload_kernel(struct anv_pipeline_cache *cache,
                                 const void *key_data, uint32_t key_size,
                                 const void *kernel_data, uint32_t kernel_size,
                                 const void *constant_data,
                                 uint32_t constant_data_size,
                                 const struct brw_stage_prog_data *prog_data,
                                 uint32_t prog_data_size,
                                 const struct brw_compile_stats *stats,
                                 uint32_t num_stats,
                                 const nir_xfb_info *xfb_info,
                                 const struct anv_pipeline_bind_map *bind_map)
{
   if (cache->cache) {
      pthread_mutex_lock(&cache->mutex);

      struct anv_shader_bin *bin =
         anv_pipeline_cache_add_shader_locked(cache, key_data, key_size,
                                              kernel_data, kernel_size,
                                              constant_data, constant_data_size,
                                              prog_data, prog_data_size,
                                              prog_data->param,
                                              stats, num_stats,
                                              xfb_info, bind_map);

      pthread_mutex_unlock(&cache->mutex);

      /* We increment refcount before handing it to the caller */
      if (bin)
         anv_shader_bin_ref(bin);

      return bin;
   } else {
      /* In this case, we're not caching it so the caller owns it entirely */
      return anv_shader_bin_create(cache->device, key_data, key_size,
                                   kernel_data, kernel_size,
                                   constant_data, constant_data_size,
                                   prog_data, prog_data_size,
                                   prog_data->param,
                                   stats, num_stats,
                                   xfb_info, bind_map);
   }
}

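/* Header layout for pipeline cache data as required by the Vulkan spec
 * (vkGetPipelineCacheData); our serialized shaders follow it.
 */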
struct cache_header {
   uint32_t header_size;
   uint32_t header_version;
   uint32_t vendor_id;
   uint32_t device_id;
   uint8_t  uuid[VK_UUID_SIZE];
};

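/* Parse application-supplied initial data.  Data that fails validation
 * (header size/version, vendor and device id, pipeline cache UUID) is
 * silently ignored and the cache simply starts out empty.
 */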
static void
anv_pipeline_cache_load(struct anv_pipeline_cache *cache,
                        const void *data, size_t size)
{
   struct anv_device *device = cache->device;
   struct anv_physical_device *pdevice = &device->instance->physicalDevice;

   if (cache->cache == NULL)
      return;

   struct blob_reader blob;
   blob_reader_init(&blob, data, size);

   struct cache_header header;
   blob_copy_bytes(&blob, &header, sizeof(header));
   uint32_t count = blob_read_uint32(&blob);
   if (blob.overrun)
      return;

   if (header.header_size < sizeof(header))
      return;
   if (header.header_version != VK_PIPELINE_CACHE_HEADER_VERSION_ONE)
      return;
   if (header.vendor_id != 0x8086)
      return;
   if (header.device_id != device->chipset_id)
      return;
   if (memcmp(header.uuid, pdevice->pipeline_cache_uuid, VK_UUID_SIZE) != 0)
      return;

   for (uint32_t i = 0; i < count; i++) {
      struct anv_shader_bin *bin =
         anv_shader_bin_create_from_blob(device, &blob);
      if (!bin)
         break;
      _mesa_hash_table_insert(cache->cache, bin->key, bin);
   }
}

VkResult anv_CreatePipelineCache(
    VkDevice                                    _device,
    const VkPipelineCacheCreateInfo*            pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkPipelineCache*                            pPipelineCache)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_pipeline_cache *cache;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO);
   assert(pCreateInfo->flags == 0);

   cache = vk_alloc2(&device->alloc, pAllocator,
                     sizeof(*cache), 8,
                     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (cache == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   anv_pipeline_cache_init(cache, device,
                           device->instance->pipeline_cache_enabled);

   if (pCreateInfo->initialDataSize > 0)
      anv_pipeline_cache_load(cache,
                              pCreateInfo->pInitialData,
                              pCreateInfo->initialDataSize);

   *pPipelineCache = anv_pipeline_cache_to_handle(cache);

   return VK_SUCCESS;
}

void anv_DestroyPipelineCache(
    VkDevice                                    _device,
    VkPipelineCache                             _cache,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache);

   if (!cache)
      return;

   anv_pipeline_cache_finish(cache);

   vk_free2(&device->alloc, pAllocator, cache);
}

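/* Standard Vulkan two-call idiom: with pData == NULL we only compute the
 * required size.  A fixed blob backed by NULL/SIZE_MAX tracks sizes
 * without copying, so both paths share the serialization code below.
 */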
VkResult anv_GetPipelineCacheData(
    VkDevice                                    _device,
    VkPipelineCache                             _cache,
    size_t*                                     pDataSize,
    void*                                       pData)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache);
   struct anv_physical_device *pdevice = &device->instance->physicalDevice;

   struct blob blob;
   if (pData) {
      blob_init_fixed(&blob, pData, *pDataSize);
   } else {
      blob_init_fixed(&blob, NULL, SIZE_MAX);
   }

   struct cache_header header = {
      .header_size = sizeof(struct cache_header),
      .header_version = VK_PIPELINE_CACHE_HEADER_VERSION_ONE,
      .vendor_id = 0x8086,
      .device_id = device->chipset_id,
   };
   memcpy(header.uuid, pdevice->pipeline_cache_uuid, VK_UUID_SIZE);
   blob_write_bytes(&blob, &header, sizeof(header));

   uint32_t count = 0;
   intptr_t count_offset = blob_reserve_uint32(&blob);
   if (count_offset < 0) {
      *pDataSize = 0;
      blob_finish(&blob);
      return VK_INCOMPLETE;
   }

   VkResult result = VK_SUCCESS;
   if (cache->cache) {
      hash_table_foreach(cache->cache, entry) {
         struct anv_shader_bin *shader = entry->data;

         size_t save_size = blob.size;
         if (!anv_shader_bin_write_to_blob(shader, &blob)) {
            /* If it fails, reset to the previous size and bail */
            blob.size = save_size;
            result = VK_INCOMPLETE;
            break;
         }

         count++;
      }
   }

   blob_overwrite_uint32(&blob, count_offset, count);

   *pDataSize = blob.size;

   blob_finish(&blob);

   return result;
}

VkResult anv_MergePipelineCaches(
    VkDevice                                    _device,
    VkPipelineCache                             destCache,
    uint32_t                                    srcCacheCount,
    const VkPipelineCache*                      pSrcCaches)
{
   ANV_FROM_HANDLE(anv_pipeline_cache, dst, destCache);

   if (!dst->cache)
      return VK_SUCCESS;

   for (uint32_t i = 0; i < srcCacheCount; i++) {
      ANV_FROM_HANDLE(anv_pipeline_cache, src, pSrcCaches[i]);
      if (!src->cache)
         continue;

      hash_table_foreach(src->cache, entry) {
         struct anv_shader_bin *bin = entry->data;
         assert(bin);

         if (_mesa_hash_table_search(dst->cache, bin->key))
            continue;

         anv_shader_bin_ref(bin);
         _mesa_hash_table_insert(dst->cache, bin->key, bin);
      }
   }

   return VK_SUCCESS;
}

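/* Kernel lookup with two levels: the given pipeline cache first, then the
 * on-disk shader cache.  Disk hits are promoted into the in-memory cache.
 * *user_cache_hit reports whether the hit came from a cache other than
 * the device's default pipeline cache.
 */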
struct anv_shader_bin *
anv_device_search_for_kernel(struct anv_device *device,
                             struct anv_pipeline_cache *cache,
                             const void *key_data, uint32_t key_size,
                             bool *user_cache_hit)
{
   struct anv_shader_bin *bin;

   *user_cache_hit = false;

   if (cache) {
      bin = anv_pipeline_cache_search(cache, key_data, key_size);
      if (bin) {
         *user_cache_hit = cache != &device->default_pipeline_cache;
         return bin;
      }
   }

#ifdef ENABLE_SHADER_CACHE
   struct disk_cache *disk_cache = device->instance->physicalDevice.disk_cache;
   if (disk_cache && device->instance->pipeline_cache_enabled) {
      cache_key cache_key;
      disk_cache_compute_key(disk_cache, key_data, key_size, cache_key);

      size_t buffer_size;
      uint8_t *buffer = disk_cache_get(disk_cache, cache_key, &buffer_size);
      if (buffer) {
         struct blob_reader blob;
         blob_reader_init(&blob, buffer, buffer_size);
         bin = anv_shader_bin_create_from_blob(device, &blob);
         free(buffer);

         if (bin) {
            if (cache)
               anv_pipeline_cache_add_shader_bin(cache, bin);
            return bin;
         }
      }
   }
#endif

   return NULL;
}

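/* Counterpart to anv_device_search_for_kernel(): insert the kernel into
 * the in-memory cache (or build a standalone bin if there is none) and
 * mirror the serialized form out to the on-disk cache when enabled.
 */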
struct anv_shader_bin *
anv_device_upload_kernel(struct anv_device *device,
                         struct anv_pipeline_cache *cache,
                         const void *key_data, uint32_t key_size,
                         const void *kernel_data, uint32_t kernel_size,
                         const void *constant_data,
                         uint32_t constant_data_size,
                         const struct brw_stage_prog_data *prog_data,
                         uint32_t prog_data_size,
                         const struct brw_compile_stats *stats,
                         uint32_t num_stats,
                         const nir_xfb_info *xfb_info,
                         const struct anv_pipeline_bind_map *bind_map)
{
   struct anv_shader_bin *bin;
   if (cache) {
      bin = anv_pipeline_cache_upload_kernel(cache, key_data, key_size,
                                             kernel_data, kernel_size,
                                             constant_data, constant_data_size,
                                             prog_data, prog_data_size,
                                             stats, num_stats,
                                             xfb_info, bind_map);
   } else {
      bin = anv_shader_bin_create(device, key_data, key_size,
                                  kernel_data, kernel_size,
                                  constant_data, constant_data_size,
                                  prog_data, prog_data_size,
                                  prog_data->param,
                                  stats, num_stats,
                                  xfb_info, bind_map);
   }

   if (bin == NULL)
      return NULL;

#ifdef ENABLE_SHADER_CACHE
   struct disk_cache *disk_cache = device->instance->physicalDevice.disk_cache;
   if (disk_cache) {
      struct blob binary;
      blob_init(&binary);
      if (anv_shader_bin_write_to_blob(bin, &binary)) {
         cache_key cache_key;
         disk_cache_compute_key(disk_cache, key_data, key_size, cache_key);

         disk_cache_put(disk_cache, cache_key, binary.data, binary.size, NULL);
      }

      blob_finish(&binary);
   }
#endif

   return bin;
}

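/* NIR cache entry: the SHA-1 of the shader plus its nir_serialize()d
 * bytes, held in a single ralloc allocation owned by the nir_cache.
 */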
struct serialized_nir {
   unsigned char sha1_key[20];
   size_t size;
   char data[0];
};

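/* Serialized NIR stays alive until the cache is destroyed, so the lock
 * only needs to cover the table lookup; nir_deserialize() then builds a
 * fresh copy in the caller's mem_ctx.
 */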
struct nir_shader *
anv_device_search_for_nir(struct anv_device *device,
                          struct anv_pipeline_cache *cache,
                          const nir_shader_compiler_options *nir_options,
                          unsigned char sha1_key[20],
                          void *mem_ctx)
{
   if (cache && cache->nir_cache) {
      const struct serialized_nir *snir = NULL;

      pthread_mutex_lock(&cache->mutex);
      struct hash_entry *entry =
         _mesa_hash_table_search(cache->nir_cache, sha1_key);
      if (entry)
         snir = entry->data;
      pthread_mutex_unlock(&cache->mutex);

      if (snir) {
         struct blob_reader blob;
         blob_reader_init(&blob, snir->data, snir->size);

         nir_shader *nir = nir_deserialize(mem_ctx, nir_options, &blob);
         if (blob.overrun) {
            ralloc_free(nir);
         } else {
            return nir;
         }
      }
   }

   return NULL;
}

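/* Upload NIR with a check/serialize/re-check dance: probe under the lock,
 * serialize outside it, then re-check before inserting since another
 * thread may have uploaded the same shader in the meantime.
 */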
void
anv_device_upload_nir(struct anv_device *device,
                      struct anv_pipeline_cache *cache,
                      const struct nir_shader *nir,
                      unsigned char sha1_key[20])
{
   if (cache && cache->nir_cache) {
      pthread_mutex_lock(&cache->mutex);
      struct hash_entry *entry =
         _mesa_hash_table_search(cache->nir_cache, sha1_key);
      pthread_mutex_unlock(&cache->mutex);
      if (entry)
         return;

      struct blob blob;
      blob_init(&blob);

      nir_serialize(&blob, nir);
      if (blob.out_of_memory) {
         blob_finish(&blob);
         return;
      }

      pthread_mutex_lock(&cache->mutex);
      /* Because ralloc isn't thread-safe, we have to do all this inside the
       * lock.  We could unlock for the big memcpy but it's probably not worth
       * the hassle.
       */
      entry = _mesa_hash_table_search(cache->nir_cache, sha1_key);
      if (entry) {
         blob_finish(&blob);
         pthread_mutex_unlock(&cache->mutex);
         return;
      }

      struct serialized_nir *snir =
         ralloc_size(cache->nir_cache, sizeof(*snir) + blob.size);
      memcpy(snir->sha1_key, sha1_key, 20);
      snir->size = blob.size;
      memcpy(snir->data, blob.data, blob.size);

      blob_finish(&blob);

      _mesa_hash_table_insert(cache->nir_cache, snir->sha1_key, snir);

      pthread_mutex_unlock(&cache->mutex);
   }
}