anv: call blob_finish when done with it
src/intel/vulkan/anv_pipeline_cache.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "compiler/blob.h"
#include "util/hash_table.h"
#include "util/debug.h"
#include "util/disk_cache.h"
#include "util/mesa-sha1.h"
#include "nir/nir_serialize.h"
#include "anv_private.h"
#include "nir/nir_xfb_info.h"

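/* Allocate an anv_shader_bin and all of its trailing data (cache key,
 * prog_data, push constant params, optional transform feedback info and the
 * bind map tables) in a single multialloc, then copy the kernel into the
 * instruction state pool and any constant data into the dynamic state pool.
 */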
struct anv_shader_bin *
anv_shader_bin_create(struct anv_device *device,
                      const void *key_data, uint32_t key_size,
                      const void *kernel_data, uint32_t kernel_size,
                      const void *constant_data, uint32_t constant_data_size,
                      const struct brw_stage_prog_data *prog_data_in,
                      uint32_t prog_data_size, const void *prog_data_param_in,
                      const nir_xfb_info *xfb_info_in,
                      const struct anv_pipeline_bind_map *bind_map)
{
   struct anv_shader_bin *shader;
   struct anv_shader_bin_key *key;
   struct brw_stage_prog_data *prog_data;
   uint32_t *prog_data_param;
   nir_xfb_info *xfb_info;
   struct anv_pipeline_binding *surface_to_descriptor, *sampler_to_descriptor;

   ANV_MULTIALLOC(ma);
   anv_multialloc_add(&ma, &shader, 1);
   anv_multialloc_add_size(&ma, &key, sizeof(*key) + key_size);
   anv_multialloc_add_size(&ma, &prog_data, prog_data_size);
   anv_multialloc_add(&ma, &prog_data_param, prog_data_in->nr_params);
   if (xfb_info_in) {
      uint32_t xfb_info_size = nir_xfb_info_size(xfb_info_in->output_count);
      anv_multialloc_add_size(&ma, &xfb_info, xfb_info_size);
   }
   anv_multialloc_add(&ma, &surface_to_descriptor,
                      bind_map->surface_count);
   anv_multialloc_add(&ma, &sampler_to_descriptor,
                      bind_map->sampler_count);

   if (!anv_multialloc_alloc(&ma, &device->alloc,
                             VK_SYSTEM_ALLOCATION_SCOPE_DEVICE))
      return NULL;

   shader->ref_cnt = 1;

   key->size = key_size;
   memcpy(key->data, key_data, key_size);
   shader->key = key;

   shader->kernel =
      anv_state_pool_alloc(&device->instruction_state_pool, kernel_size, 64);
   memcpy(shader->kernel.map, kernel_data, kernel_size);
   shader->kernel_size = kernel_size;

   if (constant_data_size) {
      shader->constant_data =
         anv_state_pool_alloc(&device->dynamic_state_pool,
                              constant_data_size, 32);
      memcpy(shader->constant_data.map, constant_data, constant_data_size);
   } else {
      shader->constant_data = ANV_STATE_NULL;
   }
   shader->constant_data_size = constant_data_size;

   memcpy(prog_data, prog_data_in, prog_data_size);
   memcpy(prog_data_param, prog_data_param_in,
          prog_data->nr_params * sizeof(*prog_data_param));
   prog_data->param = prog_data_param;
   shader->prog_data = prog_data;
   shader->prog_data_size = prog_data_size;

   if (xfb_info_in) {
      *xfb_info = *xfb_info_in;
      typed_memcpy(xfb_info->outputs, xfb_info_in->outputs,
                   xfb_info_in->output_count);
      shader->xfb_info = xfb_info;
   } else {
      shader->xfb_info = NULL;
   }

   shader->bind_map = *bind_map;
   typed_memcpy(surface_to_descriptor, bind_map->surface_to_descriptor,
                bind_map->surface_count);
   shader->bind_map.surface_to_descriptor = surface_to_descriptor;
   typed_memcpy(sampler_to_descriptor, bind_map->sampler_to_descriptor,
                bind_map->sampler_count);
   shader->bind_map.sampler_to_descriptor = sampler_to_descriptor;

   return shader;
}

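/* The shader must have no outstanding references; this releases the kernel
 * and constant-data state pool allocations along with the shader_bin
 * allocation itself.
 */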
void
anv_shader_bin_destroy(struct anv_device *device,
                       struct anv_shader_bin *shader)
{
   assert(shader->ref_cnt == 0);
   anv_state_pool_free(&device->instruction_state_pool, shader->kernel);
   anv_state_pool_free(&device->dynamic_state_pool, shader->constant_data);
   vk_free(&device->alloc, shader);
}

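/* Serialize a shader binary into a blob: key, kernel, constant data,
 * prog_data (including the push constant params), optional transform
 * feedback info, and the bind map.  Returns false if the blob ran out of
 * memory (or, for a fixed-size blob, out of space).
 */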
static bool
anv_shader_bin_write_to_blob(const struct anv_shader_bin *shader,
                             struct blob *blob)
{
   blob_write_uint32(blob, shader->key->size);
   blob_write_bytes(blob, shader->key->data, shader->key->size);

   blob_write_uint32(blob, shader->kernel_size);
   blob_write_bytes(blob, shader->kernel.map, shader->kernel_size);

   blob_write_uint32(blob, shader->constant_data_size);
   blob_write_bytes(blob, shader->constant_data.map,
                    shader->constant_data_size);

   blob_write_uint32(blob, shader->prog_data_size);
   blob_write_bytes(blob, shader->prog_data, shader->prog_data_size);
   blob_write_bytes(blob, shader->prog_data->param,
                    shader->prog_data->nr_params *
                    sizeof(*shader->prog_data->param));

   if (shader->xfb_info) {
      uint32_t xfb_info_size =
         nir_xfb_info_size(shader->xfb_info->output_count);
      blob_write_uint32(blob, xfb_info_size);
      blob_write_bytes(blob, shader->xfb_info, xfb_info_size);
   } else {
      blob_write_uint32(blob, 0);
   }

   blob_write_uint32(blob, shader->bind_map.surface_count);
   blob_write_uint32(blob, shader->bind_map.sampler_count);
   blob_write_uint32(blob, shader->bind_map.image_param_count);
   blob_write_bytes(blob, shader->bind_map.surface_to_descriptor,
                    shader->bind_map.surface_count *
                    sizeof(*shader->bind_map.surface_to_descriptor));
   blob_write_bytes(blob, shader->bind_map.sampler_to_descriptor,
                    shader->bind_map.sampler_count *
                    sizeof(*shader->bind_map.sampler_to_descriptor));

   return !blob->out_of_memory;
}

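/* Deserialize a shader binary written by anv_shader_bin_write_to_blob().
 * Fields are read in the same order they were written; NULL is returned if
 * the reader overruns the end of the data.
 */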
static struct anv_shader_bin *
anv_shader_bin_create_from_blob(struct anv_device *device,
                                struct blob_reader *blob)
{
   uint32_t key_size = blob_read_uint32(blob);
   const void *key_data = blob_read_bytes(blob, key_size);

   uint32_t kernel_size = blob_read_uint32(blob);
   const void *kernel_data = blob_read_bytes(blob, kernel_size);

   uint32_t constant_data_size = blob_read_uint32(blob);
   const void *constant_data = blob_read_bytes(blob, constant_data_size);

   uint32_t prog_data_size = blob_read_uint32(blob);
   const struct brw_stage_prog_data *prog_data =
      blob_read_bytes(blob, prog_data_size);
   if (blob->overrun)
      return NULL;
   const void *prog_data_param =
      blob_read_bytes(blob, prog_data->nr_params * sizeof(*prog_data->param));

   const nir_xfb_info *xfb_info = NULL;
   uint32_t xfb_size = blob_read_uint32(blob);
   if (xfb_size)
      xfb_info = blob_read_bytes(blob, xfb_size);

   struct anv_pipeline_bind_map bind_map;
   bind_map.surface_count = blob_read_uint32(blob);
   bind_map.sampler_count = blob_read_uint32(blob);
   bind_map.image_param_count = blob_read_uint32(blob);
   bind_map.surface_to_descriptor = (void *)
      blob_read_bytes(blob, bind_map.surface_count *
                            sizeof(*bind_map.surface_to_descriptor));
   bind_map.sampler_to_descriptor = (void *)
      blob_read_bytes(blob, bind_map.sampler_count *
                            sizeof(*bind_map.sampler_to_descriptor));

   if (blob->overrun)
      return NULL;

   return anv_shader_bin_create(device,
                                key_data, key_size,
                                kernel_data, kernel_size,
                                constant_data, constant_data_size,
                                prog_data, prog_data_size, prog_data_param,
                                xfb_info, &bind_map);
}

/* Remaining work:
 *
 * - Compact the binding table layout so it's tight and not dependent on the
 *   descriptor set layout.
 *
 * - Review the prog_data struct for size and cacheability: struct
 *   brw_stage_prog_data has a binding_table that uses a lot of uint32_t for
 *   8-bit quantities, etc.; use bit fields for all bools, e.g. dual_src_blend.
 */

static uint32_t
shader_bin_key_hash_func(const void *void_key)
{
   const struct anv_shader_bin_key *key = void_key;
   return _mesa_hash_data(key->data, key->size);
}

static bool
shader_bin_key_compare_func(const void *void_a, const void *void_b)
{
   const struct anv_shader_bin_key *a = void_a, *b = void_b;
   if (a->size != b->size)
      return false;

   return memcmp(a->data, b->data, a->size) == 0;
}

static uint32_t
sha1_hash_func(const void *sha1)
{
   return _mesa_hash_data(sha1, 20);
}

static bool
sha1_compare_func(const void *sha1_a, const void *sha1_b)
{
   return memcmp(sha1_a, sha1_b, 20) == 0;
}

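/* The in-memory shader and NIR hash tables are only created when caching is
 * enabled; with caching disabled the cache behaves as a pass-through and
 * every lookup misses.
 */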
void
anv_pipeline_cache_init(struct anv_pipeline_cache *cache,
                        struct anv_device *device,
                        bool cache_enabled)
{
   cache->device = device;
   pthread_mutex_init(&cache->mutex, NULL);

   if (cache_enabled) {
      cache->cache = _mesa_hash_table_create(NULL, shader_bin_key_hash_func,
                                             shader_bin_key_compare_func);
      cache->nir_cache = _mesa_hash_table_create(NULL, sha1_hash_func,
                                                 sha1_compare_func);
   } else {
      cache->cache = NULL;
      cache->nir_cache = NULL;
   }
}

void
anv_pipeline_cache_finish(struct anv_pipeline_cache *cache)
{
   pthread_mutex_destroy(&cache->mutex);

   if (cache->cache) {
      /* This is a bit unfortunate.  In order to keep things from randomly
       * going away, the shader cache has to hold a reference to all shader
       * binaries it contains.  We unref them when we destroy the cache.
       */
      hash_table_foreach(cache->cache, entry)
         anv_shader_bin_unref(cache->device, entry->data);

      _mesa_hash_table_destroy(cache->cache, NULL);
   }

   if (cache->nir_cache) {
      hash_table_foreach(cache->nir_cache, entry)
         ralloc_free(entry->data);

      _mesa_hash_table_destroy(cache->nir_cache, NULL);
   }
}

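/* Build a temporary anv_shader_bin_key on the stack (a VLA rounded up to
 * whole uint32_t words) so the hash table can be probed without allocating.
 * The caller must already hold cache->mutex.
 */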
static struct anv_shader_bin *
anv_pipeline_cache_search_locked(struct anv_pipeline_cache *cache,
                                 const void *key_data, uint32_t key_size)
{
   uint32_t vla[1 + DIV_ROUND_UP(key_size, sizeof(uint32_t))];
   struct anv_shader_bin_key *key = (void *)vla;
   key->size = key_size;
   memcpy(key->data, key_data, key_size);

   struct hash_entry *entry = _mesa_hash_table_search(cache->cache, key);
   if (entry)
      return entry->data;
   else
      return NULL;
}

struct anv_shader_bin *
anv_pipeline_cache_search(struct anv_pipeline_cache *cache,
                          const void *key_data, uint32_t key_size)
{
   if (!cache->cache)
      return NULL;

   pthread_mutex_lock(&cache->mutex);

   struct anv_shader_bin *shader =
      anv_pipeline_cache_search_locked(cache, key_data, key_size);

   pthread_mutex_unlock(&cache->mutex);

   /* We increment refcount before handing it to the caller */
   if (shader)
      anv_shader_bin_ref(shader);

   return shader;
}

static void
anv_pipeline_cache_add_shader_bin(struct anv_pipeline_cache *cache,
                                  struct anv_shader_bin *bin)
{
   if (!cache->cache)
      return;

   pthread_mutex_lock(&cache->mutex);

   struct hash_entry *entry = _mesa_hash_table_search(cache->cache, bin->key);
   if (entry == NULL) {
      /* Take a reference for the cache */
      anv_shader_bin_ref(bin);
      _mesa_hash_table_insert(cache->cache, bin->key, bin);
   }

   pthread_mutex_unlock(&cache->mutex);
}

static struct anv_shader_bin *
anv_pipeline_cache_add_shader_locked(struct anv_pipeline_cache *cache,
                                     const void *key_data, uint32_t key_size,
                                     const void *kernel_data,
                                     uint32_t kernel_size,
                                     const void *constant_data,
                                     uint32_t constant_data_size,
                                     const struct brw_stage_prog_data *prog_data,
                                     uint32_t prog_data_size,
                                     const void *prog_data_param,
                                     const nir_xfb_info *xfb_info,
                                     const struct anv_pipeline_bind_map *bind_map)
{
   struct anv_shader_bin *shader =
      anv_pipeline_cache_search_locked(cache, key_data, key_size);
   if (shader)
      return shader;

   struct anv_shader_bin *bin =
      anv_shader_bin_create(cache->device, key_data, key_size,
                            kernel_data, kernel_size,
                            constant_data, constant_data_size,
                            prog_data, prog_data_size, prog_data_param,
                            xfb_info, bind_map);
   if (!bin)
      return NULL;

   _mesa_hash_table_insert(cache->cache, bin->key, bin);

   return bin;
}

struct anv_shader_bin *
anv_pipeline_cache_upload_kernel(struct anv_pipeline_cache *cache,
                                 const void *key_data, uint32_t key_size,
                                 const void *kernel_data, uint32_t kernel_size,
                                 const void *constant_data,
                                 uint32_t constant_data_size,
                                 const struct brw_stage_prog_data *prog_data,
                                 uint32_t prog_data_size,
                                 const nir_xfb_info *xfb_info,
                                 const struct anv_pipeline_bind_map *bind_map)
{
   if (cache->cache) {
      pthread_mutex_lock(&cache->mutex);

      struct anv_shader_bin *bin =
         anv_pipeline_cache_add_shader_locked(cache, key_data, key_size,
                                              kernel_data, kernel_size,
                                              constant_data, constant_data_size,
                                              prog_data, prog_data_size,
                                              prog_data->param,
                                              xfb_info, bind_map);

      pthread_mutex_unlock(&cache->mutex);

      /* We increment refcount before handing it to the caller */
      if (bin)
         anv_shader_bin_ref(bin);

      return bin;
   } else {
      /* In this case, we're not caching it so the caller owns it entirely */
      return anv_shader_bin_create(cache->device, key_data, key_size,
                                   kernel_data, kernel_size,
                                   constant_data, constant_data_size,
                                   prog_data, prog_data_size,
                                   prog_data->param,
                                   xfb_info, bind_map);
   }
}

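/* Matches the header layout the Vulkan spec requires at the start of
 * pipeline cache data: header length, header version, vendor ID, device ID
 * and the pipeline cache UUID.
 */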
struct cache_header {
   uint32_t header_size;
   uint32_t header_version;
   uint32_t vendor_id;
   uint32_t device_id;
   uint8_t uuid[VK_UUID_SIZE];
};

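/* Populate the in-memory cache from the application-supplied initial data.
 * The header is validated (size, version, vendor/device ID and cache UUID);
 * if anything doesn't match, the data is simply ignored and the cache starts
 * out empty.
 */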
static void
anv_pipeline_cache_load(struct anv_pipeline_cache *cache,
                        const void *data, size_t size)
{
   struct anv_device *device = cache->device;
   struct anv_physical_device *pdevice = &device->instance->physicalDevice;

   if (cache->cache == NULL)
      return;

   struct blob_reader blob;
   blob_reader_init(&blob, data, size);

   struct cache_header header;
   blob_copy_bytes(&blob, &header, sizeof(header));
   uint32_t count = blob_read_uint32(&blob);
   if (blob.overrun)
      return;

   if (header.header_size < sizeof(header))
      return;
   if (header.header_version != VK_PIPELINE_CACHE_HEADER_VERSION_ONE)
      return;
   if (header.vendor_id != 0x8086)
      return;
   if (header.device_id != device->chipset_id)
      return;
   if (memcmp(header.uuid, pdevice->pipeline_cache_uuid, VK_UUID_SIZE) != 0)
      return;

   for (uint32_t i = 0; i < count; i++) {
      struct anv_shader_bin *bin =
         anv_shader_bin_create_from_blob(device, &blob);
      if (!bin)
         break;
      _mesa_hash_table_insert(cache->cache, bin->key, bin);
   }
}

VkResult anv_CreatePipelineCache(
    VkDevice                                    _device,
    const VkPipelineCacheCreateInfo*            pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkPipelineCache*                            pPipelineCache)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_pipeline_cache *cache;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO);
   assert(pCreateInfo->flags == 0);

   cache = vk_alloc2(&device->alloc, pAllocator,
                     sizeof(*cache), 8,
                     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (cache == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   anv_pipeline_cache_init(cache, device,
                           device->instance->pipeline_cache_enabled);

   if (pCreateInfo->initialDataSize > 0)
      anv_pipeline_cache_load(cache,
                              pCreateInfo->pInitialData,
                              pCreateInfo->initialDataSize);

   *pPipelineCache = anv_pipeline_cache_to_handle(cache);

   return VK_SUCCESS;
}

void anv_DestroyPipelineCache(
    VkDevice                                    _device,
    VkPipelineCache                             _cache,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache);

   if (!cache)
      return;

   anv_pipeline_cache_finish(cache);

   vk_free2(&device->alloc, pAllocator, cache);
}

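/* Implements the usual Vulkan two-call idiom: when pData is NULL we
 * serialize into a fixed blob backed by a NULL pointer (size SIZE_MAX),
 * which only accumulates the size, so *pDataSize reports how much space is
 * needed.  When a buffer is provided but too small, as many whole shader
 * binaries as fit are written and VK_INCOMPLETE is returned.
 */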
VkResult anv_GetPipelineCacheData(
    VkDevice                                    _device,
    VkPipelineCache                             _cache,
    size_t*                                     pDataSize,
    void*                                       pData)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache);
   struct anv_physical_device *pdevice = &device->instance->physicalDevice;

   struct blob blob;
   if (pData) {
      blob_init_fixed(&blob, pData, *pDataSize);
   } else {
      blob_init_fixed(&blob, NULL, SIZE_MAX);
   }

   struct cache_header header = {
      .header_size = sizeof(struct cache_header),
      .header_version = VK_PIPELINE_CACHE_HEADER_VERSION_ONE,
      .vendor_id = 0x8086,
      .device_id = device->chipset_id,
   };
   memcpy(header.uuid, pdevice->pipeline_cache_uuid, VK_UUID_SIZE);
   blob_write_bytes(&blob, &header, sizeof(header));

   uint32_t count = 0;
   intptr_t count_offset = blob_reserve_uint32(&blob);
   if (count_offset < 0) {
      *pDataSize = 0;
      blob_finish(&blob);
      return VK_INCOMPLETE;
   }

   VkResult result = VK_SUCCESS;
   if (cache->cache) {
      hash_table_foreach(cache->cache, entry) {
         struct anv_shader_bin *shader = entry->data;

         size_t save_size = blob.size;
         if (!anv_shader_bin_write_to_blob(shader, &blob)) {
            /* If it fails reset to the previous size and bail */
            blob.size = save_size;
            result = VK_INCOMPLETE;
            break;
         }

         count++;
      }
   }

   blob_overwrite_uint32(&blob, count_offset, count);

   *pDataSize = blob.size;

   blob_finish(&blob);

   return result;
}

VkResult anv_MergePipelineCaches(
    VkDevice                                    _device,
    VkPipelineCache                             destCache,
    uint32_t                                    srcCacheCount,
    const VkPipelineCache*                      pSrcCaches)
{
   ANV_FROM_HANDLE(anv_pipeline_cache, dst, destCache);

   if (!dst->cache)
      return VK_SUCCESS;

   for (uint32_t i = 0; i < srcCacheCount; i++) {
      ANV_FROM_HANDLE(anv_pipeline_cache, src, pSrcCaches[i]);
      if (!src->cache)
         continue;

      hash_table_foreach(src->cache, entry) {
         struct anv_shader_bin *bin = entry->data;
         assert(bin);

         if (_mesa_hash_table_search(dst->cache, bin->key))
            continue;

         anv_shader_bin_ref(bin);
         _mesa_hash_table_insert(dst->cache, bin->key, bin);
      }
   }

   return VK_SUCCESS;
}

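/* Look up a compiled kernel, first in the given pipeline cache (if any) and
 * then in the on-disk shader cache.  A disk cache hit is deserialized and
 * also added to the pipeline cache so subsequent lookups stay in memory.
 */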
struct anv_shader_bin *
anv_device_search_for_kernel(struct anv_device *device,
                             struct anv_pipeline_cache *cache,
                             const void *key_data, uint32_t key_size)
{
   struct anv_shader_bin *bin;

   if (cache) {
      bin = anv_pipeline_cache_search(cache, key_data, key_size);
      if (bin)
         return bin;
   }

#ifdef ENABLE_SHADER_CACHE
   struct disk_cache *disk_cache = device->instance->physicalDevice.disk_cache;
   if (disk_cache && device->instance->pipeline_cache_enabled) {
      cache_key cache_key;
      disk_cache_compute_key(disk_cache, key_data, key_size, cache_key);

      size_t buffer_size;
      uint8_t *buffer = disk_cache_get(disk_cache, cache_key, &buffer_size);
      if (buffer) {
         struct blob_reader blob;
         blob_reader_init(&blob, buffer, buffer_size);
         bin = anv_shader_bin_create_from_blob(device, &blob);
         free(buffer);

         if (bin) {
            if (cache)
               anv_pipeline_cache_add_shader_bin(cache, bin);
            return bin;
         }
      }
   }
#endif

   return NULL;
}

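/* Add a freshly compiled kernel to the pipeline cache (or create a
 * standalone shader_bin when no cache is given) and, when the disk cache is
 * available, serialize it there as well.  The blob used for the disk cache
 * write is finished on every path so its memory is always released.
 */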
struct anv_shader_bin *
anv_device_upload_kernel(struct anv_device *device,
                         struct anv_pipeline_cache *cache,
                         const void *key_data, uint32_t key_size,
                         const void *kernel_data, uint32_t kernel_size,
                         const void *constant_data,
                         uint32_t constant_data_size,
                         const struct brw_stage_prog_data *prog_data,
                         uint32_t prog_data_size,
                         const nir_xfb_info *xfb_info,
                         const struct anv_pipeline_bind_map *bind_map)
{
   struct anv_shader_bin *bin;
   if (cache) {
      bin = anv_pipeline_cache_upload_kernel(cache, key_data, key_size,
                                             kernel_data, kernel_size,
                                             constant_data, constant_data_size,
                                             prog_data, prog_data_size,
                                             xfb_info, bind_map);
   } else {
      bin = anv_shader_bin_create(device, key_data, key_size,
                                  kernel_data, kernel_size,
                                  constant_data, constant_data_size,
                                  prog_data, prog_data_size,
                                  prog_data->param,
                                  xfb_info, bind_map);
   }

   if (bin == NULL)
      return NULL;

#ifdef ENABLE_SHADER_CACHE
   struct disk_cache *disk_cache = device->instance->physicalDevice.disk_cache;
   if (disk_cache) {
      struct blob binary;
      blob_init(&binary);
      if (anv_shader_bin_write_to_blob(bin, &binary)) {
         cache_key cache_key;
         disk_cache_compute_key(disk_cache, key_data, key_size, cache_key);

         disk_cache_put(disk_cache, cache_key, binary.data, binary.size, NULL);
      }

      blob_finish(&binary);
   }
#endif

   return bin;
}

struct serialized_nir {
   unsigned char sha1_key[20];
   size_t size;
   char data[0];
};

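/* Look up previously cached NIR by its SHA-1 key and deserialize it into
 * mem_ctx.  Returns NULL on a miss or if the serialized data turns out to
 * be truncated.
 */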
struct nir_shader *
anv_device_search_for_nir(struct anv_device *device,
                          struct anv_pipeline_cache *cache,
                          const nir_shader_compiler_options *nir_options,
                          unsigned char sha1_key[20],
                          void *mem_ctx)
{
   if (cache && cache->nir_cache) {
      const struct serialized_nir *snir = NULL;

      pthread_mutex_lock(&cache->mutex);
      struct hash_entry *entry =
         _mesa_hash_table_search(cache->nir_cache, sha1_key);
      if (entry)
         snir = entry->data;
      pthread_mutex_unlock(&cache->mutex);

      if (snir) {
         struct blob_reader blob;
         blob_reader_init(&blob, snir->data, snir->size);

         nir_shader *nir = nir_deserialize(mem_ctx, nir_options, &blob);
         if (blob.overrun) {
            ralloc_free(nir);
         } else {
            return nir;
         }
      }
   }

   return NULL;
}

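/* Serialize NIR into the cache's nir_cache table, keyed by SHA-1.  The
 * lookup is done twice: once before serializing (to skip the work on a hit)
 * and again under the mutex before inserting, in case another thread won the
 * race.  The temporary blob is finished on every path.
 */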
void
anv_device_upload_nir(struct anv_device *device,
                      struct anv_pipeline_cache *cache,
                      const struct nir_shader *nir,
                      unsigned char sha1_key[20])
{
   if (cache && cache->nir_cache) {
      pthread_mutex_lock(&cache->mutex);
      struct hash_entry *entry =
         _mesa_hash_table_search(cache->nir_cache, sha1_key);
      pthread_mutex_unlock(&cache->mutex);
      if (entry)
         return;

      struct blob blob;
      blob_init(&blob);

      nir_serialize(&blob, nir);
      if (blob.out_of_memory) {
         blob_finish(&blob);
         return;
      }

      pthread_mutex_lock(&cache->mutex);
      /* Because ralloc isn't thread-safe, we have to do all this inside the
       * lock.  We could unlock for the big memcpy but it's probably not worth
       * the hassle.
       */
      entry = _mesa_hash_table_search(cache->nir_cache, sha1_key);
      if (entry) {
         blob_finish(&blob);
         pthread_mutex_unlock(&cache->mutex);
         return;
      }

      struct serialized_nir *snir =
         ralloc_size(cache->nir_cache, sizeof(*snir) + blob.size);
      memcpy(snir->sha1_key, sha1_key, 20);
      snir->size = blob.size;
      memcpy(snir->data, blob.data, blob.size);

      blob_finish(&blob);

      _mesa_hash_table_insert(cache->nir_cache, snir->sha1_key, snir);

      pthread_mutex_unlock(&cache->mutex);
   }
}