anv/pipeline_cache: free NIR shader cache
src/intel/vulkan/anv_pipeline_cache.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "compiler/blob.h"
#include "util/hash_table.h"
#include "util/debug.h"
#include "util/disk_cache.h"
#include "util/mesa-sha1.h"
#include "nir/nir_serialize.h"
#include "anv_private.h"

struct anv_shader_bin *
anv_shader_bin_create(struct anv_device *device,
                      const void *key_data, uint32_t key_size,
                      const void *kernel_data, uint32_t kernel_size,
                      const void *constant_data, uint32_t constant_data_size,
                      const struct brw_stage_prog_data *prog_data_in,
                      uint32_t prog_data_size, const void *prog_data_param_in,
                      const struct anv_pipeline_bind_map *bind_map)
{
   struct anv_shader_bin *shader;
   struct anv_shader_bin_key *key;
   struct brw_stage_prog_data *prog_data;
   uint32_t *prog_data_param;
   struct anv_pipeline_binding *surface_to_descriptor, *sampler_to_descriptor;

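   /* The shader_bin, its key copy, the prog_data copy, the param array, and
    * both bind map arrays all live in a single allocation, so one vk_free()
    * in anv_shader_bin_destroy() releases everything at once.
    */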
   ANV_MULTIALLOC(ma);
   anv_multialloc_add(&ma, &shader, 1);
   anv_multialloc_add_size(&ma, &key, sizeof(*key) + key_size);
   anv_multialloc_add_size(&ma, &prog_data, prog_data_size);
   anv_multialloc_add(&ma, &prog_data_param, prog_data_in->nr_params);
   anv_multialloc_add(&ma, &surface_to_descriptor,
                      bind_map->surface_count);
   anv_multialloc_add(&ma, &sampler_to_descriptor,
                      bind_map->sampler_count);

   if (!anv_multialloc_alloc(&ma, &device->alloc,
                             VK_SYSTEM_ALLOCATION_SCOPE_DEVICE))
      return NULL;

   shader->ref_cnt = 1;

   key->size = key_size;
   memcpy(key->data, key_data, key_size);
   shader->key = key;

   shader->kernel =
      anv_state_pool_alloc(&device->instruction_state_pool, kernel_size, 64);
   memcpy(shader->kernel.map, kernel_data, kernel_size);
   shader->kernel_size = kernel_size;

   if (constant_data_size) {
      shader->constant_data =
         anv_state_pool_alloc(&device->dynamic_state_pool,
                              constant_data_size, 32);
      memcpy(shader->constant_data.map, constant_data, constant_data_size);
   } else {
      shader->constant_data = ANV_STATE_NULL;
   }
   shader->constant_data_size = constant_data_size;

   memcpy(prog_data, prog_data_in, prog_data_size);
   memcpy(prog_data_param, prog_data_param_in,
          prog_data->nr_params * sizeof(*prog_data_param));
   prog_data->param = prog_data_param;
   shader->prog_data = prog_data;
   shader->prog_data_size = prog_data_size;

   shader->bind_map = *bind_map;
   typed_memcpy(surface_to_descriptor, bind_map->surface_to_descriptor,
                bind_map->surface_count);
   shader->bind_map.surface_to_descriptor = surface_to_descriptor;
   typed_memcpy(sampler_to_descriptor, bind_map->sampler_to_descriptor,
                bind_map->sampler_count);
   shader->bind_map.sampler_to_descriptor = sampler_to_descriptor;

   return shader;
}

void
anv_shader_bin_destroy(struct anv_device *device,
                       struct anv_shader_bin *shader)
{
   assert(shader->ref_cnt == 0);
   anv_state_pool_free(&device->instruction_state_pool, shader->kernel);
   anv_state_pool_free(&device->dynamic_state_pool, shader->constant_data);
   vk_free(&device->alloc, shader);
}

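/* Serializes a shader_bin as a flat stream: length-prefixed key, kernel, and
 * constant data, then prog_data plus its param array, then the bind map
 * counts and tables.  anv_shader_bin_create_from_blob() below reads the
 * fields back in exactly this order, so the two functions must stay in sync.
 */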
static bool
anv_shader_bin_write_to_blob(const struct anv_shader_bin *shader,
                             struct blob *blob)
{
   blob_write_uint32(blob, shader->key->size);
   blob_write_bytes(blob, shader->key->data, shader->key->size);

   blob_write_uint32(blob, shader->kernel_size);
   blob_write_bytes(blob, shader->kernel.map, shader->kernel_size);

   blob_write_uint32(blob, shader->constant_data_size);
   blob_write_bytes(blob, shader->constant_data.map,
                    shader->constant_data_size);

   blob_write_uint32(blob, shader->prog_data_size);
   blob_write_bytes(blob, shader->prog_data, shader->prog_data_size);
   blob_write_bytes(blob, shader->prog_data->param,
                    shader->prog_data->nr_params *
                    sizeof(*shader->prog_data->param));

   blob_write_uint32(blob, shader->bind_map.surface_count);
   blob_write_uint32(blob, shader->bind_map.sampler_count);
   blob_write_uint32(blob, shader->bind_map.image_count);
   blob_write_bytes(blob, shader->bind_map.surface_to_descriptor,
                    shader->bind_map.surface_count *
                    sizeof(*shader->bind_map.surface_to_descriptor));
   blob_write_bytes(blob, shader->bind_map.sampler_to_descriptor,
                    shader->bind_map.sampler_count *
                    sizeof(*shader->bind_map.sampler_to_descriptor));

   /* The blob latches its out_of_memory flag on the first failed write, so a
    * single check at the end also catches any earlier failure.
    */
   return !blob->out_of_memory;
}

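/* Inverse of anv_shader_bin_write_to_blob().  The blob_read_* helpers return
 * pointers into the reader's buffer (no copies) and yield zero/NULL once the
 * reader overruns, hence the overrun check before prog_data->nr_params is
 * dereferenced and again before the final create call.
 */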
static struct anv_shader_bin *
anv_shader_bin_create_from_blob(struct anv_device *device,
                                struct blob_reader *blob)
{
   uint32_t key_size = blob_read_uint32(blob);
   const void *key_data = blob_read_bytes(blob, key_size);

   uint32_t kernel_size = blob_read_uint32(blob);
   const void *kernel_data = blob_read_bytes(blob, kernel_size);

   uint32_t constant_data_size = blob_read_uint32(blob);
   const void *constant_data = blob_read_bytes(blob, constant_data_size);

   uint32_t prog_data_size = blob_read_uint32(blob);
   const struct brw_stage_prog_data *prog_data =
      blob_read_bytes(blob, prog_data_size);
   if (blob->overrun)
      return NULL;
   const void *prog_data_param =
      blob_read_bytes(blob, prog_data->nr_params * sizeof(*prog_data->param));

   struct anv_pipeline_bind_map bind_map;
   bind_map.surface_count = blob_read_uint32(blob);
   bind_map.sampler_count = blob_read_uint32(blob);
   bind_map.image_count = blob_read_uint32(blob);
   bind_map.surface_to_descriptor = (void *)
      blob_read_bytes(blob, bind_map.surface_count *
                            sizeof(*bind_map.surface_to_descriptor));
   bind_map.sampler_to_descriptor = (void *)
      blob_read_bytes(blob, bind_map.sampler_count *
                            sizeof(*bind_map.sampler_to_descriptor));

   if (blob->overrun)
      return NULL;

   return anv_shader_bin_create(device,
                                key_data, key_size,
                                kernel_data, kernel_size,
                                constant_data, constant_data_size,
                                prog_data, prog_data_size, prog_data_param,
                                &bind_map);
}

/* Remaining work:
 *
 * - Compact the binding table layout so it's tight and not dependent on the
 *   descriptor set layout.
 *
 * - Review the prog_data structs for size and cacheability: struct
 *   brw_stage_prog_data has a binding_table that uses a lot of uint32_ts for
 *   8-bit quantities, etc.; use bit fields for all bools, e.g.
 *   dual_src_blend.
 */

static uint32_t
shader_bin_key_hash_func(const void *void_key)
{
   const struct anv_shader_bin_key *key = void_key;
   return _mesa_hash_data(key->data, key->size);
}

static bool
shader_bin_key_compare_func(const void *void_a, const void *void_b)
{
   const struct anv_shader_bin_key *a = void_a, *b = void_b;
   if (a->size != b->size)
      return false;

   return memcmp(a->data, b->data, a->size) == 0;
}

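/* The NIR cache is keyed by a raw 20-byte SHA-1 digest (see struct
 * serialized_nir below), so these callbacks operate on a fixed-size key
 * rather than a length-prefixed one.
 */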
static uint32_t
sha1_hash_func(const void *sha1)
{
   return _mesa_hash_data(sha1, 20);
}

static bool
sha1_compare_func(const void *sha1_a, const void *sha1_b)
{
   return memcmp(sha1_a, sha1_b, 20) == 0;
}

void
anv_pipeline_cache_init(struct anv_pipeline_cache *cache,
                        struct anv_device *device,
                        bool cache_enabled)
{
   cache->device = device;
   pthread_mutex_init(&cache->mutex, NULL);

   if (cache_enabled) {
      cache->cache = _mesa_hash_table_create(NULL, shader_bin_key_hash_func,
                                             shader_bin_key_compare_func);
      cache->nir_cache = _mesa_hash_table_create(NULL, sha1_hash_func,
                                                 sha1_compare_func);
   } else {
      cache->cache = NULL;
      cache->nir_cache = NULL;
   }
}

void
anv_pipeline_cache_finish(struct anv_pipeline_cache *cache)
{
   pthread_mutex_destroy(&cache->mutex);

   if (cache->cache) {
      /* This is a bit unfortunate.  In order to keep things from randomly
       * going away, the shader cache has to hold a reference to all shader
       * binaries it contains.  We unref them when we destroy the cache.
       */
      hash_table_foreach(cache->cache, entry)
         anv_shader_bin_unref(cache->device, entry->data);

      _mesa_hash_table_destroy(cache->cache, NULL);
   }

   if (cache->nir_cache) {
      hash_table_foreach(cache->nir_cache, entry)
         ralloc_free(entry->data);

      _mesa_hash_table_destroy(cache->nir_cache, NULL);
   }
}

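/* Builds a temporary anv_shader_bin_key on the stack so the lookup can go
 * through the same hash/compare callbacks as the stored keys.  The uint32_t
 * VLA keeps the buffer suitably aligned for the leading size field.
 */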
static struct anv_shader_bin *
anv_pipeline_cache_search_locked(struct anv_pipeline_cache *cache,
                                 const void *key_data, uint32_t key_size)
{
   uint32_t vla[1 + DIV_ROUND_UP(key_size, sizeof(uint32_t))];
   struct anv_shader_bin_key *key = (void *)vla;
   key->size = key_size;
   memcpy(key->data, key_data, key_size);

   struct hash_entry *entry = _mesa_hash_table_search(cache->cache, key);
   if (entry)
      return entry->data;
   else
      return NULL;
}

struct anv_shader_bin *
anv_pipeline_cache_search(struct anv_pipeline_cache *cache,
                          const void *key_data, uint32_t key_size)
{
   if (!cache->cache)
      return NULL;

   pthread_mutex_lock(&cache->mutex);

   struct anv_shader_bin *shader =
      anv_pipeline_cache_search_locked(cache, key_data, key_size);

   pthread_mutex_unlock(&cache->mutex);

   /* We increment refcount before handing it to the caller */
   if (shader)
      anv_shader_bin_ref(shader);

   return shader;
}

static void
anv_pipeline_cache_add_shader_bin(struct anv_pipeline_cache *cache,
                                  struct anv_shader_bin *bin)
{
   if (!cache->cache)
      return;

   pthread_mutex_lock(&cache->mutex);

   struct hash_entry *entry = _mesa_hash_table_search(cache->cache, bin->key);
   if (entry == NULL) {
      /* Take a reference for the cache */
      anv_shader_bin_ref(bin);
      _mesa_hash_table_insert(cache->cache, bin->key, bin);
   }

   pthread_mutex_unlock(&cache->mutex);
}

static struct anv_shader_bin *
anv_pipeline_cache_add_shader_locked(struct anv_pipeline_cache *cache,
                                     const void *key_data, uint32_t key_size,
                                     const void *kernel_data,
                                     uint32_t kernel_size,
                                     const void *constant_data,
                                     uint32_t constant_data_size,
                                     const struct brw_stage_prog_data *prog_data,
                                     uint32_t prog_data_size,
                                     const void *prog_data_param,
                                     const struct anv_pipeline_bind_map *bind_map)
{
   struct anv_shader_bin *shader =
      anv_pipeline_cache_search_locked(cache, key_data, key_size);
   if (shader)
      return shader;

   struct anv_shader_bin *bin =
      anv_shader_bin_create(cache->device, key_data, key_size,
                            kernel_data, kernel_size,
                            constant_data, constant_data_size,
                            prog_data, prog_data_size, prog_data_param,
                            bind_map);
   if (!bin)
      return NULL;

   _mesa_hash_table_insert(cache->cache, bin->key, bin);

   return bin;
}

struct anv_shader_bin *
anv_pipeline_cache_upload_kernel(struct anv_pipeline_cache *cache,
                                 const void *key_data, uint32_t key_size,
                                 const void *kernel_data, uint32_t kernel_size,
                                 const void *constant_data,
                                 uint32_t constant_data_size,
                                 const struct brw_stage_prog_data *prog_data,
                                 uint32_t prog_data_size,
                                 const struct anv_pipeline_bind_map *bind_map)
{
   if (cache->cache) {
      pthread_mutex_lock(&cache->mutex);

      struct anv_shader_bin *bin =
         anv_pipeline_cache_add_shader_locked(cache, key_data, key_size,
                                              kernel_data, kernel_size,
                                              constant_data, constant_data_size,
                                              prog_data, prog_data_size,
                                              prog_data->param, bind_map);

      pthread_mutex_unlock(&cache->mutex);

      /* We increment refcount before handing it to the caller */
      if (bin)
         anv_shader_bin_ref(bin);

      return bin;
   } else {
      /* In this case, we're not caching it so the caller owns it entirely */
      return anv_shader_bin_create(cache->device, key_data, key_size,
                                   kernel_data, kernel_size,
                                   constant_data, constant_data_size,
                                   prog_data, prog_data_size,
                                   prog_data->param, bind_map);
   }
}

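/* Mirrors the header layout the Vulkan spec mandates at the start of
 * pipeline cache data: header size and version, vendor and device IDs, then
 * the pipeline cache UUID.
 */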
struct cache_header {
   uint32_t header_size;
   uint32_t header_version;
   uint32_t vendor_id;
   uint32_t device_id;
   uint8_t uuid[VK_UUID_SIZE];
};

static void
anv_pipeline_cache_load(struct anv_pipeline_cache *cache,
                        const void *data, size_t size)
{
   struct anv_device *device = cache->device;
   struct anv_physical_device *pdevice = &device->instance->physicalDevice;

   if (cache->cache == NULL)
      return;

   struct blob_reader blob;
   blob_reader_init(&blob, data, size);

   struct cache_header header;
   blob_copy_bytes(&blob, &header, sizeof(header));
   uint32_t count = blob_read_uint32(&blob);
   if (blob.overrun)
      return;

   if (header.header_size < sizeof(header))
      return;
   if (header.header_version != VK_PIPELINE_CACHE_HEADER_VERSION_ONE)
      return;
   if (header.vendor_id != 0x8086)
      return;
   if (header.device_id != device->chipset_id)
      return;
   if (memcmp(header.uuid, pdevice->pipeline_cache_uuid, VK_UUID_SIZE) != 0)
      return;

   for (uint32_t i = 0; i < count; i++) {
      struct anv_shader_bin *bin =
         anv_shader_bin_create_from_blob(device, &blob);
      if (!bin)
         break;
      _mesa_hash_table_insert(cache->cache, bin->key, bin);
   }
}

VkResult anv_CreatePipelineCache(
    VkDevice                                    _device,
    const VkPipelineCacheCreateInfo*            pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkPipelineCache*                            pPipelineCache)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_pipeline_cache *cache;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO);
   assert(pCreateInfo->flags == 0);

   cache = vk_alloc2(&device->alloc, pAllocator,
                     sizeof(*cache), 8,
                     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (cache == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   anv_pipeline_cache_init(cache, device,
                           device->instance->pipeline_cache_enabled);

   if (pCreateInfo->initialDataSize > 0)
      anv_pipeline_cache_load(cache,
                              pCreateInfo->pInitialData,
                              pCreateInfo->initialDataSize);

   *pPipelineCache = anv_pipeline_cache_to_handle(cache);

   return VK_SUCCESS;
}

void anv_DestroyPipelineCache(
    VkDevice                                    _device,
    VkPipelineCache                             _cache,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache);

   if (!cache)
      return;

   anv_pipeline_cache_finish(cache);

   vk_free2(&device->alloc, pAllocator, cache);
}

VkResult anv_GetPipelineCacheData(
    VkDevice                                    _device,
    VkPipelineCache                             _cache,
    size_t*                                     pDataSize,
    void*                                       pData)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache);
   struct anv_physical_device *pdevice = &device->instance->physicalDevice;

   struct blob blob;
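   /* Standard Vulkan two-call idiom: with pData == NULL we only need to
    * compute the required size, so write into a NULL fixed blob of SIZE_MAX
    * bytes (the blob writers skip the memcpy but still advance blob.size,
    * which we report back through *pDataSize).
    */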
   if (pData) {
      blob_init_fixed(&blob, pData, *pDataSize);
   } else {
      blob_init_fixed(&blob, NULL, SIZE_MAX);
   }

   struct cache_header header = {
      .header_size = sizeof(struct cache_header),
      .header_version = VK_PIPELINE_CACHE_HEADER_VERSION_ONE,
      .vendor_id = 0x8086,
      .device_id = device->chipset_id,
   };
   memcpy(header.uuid, pdevice->pipeline_cache_uuid, VK_UUID_SIZE);
   blob_write_bytes(&blob, &header, sizeof(header));

   uint32_t count = 0;
   intptr_t count_offset = blob_reserve_uint32(&blob);
   if (count_offset < 0) {
      *pDataSize = 0;
      blob_finish(&blob);
      return VK_INCOMPLETE;
   }

   VkResult result = VK_SUCCESS;
   if (cache->cache) {
      hash_table_foreach(cache->cache, entry) {
         struct anv_shader_bin *shader = entry->data;

         size_t save_size = blob.size;
         if (!anv_shader_bin_write_to_blob(shader, &blob)) {
            /* If the write fails, reset to the previous size and bail */
            blob.size = save_size;
            result = VK_INCOMPLETE;
            break;
         }

         count++;
      }
   }

   blob_overwrite_uint32(&blob, count_offset, count);

   *pDataSize = blob.size;

   blob_finish(&blob);

   return result;
}

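/* vkMergePipelineCaches shares entries rather than copying them: each bin
 * pulled into dst takes an extra reference, so the source caches can be
 * destroyed independently afterwards.
 */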
VkResult anv_MergePipelineCaches(
    VkDevice                                    _device,
    VkPipelineCache                             destCache,
    uint32_t                                    srcCacheCount,
    const VkPipelineCache*                      pSrcCaches)
{
   ANV_FROM_HANDLE(anv_pipeline_cache, dst, destCache);

   if (!dst->cache)
      return VK_SUCCESS;

   for (uint32_t i = 0; i < srcCacheCount; i++) {
      ANV_FROM_HANDLE(anv_pipeline_cache, src, pSrcCaches[i]);
      if (!src->cache)
         continue;

      hash_table_foreach(src->cache, entry) {
         struct anv_shader_bin *bin = entry->data;
         assert(bin);

         if (_mesa_hash_table_search(dst->cache, bin->key))
            continue;

         anv_shader_bin_ref(bin);
         _mesa_hash_table_insert(dst->cache, bin->key, bin);
      }
   }

   return VK_SUCCESS;
}

struct anv_shader_bin *
anv_device_search_for_kernel(struct anv_device *device,
                             struct anv_pipeline_cache *cache,
                             const void *key_data, uint32_t key_size)
{
   struct anv_shader_bin *bin;

   if (cache) {
      bin = anv_pipeline_cache_search(cache, key_data, key_size);
      if (bin)
         return bin;
   }

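   /* Miss in the in-memory cache: fall back to the on-disk shader cache and,
    * on a hit, promote the deserialized bin into the pipeline cache so the
    * next lookup is served from memory.
    */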
#ifdef ENABLE_SHADER_CACHE
   struct disk_cache *disk_cache = device->instance->physicalDevice.disk_cache;
   if (disk_cache && device->instance->pipeline_cache_enabled) {
      cache_key cache_key;
      disk_cache_compute_key(disk_cache, key_data, key_size, cache_key);

      size_t buffer_size;
      uint8_t *buffer = disk_cache_get(disk_cache, cache_key, &buffer_size);
      if (buffer) {
         struct blob_reader blob;
         blob_reader_init(&blob, buffer, buffer_size);
         bin = anv_shader_bin_create_from_blob(device, &blob);
         free(buffer);

         if (bin) {
            if (cache)
               anv_pipeline_cache_add_shader_bin(cache, bin);
            return bin;
         }
      }
   }
#endif

   return NULL;
}

struct anv_shader_bin *
anv_device_upload_kernel(struct anv_device *device,
                         struct anv_pipeline_cache *cache,
                         const void *key_data, uint32_t key_size,
                         const void *kernel_data, uint32_t kernel_size,
                         const void *constant_data,
                         uint32_t constant_data_size,
                         const struct brw_stage_prog_data *prog_data,
                         uint32_t prog_data_size,
                         const struct anv_pipeline_bind_map *bind_map)
{
   struct anv_shader_bin *bin;
   if (cache) {
      bin = anv_pipeline_cache_upload_kernel(cache, key_data, key_size,
                                             kernel_data, kernel_size,
                                             constant_data, constant_data_size,
                                             prog_data, prog_data_size,
                                             bind_map);
   } else {
      bin = anv_shader_bin_create(device, key_data, key_size,
                                  kernel_data, kernel_size,
                                  constant_data, constant_data_size,
                                  prog_data, prog_data_size,
                                  prog_data->param, bind_map);
   }

   if (bin == NULL)
      return NULL;

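   /* Whichever path produced the bin, also write it through to the on-disk
    * cache (when enabled) so future runs can skip compilation entirely.
    */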
#ifdef ENABLE_SHADER_CACHE
   struct disk_cache *disk_cache = device->instance->physicalDevice.disk_cache;
   if (disk_cache) {
      struct blob binary;
      blob_init(&binary);
      anv_shader_bin_write_to_blob(bin, &binary);

      if (!binary.out_of_memory) {
         cache_key cache_key;
         disk_cache_compute_key(disk_cache, key_data, key_size, cache_key);

         disk_cache_put(disk_cache, cache_key, binary.data, binary.size, NULL);
      }

      blob_finish(&binary);
   }
#endif

   return bin;
}

struct serialized_nir {
   unsigned char sha1_key[20];
   size_t size;
   char data[0];
};

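/* Looks up serialized NIR by its 20-byte SHA-1 key and, on a hit,
 * deserializes a fresh copy into mem_ctx.  Only the hash table lookup needs
 * the mutex: nir_cache entries are never freed before
 * anv_pipeline_cache_finish(), so reading snir->data afterwards is safe.
 */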
struct nir_shader *
anv_device_search_for_nir(struct anv_device *device,
                          struct anv_pipeline_cache *cache,
                          const nir_shader_compiler_options *nir_options,
                          unsigned char sha1_key[20],
                          void *mem_ctx)
{
   if (cache && cache->nir_cache) {
      const struct serialized_nir *snir = NULL;

      pthread_mutex_lock(&cache->mutex);
      struct hash_entry *entry =
         _mesa_hash_table_search(cache->nir_cache, sha1_key);
      if (entry)
         snir = entry->data;
      pthread_mutex_unlock(&cache->mutex);

      if (snir) {
         struct blob_reader blob;
         blob_reader_init(&blob, snir->data, snir->size);

         nir_shader *nir = nir_deserialize(mem_ctx, nir_options, &blob);
         if (blob.overrun) {
            ralloc_free(nir);
         } else {
            return nir;
         }
      }
   }

   return NULL;
}

void
anv_device_upload_nir(struct anv_device *device,
                      struct anv_pipeline_cache *cache,
                      const struct nir_shader *nir,
                      unsigned char sha1_key[20])
{
   if (cache && cache->nir_cache) {
      pthread_mutex_lock(&cache->mutex);
      struct hash_entry *entry =
         _mesa_hash_table_search(cache->nir_cache, sha1_key);
      pthread_mutex_unlock(&cache->mutex);
      if (entry)
         return;

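      /* Serialize without holding the mutex; the second search below handles
       * the race where another thread uploaded the same NIR in the meantime.
       */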
      struct blob blob;
      blob_init(&blob);

      nir_serialize(&blob, nir);
      if (blob.out_of_memory) {
         blob_finish(&blob);
         return;
      }

      pthread_mutex_lock(&cache->mutex);
      /* Because ralloc isn't thread-safe, we have to do all this inside the
       * lock.  We could unlock for the big memcpy but it's probably not worth
       * the hassle.
       */
      entry = _mesa_hash_table_search(cache->nir_cache, sha1_key);
      if (entry) {
         blob_finish(&blob);
         pthread_mutex_unlock(&cache->mutex);
         return;
      }

      struct serialized_nir *snir =
         ralloc_size(cache->nir_cache, sizeof(*snir) + blob.size);
      memcpy(snir->sha1_key, sha1_key, 20);
      snir->size = blob.size;
      memcpy(snir->data, blob.data, blob.size);

      /* The serialized bytes now live in the ralloc'd snir, so the blob can
       * be released before we publish the entry.
       */
      blob_finish(&blob);

      _mesa_hash_table_insert(cache->nir_cache, snir->sha1_key, snir);

      pthread_mutex_unlock(&cache->mutex);
   }
}