anv: Add pipeline cache support for xfb_info
src/intel/vulkan/anv_pipeline_cache.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "compiler/blob.h"
#include "util/hash_table.h"
#include "util/debug.h"
#include "util/disk_cache.h"
#include "util/mesa-sha1.h"
#include "nir/nir_serialize.h"
#include "anv_private.h"
#include "nir/nir_xfb_info.h"

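/* Create a new anv_shader_bin.  All of the variable-length data (key,
 * prog_data, push-constant params, the optional xfb_info, and the bind map
 * tables) is copied into a single allocation via ANV_MULTIALLOC, while the
 * kernel binary and constant data are uploaded into device state pools.
 * The returned shader starts with a reference count of one; callers drop
 * their reference with anv_shader_bin_unref() when they are done with it.
 */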
struct anv_shader_bin *
anv_shader_bin_create(struct anv_device *device,
                      const void *key_data, uint32_t key_size,
                      const void *kernel_data, uint32_t kernel_size,
                      const void *constant_data, uint32_t constant_data_size,
                      const struct brw_stage_prog_data *prog_data_in,
                      uint32_t prog_data_size, const void *prog_data_param_in,
                      const nir_xfb_info *xfb_info_in,
                      const struct anv_pipeline_bind_map *bind_map)
{
   struct anv_shader_bin *shader;
   struct anv_shader_bin_key *key;
   struct brw_stage_prog_data *prog_data;
   uint32_t *prog_data_param;
   nir_xfb_info *xfb_info;
   struct anv_pipeline_binding *surface_to_descriptor, *sampler_to_descriptor;

   ANV_MULTIALLOC(ma);
   anv_multialloc_add(&ma, &shader, 1);
   anv_multialloc_add_size(&ma, &key, sizeof(*key) + key_size);
   anv_multialloc_add_size(&ma, &prog_data, prog_data_size);
   anv_multialloc_add(&ma, &prog_data_param, prog_data_in->nr_params);
   if (xfb_info_in) {
      uint32_t xfb_info_size = nir_xfb_info_size(xfb_info_in->output_count);
      anv_multialloc_add_size(&ma, &xfb_info, xfb_info_size);
   }
   anv_multialloc_add(&ma, &surface_to_descriptor,
                      bind_map->surface_count);
   anv_multialloc_add(&ma, &sampler_to_descriptor,
                      bind_map->sampler_count);

   if (!anv_multialloc_alloc(&ma, &device->alloc,
                             VK_SYSTEM_ALLOCATION_SCOPE_DEVICE))
      return NULL;

   shader->ref_cnt = 1;

   key->size = key_size;
   memcpy(key->data, key_data, key_size);
   shader->key = key;

   shader->kernel =
      anv_state_pool_alloc(&device->instruction_state_pool, kernel_size, 64);
   memcpy(shader->kernel.map, kernel_data, kernel_size);
   shader->kernel_size = kernel_size;

   if (constant_data_size) {
      shader->constant_data =
         anv_state_pool_alloc(&device->dynamic_state_pool,
                              constant_data_size, 32);
      memcpy(shader->constant_data.map, constant_data, constant_data_size);
   } else {
      shader->constant_data = ANV_STATE_NULL;
   }
   shader->constant_data_size = constant_data_size;

   memcpy(prog_data, prog_data_in, prog_data_size);
   memcpy(prog_data_param, prog_data_param_in,
          prog_data->nr_params * sizeof(*prog_data_param));
   prog_data->param = prog_data_param;
   shader->prog_data = prog_data;
   shader->prog_data_size = prog_data_size;

   if (xfb_info_in) {
      *xfb_info = *xfb_info_in;
      typed_memcpy(xfb_info->outputs, xfb_info_in->outputs,
                   xfb_info_in->output_count);
      shader->xfb_info = xfb_info;
   } else {
      shader->xfb_info = NULL;
   }

   shader->bind_map = *bind_map;
   typed_memcpy(surface_to_descriptor, bind_map->surface_to_descriptor,
                bind_map->surface_count);
   shader->bind_map.surface_to_descriptor = surface_to_descriptor;
   typed_memcpy(sampler_to_descriptor, bind_map->sampler_to_descriptor,
                bind_map->sampler_count);
   shader->bind_map.sampler_to_descriptor = sampler_to_descriptor;

   return shader;
}

void
anv_shader_bin_destroy(struct anv_device *device,
                       struct anv_shader_bin *shader)
{
   assert(shader->ref_cnt == 0);
   anv_state_pool_free(&device->instruction_state_pool, shader->kernel);
   anv_state_pool_free(&device->dynamic_state_pool, shader->constant_data);
   vk_free(&device->alloc, shader);
}

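/* Serialize a shader binary into a blob.  The blob_write_* helpers are
 * designed to latch an error on the blob once a write fails, after which
 * further writes also fail, so checking only the result of the final write
 * should be enough to detect a failed serialization.
 */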
static bool
anv_shader_bin_write_to_blob(const struct anv_shader_bin *shader,
                             struct blob *blob)
{
   bool ok;

   ok = blob_write_uint32(blob, shader->key->size);
   ok = blob_write_bytes(blob, shader->key->data, shader->key->size);

   ok = blob_write_uint32(blob, shader->kernel_size);
   ok = blob_write_bytes(blob, shader->kernel.map, shader->kernel_size);

   ok = blob_write_uint32(blob, shader->constant_data_size);
   ok = blob_write_bytes(blob, shader->constant_data.map,
                         shader->constant_data_size);

   ok = blob_write_uint32(blob, shader->prog_data_size);
   ok = blob_write_bytes(blob, shader->prog_data, shader->prog_data_size);
   ok = blob_write_bytes(blob, shader->prog_data->param,
                         shader->prog_data->nr_params *
                         sizeof(*shader->prog_data->param));

   if (shader->xfb_info) {
      uint32_t xfb_info_size =
         nir_xfb_info_size(shader->xfb_info->output_count);
      ok = blob_write_uint32(blob, xfb_info_size);
      ok = blob_write_bytes(blob, shader->xfb_info, xfb_info_size);
   } else {
      ok = blob_write_uint32(blob, 0);
   }

   ok = blob_write_uint32(blob, shader->bind_map.surface_count);
   ok = blob_write_uint32(blob, shader->bind_map.sampler_count);
   ok = blob_write_uint32(blob, shader->bind_map.image_count);
   ok = blob_write_bytes(blob, shader->bind_map.surface_to_descriptor,
                         shader->bind_map.surface_count *
                         sizeof(*shader->bind_map.surface_to_descriptor));
   ok = blob_write_bytes(blob, shader->bind_map.sampler_to_descriptor,
                         shader->bind_map.sampler_count *
                         sizeof(*shader->bind_map.sampler_to_descriptor));

   return ok;
}

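/* Deserialize a shader binary.  The reads here must stay in exact lockstep
 * with the writes in anv_shader_bin_write_to_blob() above; any mismatch
 * shows up as blob->overrun and we bail out with NULL.
 */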
static struct anv_shader_bin *
anv_shader_bin_create_from_blob(struct anv_device *device,
                                struct blob_reader *blob)
{
   uint32_t key_size = blob_read_uint32(blob);
   const void *key_data = blob_read_bytes(blob, key_size);

   uint32_t kernel_size = blob_read_uint32(blob);
   const void *kernel_data = blob_read_bytes(blob, kernel_size);

   uint32_t constant_data_size = blob_read_uint32(blob);
   const void *constant_data = blob_read_bytes(blob, constant_data_size);

   uint32_t prog_data_size = blob_read_uint32(blob);
   const struct brw_stage_prog_data *prog_data =
      blob_read_bytes(blob, prog_data_size);
   if (blob->overrun)
      return NULL;
   const void *prog_data_param =
      blob_read_bytes(blob, prog_data->nr_params * sizeof(*prog_data->param));

   const nir_xfb_info *xfb_info = NULL;
   uint32_t xfb_size = blob_read_uint32(blob);
   if (xfb_size)
      xfb_info = blob_read_bytes(blob, xfb_size);

   struct anv_pipeline_bind_map bind_map;
   bind_map.surface_count = blob_read_uint32(blob);
   bind_map.sampler_count = blob_read_uint32(blob);
   bind_map.image_count = blob_read_uint32(blob);
   bind_map.surface_to_descriptor = (void *)
      blob_read_bytes(blob, bind_map.surface_count *
                            sizeof(*bind_map.surface_to_descriptor));
   bind_map.sampler_to_descriptor = (void *)
      blob_read_bytes(blob, bind_map.sampler_count *
                            sizeof(*bind_map.sampler_to_descriptor));

   if (blob->overrun)
      return NULL;

   return anv_shader_bin_create(device,
                                key_data, key_size,
                                kernel_data, kernel_size,
                                constant_data, constant_data_size,
                                prog_data, prog_data_size, prog_data_param,
                                xfb_info, &bind_map);
}

/* Remaining work:
 *
 * - Compact binding table layout so it's tight and not dependent on
 *   descriptor set layout.
 *
 * - Review the prog_data struct for size and cacheability: struct
 *   brw_stage_prog_data has a binding_table which uses a lot of uint32_t
 *   for 8-bit quantities, etc.; use bit fields for all bools, e.g.
 *   dual_src_blend.
 */

static uint32_t
shader_bin_key_hash_func(const void *void_key)
{
   const struct anv_shader_bin_key *key = void_key;
   return _mesa_hash_data(key->data, key->size);
}

static bool
shader_bin_key_compare_func(const void *void_a, const void *void_b)
{
   const struct anv_shader_bin_key *a = void_a, *b = void_b;
   if (a->size != b->size)
      return false;

   return memcmp(a->data, b->data, a->size) == 0;
}

static uint32_t
sha1_hash_func(const void *sha1)
{
   return _mesa_hash_data(sha1, 20);
}

static bool
sha1_compare_func(const void *sha1_a, const void *sha1_b)
{
   return memcmp(sha1_a, sha1_b, 20) == 0;
}

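/* A pipeline cache holds two hash tables: one mapping shader-bin keys to
 * anv_shader_bin objects and one mapping 20-byte SHA-1 hashes to serialized
 * NIR.  When caching is disabled both tables are NULL and every lookup
 * simply misses.
 */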
void
anv_pipeline_cache_init(struct anv_pipeline_cache *cache,
                        struct anv_device *device,
                        bool cache_enabled)
{
   cache->device = device;
   pthread_mutex_init(&cache->mutex, NULL);

   if (cache_enabled) {
      cache->cache = _mesa_hash_table_create(NULL, shader_bin_key_hash_func,
                                             shader_bin_key_compare_func);
      cache->nir_cache = _mesa_hash_table_create(NULL, sha1_hash_func,
                                                 sha1_compare_func);
   } else {
      cache->cache = NULL;
      cache->nir_cache = NULL;
   }
}

void
anv_pipeline_cache_finish(struct anv_pipeline_cache *cache)
{
   pthread_mutex_destroy(&cache->mutex);

   if (cache->cache) {
      /* This is a bit unfortunate.  In order to keep things from randomly
       * going away, the shader cache has to hold a reference to all shader
       * binaries it contains.  We unref them when we destroy the cache.
       */
      hash_table_foreach(cache->cache, entry)
         anv_shader_bin_unref(cache->device, entry->data);

      _mesa_hash_table_destroy(cache->cache, NULL);
   }

   if (cache->nir_cache) {
      hash_table_foreach(cache->nir_cache, entry)
         ralloc_free(entry->data);

      _mesa_hash_table_destroy(cache->nir_cache, NULL);
   }
}

static struct anv_shader_bin *
anv_pipeline_cache_search_locked(struct anv_pipeline_cache *cache,
                                 const void *key_data, uint32_t key_size)
{
   uint32_t vla[1 + DIV_ROUND_UP(key_size, sizeof(uint32_t))];
   struct anv_shader_bin_key *key = (void *)vla;
   key->size = key_size;
   memcpy(key->data, key_data, key_size);

   struct hash_entry *entry = _mesa_hash_table_search(cache->cache, key);
   if (entry)
      return entry->data;
   else
      return NULL;
}

struct anv_shader_bin *
anv_pipeline_cache_search(struct anv_pipeline_cache *cache,
                          const void *key_data, uint32_t key_size)
{
   if (!cache->cache)
      return NULL;

   pthread_mutex_lock(&cache->mutex);

   struct anv_shader_bin *shader =
      anv_pipeline_cache_search_locked(cache, key_data, key_size);

   pthread_mutex_unlock(&cache->mutex);

   /* We increment refcount before handing it to the caller */
   if (shader)
      anv_shader_bin_ref(shader);

   return shader;
}

static void
anv_pipeline_cache_add_shader_bin(struct anv_pipeline_cache *cache,
                                  struct anv_shader_bin *bin)
{
   if (!cache->cache)
      return;

   pthread_mutex_lock(&cache->mutex);

   struct hash_entry *entry = _mesa_hash_table_search(cache->cache, bin->key);
   if (entry == NULL) {
      /* Take a reference for the cache */
      anv_shader_bin_ref(bin);
      _mesa_hash_table_insert(cache->cache, bin->key, bin);
   }

   pthread_mutex_unlock(&cache->mutex);
}

static struct anv_shader_bin *
anv_pipeline_cache_add_shader_locked(struct anv_pipeline_cache *cache,
                                     const void *key_data, uint32_t key_size,
                                     const void *kernel_data,
                                     uint32_t kernel_size,
                                     const void *constant_data,
                                     uint32_t constant_data_size,
                                     const struct brw_stage_prog_data *prog_data,
                                     uint32_t prog_data_size,
                                     const void *prog_data_param,
                                     const nir_xfb_info *xfb_info,
                                     const struct anv_pipeline_bind_map *bind_map)
{
   struct anv_shader_bin *shader =
      anv_pipeline_cache_search_locked(cache, key_data, key_size);
   if (shader)
      return shader;

   struct anv_shader_bin *bin =
      anv_shader_bin_create(cache->device, key_data, key_size,
                            kernel_data, kernel_size,
                            constant_data, constant_data_size,
                            prog_data, prog_data_size, prog_data_param,
                            xfb_info, bind_map);
   if (!bin)
      return NULL;

   _mesa_hash_table_insert(cache->cache, bin->key, bin);

   return bin;
}

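/* Add a freshly compiled kernel to the cache, or create a stand-alone
 * shader bin when caching is disabled.  In the cached case the cache keeps
 * its own reference and the pointer handed back carries an extra reference
 * which the caller must eventually drop with anv_shader_bin_unref(); in the
 * uncached case the caller owns the only reference.
 */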
struct anv_shader_bin *
anv_pipeline_cache_upload_kernel(struct anv_pipeline_cache *cache,
                                 const void *key_data, uint32_t key_size,
                                 const void *kernel_data, uint32_t kernel_size,
                                 const void *constant_data,
                                 uint32_t constant_data_size,
                                 const struct brw_stage_prog_data *prog_data,
                                 uint32_t prog_data_size,
                                 const nir_xfb_info *xfb_info,
                                 const struct anv_pipeline_bind_map *bind_map)
{
   if (cache->cache) {
      pthread_mutex_lock(&cache->mutex);

      struct anv_shader_bin *bin =
         anv_pipeline_cache_add_shader_locked(cache, key_data, key_size,
                                              kernel_data, kernel_size,
                                              constant_data, constant_data_size,
                                              prog_data, prog_data_size,
                                              prog_data->param,
                                              xfb_info, bind_map);

      pthread_mutex_unlock(&cache->mutex);

      /* We increment refcount before handing it to the caller */
      if (bin)
         anv_shader_bin_ref(bin);

      return bin;
   } else {
      /* In this case, we're not caching it so the caller owns it entirely */
      return anv_shader_bin_create(cache->device, key_data, key_size,
                                   kernel_data, kernel_size,
                                   constant_data, constant_data_size,
                                   prog_data, prog_data_size,
                                   prog_data->param,
                                   xfb_info, bind_map);
   }
}

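/* Header layout required by the Vulkan spec for VkPipelineCache initial
 * data and for the blob returned by vkGetPipelineCacheData: header length,
 * header version, vendor ID, device ID, then the pipeline cache UUID.
 */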
struct cache_header {
   uint32_t header_size;
   uint32_t header_version;
   uint32_t vendor_id;
   uint32_t device_id;
   uint8_t  uuid[VK_UUID_SIZE];
};

static void
anv_pipeline_cache_load(struct anv_pipeline_cache *cache,
                        const void *data, size_t size)
{
   struct anv_device *device = cache->device;
   struct anv_physical_device *pdevice = &device->instance->physicalDevice;

   if (cache->cache == NULL)
      return;

   struct blob_reader blob;
   blob_reader_init(&blob, data, size);

   struct cache_header header;
   blob_copy_bytes(&blob, &header, sizeof(header));
   uint32_t count = blob_read_uint32(&blob);
   if (blob.overrun)
      return;

   if (header.header_size < sizeof(header))
      return;
   if (header.header_version != VK_PIPELINE_CACHE_HEADER_VERSION_ONE)
      return;
   if (header.vendor_id != 0x8086)
      return;
   if (header.device_id != device->chipset_id)
      return;
   if (memcmp(header.uuid, pdevice->pipeline_cache_uuid, VK_UUID_SIZE) != 0)
      return;

   for (uint32_t i = 0; i < count; i++) {
      struct anv_shader_bin *bin =
         anv_shader_bin_create_from_blob(device, &blob);
      if (!bin)
         break;
      _mesa_hash_table_insert(cache->cache, bin->key, bin);
   }
}

VkResult anv_CreatePipelineCache(
    VkDevice                                    _device,
    const VkPipelineCacheCreateInfo*            pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkPipelineCache*                            pPipelineCache)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_pipeline_cache *cache;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO);
   assert(pCreateInfo->flags == 0);

   cache = vk_alloc2(&device->alloc, pAllocator,
                     sizeof(*cache), 8,
                     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (cache == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   anv_pipeline_cache_init(cache, device,
                           device->instance->pipeline_cache_enabled);

   if (pCreateInfo->initialDataSize > 0)
      anv_pipeline_cache_load(cache,
                              pCreateInfo->pInitialData,
                              pCreateInfo->initialDataSize);

   *pPipelineCache = anv_pipeline_cache_to_handle(cache);

   return VK_SUCCESS;
}

void anv_DestroyPipelineCache(
    VkDevice                                    _device,
    VkPipelineCache                             _cache,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache);

   if (!cache)
      return;

   anv_pipeline_cache_finish(cache);

   vk_free2(&device->alloc, pAllocator, cache);
}

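/* vkGetPipelineCacheData follows the usual Vulkan two-call idiom: when
 * pData is NULL we serialize into a fixed blob with a NULL backing buffer
 * and SIZE_MAX capacity, which only accumulates the size, so *pDataSize
 * reports how many bytes a real dump would need.
 */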
VkResult anv_GetPipelineCacheData(
    VkDevice                                    _device,
    VkPipelineCache                             _cache,
    size_t*                                     pDataSize,
    void*                                       pData)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache);
   struct anv_physical_device *pdevice = &device->instance->physicalDevice;

   struct blob blob;
   if (pData) {
      blob_init_fixed(&blob, pData, *pDataSize);
   } else {
      blob_init_fixed(&blob, NULL, SIZE_MAX);
   }

   struct cache_header header = {
      .header_size = sizeof(struct cache_header),
      .header_version = VK_PIPELINE_CACHE_HEADER_VERSION_ONE,
      .vendor_id = 0x8086,
      .device_id = device->chipset_id,
   };
   memcpy(header.uuid, pdevice->pipeline_cache_uuid, VK_UUID_SIZE);
   blob_write_bytes(&blob, &header, sizeof(header));

   uint32_t count = 0;
   intptr_t count_offset = blob_reserve_uint32(&blob);
   if (count_offset < 0) {
      *pDataSize = 0;
      blob_finish(&blob);
      return VK_INCOMPLETE;
   }

   VkResult result = VK_SUCCESS;
   if (cache->cache) {
      hash_table_foreach(cache->cache, entry) {
         struct anv_shader_bin *shader = entry->data;

         size_t save_size = blob.size;
         if (!anv_shader_bin_write_to_blob(shader, &blob)) {
            /* If it fails reset to the previous size and bail */
            blob.size = save_size;
            result = VK_INCOMPLETE;
            break;
         }

         count++;
      }
   }

   blob_overwrite_uint32(&blob, count_offset, count);

   *pDataSize = blob.size;

   blob_finish(&blob);

   return result;
}

VkResult anv_MergePipelineCaches(
    VkDevice                                    _device,
    VkPipelineCache                             destCache,
    uint32_t                                    srcCacheCount,
    const VkPipelineCache*                      pSrcCaches)
{
   ANV_FROM_HANDLE(anv_pipeline_cache, dst, destCache);

   if (!dst->cache)
      return VK_SUCCESS;

   for (uint32_t i = 0; i < srcCacheCount; i++) {
      ANV_FROM_HANDLE(anv_pipeline_cache, src, pSrcCaches[i]);
      if (!src->cache)
         continue;

      hash_table_foreach(src->cache, entry) {
         struct anv_shader_bin *bin = entry->data;
         assert(bin);

         if (_mesa_hash_table_search(dst->cache, bin->key))
            continue;

         anv_shader_bin_ref(bin);
         _mesa_hash_table_insert(dst->cache, bin->key, bin);
      }
   }

   return VK_SUCCESS;
}

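/* Look a compiled kernel up first in the in-memory VkPipelineCache (if any)
 * and then, when ENABLE_SHADER_CACHE is built in and the driver-wide cache
 * is enabled, in the on-disk cache.  A disk-cache hit is deserialized and
 * also inserted into the in-memory cache so later lookups stay cheap.
 *
 * A typical caller (sketch only, not the exact anv_pipeline.c code) looks
 * roughly like:
 *
 *    bin = anv_device_search_for_kernel(device, cache, key, key_size);
 *    if (bin == NULL) {
 *       ... run the brw_compile_*() backend ...
 *       bin = anv_device_upload_kernel(device, cache, key, key_size, ...);
 *    }
 */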
struct anv_shader_bin *
anv_device_search_for_kernel(struct anv_device *device,
                             struct anv_pipeline_cache *cache,
                             const void *key_data, uint32_t key_size)
{
   struct anv_shader_bin *bin;

   if (cache) {
      bin = anv_pipeline_cache_search(cache, key_data, key_size);
      if (bin)
         return bin;
   }

#ifdef ENABLE_SHADER_CACHE
   struct disk_cache *disk_cache = device->instance->physicalDevice.disk_cache;
   if (disk_cache && device->instance->pipeline_cache_enabled) {
      cache_key cache_key;
      disk_cache_compute_key(disk_cache, key_data, key_size, cache_key);

      size_t buffer_size;
      uint8_t *buffer = disk_cache_get(disk_cache, cache_key, &buffer_size);
      if (buffer) {
         struct blob_reader blob;
         blob_reader_init(&blob, buffer, buffer_size);
         bin = anv_shader_bin_create_from_blob(device, &blob);
         free(buffer);

         if (bin) {
            if (cache)
               anv_pipeline_cache_add_shader_bin(cache, bin);
            return bin;
         }
      }
   }
#endif

   return NULL;
}

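/* Hand a newly compiled kernel to the pipeline cache (or create it
 * stand-alone when no cache was provided) and, when the disk cache is
 * compiled in, also serialize it to disk keyed by the same hashed key.
 */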
struct anv_shader_bin *
anv_device_upload_kernel(struct anv_device *device,
                         struct anv_pipeline_cache *cache,
                         const void *key_data, uint32_t key_size,
                         const void *kernel_data, uint32_t kernel_size,
                         const void *constant_data,
                         uint32_t constant_data_size,
                         const struct brw_stage_prog_data *prog_data,
                         uint32_t prog_data_size,
                         const nir_xfb_info *xfb_info,
                         const struct anv_pipeline_bind_map *bind_map)
{
   struct anv_shader_bin *bin;
   if (cache) {
      bin = anv_pipeline_cache_upload_kernel(cache, key_data, key_size,
                                             kernel_data, kernel_size,
                                             constant_data, constant_data_size,
                                             prog_data, prog_data_size,
                                             xfb_info, bind_map);
   } else {
      bin = anv_shader_bin_create(device, key_data, key_size,
                                  kernel_data, kernel_size,
                                  constant_data, constant_data_size,
                                  prog_data, prog_data_size,
                                  prog_data->param,
                                  xfb_info, bind_map);
   }

   if (bin == NULL)
      return NULL;

#ifdef ENABLE_SHADER_CACHE
   struct disk_cache *disk_cache = device->instance->physicalDevice.disk_cache;
   if (disk_cache) {
      struct blob binary;
      blob_init(&binary);
      anv_shader_bin_write_to_blob(bin, &binary);

      if (!binary.out_of_memory) {
         cache_key cache_key;
         disk_cache_compute_key(disk_cache, key_data, key_size, cache_key);

         disk_cache_put(disk_cache, cache_key, binary.data, binary.size, NULL);
      }

      blob_finish(&binary);
   }
#endif

   return bin;
}

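/* Entries in the NIR cache are serialized NIR keyed by a 20-byte SHA-1 of
 * the caller's choosing.  On a hit the blob is deserialized into the
 * caller's mem_ctx, so the cached copy itself is never handed out.
 */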
struct serialized_nir {
   unsigned char sha1_key[20];
   size_t size;
   char data[0];
};

struct nir_shader *
anv_device_search_for_nir(struct anv_device *device,
                          struct anv_pipeline_cache *cache,
                          const nir_shader_compiler_options *nir_options,
                          unsigned char sha1_key[20],
                          void *mem_ctx)
{
   if (cache && cache->nir_cache) {
      const struct serialized_nir *snir = NULL;

      pthread_mutex_lock(&cache->mutex);
      struct hash_entry *entry =
         _mesa_hash_table_search(cache->nir_cache, sha1_key);
      if (entry)
         snir = entry->data;
      pthread_mutex_unlock(&cache->mutex);

      if (snir) {
         struct blob_reader blob;
         blob_reader_init(&blob, snir->data, snir->size);

         nir_shader *nir = nir_deserialize(mem_ctx, nir_options, &blob);
         if (blob.overrun) {
            ralloc_free(nir);
         } else {
            return nir;
         }
      }
   }

   return NULL;
}

void
anv_device_upload_nir(struct anv_device *device,
                      struct anv_pipeline_cache *cache,
                      const struct nir_shader *nir,
                      unsigned char sha1_key[20])
{
   if (cache && cache->nir_cache) {
      pthread_mutex_lock(&cache->mutex);
      struct hash_entry *entry =
         _mesa_hash_table_search(cache->nir_cache, sha1_key);
      pthread_mutex_unlock(&cache->mutex);
      if (entry)
         return;

      struct blob blob;
      blob_init(&blob);

      nir_serialize(&blob, nir);
      if (blob.out_of_memory) {
         blob_finish(&blob);
         return;
      }

      pthread_mutex_lock(&cache->mutex);
      /* Because ralloc isn't thread-safe, we have to do all this inside the
       * lock.  We could unlock for the big memcpy but it's probably not
       * worth the hassle.
       */
      entry = _mesa_hash_table_search(cache->nir_cache, sha1_key);
      if (entry) {
         blob_finish(&blob);
         pthread_mutex_unlock(&cache->mutex);
         return;
      }

      struct serialized_nir *snir =
         ralloc_size(cache->nir_cache, sizeof(*snir) + blob.size);
      memcpy(snir->sha1_key, sha1_key, 20);
      snir->size = blob.size;
      memcpy(snir->data, blob.data, blob.size);

      /* The serialized bytes now live in the ralloc'd copy; release the
       * temporary blob so its buffer doesn't leak.
       */
      blob_finish(&blob);

      _mesa_hash_table_insert(cache->nir_cache, snir->sha1_key, snir);

      pthread_mutex_unlock(&cache->mutex);
   }
}