anv: stop storing prog param data into shader blobs
[mesa.git] / src/intel/vulkan/anv_pipeline_cache.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/blob.h"
#include "util/hash_table.h"
#include "util/debug.h"
#include "util/disk_cache.h"
#include "util/mesa-sha1.h"
#include "nir/nir_serialize.h"
#include "anv_private.h"
#include "nir/nir_xfb_info.h"

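/* Create a reference-counted shader binary.  Everything except the kernel
 * and constant data (which go into device state pools) is copied into a
 * single multialloc'd allocation.  Per the commit summary above, the
 * contents of prog_data->param are not carried along: we allocate a fresh,
 * zeroed array of nr_params entries and point the copied prog_data at it.
 */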
struct anv_shader_bin *
anv_shader_bin_create(struct anv_device *device,
                      const void *key_data, uint32_t key_size,
                      const void *kernel_data, uint32_t kernel_size,
                      const void *constant_data, uint32_t constant_data_size,
                      const struct brw_stage_prog_data *prog_data_in,
                      uint32_t prog_data_size,
                      const struct brw_compile_stats *stats, uint32_t num_stats,
                      const nir_xfb_info *xfb_info_in,
                      const struct anv_pipeline_bind_map *bind_map)
{
   struct anv_shader_bin *shader;
   struct anv_shader_bin_key *key;
   struct brw_stage_prog_data *prog_data;
   uint32_t *prog_data_param;
   nir_xfb_info *xfb_info;
   struct anv_pipeline_binding *surface_to_descriptor, *sampler_to_descriptor;

   ANV_MULTIALLOC(ma);
   anv_multialloc_add(&ma, &shader, 1);
   anv_multialloc_add_size(&ma, &key, sizeof(*key) + key_size);
   anv_multialloc_add_size(&ma, &prog_data, prog_data_size);
   anv_multialloc_add(&ma, &prog_data_param, prog_data_in->nr_params);
   if (xfb_info_in) {
      uint32_t xfb_info_size = nir_xfb_info_size(xfb_info_in->output_count);
      anv_multialloc_add_size(&ma, &xfb_info, xfb_info_size);
   }
   anv_multialloc_add(&ma, &surface_to_descriptor,
                      bind_map->surface_count);
   anv_multialloc_add(&ma, &sampler_to_descriptor,
                      bind_map->sampler_count);

   if (!anv_multialloc_alloc(&ma, &device->alloc,
                             VK_SYSTEM_ALLOCATION_SCOPE_DEVICE))
      return NULL;

   shader->ref_cnt = 1;

   key->size = key_size;
   memcpy(key->data, key_data, key_size);
   shader->key = key;

   shader->kernel =
      anv_state_pool_alloc(&device->instruction_state_pool, kernel_size, 64);
   memcpy(shader->kernel.map, kernel_data, kernel_size);
   shader->kernel_size = kernel_size;

   if (constant_data_size) {
      shader->constant_data =
         anv_state_pool_alloc(&device->dynamic_state_pool,
                              constant_data_size, 32);
      memcpy(shader->constant_data.map, constant_data, constant_data_size);
   } else {
      shader->constant_data = ANV_STATE_NULL;
   }
   shader->constant_data_size = constant_data_size;

   memcpy(prog_data, prog_data_in, prog_data_size);
   memset(prog_data_param, 0,
          prog_data->nr_params * sizeof(*prog_data_param));
   prog_data->param = prog_data_param;
   shader->prog_data = prog_data;
   shader->prog_data_size = prog_data_size;

   assert(num_stats <= ARRAY_SIZE(shader->stats));
   typed_memcpy(shader->stats, stats, num_stats);
   shader->num_stats = num_stats;

   if (xfb_info_in) {
      *xfb_info = *xfb_info_in;
      typed_memcpy(xfb_info->outputs, xfb_info_in->outputs,
                   xfb_info_in->output_count);
      shader->xfb_info = xfb_info;
   } else {
      shader->xfb_info = NULL;
   }

   shader->bind_map = *bind_map;
   typed_memcpy(surface_to_descriptor, bind_map->surface_to_descriptor,
                bind_map->surface_count);
   shader->bind_map.surface_to_descriptor = surface_to_descriptor;
   typed_memcpy(sampler_to_descriptor, bind_map->sampler_to_descriptor,
                bind_map->sampler_count);
   shader->bind_map.sampler_to_descriptor = sampler_to_descriptor;

   return shader;
}

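/* Free a shader binary and the state-pool space it holds.  Callers go
 * through anv_shader_bin_unref(); the assert below enforces that this only
 * runs once the reference count has dropped to zero.
 */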
void
anv_shader_bin_destroy(struct anv_device *device,
                       struct anv_shader_bin *shader)
{
   assert(shader->ref_cnt == 0);
   anv_state_pool_free(&device->instruction_state_pool, shader->kernel);
   anv_state_pool_free(&device->dynamic_state_pool, shader->constant_data);
   vk_free(&device->alloc, shader);
}

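/* Serialize a shader binary.  The layout must be kept in sync with
 * anv_shader_bin_create_from_blob() below.  prog_data is written as raw
 * bytes; the param array it points to is deliberately not serialized, since
 * anv_shader_bin_create() rebuilds it as a zeroed array on load.
 */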
static bool
anv_shader_bin_write_to_blob(const struct anv_shader_bin *shader,
                             struct blob *blob)
{
   blob_write_uint32(blob, shader->key->size);
   blob_write_bytes(blob, shader->key->data, shader->key->size);

   blob_write_uint32(blob, shader->kernel_size);
   blob_write_bytes(blob, shader->kernel.map, shader->kernel_size);

   blob_write_uint32(blob, shader->constant_data_size);
   blob_write_bytes(blob, shader->constant_data.map,
                    shader->constant_data_size);

   blob_write_uint32(blob, shader->prog_data_size);
   blob_write_bytes(blob, shader->prog_data, shader->prog_data_size);

   blob_write_uint32(blob, shader->num_stats);
   blob_write_bytes(blob, shader->stats,
                    shader->num_stats * sizeof(shader->stats[0]));

   if (shader->xfb_info) {
      uint32_t xfb_info_size =
         nir_xfb_info_size(shader->xfb_info->output_count);
      blob_write_uint32(blob, xfb_info_size);
      blob_write_bytes(blob, shader->xfb_info, xfb_info_size);
   } else {
      blob_write_uint32(blob, 0);
   }

   blob_write_bytes(blob, shader->bind_map.surface_sha1,
                    sizeof(shader->bind_map.surface_sha1));
   blob_write_bytes(blob, shader->bind_map.sampler_sha1,
                    sizeof(shader->bind_map.sampler_sha1));
   blob_write_bytes(blob, shader->bind_map.push_sha1,
                    sizeof(shader->bind_map.push_sha1));
   blob_write_uint32(blob, shader->bind_map.surface_count);
   blob_write_uint32(blob, shader->bind_map.sampler_count);
   blob_write_bytes(blob, shader->bind_map.surface_to_descriptor,
                    shader->bind_map.surface_count *
                    sizeof(*shader->bind_map.surface_to_descriptor));
   blob_write_bytes(blob, shader->bind_map.sampler_to_descriptor,
                    shader->bind_map.sampler_count *
                    sizeof(*shader->bind_map.sampler_to_descriptor));
   blob_write_bytes(blob, shader->bind_map.push_ranges,
                    sizeof(shader->bind_map.push_ranges));

   return !blob->out_of_memory;
}

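/* Deserialize a shader binary written by anv_shader_bin_write_to_blob().
 * All reads are bounds-checked via blob->overrun, and the returned pointers
 * are only borrowed from the blob: anv_shader_bin_create() copies everything
 * it needs.  Returns NULL if the blob is malformed.
 */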
static struct anv_shader_bin *
anv_shader_bin_create_from_blob(struct anv_device *device,
                                struct blob_reader *blob)
{
   uint32_t key_size = blob_read_uint32(blob);
   const void *key_data = blob_read_bytes(blob, key_size);

   uint32_t kernel_size = blob_read_uint32(blob);
   const void *kernel_data = blob_read_bytes(blob, kernel_size);

   uint32_t constant_data_size = blob_read_uint32(blob);
   const void *constant_data = blob_read_bytes(blob, constant_data_size);

   uint32_t prog_data_size = blob_read_uint32(blob);
   const struct brw_stage_prog_data *prog_data =
      blob_read_bytes(blob, prog_data_size);
   if (blob->overrun)
      return NULL;

   uint32_t num_stats = blob_read_uint32(blob);
   const struct brw_compile_stats *stats =
      blob_read_bytes(blob, num_stats * sizeof(stats[0]));

   const nir_xfb_info *xfb_info = NULL;
   uint32_t xfb_size = blob_read_uint32(blob);
   if (xfb_size)
      xfb_info = blob_read_bytes(blob, xfb_size);

   struct anv_pipeline_bind_map bind_map;
   blob_copy_bytes(blob, bind_map.surface_sha1, sizeof(bind_map.surface_sha1));
   blob_copy_bytes(blob, bind_map.sampler_sha1, sizeof(bind_map.sampler_sha1));
   blob_copy_bytes(blob, bind_map.push_sha1, sizeof(bind_map.push_sha1));
   bind_map.surface_count = blob_read_uint32(blob);
   bind_map.sampler_count = blob_read_uint32(blob);
   bind_map.surface_to_descriptor = (void *)
      blob_read_bytes(blob, bind_map.surface_count *
                            sizeof(*bind_map.surface_to_descriptor));
   bind_map.sampler_to_descriptor = (void *)
      blob_read_bytes(blob, bind_map.sampler_count *
                            sizeof(*bind_map.sampler_to_descriptor));
   blob_copy_bytes(blob, bind_map.push_ranges, sizeof(bind_map.push_ranges));

   if (blob->overrun)
      return NULL;

   return anv_shader_bin_create(device,
                                key_data, key_size,
                                kernel_data, kernel_size,
                                constant_data, constant_data_size,
                                prog_data, prog_data_size,
                                stats, num_stats, xfb_info, &bind_map);
}

/* Remaining work:
 *
 * - Compact binding table layout so it's tight and not dependent on
 *   descriptor set layout.
 *
 * - Review prog_data struct for size and cacheability: struct
 *   brw_stage_prog_data has binding_table which uses a lot of uint32_t for
 *   8-bit quantities, etc.; use bit fields for all bools, e.g.
 *   dual_src_blend.
 */

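/* The in-memory cache uses two hash tables: shader binaries are keyed on the
 * raw bytes of a size-prefixed key, and serialized NIR is keyed on a 20-byte
 * SHA-1.
 */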
static uint32_t
shader_bin_key_hash_func(const void *void_key)
{
   const struct anv_shader_bin_key *key = void_key;
   return _mesa_hash_data(key->data, key->size);
}

static bool
shader_bin_key_compare_func(const void *void_a, const void *void_b)
{
   const struct anv_shader_bin_key *a = void_a, *b = void_b;
   if (a->size != b->size)
      return false;

   return memcmp(a->data, b->data, a->size) == 0;
}

static uint32_t
sha1_hash_func(const void *sha1)
{
   return _mesa_hash_data(sha1, 20);
}

static bool
sha1_compare_func(const void *sha1_a, const void *sha1_b)
{
   return memcmp(sha1_a, sha1_b, 20) == 0;
}

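/* Initialize a pipeline cache.  When cache_enabled is false (driven by the
 * instance's pipeline_cache_enabled flag), both tables stay NULL and every
 * lookup or insert below degenerates to a no-op.
 */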
void
anv_pipeline_cache_init(struct anv_pipeline_cache *cache,
                        struct anv_device *device,
                        bool cache_enabled)
{
   cache->device = device;
   pthread_mutex_init(&cache->mutex, NULL);

   if (cache_enabled) {
      cache->cache = _mesa_hash_table_create(NULL, shader_bin_key_hash_func,
                                             shader_bin_key_compare_func);
      cache->nir_cache = _mesa_hash_table_create(NULL, sha1_hash_func,
                                                 sha1_compare_func);
   } else {
      cache->cache = NULL;
      cache->nir_cache = NULL;
   }
}

void
anv_pipeline_cache_finish(struct anv_pipeline_cache *cache)
{
   pthread_mutex_destroy(&cache->mutex);

   if (cache->cache) {
      /* This is a bit unfortunate.  In order to keep things from randomly
       * going away, the shader cache has to hold a reference to all shader
       * binaries it contains.  We unref them when we destroy the cache.
       */
      hash_table_foreach(cache->cache, entry)
         anv_shader_bin_unref(cache->device, entry->data);

      _mesa_hash_table_destroy(cache->cache, NULL);
   }

   if (cache->nir_cache) {
      hash_table_foreach(cache->nir_cache, entry)
         ralloc_free(entry->data);

      _mesa_hash_table_destroy(cache->nir_cache, NULL);
   }
}

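/* Caller must hold cache->mutex.  The temporary key is built on the stack;
 * the uint32_t VLA provides the alignment that the size-prefixed
 * anv_shader_bin_key layout requires.
 */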
static struct anv_shader_bin *
anv_pipeline_cache_search_locked(struct anv_pipeline_cache *cache,
                                 const void *key_data, uint32_t key_size)
{
   uint32_t vla[1 + DIV_ROUND_UP(key_size, sizeof(uint32_t))];
   struct anv_shader_bin_key *key = (void *)vla;
   key->size = key_size;
   memcpy(key->data, key_data, key_size);

   struct hash_entry *entry = _mesa_hash_table_search(cache->cache, key);
   if (entry)
      return entry->data;
   else
      return NULL;
}

struct anv_shader_bin *
anv_pipeline_cache_search(struct anv_pipeline_cache *cache,
                          const void *key_data, uint32_t key_size)
{
   if (!cache->cache)
      return NULL;

   pthread_mutex_lock(&cache->mutex);

   struct anv_shader_bin *shader =
      anv_pipeline_cache_search_locked(cache, key_data, key_size);

   pthread_mutex_unlock(&cache->mutex);

   /* We increment refcount before handing it to the caller */
   if (shader)
      anv_shader_bin_ref(shader);

   return shader;
}

static void
anv_pipeline_cache_add_shader_bin(struct anv_pipeline_cache *cache,
                                  struct anv_shader_bin *bin)
{
   if (!cache->cache)
      return;

   pthread_mutex_lock(&cache->mutex);

   struct hash_entry *entry = _mesa_hash_table_search(cache->cache, bin->key);
   if (entry == NULL) {
      /* Take a reference for the cache */
      anv_shader_bin_ref(bin);
      _mesa_hash_table_insert(cache->cache, bin->key, bin);
   }

   pthread_mutex_unlock(&cache->mutex);
}

static struct anv_shader_bin *
anv_pipeline_cache_add_shader_locked(struct anv_pipeline_cache *cache,
                                     const void *key_data, uint32_t key_size,
                                     const void *kernel_data,
                                     uint32_t kernel_size,
                                     const void *constant_data,
                                     uint32_t constant_data_size,
                                     const struct brw_stage_prog_data *prog_data,
                                     uint32_t prog_data_size,
                                     const struct brw_compile_stats *stats,
                                     uint32_t num_stats,
                                     const nir_xfb_info *xfb_info,
                                     const struct anv_pipeline_bind_map *bind_map)
{
   struct anv_shader_bin *shader =
      anv_pipeline_cache_search_locked(cache, key_data, key_size);
   if (shader)
      return shader;

   struct anv_shader_bin *bin =
      anv_shader_bin_create(cache->device, key_data, key_size,
                            kernel_data, kernel_size,
                            constant_data, constant_data_size,
                            prog_data, prog_data_size,
                            stats, num_stats, xfb_info, bind_map);
   if (!bin)
      return NULL;

   _mesa_hash_table_insert(cache->cache, bin->key, bin);

   return bin;
}

struct anv_shader_bin *
anv_pipeline_cache_upload_kernel(struct anv_pipeline_cache *cache,
                                 const void *key_data, uint32_t key_size,
                                 const void *kernel_data, uint32_t kernel_size,
                                 const void *constant_data,
                                 uint32_t constant_data_size,
                                 const struct brw_stage_prog_data *prog_data,
                                 uint32_t prog_data_size,
                                 const struct brw_compile_stats *stats,
                                 uint32_t num_stats,
                                 const nir_xfb_info *xfb_info,
                                 const struct anv_pipeline_bind_map *bind_map)
{
   if (cache->cache) {
      pthread_mutex_lock(&cache->mutex);

      struct anv_shader_bin *bin =
         anv_pipeline_cache_add_shader_locked(cache, key_data, key_size,
                                              kernel_data, kernel_size,
                                              constant_data, constant_data_size,
                                              prog_data, prog_data_size,
                                              stats, num_stats,
                                              xfb_info, bind_map);

      pthread_mutex_unlock(&cache->mutex);

      /* We increment refcount before handing it to the caller */
      if (bin)
         anv_shader_bin_ref(bin);

      return bin;
   } else {
      /* In this case, we're not caching it so the caller owns it entirely */
      return anv_shader_bin_create(cache->device, key_data, key_size,
                                   kernel_data, kernel_size,
                                   constant_data, constant_data_size,
                                   prog_data, prog_data_size,
                                   stats, num_stats,
                                   xfb_info, bind_map);
   }
}

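/* This mirrors the pipeline cache data header layout required by the Vulkan
 * spec: length, version, vendorID, deviceID, then pipelineCacheUUID.
 */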
struct cache_header {
   uint32_t header_size;
   uint32_t header_version;
   uint32_t vendor_id;
   uint32_t device_id;
   uint8_t  uuid[VK_UUID_SIZE];
};

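/* Populate the cache from application-provided initial data.  The header is
 * validated against this device (Intel's 0x8086 vendor ID, the chipset ID,
 * and the driver's pipeline cache UUID); on any mismatch or overrun the data
 * is ignored and the cache simply starts out empty.
 */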
static void
anv_pipeline_cache_load(struct anv_pipeline_cache *cache,
                        const void *data, size_t size)
{
   struct anv_device *device = cache->device;
   struct anv_physical_device *pdevice = device->physical;

   if (cache->cache == NULL)
      return;

   struct blob_reader blob;
   blob_reader_init(&blob, data, size);

   struct cache_header header;
   blob_copy_bytes(&blob, &header, sizeof(header));
   uint32_t count = blob_read_uint32(&blob);
   if (blob.overrun)
      return;

   if (header.header_size < sizeof(header))
      return;
   if (header.header_version != VK_PIPELINE_CACHE_HEADER_VERSION_ONE)
      return;
   if (header.vendor_id != 0x8086)
      return;
   if (header.device_id != device->info.chipset_id)
      return;
   if (memcmp(header.uuid, pdevice->pipeline_cache_uuid, VK_UUID_SIZE) != 0)
      return;

   for (uint32_t i = 0; i < count; i++) {
      struct anv_shader_bin *bin =
         anv_shader_bin_create_from_blob(device, &blob);
      if (!bin)
         break;
      _mesa_hash_table_insert(cache->cache, bin->key, bin);
   }
}

VkResult anv_CreatePipelineCache(
    VkDevice                                    _device,
    const VkPipelineCacheCreateInfo*            pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkPipelineCache*                            pPipelineCache)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_pipeline_cache *cache;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO);
   assert(pCreateInfo->flags == 0);

   cache = vk_alloc2(&device->alloc, pAllocator,
                     sizeof(*cache), 8,
                     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (cache == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   anv_pipeline_cache_init(cache, device,
                           device->physical->instance->pipeline_cache_enabled);

   if (pCreateInfo->initialDataSize > 0)
      anv_pipeline_cache_load(cache,
                              pCreateInfo->pInitialData,
                              pCreateInfo->initialDataSize);

   *pPipelineCache = anv_pipeline_cache_to_handle(cache);

   return VK_SUCCESS;
}

void anv_DestroyPipelineCache(
    VkDevice                                    _device,
    VkPipelineCache                             _cache,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache);

   if (!cache)
      return;

   anv_pipeline_cache_finish(cache);

   vk_free2(&device->alloc, pAllocator, cache);
}

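/* vkGetPipelineCacheData follows the usual Vulkan two-call idiom: with
 * pData == NULL we serialize into an unbounded fixed blob just to compute
 * the required size; otherwise we write as many whole shaders as fit in
 * *pDataSize bytes and return VK_INCOMPLETE if any had to be dropped.
 */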
VkResult anv_GetPipelineCacheData(
    VkDevice                                    _device,
    VkPipelineCache                             _cache,
    size_t*                                     pDataSize,
    void*                                       pData)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache);

   struct blob blob;
   if (pData) {
      blob_init_fixed(&blob, pData, *pDataSize);
   } else {
      blob_init_fixed(&blob, NULL, SIZE_MAX);
   }

   struct cache_header header = {
      .header_size = sizeof(struct cache_header),
      .header_version = VK_PIPELINE_CACHE_HEADER_VERSION_ONE,
      .vendor_id = 0x8086,
      .device_id = device->info.chipset_id,
   };
   memcpy(header.uuid, device->physical->pipeline_cache_uuid, VK_UUID_SIZE);
   blob_write_bytes(&blob, &header, sizeof(header));

   uint32_t count = 0;
   intptr_t count_offset = blob_reserve_uint32(&blob);
   if (count_offset < 0) {
      *pDataSize = 0;
      blob_finish(&blob);
      return VK_INCOMPLETE;
   }

   VkResult result = VK_SUCCESS;
   if (cache->cache) {
      hash_table_foreach(cache->cache, entry) {
         struct anv_shader_bin *shader = entry->data;

         size_t save_size = blob.size;
         if (!anv_shader_bin_write_to_blob(shader, &blob)) {
            /* If it fails, reset to the previous size and bail */
            blob.size = save_size;
            result = VK_INCOMPLETE;
            break;
         }

         count++;
      }
   }

   blob_overwrite_uint32(&blob, count_offset, count);

   *pDataSize = blob.size;

   blob_finish(&blob);

   return result;
}

VkResult anv_MergePipelineCaches(
    VkDevice                                    _device,
    VkPipelineCache                             destCache,
    uint32_t                                    srcCacheCount,
    const VkPipelineCache*                      pSrcCaches)
{
   ANV_FROM_HANDLE(anv_pipeline_cache, dst, destCache);

   if (!dst->cache)
      return VK_SUCCESS;

   for (uint32_t i = 0; i < srcCacheCount; i++) {
      ANV_FROM_HANDLE(anv_pipeline_cache, src, pSrcCaches[i]);
      if (!src->cache)
         continue;

      hash_table_foreach(src->cache, entry) {
         struct anv_shader_bin *bin = entry->data;
         assert(bin);

         if (_mesa_hash_table_search(dst->cache, bin->key))
            continue;

         anv_shader_bin_ref(bin);
         _mesa_hash_table_insert(dst->cache, bin->key, bin);
      }
   }

   return VK_SUCCESS;
}

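/* Search for a kernel, first in the given pipeline cache and then in the
 * on-disk shader cache.  *user_cache_hit reports whether the hit came from
 * an application-supplied cache rather than the device's default one.  A
 * disk-cache hit is promoted into the in-memory cache so the
 * deserialization cost is only paid once.
 */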
struct anv_shader_bin *
anv_device_search_for_kernel(struct anv_device *device,
                             struct anv_pipeline_cache *cache,
                             const void *key_data, uint32_t key_size,
                             bool *user_cache_hit)
{
   struct anv_shader_bin *bin;

   *user_cache_hit = false;

   if (cache) {
      bin = anv_pipeline_cache_search(cache, key_data, key_size);
      if (bin) {
         *user_cache_hit = cache != &device->default_pipeline_cache;
         return bin;
      }
   }

#ifdef ENABLE_SHADER_CACHE
   struct disk_cache *disk_cache = device->physical->disk_cache;
   if (disk_cache && device->physical->instance->pipeline_cache_enabled) {
      cache_key cache_key;
      disk_cache_compute_key(disk_cache, key_data, key_size, cache_key);

      size_t buffer_size;
      uint8_t *buffer = disk_cache_get(disk_cache, cache_key, &buffer_size);
      if (buffer) {
         struct blob_reader blob;
         blob_reader_init(&blob, buffer, buffer_size);
         bin = anv_shader_bin_create_from_blob(device, &blob);
         free(buffer);

         if (bin) {
            if (cache)
               anv_pipeline_cache_add_shader_bin(cache, bin);
            return bin;
         }
      }
   }
#endif

   return NULL;
}

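/* Upload a freshly compiled kernel, going through the pipeline cache when
 * one is provided, and mirror the result into the on-disk cache.  In every
 * path the caller ends up owning one reference to the returned bin.
 */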
struct anv_shader_bin *
anv_device_upload_kernel(struct anv_device *device,
                         struct anv_pipeline_cache *cache,
                         const void *key_data, uint32_t key_size,
                         const void *kernel_data, uint32_t kernel_size,
                         const void *constant_data,
                         uint32_t constant_data_size,
                         const struct brw_stage_prog_data *prog_data,
                         uint32_t prog_data_size,
                         const struct brw_compile_stats *stats,
                         uint32_t num_stats,
                         const nir_xfb_info *xfb_info,
                         const struct anv_pipeline_bind_map *bind_map)
{
   struct anv_shader_bin *bin;
   if (cache) {
      bin = anv_pipeline_cache_upload_kernel(cache, key_data, key_size,
                                             kernel_data, kernel_size,
                                             constant_data, constant_data_size,
                                             prog_data, prog_data_size,
                                             stats, num_stats,
                                             xfb_info, bind_map);
   } else {
      bin = anv_shader_bin_create(device, key_data, key_size,
                                  kernel_data, kernel_size,
                                  constant_data, constant_data_size,
                                  prog_data, prog_data_size,
                                  stats, num_stats,
                                  xfb_info, bind_map);
   }

   if (bin == NULL)
      return NULL;

#ifdef ENABLE_SHADER_CACHE
   struct disk_cache *disk_cache = device->physical->disk_cache;
   if (disk_cache) {
      struct blob binary;
      blob_init(&binary);
      if (anv_shader_bin_write_to_blob(bin, &binary)) {
         cache_key cache_key;
         disk_cache_compute_key(disk_cache, key_data, key_size, cache_key);

         disk_cache_put(disk_cache, cache_key, binary.data, binary.size, NULL);
      }

      blob_finish(&binary);
   }
#endif

   return bin;
}

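/* NIR is cached in serialized form, ralloc'd off cache->nir_cache.  The
 * data[0] member is the old zero-length-array idiom for a trailing flexible
 * array of serialized bytes.
 */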
struct serialized_nir {
   unsigned char sha1_key[20];
   size_t size;
   char data[0];
};

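/* Look up serialized NIR by SHA-1 and deserialize it into mem_ctx.  Only the
 * table lookup happens under the mutex; deserialization runs outside it
 * because entries are immutable once inserted.
 */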
struct nir_shader *
anv_device_search_for_nir(struct anv_device *device,
                          struct anv_pipeline_cache *cache,
                          const nir_shader_compiler_options *nir_options,
                          unsigned char sha1_key[20],
                          void *mem_ctx)
{
   if (cache && cache->nir_cache) {
      const struct serialized_nir *snir = NULL;

      pthread_mutex_lock(&cache->mutex);
      struct hash_entry *entry =
         _mesa_hash_table_search(cache->nir_cache, sha1_key);
      if (entry)
         snir = entry->data;
      pthread_mutex_unlock(&cache->mutex);

      if (snir) {
         struct blob_reader blob;
         blob_reader_init(&blob, snir->data, snir->size);

         nir_shader *nir = nir_deserialize(mem_ctx, nir_options, &blob);
         if (blob.overrun) {
            ralloc_free(nir);
         } else {
            return nir;
         }
      }
   }

   return NULL;
}

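/* Insert serialized NIR keyed by SHA-1.  The table is checked twice: once
 * before serializing, as a cheap early-out, and again under the mutex before
 * inserting, in case another thread uploaded the same shader in the
 * meantime.
 */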
void
anv_device_upload_nir(struct anv_device *device,
                      struct anv_pipeline_cache *cache,
                      const struct nir_shader *nir,
                      unsigned char sha1_key[20])
{
   if (cache && cache->nir_cache) {
      pthread_mutex_lock(&cache->mutex);
      struct hash_entry *entry =
         _mesa_hash_table_search(cache->nir_cache, sha1_key);
      pthread_mutex_unlock(&cache->mutex);
      if (entry)
         return;

      struct blob blob;
      blob_init(&blob);

      nir_serialize(&blob, nir, false);
      if (blob.out_of_memory) {
         blob_finish(&blob);
         return;
      }

      pthread_mutex_lock(&cache->mutex);
      /* Because ralloc isn't thread-safe, we have to do all this inside the
       * lock.  We could unlock for the big memcpy but it's probably not
       * worth the hassle.
       */
      entry = _mesa_hash_table_search(cache->nir_cache, sha1_key);
      if (entry) {
         blob_finish(&blob);
         pthread_mutex_unlock(&cache->mutex);
         return;
      }

      struct serialized_nir *snir =
         ralloc_size(cache->nir_cache, sizeof(*snir) + blob.size);
      memcpy(snir->sha1_key, sha1_key, 20);
      snir->size = blob.size;
      memcpy(snir->data, blob.data, blob.size);

      blob_finish(&blob);

      _mesa_hash_table_insert(cache->nir_cache, snir->sha1_key, snir);

      pthread_mutex_unlock(&cache->mutex);
   }
}