anv: Put image params in the descriptor set buffer on gen8 and earlier
mesa.git: src/intel/vulkan/anv_pipeline_cache.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "compiler/blob.h"
#include "util/hash_table.h"
#include "util/debug.h"
#include "util/disk_cache.h"
#include "util/mesa-sha1.h"
#include "nir/nir_serialize.h"
#include "anv_private.h"
#include "nir/nir_xfb_info.h"

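/* Create a new anv_shader_bin.  The bin itself, its key, the prog_data, the
 * prog_data params, any xfb info, and the bind map tables are all packed
 * into one ANV_MULTIALLOC allocation, so the single vk_free() in
 * anv_shader_bin_destroy() releases everything; every input is deep-copied.
 * The kernel binary is uploaded to the instruction state pool and any
 * constant data to the dynamic state pool.
 */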
struct anv_shader_bin *
anv_shader_bin_create(struct anv_device *device,
                      const void *key_data, uint32_t key_size,
                      const void *kernel_data, uint32_t kernel_size,
                      const void *constant_data, uint32_t constant_data_size,
                      const struct brw_stage_prog_data *prog_data_in,
                      uint32_t prog_data_size, const void *prog_data_param_in,
                      const nir_xfb_info *xfb_info_in,
                      const struct anv_pipeline_bind_map *bind_map)
{
   struct anv_shader_bin *shader;
   struct anv_shader_bin_key *key;
   struct brw_stage_prog_data *prog_data;
   uint32_t *prog_data_param;
   nir_xfb_info *xfb_info;
   struct anv_pipeline_binding *surface_to_descriptor, *sampler_to_descriptor;

   ANV_MULTIALLOC(ma);
   anv_multialloc_add(&ma, &shader, 1);
   anv_multialloc_add_size(&ma, &key, sizeof(*key) + key_size);
   anv_multialloc_add_size(&ma, &prog_data, prog_data_size);
   anv_multialloc_add(&ma, &prog_data_param, prog_data_in->nr_params);
   if (xfb_info_in) {
      uint32_t xfb_info_size = nir_xfb_info_size(xfb_info_in->output_count);
      anv_multialloc_add_size(&ma, &xfb_info, xfb_info_size);
   }
   anv_multialloc_add(&ma, &surface_to_descriptor,
                      bind_map->surface_count);
   anv_multialloc_add(&ma, &sampler_to_descriptor,
                      bind_map->sampler_count);

   if (!anv_multialloc_alloc(&ma, &device->alloc,
                             VK_SYSTEM_ALLOCATION_SCOPE_DEVICE))
      return NULL;

   shader->ref_cnt = 1;

   key->size = key_size;
   memcpy(key->data, key_data, key_size);
   shader->key = key;

   shader->kernel =
      anv_state_pool_alloc(&device->instruction_state_pool, kernel_size, 64);
   memcpy(shader->kernel.map, kernel_data, kernel_size);
   shader->kernel_size = kernel_size;

   if (constant_data_size) {
      shader->constant_data =
         anv_state_pool_alloc(&device->dynamic_state_pool,
                              constant_data_size, 32);
      memcpy(shader->constant_data.map, constant_data, constant_data_size);
   } else {
      shader->constant_data = ANV_STATE_NULL;
   }
   shader->constant_data_size = constant_data_size;

   memcpy(prog_data, prog_data_in, prog_data_size);
   memcpy(prog_data_param, prog_data_param_in,
          prog_data->nr_params * sizeof(*prog_data_param));
   prog_data->param = prog_data_param;
   shader->prog_data = prog_data;
   shader->prog_data_size = prog_data_size;

   if (xfb_info_in) {
      *xfb_info = *xfb_info_in;
      typed_memcpy(xfb_info->outputs, xfb_info_in->outputs,
                   xfb_info_in->output_count);
      shader->xfb_info = xfb_info;
   } else {
      shader->xfb_info = NULL;
   }

   shader->bind_map = *bind_map;
   typed_memcpy(surface_to_descriptor, bind_map->surface_to_descriptor,
                bind_map->surface_count);
   shader->bind_map.surface_to_descriptor = surface_to_descriptor;
   typed_memcpy(sampler_to_descriptor, bind_map->sampler_to_descriptor,
                bind_map->sampler_count);
   shader->bind_map.sampler_to_descriptor = sampler_to_descriptor;

   return shader;
}

void
anv_shader_bin_destroy(struct anv_device *device,
                       struct anv_shader_bin *shader)
{
   assert(shader->ref_cnt == 0);
   anv_state_pool_free(&device->instruction_state_pool, shader->kernel);
   anv_state_pool_free(&device->dynamic_state_pool, shader->constant_data);
   vk_free(&device->alloc, shader);
}

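/* Serialize a shader bin, for vkGetPipelineCacheData or the on-disk shader
 * cache.  Each variable-sized section is prefixed with its size (or element
 * count) so that anv_shader_bin_create_from_blob() can read it back.
 * Returns false if the blob ran out of memory.
 */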
static bool
anv_shader_bin_write_to_blob(const struct anv_shader_bin *shader,
                             struct blob *blob)
{
   blob_write_uint32(blob, shader->key->size);
   blob_write_bytes(blob, shader->key->data, shader->key->size);

   blob_write_uint32(blob, shader->kernel_size);
   blob_write_bytes(blob, shader->kernel.map, shader->kernel_size);

   blob_write_uint32(blob, shader->constant_data_size);
   blob_write_bytes(blob, shader->constant_data.map,
                    shader->constant_data_size);

   blob_write_uint32(blob, shader->prog_data_size);
   blob_write_bytes(blob, shader->prog_data, shader->prog_data_size);
   blob_write_bytes(blob, shader->prog_data->param,
                    shader->prog_data->nr_params *
                    sizeof(*shader->prog_data->param));

   if (shader->xfb_info) {
      uint32_t xfb_info_size =
         nir_xfb_info_size(shader->xfb_info->output_count);
      blob_write_uint32(blob, xfb_info_size);
      blob_write_bytes(blob, shader->xfb_info, xfb_info_size);
   } else {
      blob_write_uint32(blob, 0);
   }

   blob_write_uint32(blob, shader->bind_map.surface_count);
   blob_write_uint32(blob, shader->bind_map.sampler_count);
   blob_write_bytes(blob, shader->bind_map.surface_to_descriptor,
                    shader->bind_map.surface_count *
                    sizeof(*shader->bind_map.surface_to_descriptor));
   blob_write_bytes(blob, shader->bind_map.sampler_to_descriptor,
                    shader->bind_map.sampler_count *
                    sizeof(*shader->bind_map.sampler_to_descriptor));

   return !blob->out_of_memory;
}

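/* Deserialize a shader bin previously written by
 * anv_shader_bin_write_to_blob().  Returns NULL if the blob is overrun
 * (truncated or corrupt data) or if allocation fails.
 */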
static struct anv_shader_bin *
anv_shader_bin_create_from_blob(struct anv_device *device,
                                struct blob_reader *blob)
{
   uint32_t key_size = blob_read_uint32(blob);
   const void *key_data = blob_read_bytes(blob, key_size);

   uint32_t kernel_size = blob_read_uint32(blob);
   const void *kernel_data = blob_read_bytes(blob, kernel_size);

   uint32_t constant_data_size = blob_read_uint32(blob);
   const void *constant_data = blob_read_bytes(blob, constant_data_size);

   uint32_t prog_data_size = blob_read_uint32(blob);
   const struct brw_stage_prog_data *prog_data =
      blob_read_bytes(blob, prog_data_size);
   if (blob->overrun)
      return NULL;
   const void *prog_data_param =
      blob_read_bytes(blob, prog_data->nr_params * sizeof(*prog_data->param));

   const nir_xfb_info *xfb_info = NULL;
   uint32_t xfb_size = blob_read_uint32(blob);
   if (xfb_size)
      xfb_info = blob_read_bytes(blob, xfb_size);

   struct anv_pipeline_bind_map bind_map;
   bind_map.surface_count = blob_read_uint32(blob);
   bind_map.sampler_count = blob_read_uint32(blob);
   bind_map.surface_to_descriptor = (void *)
      blob_read_bytes(blob, bind_map.surface_count *
                            sizeof(*bind_map.surface_to_descriptor));
   bind_map.sampler_to_descriptor = (void *)
      blob_read_bytes(blob, bind_map.sampler_count *
                            sizeof(*bind_map.sampler_to_descriptor));

   if (blob->overrun)
      return NULL;

   return anv_shader_bin_create(device,
                                key_data, key_size,
                                kernel_data, kernel_size,
                                constant_data, constant_data_size,
                                prog_data, prog_data_size, prog_data_param,
                                xfb_info, &bind_map);
}

/* Remaining work:
 *
 * - Compact binding table layout so it's tight and not dependent on
 *   descriptor set layout.
 *
 * - Review prog_data struct for size and cacheability: struct
 *   brw_stage_prog_data has binding_table which uses a lot of uint32_t
 *   for 8-bit quantities etc.; use bit fields for all bools, e.g.
 *   dual_src_blend.
 */

static uint32_t
shader_bin_key_hash_func(const void *void_key)
{
   const struct anv_shader_bin_key *key = void_key;
   return _mesa_hash_data(key->data, key->size);
}

static bool
shader_bin_key_compare_func(const void *void_a, const void *void_b)
{
   const struct anv_shader_bin_key *a = void_a, *b = void_b;
   if (a->size != b->size)
      return false;

   return memcmp(a->data, b->data, a->size) == 0;
}

static uint32_t
sha1_hash_func(const void *sha1)
{
   return _mesa_hash_data(sha1, 20);
}

static bool
sha1_compare_func(const void *sha1_a, const void *sha1_b)
{
   return memcmp(sha1_a, sha1_b, 20) == 0;
}

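/* Initialize a pipeline cache.  When cache_enabled is false, both hash
 * tables are left NULL and every lookup or upload degenerates to a no-op;
 * shaders are then created uncached and owned entirely by the caller.
 */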
void
anv_pipeline_cache_init(struct anv_pipeline_cache *cache,
                        struct anv_device *device,
                        bool cache_enabled)
{
   cache->device = device;
   pthread_mutex_init(&cache->mutex, NULL);

   if (cache_enabled) {
      cache->cache = _mesa_hash_table_create(NULL, shader_bin_key_hash_func,
                                             shader_bin_key_compare_func);
      cache->nir_cache = _mesa_hash_table_create(NULL, sha1_hash_func,
                                                 sha1_compare_func);
   } else {
      cache->cache = NULL;
      cache->nir_cache = NULL;
   }
}

void
anv_pipeline_cache_finish(struct anv_pipeline_cache *cache)
{
   pthread_mutex_destroy(&cache->mutex);

   if (cache->cache) {
      /* This is a bit unfortunate.  In order to keep things from randomly
       * going away, the shader cache has to hold a reference to all shader
       * binaries it contains.  We unref them when we destroy the cache.
       */
      hash_table_foreach(cache->cache, entry)
         anv_shader_bin_unref(cache->device, entry->data);

      _mesa_hash_table_destroy(cache->cache, NULL);
   }

   if (cache->nir_cache) {
      hash_table_foreach(cache->nir_cache, entry)
         ralloc_free(entry->data);

      _mesa_hash_table_destroy(cache->nir_cache, NULL);
   }
}

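/* Must be called with cache->mutex held.  Builds a temporary
 * anv_shader_bin_key on the stack (a uint32_t VLA keeps it suitably sized
 * and aligned) and looks it up in the hash table.
 */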
static struct anv_shader_bin *
anv_pipeline_cache_search_locked(struct anv_pipeline_cache *cache,
                                 const void *key_data, uint32_t key_size)
{
   uint32_t vla[1 + DIV_ROUND_UP(key_size, sizeof(uint32_t))];
   struct anv_shader_bin_key *key = (void *)vla;
   key->size = key_size;
   memcpy(key->data, key_data, key_size);

   struct hash_entry *entry = _mesa_hash_table_search(cache->cache, key);
   if (entry)
      return entry->data;
   else
      return NULL;
}

struct anv_shader_bin *
anv_pipeline_cache_search(struct anv_pipeline_cache *cache,
                          const void *key_data, uint32_t key_size)
{
   if (!cache->cache)
      return NULL;

   pthread_mutex_lock(&cache->mutex);

   struct anv_shader_bin *shader =
      anv_pipeline_cache_search_locked(cache, key_data, key_size);

   pthread_mutex_unlock(&cache->mutex);

   /* We increment refcount before handing it to the caller */
   if (shader)
      anv_shader_bin_ref(shader);

   return shader;
}

static void
anv_pipeline_cache_add_shader_bin(struct anv_pipeline_cache *cache,
                                  struct anv_shader_bin *bin)
{
   if (!cache->cache)
      return;

   pthread_mutex_lock(&cache->mutex);

   struct hash_entry *entry = _mesa_hash_table_search(cache->cache, bin->key);
   if (entry == NULL) {
      /* Take a reference for the cache */
      anv_shader_bin_ref(bin);
      _mesa_hash_table_insert(cache->cache, bin->key, bin);
   }

   pthread_mutex_unlock(&cache->mutex);
}

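/* Must be called with cache->mutex held.  If a shader with the same key is
 * already in the cache, that one is returned instead of creating a
 * duplicate.  The returned bin carries only the cache's reference; callers
 * that keep it must take their own.
 */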
static struct anv_shader_bin *
anv_pipeline_cache_add_shader_locked(struct anv_pipeline_cache *cache,
                                     const void *key_data, uint32_t key_size,
                                     const void *kernel_data,
                                     uint32_t kernel_size,
                                     const void *constant_data,
                                     uint32_t constant_data_size,
                                     const struct brw_stage_prog_data *prog_data,
                                     uint32_t prog_data_size,
                                     const void *prog_data_param,
                                     const nir_xfb_info *xfb_info,
                                     const struct anv_pipeline_bind_map *bind_map)
{
   struct anv_shader_bin *shader =
      anv_pipeline_cache_search_locked(cache, key_data, key_size);
   if (shader)
      return shader;

   struct anv_shader_bin *bin =
      anv_shader_bin_create(cache->device, key_data, key_size,
                            kernel_data, kernel_size,
                            constant_data, constant_data_size,
                            prog_data, prog_data_size, prog_data_param,
                            xfb_info, bind_map);
   if (!bin)
      return NULL;

   _mesa_hash_table_insert(cache->cache, bin->key, bin);

   return bin;
}

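/* Add a freshly compiled kernel to the cache, or return the existing entry
 * with the same key.  With caching enabled the cache keeps one reference
 * and the caller receives its own; with caching disabled the caller owns
 * the new shader bin outright.
 */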
struct anv_shader_bin *
anv_pipeline_cache_upload_kernel(struct anv_pipeline_cache *cache,
                                 const void *key_data, uint32_t key_size,
                                 const void *kernel_data, uint32_t kernel_size,
                                 const void *constant_data,
                                 uint32_t constant_data_size,
                                 const struct brw_stage_prog_data *prog_data,
                                 uint32_t prog_data_size,
                                 const nir_xfb_info *xfb_info,
                                 const struct anv_pipeline_bind_map *bind_map)
{
   if (cache->cache) {
      pthread_mutex_lock(&cache->mutex);

      struct anv_shader_bin *bin =
         anv_pipeline_cache_add_shader_locked(cache, key_data, key_size,
                                              kernel_data, kernel_size,
                                              constant_data, constant_data_size,
                                              prog_data, prog_data_size,
                                              prog_data->param,
                                              xfb_info, bind_map);

      pthread_mutex_unlock(&cache->mutex);

      /* We increment refcount before handing it to the caller */
      if (bin)
         anv_shader_bin_ref(bin);

      return bin;
   } else {
      /* In this case, we're not caching it, so the caller owns it entirely */
      return anv_shader_bin_create(cache->device, key_data, key_size,
                                   kernel_data, kernel_size,
                                   constant_data, constant_data_size,
                                   prog_data, prog_data_size,
                                   prog_data->param,
                                   xfb_info, bind_map);
   }
}

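/* The pipeline cache data blob begins with this header, whose layout is
 * mandated by the Vulkan spec for vkGetPipelineCacheData().
 */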
struct cache_header {
   uint32_t header_size;
   uint32_t header_version;
   uint32_t vendor_id;
   uint32_t device_id;
   uint8_t  uuid[VK_UUID_SIZE];
};

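/* Populate the cache from a blob produced by a previous
 * vkGetPipelineCacheData().  Per the Vulkan spec, data that doesn't match
 * (wrong version, vendor, device, or UUID) is silently ignored rather than
 * treated as an error.
 */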
static void
anv_pipeline_cache_load(struct anv_pipeline_cache *cache,
                        const void *data, size_t size)
{
   struct anv_device *device = cache->device;
   struct anv_physical_device *pdevice = &device->instance->physicalDevice;

   if (cache->cache == NULL)
      return;

   struct blob_reader blob;
   blob_reader_init(&blob, data, size);

   struct cache_header header;
   blob_copy_bytes(&blob, &header, sizeof(header));
   uint32_t count = blob_read_uint32(&blob);
   if (blob.overrun)
      return;

   if (header.header_size < sizeof(header))
      return;
   if (header.header_version != VK_PIPELINE_CACHE_HEADER_VERSION_ONE)
      return;
   if (header.vendor_id != 0x8086)
      return;
   if (header.device_id != device->chipset_id)
      return;
   if (memcmp(header.uuid, pdevice->pipeline_cache_uuid, VK_UUID_SIZE) != 0)
      return;

   for (uint32_t i = 0; i < count; i++) {
      struct anv_shader_bin *bin =
         anv_shader_bin_create_from_blob(device, &blob);
      if (!bin)
         break;
      _mesa_hash_table_insert(cache->cache, bin->key, bin);
   }
}

VkResult anv_CreatePipelineCache(
    VkDevice                                    _device,
    const VkPipelineCacheCreateInfo*            pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkPipelineCache*                            pPipelineCache)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_pipeline_cache *cache;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO);
   assert(pCreateInfo->flags == 0);

   cache = vk_alloc2(&device->alloc, pAllocator,
                     sizeof(*cache), 8,
                     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (cache == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   anv_pipeline_cache_init(cache, device,
                           device->instance->pipeline_cache_enabled);

   if (pCreateInfo->initialDataSize > 0)
      anv_pipeline_cache_load(cache,
                              pCreateInfo->pInitialData,
                              pCreateInfo->initialDataSize);

   *pPipelineCache = anv_pipeline_cache_to_handle(cache);

   return VK_SUCCESS;
}

void anv_DestroyPipelineCache(
    VkDevice                                    _device,
    VkPipelineCache                             _cache,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache);

   if (!cache)
      return;

   anv_pipeline_cache_finish(cache);

   vk_free2(&device->alloc, pAllocator, cache);
}

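/* Implements the usual Vulkan two-call idiom: when pData is NULL, only the
 * required size is returned in *pDataSize; otherwise at most *pDataSize
 * bytes are written and VK_INCOMPLETE is returned if not everything fit.
 */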
VkResult anv_GetPipelineCacheData(
    VkDevice                                    _device,
    VkPipelineCache                             _cache,
    size_t*                                     pDataSize,
    void*                                       pData)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache);
   struct anv_physical_device *pdevice = &device->instance->physicalDevice;

   struct blob blob;
   if (pData) {
      blob_init_fixed(&blob, pData, *pDataSize);
   } else {
      blob_init_fixed(&blob, NULL, SIZE_MAX);
   }

   struct cache_header header = {
      .header_size = sizeof(struct cache_header),
      .header_version = VK_PIPELINE_CACHE_HEADER_VERSION_ONE,
      .vendor_id = 0x8086,
      .device_id = device->chipset_id,
   };
   memcpy(header.uuid, pdevice->pipeline_cache_uuid, VK_UUID_SIZE);
   blob_write_bytes(&blob, &header, sizeof(header));

   uint32_t count = 0;
   intptr_t count_offset = blob_reserve_uint32(&blob);
   if (count_offset < 0) {
      *pDataSize = 0;
      blob_finish(&blob);
      return VK_INCOMPLETE;
   }

   VkResult result = VK_SUCCESS;
   if (cache->cache) {
      hash_table_foreach(cache->cache, entry) {
         struct anv_shader_bin *shader = entry->data;

         size_t save_size = blob.size;
         if (!anv_shader_bin_write_to_blob(shader, &blob)) {
            /* If it fails, reset to the previous size and bail */
            blob.size = save_size;
            result = VK_INCOMPLETE;
            break;
         }

         count++;
      }
   }

   blob_overwrite_uint32(&blob, count_offset, count);

   *pDataSize = blob.size;

   blob_finish(&blob);

   return result;
}

VkResult anv_MergePipelineCaches(
    VkDevice                                    _device,
    VkPipelineCache                             destCache,
    uint32_t                                    srcCacheCount,
    const VkPipelineCache*                      pSrcCaches)
{
   ANV_FROM_HANDLE(anv_pipeline_cache, dst, destCache);

   if (!dst->cache)
      return VK_SUCCESS;

   for (uint32_t i = 0; i < srcCacheCount; i++) {
      ANV_FROM_HANDLE(anv_pipeline_cache, src, pSrcCaches[i]);
      if (!src->cache)
         continue;

      hash_table_foreach(src->cache, entry) {
         struct anv_shader_bin *bin = entry->data;
         assert(bin);

         if (_mesa_hash_table_search(dst->cache, bin->key))
            continue;

         anv_shader_bin_ref(bin);
         _mesa_hash_table_insert(dst->cache, bin->key, bin);
      }
   }

   return VK_SUCCESS;
}

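/* Look up a compiled kernel, first in the given in-memory pipeline cache
 * and then in the on-disk shader cache.  *user_cache_hit is set when the
 * hit came from an application-provided cache rather than the device's
 * default one.  A disk cache hit is also promoted into the in-memory
 * cache so subsequent lookups stay in memory.
 */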
struct anv_shader_bin *
anv_device_search_for_kernel(struct anv_device *device,
                             struct anv_pipeline_cache *cache,
                             const void *key_data, uint32_t key_size,
                             bool *user_cache_hit)
{
   struct anv_shader_bin *bin;

   *user_cache_hit = false;

   if (cache) {
      bin = anv_pipeline_cache_search(cache, key_data, key_size);
      if (bin) {
         *user_cache_hit = cache != &device->default_pipeline_cache;
         return bin;
      }
   }

#ifdef ENABLE_SHADER_CACHE
   struct disk_cache *disk_cache = device->instance->physicalDevice.disk_cache;
   if (disk_cache && device->instance->pipeline_cache_enabled) {
      cache_key cache_key;
      disk_cache_compute_key(disk_cache, key_data, key_size, cache_key);

      size_t buffer_size;
      uint8_t *buffer = disk_cache_get(disk_cache, cache_key, &buffer_size);
      if (buffer) {
         struct blob_reader blob;
         blob_reader_init(&blob, buffer, buffer_size);
         bin = anv_shader_bin_create_from_blob(device, &blob);
         free(buffer);

         if (bin) {
            if (cache)
               anv_pipeline_cache_add_shader_bin(cache, bin);
            return bin;
         }
      }
   }
#endif

   return NULL;
}

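/* Compile-side counterpart of anv_device_search_for_kernel(): wrap the
 * compiled kernel in an anv_shader_bin, add it to the in-memory cache (if
 * any), and serialize it into the on-disk shader cache when one is
 * available.
 */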
struct anv_shader_bin *
anv_device_upload_kernel(struct anv_device *device,
                         struct anv_pipeline_cache *cache,
                         const void *key_data, uint32_t key_size,
                         const void *kernel_data, uint32_t kernel_size,
                         const void *constant_data,
                         uint32_t constant_data_size,
                         const struct brw_stage_prog_data *prog_data,
                         uint32_t prog_data_size,
                         const nir_xfb_info *xfb_info,
                         const struct anv_pipeline_bind_map *bind_map)
{
   struct anv_shader_bin *bin;
   if (cache) {
      bin = anv_pipeline_cache_upload_kernel(cache, key_data, key_size,
                                             kernel_data, kernel_size,
                                             constant_data, constant_data_size,
                                             prog_data, prog_data_size,
                                             xfb_info, bind_map);
   } else {
      bin = anv_shader_bin_create(device, key_data, key_size,
                                  kernel_data, kernel_size,
                                  constant_data, constant_data_size,
                                  prog_data, prog_data_size,
                                  prog_data->param,
                                  xfb_info, bind_map);
   }

   if (bin == NULL)
      return NULL;

#ifdef ENABLE_SHADER_CACHE
   struct disk_cache *disk_cache = device->instance->physicalDevice.disk_cache;
   if (disk_cache) {
      struct blob binary;
      blob_init(&binary);
      if (anv_shader_bin_write_to_blob(bin, &binary)) {
         cache_key cache_key;
         disk_cache_compute_key(disk_cache, key_data, key_size, cache_key);

         disk_cache_put(disk_cache, cache_key, binary.data, binary.size, NULL);
      }

      blob_finish(&binary);
   }
#endif

   return bin;
}

struct serialized_nir {
   unsigned char sha1_key[20];
   size_t size;
   char data[0];
};

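/* Look up serialized NIR in the cache by SHA-1 key and deserialize it into
 * mem_ctx.  Returns NULL on a miss or if deserialization overruns the
 * stored blob.
 */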
struct nir_shader *
anv_device_search_for_nir(struct anv_device *device,
                          struct anv_pipeline_cache *cache,
                          const nir_shader_compiler_options *nir_options,
                          unsigned char sha1_key[20],
                          void *mem_ctx)
{
   if (cache && cache->nir_cache) {
      const struct serialized_nir *snir = NULL;

      pthread_mutex_lock(&cache->mutex);
      struct hash_entry *entry =
         _mesa_hash_table_search(cache->nir_cache, sha1_key);
      if (entry)
         snir = entry->data;
      pthread_mutex_unlock(&cache->mutex);

      if (snir) {
         struct blob_reader blob;
         blob_reader_init(&blob, snir->data, snir->size);

         nir_shader *nir = nir_deserialize(mem_ctx, nir_options, &blob);
         if (blob.overrun) {
            ralloc_free(nir);
         } else {
            return nir;
         }
      }
   }

   return NULL;
}

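/* Serialize NIR into the cache under the given SHA-1 key.  The key is
 * looked up twice: once before the (potentially expensive) serialization
 * and again under the lock before inserting, since another thread may
 * have uploaded the same shader in between.
 */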
void
anv_device_upload_nir(struct anv_device *device,
                      struct anv_pipeline_cache *cache,
                      const struct nir_shader *nir,
                      unsigned char sha1_key[20])
{
   if (cache && cache->nir_cache) {
      pthread_mutex_lock(&cache->mutex);
      struct hash_entry *entry =
         _mesa_hash_table_search(cache->nir_cache, sha1_key);
      pthread_mutex_unlock(&cache->mutex);
      if (entry)
         return;

      struct blob blob;
      blob_init(&blob);

      nir_serialize(&blob, nir);
      if (blob.out_of_memory) {
         blob_finish(&blob);
         return;
      }

      pthread_mutex_lock(&cache->mutex);
      /* Because ralloc isn't thread-safe, we have to do all this inside the
       * lock.  We could unlock for the big memcpy, but it's probably not
       * worth the hassle.
       */
      entry = _mesa_hash_table_search(cache->nir_cache, sha1_key);
      if (entry) {
         blob_finish(&blob);
         pthread_mutex_unlock(&cache->mutex);
         return;
      }

      struct serialized_nir *snir =
         ralloc_size(cache->nir_cache, sizeof(*snir) + blob.size);
      memcpy(snir->sha1_key, sha1_key, 20);
      snir->size = blob.size;
      memcpy(snir->data, blob.data, blob.size);

      blob_finish(&blob);

      _mesa_hash_table_insert(cache->nir_cache, snir->sha1_key, snir);

      pthread_mutex_unlock(&cache->mutex);
   }
}