/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/blob.h"
#include "util/hash_table.h"
#include "util/debug.h"
#include "util/disk_cache.h"
#include "util/mesa-sha1.h"
#include "nir/nir_serialize.h"
#include "anv_private.h"
#include "nir/nir_xfb_info.h"

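/* Creates a shader binary.  The bin and everything it owns (key, prog_data,
 * prog_data params, xfb info and the bind map's descriptor tables) are carved
 * out of a single multialloc allocation; the kernel is copied into the
 * instruction state pool and any constant data into the dynamic state pool.
 * The returned bin starts with a reference count of 1.
 */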
struct anv_shader_bin *
anv_shader_bin_create(struct anv_device *device,
                      const void *key_data, uint32_t key_size,
                      const void *kernel_data, uint32_t kernel_size,
                      const void *constant_data, uint32_t constant_data_size,
                      const struct brw_stage_prog_data *prog_data_in,
                      uint32_t prog_data_size, const void *prog_data_param_in,
                      const struct brw_compile_stats *stats, uint32_t num_stats,
                      const nir_xfb_info *xfb_info_in,
                      const struct anv_pipeline_bind_map *bind_map)
{
   struct anv_shader_bin *shader;
   struct anv_shader_bin_key *key;
   struct brw_stage_prog_data *prog_data;
   uint32_t *prog_data_param;
   nir_xfb_info *xfb_info;
   struct anv_pipeline_binding *surface_to_descriptor, *sampler_to_descriptor;

   ANV_MULTIALLOC(ma);
   anv_multialloc_add(&ma, &shader, 1);
   anv_multialloc_add_size(&ma, &key, sizeof(*key) + key_size);
   anv_multialloc_add_size(&ma, &prog_data, prog_data_size);
   anv_multialloc_add(&ma, &prog_data_param, prog_data_in->nr_params);
   if (xfb_info_in) {
      uint32_t xfb_info_size = nir_xfb_info_size(xfb_info_in->output_count);
      anv_multialloc_add_size(&ma, &xfb_info, xfb_info_size);
   }
   anv_multialloc_add(&ma, &surface_to_descriptor,
                      bind_map->surface_count);
   anv_multialloc_add(&ma, &sampler_to_descriptor,
                      bind_map->sampler_count);

   if (!anv_multialloc_alloc(&ma, &device->alloc,
                             VK_SYSTEM_ALLOCATION_SCOPE_DEVICE))
      return NULL;

   shader->ref_cnt = 1;

   key->size = key_size;
   memcpy(key->data, key_data, key_size);
   shader->key = key;

   shader->kernel =
      anv_state_pool_alloc(&device->instruction_state_pool, kernel_size, 64);
   memcpy(shader->kernel.map, kernel_data, kernel_size);
   shader->kernel_size = kernel_size;

   if (constant_data_size) {
      shader->constant_data =
         anv_state_pool_alloc(&device->dynamic_state_pool,
                              constant_data_size, 32);
      memcpy(shader->constant_data.map, constant_data, constant_data_size);
   } else {
      shader->constant_data = ANV_STATE_NULL;
   }
   shader->constant_data_size = constant_data_size;

   memcpy(prog_data, prog_data_in, prog_data_size);
   memcpy(prog_data_param, prog_data_param_in,
          prog_data->nr_params * sizeof(*prog_data_param));
   prog_data->param = prog_data_param;
   shader->prog_data = prog_data;
   shader->prog_data_size = prog_data_size;

   assert(num_stats <= ARRAY_SIZE(shader->stats));
   typed_memcpy(shader->stats, stats, num_stats);
   shader->num_stats = num_stats;

   if (xfb_info_in) {
      *xfb_info = *xfb_info_in;
      typed_memcpy(xfb_info->outputs, xfb_info_in->outputs,
                   xfb_info_in->output_count);
      shader->xfb_info = xfb_info;
   } else {
      shader->xfb_info = NULL;
   }

   shader->bind_map = *bind_map;
   typed_memcpy(surface_to_descriptor, bind_map->surface_to_descriptor,
                bind_map->surface_count);
   shader->bind_map.surface_to_descriptor = surface_to_descriptor;
   typed_memcpy(sampler_to_descriptor, bind_map->sampler_to_descriptor,
                bind_map->sampler_count);
   shader->bind_map.sampler_to_descriptor = sampler_to_descriptor;

   return shader;
}

void
anv_shader_bin_destroy(struct anv_device *device,
                       struct anv_shader_bin *shader)
{
   assert(shader->ref_cnt == 0);
   anv_state_pool_free(&device->instruction_state_pool, shader->kernel);
   anv_state_pool_free(&device->dynamic_state_pool, shader->constant_data);
   vk_free(&device->alloc, shader);
}

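/* Serializes a shader binary into a blob.  The field order here must stay in
 * sync with anv_shader_bin_create_from_blob() below.  Returns false if the
 * blob ran out of memory (or out of space, for fixed-size blobs).
 */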
static bool
anv_shader_bin_write_to_blob(const struct anv_shader_bin *shader,
                             struct blob *blob)
{
   blob_write_uint32(blob, shader->key->size);
   blob_write_bytes(blob, shader->key->data, shader->key->size);

   blob_write_uint32(blob, shader->kernel_size);
   blob_write_bytes(blob, shader->kernel.map, shader->kernel_size);

   blob_write_uint32(blob, shader->constant_data_size);
   blob_write_bytes(blob, shader->constant_data.map,
                    shader->constant_data_size);

   blob_write_uint32(blob, shader->prog_data_size);
   blob_write_bytes(blob, shader->prog_data, shader->prog_data_size);
   blob_write_bytes(blob, shader->prog_data->param,
                    shader->prog_data->nr_params *
                    sizeof(*shader->prog_data->param));

   blob_write_uint32(blob, shader->num_stats);
   blob_write_bytes(blob, shader->stats,
                    shader->num_stats * sizeof(shader->stats[0]));

   if (shader->xfb_info) {
      uint32_t xfb_info_size =
         nir_xfb_info_size(shader->xfb_info->output_count);
      blob_write_uint32(blob, xfb_info_size);
      blob_write_bytes(blob, shader->xfb_info, xfb_info_size);
   } else {
      blob_write_uint32(blob, 0);
   }

   blob_write_bytes(blob, shader->bind_map.surface_sha1,
                    sizeof(shader->bind_map.surface_sha1));
   blob_write_bytes(blob, shader->bind_map.sampler_sha1,
                    sizeof(shader->bind_map.sampler_sha1));
   blob_write_bytes(blob, shader->bind_map.push_sha1,
                    sizeof(shader->bind_map.push_sha1));
   blob_write_uint32(blob, shader->bind_map.surface_count);
   blob_write_uint32(blob, shader->bind_map.sampler_count);
   blob_write_bytes(blob, shader->bind_map.surface_to_descriptor,
                    shader->bind_map.surface_count *
                    sizeof(*shader->bind_map.surface_to_descriptor));
   blob_write_bytes(blob, shader->bind_map.sampler_to_descriptor,
                    shader->bind_map.sampler_count *
                    sizeof(*shader->bind_map.sampler_to_descriptor));
   blob_write_bytes(blob, shader->bind_map.push_ranges,
                    sizeof(shader->bind_map.push_ranges));

   return !blob->out_of_memory;
}

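/* Counterpart to anv_shader_bin_write_to_blob(): reads the fields back in the
 * same order and rebuilds the shader binary.  Returns NULL if the blob is
 * overrun, i.e. the serialized data is truncated or corrupt.
 */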
static struct anv_shader_bin *
anv_shader_bin_create_from_blob(struct anv_device *device,
                                struct blob_reader *blob)
{
   uint32_t key_size = blob_read_uint32(blob);
   const void *key_data = blob_read_bytes(blob, key_size);

   uint32_t kernel_size = blob_read_uint32(blob);
   const void *kernel_data = blob_read_bytes(blob, kernel_size);

   uint32_t constant_data_size = blob_read_uint32(blob);
   const void *constant_data = blob_read_bytes(blob, constant_data_size);

   uint32_t prog_data_size = blob_read_uint32(blob);
   const struct brw_stage_prog_data *prog_data =
      blob_read_bytes(blob, prog_data_size);
   if (blob->overrun)
      return NULL;
   const void *prog_data_param =
      blob_read_bytes(blob, prog_data->nr_params * sizeof(*prog_data->param));

   uint32_t num_stats = blob_read_uint32(blob);
   const struct brw_compile_stats *stats =
      blob_read_bytes(blob, num_stats * sizeof(stats[0]));

   const nir_xfb_info *xfb_info = NULL;
   uint32_t xfb_size = blob_read_uint32(blob);
   if (xfb_size)
      xfb_info = blob_read_bytes(blob, xfb_size);

   struct anv_pipeline_bind_map bind_map;
   blob_copy_bytes(blob, bind_map.surface_sha1, sizeof(bind_map.surface_sha1));
   blob_copy_bytes(blob, bind_map.sampler_sha1, sizeof(bind_map.sampler_sha1));
   blob_copy_bytes(blob, bind_map.push_sha1, sizeof(bind_map.push_sha1));
   bind_map.surface_count = blob_read_uint32(blob);
   bind_map.sampler_count = blob_read_uint32(blob);
   bind_map.surface_to_descriptor = (void *)
      blob_read_bytes(blob, bind_map.surface_count *
                            sizeof(*bind_map.surface_to_descriptor));
   bind_map.sampler_to_descriptor = (void *)
      blob_read_bytes(blob, bind_map.sampler_count *
                            sizeof(*bind_map.sampler_to_descriptor));
   blob_copy_bytes(blob, bind_map.push_ranges, sizeof(bind_map.push_ranges));

   if (blob->overrun)
      return NULL;

   return anv_shader_bin_create(device,
                                key_data, key_size,
                                kernel_data, kernel_size,
                                constant_data, constant_data_size,
                                prog_data, prog_data_size, prog_data_param,
                                stats, num_stats, xfb_info, &bind_map);
}

/* Remaining work:
 *
 * - Compact the binding table layout so that it's tight and not dependent on
 *   the descriptor set layout.
 *
 * - Review the prog_data struct for size and cacheability: struct
 *   brw_stage_prog_data has a binding_table that uses a lot of uint32_t for
 *   8-bit quantities, etc.; use bit fields for all bools, e.g. dual_src_blend.
 */

static uint32_t
shader_bin_key_hash_func(const void *void_key)
{
   const struct anv_shader_bin_key *key = void_key;
   return _mesa_hash_data(key->data, key->size);
}

static bool
shader_bin_key_compare_func(const void *void_a, const void *void_b)
{
   const struct anv_shader_bin_key *a = void_a, *b = void_b;
   if (a->size != b->size)
      return false;

   return memcmp(a->data, b->data, a->size) == 0;
}

static uint32_t
sha1_hash_func(const void *sha1)
{
   return _mesa_hash_data(sha1, 20);
}

static bool
sha1_compare_func(const void *sha1_a, const void *sha1_b)
{
   return memcmp(sha1_a, sha1_b, 20) == 0;
}

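/* When caching is disabled, both hash tables are left NULL; the rest of this
 * file treats a NULL table as "no caching".
 */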
void
anv_pipeline_cache_init(struct anv_pipeline_cache *cache,
                        struct anv_device *device,
                        bool cache_enabled)
{
   cache->device = device;
   pthread_mutex_init(&cache->mutex, NULL);

   if (cache_enabled) {
      cache->cache = _mesa_hash_table_create(NULL, shader_bin_key_hash_func,
                                             shader_bin_key_compare_func);
      cache->nir_cache = _mesa_hash_table_create(NULL, sha1_hash_func,
                                                 sha1_compare_func);
   } else {
      cache->cache = NULL;
      cache->nir_cache = NULL;
   }
}

void
anv_pipeline_cache_finish(struct anv_pipeline_cache *cache)
{
   pthread_mutex_destroy(&cache->mutex);

   if (cache->cache) {
      /* This is a bit unfortunate.  In order to keep things from randomly
       * going away, the shader cache has to hold a reference to all shader
       * binaries it contains.  We unref them when we destroy the cache.
       */
      hash_table_foreach(cache->cache, entry)
         anv_shader_bin_unref(cache->device, entry->data);

      _mesa_hash_table_destroy(cache->cache, NULL);
   }

   if (cache->nir_cache) {
      hash_table_foreach(cache->nir_cache, entry)
         ralloc_free(entry->data);

      _mesa_hash_table_destroy(cache->nir_cache, NULL);
   }
}

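/* Looks up a shader binary by key.  A temporary anv_shader_bin_key is built
 * on the stack (the uint32_t VLA just provides suitably sized and aligned
 * storage).  The caller must hold cache->mutex.
 */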
static struct anv_shader_bin *
anv_pipeline_cache_search_locked(struct anv_pipeline_cache *cache,
                                 const void *key_data, uint32_t key_size)
{
   uint32_t vla[1 + DIV_ROUND_UP(key_size, sizeof(uint32_t))];
   struct anv_shader_bin_key *key = (void *)vla;
   key->size = key_size;
   memcpy(key->data, key_data, key_size);

   struct hash_entry *entry = _mesa_hash_table_search(cache->cache, key);
   if (entry)
      return entry->data;
   else
      return NULL;
}

struct anv_shader_bin *
anv_pipeline_cache_search(struct anv_pipeline_cache *cache,
                          const void *key_data, uint32_t key_size)
{
   if (!cache->cache)
      return NULL;

   pthread_mutex_lock(&cache->mutex);

   struct anv_shader_bin *shader =
      anv_pipeline_cache_search_locked(cache, key_data, key_size);

   pthread_mutex_unlock(&cache->mutex);

   /* We increment refcount before handing it to the caller */
   if (shader)
      anv_shader_bin_ref(shader);

   return shader;
}

static void
anv_pipeline_cache_add_shader_bin(struct anv_pipeline_cache *cache,
                                  struct anv_shader_bin *bin)
{
   if (!cache->cache)
      return;

   pthread_mutex_lock(&cache->mutex);

   struct hash_entry *entry = _mesa_hash_table_search(cache->cache, bin->key);
   if (entry == NULL) {
      /* Take a reference for the cache */
      anv_shader_bin_ref(bin);
      _mesa_hash_table_insert(cache->cache, bin->key, bin);
   }

   pthread_mutex_unlock(&cache->mutex);
}

static struct anv_shader_bin *
anv_pipeline_cache_add_shader_locked(struct anv_pipeline_cache *cache,
                                     const void *key_data, uint32_t key_size,
                                     const void *kernel_data,
                                     uint32_t kernel_size,
                                     const void *constant_data,
                                     uint32_t constant_data_size,
                                     const struct brw_stage_prog_data *prog_data,
                                     uint32_t prog_data_size,
                                     const void *prog_data_param,
                                     const struct brw_compile_stats *stats,
                                     uint32_t num_stats,
                                     const nir_xfb_info *xfb_info,
                                     const struct anv_pipeline_bind_map *bind_map)
{
   struct anv_shader_bin *shader =
      anv_pipeline_cache_search_locked(cache, key_data, key_size);
   if (shader)
      return shader;

   struct anv_shader_bin *bin =
      anv_shader_bin_create(cache->device, key_data, key_size,
                            kernel_data, kernel_size,
                            constant_data, constant_data_size,
                            prog_data, prog_data_size, prog_data_param,
                            stats, num_stats, xfb_info, bind_map);
   if (!bin)
      return NULL;

   _mesa_hash_table_insert(cache->cache, bin->key, bin);

   return bin;
}

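/* Adds a freshly compiled kernel to the cache, deduplicating against any
 * existing entry with the same key, and returns it with an extra reference
 * for the caller.  With caching disabled, a standalone shader bin is created
 * and the caller owns it entirely.
 */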
struct anv_shader_bin *
anv_pipeline_cache_upload_kernel(struct anv_pipeline_cache *cache,
                                 const void *key_data, uint32_t key_size,
                                 const void *kernel_data, uint32_t kernel_size,
                                 const void *constant_data,
                                 uint32_t constant_data_size,
                                 const struct brw_stage_prog_data *prog_data,
                                 uint32_t prog_data_size,
                                 const struct brw_compile_stats *stats,
                                 uint32_t num_stats,
                                 const nir_xfb_info *xfb_info,
                                 const struct anv_pipeline_bind_map *bind_map)
{
   if (cache->cache) {
      pthread_mutex_lock(&cache->mutex);

      struct anv_shader_bin *bin =
         anv_pipeline_cache_add_shader_locked(cache, key_data, key_size,
                                              kernel_data, kernel_size,
                                              constant_data, constant_data_size,
                                              prog_data, prog_data_size,
                                              prog_data->param,
                                              stats, num_stats,
                                              xfb_info, bind_map);

      pthread_mutex_unlock(&cache->mutex);

      /* We increment refcount before handing it to the caller */
      if (bin)
         anv_shader_bin_ref(bin);

      return bin;
   } else {
      /* In this case, we're not caching it, so the caller owns it entirely */
      return anv_shader_bin_create(cache->device, key_data, key_size,
                                   kernel_data, kernel_size,
                                   constant_data, constant_data_size,
                                   prog_data, prog_data_size,
                                   prog_data->param,
                                   stats, num_stats,
                                   xfb_info, bind_map);
   }
}

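/* Layout of the VkPipelineCache data header required by the Vulkan spec
 * (VK_PIPELINE_CACHE_HEADER_VERSION_ONE): total header size, header version,
 * vendor ID, device ID, and the pipeline cache UUID.
 */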
struct cache_header {
   uint32_t header_size;
   uint32_t header_version;
   uint32_t vendor_id;
   uint32_t device_id;
   uint8_t uuid[VK_UUID_SIZE];
};

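/* Loads previously retrieved pipeline cache data.  The header is validated
 * against this device (vendor ID, device ID and pipeline cache UUID); data
 * that does not match, or is truncated, is silently ignored.
 */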
static void
anv_pipeline_cache_load(struct anv_pipeline_cache *cache,
                        const void *data, size_t size)
{
   struct anv_device *device = cache->device;
   struct anv_physical_device *pdevice = &device->instance->physicalDevice;

   if (cache->cache == NULL)
      return;

   struct blob_reader blob;
   blob_reader_init(&blob, data, size);

   struct cache_header header;
   blob_copy_bytes(&blob, &header, sizeof(header));
   uint32_t count = blob_read_uint32(&blob);
   if (blob.overrun)
      return;

   if (header.header_size < sizeof(header))
      return;
   if (header.header_version != VK_PIPELINE_CACHE_HEADER_VERSION_ONE)
      return;
   if (header.vendor_id != 0x8086)
      return;
   if (header.device_id != device->chipset_id)
      return;
   if (memcmp(header.uuid, pdevice->pipeline_cache_uuid, VK_UUID_SIZE) != 0)
      return;

   for (uint32_t i = 0; i < count; i++) {
      struct anv_shader_bin *bin =
         anv_shader_bin_create_from_blob(device, &blob);
      if (!bin)
         break;
      _mesa_hash_table_insert(cache->cache, bin->key, bin);
   }
}

VkResult anv_CreatePipelineCache(
    VkDevice                                    _device,
    const VkPipelineCacheCreateInfo*            pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkPipelineCache*                            pPipelineCache)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_pipeline_cache *cache;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO);
   assert(pCreateInfo->flags == 0);

   cache = vk_alloc2(&device->alloc, pAllocator,
                     sizeof(*cache), 8,
                     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (cache == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   anv_pipeline_cache_init(cache, device,
                           device->instance->pipeline_cache_enabled);

   if (pCreateInfo->initialDataSize > 0)
      anv_pipeline_cache_load(cache,
                              pCreateInfo->pInitialData,
                              pCreateInfo->initialDataSize);

   *pPipelineCache = anv_pipeline_cache_to_handle(cache);

   return VK_SUCCESS;
}

void anv_DestroyPipelineCache(
    VkDevice                                    _device,
    VkPipelineCache                             _cache,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache);

   if (!cache)
      return;

   anv_pipeline_cache_finish(cache);

   vk_free2(&device->alloc, pAllocator, cache);
}

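/* vkGetPipelineCacheData follows the usual Vulkan size-query idiom: with
 * pData == NULL the blob is given a NULL, SIZE_MAX backing store so only the
 * required size is computed and returned in *pDataSize.  If the provided
 * buffer is too small, as many whole shader binaries as fit are written and
 * VK_INCOMPLETE is returned.
 */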
VkResult anv_GetPipelineCacheData(
    VkDevice                                    _device,
    VkPipelineCache                             _cache,
    size_t*                                     pDataSize,
    void*                                       pData)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache);
   struct anv_physical_device *pdevice = &device->instance->physicalDevice;

   struct blob blob;
   if (pData) {
      blob_init_fixed(&blob, pData, *pDataSize);
   } else {
      blob_init_fixed(&blob, NULL, SIZE_MAX);
   }

   struct cache_header header = {
      .header_size = sizeof(struct cache_header),
      .header_version = VK_PIPELINE_CACHE_HEADER_VERSION_ONE,
      .vendor_id = 0x8086,
      .device_id = device->chipset_id,
   };
   memcpy(header.uuid, pdevice->pipeline_cache_uuid, VK_UUID_SIZE);
   blob_write_bytes(&blob, &header, sizeof(header));

   uint32_t count = 0;
   intptr_t count_offset = blob_reserve_uint32(&blob);
   if (count_offset < 0) {
      *pDataSize = 0;
      blob_finish(&blob);
      return VK_INCOMPLETE;
   }

   VkResult result = VK_SUCCESS;
   if (cache->cache) {
      hash_table_foreach(cache->cache, entry) {
         struct anv_shader_bin *shader = entry->data;

         size_t save_size = blob.size;
         if (!anv_shader_bin_write_to_blob(shader, &blob)) {
            /* If it fails, reset to the previous size and bail */
            blob.size = save_size;
            result = VK_INCOMPLETE;
            break;
         }

         count++;
      }
   }

   blob_overwrite_uint32(&blob, count_offset, count);

   *pDataSize = blob.size;

   blob_finish(&blob);

   return result;
}

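/* vkMergePipelineCaches copies every shader binary from the source caches
 * into the destination cache, taking a reference for each bin that is not
 * already present.
 */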
VkResult anv_MergePipelineCaches(
    VkDevice                                    _device,
    VkPipelineCache                             destCache,
    uint32_t                                    srcCacheCount,
    const VkPipelineCache*                      pSrcCaches)
{
   ANV_FROM_HANDLE(anv_pipeline_cache, dst, destCache);

   if (!dst->cache)
      return VK_SUCCESS;

   for (uint32_t i = 0; i < srcCacheCount; i++) {
      ANV_FROM_HANDLE(anv_pipeline_cache, src, pSrcCaches[i]);
      if (!src->cache)
         continue;

      hash_table_foreach(src->cache, entry) {
         struct anv_shader_bin *bin = entry->data;
         assert(bin);

         if (_mesa_hash_table_search(dst->cache, bin->key))
            continue;

         anv_shader_bin_ref(bin);
         _mesa_hash_table_insert(dst->cache, bin->key, bin);
      }
   }

   return VK_SUCCESS;
}

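/* Device-level kernel lookup.  The in-memory pipeline cache (application
 * provided or the device's default cache) is consulted first, then the
 * on-disk shader cache; a disk hit is also added to the in-memory cache.
 * *user_cache_hit is set only when the hit came from an application-provided
 * cache, not from device->default_pipeline_cache.  Callers are generally
 * expected to pair this with anv_device_upload_kernel(): search first and,
 * on a miss, compile and upload.
 */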
struct anv_shader_bin *
anv_device_search_for_kernel(struct anv_device *device,
                             struct anv_pipeline_cache *cache,
                             const void *key_data, uint32_t key_size,
                             bool *user_cache_hit)
{
   struct anv_shader_bin *bin;

   *user_cache_hit = false;

   if (cache) {
      bin = anv_pipeline_cache_search(cache, key_data, key_size);
      if (bin) {
         *user_cache_hit = cache != &device->default_pipeline_cache;
         return bin;
      }
   }

#ifdef ENABLE_SHADER_CACHE
   struct disk_cache *disk_cache = device->instance->physicalDevice.disk_cache;
   if (disk_cache && device->instance->pipeline_cache_enabled) {
      cache_key cache_key;
      disk_cache_compute_key(disk_cache, key_data, key_size, cache_key);

      size_t buffer_size;
      uint8_t *buffer = disk_cache_get(disk_cache, cache_key, &buffer_size);
      if (buffer) {
         struct blob_reader blob;
         blob_reader_init(&blob, buffer, buffer_size);
         bin = anv_shader_bin_create_from_blob(device, &blob);
         free(buffer);

         if (bin) {
            if (cache)
               anv_pipeline_cache_add_shader_bin(cache, bin);
            return bin;
         }
      }
   }
#endif

   return NULL;
}

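/* Device-level kernel upload: stores the kernel in the in-memory pipeline
 * cache (or creates an uncached shader bin when cache is NULL) and, when a
 * disk cache is available, also serializes it to disk so later runs can skip
 * compilation.
 */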
struct anv_shader_bin *
anv_device_upload_kernel(struct anv_device *device,
                         struct anv_pipeline_cache *cache,
                         const void *key_data, uint32_t key_size,
                         const void *kernel_data, uint32_t kernel_size,
                         const void *constant_data,
                         uint32_t constant_data_size,
                         const struct brw_stage_prog_data *prog_data,
                         uint32_t prog_data_size,
                         const struct brw_compile_stats *stats,
                         uint32_t num_stats,
                         const nir_xfb_info *xfb_info,
                         const struct anv_pipeline_bind_map *bind_map)
{
   struct anv_shader_bin *bin;
   if (cache) {
      bin = anv_pipeline_cache_upload_kernel(cache, key_data, key_size,
                                             kernel_data, kernel_size,
                                             constant_data, constant_data_size,
                                             prog_data, prog_data_size,
                                             stats, num_stats,
                                             xfb_info, bind_map);
   } else {
      bin = anv_shader_bin_create(device, key_data, key_size,
                                  kernel_data, kernel_size,
                                  constant_data, constant_data_size,
                                  prog_data, prog_data_size,
                                  prog_data->param,
                                  stats, num_stats,
                                  xfb_info, bind_map);
   }

   if (bin == NULL)
      return NULL;

#ifdef ENABLE_SHADER_CACHE
   struct disk_cache *disk_cache = device->instance->physicalDevice.disk_cache;
   if (disk_cache) {
      struct blob binary;
      blob_init(&binary);
      if (anv_shader_bin_write_to_blob(bin, &binary)) {
         cache_key cache_key;
         disk_cache_compute_key(disk_cache, key_data, key_size, cache_key);

         disk_cache_put(disk_cache, cache_key, binary.data, binary.size, NULL);
      }

      blob_finish(&binary);
   }
#endif

   return bin;
}

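/* The NIR cache stores serialized NIR keyed by a 20-byte SHA-1 computed by
 * the caller, so repeated pipeline compiles of the same shader module can
 * skip SPIR-V translation and early lowering.  Entries are ralloc'ed off
 * cache->nir_cache and freed with the cache.
 */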
struct serialized_nir {
   unsigned char sha1_key[20];
   size_t size;
   char data[0];
};

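/* Looks up serialized NIR by SHA-1 and deserializes it into mem_ctx.  Returns
 * NULL on a miss or if the stored blob turns out to be truncated.
 */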
struct nir_shader *
anv_device_search_for_nir(struct anv_device *device,
                          struct anv_pipeline_cache *cache,
                          const nir_shader_compiler_options *nir_options,
                          unsigned char sha1_key[20],
                          void *mem_ctx)
{
   if (cache && cache->nir_cache) {
      const struct serialized_nir *snir = NULL;

      pthread_mutex_lock(&cache->mutex);
      struct hash_entry *entry =
         _mesa_hash_table_search(cache->nir_cache, sha1_key);
      if (entry)
         snir = entry->data;
      pthread_mutex_unlock(&cache->mutex);

      if (snir) {
         struct blob_reader blob;
         blob_reader_init(&blob, snir->data, snir->size);

         nir_shader *nir = nir_deserialize(mem_ctx, nir_options, &blob);
         if (blob.overrun) {
            ralloc_free(nir);
         } else {
            return nir;
         }
      }
   }

   return NULL;
}

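/* Serializes the given NIR and inserts it into the NIR cache under sha1_key.
 * The table is checked again after serialization, under the lock, because
 * another thread may have inserted the same shader in the meantime.
 */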
void
anv_device_upload_nir(struct anv_device *device,
                      struct anv_pipeline_cache *cache,
                      const struct nir_shader *nir,
                      unsigned char sha1_key[20])
{
   if (cache && cache->nir_cache) {
      pthread_mutex_lock(&cache->mutex);
      struct hash_entry *entry =
         _mesa_hash_table_search(cache->nir_cache, sha1_key);
      pthread_mutex_unlock(&cache->mutex);
      if (entry)
         return;

      struct blob blob;
      blob_init(&blob);

      nir_serialize(&blob, nir, false);
      if (blob.out_of_memory) {
         blob_finish(&blob);
         return;
      }

      pthread_mutex_lock(&cache->mutex);
      /* Because ralloc isn't thread-safe, we have to do all this inside the
       * lock.  We could unlock for the big memcpy but it's probably not worth
       * the hassle.
       */
      entry = _mesa_hash_table_search(cache->nir_cache, sha1_key);
      if (entry) {
         blob_finish(&blob);
         pthread_mutex_unlock(&cache->mutex);
         return;
      }

      struct serialized_nir *snir =
         ralloc_size(cache->nir_cache, sizeof(*snir) + blob.size);
      memcpy(snir->sha1_key, sha1_key, 20);
      snir->size = blob.size;
      memcpy(snir->data, blob.data, blob.size);

      blob_finish(&blob);

      _mesa_hash_table_insert(cache->nir_cache, snir->sha1_key, snir);

      pthread_mutex_unlock(&cache->mutex);
   }
}