anv: Add support for the on-disk shader cache
[mesa.git] / src / intel / vulkan / anv_pipeline_cache.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "compiler/blob.h"
#include "util/hash_table.h"
#include "util/debug.h"
#include "util/disk_cache.h"
#include "util/mesa-sha1.h"
#include "anv_private.h"

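/* Create a new anv_shader_bin.  Everything the bin owns on the host side
 * (the bin itself, the key, the prog_data, the prog_data params, and the
 * bind map's descriptor arrays) is carved out of a single multialloc
 * allocation, so one vk_free in anv_shader_bin_destroy releases it all.
 * The kernel and constant data are copied into GPU-visible state pools
 * instead and are freed separately.
 */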
struct anv_shader_bin *
anv_shader_bin_create(struct anv_device *device,
                      const void *key_data, uint32_t key_size,
                      const void *kernel_data, uint32_t kernel_size,
                      const void *constant_data, uint32_t constant_data_size,
                      const struct brw_stage_prog_data *prog_data_in,
                      uint32_t prog_data_size, const void *prog_data_param_in,
                      const struct anv_pipeline_bind_map *bind_map)
{
   struct anv_shader_bin *shader;
   struct anv_shader_bin_key *key;
   struct brw_stage_prog_data *prog_data;
   uint32_t *prog_data_param;
   struct anv_pipeline_binding *surface_to_descriptor, *sampler_to_descriptor;

   ANV_MULTIALLOC(ma);
   anv_multialloc_add(&ma, &shader, 1);
   anv_multialloc_add_size(&ma, &key, sizeof(*key) + key_size);
   anv_multialloc_add_size(&ma, &prog_data, prog_data_size);
   anv_multialloc_add(&ma, &prog_data_param, prog_data_in->nr_params);
   anv_multialloc_add(&ma, &surface_to_descriptor,
                      bind_map->surface_count);
   anv_multialloc_add(&ma, &sampler_to_descriptor,
                      bind_map->sampler_count);

   if (!anv_multialloc_alloc(&ma, &device->alloc,
                             VK_SYSTEM_ALLOCATION_SCOPE_DEVICE))
      return NULL;

   shader->ref_cnt = 1;

   key->size = key_size;
   memcpy(key->data, key_data, key_size);
   shader->key = key;

   shader->kernel =
      anv_state_pool_alloc(&device->instruction_state_pool, kernel_size, 64);
   memcpy(shader->kernel.map, kernel_data, kernel_size);
   shader->kernel_size = kernel_size;

   if (constant_data_size) {
      shader->constant_data =
         anv_state_pool_alloc(&device->dynamic_state_pool,
                              constant_data_size, 32);
      memcpy(shader->constant_data.map, constant_data, constant_data_size);
   } else {
      shader->constant_data = ANV_STATE_NULL;
   }
   shader->constant_data_size = constant_data_size;

   memcpy(prog_data, prog_data_in, prog_data_size);
   memcpy(prog_data_param, prog_data_param_in,
          prog_data->nr_params * sizeof(*prog_data_param));
   prog_data->param = prog_data_param;
   shader->prog_data = prog_data;
   shader->prog_data_size = prog_data_size;

   shader->bind_map = *bind_map;
   typed_memcpy(surface_to_descriptor, bind_map->surface_to_descriptor,
                bind_map->surface_count);
   shader->bind_map.surface_to_descriptor = surface_to_descriptor;
   typed_memcpy(sampler_to_descriptor, bind_map->sampler_to_descriptor,
                bind_map->sampler_count);
   shader->bind_map.sampler_to_descriptor = sampler_to_descriptor;

   return shader;
}

void
anv_shader_bin_destroy(struct anv_device *device,
                       struct anv_shader_bin *shader)
{
   assert(shader->ref_cnt == 0);
   anv_state_pool_free(&device->instruction_state_pool, shader->kernel);
   anv_state_pool_free(&device->dynamic_state_pool, shader->constant_data);
   vk_free(&device->alloc, shader);
}

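/* Serialize a shader bin for VkPipelineCache data or the on-disk cache.  The
 * layout is a sequence of (uint32_t size, bytes) pairs for the key, kernel,
 * constant data, and prog_data (plus its param array), followed by the
 * bind-map counts and descriptor arrays.  It must stay in sync with
 * anv_shader_bin_create_from_blob below.
 */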
static bool
anv_shader_bin_write_to_blob(const struct anv_shader_bin *shader,
                             struct blob *blob)
{
   blob_write_uint32(blob, shader->key->size);
   blob_write_bytes(blob, shader->key->data, shader->key->size);

   blob_write_uint32(blob, shader->kernel_size);
   blob_write_bytes(blob, shader->kernel.map, shader->kernel_size);

   blob_write_uint32(blob, shader->constant_data_size);
   blob_write_bytes(blob, shader->constant_data.map,
                    shader->constant_data_size);

   blob_write_uint32(blob, shader->prog_data_size);
   blob_write_bytes(blob, shader->prog_data, shader->prog_data_size);
   blob_write_bytes(blob, shader->prog_data->param,
                    shader->prog_data->nr_params *
                    sizeof(*shader->prog_data->param));

   blob_write_uint32(blob, shader->bind_map.surface_count);
   blob_write_uint32(blob, shader->bind_map.sampler_count);
   blob_write_uint32(blob, shader->bind_map.image_count);
   blob_write_bytes(blob, shader->bind_map.surface_to_descriptor,
                    shader->bind_map.surface_count *
                    sizeof(*shader->bind_map.surface_to_descriptor));
   blob_write_bytes(blob, shader->bind_map.sampler_to_descriptor,
                    shader->bind_map.sampler_count *
                    sizeof(*shader->bind_map.sampler_to_descriptor));

   /* Blob write failures are sticky (out_of_memory stays set), so a single
    * check here covers every write above.
    */
   return !blob->out_of_memory;
}

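/* Deserialize a shader bin in the format produced by
 * anv_shader_bin_write_to_blob.  The pointers returned by blob_read_bytes
 * point into the blob itself; anv_shader_bin_create copies everything it
 * needs, so nothing here outlives the blob.  Returns NULL if the blob is
 * too small (overrun).
 */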
static struct anv_shader_bin *
anv_shader_bin_create_from_blob(struct anv_device *device,
                                struct blob_reader *blob)
{
   uint32_t key_size = blob_read_uint32(blob);
   const void *key_data = blob_read_bytes(blob, key_size);

   uint32_t kernel_size = blob_read_uint32(blob);
   const void *kernel_data = blob_read_bytes(blob, kernel_size);

   uint32_t constant_data_size = blob_read_uint32(blob);
   const void *constant_data = blob_read_bytes(blob, constant_data_size);

   uint32_t prog_data_size = blob_read_uint32(blob);
   const struct brw_stage_prog_data *prog_data =
      blob_read_bytes(blob, prog_data_size);
   if (blob->overrun)
      return NULL;
   const void *prog_data_param =
      blob_read_bytes(blob, prog_data->nr_params * sizeof(*prog_data->param));

   struct anv_pipeline_bind_map bind_map;
   bind_map.surface_count = blob_read_uint32(blob);
   bind_map.sampler_count = blob_read_uint32(blob);
   bind_map.image_count = blob_read_uint32(blob);
   bind_map.surface_to_descriptor = (void *)
      blob_read_bytes(blob, bind_map.surface_count *
                            sizeof(*bind_map.surface_to_descriptor));
   bind_map.sampler_to_descriptor = (void *)
      blob_read_bytes(blob, bind_map.sampler_count *
                            sizeof(*bind_map.sampler_to_descriptor));

   if (blob->overrun)
      return NULL;

   return anv_shader_bin_create(device,
                                key_data, key_size,
                                kernel_data, kernel_size,
                                constant_data, constant_data_size,
                                prog_data, prog_data_size, prog_data_param,
                                &bind_map);
}

/* Remaining work:
 *
 * - Compact binding table layout so it's tight and not dependent on
 *   descriptor set layout.
 *
 * - Review prog_data struct for size and cacheability: struct
 *   brw_stage_prog_data has binding_table which uses a lot of uint32_t for
 *   8-bit quantities etc; use bit fields for all bools, e.g. dual_src_blend.
 */

static uint32_t
shader_bin_key_hash_func(const void *void_key)
{
   const struct anv_shader_bin_key *key = void_key;
   return _mesa_hash_data(key->data, key->size);
}

static bool
shader_bin_key_compare_func(const void *void_a, const void *void_b)
{
   const struct anv_shader_bin_key *a = void_a, *b = void_b;
   if (a->size != b->size)
      return false;

   return memcmp(a->data, b->data, a->size) == 0;
}

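/* The in-memory cache is a hash table mapping opaque key bytes (in practice,
 * a SHA-1 of the shader and pipeline state computed by the pipeline code) to
 * anv_shader_bin objects.  When cache_enabled is false we still initialize
 * the mutex but create no table, so every lookup misses.
 */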
void
anv_pipeline_cache_init(struct anv_pipeline_cache *cache,
                        struct anv_device *device,
                        bool cache_enabled)
{
   cache->device = device;
   pthread_mutex_init(&cache->mutex, NULL);

   if (cache_enabled) {
      cache->cache = _mesa_hash_table_create(NULL, shader_bin_key_hash_func,
                                             shader_bin_key_compare_func);
   } else {
      cache->cache = NULL;
   }
}

void
anv_pipeline_cache_finish(struct anv_pipeline_cache *cache)
{
   pthread_mutex_destroy(&cache->mutex);

   if (cache->cache) {
      /* This is a bit unfortunate.  In order to keep things from randomly
       * going away, the shader cache has to hold a reference to all shader
       * binaries it contains.  We unref them when we destroy the cache.
       */
      struct hash_entry *entry;
      hash_table_foreach(cache->cache, entry)
         anv_shader_bin_unref(cache->device, entry->data);

      _mesa_hash_table_destroy(cache->cache, NULL);
   }
}

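/* Look up a key without taking the cache mutex; callers must already hold
 * it.  The key header and data are assembled in a stack VLA so we can reuse
 * the table's anv_shader_bin_key hashing and comparison functions.
 */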
static struct anv_shader_bin *
anv_pipeline_cache_search_locked(struct anv_pipeline_cache *cache,
                                 const void *key_data, uint32_t key_size)
{
   uint32_t vla[1 + DIV_ROUND_UP(key_size, sizeof(uint32_t))];
   struct anv_shader_bin_key *key = (void *)vla;
   key->size = key_size;
   memcpy(key->data, key_data, key_size);

   struct hash_entry *entry = _mesa_hash_table_search(cache->cache, key);
   if (entry)
      return entry->data;
   else
      return NULL;
}

struct anv_shader_bin *
anv_pipeline_cache_search(struct anv_pipeline_cache *cache,
                          const void *key_data, uint32_t key_size)
{
   if (!cache->cache)
      return NULL;

   pthread_mutex_lock(&cache->mutex);

   struct anv_shader_bin *shader =
      anv_pipeline_cache_search_locked(cache, key_data, key_size);

   pthread_mutex_unlock(&cache->mutex);

   /* We increment refcount before handing it to the caller */
   if (shader)
      anv_shader_bin_ref(shader);

   return shader;
}

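/* Add an already-created shader bin to the cache, e.g. one deserialized from
 * the disk cache.  If an entry with the same key is already present it wins
 * and the new bin is left untouched; otherwise the cache takes its own
 * reference on the bin.
 */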
static void
anv_pipeline_cache_add_shader_bin(struct anv_pipeline_cache *cache,
                                  struct anv_shader_bin *bin)
{
   if (!cache->cache)
      return;

   pthread_mutex_lock(&cache->mutex);

   struct hash_entry *entry = _mesa_hash_table_search(cache->cache, bin->key);
   if (entry == NULL) {
      /* Take a reference for the cache */
      anv_shader_bin_ref(bin);
      _mesa_hash_table_insert(cache->cache, bin->key, bin);
   }

   pthread_mutex_unlock(&cache->mutex);
}

static struct anv_shader_bin *
anv_pipeline_cache_add_shader_locked(struct anv_pipeline_cache *cache,
                                     const void *key_data, uint32_t key_size,
                                     const void *kernel_data,
                                     uint32_t kernel_size,
                                     const void *constant_data,
                                     uint32_t constant_data_size,
                                     const struct brw_stage_prog_data *prog_data,
                                     uint32_t prog_data_size,
                                     const void *prog_data_param,
                                     const struct anv_pipeline_bind_map *bind_map)
{
   struct anv_shader_bin *shader =
      anv_pipeline_cache_search_locked(cache, key_data, key_size);
   if (shader)
      return shader;

   struct anv_shader_bin *bin =
      anv_shader_bin_create(cache->device, key_data, key_size,
                            kernel_data, kernel_size,
                            constant_data, constant_data_size,
                            prog_data, prog_data_size, prog_data_param,
                            bind_map);
   if (!bin)
      return NULL;

   _mesa_hash_table_insert(cache->cache, bin->key, bin);

   return bin;
}

struct anv_shader_bin *
anv_pipeline_cache_upload_kernel(struct anv_pipeline_cache *cache,
                                 const void *key_data, uint32_t key_size,
                                 const void *kernel_data, uint32_t kernel_size,
                                 const void *constant_data,
                                 uint32_t constant_data_size,
                                 const struct brw_stage_prog_data *prog_data,
                                 uint32_t prog_data_size,
                                 const struct anv_pipeline_bind_map *bind_map)
{
   if (cache->cache) {
      pthread_mutex_lock(&cache->mutex);

      struct anv_shader_bin *bin =
         anv_pipeline_cache_add_shader_locked(cache, key_data, key_size,
                                              kernel_data, kernel_size,
                                              constant_data, constant_data_size,
                                              prog_data, prog_data_size,
                                              prog_data->param, bind_map);

      pthread_mutex_unlock(&cache->mutex);

      /* We increment refcount before handing it to the caller */
      if (bin)
         anv_shader_bin_ref(bin);

      return bin;
   } else {
      /* In this case, we're not caching it so the caller owns it entirely */
      return anv_shader_bin_create(cache->device, key_data, key_size,
                                   kernel_data, kernel_size,
                                   constant_data, constant_data_size,
                                   prog_data, prog_data_size,
                                   prog_data->param, bind_map);
   }
}

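/* This matches the pipeline cache header layout mandated by the Vulkan spec
 * for the data returned by vkGetPipelineCacheData: header length, header
 * version, vendor ID, device ID, and pipeline cache UUID.
 */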
struct cache_header {
   uint32_t header_size;
   uint32_t header_version;
   uint32_t vendor_id;
   uint32_t device_id;
   uint8_t uuid[VK_UUID_SIZE];
};

static void
anv_pipeline_cache_load(struct anv_pipeline_cache *cache,
                        const void *data, size_t size)
{
   struct anv_device *device = cache->device;
   struct anv_physical_device *pdevice = &device->instance->physicalDevice;

   if (cache->cache == NULL)
      return;

   struct blob_reader blob;
   blob_reader_init(&blob, data, size);

   struct cache_header header;
   blob_copy_bytes(&blob, &header, sizeof(header));
   uint32_t count = blob_read_uint32(&blob);
   if (blob.overrun)
      return;

   if (header.header_size < sizeof(header))
      return;
   if (header.header_version != VK_PIPELINE_CACHE_HEADER_VERSION_ONE)
      return;
   if (header.vendor_id != 0x8086)
      return;
   if (header.device_id != device->chipset_id)
      return;
   if (memcmp(header.uuid, pdevice->pipeline_cache_uuid, VK_UUID_SIZE) != 0)
      return;

   for (uint32_t i = 0; i < count; i++) {
      struct anv_shader_bin *bin =
         anv_shader_bin_create_from_blob(device, &blob);
      if (!bin)
         break;
      _mesa_hash_table_insert(cache->cache, bin->key, bin);
   }
}

VkResult anv_CreatePipelineCache(
    VkDevice                                    _device,
    const VkPipelineCacheCreateInfo*            pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkPipelineCache*                            pPipelineCache)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_pipeline_cache *cache;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO);
   assert(pCreateInfo->flags == 0);

   cache = vk_alloc2(&device->alloc, pAllocator,
                     sizeof(*cache), 8,
                     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (cache == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   anv_pipeline_cache_init(cache, device,
                           device->instance->pipeline_cache_enabled);

   if (pCreateInfo->initialDataSize > 0)
      anv_pipeline_cache_load(cache,
                              pCreateInfo->pInitialData,
                              pCreateInfo->initialDataSize);

   *pPipelineCache = anv_pipeline_cache_to_handle(cache);

   return VK_SUCCESS;
}

void anv_DestroyPipelineCache(
    VkDevice                                    _device,
    VkPipelineCache                             _cache,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache);

   if (!cache)
      return;

   anv_pipeline_cache_finish(cache);

   vk_free2(&device->alloc, pAllocator, cache);
}

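/* vkGetPipelineCacheData follows the usual Vulkan two-call idiom: when pData
 * is NULL we serialize into a fixed blob with a NULL destination and SIZE_MAX
 * capacity, which only measures the required size; when pData is non-NULL the
 * blob is capped at *pDataSize and we return VK_INCOMPLETE if anything did
 * not fit.
 */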
VkResult anv_GetPipelineCacheData(
    VkDevice                                    _device,
    VkPipelineCache                             _cache,
    size_t*                                     pDataSize,
    void*                                       pData)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache);
   struct anv_physical_device *pdevice = &device->instance->physicalDevice;

   struct blob blob;
   if (pData) {
      blob_init_fixed(&blob, pData, *pDataSize);
   } else {
      blob_init_fixed(&blob, NULL, SIZE_MAX);
   }

   struct cache_header header = {
      .header_size = sizeof(struct cache_header),
      .header_version = VK_PIPELINE_CACHE_HEADER_VERSION_ONE,
      .vendor_id = 0x8086,
      .device_id = device->chipset_id,
   };
   memcpy(header.uuid, pdevice->pipeline_cache_uuid, VK_UUID_SIZE);
   blob_write_bytes(&blob, &header, sizeof(header));

   uint32_t count = 0;
   intptr_t count_offset = blob_reserve_uint32(&blob);
   if (count_offset < 0) {
      *pDataSize = 0;
      blob_finish(&blob);
      return VK_INCOMPLETE;
   }

   VkResult result = VK_SUCCESS;
   if (cache->cache) {
      struct hash_entry *entry;
      hash_table_foreach(cache->cache, entry) {
         struct anv_shader_bin *shader = entry->data;

         size_t save_size = blob.size;
         if (!anv_shader_bin_write_to_blob(shader, &blob)) {
            /* If it fails, reset to the previous size and bail */
            blob.size = save_size;
            result = VK_INCOMPLETE;
            break;
         }

         count++;
      }
   }

   blob_overwrite_uint32(&blob, count_offset, count);

   *pDataSize = blob.size;

   blob_finish(&blob);

   return result;
}

VkResult anv_MergePipelineCaches(
    VkDevice                                    _device,
    VkPipelineCache                             destCache,
    uint32_t                                    srcCacheCount,
    const VkPipelineCache*                      pSrcCaches)
{
   ANV_FROM_HANDLE(anv_pipeline_cache, dst, destCache);

   if (!dst->cache)
      return VK_SUCCESS;

   for (uint32_t i = 0; i < srcCacheCount; i++) {
      ANV_FROM_HANDLE(anv_pipeline_cache, src, pSrcCaches[i]);
      if (!src->cache)
         continue;

      struct hash_entry *entry;
      hash_table_foreach(src->cache, entry) {
         struct anv_shader_bin *bin = entry->data;
         assert(bin);

         if (_mesa_hash_table_search(dst->cache, bin->key))
            continue;

         anv_shader_bin_ref(bin);
         _mesa_hash_table_insert(dst->cache, bin->key, bin);
      }
   }

   return VK_SUCCESS;
}

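/* Look a kernel up first in the app-supplied VkPipelineCache (if any), then
 * in the on-disk shader cache.  A disk hit is deserialized and also inserted
 * into the in-memory cache so subsequent lookups avoid the disk entirely.
 */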
struct anv_shader_bin *
anv_device_search_for_kernel(struct anv_device *device,
                             struct anv_pipeline_cache *cache,
                             const void *key_data, uint32_t key_size)
{
   struct anv_shader_bin *bin;

   if (cache) {
      bin = anv_pipeline_cache_search(cache, key_data, key_size);
      if (bin)
         return bin;
   }

#ifdef ENABLE_SHADER_CACHE
   struct disk_cache *disk_cache = device->instance->physicalDevice.disk_cache;
   if (disk_cache) {
      cache_key cache_key;
      disk_cache_compute_key(disk_cache, key_data, key_size, cache_key);

      size_t buffer_size;
      uint8_t *buffer = disk_cache_get(disk_cache, cache_key, &buffer_size);
      if (buffer) {
         struct blob_reader blob;
         blob_reader_init(&blob, buffer, buffer_size);
         bin = anv_shader_bin_create_from_blob(device, &blob);
         free(buffer);

         if (bin) {
            if (cache)
               anv_pipeline_cache_add_shader_bin(cache, bin);
            return bin;
         }
      }
   }
#endif

   return NULL;
}

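/* Insert a freshly compiled kernel into the pipeline cache (or create a
 * stand-alone bin if there is none) and mirror it into the on-disk shader
 * cache.  Note that anv_pipeline_cache_upload_kernel may return an existing
 * bin for the same key; in that case the existing bin's bits are what get
 * written to disk.
 */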
struct anv_shader_bin *
anv_device_upload_kernel(struct anv_device *device,
                         struct anv_pipeline_cache *cache,
                         const void *key_data, uint32_t key_size,
                         const void *kernel_data, uint32_t kernel_size,
                         const void *constant_data,
                         uint32_t constant_data_size,
                         const struct brw_stage_prog_data *prog_data,
                         uint32_t prog_data_size,
                         const struct anv_pipeline_bind_map *bind_map)
{
   struct anv_shader_bin *bin;
   if (cache) {
      bin = anv_pipeline_cache_upload_kernel(cache, key_data, key_size,
                                             kernel_data, kernel_size,
                                             constant_data, constant_data_size,
                                             prog_data, prog_data_size,
                                             bind_map);
   } else {
      bin = anv_shader_bin_create(device, key_data, key_size,
                                  kernel_data, kernel_size,
                                  constant_data, constant_data_size,
                                  prog_data, prog_data_size,
                                  prog_data->param, bind_map);
   }

   if (bin == NULL)
      return NULL;

#ifdef ENABLE_SHADER_CACHE
   struct disk_cache *disk_cache = device->instance->physicalDevice.disk_cache;
   if (disk_cache) {
      struct blob binary;
      blob_init(&binary);
      anv_shader_bin_write_to_blob(bin, &binary);

      if (!binary.out_of_memory) {
         cache_key cache_key;
         disk_cache_compute_key(disk_cache, key_data, key_size, cache_key);

         disk_cache_put(disk_cache, cache_key, binary.data, binary.size, NULL);
      }

      blob_finish(&binary);
   }
#endif

   return bin;
}