src/amd/vulkan/radv_pipeline_cache.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/macros.h"
#include "util/mesa-sha1.h"
#include "util/debug.h"
#include "util/disk_cache.h"
#include "util/u_atomic.h"
#include "radv_debug.h"
#include "radv_private.h"
#include "radv_shader.h"
#include "vulkan/util/vk_util.h"

#include "ac_nir_to_llvm.h"

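/* An in-memory cache entry.  Entries are keyed by a SHA-1 of the pipeline
 * state; sha1 and sha1_dw alias the same bytes, and sha1_dw[0] seeds the
 * hash-table probe below.  The serialized binaries for each stage are
 * packed back-to-back in the trailing 'code' array, with binary_sizes[]
 * recording each stage's length.  'variants' caches the deserialized
 * shader variants; it is cleared in every copy that gets written out, so
 * serialized entries stay reproducible.
 */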
struct cache_entry {
        union {
                unsigned char sha1[20];
                uint32_t sha1_dw[5];
        };
        uint32_t binary_sizes[MESA_SHADER_STAGES];
        struct radv_shader_variant *variants[MESA_SHADER_STAGES];
        char code[0];
};

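/* With VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT the
 * application promises to synchronize access to the cache itself, so the
 * mutex can be skipped entirely.
 */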
static void
radv_pipeline_cache_lock(struct radv_pipeline_cache *cache)
{
        if (cache->flags & VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT)
                return;

        pthread_mutex_lock(&cache->mutex);
}

static void
radv_pipeline_cache_unlock(struct radv_pipeline_cache *cache)
{
        if (cache->flags & VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT)
                return;

        pthread_mutex_unlock(&cache->mutex);
}

void
radv_pipeline_cache_init(struct radv_pipeline_cache *cache,
                         struct radv_device *device)
{
        cache->device = device;
        pthread_mutex_init(&cache->mutex, NULL);
        cache->flags = 0;

        cache->modified = false;
        cache->kernel_count = 0;
        cache->total_size = 0;
        cache->table_size = 1024;
        const size_t byte_size = cache->table_size * sizeof(cache->hash_table[0]);
        cache->hash_table = malloc(byte_size);

        /* We don't consider allocation failure fatal; we just start with a
         * 0-sized cache.  Also disable caching when we want to keep shader
         * debug info, since we don't get the debug info on cached shaders.
         */
        if (cache->hash_table == NULL ||
            (device->instance->debug_flags & RADV_DEBUG_NO_CACHE))
                cache->table_size = 0;
        else
                memset(cache->hash_table, 0, byte_size);
}

void
radv_pipeline_cache_finish(struct radv_pipeline_cache *cache)
{
        for (unsigned i = 0; i < cache->table_size; ++i)
                if (cache->hash_table[i]) {
                        for (int j = 0; j < MESA_SHADER_STAGES; ++j) {
                                if (cache->hash_table[i]->variants[j])
                                        radv_shader_variant_destroy(cache->device,
                                                                    cache->hash_table[i]->variants[j]);
                        }
                        vk_free(&cache->alloc, cache->hash_table[i]);
                }
        pthread_mutex_destroy(&cache->mutex);
        free(cache->hash_table);
}

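/* Size of an entry as serialized: the fixed header plus each stage's
 * binary, padded so that consecutive entries remain properly aligned.
 */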
static uint32_t
entry_size(struct cache_entry *entry)
{
        size_t ret = sizeof(*entry);
        for (int i = 0; i < MESA_SHADER_STAGES; ++i)
                if (entry->binary_sizes[i])
                        ret += entry->binary_sizes[i];
        ret = align(ret, alignof(struct cache_entry));
        return ret;
}

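/* Compute the pipeline hash used as the cache key.  It covers the pipeline
 * key, the layout SHA-1 and, for each active stage, the module SHA-1, the
 * entry point name and the specialization constants, plus the pipeline
 * create flags.
 */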
void
radv_hash_shaders(unsigned char *hash,
                  const VkPipelineShaderStageCreateInfo **stages,
                  const struct radv_pipeline_layout *layout,
                  const struct radv_pipeline_key *key,
                  uint32_t flags)
{
        struct mesa_sha1 ctx;

        _mesa_sha1_init(&ctx);
        if (key)
                _mesa_sha1_update(&ctx, key, sizeof(*key));
        if (layout)
                _mesa_sha1_update(&ctx, layout->sha1, sizeof(layout->sha1));

        for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
                if (stages[i]) {
                        RADV_FROM_HANDLE(radv_shader_module, module, stages[i]->module);
                        const VkSpecializationInfo *spec_info = stages[i]->pSpecializationInfo;

                        _mesa_sha1_update(&ctx, module->sha1, sizeof(module->sha1));
                        _mesa_sha1_update(&ctx, stages[i]->pName, strlen(stages[i]->pName));
                        if (spec_info && spec_info->mapEntryCount) {
                                _mesa_sha1_update(&ctx, spec_info->pMapEntries,
                                                  spec_info->mapEntryCount * sizeof spec_info->pMapEntries[0]);
                                _mesa_sha1_update(&ctx, spec_info->pData, spec_info->dataSize);
                        }
                }
        }
        _mesa_sha1_update(&ctx, &flags, 4);
        _mesa_sha1_final(&ctx, hash);
}
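/* The in-memory cache is an open-addressed hash table with linear probing.
 * table_size is always a power of two, so the probe index can be computed
 * as (start + i) & (table_size - 1), and the table is grown before it
 * becomes half full, which guarantees that probing terminates at either a
 * match or an empty slot.
 */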
static struct cache_entry *
radv_pipeline_cache_search_unlocked(struct radv_pipeline_cache *cache,
                                    const unsigned char *sha1)
{
        const uint32_t mask = cache->table_size - 1;
        const uint32_t start = (*(uint32_t *) sha1);

        if (cache->table_size == 0)
                return NULL;

        for (uint32_t i = 0; i < cache->table_size; i++) {
                const uint32_t index = (start + i) & mask;
                struct cache_entry *entry = cache->hash_table[index];

                if (!entry)
                        return NULL;

                if (memcmp(entry->sha1, sha1, sizeof(entry->sha1)) == 0) {
                        return entry;
                }
        }

        unreachable("hash table should never be full");
}

static struct cache_entry *
radv_pipeline_cache_search(struct radv_pipeline_cache *cache,
                           const unsigned char *sha1)
{
        struct cache_entry *entry;

        radv_pipeline_cache_lock(cache);

        entry = radv_pipeline_cache_search_unlocked(cache, sha1);

        radv_pipeline_cache_unlock(cache);

        return entry;
}

static void
radv_pipeline_cache_set_entry(struct radv_pipeline_cache *cache,
                              struct cache_entry *entry)
{
        const uint32_t mask = cache->table_size - 1;
        const uint32_t start = entry->sha1_dw[0];

        /* We'll always be able to insert when we get here. */
        assert(cache->kernel_count < cache->table_size / 2);

        for (uint32_t i = 0; i < cache->table_size; i++) {
                const uint32_t index = (start + i) & mask;
                if (!cache->hash_table[index]) {
                        cache->hash_table[index] = entry;
                        break;
                }
        }

        cache->total_size += entry_size(entry);
        cache->kernel_count++;
}
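/* Double the table and reinsert every live entry.  The probe sequence
 * depends on table_size, so entries cannot simply be copied across.
 */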
static VkResult
radv_pipeline_cache_grow(struct radv_pipeline_cache *cache)
{
        const uint32_t table_size = cache->table_size * 2;
        const uint32_t old_table_size = cache->table_size;
        const size_t byte_size = table_size * sizeof(cache->hash_table[0]);
        struct cache_entry **table;
        struct cache_entry **old_table = cache->hash_table;

        table = malloc(byte_size);
        if (table == NULL)
                return vk_error(cache->device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

        cache->hash_table = table;
        cache->table_size = table_size;
        cache->kernel_count = 0;
        cache->total_size = 0;

        memset(cache->hash_table, 0, byte_size);
        for (uint32_t i = 0; i < old_table_size; i++) {
                struct cache_entry *entry = old_table[i];
                if (!entry)
                        continue;

                radv_pipeline_cache_set_entry(cache, entry);
        }

        free(old_table);

        return VK_SUCCESS;
}

static void
radv_pipeline_cache_add_entry(struct radv_pipeline_cache *cache,
                              struct cache_entry *entry)
{
        if (cache->kernel_count == cache->table_size / 2)
                radv_pipeline_cache_grow(cache);

        /* Failing to grow the hash table isn't fatal, but it may mean we
         * don't have enough space for this new kernel.  Only add it if
         * there's room.
         */
        if (cache->kernel_count < cache->table_size / 2)
                radv_pipeline_cache_set_entry(cache, entry);
}

static bool
radv_is_cache_disabled(struct radv_device *device)
{
        /* Pipeline caches can be disabled with RADV_DEBUG=nocache, with
         * MESA_GLSL_CACHE_DISABLE=1, and when VK_AMD_shader_info is requested.
         */
        return (device->instance->debug_flags & RADV_DEBUG_NO_CACHE);
}

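/* Look a pipeline up in two levels: first the in-memory table (the given
 * cache, or the device-wide mem_cache), then the on-disk cache.  Disk hits
 * are copied into the in-memory cache so later lookups stay cheap, and any
 * stage that only exists in serialized form is deserialized into a
 * radv_shader_variant on the way out.
 */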
bool
radv_create_shader_variants_from_pipeline_cache(struct radv_device *device,
                                                struct radv_pipeline_cache *cache,
                                                const unsigned char *sha1,
                                                struct radv_shader_variant **variants,
                                                bool *found_in_application_cache)
{
        struct cache_entry *entry;

        if (!cache) {
                cache = device->mem_cache;
                *found_in_application_cache = false;
        }

        radv_pipeline_cache_lock(cache);

        entry = radv_pipeline_cache_search_unlocked(cache, sha1);

        if (!entry) {
                *found_in_application_cache = false;

                /* Don't cache when we want debug info, since this isn't
                 * present in the cache.
                 */
                if (radv_is_cache_disabled(device) || !device->physical_device->disk_cache) {
                        radv_pipeline_cache_unlock(cache);
                        return false;
                }

                uint8_t disk_sha1[20];
                disk_cache_compute_key(device->physical_device->disk_cache,
                                       sha1, 20, disk_sha1);

                entry = (struct cache_entry *)
                        disk_cache_get(device->physical_device->disk_cache,
                                       disk_sha1, NULL);
                if (!entry) {
                        radv_pipeline_cache_unlock(cache);
                        return false;
                } else {
                        size_t size = entry_size(entry);
                        struct cache_entry *new_entry = vk_alloc(&cache->alloc, size, 8,
                                                                 VK_SYSTEM_ALLOCATION_SCOPE_CACHE);
                        if (!new_entry) {
                                free(entry);
                                radv_pipeline_cache_unlock(cache);
                                return false;
                        }

                        memcpy(new_entry, entry, entry_size(entry));
                        free(entry);
                        entry = new_entry;

                        if (!(device->instance->debug_flags & RADV_DEBUG_NO_MEMORY_CACHE) ||
                            cache != device->mem_cache)
                                radv_pipeline_cache_add_entry(cache, new_entry);
                }
        }

        char *p = entry->code;
        for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
                if (!entry->variants[i] && entry->binary_sizes[i]) {
                        struct radv_shader_binary *binary = calloc(1, entry->binary_sizes[i]);
                        memcpy(binary, p, entry->binary_sizes[i]);
                        p += entry->binary_sizes[i];

                        entry->variants[i] = radv_shader_variant_create(device, binary, false);
                        free(binary);
                } else if (entry->binary_sizes[i]) {
                        p += entry->binary_sizes[i];
                }
        }

        memcpy(variants, entry->variants, sizeof(entry->variants));

        if (device->instance->debug_flags & RADV_DEBUG_NO_MEMORY_CACHE &&
            cache == device->mem_cache)
                vk_free(&cache->alloc, entry);
        else {
                for (int i = 0; i < MESA_SHADER_STAGES; ++i)
                        if (entry->variants[i])
                                p_atomic_inc(&entry->variants[i]->ref_count);
        }

        radv_pipeline_cache_unlock(cache);
        return true;
}

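/* Insert freshly compiled shaders under the given SHA-1.  If another
 * thread raced us and already populated the entry, the cached variants win
 * for each stage and the newly compiled ones are destroyed, so all callers
 * end up sharing a single set of variants per key.
 */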
void
radv_pipeline_cache_insert_shaders(struct radv_device *device,
                                   struct radv_pipeline_cache *cache,
                                   const unsigned char *sha1,
                                   struct radv_shader_variant **variants,
                                   struct radv_shader_binary *const *binaries)
{
        if (!cache)
                cache = device->mem_cache;

        radv_pipeline_cache_lock(cache);
        struct cache_entry *entry = radv_pipeline_cache_search_unlocked(cache, sha1);
        if (entry) {
                for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
                        if (entry->variants[i]) {
                                radv_shader_variant_destroy(cache->device, variants[i]);
                                variants[i] = entry->variants[i];
                        } else {
                                entry->variants[i] = variants[i];
                        }
                        if (variants[i])
                                p_atomic_inc(&variants[i]->ref_count);
                }
                radv_pipeline_cache_unlock(cache);
                return;
        }

        /* Don't cache when we want debug info, since this isn't
         * present in the cache.
         */
        if (radv_is_cache_disabled(device)) {
                radv_pipeline_cache_unlock(cache);
                return;
        }

        size_t size = sizeof(*entry);
        for (int i = 0; i < MESA_SHADER_STAGES; ++i)
                if (variants[i])
                        size += binaries[i]->total_size;
        size = align(size, alignof(struct cache_entry));

        entry = vk_alloc(&cache->alloc, size, 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_CACHE);
        if (!entry) {
                radv_pipeline_cache_unlock(cache);
                return;
        }

        memset(entry, 0, sizeof(*entry));
        memcpy(entry->sha1, sha1, 20);

        char *p = entry->code;

        for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
                if (!variants[i])
                        continue;

                entry->binary_sizes[i] = binaries[i]->total_size;

                memcpy(p, binaries[i], binaries[i]->total_size);
                p += binaries[i]->total_size;
        }

        /* Always add cache items to disk.  This allows collection of
         * compiled shaders by third parties such as Steam, even if the app
         * implements its own pipeline cache.
         */
        if (device->physical_device->disk_cache) {
                uint8_t disk_sha1[20];
                disk_cache_compute_key(device->physical_device->disk_cache, sha1, 20,
                                       disk_sha1);

                disk_cache_put(device->physical_device->disk_cache, disk_sha1,
                               entry, entry_size(entry), NULL);
        }

        if (device->instance->debug_flags & RADV_DEBUG_NO_MEMORY_CACHE &&
            cache == device->mem_cache) {
                vk_free2(&cache->alloc, NULL, entry);
                radv_pipeline_cache_unlock(cache);
                return;
        }

        /* We delay setting the variants so we have reproducible disk cache
         * items.
         */
        for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
                if (!variants[i])
                        continue;

                entry->variants[i] = variants[i];
                p_atomic_inc(&variants[i]->ref_count);
        }

        radv_pipeline_cache_add_entry(cache, entry);

        cache->modified = true;
        radv_pipeline_cache_unlock(cache);
}

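/* Import a serialized blob (VkPipelineCacheCreateInfo::pInitialData).  The
 * vk_pipeline_cache_header must match this driver and device exactly;
 * otherwise the data is ignored, as the specification allows.
 */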
bool
radv_pipeline_cache_load(struct radv_pipeline_cache *cache,
                         const void *data, size_t size)
{
        struct radv_device *device = cache->device;
        struct vk_pipeline_cache_header header;

        if (size < sizeof(header))
                return false;
        memcpy(&header, data, sizeof(header));
        if (header.header_size < sizeof(header))
                return false;
        if (header.header_version != VK_PIPELINE_CACHE_HEADER_VERSION_ONE)
                return false;
        if (header.vendor_id != ATI_VENDOR_ID)
                return false;
        if (header.device_id != device->physical_device->rad_info.pci_id)
                return false;
        if (memcmp(header.uuid, device->physical_device->cache_uuid, VK_UUID_SIZE) != 0)
                return false;

        char *end = (void *) data + size;
        char *p = (void *) data + header.header_size;

        while (end - p >= sizeof(struct cache_entry)) {
                struct cache_entry *entry = (struct cache_entry *)p;
                struct cache_entry *dest_entry;
                size_t entry_sz = entry_size(entry);
                if (end - p < entry_sz)
                        break;

                dest_entry = vk_alloc(&cache->alloc, entry_sz,
                                      8, VK_SYSTEM_ALLOCATION_SCOPE_CACHE);
                if (dest_entry) {
                        memcpy(dest_entry, entry, entry_sz);
                        for (int i = 0; i < MESA_SHADER_STAGES; ++i)
                                dest_entry->variants[i] = NULL;
                        radv_pipeline_cache_add_entry(cache, dest_entry);
                }
                p += entry_sz;
        }

        return true;
}

VkResult radv_CreatePipelineCache(
        VkDevice                                    _device,
        const VkPipelineCacheCreateInfo*            pCreateInfo,
        const VkAllocationCallbacks*                pAllocator,
        VkPipelineCache*                            pPipelineCache)
{
        RADV_FROM_HANDLE(radv_device, device, _device);
        struct radv_pipeline_cache *cache;

        assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO);
        assert((pCreateInfo->flags &
                ~VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT) == 0);

        cache = vk_alloc2(&device->vk.alloc, pAllocator,
                          sizeof(*cache), 8,
                          VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
        if (cache == NULL)
                return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

        vk_object_base_init(&device->vk, &cache->base,
                            VK_OBJECT_TYPE_PIPELINE_CACHE);

        if (pAllocator)
                cache->alloc = *pAllocator;
        else
                cache->alloc = device->vk.alloc;

        radv_pipeline_cache_init(cache, device);
        cache->flags = pCreateInfo->flags;

        if (pCreateInfo->initialDataSize > 0) {
                radv_pipeline_cache_load(cache,
                                         pCreateInfo->pInitialData,
                                         pCreateInfo->initialDataSize);
        }

        *pPipelineCache = radv_pipeline_cache_to_handle(cache);

        return VK_SUCCESS;
}

void radv_DestroyPipelineCache(
        VkDevice                                    _device,
        VkPipelineCache                             _cache,
        const VkAllocationCallbacks*                pAllocator)
{
        RADV_FROM_HANDLE(radv_device, device, _device);
        RADV_FROM_HANDLE(radv_pipeline_cache, cache, _cache);

        if (!cache)
                return;
        radv_pipeline_cache_finish(cache);

        vk_object_base_finish(&cache->base);
        vk_free2(&device->vk.alloc, pAllocator, cache);
}

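/* vkGetPipelineCacheData follows the usual Vulkan two-call idiom: pass a
 * NULL pData to query the required size, then call again with a buffer of
 * that size.  A caller might do, roughly:
 *
 *     size_t size;
 *     vkGetPipelineCacheData(device, cache, &size, NULL);
 *     void *data = malloc(size);
 *     if (data && vkGetPipelineCacheData(device, cache, &size, data) == VK_SUCCESS)
 *             use_blob(data, size);  // hypothetical consumer, e.g. an app cache file
 *
 * The blob starts with a vk_pipeline_cache_header (validated again by
 * radv_pipeline_cache_load on import), followed by the serialized entries
 * with their variant pointers cleared.
 */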
VkResult radv_GetPipelineCacheData(
        VkDevice                                    _device,
        VkPipelineCache                             _cache,
        size_t*                                     pDataSize,
        void*                                       pData)
{
        RADV_FROM_HANDLE(radv_device, device, _device);
        RADV_FROM_HANDLE(radv_pipeline_cache, cache, _cache);
        struct vk_pipeline_cache_header *header;
        VkResult result = VK_SUCCESS;

        radv_pipeline_cache_lock(cache);

        const size_t size = sizeof(*header) + cache->total_size;
        if (pData == NULL) {
                radv_pipeline_cache_unlock(cache);
                *pDataSize = size;
                return VK_SUCCESS;
        }
        if (*pDataSize < sizeof(*header)) {
                radv_pipeline_cache_unlock(cache);
                *pDataSize = 0;
                return VK_INCOMPLETE;
        }
        void *p = pData, *end = pData + *pDataSize;
        header = p;
        header->header_size = align(sizeof(*header), alignof(struct cache_entry));
        header->header_version = VK_PIPELINE_CACHE_HEADER_VERSION_ONE;
        header->vendor_id = ATI_VENDOR_ID;
        header->device_id = device->physical_device->rad_info.pci_id;
        memcpy(header->uuid, device->physical_device->cache_uuid, VK_UUID_SIZE);
        p += header->header_size;

        struct cache_entry *entry;
        for (uint32_t i = 0; i < cache->table_size; i++) {
                if (!cache->hash_table[i])
                        continue;
                entry = cache->hash_table[i];
                const uint32_t entry_sz = entry_size(entry);
                if (end < p + entry_sz) {
                        result = VK_INCOMPLETE;
                        break;
                }

                memcpy(p, entry, entry_sz);
                for (int j = 0; j < MESA_SHADER_STAGES; ++j)
                        ((struct cache_entry *)p)->variants[j] = NULL;
                p += entry_sz;
        }
        *pDataSize = p - pData;

        radv_pipeline_cache_unlock(cache);
        return result;
}

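/* Entries are moved rather than copied: a merged entry's slot in the
 * source table is nulled out so that it is not freed twice when both
 * caches are destroyed.
 */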
static void
radv_pipeline_cache_merge(struct radv_pipeline_cache *dst,
                          struct radv_pipeline_cache *src)
{
        for (uint32_t i = 0; i < src->table_size; i++) {
                struct cache_entry *entry = src->hash_table[i];
                if (!entry || radv_pipeline_cache_search(dst, entry->sha1))
                        continue;

                radv_pipeline_cache_add_entry(dst, entry);

                src->hash_table[i] = NULL;
        }
}

VkResult radv_MergePipelineCaches(
        VkDevice                                    _device,
        VkPipelineCache                             destCache,
        uint32_t                                    srcCacheCount,
        const VkPipelineCache*                      pSrcCaches)
{
        RADV_FROM_HANDLE(radv_pipeline_cache, dst, destCache);

        for (uint32_t i = 0; i < srcCacheCount; i++) {
                RADV_FROM_HANDLE(radv_pipeline_cache, src, pSrcCaches[i]);

                radv_pipeline_cache_merge(dst, src);
        }

        return VK_SUCCESS;
}