/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
24 #include "util/mesa-sha1.h"
25 #include "util/debug.h"
26 #include "util/disk_cache.h"
27 #include "util/u_atomic.h"
28 #include "radv_debug.h"
29 #include "radv_private.h"
30 #include "radv_shader.h"
32 #include "ac_nir_to_llvm.h"
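
/*
 * In-memory pipeline cache for RADV: compiled shader variants are keyed by
 * a SHA-1 of their inputs and stored in a per-cache hash table. Entries are
 * also mirrored to the on-disk cache (util/disk_cache) when it is available,
 * and can be serialized to / restored from the blob format used by
 * vkGetPipelineCacheData() and VkPipelineCacheCreateInfo::pInitialData.
 */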
struct cache_entry_variant_info {
	struct radv_shader_variant_info variant_info;
	struct ac_shader_config config;
};

struct cache_entry {
	union {
		unsigned char sha1[20];
		uint32_t sha1_dw[5];
	};
	uint32_t code_sizes[MESA_SHADER_STAGES];
	struct radv_shader_variant *variants[MESA_SHADER_STAGES];
	char code[0];
};
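
/*
 * Each cache is an open-addressed hash table of cache_entry pointers. The
 * entry's SHA-1 doubles as the hash: sha1_dw lets the first dword be used
 * directly as the probe start. variants[] holds in-memory pointers only and
 * is zeroed whenever an entry is serialized; code[] is a flexible array
 * carrying, for each active stage, a cache_entry_variant_info followed by
 * the compiled machine code.
 */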
void
radv_pipeline_cache_init(struct radv_pipeline_cache *cache,
			 struct radv_device *device)
{
	cache->device = device;
	pthread_mutex_init(&cache->mutex, NULL);

	cache->modified = false;
	cache->kernel_count = 0;
	cache->total_size = 0;
	cache->table_size = 1024;
	const size_t byte_size = cache->table_size * sizeof(cache->hash_table[0]);
	cache->hash_table = malloc(byte_size);

	/* We don't consider allocation failure fatal, we just start with a 0-sized
	 * cache. Disable caching when we want to keep shader debug info, since
	 * we don't get the debug info on cached shaders. */
	if (cache->hash_table == NULL ||
	    (device->instance->debug_flags & RADV_DEBUG_NO_CACHE) ||
	    device->keep_shader_info)
		cache->table_size = 0;
	else
		memset(cache->hash_table, 0, byte_size);
}
void
radv_pipeline_cache_finish(struct radv_pipeline_cache *cache)
{
	for (unsigned i = 0; i < cache->table_size; ++i)
		if (cache->hash_table[i]) {
			for (int j = 0; j < MESA_SHADER_STAGES; ++j) {
				if (cache->hash_table[i]->variants[j])
					radv_shader_variant_destroy(cache->device,
								    cache->hash_table[i]->variants[j]);
			}
			vk_free(&cache->alloc, cache->hash_table[i]);
		}
	pthread_mutex_destroy(&cache->mutex);
	free(cache->hash_table);
}
static uint32_t
entry_size(struct cache_entry *entry)
{
	size_t ret = sizeof(*entry);
	for (int i = 0; i < MESA_SHADER_STAGES; ++i)
		if (entry->code_sizes[i])
			ret += sizeof(struct cache_entry_variant_info) + entry->code_sizes[i];
	return ret;
}
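
/*
 * Hash everything that affects the compiled code: the pipeline key, the
 * pipeline layout, each stage's SPIR-V module hash, entry point name and
 * specialization constants, plus the pipeline create flags.
 */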
void
radv_hash_shaders(unsigned char *hash,
		  const VkPipelineShaderStageCreateInfo **stages,
		  const struct radv_pipeline_layout *layout,
		  const struct radv_pipeline_key *key,
		  uint32_t flags)
{
	struct mesa_sha1 ctx;

	_mesa_sha1_init(&ctx);
	if (key)
		_mesa_sha1_update(&ctx, key, sizeof(*key));
	if (layout)
		_mesa_sha1_update(&ctx, layout->sha1, sizeof(layout->sha1));

	for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
		if (stages[i]) {
			RADV_FROM_HANDLE(radv_shader_module, module, stages[i]->module);
			const VkSpecializationInfo *spec_info = stages[i]->pSpecializationInfo;

			_mesa_sha1_update(&ctx, module->sha1, sizeof(module->sha1));
			_mesa_sha1_update(&ctx, stages[i]->pName, strlen(stages[i]->pName));
			if (spec_info) {
				_mesa_sha1_update(&ctx, spec_info->pMapEntries,
						  spec_info->mapEntryCount * sizeof spec_info->pMapEntries[0]);
				_mesa_sha1_update(&ctx, spec_info->pData, spec_info->dataSize);
			}
		}
	}
	_mesa_sha1_update(&ctx, &flags, 4);
	_mesa_sha1_final(&ctx, hash);
}
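
/*
 * Linear-probing lookup: start at the slot given by the first dword of the
 * SHA-1 and walk forward until the key matches or an empty slot proves the
 * entry is absent. Callers must hold cache->mutex.
 */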
static struct cache_entry *
radv_pipeline_cache_search_unlocked(struct radv_pipeline_cache *cache,
				    const unsigned char *sha1)
{
	const uint32_t mask = cache->table_size - 1;
	const uint32_t start = (*(uint32_t *) sha1);

	if (cache->table_size == 0)
		return NULL;

	for (uint32_t i = 0; i < cache->table_size; i++) {
		const uint32_t index = (start + i) & mask;
		struct cache_entry *entry = cache->hash_table[index];

		if (!entry)
			return NULL;

		if (memcmp(entry->sha1, sha1, sizeof(entry->sha1)) == 0) {
			return entry;
		}
	}

	unreachable("hash table should never be full");
	return NULL;
}
static struct cache_entry *
radv_pipeline_cache_search(struct radv_pipeline_cache *cache,
			   const unsigned char *sha1)
{
	struct cache_entry *entry;

	pthread_mutex_lock(&cache->mutex);

	entry = radv_pipeline_cache_search_unlocked(cache, sha1);

	pthread_mutex_unlock(&cache->mutex);

	return entry;
}
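
/*
 * Insert an entry at the first free slot on its probe sequence. The load
 * factor is kept below 1/2 (see radv_pipeline_cache_add_entry), so a free
 * slot is guaranteed to exist.
 */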
static void
radv_pipeline_cache_set_entry(struct radv_pipeline_cache *cache,
			      struct cache_entry *entry)
{
	const uint32_t mask = cache->table_size - 1;
	const uint32_t start = entry->sha1_dw[0];

	/* We'll always be able to insert when we get here. */
	assert(cache->kernel_count < cache->table_size / 2);

	for (uint32_t i = 0; i < cache->table_size; i++) {
		const uint32_t index = (start + i) & mask;
		if (!cache->hash_table[index]) {
			cache->hash_table[index] = entry;
			break;
		}
	}

	cache->total_size += entry_size(entry);
	cache->kernel_count++;
}
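
/*
 * Double the table and re-insert every existing entry; the entries
 * themselves are reused as-is, only the table is reallocated.
 */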
static VkResult
radv_pipeline_cache_grow(struct radv_pipeline_cache *cache)
{
	const uint32_t table_size = cache->table_size * 2;
	const uint32_t old_table_size = cache->table_size;
	const size_t byte_size = table_size * sizeof(cache->hash_table[0]);
	struct cache_entry **table;
	struct cache_entry **old_table = cache->hash_table;

	table = malloc(byte_size);
	if (table == NULL)
		return vk_error(cache->device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

	cache->hash_table = table;
	cache->table_size = table_size;
	cache->kernel_count = 0;
	cache->total_size = 0;

	memset(cache->hash_table, 0, byte_size);
	for (uint32_t i = 0; i < old_table_size; i++) {
		struct cache_entry *entry = old_table[i];
		if (!entry)
			continue;

		radv_pipeline_cache_set_entry(cache, entry);
	}

	free(old_table);

	return VK_SUCCESS;
}
static void
radv_pipeline_cache_add_entry(struct radv_pipeline_cache *cache,
			      struct cache_entry *entry)
{
	if (cache->kernel_count == cache->table_size / 2)
		radv_pipeline_cache_grow(cache);

	/* Failing to grow that hash table isn't fatal, but may mean we don't
	 * have enough space to add this new kernel. Only add it if there's room.
	 */
	if (cache->kernel_count < cache->table_size / 2)
		radv_pipeline_cache_set_entry(cache, entry);
}
static bool
radv_is_cache_disabled(struct radv_device *device)
{
	/* Pipeline caches can be disabled with RADV_DEBUG=nocache, with
	 * MESA_GLSL_CACHE_DISABLE=1, and when VK_AMD_shader_info is requested.
	 */
	return (device->instance->debug_flags & RADV_DEBUG_NO_CACHE) ||
	       device->keep_shader_info;
}
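
/*
 * Look a pipeline's shaders up by SHA-1, first in the given (application)
 * cache, then in the on-disk cache. A disk hit is copied into the in-memory
 * cache so later lookups stay cheap. On success the variants' reference
 * counts are bumped and the pointers are written to variants[].
 */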
bool
radv_create_shader_variants_from_pipeline_cache(struct radv_device *device,
						struct radv_pipeline_cache *cache,
						const unsigned char *sha1,
						struct radv_shader_variant **variants,
						bool *found_in_application_cache)
{
	struct cache_entry *entry;

	if (!cache) {
		cache = device->mem_cache;
		*found_in_application_cache = false;
	}

	pthread_mutex_lock(&cache->mutex);

	entry = radv_pipeline_cache_search_unlocked(cache, sha1);

	if (!entry) {
		*found_in_application_cache = false;

		/* Don't cache when we want debug info, since this isn't
		 * present in the cache.
		 */
		if (radv_is_cache_disabled(device) || !device->physical_device->disk_cache) {
			pthread_mutex_unlock(&cache->mutex);
			return false;
		}

		uint8_t disk_sha1[20];
		disk_cache_compute_key(device->physical_device->disk_cache,
				       sha1, 20, disk_sha1);
		entry = (struct cache_entry *)
			disk_cache_get(device->physical_device->disk_cache,
				       disk_sha1, NULL);
		if (!entry) {
			pthread_mutex_unlock(&cache->mutex);
			return false;
		} else {
			size_t size = entry_size(entry);
			struct cache_entry *new_entry = vk_alloc(&cache->alloc, size, 8,
								 VK_SYSTEM_ALLOCATION_SCOPE_CACHE);
			if (!new_entry) {
				free(entry);
				pthread_mutex_unlock(&cache->mutex);
				return false;
			}

			memcpy(new_entry, entry, entry_size(entry));
			free(entry);
			entry = new_entry;

			radv_pipeline_cache_add_entry(cache, new_entry);
		}
	}

	char *p = entry->code;
	for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
		if (!entry->variants[i] && entry->code_sizes[i]) {
			struct radv_shader_variant *variant;
			struct cache_entry_variant_info info;

			variant = calloc(1, sizeof(struct radv_shader_variant));
			if (!variant) {
				pthread_mutex_unlock(&cache->mutex);
				return false;
			}

			memcpy(&info, p, sizeof(struct cache_entry_variant_info));
			p += sizeof(struct cache_entry_variant_info);

			variant->config = info.config;
			variant->info = info.variant_info;
			variant->code_size = entry->code_sizes[i];
			variant->ref_count = 1;

			void *ptr = radv_alloc_shader_memory(device, variant);
			memcpy(ptr, p, entry->code_sizes[i]);
			p += entry->code_sizes[i];

			entry->variants[i] = variant;
		} else if (entry->code_sizes[i]) {
			p += sizeof(struct cache_entry_variant_info) + entry->code_sizes[i];
		}
	}

	for (int i = 0; i < MESA_SHADER_STAGES; ++i)
		if (entry->variants[i])
			p_atomic_inc(&entry->variants[i]->ref_count);

	memcpy(variants, entry->variants, sizeof(entry->variants));
	pthread_mutex_unlock(&cache->mutex);
	return true;
}
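
/*
 * Insert freshly compiled shaders. If an entry with the same SHA-1 is
 * already present (e.g. inserted by another thread), the caller's variants
 * are replaced by the cached ones so everyone shares one copy. New entries
 * are written to the on-disk cache before the in-memory variant pointers
 * are set, keeping the serialized bytes reproducible.
 */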
void
radv_pipeline_cache_insert_shaders(struct radv_device *device,
				   struct radv_pipeline_cache *cache,
				   const unsigned char *sha1,
				   struct radv_shader_variant **variants,
				   const void *const *codes,
				   const unsigned *code_sizes)
{
	if (!cache)
		cache = device->mem_cache;

	pthread_mutex_lock(&cache->mutex);
	struct cache_entry *entry = radv_pipeline_cache_search_unlocked(cache, sha1);
	if (entry) {
		for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
			if (entry->variants[i]) {
				radv_shader_variant_destroy(cache->device, variants[i]);
				variants[i] = entry->variants[i];
			} else {
				entry->variants[i] = variants[i];
			}
			if (variants[i])
				p_atomic_inc(&variants[i]->ref_count);
		}
		pthread_mutex_unlock(&cache->mutex);
		return;
	}

	/* Don't cache when we want debug info, since this isn't
	 * present in the cache.
	 */
	if (radv_is_cache_disabled(device)) {
		pthread_mutex_unlock(&cache->mutex);
		return;
	}

	size_t size = sizeof(*entry);
	for (int i = 0; i < MESA_SHADER_STAGES; ++i)
		if (variants[i])
			size += sizeof(struct cache_entry_variant_info) + code_sizes[i];

	entry = vk_alloc(&cache->alloc, size, 8,
			 VK_SYSTEM_ALLOCATION_SCOPE_CACHE);
	if (!entry) {
		pthread_mutex_unlock(&cache->mutex);
		return;
	}

	memset(entry, 0, sizeof(*entry));
	memcpy(entry->sha1, sha1, 20);

	char *p = entry->code;
	struct cache_entry_variant_info info;
	memset(&info, 0, sizeof(info));

	for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
		if (!variants[i])
			continue;

		entry->code_sizes[i] = code_sizes[i];

		info.config = variants[i]->config;
		info.variant_info = variants[i]->info;
		memcpy(p, &info, sizeof(struct cache_entry_variant_info));
		p += sizeof(struct cache_entry_variant_info);

		memcpy(p, codes[i], code_sizes[i]);
		p += code_sizes[i];
	}

	/* Always add cache items to disk. This will allow collection of
	 * compiled shaders by third parties such as steam, even if the app
	 * implements its own pipeline cache.
	 */
	if (device->physical_device->disk_cache) {
		uint8_t disk_sha1[20];
		disk_cache_compute_key(device->physical_device->disk_cache, sha1, 20,
				       disk_sha1);
		disk_cache_put(device->physical_device->disk_cache,
			       disk_sha1, entry, entry_size(entry), NULL);
	}

	/* We delay setting the variant so we have reproducible disk cache
	 * items.
	 */
	for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
		if (!variants[i])
			continue;

		entry->variants[i] = variants[i];
		p_atomic_inc(&variants[i]->ref_count);
	}

	radv_pipeline_cache_add_entry(cache, entry);

	cache->modified = true;
	pthread_mutex_unlock(&cache->mutex);
}
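
/*
 * Serialized blob header; field order follows the header Vulkan requires
 * at the start of vkGetPipelineCacheData() blobs.
 */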
struct cache_header {
	uint32_t header_size;
	uint32_t header_version;
	uint32_t vendor_id;
	uint32_t device_id;
	uint8_t  uuid[VK_UUID_SIZE];
};
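
/*
 * Validate a serialized cache blob and import its entries. The expected
 * layout, matching what radv_GetPipelineCacheData() produces, is:
 *
 *   struct cache_header
 *   struct cache_entry #0   (variants[] zeroed; code[] holds per-stage
 *                            cache_entry_variant_info + machine code)
 *   struct cache_entry #1
 *   ...
 *
 * Blobs from a different driver version, vendor, device or UUID are
 * rejected by the header checks below.
 */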
bool
radv_pipeline_cache_load(struct radv_pipeline_cache *cache,
			 const void *data, size_t size)
{
	struct radv_device *device = cache->device;
	struct cache_header header;

	if (size < sizeof(header))
		return false;
	memcpy(&header, data, sizeof(header));
	if (header.header_size < sizeof(header))
		return false;
	if (header.header_version != VK_PIPELINE_CACHE_HEADER_VERSION_ONE)
		return false;
	if (header.vendor_id != ATI_VENDOR_ID)
		return false;
	if (header.device_id != device->physical_device->rad_info.pci_id)
		return false;
	if (memcmp(header.uuid, device->physical_device->cache_uuid, VK_UUID_SIZE) != 0)
		return false;

	char *end = (void *) data + size;
	char *p = (void *) data + header.header_size;

	while (end - p >= sizeof(struct cache_entry)) {
		struct cache_entry *entry = (struct cache_entry *)p;
		struct cache_entry *dest_entry;
		size_t size = entry_size(entry);
		if (end - p < size)
			break;

		dest_entry = vk_alloc(&cache->alloc, size,
				      8, VK_SYSTEM_ALLOCATION_SCOPE_CACHE);
		if (dest_entry) {
			memcpy(dest_entry, entry, size);
			for (int i = 0; i < MESA_SHADER_STAGES; ++i)
				dest_entry->variants[i] = NULL;
			radv_pipeline_cache_add_entry(cache, dest_entry);
		}
		p += size;
	}

	return true;
}
VkResult radv_CreatePipelineCache(
	VkDevice                                    _device,
	const VkPipelineCacheCreateInfo*            pCreateInfo,
	const VkAllocationCallbacks*                pAllocator,
	VkPipelineCache*                            pPipelineCache)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	struct radv_pipeline_cache *cache;

	assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO);
	assert(pCreateInfo->flags == 0);

	cache = vk_alloc2(&device->alloc, pAllocator,
			  sizeof(*cache), 8,
			  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (cache == NULL)
		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

	if (pAllocator)
		cache->alloc = *pAllocator;
	else
		cache->alloc = device->alloc;

	radv_pipeline_cache_init(cache, device);

	if (pCreateInfo->initialDataSize > 0) {
		radv_pipeline_cache_load(cache,
					 pCreateInfo->pInitialData,
					 pCreateInfo->initialDataSize);
	}

	*pPipelineCache = radv_pipeline_cache_to_handle(cache);

	return VK_SUCCESS;
}
void radv_DestroyPipelineCache(
	VkDevice                                    _device,
	VkPipelineCache                             _cache,
	const VkAllocationCallbacks*                pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_pipeline_cache, cache, _cache);

	if (!cache)
		return;
	radv_pipeline_cache_finish(cache);

	vk_free2(&device->alloc, pAllocator, cache);
}
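
/*
 * Standard Vulkan two-call idiom: with pData == NULL, report the required
 * size; otherwise write the header plus as many whole entries as fit,
 * returning VK_INCOMPLETE if the buffer cannot hold them all.
 */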
VkResult radv_GetPipelineCacheData(
	VkDevice                                    _device,
	VkPipelineCache                             _cache,
	size_t*                                     pDataSize,
	void*                                       pData)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_pipeline_cache, cache, _cache);
	struct cache_header *header;
	VkResult result = VK_SUCCESS;

	pthread_mutex_lock(&cache->mutex);

	const size_t size = sizeof(*header) + cache->total_size;
	if (pData == NULL) {
		pthread_mutex_unlock(&cache->mutex);
		*pDataSize = size;
		return VK_SUCCESS;
	}
	if (*pDataSize < sizeof(*header)) {
		pthread_mutex_unlock(&cache->mutex);
		*pDataSize = 0;
		return VK_INCOMPLETE;
	}
	void *p = pData, *end = pData + *pDataSize;
	header = p;
	header->header_size = sizeof(*header);
	header->header_version = VK_PIPELINE_CACHE_HEADER_VERSION_ONE;
	header->vendor_id = ATI_VENDOR_ID;
	header->device_id = device->physical_device->rad_info.pci_id;
	memcpy(header->uuid, device->physical_device->cache_uuid, VK_UUID_SIZE);
	p += header->header_size;

	struct cache_entry *entry;
	for (uint32_t i = 0; i < cache->table_size; i++) {
		if (!cache->hash_table[i])
			continue;
		entry = cache->hash_table[i];
		const uint32_t size = entry_size(entry);
		if (end < p + size) {
			result = VK_INCOMPLETE;
			break;
		}

		memcpy(p, entry, size);
		for (int j = 0; j < MESA_SHADER_STAGES; ++j)
			((struct cache_entry *)p)->variants[j] = NULL;
		p += size;
	}
	*pDataSize = p - pData;

	pthread_mutex_unlock(&cache->mutex);
	return result;
}
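
/*
 * Move (not copy) every entry dst doesn't already have from src to dst;
 * clearing the src slot transfers ownership so the entry isn't freed twice
 * when both caches are destroyed.
 */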
static void
radv_pipeline_cache_merge(struct radv_pipeline_cache *dst,
			  struct radv_pipeline_cache *src)
{
	for (uint32_t i = 0; i < src->table_size; i++) {
		struct cache_entry *entry = src->hash_table[i];
		if (!entry || radv_pipeline_cache_search(dst, entry->sha1))
			continue;

		radv_pipeline_cache_add_entry(dst, entry);

		src->hash_table[i] = NULL;
	}
}
VkResult radv_MergePipelineCaches(
	VkDevice                                    _device,
	VkPipelineCache                             destCache,
	uint32_t                                    srcCacheCount,
	const VkPipelineCache*                      pSrcCaches)
{
	RADV_FROM_HANDLE(radv_pipeline_cache, dst, destCache);

	for (uint32_t i = 0; i < srcCacheCount; i++) {
		RADV_FROM_HANDLE(radv_pipeline_cache, src, pSrcCaches[i]);

		radv_pipeline_cache_merge(dst, src);
	}

	return VK_SUCCESS;
}