/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "util/mesa-sha1.h"
#include "util/debug.h"
#include "util/disk_cache.h"
#include "util/u_atomic.h"
#include "radv_debug.h"
#include "radv_private.h"
#include "radv_shader.h"

#include "ac_nir_to_llvm.h"

struct cache_entry_variant_info {
	struct ac_shader_variant_info variant_info;
	struct ac_shader_config config;
	uint32_t rsrc1, rsrc2;
};

struct cache_entry {
	union {
		unsigned char sha1[20];
		uint32_t sha1_dw[5];
	};
	uint32_t code_sizes[MESA_SHADER_STAGES];
	struct radv_shader_variant *variants[MESA_SHADER_STAGES];
	char code[0];
};
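
/* Entries are stored, serialized to disk, and handed to the application in
 * this layout: the flexible code[] array holds, for each stage with a
 * non-zero code_sizes[i], a cache_entry_variant_info header immediately
 * followed by code_sizes[i] bytes of compiled shader code. The variants[]
 * pointers are only meaningful in memory and are cleared before an entry is
 * written out.
 */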

void
radv_pipeline_cache_init(struct radv_pipeline_cache *cache,
			 struct radv_device *device)
{
	cache->device = device;
	pthread_mutex_init(&cache->mutex, NULL);

	cache->modified = false;
	cache->kernel_count = 0;
	cache->total_size = 0;
	cache->table_size = 1024;
	const size_t byte_size = cache->table_size * sizeof(cache->hash_table[0]);
	cache->hash_table = malloc(byte_size);

	/* We don't consider allocation failure fatal, we just start with a 0-sized
	 * cache. */
	if (cache->hash_table == NULL ||
	    (device->instance->debug_flags & RADV_DEBUG_NO_CACHE))
		cache->table_size = 0;
	else
		memset(cache->hash_table, 0, byte_size);
}
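
/* The table size starts at 1024 and only ever doubles (see
 * radv_pipeline_cache_grow), so it stays a power of two and lookups can wrap
 * with "(start + i) & mask" instead of a modulo.
 */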

void
radv_pipeline_cache_finish(struct radv_pipeline_cache *cache)
{
	for (unsigned i = 0; i < cache->table_size; ++i)
		if (cache->hash_table[i]) {
			for(int j = 0; j < MESA_SHADER_STAGES; ++j) {
				if (cache->hash_table[i]->variants[j])
					radv_shader_variant_destroy(cache->device,
								    cache->hash_table[i]->variants[j]);
			}
			vk_free(&cache->alloc, cache->hash_table[i]);
		}
	pthread_mutex_destroy(&cache->mutex);
	free(cache->hash_table);
}

static uint32_t
entry_size(struct cache_entry *entry)
{
	size_t ret = sizeof(*entry);
	for (int i = 0; i < MESA_SHADER_STAGES; ++i)
		if (entry->code_sizes[i])
			ret += sizeof(struct cache_entry_variant_info) + entry->code_sizes[i];
	return ret;
}

void
radv_hash_shader(unsigned char *hash, struct radv_shader_module *module,
		 const char *entrypoint,
		 const VkSpecializationInfo *spec_info,
		 const struct radv_pipeline_layout *layout,
		 const struct ac_shader_variant_key *key,
		 uint32_t flags)
{
	struct mesa_sha1 ctx;

	_mesa_sha1_init(&ctx);
	if (key)
		_mesa_sha1_update(&ctx, key, sizeof(*key));
	_mesa_sha1_update(&ctx, module->sha1, sizeof(module->sha1));
	_mesa_sha1_update(&ctx, entrypoint, strlen(entrypoint));
	if (layout)
		_mesa_sha1_update(&ctx, layout->sha1, sizeof(layout->sha1));
	if (spec_info) {
		_mesa_sha1_update(&ctx, spec_info->pMapEntries,
				  spec_info->mapEntryCount * sizeof spec_info->pMapEntries[0]);
		_mesa_sha1_update(&ctx, spec_info->pData, spec_info->dataSize);
	}
	_mesa_sha1_update(&ctx, &flags, 4);
	_mesa_sha1_final(&ctx, hash);
}
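
/* The flags word folds device-wide options that affect code generation into
 * the key, so toggling such an option cannot produce a stale cache hit. */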

void
radv_hash_shaders(unsigned char *hash,
		  const VkPipelineShaderStageCreateInfo **stages,
		  const struct radv_pipeline_layout *layout,
		  const struct ac_shader_variant_key *keys,
		  uint32_t flags)
{
	struct mesa_sha1 ctx;

	_mesa_sha1_init(&ctx);
	if (keys)
		_mesa_sha1_update(&ctx, keys, sizeof(*keys) * MESA_SHADER_STAGES);
	if (layout)
		_mesa_sha1_update(&ctx, layout->sha1, sizeof(layout->sha1));

	for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
		if (stages[i]) {
			RADV_FROM_HANDLE(radv_shader_module, module, stages[i]->module);
			const VkSpecializationInfo *spec_info = stages[i]->pSpecializationInfo;

			_mesa_sha1_update(&ctx, module->sha1, sizeof(module->sha1));
			_mesa_sha1_update(&ctx, stages[i]->pName, strlen(stages[i]->pName));
			if (spec_info) {
				_mesa_sha1_update(&ctx, spec_info->pMapEntries,
						  spec_info->mapEntryCount * sizeof spec_info->pMapEntries[0]);
				_mesa_sha1_update(&ctx, spec_info->pData, spec_info->dataSize);
			}
		}
	}
	_mesa_sha1_update(&ctx, &flags, 4);
	_mesa_sha1_final(&ctx, hash);
}
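
/* The in-memory cache is an open-addressed hash table with linear probing.
 * The first 32 bits of an entry's SHA-1 pick the initial bucket, and the
 * table is kept at most half full, so a probe always terminates.
 */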

static struct cache_entry *
radv_pipeline_cache_search_unlocked(struct radv_pipeline_cache *cache,
				    const unsigned char *sha1)
{
	const uint32_t mask = cache->table_size - 1;
	const uint32_t start = (*(uint32_t *) sha1);

	if (cache->table_size == 0)
		return NULL;

	for (uint32_t i = 0; i < cache->table_size; i++) {
		const uint32_t index = (start + i) & mask;
		struct cache_entry *entry = cache->hash_table[index];

		if (!entry)
			return NULL;

		if (memcmp(entry->sha1, sha1, sizeof(entry->sha1)) == 0) {
			return entry;
		}
	}

	unreachable("hash table should never be full");
}

static struct cache_entry *
radv_pipeline_cache_search(struct radv_pipeline_cache *cache,
			   const unsigned char *sha1)
{
	struct cache_entry *entry;

	pthread_mutex_lock(&cache->mutex);

	entry = radv_pipeline_cache_search_unlocked(cache, sha1);

	pthread_mutex_unlock(&cache->mutex);

	return entry;
}

struct radv_shader_variant *
radv_create_shader_variant_from_pipeline_cache(struct radv_device *device,
					       struct radv_pipeline_cache *cache,
					       const unsigned char *sha1)
{
	struct cache_entry *entry = NULL;

	if (cache)
		entry = radv_pipeline_cache_search(cache, sha1);
	else
		entry = radv_pipeline_cache_search(device->mem_cache, sha1);

	if (!entry) {
		if (!device->physical_device->disk_cache)
			return NULL;
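
		/* The on-disk key is a second hash: disk_cache_compute_key
		 * mixes the pipeline SHA-1 with the driver's own cache ID, so
		 * disk entries are effectively keyed per driver build. */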
		uint8_t disk_sha1[20];
		disk_cache_compute_key(device->physical_device->disk_cache,
				       sha1, 20, disk_sha1);
		entry = (struct cache_entry *)
			disk_cache_get(device->physical_device->disk_cache,
				       disk_sha1, NULL);
		if (!entry)
			return NULL;
	}

	if (!entry->variants[0]) {
		struct radv_shader_variant *variant;
		char *p = entry->code;
		struct cache_entry_variant_info info;

		variant = calloc(1, sizeof(struct radv_shader_variant));
		if (!variant)
			return NULL;

		memcpy(&info, p, sizeof(struct cache_entry_variant_info));
		p += sizeof(struct cache_entry_variant_info);

		variant->code_size = entry->code_sizes[0];
		variant->config = info.config;
		variant->info = info.variant_info;
		variant->rsrc1 = info.rsrc1;
		variant->rsrc2 = info.rsrc2;
		variant->ref_count = 1;

		void *ptr = radv_alloc_shader_memory(device, variant);
		memcpy(ptr, p, entry->code_sizes[0]);

		entry->variants[0] = variant;
	}

	p_atomic_inc(&entry->variants[0]->ref_count);
	return entry->variants[0];
}
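
/* Look up all stage variants for the given pipeline hash. On a hit, each
 * returned variant has had its ref_count incremented; the caller owns those
 * references and must eventually release them.
 */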
bool
radv_create_shader_variants_from_pipeline_cache(struct radv_device *device,
						struct radv_pipeline_cache *cache,
						const unsigned char *sha1,
						struct radv_shader_variant **variants)
{
	struct cache_entry *entry;
	if (cache)
		entry = radv_pipeline_cache_search(cache, sha1);
	else
		entry = radv_pipeline_cache_search(device->mem_cache, sha1);

	if (!entry) {
		if (!device->physical_device->disk_cache)
			return false;

		uint8_t disk_sha1[20];
		disk_cache_compute_key(device->physical_device->disk_cache,
				       sha1, 20, disk_sha1);
		entry = (struct cache_entry *)
			disk_cache_get(device->physical_device->disk_cache,
				       disk_sha1, NULL);
		if (!entry)
			return false;
	}

	char *p = entry->code;
	for(int i = 0; i < MESA_SHADER_STAGES; ++i) {
		if (!entry->variants[i] && entry->code_sizes[i]) {
			struct radv_shader_variant *variant;
			struct cache_entry_variant_info info;

			variant = calloc(1, sizeof(struct radv_shader_variant));
			if (!variant)
				return false;

			memcpy(&info, p, sizeof(struct cache_entry_variant_info));
			p += sizeof(struct cache_entry_variant_info);

			variant->config = info.config;
			variant->info = info.variant_info;
			variant->rsrc1 = info.rsrc1;
			variant->rsrc2 = info.rsrc2;
			variant->code_size = entry->code_sizes[i];
			variant->ref_count = 1;

			void *ptr = radv_alloc_shader_memory(device, variant);
			memcpy(ptr, p, entry->code_sizes[i]);
			p += entry->code_sizes[i];

			entry->variants[i] = variant;
		} else if (entry->code_sizes[i]) {
			/* Variant already materialized; skip its serialized
			 * bytes so the cursor stays in sync. */
			p += sizeof(struct cache_entry_variant_info) + entry->code_sizes[i];
		}
	}

	for (int i = 0; i < MESA_SHADER_STAGES; ++i)
		if (entry->variants[i])
			p_atomic_inc(&entry->variants[i]->ref_count);

	memcpy(variants, entry->variants, sizeof(entry->variants));
	return true;
}

static void
radv_pipeline_cache_set_entry(struct radv_pipeline_cache *cache,
			      struct cache_entry *entry)
{
	const uint32_t mask = cache->table_size - 1;
	const uint32_t start = entry->sha1_dw[0];

	/* We'll always be able to insert when we get here. */
	assert(cache->kernel_count < cache->table_size / 2);

	for (uint32_t i = 0; i < cache->table_size; i++) {
		const uint32_t index = (start + i) & mask;
		if (!cache->hash_table[index]) {
			cache->hash_table[index] = entry;
			break;
		}
	}

	cache->total_size += entry_size(entry);
	cache->kernel_count++;
}

static VkResult
radv_pipeline_cache_grow(struct radv_pipeline_cache *cache)
{
	const uint32_t table_size = cache->table_size * 2;
	const uint32_t old_table_size = cache->table_size;
	const size_t byte_size = table_size * sizeof(cache->hash_table[0]);
	struct cache_entry **table;
	struct cache_entry **old_table = cache->hash_table;

	table = malloc(byte_size);
	if (table == NULL)
		return VK_ERROR_OUT_OF_HOST_MEMORY;

	cache->hash_table = table;
	cache->table_size = table_size;
	cache->kernel_count = 0;
	cache->total_size = 0;

	memset(cache->hash_table, 0, byte_size);
	for (uint32_t i = 0; i < old_table_size; i++) {
		struct cache_entry *entry = old_table[i];
		if (!entry)
			continue;

		radv_pipeline_cache_set_entry(cache, entry);
	}

	free(old_table);

	return VK_SUCCESS;
}

static void
radv_pipeline_cache_add_entry(struct radv_pipeline_cache *cache,
			      struct cache_entry *entry)
{
	if (cache->kernel_count == cache->table_size / 2)
		radv_pipeline_cache_grow(cache);

	/* Failing to grow that hash table isn't fatal, but may mean we don't
	 * have enough space to add this new kernel. Only add it if there's room.
	 */
	if (cache->kernel_count < cache->table_size / 2)
		radv_pipeline_cache_set_entry(cache, entry);
}
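
/* Insert a single compiled shader. The variant actually stored is returned:
 * if another thread won the race and an entry already exists, the freshly
 * built variant is destroyed and the cached one is handed back instead.
 */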
struct radv_shader_variant *
radv_pipeline_cache_insert_shader(struct radv_device *device,
				  struct radv_pipeline_cache *cache,
				  const unsigned char *sha1,
				  struct radv_shader_variant *variant,
				  const void *code, unsigned code_size)
{
	if (!cache)
		cache = device->mem_cache;

	pthread_mutex_lock(&cache->mutex);
	struct cache_entry *entry = radv_pipeline_cache_search_unlocked(cache, sha1);
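	/* The mutex is held across both the lookup and the insertion below, so
	 * two threads compiling the same shader cannot both add an entry. */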
	if (entry) {
		if (entry->variants[0]) {
			radv_shader_variant_destroy(cache->device, variant);
			variant = entry->variants[0];
		} else {
			entry->variants[0] = variant;
		}
		p_atomic_inc(&variant->ref_count);
		pthread_mutex_unlock(&cache->mutex);
		return variant;
	}

	entry = vk_alloc(&cache->alloc, sizeof(*entry) + sizeof(struct cache_entry_variant_info) + code_size, 8,
			 VK_SYSTEM_ALLOCATION_SCOPE_CACHE);
	if (!entry) {
		pthread_mutex_unlock(&cache->mutex);
		return variant;
	}

	memset(entry, 0, sizeof(*entry));

	char* p = entry->code;
	struct cache_entry_variant_info info;

	info.config = variant->config;
	info.variant_info = variant->info;
	info.rsrc1 = variant->rsrc1;
	info.rsrc2 = variant->rsrc2;
	memcpy(p, &info, sizeof(struct cache_entry_variant_info));
	p += sizeof(struct cache_entry_variant_info);

	memcpy(entry->sha1, sha1, 20);
	memcpy(p, code, code_size);

	entry->code_sizes[0] = code_size;

	/* Set variant to NULL so we have reproducible cache items */
	entry->variants[0] = NULL;

	/* Always add cache items to disk. This will allow collection of
	 * compiled shaders by third parties such as steam, even if the app
	 * implements its own pipeline cache.
	 */
	if (device->physical_device->disk_cache) {
		uint8_t disk_sha1[20];
		disk_cache_compute_key(device->physical_device->disk_cache, sha1, 20,
				       disk_sha1);
		disk_cache_put(device->physical_device->disk_cache,
			       disk_sha1, entry, entry_size(entry), NULL);
	}

	entry->variants[0] = variant;
	p_atomic_inc(&variant->ref_count);

	radv_pipeline_cache_add_entry(cache, entry);

	cache->modified = true;
	pthread_mutex_unlock(&cache->mutex);
	return variant;
}

void
radv_pipeline_cache_insert_shaders(struct radv_device *device,
				   struct radv_pipeline_cache *cache,
				   const unsigned char *sha1,
				   struct radv_shader_variant **variants,
				   const void *const *codes,
				   const unsigned *code_sizes)
{
	if (!cache)
		cache = device->mem_cache;

	pthread_mutex_lock(&cache->mutex);
	struct cache_entry *entry = radv_pipeline_cache_search_unlocked(cache, sha1);
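	/* An entry loaded from disk or from app-supplied data starts with all
	 * variants[] slots NULL; fill any missing slots from the variants we
	 * just built and reuse the ones that already exist. */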
	if (entry) {
		for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
			if (entry->variants[i]) {
				radv_shader_variant_destroy(cache->device, variants[i]);
				variants[i] = entry->variants[i];
			} else {
				entry->variants[i] = variants[i];
			}
			if (variants[i])
				p_atomic_inc(&variants[i]->ref_count);
		}
		pthread_mutex_unlock(&cache->mutex);
		return;
	}
	size_t size = sizeof(*entry);
	for (int i = 0; i < MESA_SHADER_STAGES; ++i)
		if (variants[i])
			size += sizeof(struct cache_entry_variant_info) + code_sizes[i];

	entry = vk_alloc(&cache->alloc, size, 8,
			 VK_SYSTEM_ALLOCATION_SCOPE_CACHE);
	if (!entry) {
		pthread_mutex_unlock(&cache->mutex);
		return;
	}

	memset(entry, 0, sizeof(*entry));
	memcpy(entry->sha1, sha1, 20);

	char* p = entry->code;
	struct cache_entry_variant_info info;

	for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
		if (!variants[i])
			continue;

		entry->code_sizes[i] = code_sizes[i];

		info.config = variants[i]->config;
		info.variant_info = variants[i]->info;
		info.rsrc1 = variants[i]->rsrc1;
		info.rsrc2 = variants[i]->rsrc2;
		memcpy(p, &info, sizeof(struct cache_entry_variant_info));
		p += sizeof(struct cache_entry_variant_info);

		memcpy(p, codes[i], code_sizes[i]);
		p += code_sizes[i];
	}

	/* Always add cache items to disk. This will allow collection of
	 * compiled shaders by third parties such as steam, even if the app
	 * implements its own pipeline cache.
	 */
	if (device->physical_device->disk_cache) {
		uint8_t disk_sha1[20];
		disk_cache_compute_key(device->physical_device->disk_cache, sha1, 20,
				       disk_sha1);
		disk_cache_put(device->physical_device->disk_cache,
			       disk_sha1, entry, entry_size(entry), NULL);
	}

	/* We delay setting the variant so we have reproducible disk cache
	 * items.
	 */
	for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
		if (!variants[i])
			continue;

		entry->variants[i] = variants[i];
		p_atomic_inc(&variants[i]->ref_count);
	}

	radv_pipeline_cache_add_entry(cache, entry);

	cache->modified = true;
	pthread_mutex_unlock(&cache->mutex);
}

struct cache_header {
	uint32_t header_size;
	uint32_t header_version;
	uint32_t vendor_id;
	uint32_t device_id;
	uint8_t  uuid[VK_UUID_SIZE];
};
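
/* This matches the layout Vulkan requires for the first bytes of
 * vkGetPipelineCacheData output (VkPipelineCacheHeaderVersionOne). */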

void
radv_pipeline_cache_load(struct radv_pipeline_cache *cache,
			 const void *data, size_t size)
{
	struct radv_device *device = cache->device;
	struct cache_header header;

	if (size < sizeof(header))
		return;
	memcpy(&header, data, sizeof(header));
	if (header.header_size < sizeof(header))
		return;
	if (header.header_version != VK_PIPELINE_CACHE_HEADER_VERSION_ONE)
		return;
	if (header.vendor_id != ATI_VENDOR_ID)
		return;
	if (header.device_id != device->physical_device->rad_info.pci_id)
		return;
	if (memcmp(header.uuid, device->physical_device->cache_uuid, VK_UUID_SIZE) != 0)
		return;

	char *end = (void *) data + size;
	char *p = (void *) data + header.header_size;

	while (end - p >= sizeof(struct cache_entry)) {
		struct cache_entry *entry = (struct cache_entry*)p;
		struct cache_entry *dest_entry;
		size_t size = entry_size(entry);
		if (end - p < size)
			break;

		dest_entry = vk_alloc(&cache->alloc, size,
				      8, VK_SYSTEM_ALLOCATION_SCOPE_CACHE);
		if (dest_entry) {
			memcpy(dest_entry, entry, size);
			for (int i = 0; i < MESA_SHADER_STAGES; ++i)
				dest_entry->variants[i] = NULL;
			radv_pipeline_cache_add_entry(cache, dest_entry);
		}
		p += size;
	}
}

VkResult radv_CreatePipelineCache(
	VkDevice                                    _device,
	const VkPipelineCacheCreateInfo*            pCreateInfo,
	const VkAllocationCallbacks*                pAllocator,
	VkPipelineCache*                            pPipelineCache)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	struct radv_pipeline_cache *cache;

	assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO);
	assert(pCreateInfo->flags == 0);

	cache = vk_alloc2(&device->alloc, pAllocator,
			  sizeof(*cache), 8,
			  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (cache == NULL)
		return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

	if (pAllocator)
		cache->alloc = *pAllocator;
	else
		cache->alloc = device->alloc;

	radv_pipeline_cache_init(cache, device);

	if (pCreateInfo->initialDataSize > 0) {
		radv_pipeline_cache_load(cache,
					 pCreateInfo->pInitialData,
					 pCreateInfo->initialDataSize);
	}

	*pPipelineCache = radv_pipeline_cache_to_handle(cache);

	return VK_SUCCESS;
}

void radv_DestroyPipelineCache(
	VkDevice                                    _device,
	VkPipelineCache                             _cache,
	const VkAllocationCallbacks*                pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_pipeline_cache, cache, _cache);

	if (!cache)
		return;
	radv_pipeline_cache_finish(cache);

	vk_free2(&device->alloc, pAllocator, cache);
}

VkResult radv_GetPipelineCacheData(
	VkDevice                                    _device,
	VkPipelineCache                             _cache,
	size_t*                                     pDataSize,
	void*                                       pData)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_pipeline_cache, cache, _cache);
	struct cache_header *header;
	VkResult result = VK_SUCCESS;
	const size_t size = sizeof(*header) + cache->total_size;
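	/* Standard Vulkan two-call idiom: with pData == NULL just report the
	 * required size, otherwise copy out as many whole entries as fit. */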
	if (pData == NULL) {
		*pDataSize = size;
		return VK_SUCCESS;
	}
	if (*pDataSize < sizeof(*header)) {
		*pDataSize = 0;
		return VK_INCOMPLETE;
	}
	void *p = pData, *end = pData + *pDataSize;
	header = p;
	header->header_size = sizeof(*header);
	header->header_version = VK_PIPELINE_CACHE_HEADER_VERSION_ONE;
	header->vendor_id = ATI_VENDOR_ID;
	header->device_id = device->physical_device->rad_info.pci_id;
	memcpy(header->uuid, device->physical_device->cache_uuid, VK_UUID_SIZE);
	p += header->header_size;

	struct cache_entry *entry;
	for (uint32_t i = 0; i < cache->table_size; i++) {
		if (!cache->hash_table[i])
			continue;
		entry = cache->hash_table[i];
		const uint32_t size = entry_size(entry);
		if (end < p + size) {
			result = VK_INCOMPLETE;
			break;
		}

		memcpy(p, entry, size);
		for(int j = 0; j < MESA_SHADER_STAGES; ++j)
			((struct cache_entry*)p)->variants[j] = NULL;
		p += size;
	}
	*pDataSize = p - pData;

	return result;
}

static void
radv_pipeline_cache_merge(struct radv_pipeline_cache *dst,
			  struct radv_pipeline_cache *src)
{
	for (uint32_t i = 0; i < src->table_size; i++) {
		struct cache_entry *entry = src->hash_table[i];
		if (!entry || radv_pipeline_cache_search(dst, entry->sha1))
			continue;
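
		/* Move the entry rather than copy it: clearing the slot in src
		 * keeps the two caches from freeing the same entry twice. */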
		radv_pipeline_cache_add_entry(dst, entry);

		src->hash_table[i] = NULL;
	}
}

VkResult radv_MergePipelineCaches(
	VkDevice                                    _device,
	VkPipelineCache                             destCache,
	uint32_t                                    srcCacheCount,
	const VkPipelineCache*                      pSrcCaches)
{
	RADV_FROM_HANDLE(radv_pipeline_cache, dst, destCache);

	for (uint32_t i = 0; i < srcCacheCount; i++) {
		RADV_FROM_HANDLE(radv_pipeline_cache, src, pSrcCaches[i]);

		radv_pipeline_cache_merge(dst, src);
	}

	return VK_SUCCESS;
}