/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/mesa-sha1.h"
#include "util/debug.h"
#include "anv_private.h"

/* Remaining work:
 *
 * - Compact binding table layout so it's tight and not dependent on
 *   descriptor set layout.
 *
 * - Review prog_data struct for size and cacheability: struct
 *   brw_stage_prog_data has binding_table which uses a lot of uint32_t for 8
 *   bit quantities etc; param, pull_param, and image_params are pointers, we
 *   just need the compaction map.  Use bit fields for all bools, e.g.
 *   dual_src_blend.
 */
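
/* The cache is an open-addressing hash table with linear probing.  Each
 * slot holds a byte offset into the program stream's block pool where a
 * serialized cache_entry (followed by the kernel binary itself) lives;
 * empty slots are marked ~0.  Entries are keyed by a SHA-1 of the shader
 * and its compile parameters, computed by anv_hash_shader() below.
 */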

void
anv_pipeline_cache_init(struct anv_pipeline_cache *cache,
                        struct anv_device *device)
{
   cache->device = device;
   anv_state_stream_init(&cache->program_stream,
                         &device->instruction_block_pool);
   pthread_mutex_init(&cache->mutex, NULL);

   cache->kernel_count = 0;
   cache->total_size = 0;
   cache->table_size = 1024;
   const size_t byte_size = cache->table_size * sizeof(cache->hash_table[0]);
   cache->hash_table = malloc(byte_size);

   /* We don't consider allocation failure fatal, we just start with a 0-sized
    * cache. */
   if (cache->hash_table == NULL ||
       !env_var_as_boolean("ANV_ENABLE_PIPELINE_CACHE", true))
      cache->table_size = 0;
   else
      memset(cache->hash_table, 0xff, byte_size);
}

void
anv_pipeline_cache_finish(struct anv_pipeline_cache *cache)
{
   anv_state_stream_finish(&cache->program_stream);
   pthread_mutex_destroy(&cache->mutex);
   free(cache->hash_table);
}

struct cache_entry {
   unsigned char sha1[20];
   uint32_t prog_data_size;
   uint32_t kernel_size;
   uint32_t surface_count;
   uint32_t sampler_count;
   uint32_t image_count;

   char prog_data[0];

   /* kernel follows prog_data at next 64 byte aligned address */
};
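
/* Serialized layout of one entry, as measured by entry_size() and written
 * by anv_pipeline_cache_upload_kernel():
 *
 *    struct cache_entry
 *    prog_data               (prog_data_size bytes)
 *    param array             (nr_params * sizeof(*param) bytes)
 *    surface bindings        (surface_count * sizeof(struct anv_pipeline_binding))
 *    sampler bindings        (sampler_count * sizeof(struct anv_pipeline_binding))
 *    padding to a 64-byte boundary
 *    kernel binary           (kernel_size bytes)
 */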

static uint32_t
entry_size(struct cache_entry *entry)
{
   /* This returns the number of bytes needed to serialize an entry, which
    * doesn't include the alignment padding bytes.
    */

   struct brw_stage_prog_data *prog_data = (void *)entry->prog_data;
   const uint32_t param_size =
      prog_data->nr_params * sizeof(*prog_data->param);

   const uint32_t map_size =
      entry->surface_count * sizeof(struct anv_pipeline_binding) +
      entry->sampler_count * sizeof(struct anv_pipeline_binding);

   return sizeof(*entry) + entry->prog_data_size + param_size + map_size;
}

void
anv_hash_shader(unsigned char *hash, const void *key, size_t key_size,
                struct anv_shader_module *module,
                const char *entrypoint,
                const struct anv_pipeline_layout *pipeline_layout,
                const VkSpecializationInfo *spec_info)
{
   struct mesa_sha1 *ctx;

   ctx = _mesa_sha1_init();
   _mesa_sha1_update(ctx, key, key_size);
   _mesa_sha1_update(ctx, module->sha1, sizeof(module->sha1));
   _mesa_sha1_update(ctx, entrypoint, strlen(entrypoint));
   if (pipeline_layout) {
      _mesa_sha1_update(ctx, pipeline_layout->sha1,
                        sizeof(pipeline_layout->sha1));
   }

   /* hash in shader stage, pipeline layout? */

   if (spec_info) {
      _mesa_sha1_update(ctx, spec_info->pMapEntries,
                        spec_info->mapEntryCount * sizeof spec_info->pMapEntries[0]);
      _mesa_sha1_update(ctx, spec_info->pData, spec_info->dataSize);
   }

   _mesa_sha1_final(ctx, hash);
}
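
/* Probe the table for the given SHA-1.  On a hit, point *prog_data and the
 * bind map at the serialized entry and return the offset of the kernel
 * itself (the entry offset plus the 64-byte-aligned preamble); on a miss,
 * return NO_KERNEL.  The caller must hold cache->mutex.
 */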

static uint32_t
anv_pipeline_cache_search_unlocked(struct anv_pipeline_cache *cache,
                                   const unsigned char *sha1,
                                   const struct brw_stage_prog_data **prog_data,
                                   struct anv_pipeline_bind_map *map)
{
   const uint32_t mask = cache->table_size - 1;
   const uint32_t start = (*(uint32_t *) sha1);

   for (uint32_t i = 0; i < cache->table_size; i++) {
      const uint32_t index = (start + i) & mask;
      const uint32_t offset = cache->hash_table[index];

      if (offset == ~0)
         return NO_KERNEL;

      struct cache_entry *entry =
         cache->program_stream.block_pool->map + offset;
      if (memcmp(entry->sha1, sha1, sizeof(entry->sha1)) == 0) {
         if (prog_data) {
            assert(map);
            void *p = entry->prog_data;
            *prog_data = p;
            p += entry->prog_data_size;
            p += (*prog_data)->nr_params * sizeof(*(*prog_data)->param);
            map->surface_count = entry->surface_count;
            map->sampler_count = entry->sampler_count;
            map->image_count = entry->image_count;
            map->surface_to_descriptor = p;
            p += map->surface_count * sizeof(struct anv_pipeline_binding);
            map->sampler_to_descriptor = p;
         }

         return offset + align_u32(entry_size(entry), 64);
      }
   }

   /* This can happen if the pipeline cache is disabled via
    * ANV_ENABLE_PIPELINE_CACHE=false
    */
   return NO_KERNEL;
}

uint32_t
anv_pipeline_cache_search(struct anv_pipeline_cache *cache,
                          const unsigned char *sha1,
                          const struct brw_stage_prog_data **prog_data,
                          struct anv_pipeline_bind_map *map)
{
   uint32_t kernel;

   pthread_mutex_lock(&cache->mutex);

   kernel = anv_pipeline_cache_search_unlocked(cache, sha1, prog_data, map);

   pthread_mutex_unlock(&cache->mutex);

   return kernel;
}

static void
anv_pipeline_cache_set_entry(struct anv_pipeline_cache *cache,
                             struct cache_entry *entry, uint32_t entry_offset)
{
   const uint32_t mask = cache->table_size - 1;
   const uint32_t start = (*(uint32_t *) entry->sha1);

   /* We'll always be able to insert when we get here. */
   assert(cache->kernel_count < cache->table_size / 2);

   for (uint32_t i = 0; i < cache->table_size; i++) {
      const uint32_t index = (start + i) & mask;
      if (cache->hash_table[index] == ~0) {
         cache->hash_table[index] = entry_offset;
         break;
      }
   }

   cache->total_size += entry_size(entry) + entry->kernel_size;
   cache->kernel_count++;
}
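
/* The table is doubled once it becomes half full, so the linear probe in
 * anv_pipeline_cache_set_entry() above is always guaranteed to find a free
 * slot.
 */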

static VkResult
anv_pipeline_cache_grow(struct anv_pipeline_cache *cache)
{
   const uint32_t table_size = cache->table_size * 2;
   const uint32_t old_table_size = cache->table_size;
   const size_t byte_size = table_size * sizeof(cache->hash_table[0]);
   uint32_t *table;
   uint32_t *old_table = cache->hash_table;

   table = malloc(byte_size);
   if (table == NULL)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   cache->hash_table = table;
   cache->table_size = table_size;
   cache->kernel_count = 0;
   cache->total_size = 0;

   memset(cache->hash_table, 0xff, byte_size);
   for (uint32_t i = 0; i < old_table_size; i++) {
      const uint32_t offset = old_table[i];
      if (offset == ~0)
         continue;

      struct cache_entry *entry =
         cache->program_stream.block_pool->map + offset;
      anv_pipeline_cache_set_entry(cache, entry, offset);
   }

   free(old_table);

   return VK_SUCCESS;
}

static void
anv_pipeline_cache_add_entry(struct anv_pipeline_cache *cache,
                             struct cache_entry *entry, uint32_t entry_offset)
{
   if (cache->kernel_count == cache->table_size / 2)
      anv_pipeline_cache_grow(cache);

   /* Failing to grow the hash table isn't fatal, but may mean we don't
    * have enough space to add this new kernel. Only add it if there's room.
    */
   if (cache->kernel_count < cache->table_size / 2)
      anv_pipeline_cache_set_entry(cache, entry, entry_offset);
}
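
/* Serialize prog_data, its param array and the bind map into the program
 * stream directly in front of the kernel binary, then publish the entry in
 * the hash table.  Returns the block-pool offset of the kernel.  If another
 * thread uploaded the same shader while we were compiling it, the existing
 * kernel's offset is returned instead.
 */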

uint32_t
anv_pipeline_cache_upload_kernel(struct anv_pipeline_cache *cache,
                                 const unsigned char *sha1,
                                 const void *kernel, size_t kernel_size,
                                 const struct brw_stage_prog_data **prog_data,
                                 size_t prog_data_size,
                                 struct anv_pipeline_bind_map *map)
{
   pthread_mutex_lock(&cache->mutex);

   /* Before uploading, check again that another thread didn't upload this
    * shader while we were compiling it.
    */
   if (sha1) {
      uint32_t cached_kernel =
         anv_pipeline_cache_search_unlocked(cache, sha1, prog_data, map);
      if (cached_kernel != NO_KERNEL) {
         pthread_mutex_unlock(&cache->mutex);
         return cached_kernel;
      }
   }

   struct cache_entry *entry;

   assert((*prog_data)->nr_pull_params == 0);
   assert((*prog_data)->nr_image_params == 0);

   const uint32_t param_size =
      (*prog_data)->nr_params * sizeof(*(*prog_data)->param);

   const uint32_t map_size =
      map->surface_count * sizeof(struct anv_pipeline_binding) +
      map->sampler_count * sizeof(struct anv_pipeline_binding);

   const uint32_t preamble_size =
      align_u32(sizeof(*entry) + prog_data_size + param_size + map_size, 64);

   const uint32_t size = preamble_size + kernel_size;

   assert(size < cache->program_stream.block_pool->block_size);
   const struct anv_state state =
      anv_state_stream_alloc(&cache->program_stream, size, 64);

   entry = state.map;
   entry->prog_data_size = prog_data_size;
   entry->surface_count = map->surface_count;
   entry->sampler_count = map->sampler_count;
   entry->image_count = map->image_count;
   entry->kernel_size = kernel_size;

   void *p = entry->prog_data;
   memcpy(p, *prog_data, prog_data_size);
   p += prog_data_size;

   memcpy(p, (*prog_data)->param, param_size);
   ((struct brw_stage_prog_data *)entry->prog_data)->param = p;
   p += param_size;

   memcpy(p, map->surface_to_descriptor,
          map->surface_count * sizeof(struct anv_pipeline_binding));
   map->surface_to_descriptor = p;
   p += map->surface_count * sizeof(struct anv_pipeline_binding);

   memcpy(p, map->sampler_to_descriptor,
          map->sampler_count * sizeof(struct anv_pipeline_binding));
   map->sampler_to_descriptor = p;

   if (sha1) {
      assert(anv_pipeline_cache_search_unlocked(cache, sha1,
                                                NULL, NULL) == NO_KERNEL);

      memcpy(entry->sha1, sha1, sizeof(entry->sha1));
      anv_pipeline_cache_add_entry(cache, entry, state.offset);
   }

   pthread_mutex_unlock(&cache->mutex);

   memcpy(state.map + preamble_size, kernel, kernel_size);

   if (!cache->device->info.has_llc)
      anv_state_clflush(state);

   *prog_data = (const struct brw_stage_prog_data *) entry->prog_data;

   return state.offset + preamble_size;
}
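
/* This layout is dictated by the Vulkan spec: VkPipelineCache data must
 * begin with four little-endian uint32 fields (header length, header
 * version, vendor ID, device ID) followed by a VK_UUID_SIZE-byte pipeline
 * cache UUID.
 */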

struct cache_header {
   uint32_t header_size;
   uint32_t header_version;
   uint32_t vendor_id;
   uint32_t device_id;
   uint8_t  uuid[VK_UUID_SIZE];
};
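
/* Loading validates the header against this device, then replays every
 * serialized entry through anv_pipeline_cache_upload_kernel(), which copies
 * the data into the program stream and rebuilds the hash table.
 */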

static void
anv_pipeline_cache_load(struct anv_pipeline_cache *cache,
                        const void *data, size_t size)
{
   struct anv_device *device = cache->device;
   struct cache_header header;
   uint8_t uuid[VK_UUID_SIZE];

   if (size < sizeof(header))
      return;
   memcpy(&header, data, sizeof(header));
   if (header.header_size < sizeof(header))
      return;
   if (header.header_version != VK_PIPELINE_CACHE_HEADER_VERSION_ONE)
      return;
   if (header.vendor_id != 0x8086)
      return;
   if (header.device_id != device->chipset_id)
      return;
   anv_device_get_cache_uuid(uuid);
   if (memcmp(header.uuid, uuid, VK_UUID_SIZE) != 0)
      return;

   void *end = (void *) data + size;
   void *p = (void *) data + header.header_size;

   while (p < end) {
      struct cache_entry *entry = p;

      void *data = entry->prog_data;

      /* Make a copy of prog_data so that it's mutable */
      uint8_t prog_data_tmp[512];
      assert(entry->prog_data_size <= sizeof(prog_data_tmp));
      memcpy(prog_data_tmp, data, entry->prog_data_size);
      struct brw_stage_prog_data *prog_data = (void *)prog_data_tmp;
      data += entry->prog_data_size;

      prog_data->param = data;
      data += prog_data->nr_params * sizeof(*prog_data->param);

      struct anv_pipeline_binding *surface_to_descriptor = data;
      data += entry->surface_count * sizeof(struct anv_pipeline_binding);
      struct anv_pipeline_binding *sampler_to_descriptor = data;
      data += entry->sampler_count * sizeof(struct anv_pipeline_binding);
      void *kernel = (void *) entry + align_u32(entry_size(entry), 64);

      struct anv_pipeline_bind_map map = {
         .surface_count = entry->surface_count,
         .sampler_count = entry->sampler_count,
         .image_count = entry->image_count,
         .surface_to_descriptor = surface_to_descriptor,
         .sampler_to_descriptor = sampler_to_descriptor
      };

      const struct brw_stage_prog_data *const_prog_data = prog_data;

      anv_pipeline_cache_upload_kernel(cache, entry->sha1,
                                       kernel, entry->kernel_size,
                                       &const_prog_data,
                                       entry->prog_data_size, &map);
      p = kernel + entry->kernel_size;
   }
}

VkResult anv_CreatePipelineCache(
    VkDevice                                    _device,
    const VkPipelineCacheCreateInfo*            pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkPipelineCache*                            pPipelineCache)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_pipeline_cache *cache;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO);
   assert(pCreateInfo->flags == 0);

   cache = anv_alloc2(&device->alloc, pAllocator,
                      sizeof(*cache), 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (cache == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   anv_pipeline_cache_init(cache, device);

   if (pCreateInfo->initialDataSize > 0)
      anv_pipeline_cache_load(cache,
                              pCreateInfo->pInitialData,
                              pCreateInfo->initialDataSize);

   *pPipelineCache = anv_pipeline_cache_to_handle(cache);

   return VK_SUCCESS;
}

void anv_DestroyPipelineCache(
    VkDevice                                    _device,
    VkPipelineCache                             _cache,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache);

   anv_pipeline_cache_finish(cache);

   anv_free2(&device->alloc, pAllocator, cache);
}

VkResult anv_GetPipelineCacheData(
    VkDevice                                    _device,
    VkPipelineCache                             _cache,
    size_t*                                     pDataSize,
    void*                                       pData)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache);
   struct cache_header *header;

   const size_t size = sizeof(*header) + cache->total_size;

   if (pData == NULL) {
      *pDataSize = size;
      return VK_SUCCESS;
   }

   if (*pDataSize < sizeof(*header)) {
      *pDataSize = 0;
      return VK_INCOMPLETE;
   }

   void *p = pData, *end = pData + *pDataSize;
   header = p;
   header->header_size = sizeof(*header);
   header->header_version = VK_PIPELINE_CACHE_HEADER_VERSION_ONE;
   header->vendor_id = 0x8086;
   header->device_id = device->chipset_id;
   anv_device_get_cache_uuid(header->uuid);
   p += header->header_size;

   struct cache_entry *entry;
   for (uint32_t i = 0; i < cache->table_size; i++) {
      if (cache->hash_table[i] == ~0)
         continue;

      entry = cache->program_stream.block_pool->map + cache->hash_table[i];
      const uint32_t size = entry_size(entry);
      if (end < p + size + entry->kernel_size)
         break;

      memcpy(p, entry, size);
      p += size;

      void *kernel = (void *) entry + align_u32(size, 64);

      memcpy(p, kernel, entry->kernel_size);
      p += entry->kernel_size;
   }

   *pDataSize = p - pData;

   return VK_SUCCESS;
}

static void
anv_pipeline_cache_merge(struct anv_pipeline_cache *dst,
                         struct anv_pipeline_cache *src)
{
   for (uint32_t i = 0; i < src->table_size; i++) {
      const uint32_t offset = src->hash_table[i];
      if (offset == ~0)
         continue;

      struct cache_entry *entry =
         src->program_stream.block_pool->map + offset;

      if (anv_pipeline_cache_search(dst, entry->sha1, NULL, NULL) != NO_KERNEL)
         continue;

      anv_pipeline_cache_add_entry(dst, entry, offset);
   }
}
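
/* Note that merging only copies hash-table slots: every cache's program
 * stream allocates from the device-wide instruction_block_pool (see
 * anv_pipeline_cache_init()), so entry offsets from the source cache are
 * equally valid in the destination and no kernel data needs to be copied.
 */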

VkResult anv_MergePipelineCaches(
    VkDevice                                    _device,
    VkPipelineCache                             destCache,
    uint32_t                                    srcCacheCount,
    const VkPipelineCache*                      pSrcCaches)
{
   ANV_FROM_HANDLE(anv_pipeline_cache, dst, destCache);

   for (uint32_t i = 0; i < srcCacheCount; i++) {
      ANV_FROM_HANDLE(anv_pipeline_cache, src, pSrcCaches[i]);

      anv_pipeline_cache_merge(dst, src);
   }

   return VK_SUCCESS;
}