[mesa.git] / src / intel / vulkan / anv_pipeline_cache.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/mesa-sha1.h"
#include "util/debug.h"
#include "anv_private.h"

struct shader_bin_key {
   uint32_t size;
   uint8_t data[0]; /* `size` bytes of key data follow the header */
};

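/* An anv_shader_bin is a single allocation with trailing data, laid out as
 * follows (each section padded to an 8-byte boundary):
 *
 *    struct anv_shader_bin
 *    prog_data                (prog_data_size bytes)
 *    struct shader_bin_key    (uint32_t size + key_size bytes of data)
 *    surface bindings         (surface_count * anv_pipeline_binding)
 *    sampler bindings         (sampler_count * anv_pipeline_binding)
 *
 * anv_shader_bin_size() computes the total, and anv_shader_bin_get_key()
 * walks the same layout to find the key.
 */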
static size_t
anv_shader_bin_size(uint32_t prog_data_size, uint32_t key_size,
                    uint32_t surface_count, uint32_t sampler_count)
{
   const uint32_t binding_data_size =
      (surface_count + sampler_count) * sizeof(struct anv_pipeline_binding);

   return align_u32(sizeof(struct anv_shader_bin), 8) +
          align_u32(prog_data_size, 8) +
          align_u32(sizeof(uint32_t) + key_size, 8) +
          align_u32(binding_data_size, 8);
}
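
/* For illustration only: with a hypothetical 20-byte key, 100 bytes of
 * prog_data, and 3 surface + 1 sampler bindings, and assuming for the sake
 * of arithmetic that sizeof(struct anv_pipeline_binding) == 8, this works
 * out to:
 *
 *    align_u32(sizeof(struct anv_shader_bin), 8)   (the struct itself)
 *  + align_u32(100, 8)         = 104               (prog_data)
 *  + align_u32(4 + 20, 8)      = 24                (key header + data)
 *  + align_u32((3 + 1) * 8, 8) = 32                (binding table data)
 */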

static inline const struct shader_bin_key *
anv_shader_bin_get_key(const struct anv_shader_bin *shader)
{
   const void *data = shader;
   data += align_u32(sizeof(struct anv_shader_bin), 8);
   data += align_u32(shader->prog_data_size, 8);
   return data;
}

struct anv_shader_bin *
anv_shader_bin_create(struct anv_device *device,
                      const void *key_data, uint32_t key_size,
                      const void *kernel_data, uint32_t kernel_size,
                      const void *prog_data, uint32_t prog_data_size,
                      const struct anv_pipeline_bind_map *bind_map)
{
   const size_t size =
      anv_shader_bin_size(prog_data_size, key_size,
                          bind_map->surface_count, bind_map->sampler_count);

   struct anv_shader_bin *shader =
      anv_alloc(&device->alloc, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!shader)
      return NULL;

   shader->ref_cnt = 1;

   shader->kernel =
      anv_state_pool_alloc(&device->instruction_state_pool, kernel_size, 64);
   memcpy(shader->kernel.map, kernel_data, kernel_size);
   shader->kernel_size = kernel_size;
   shader->bind_map = *bind_map;
   shader->prog_data_size = prog_data_size;

   /* Now fill out the trailing data; the layout must match
    * anv_shader_bin_size() and anv_shader_bin_get_key() above.
    */
   void *data = shader;
   data += align_u32(sizeof(struct anv_shader_bin), 8);

   memcpy(data, prog_data, prog_data_size);
   data += align_u32(prog_data_size, 8);

   struct shader_bin_key *key = data;
   key->size = key_size;
   memcpy(key->data, key_data, key_size);
   data += align_u32(sizeof(*key) + key_size, 8);

   shader->bind_map.surface_to_descriptor = data;
   memcpy(data, bind_map->surface_to_descriptor,
          bind_map->surface_count * sizeof(struct anv_pipeline_binding));
   data += bind_map->surface_count * sizeof(struct anv_pipeline_binding);

   shader->bind_map.sampler_to_descriptor = data;
   memcpy(data, bind_map->sampler_to_descriptor,
          bind_map->sampler_count * sizeof(struct anv_pipeline_binding));

   return shader;
}
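
/* Shader bins are reference counted: they are created with ref_cnt == 1 and
 * torn down with anv_shader_bin_destroy() once the last reference is
 * dropped (the ref/unref helpers presumably live in anv_private.h).
 */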

void
anv_shader_bin_destroy(struct anv_device *device,
                       struct anv_shader_bin *shader)
{
   assert(shader->ref_cnt == 0);
   anv_state_pool_free(&device->instruction_state_pool, shader->kernel);
   anv_free(&device->alloc, shader);
}

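/* The two helpers below serialize a shader bin: the struct and its trailing
 * data are written first, followed by the kernel binary itself.
 */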
static size_t
anv_shader_bin_data_size(const struct anv_shader_bin *shader)
{
   return anv_shader_bin_size(shader->prog_data_size,
                              anv_shader_bin_get_key(shader)->size,
                              shader->bind_map.surface_count,
                              shader->bind_map.sampler_count) +
          align_u32(shader->kernel_size, 8);
}

static void
anv_shader_bin_write_data(const struct anv_shader_bin *shader, void *data)
{
   size_t struct_size =
      anv_shader_bin_size(shader->prog_data_size,
                          anv_shader_bin_get_key(shader)->size,
                          shader->bind_map.surface_count,
                          shader->bind_map.sampler_count);

   memcpy(data, shader, struct_size);
   data += struct_size;

   memcpy(data, shader->kernel.map, shader->kernel_size);
}

/* Remaining work:
 *
 * - Compact the binding table layout so it's tight and not dependent on the
 *   descriptor set layout.
 *
 * - Review the prog_data struct for size and cacheability: struct
 *   brw_stage_prog_data has a binding_table that uses a lot of uint32_t for
 *   8-bit quantities, etc.; param, pull_param, and image_params are
 *   pointers, but we only need the compaction map. Use bit fields for all
 *   bools, e.g. dual_src_blend.
 */

void
anv_pipeline_cache_init(struct anv_pipeline_cache *cache,
                        struct anv_device *device)
{
   cache->device = device;
   anv_state_stream_init(&cache->program_stream,
                         &device->instruction_block_pool);
   pthread_mutex_init(&cache->mutex, NULL);

   cache->kernel_count = 0;
   cache->total_size = 0;
   cache->table_size = 1024;
   const size_t byte_size = cache->table_size * sizeof(cache->hash_table[0]);
   cache->hash_table = malloc(byte_size);

   /* We don't consider allocation failure fatal; we just start with a
    * 0-sized cache.  Setting ANV_ENABLE_PIPELINE_CACHE=false disables the
    * cache the same way.
    */
   if (cache->hash_table == NULL ||
       !env_var_as_boolean("ANV_ENABLE_PIPELINE_CACHE", true))
      cache->table_size = 0;
   else
      memset(cache->hash_table, 0xff, byte_size);
}

void
anv_pipeline_cache_finish(struct anv_pipeline_cache *cache)
{
   anv_state_stream_finish(&cache->program_stream);
   pthread_mutex_destroy(&cache->mutex);
   free(cache->hash_table);
}

struct cache_entry {
   unsigned char sha1[20];
   uint32_t prog_data_size;
   uint32_t kernel_size;
   uint32_t surface_count;
   uint32_t sampler_count;
   uint32_t image_count;

   char prog_data[0];

   /* kernel follows prog_data at the next 64-byte-aligned address */
};

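/* In the program stream, each cache entry is laid out as follows; only the
 * start of the kernel is padded out to a 64-byte boundary:
 *
 *    struct cache_entry   (header: sha1 + sizes)
 *    prog_data            (prog_data_size bytes)
 *    prog_data->param     (nr_params uniform params)
 *    surface bindings     (surface_count * anv_pipeline_binding)
 *    sampler bindings     (sampler_count * anv_pipeline_binding)
 *    <pad to 64 bytes>
 *    kernel               (kernel_size bytes of shader binary)
 */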
static uint32_t
entry_size(struct cache_entry *entry)
{
   /* This returns the number of bytes needed to serialize an entry, which
    * doesn't include the alignment padding bytes.
    */

   struct brw_stage_prog_data *prog_data = (void *)entry->prog_data;
   const uint32_t param_size =
      prog_data->nr_params * sizeof(*prog_data->param);

   const uint32_t map_size =
      entry->surface_count * sizeof(struct anv_pipeline_binding) +
      entry->sampler_count * sizeof(struct anv_pipeline_binding);

   return sizeof(*entry) + entry->prog_data_size + param_size + map_size;
}

void
anv_hash_shader(unsigned char *hash, const void *key, size_t key_size,
                struct anv_shader_module *module,
                const char *entrypoint,
                const struct anv_pipeline_layout *pipeline_layout,
                const VkSpecializationInfo *spec_info)
{
   struct mesa_sha1 *ctx;

   ctx = _mesa_sha1_init();
   _mesa_sha1_update(ctx, key, key_size);
   _mesa_sha1_update(ctx, module->sha1, sizeof(module->sha1));
   _mesa_sha1_update(ctx, entrypoint, strlen(entrypoint));
   if (pipeline_layout) {
      _mesa_sha1_update(ctx, pipeline_layout->sha1,
                        sizeof(pipeline_layout->sha1));
   }
   /* TODO: hash in the shader stage as well? */
   if (spec_info) {
      _mesa_sha1_update(ctx, spec_info->pMapEntries,
                        spec_info->mapEntryCount * sizeof spec_info->pMapEntries[0]);
      _mesa_sha1_update(ctx, spec_info->pData, spec_info->dataSize);
   }
   _mesa_sha1_final(ctx, hash);
}

static uint32_t
anv_pipeline_cache_search_unlocked(struct anv_pipeline_cache *cache,
                                   const unsigned char *sha1,
                                   const struct brw_stage_prog_data **prog_data,
                                   struct anv_pipeline_bind_map *map)
{
   const uint32_t mask = cache->table_size - 1;
   const uint32_t start = (*(uint32_t *) sha1);

   for (uint32_t i = 0; i < cache->table_size; i++) {
      const uint32_t index = (start + i) & mask;
      const uint32_t offset = cache->hash_table[index];

      if (offset == ~0)
         return NO_KERNEL;

      struct cache_entry *entry =
         cache->program_stream.block_pool->map + offset;
      if (memcmp(entry->sha1, sha1, sizeof(entry->sha1)) == 0) {
         if (prog_data) {
            assert(map);
            void *p = entry->prog_data;
            *prog_data = p;
            p += entry->prog_data_size;
            p += (*prog_data)->nr_params * sizeof(*(*prog_data)->param);
            map->surface_count = entry->surface_count;
            map->sampler_count = entry->sampler_count;
            map->image_count = entry->image_count;
            map->surface_to_descriptor = p;
            p += map->surface_count * sizeof(struct anv_pipeline_binding);
            map->sampler_to_descriptor = p;
         }

         /* The kernel binary lives at the next 64-byte boundary after the
          * entry, so return its offset in the program stream.
          */
         return offset + align_u32(entry_size(entry), 64);
      }
   }

   /* We only fall out of the loop without hitting an empty slot when the
    * table size is zero, i.e. the pipeline cache was disabled via
    * ANV_ENABLE_PIPELINE_CACHE=false.
    */
   return NO_KERNEL;
}

uint32_t
anv_pipeline_cache_search(struct anv_pipeline_cache *cache,
                          const unsigned char *sha1,
                          const struct brw_stage_prog_data **prog_data,
                          struct anv_pipeline_bind_map *map)
{
   uint32_t kernel;

   pthread_mutex_lock(&cache->mutex);

   kernel = anv_pipeline_cache_search_unlocked(cache, sha1, prog_data, map);

   pthread_mutex_unlock(&cache->mutex);

   return kernel;
}

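/* The hash table is open-addressed with linear probing: the first 32 bits
 * of the SHA-1 pick a start slot and we scan forward, wrapping with a
 * power-of-two mask.  Keeping the load factor at or below 1/2 (see
 * anv_pipeline_cache_add_entry) guarantees every probe sequence reaches an
 * empty (~0) slot, so both insertion and a failed lookup terminate.
 */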
static void
anv_pipeline_cache_set_entry(struct anv_pipeline_cache *cache,
                             struct cache_entry *entry, uint32_t entry_offset)
{
   const uint32_t mask = cache->table_size - 1;
   const uint32_t start = (*(uint32_t *) entry->sha1);

   /* We'll always be able to insert when we get here. */
   assert(cache->kernel_count < cache->table_size / 2);

   for (uint32_t i = 0; i < cache->table_size; i++) {
      const uint32_t index = (start + i) & mask;
      if (cache->hash_table[index] == ~0) {
         cache->hash_table[index] = entry_offset;
         break;
      }
   }

   cache->total_size += entry_size(entry) + entry->kernel_size;
   cache->kernel_count++;
}

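/* Growing the cache doubles the hash table and re-inserts the old offsets.
 * The entries themselves live in the program stream and never move, so only
 * the index needs to be rebuilt.
 */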
static VkResult
anv_pipeline_cache_grow(struct anv_pipeline_cache *cache)
{
   const uint32_t table_size = cache->table_size * 2;
   const uint32_t old_table_size = cache->table_size;
   const size_t byte_size = table_size * sizeof(cache->hash_table[0]);
   uint32_t *table;
   uint32_t *old_table = cache->hash_table;

   table = malloc(byte_size);
   if (table == NULL)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   cache->hash_table = table;
   cache->table_size = table_size;
   cache->kernel_count = 0;
   cache->total_size = 0;

   memset(cache->hash_table, 0xff, byte_size);
   for (uint32_t i = 0; i < old_table_size; i++) {
      const uint32_t offset = old_table[i];
      if (offset == ~0)
         continue;

      struct cache_entry *entry =
         cache->program_stream.block_pool->map + offset;
      anv_pipeline_cache_set_entry(cache, entry, offset);
   }

   free(old_table);

   return VK_SUCCESS;
}

static void
anv_pipeline_cache_add_entry(struct anv_pipeline_cache *cache,
                             struct cache_entry *entry, uint32_t entry_offset)
{
   if (cache->kernel_count == cache->table_size / 2)
      anv_pipeline_cache_grow(cache);

   /* Failing to grow the hash table isn't fatal, but it may mean we don't
    * have enough space to add this new kernel. Only add it if there's room.
    */
   if (cache->kernel_count < cache->table_size / 2)
      anv_pipeline_cache_set_entry(cache, entry, entry_offset);
}

uint32_t
anv_pipeline_cache_upload_kernel(struct anv_pipeline_cache *cache,
                                 const unsigned char *sha1,
                                 const void *kernel, size_t kernel_size,
                                 const struct brw_stage_prog_data **prog_data,
                                 size_t prog_data_size,
                                 struct anv_pipeline_bind_map *map)
{
   pthread_mutex_lock(&cache->mutex);

   /* Before uploading, check again that another thread didn't upload this
    * shader while we were compiling it.
    */
   if (sha1) {
      uint32_t cached_kernel =
         anv_pipeline_cache_search_unlocked(cache, sha1, prog_data, map);
      if (cached_kernel != NO_KERNEL) {
         pthread_mutex_unlock(&cache->mutex);
         return cached_kernel;
      }
   }

   struct cache_entry *entry;

   assert((*prog_data)->nr_pull_params == 0);
   assert((*prog_data)->nr_image_params == 0);

   const uint32_t param_size =
      (*prog_data)->nr_params * sizeof(*(*prog_data)->param);

   const uint32_t map_size =
      map->surface_count * sizeof(struct anv_pipeline_binding) +
      map->sampler_count * sizeof(struct anv_pipeline_binding);

   const uint32_t preamble_size =
      align_u32(sizeof(*entry) + prog_data_size + param_size + map_size, 64);

   const uint32_t size = preamble_size + kernel_size;

   assert(size < cache->program_stream.block_pool->block_size);
   const struct anv_state state =
      anv_state_stream_alloc(&cache->program_stream, size, 64);

   entry = state.map;
   entry->prog_data_size = prog_data_size;
   entry->surface_count = map->surface_count;
   entry->sampler_count = map->sampler_count;
   entry->image_count = map->image_count;
   entry->kernel_size = kernel_size;

   void *p = entry->prog_data;
   memcpy(p, *prog_data, prog_data_size);
   p += prog_data_size;

   memcpy(p, (*prog_data)->param, param_size);
   ((struct brw_stage_prog_data *)entry->prog_data)->param = p;
   p += param_size;

   memcpy(p, map->surface_to_descriptor,
          map->surface_count * sizeof(struct anv_pipeline_binding));
   map->surface_to_descriptor = p;
   p += map->surface_count * sizeof(struct anv_pipeline_binding);

   memcpy(p, map->sampler_to_descriptor,
          map->sampler_count * sizeof(struct anv_pipeline_binding));
   map->sampler_to_descriptor = p;

   if (sha1) {
      assert(anv_pipeline_cache_search_unlocked(cache, sha1,
                                                NULL, NULL) == NO_KERNEL);

      memcpy(entry->sha1, sha1, sizeof(entry->sha1));
      anv_pipeline_cache_add_entry(cache, entry, state.offset);
   }

   pthread_mutex_unlock(&cache->mutex);

   /* The state was reserved while we held the lock, so the kernel copy can
    * safely happen outside of it.
    */
   memcpy(state.map + preamble_size, kernel, kernel_size);

   if (!cache->device->info.has_llc)
      anv_state_clflush(state);

   *prog_data = (const struct brw_stage_prog_data *) entry->prog_data;

   return state.offset + preamble_size;
}

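/* A minimal sketch of how a pipeline compile path might use the cache; the
 * variable names (key, module, pipeline, spec_info, code, etc.) are
 * hypothetical and not part of this file.
 */
#if 0
unsigned char sha1[20];
anv_hash_shader(sha1, &key, sizeof(key), module, "main",
                pipeline->layout, spec_info);

const struct brw_stage_prog_data *prog_data;
struct anv_pipeline_bind_map map;
uint32_t kernel = anv_pipeline_cache_search(cache, sha1, &prog_data, &map);
if (kernel == NO_KERNEL) {
   /* Cache miss: compile, then upload (upload re-checks for a race). */
   kernel = anv_pipeline_cache_upload_kernel(cache, sha1,
                                             code, code_size,
                                             &prog_data, prog_data_size,
                                             &map);
}
#endif
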
struct cache_header {
   uint32_t header_size;
   uint32_t header_version;
   uint32_t vendor_id;
   uint32_t device_id;
   uint8_t uuid[VK_UUID_SIZE];
};

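/* On disk, a pipeline cache begins with the header layout mandated by the
 * Vulkan spec for vkGetPipelineCacheData, which we also expect at the front
 * of pInitialData.  Loading validates that header and then simply replays
 * each stored entry through anv_pipeline_cache_upload_kernel(), which
 * deduplicates by SHA-1 as it goes.
 */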
static void
anv_pipeline_cache_load(struct anv_pipeline_cache *cache,
                        const void *data, size_t size)
{
   struct anv_device *device = cache->device;
   struct cache_header header;
   uint8_t uuid[VK_UUID_SIZE];

   if (size < sizeof(header))
      return;
   memcpy(&header, data, sizeof(header));
   if (header.header_size < sizeof(header))
      return;
   if (header.header_version != VK_PIPELINE_CACHE_HEADER_VERSION_ONE)
      return;
   if (header.vendor_id != 0x8086)
      return;
   if (header.device_id != device->chipset_id)
      return;
   anv_device_get_cache_uuid(uuid);
   if (memcmp(header.uuid, uuid, VK_UUID_SIZE) != 0)
      return;

   void *end = (void *) data + size;
   void *p = (void *) data + header.header_size;

   while (p < end) {
      struct cache_entry *entry = p;

      void *data = entry->prog_data;

      /* Make a copy of prog_data so that it's mutable */
      uint8_t prog_data_tmp[512];
      assert(entry->prog_data_size <= sizeof(prog_data_tmp));
      memcpy(prog_data_tmp, data, entry->prog_data_size);
      struct brw_stage_prog_data *prog_data = (void *)prog_data_tmp;
      data += entry->prog_data_size;

      prog_data->param = data;
      data += prog_data->nr_params * sizeof(*prog_data->param);

      struct anv_pipeline_binding *surface_to_descriptor = data;
      data += entry->surface_count * sizeof(struct anv_pipeline_binding);
      struct anv_pipeline_binding *sampler_to_descriptor = data;
      data += entry->sampler_count * sizeof(struct anv_pipeline_binding);
      void *kernel = data;

      struct anv_pipeline_bind_map map = {
         .surface_count = entry->surface_count,
         .sampler_count = entry->sampler_count,
         .image_count = entry->image_count,
         .surface_to_descriptor = surface_to_descriptor,
         .sampler_to_descriptor = sampler_to_descriptor
      };

      const struct brw_stage_prog_data *const_prog_data = prog_data;

      anv_pipeline_cache_upload_kernel(cache, entry->sha1,
                                       kernel, entry->kernel_size,
                                       &const_prog_data,
                                       entry->prog_data_size, &map);
      p = kernel + entry->kernel_size;
   }
}

VkResult anv_CreatePipelineCache(
    VkDevice                                    _device,
    const VkPipelineCacheCreateInfo*            pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkPipelineCache*                            pPipelineCache)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_pipeline_cache *cache;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO);
   assert(pCreateInfo->flags == 0);

   cache = anv_alloc2(&device->alloc, pAllocator,
                      sizeof(*cache), 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (cache == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   anv_pipeline_cache_init(cache, device);

   if (pCreateInfo->initialDataSize > 0)
      anv_pipeline_cache_load(cache,
                              pCreateInfo->pInitialData,
                              pCreateInfo->initialDataSize);

   *pPipelineCache = anv_pipeline_cache_to_handle(cache);

   return VK_SUCCESS;
}

void anv_DestroyPipelineCache(
    VkDevice                                    _device,
    VkPipelineCache                             _cache,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache);

   anv_pipeline_cache_finish(cache);

   anv_free2(&device->alloc, pAllocator, cache);
}

VkResult anv_GetPipelineCacheData(
    VkDevice                                    _device,
    VkPipelineCache                             _cache,
    size_t*                                     pDataSize,
    void*                                       pData)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache);
   struct cache_header *header;

   const size_t size = sizeof(*header) + cache->total_size;

   if (pData == NULL) {
      *pDataSize = size;
      return VK_SUCCESS;
   }

   if (*pDataSize < sizeof(*header)) {
      *pDataSize = 0;
      return VK_INCOMPLETE;
   }

   void *p = pData, *end = pData + *pDataSize;
   header = p;
   header->header_size = sizeof(*header);
   header->header_version = VK_PIPELINE_CACHE_HEADER_VERSION_ONE;
   header->vendor_id = 0x8086;
   header->device_id = device->chipset_id;
   anv_device_get_cache_uuid(header->uuid);
   p += header->header_size;

   struct cache_entry *entry;
   for (uint32_t i = 0; i < cache->table_size; i++) {
      if (cache->hash_table[i] == ~0)
         continue;

      entry = cache->program_stream.block_pool->map + cache->hash_table[i];
      const uint32_t size = entry_size(entry);
      if (end < p + size + entry->kernel_size)
         break;

      memcpy(p, entry, size);
      p += size;

      void *kernel = (void *) entry + align_u32(size, 64);

      memcpy(p, kernel, entry->kernel_size);
      p += entry->kernel_size;
   }

   *pDataSize = p - pData;

   return VK_SUCCESS;
}

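/* Merging walks the source cache's hash table and adds any entry the
 * destination doesn't already have.  The entry offsets can be reused
 * directly because every cache's program stream on a given device draws
 * from the same device-wide instruction_block_pool (see
 * anv_pipeline_cache_init above).
 */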
static void
anv_pipeline_cache_merge(struct anv_pipeline_cache *dst,
                         struct anv_pipeline_cache *src)
{
   for (uint32_t i = 0; i < src->table_size; i++) {
      const uint32_t offset = src->hash_table[i];
      if (offset == ~0)
         continue;

      struct cache_entry *entry =
         src->program_stream.block_pool->map + offset;

      if (anv_pipeline_cache_search(dst, entry->sha1, NULL, NULL) != NO_KERNEL)
         continue;

      anv_pipeline_cache_add_entry(dst, entry, offset);
   }
}

VkResult anv_MergePipelineCaches(
    VkDevice                                    _device,
    VkPipelineCache                             destCache,
    uint32_t                                    srcCacheCount,
    const VkPipelineCache*                      pSrcCaches)
{
   ANV_FROM_HANDLE(anv_pipeline_cache, dst, destCache);

   for (uint32_t i = 0; i < srcCacheCount; i++) {
      ANV_FROM_HANDLE(anv_pipeline_cache, src, pSrcCaches[i]);

      anv_pipeline_cache_merge(dst, src);
   }

   return VK_SUCCESS;
}