anv: Include the pipeline layout in the shader hash
src/intel/vulkan/anv_pipeline_cache.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/mesa-sha1.h"
#include "util/debug.h"
#include "anv_private.h"

/* Remaining work:
 *
 * - Compact binding table layout so it's tight and not dependent on
 *   descriptor set layout.
 *
 * - Review prog_data struct for size and cacheability: struct
 *   brw_stage_prog_data has binding_table which uses a lot of uint32_t for 8
 *   bit quantities etc; param, pull_param, and image_params are pointers, we
 *   just need the compaction map.  Use bit fields for all bools, e.g.
 *   dual_src_blend.
 */

void
anv_pipeline_cache_init(struct anv_pipeline_cache *cache,
                        struct anv_device *device)
{
   cache->device = device;
   anv_state_stream_init(&cache->program_stream,
                         &device->instruction_block_pool);
   pthread_mutex_init(&cache->mutex, NULL);

   cache->kernel_count = 0;
   cache->total_size = 0;
   cache->table_size = 1024;
   const size_t byte_size = cache->table_size * sizeof(cache->hash_table[0]);
   cache->hash_table = malloc(byte_size);

   /* We don't consider allocation failure fatal; we just start with a
    * 0-sized cache.
    */
   if (cache->hash_table == NULL ||
       !env_var_as_boolean("ANV_ENABLE_PIPELINE_CACHE", true))
      cache->table_size = 0;
   else
      memset(cache->hash_table, 0xff, byte_size);
}

void
anv_pipeline_cache_finish(struct anv_pipeline_cache *cache)
{
   anv_state_stream_finish(&cache->program_stream);
   pthread_mutex_destroy(&cache->mutex);
   free(cache->hash_table);
}

struct cache_entry {
   unsigned char sha1[20];
   uint32_t prog_data_size;
   uint32_t kernel_size;
   uint32_t surface_count;
   uint32_t sampler_count;
   uint32_t image_count;

   char prog_data[0];

   /* kernel follows prog_data at next 64 byte aligned address */
};

static uint32_t
entry_size(struct cache_entry *entry)
{
   /* This returns the number of bytes needed to serialize an entry, which
    * doesn't include the alignment padding bytes.
    */

   struct brw_stage_prog_data *prog_data = (void *)entry->prog_data;
   const uint32_t param_size =
      prog_data->nr_params * sizeof(*prog_data->param);

   const uint32_t map_size =
      entry->surface_count * sizeof(struct anv_pipeline_binding) +
      entry->sampler_count * sizeof(struct anv_pipeline_binding);

   return sizeof(*entry) + entry->prog_data_size + param_size + map_size;
}

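/* Compute the SHA-1 used as the pipeline cache key for a shader.  The hash
 * covers the caller's compile key, the shader module's SHA-1, the entrypoint
 * name, the pipeline layout's SHA-1 (when a layout is provided) and the
 * specialization constants.
 */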
void
anv_hash_shader(unsigned char *hash, const void *key, size_t key_size,
                struct anv_shader_module *module,
                const char *entrypoint,
                const struct anv_pipeline_layout *pipeline_layout,
                const VkSpecializationInfo *spec_info)
{
   struct mesa_sha1 *ctx;

   ctx = _mesa_sha1_init();
   _mesa_sha1_update(ctx, key, key_size);
   _mesa_sha1_update(ctx, module->sha1, sizeof(module->sha1));
   _mesa_sha1_update(ctx, entrypoint, strlen(entrypoint));
   if (pipeline_layout) {
      _mesa_sha1_update(ctx, pipeline_layout->sha1,
                        sizeof(pipeline_layout->sha1));
   }
   /* hash in shader stage? */
   if (spec_info) {
      _mesa_sha1_update(ctx, spec_info->pMapEntries,
                        spec_info->mapEntryCount * sizeof spec_info->pMapEntries[0]);
      _mesa_sha1_update(ctx, spec_info->pData, spec_info->dataSize);
   }
   _mesa_sha1_final(ctx, hash);
}

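/* Look up a kernel by SHA-1 without taking the cache mutex.  The hash table
 * is open-addressed with linear probing; a slot of ~0 is empty and ends the
 * probe.  On a hit, the return value is the offset of the kernel in the
 * program stream and, if prog_data is non-NULL, *prog_data and *map are
 * pointed at the data stored with the cached entry.
 */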
static uint32_t
anv_pipeline_cache_search_unlocked(struct anv_pipeline_cache *cache,
                                   const unsigned char *sha1,
                                   const struct brw_stage_prog_data **prog_data,
                                   struct anv_pipeline_bind_map *map)
{
   const uint32_t mask = cache->table_size - 1;
   const uint32_t start = (*(uint32_t *) sha1);

   for (uint32_t i = 0; i < cache->table_size; i++) {
      const uint32_t index = (start + i) & mask;
      const uint32_t offset = cache->hash_table[index];

      if (offset == ~0)
         return NO_KERNEL;

      struct cache_entry *entry =
         cache->program_stream.block_pool->map + offset;
      if (memcmp(entry->sha1, sha1, sizeof(entry->sha1)) == 0) {
         if (prog_data) {
            assert(map);
            void *p = entry->prog_data;
            *prog_data = p;
            p += entry->prog_data_size;
            p += (*prog_data)->nr_params * sizeof(*(*prog_data)->param);
            map->surface_count = entry->surface_count;
            map->sampler_count = entry->sampler_count;
            map->image_count = entry->image_count;
            map->surface_to_descriptor = p;
            p += map->surface_count * sizeof(struct anv_pipeline_binding);
            map->sampler_to_descriptor = p;
         }

         return offset + align_u32(entry_size(entry), 64);
      }
   }

   /* This can happen if the pipeline cache is disabled via
    * ANV_ENABLE_PIPELINE_CACHE=false
    */
   return NO_KERNEL;
}

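/* Same as anv_pipeline_cache_search_unlocked(), but takes the cache mutex. */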
uint32_t
anv_pipeline_cache_search(struct anv_pipeline_cache *cache,
                          const unsigned char *sha1,
                          const struct brw_stage_prog_data **prog_data,
                          struct anv_pipeline_bind_map *map)
{
   uint32_t kernel;

   pthread_mutex_lock(&cache->mutex);

   kernel = anv_pipeline_cache_search_unlocked(cache, sha1, prog_data, map);

   pthread_mutex_unlock(&cache->mutex);

   return kernel;
}

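/* Record an entry's offset in the hash table using linear probing and update
 * the cache statistics.  The caller must guarantee there is room; the table
 * is kept at most half full so the probe always finds a free slot.
 */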
static void
anv_pipeline_cache_set_entry(struct anv_pipeline_cache *cache,
                             struct cache_entry *entry, uint32_t entry_offset)
{
   const uint32_t mask = cache->table_size - 1;
   const uint32_t start = (*(uint32_t *) entry->sha1);

   /* We'll always be able to insert when we get here. */
   assert(cache->kernel_count < cache->table_size / 2);

   for (uint32_t i = 0; i < cache->table_size; i++) {
      const uint32_t index = (start + i) & mask;
      if (cache->hash_table[index] == ~0) {
         cache->hash_table[index] = entry_offset;
         break;
      }
   }

   cache->total_size += entry_size(entry) + entry->kernel_size;
   cache->kernel_count++;
}

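/* Double the hash table and re-insert every existing entry.  Only the table
 * of offsets is reallocated; the serialized entries themselves stay where
 * they are in the program stream.
 */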
static VkResult
anv_pipeline_cache_grow(struct anv_pipeline_cache *cache)
{
   const uint32_t table_size = cache->table_size * 2;
   const uint32_t old_table_size = cache->table_size;
   const size_t byte_size = table_size * sizeof(cache->hash_table[0]);
   uint32_t *table;
   uint32_t *old_table = cache->hash_table;

   table = malloc(byte_size);
   if (table == NULL)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   cache->hash_table = table;
   cache->table_size = table_size;
   cache->kernel_count = 0;
   cache->total_size = 0;

   memset(cache->hash_table, 0xff, byte_size);
   for (uint32_t i = 0; i < old_table_size; i++) {
      const uint32_t offset = old_table[i];
      if (offset == ~0)
         continue;

      struct cache_entry *entry =
         cache->program_stream.block_pool->map + offset;
      anv_pipeline_cache_set_entry(cache, entry, offset);
   }

   free(old_table);

   return VK_SUCCESS;
}

static void
anv_pipeline_cache_add_entry(struct anv_pipeline_cache *cache,
                             struct cache_entry *entry, uint32_t entry_offset)
{
   if (cache->kernel_count == cache->table_size / 2)
      anv_pipeline_cache_grow(cache);

   /* Failing to grow the hash table isn't fatal, but it may mean we don't
    * have enough space to add this new kernel.  Only add it if there's room.
    */
   if (cache->kernel_count < cache->table_size / 2)
      anv_pipeline_cache_set_entry(cache, entry, entry_offset);
}

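/* Serialize a compiled kernel into the program stream and, if a SHA-1 was
 * provided, add it to the hash table.  The serialized layout is the
 * cache_entry header followed by prog_data, its params and the surface and
 * sampler bind maps, with the kernel itself starting at the next 64-byte
 * aligned offset.  Returns the offset of the kernel; *prog_data and the bind
 * map are repointed at the copies stored in the cache.
 */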
uint32_t
anv_pipeline_cache_upload_kernel(struct anv_pipeline_cache *cache,
                                 const unsigned char *sha1,
                                 const void *kernel, size_t kernel_size,
                                 const struct brw_stage_prog_data **prog_data,
                                 size_t prog_data_size,
                                 struct anv_pipeline_bind_map *map)
{
   pthread_mutex_lock(&cache->mutex);

   /* Before uploading, check again that another thread didn't upload this
    * shader while we were compiling it.
    */
   if (sha1) {
      uint32_t cached_kernel =
         anv_pipeline_cache_search_unlocked(cache, sha1, prog_data, map);
      if (cached_kernel != NO_KERNEL) {
         pthread_mutex_unlock(&cache->mutex);
         return cached_kernel;
      }
   }

   struct cache_entry *entry;

   assert((*prog_data)->nr_pull_params == 0);
   assert((*prog_data)->nr_image_params == 0);

   const uint32_t param_size =
      (*prog_data)->nr_params * sizeof(*(*prog_data)->param);

   const uint32_t map_size =
      map->surface_count * sizeof(struct anv_pipeline_binding) +
      map->sampler_count * sizeof(struct anv_pipeline_binding);

   const uint32_t preamble_size =
      align_u32(sizeof(*entry) + prog_data_size + param_size + map_size, 64);

   const uint32_t size = preamble_size + kernel_size;

   assert(size < cache->program_stream.block_pool->block_size);
   const struct anv_state state =
      anv_state_stream_alloc(&cache->program_stream, size, 64);

   entry = state.map;
   entry->prog_data_size = prog_data_size;
   entry->surface_count = map->surface_count;
   entry->sampler_count = map->sampler_count;
   entry->image_count = map->image_count;
   entry->kernel_size = kernel_size;

   void *p = entry->prog_data;
   memcpy(p, *prog_data, prog_data_size);
   p += prog_data_size;

   memcpy(p, (*prog_data)->param, param_size);
   ((struct brw_stage_prog_data *)entry->prog_data)->param = p;
   p += param_size;

   memcpy(p, map->surface_to_descriptor,
          map->surface_count * sizeof(struct anv_pipeline_binding));
   map->surface_to_descriptor = p;
   p += map->surface_count * sizeof(struct anv_pipeline_binding);

   memcpy(p, map->sampler_to_descriptor,
          map->sampler_count * sizeof(struct anv_pipeline_binding));
   map->sampler_to_descriptor = p;

   if (sha1) {
      assert(anv_pipeline_cache_search_unlocked(cache, sha1,
                                                NULL, NULL) == NO_KERNEL);

      memcpy(entry->sha1, sha1, sizeof(entry->sha1));
      anv_pipeline_cache_add_entry(cache, entry, state.offset);
   }

   pthread_mutex_unlock(&cache->mutex);

   memcpy(state.map + preamble_size, kernel, kernel_size);

   if (!cache->device->info.has_llc)
      anv_state_clflush(state);

   *prog_data = (const struct brw_stage_prog_data *) entry->prog_data;

   return state.offset + preamble_size;
}

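/* Matches the pipeline cache header layout the Vulkan spec requires at the
 * start of vkGetPipelineCacheData's output.
 */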
struct cache_header {
   uint32_t header_size;
   uint32_t header_version;
   uint32_t vendor_id;
   uint32_t device_id;
   uint8_t uuid[VK_UUID_SIZE];
};

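/* Import a serialized cache blob (the pInitialData of vkCreatePipelineCache).
 * After validating the header, each serialized entry is re-uploaded through
 * anv_pipeline_cache_upload_kernel() so the data lands in this cache's
 * program stream.
 */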
static void
anv_pipeline_cache_load(struct anv_pipeline_cache *cache,
                        const void *data, size_t size)
{
   struct anv_device *device = cache->device;
   struct cache_header header;
   uint8_t uuid[VK_UUID_SIZE];

   if (size < sizeof(header))
      return;
   memcpy(&header, data, sizeof(header));
   if (header.header_size < sizeof(header))
      return;
   if (header.header_version != VK_PIPELINE_CACHE_HEADER_VERSION_ONE)
      return;
   if (header.vendor_id != 0x8086)
      return;
   if (header.device_id != device->chipset_id)
      return;
   anv_device_get_cache_uuid(uuid);
   if (memcmp(header.uuid, uuid, VK_UUID_SIZE) != 0)
      return;

   void *end = (void *) data + size;
   void *p = (void *) data + header.header_size;

   while (p < end) {
      struct cache_entry *entry = p;

      void *data = entry->prog_data;

      /* Make a copy of prog_data so that it's mutable */
      uint8_t prog_data_tmp[512];
      assert(entry->prog_data_size <= sizeof(prog_data_tmp));
      memcpy(prog_data_tmp, data, entry->prog_data_size);
      struct brw_stage_prog_data *prog_data = (void *)prog_data_tmp;
      data += entry->prog_data_size;

      prog_data->param = data;
      data += prog_data->nr_params * sizeof(*prog_data->param);

      struct anv_pipeline_binding *surface_to_descriptor = data;
      data += entry->surface_count * sizeof(struct anv_pipeline_binding);
      struct anv_pipeline_binding *sampler_to_descriptor = data;
      data += entry->sampler_count * sizeof(struct anv_pipeline_binding);
      void *kernel = data;

      struct anv_pipeline_bind_map map = {
         .surface_count = entry->surface_count,
         .sampler_count = entry->sampler_count,
         .image_count = entry->image_count,
         .surface_to_descriptor = surface_to_descriptor,
         .sampler_to_descriptor = sampler_to_descriptor
      };

      const struct brw_stage_prog_data *const_prog_data = prog_data;

      anv_pipeline_cache_upload_kernel(cache, entry->sha1,
                                       kernel, entry->kernel_size,
                                       &const_prog_data,
                                       entry->prog_data_size, &map);
      p = kernel + entry->kernel_size;
   }
}

VkResult anv_CreatePipelineCache(
    VkDevice                                    _device,
    const VkPipelineCacheCreateInfo*            pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkPipelineCache*                            pPipelineCache)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_pipeline_cache *cache;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO);
   assert(pCreateInfo->flags == 0);

   cache = anv_alloc2(&device->alloc, pAllocator,
                      sizeof(*cache), 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (cache == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   anv_pipeline_cache_init(cache, device);

   if (pCreateInfo->initialDataSize > 0)
      anv_pipeline_cache_load(cache,
                              pCreateInfo->pInitialData,
                              pCreateInfo->initialDataSize);

   *pPipelineCache = anv_pipeline_cache_to_handle(cache);

   return VK_SUCCESS;
}

void anv_DestroyPipelineCache(
    VkDevice                                    _device,
    VkPipelineCache                             _cache,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache);

   anv_pipeline_cache_finish(cache);

   anv_free2(&device->alloc, pAllocator, cache);
}

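/* Serialize the cache as a cache_header followed by each entry and its
 * kernel.  With pData == NULL only the required size is reported; entries
 * that don't fit in the caller's buffer are omitted and the number of bytes
 * actually written is returned in *pDataSize.
 */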
VkResult anv_GetPipelineCacheData(
    VkDevice                                    _device,
    VkPipelineCache                             _cache,
    size_t*                                     pDataSize,
    void*                                       pData)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache);
   struct cache_header *header;

   const size_t size = sizeof(*header) + cache->total_size;

   if (pData == NULL) {
      *pDataSize = size;
      return VK_SUCCESS;
   }

   if (*pDataSize < sizeof(*header)) {
      *pDataSize = 0;
      return VK_INCOMPLETE;
   }

   void *p = pData, *end = pData + *pDataSize;
   header = p;
   header->header_size = sizeof(*header);
   header->header_version = VK_PIPELINE_CACHE_HEADER_VERSION_ONE;
   header->vendor_id = 0x8086;
   header->device_id = device->chipset_id;
   anv_device_get_cache_uuid(header->uuid);
   p += header->header_size;

   struct cache_entry *entry;
   for (uint32_t i = 0; i < cache->table_size; i++) {
      if (cache->hash_table[i] == ~0)
         continue;

      entry = cache->program_stream.block_pool->map + cache->hash_table[i];
      const uint32_t size = entry_size(entry);
      if (end < p + size + entry->kernel_size)
         break;

      memcpy(p, entry, size);
      p += size;

      void *kernel = (void *) entry + align_u32(size, 64);

      memcpy(p, kernel, entry->kernel_size);
      p += entry->kernel_size;
   }

   *pDataSize = p - pData;

   return VK_SUCCESS;
}

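/* Add to dst every entry of src that dst doesn't already contain.  Only the
 * hash table slots are copied; the serialized entries themselves are not
 * duplicated.
 */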
static void
anv_pipeline_cache_merge(struct anv_pipeline_cache *dst,
                         struct anv_pipeline_cache *src)
{
   for (uint32_t i = 0; i < src->table_size; i++) {
      const uint32_t offset = src->hash_table[i];
      if (offset == ~0)
         continue;

      struct cache_entry *entry =
         src->program_stream.block_pool->map + offset;

      if (anv_pipeline_cache_search(dst, entry->sha1, NULL, NULL) != NO_KERNEL)
         continue;

      anv_pipeline_cache_add_entry(dst, entry, offset);
   }
}

VkResult anv_MergePipelineCaches(
    VkDevice                                    _device,
    VkPipelineCache                             destCache,
    uint32_t                                    srcCacheCount,
    const VkPipelineCache*                      pSrcCaches)
{
   ANV_FROM_HANDLE(anv_pipeline_cache, dst, destCache);

   for (uint32_t i = 0; i < srcCacheCount; i++) {
      ANV_FROM_HANDLE(anv_pipeline_cache, src, pSrcCaches[i]);

      anv_pipeline_cache_merge(dst, src);
   }

   return VK_SUCCESS;
}