anv: Add anv_pipeline_cache_add_entry()
[mesa.git] src/intel/vulkan/anv_pipeline_cache.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/mesa-sha1.h"
#include "util/debug.h"
#include "anv_private.h"

/* Remaining work:
 *
 * - Compact binding table layout so it's tight and not dependent on
 *   descriptor set layout.
 *
 * - Review prog_data struct for size and cacheability: struct
 *   brw_stage_prog_data has binding_table which uses a lot of uint32_t for
 *   8-bit quantities etc.; param, pull_param, and image_params are pointers,
 *   we just need the compaction map. Use bit fields for all bools, e.g.
 *   dual_src_blend.
 */

void
anv_pipeline_cache_init(struct anv_pipeline_cache *cache,
                        struct anv_device *device)
{
   cache->device = device;
   anv_state_stream_init(&cache->program_stream,
                         &device->instruction_block_pool);
   pthread_mutex_init(&cache->mutex, NULL);

   cache->kernel_count = 0;
   cache->total_size = 0;
   cache->table_size = 1024;
   const size_t byte_size = cache->table_size * sizeof(cache->hash_table[0]);
   cache->hash_table = malloc(byte_size);

   /* We don't consider allocation failure fatal; we just start with a
    * 0-sized cache.
    */
   if (cache->hash_table == NULL)
      cache->table_size = 0;
   else
      memset(cache->hash_table, 0xff, byte_size);
}

void
anv_pipeline_cache_finish(struct anv_pipeline_cache *cache)
{
   anv_state_stream_finish(&cache->program_stream);
   pthread_mutex_destroy(&cache->mutex);
   free(cache->hash_table);
}

struct cache_entry {
   unsigned char sha1[20];
   uint32_t prog_data_size;
   uint32_t kernel_size;
   char prog_data[0];

   /* kernel follows prog_data at the next 64-byte-aligned address */
};

static uint32_t
entry_size(struct cache_entry *entry)
{
   /* This returns the number of bytes needed to serialize an entry, which
    * doesn't include the alignment padding bytes.
    */

   return sizeof(*entry) + entry->prog_data_size + entry->kernel_size;
}
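
/* Layout sketch (not part of the upstream code): in the program stream each
 * cache entry is laid out as
 *
 *    [struct cache_entry][prog_data (prog_data_size bytes)][pad to 64][kernel]
 *
 * so the kernel starts align_u32(sizeof(struct cache_entry) + prog_data_size,
 * 64) bytes past the entry. The helper below is only an illustration of that
 * math; the driver computes it inline where needed.
 */
static inline uint32_t
cache_entry_kernel_offset(const struct cache_entry *entry)
{
   /* e.g. sizeof(*entry) == 28 and prog_data_size == 144 gives
    * align_u32(172, 64) == 192 bytes of preamble before the kernel.
    */
   return align_u32(sizeof(*entry) + entry->prog_data_size, 64);
}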

void
anv_hash_shader(unsigned char *hash, const void *key, size_t key_size,
                struct anv_shader_module *module,
                const char *entrypoint,
                const VkSpecializationInfo *spec_info)
{
   struct mesa_sha1 *ctx;

   ctx = _mesa_sha1_init();
   _mesa_sha1_update(ctx, key, key_size);
   _mesa_sha1_update(ctx, module->sha1, sizeof(module->sha1));
   _mesa_sha1_update(ctx, entrypoint, strlen(entrypoint));
   /* hash in shader stage, pipeline layout? */
   if (spec_info) {
      _mesa_sha1_update(ctx, spec_info->pMapEntries,
                        spec_info->mapEntryCount * sizeof spec_info->pMapEntries[0]);
      _mesa_sha1_update(ctx, spec_info->pData, spec_info->dataSize);
   }
   _mesa_sha1_final(ctx, hash);
}
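
/* Usage sketch (illustrative only; the real callers are in the pipeline
 * compile code): a compile path hashes its per-stage compile key together
 * with the SPIR-V module, entrypoint, and specialization constants, then
 * uses the digest as the cache key. The key struct name below is
 * hypothetical.
 *
 *    struct hypothetical_stage_key key = { ... };
 *    unsigned char sha1[20];
 *    anv_hash_shader(sha1, &key, sizeof(key), module, "main", spec_info);
 *    // sha1 is then passed to anv_pipeline_cache_search() and
 *    // anv_pipeline_cache_upload_kernel().
 */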

uint32_t
anv_pipeline_cache_search(struct anv_pipeline_cache *cache,
                          const unsigned char *sha1,
                          const struct brw_stage_prog_data **prog_data)
{
   const uint32_t mask = cache->table_size - 1;
   const uint32_t start = (*(uint32_t *) sha1);

   for (uint32_t i = 0; i < cache->table_size; i++) {
      const uint32_t index = (start + i) & mask;
      const uint32_t offset = cache->hash_table[index];

      if (offset == ~0)
         return NO_KERNEL;

      struct cache_entry *entry =
         cache->program_stream.block_pool->map + offset;
      if (memcmp(entry->sha1, sha1, sizeof(entry->sha1)) == 0) {
         if (prog_data)
            *prog_data = (const struct brw_stage_prog_data *) entry->prog_data;

         const uint32_t preamble_size =
            align_u32(sizeof(*entry) + entry->prog_data_size, 64);

         return offset + preamble_size;
      }
   }

   return NO_KERNEL;
}
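
/* Note on the table scheme: this is an open-addressed hash table with linear
 * probing. The first four bytes of the SHA-1 pick the starting bucket,
 * (start + i) & mask walks successive buckets, ~0 marks an empty slot, and
 * each occupied slot stores the entry's byte offset into the program
 * stream's block pool. Because the table is grown before it becomes half
 * full (see anv_pipeline_cache_add_entry below), a probe always terminates
 * at either a match or an empty slot.
 */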

static void
anv_pipeline_cache_set_entry(struct anv_pipeline_cache *cache,
                             struct cache_entry *entry, uint32_t entry_offset)
{
   const uint32_t mask = cache->table_size - 1;
   const uint32_t start = (*(uint32_t *) entry->sha1);

   /* We'll always be able to insert when we get here. */
   assert(cache->kernel_count < cache->table_size / 2);

   for (uint32_t i = 0; i < cache->table_size; i++) {
      const uint32_t index = (start + i) & mask;
      if (cache->hash_table[index] == ~0) {
         cache->hash_table[index] = entry_offset;
         break;
      }
   }

   cache->total_size += entry_size(entry);
   cache->kernel_count++;
}
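
/* The assert above encodes the load-factor invariant: callers grow the table
 * once kernel_count reaches table_size / 2, so with the initial table_size
 * of 1024 the cache holds at most 512 kernels before rehashing, and the
 * probe loop is guaranteed to find a free slot.
 */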

static VkResult
anv_pipeline_cache_grow(struct anv_pipeline_cache *cache)
{
   const uint32_t table_size = cache->table_size * 2;
   const uint32_t old_table_size = cache->table_size;
   const size_t byte_size = table_size * sizeof(cache->hash_table[0]);
   uint32_t *table;
   uint32_t *old_table = cache->hash_table;

   table = malloc(byte_size);
   if (table == NULL)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   cache->hash_table = table;
   cache->table_size = table_size;
   cache->kernel_count = 0;
   cache->total_size = 0;

   memset(cache->hash_table, 0xff, byte_size);
   for (uint32_t i = 0; i < old_table_size; i++) {
      const uint32_t offset = old_table[i];
      if (offset == ~0)
         continue;

      struct cache_entry *entry =
         cache->program_stream.block_pool->map + offset;
      anv_pipeline_cache_set_entry(cache, entry, offset);
   }

   free(old_table);

   return VK_SUCCESS;
}

static void
anv_pipeline_cache_add_entry(struct anv_pipeline_cache *cache,
                             struct cache_entry *entry, uint32_t entry_offset)
{
   if (cache->kernel_count == cache->table_size / 2)
      anv_pipeline_cache_grow(cache);

   /* Failing to grow the hash table isn't fatal, but it may mean we don't
    * have enough space to add this new kernel. Only add it if there's room.
    */
   if (cache->kernel_count < cache->table_size / 2)
      anv_pipeline_cache_set_entry(cache, entry, entry_offset);
}

uint32_t
anv_pipeline_cache_upload_kernel(struct anv_pipeline_cache *cache,
                                 const unsigned char *sha1,
                                 const void *kernel, size_t kernel_size,
                                 const struct brw_stage_prog_data **prog_data,
                                 size_t prog_data_size)
{
   pthread_mutex_lock(&cache->mutex);
   struct cache_entry *entry;

   const uint32_t preamble_size =
      align_u32(sizeof(*entry) + prog_data_size, 64);

   const uint32_t size = preamble_size + kernel_size;

   assert(size < cache->program_stream.block_pool->block_size);
   const struct anv_state state =
      anv_state_stream_alloc(&cache->program_stream, size, 64);

   entry = state.map;
   entry->prog_data_size = prog_data_size;
   memcpy(entry->prog_data, *prog_data, prog_data_size);
   *prog_data = (const struct brw_stage_prog_data *) entry->prog_data;
   entry->kernel_size = kernel_size;

   if (sha1 && env_var_as_boolean("ANV_ENABLE_PIPELINE_CACHE", false)) {
      assert(anv_pipeline_cache_search(cache, sha1, NULL) == NO_KERNEL);

      memcpy(entry->sha1, sha1, sizeof(entry->sha1));
      anv_pipeline_cache_add_entry(cache, entry, state.offset);
   }

   pthread_mutex_unlock(&cache->mutex);

   memcpy(state.map + preamble_size, kernel, kernel_size);

   if (!cache->device->info.has_llc)
      anv_state_clflush(state);

   return state.offset + preamble_size;
}
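
/* Usage sketch (illustrative only; the real call sites are in the pipeline
 * compile code): after the backend compiler produces a kernel, the compile
 * path hands it to the cache and uses the returned block-pool offset as the
 * kernel start address. The variable names below are hypothetical.
 *
 *    const struct brw_stage_prog_data *prog_data = &stage_prog_data.base;
 *    uint32_t kernel_offset =
 *       anv_pipeline_cache_upload_kernel(cache, sha1,
 *                                        program, program_size,
 *                                        &prog_data, sizeof(stage_prog_data));
 *    // prog_data now points at the copy stored alongside the kernel.
 */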

struct cache_header {
   uint32_t header_size;
   uint32_t header_version;
   uint32_t vendor_id;
   uint32_t device_id;
   uint8_t uuid[VK_UUID_SIZE];
};
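
/* This matches the header layout the Vulkan spec mandates for
 * vkGetPipelineCacheData blobs. The full serialized stream written by
 * anv_GetPipelineCacheData and parsed by anv_pipeline_cache_load below is
 *
 *    [struct cache_header][entry][entry]...
 *
 * where each entry is a struct cache_entry immediately followed by its
 * prog_data and then its kernel, with no 64-byte alignment padding
 * (entry_size() gives exactly that length).
 */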

static void
anv_pipeline_cache_load(struct anv_pipeline_cache *cache,
                        const void *data, size_t size)
{
   struct anv_device *device = cache->device;
   struct cache_header header;
   uint8_t uuid[VK_UUID_SIZE];

   if (size < sizeof(header))
      return;
   memcpy(&header, data, sizeof(header));
   if (header.header_size < sizeof(header))
      return;
   if (header.header_version != VK_PIPELINE_CACHE_HEADER_VERSION_ONE)
      return;
   if (header.vendor_id != 0x8086)
      return;
   if (header.device_id != device->chipset_id)
      return;
   anv_device_get_cache_uuid(uuid);
   if (memcmp(header.uuid, uuid, VK_UUID_SIZE) != 0)
      return;

   const void *end = data + size;
   const void *p = data + header.header_size;

   while (p < end) {
      /* The kernels aren't 64-byte aligned in the serialized format, so
       * they're always right after the prog_data.
       */
      const struct cache_entry *entry = p;
      const void *kernel = &entry->prog_data[entry->prog_data_size];

      const struct brw_stage_prog_data *prog_data =
         (const struct brw_stage_prog_data *) entry->prog_data;

      anv_pipeline_cache_upload_kernel(cache, entry->sha1,
                                       kernel, entry->kernel_size,
                                       &prog_data,
                                       entry->prog_data_size);
      p = kernel + entry->kernel_size;
   }
}

VkResult anv_CreatePipelineCache(
    VkDevice                                    _device,
    const VkPipelineCacheCreateInfo*            pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkPipelineCache*                            pPipelineCache)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_pipeline_cache *cache;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO);
   assert(pCreateInfo->flags == 0);

   cache = anv_alloc2(&device->alloc, pAllocator,
                      sizeof(*cache), 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (cache == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   anv_pipeline_cache_init(cache, device);

   if (pCreateInfo->initialDataSize > 0)
      anv_pipeline_cache_load(cache,
                              pCreateInfo->pInitialData,
                              pCreateInfo->initialDataSize);

   *pPipelineCache = anv_pipeline_cache_to_handle(cache);

   return VK_SUCCESS;
}
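
/* Application-side sketch (not driver code): a blob previously saved from
 * vkGetPipelineCacheData can be handed back at create time, which ends up in
 * anv_pipeline_cache_load above.
 *
 *    VkPipelineCacheCreateInfo info = {
 *       .sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO,
 *       .initialDataSize = blob_size,
 *       .pInitialData = blob,
 *    };
 *    VkPipelineCache pipeline_cache;
 *    vkCreatePipelineCache(device, &info, NULL, &pipeline_cache);
 */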

void anv_DestroyPipelineCache(
    VkDevice                                    _device,
    VkPipelineCache                             _cache,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache);

   anv_pipeline_cache_finish(cache);

   anv_free2(&device->alloc, pAllocator, cache);
}

VkResult anv_GetPipelineCacheData(
    VkDevice                                    _device,
    VkPipelineCache                             _cache,
    size_t*                                     pDataSize,
    void*                                       pData)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache);
   struct cache_header *header;

   const size_t size = sizeof(*header) + cache->total_size;

   if (pData == NULL) {
      *pDataSize = size;
      return VK_SUCCESS;
   }

   if (*pDataSize < sizeof(*header)) {
      *pDataSize = 0;
      return VK_INCOMPLETE;
   }

   void *p = pData, *end = pData + *pDataSize;
   header = p;
   header->header_size = sizeof(*header);
   header->header_version = VK_PIPELINE_CACHE_HEADER_VERSION_ONE;
   header->vendor_id = 0x8086;
   header->device_id = device->chipset_id;
   anv_device_get_cache_uuid(header->uuid);
   p += header->header_size;

   struct cache_entry *entry;
   for (uint32_t i = 0; i < cache->table_size; i++) {
      if (cache->hash_table[i] == ~0)
         continue;

      entry = cache->program_stream.block_pool->map + cache->hash_table[i];
      if (end < p + entry_size(entry))
         break;

      memcpy(p, entry, sizeof(*entry) + entry->prog_data_size);
      p += sizeof(*entry) + entry->prog_data_size;

      void *kernel = (void *) entry +
         align_u32(sizeof(*entry) + entry->prog_data_size, 64);

      memcpy(p, kernel, entry->kernel_size);
      p += entry->kernel_size;
   }

   *pDataSize = p - pData;

   return VK_SUCCESS;
}
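
/* Application-side sketch (not driver code) of the standard two-call idiom:
 * query the size first, then retrieve the blob for later reuse.
 *
 *    size_t size;
 *    vkGetPipelineCacheData(device, pipeline_cache, &size, NULL);
 *    void *blob = malloc(size);
 *    vkGetPipelineCacheData(device, pipeline_cache, &size, blob);
 *    // Persist blob; feed it back later via
 *    // VkPipelineCacheCreateInfo::pInitialData.
 */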

static void
anv_pipeline_cache_merge(struct anv_pipeline_cache *dst,
                         struct anv_pipeline_cache *src)
{
   for (uint32_t i = 0; i < src->table_size; i++) {
      if (src->hash_table[i] == ~0)
         continue;

      struct cache_entry *entry =
         src->program_stream.block_pool->map + src->hash_table[i];

      if (anv_pipeline_cache_search(dst, entry->sha1, NULL) != NO_KERNEL)
         continue;

      const void *kernel = (void *) entry +
         align_u32(sizeof(*entry) + entry->prog_data_size, 64);
      const struct brw_stage_prog_data *prog_data =
         (const struct brw_stage_prog_data *) entry->prog_data;

      anv_pipeline_cache_upload_kernel(dst, entry->sha1,
                                       kernel, entry->kernel_size,
                                       &prog_data, entry->prog_data_size);
   }
}

VkResult anv_MergePipelineCaches(
    VkDevice                                    _device,
    VkPipelineCache                             destCache,
    uint32_t                                    srcCacheCount,
    const VkPipelineCache*                      pSrcCaches)
{
   ANV_FROM_HANDLE(anv_pipeline_cache, dst, destCache);

   for (uint32_t i = 0; i < srcCacheCount; i++) {
      ANV_FROM_HANDLE(anv_pipeline_cache, src, pSrcCaches[i]);

      anv_pipeline_cache_merge(dst, src);
   }

   return VK_SUCCESS;
}