anv/pipeline: Add skeleton support for spilling to bindless
[mesa.git] / src / intel / vulkan / anv_descriptor_set.c
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <stdbool.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29
30 #include "util/mesa-sha1.h"
31 #include "vk_util.h"
32
33 #include "anv_private.h"
34
35 /*
36 * Descriptor set layouts.
37 */
38
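/* Map a Vulkan descriptor type to the anv_descriptor_data flags describing
 * what the driver has to store for it: surface and/or sampler state, a
 * buffer view, legacy image params, or raw inline uniform data.
 */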
39 static enum anv_descriptor_data
40 anv_descriptor_data_for_type(const struct anv_physical_device *device,
41 VkDescriptorType type)
42 {
43 enum anv_descriptor_data data = 0;
44
45 switch (type) {
46 case VK_DESCRIPTOR_TYPE_SAMPLER:
47 data = ANV_DESCRIPTOR_SAMPLER_STATE;
48 break;
49
50 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
51 data = ANV_DESCRIPTOR_SURFACE_STATE |
52 ANV_DESCRIPTOR_SAMPLER_STATE;
53 break;
54
55 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
56 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
57 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
58 data = ANV_DESCRIPTOR_SURFACE_STATE;
59 break;
60
61 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
62 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
63 data = ANV_DESCRIPTOR_SURFACE_STATE;
64 if (device->info.gen < 9)
65 data |= ANV_DESCRIPTOR_IMAGE_PARAM;
66 break;
67
68 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
69 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
70 data = ANV_DESCRIPTOR_SURFACE_STATE |
71 ANV_DESCRIPTOR_BUFFER_VIEW;
72 break;
73
74 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
75 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
76 data = ANV_DESCRIPTOR_SURFACE_STATE;
77 break;
78
79 case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT:
80 data = ANV_DESCRIPTOR_INLINE_UNIFORM;
81 break;
82
83 default:
84 unreachable("Unsupported descriptor type");
85 }
86
87 return data;
88 }
89
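/* Size in bytes that one descriptor with the given data flags occupies in
 * the descriptor buffer. Only image params take up space here for now;
 * surface/sampler state and buffer views live in separate allocations.
 */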
90 static unsigned
91 anv_descriptor_data_size(enum anv_descriptor_data data)
92 {
93 unsigned size = 0;
94
95 if (data & ANV_DESCRIPTOR_IMAGE_PARAM)
96 size += BRW_IMAGE_PARAM_SIZE * 4;
97
98 return size;
99 }
100
101 /** Returns the size in bytes of each descriptor with the given layout */
102 unsigned
103 anv_descriptor_size(const struct anv_descriptor_set_binding_layout *layout)
104 {
105 if (layout->data & ANV_DESCRIPTOR_INLINE_UNIFORM) {
106 assert(layout->data == ANV_DESCRIPTOR_INLINE_UNIFORM);
107 return layout->array_size;
108 }
109
110 return anv_descriptor_data_size(layout->data);
111 }
112
113 /** Returns the size in bytes of each descriptor of the given type
114 *
115 * This version of the function does not have access to the entire layout so
116 * it may only work on certain descriptor types where the descriptor size is
117 * entirely determined by the descriptor type. Whenever possible, code should
118 * use anv_descriptor_size() instead.
119 */
120 unsigned
121 anv_descriptor_type_size(const struct anv_physical_device *pdevice,
122 VkDescriptorType type)
123 {
124 assert(type != VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT);
125 return anv_descriptor_data_size(anv_descriptor_data_for_type(pdevice, type));
126 }
127
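/* Skeleton for the bindless spilling support this commit introduces: nothing
 * advertises bindless yet, so this always returns false and the helpers
 * below keep using binding-table descriptors.
 */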
128 static bool
129 anv_descriptor_data_supports_bindless(const struct anv_physical_device *pdevice,
130 enum anv_descriptor_data data,
131 bool sampler)
132 {
133 return false;
134 }
135
136 bool
137 anv_descriptor_supports_bindless(const struct anv_physical_device *pdevice,
138 const struct anv_descriptor_set_binding_layout *binding,
139 bool sampler)
140 {
141 return anv_descriptor_data_supports_bindless(pdevice, binding->data,
142 sampler);
143 }
144
145 bool
146 anv_descriptor_requires_bindless(const struct anv_physical_device *pdevice,
147 const struct anv_descriptor_set_binding_layout *binding,
148 bool sampler)
149 {
150 if (pdevice->always_use_bindless)
151 return anv_descriptor_supports_bindless(pdevice, binding, sampler);
152
153 return false;
154 }
155
156 void anv_GetDescriptorSetLayoutSupport(
157 VkDevice _device,
158 const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
159 VkDescriptorSetLayoutSupport* pSupport)
160 {
161 ANV_FROM_HANDLE(anv_device, device, _device);
162 const struct anv_physical_device *pdevice =
163 &device->instance->physicalDevice;
164
165 uint32_t surface_count[MESA_SHADER_STAGES] = { 0, };
166
167 for (uint32_t b = 0; b < pCreateInfo->bindingCount; b++) {
168 const VkDescriptorSetLayoutBinding *binding = &pCreateInfo->pBindings[b];
169
170 enum anv_descriptor_data desc_data =
171 anv_descriptor_data_for_type(pdevice, binding->descriptorType);
172
173 switch (binding->descriptorType) {
174 case VK_DESCRIPTOR_TYPE_SAMPLER:
175 /* There is no real limit on samplers */
176 break;
177
178 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
179 if (anv_descriptor_data_supports_bindless(pdevice, desc_data, false))
180 break;
181
182 if (binding->pImmutableSamplers) {
183 for (uint32_t i = 0; i < binding->descriptorCount; i++) {
184 ANV_FROM_HANDLE(anv_sampler, sampler,
185 binding->pImmutableSamplers[i]);
186 anv_foreach_stage(s, binding->stageFlags)
187 surface_count[s] += sampler->n_planes;
188 }
189 } else {
190 anv_foreach_stage(s, binding->stageFlags)
191 surface_count[s] += binding->descriptorCount;
192 }
193 break;
194
195 default:
196 if (anv_descriptor_data_supports_bindless(pdevice, desc_data, false))
197 break;
198
199 anv_foreach_stage(s, binding->stageFlags)
200 surface_count[s] += binding->descriptorCount;
201 break;
202 }
203 }
204
205 bool supported = true;
206 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
207 /* Our maximum binding table size is 240 and we need to reserve 8 for
208 * render targets.
209 */
210 if (surface_count[s] >= MAX_BINDING_TABLE_SIZE - MAX_RTS)
211 supported = false;
212 }
213
214 pSupport->supported = supported;
215 }
216
217 VkResult anv_CreateDescriptorSetLayout(
218 VkDevice _device,
219 const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
220 const VkAllocationCallbacks* pAllocator,
221 VkDescriptorSetLayout* pSetLayout)
222 {
223 ANV_FROM_HANDLE(anv_device, device, _device);
224
225 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO);
226
227 uint32_t max_binding = 0;
228 uint32_t immutable_sampler_count = 0;
229 for (uint32_t j = 0; j < pCreateInfo->bindingCount; j++) {
230 max_binding = MAX2(max_binding, pCreateInfo->pBindings[j].binding);
231
232 /* From the Vulkan 1.1.97 spec for VkDescriptorSetLayoutBinding:
233 *
234 * "If descriptorType specifies a VK_DESCRIPTOR_TYPE_SAMPLER or
235 * VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER type descriptor, then
236 * pImmutableSamplers can be used to initialize a set of immutable
237 * samplers. [...] If descriptorType is not one of these descriptor
238 * types, then pImmutableSamplers is ignored."
239 *
240 * We need to be careful here and only parse pImmutableSamplers if we
241 * have one of the right descriptor types.
242 */
243 VkDescriptorType desc_type = pCreateInfo->pBindings[j].descriptorType;
244 if ((desc_type == VK_DESCRIPTOR_TYPE_SAMPLER ||
245 desc_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) &&
246 pCreateInfo->pBindings[j].pImmutableSamplers)
247 immutable_sampler_count += pCreateInfo->pBindings[j].descriptorCount;
248 }
249
250 struct anv_descriptor_set_layout *set_layout;
251 struct anv_descriptor_set_binding_layout *bindings;
252 struct anv_sampler **samplers;
253
254 /* We need to allocate descriptor set layouts off the device allocator
255 * with DEVICE scope because they are reference counted and may not be
256 * destroyed when vkDestroyDescriptorSetLayout is called.
257 */
258 ANV_MULTIALLOC(ma);
259 anv_multialloc_add(&ma, &set_layout, 1);
260 anv_multialloc_add(&ma, &bindings, max_binding + 1);
261 anv_multialloc_add(&ma, &samplers, immutable_sampler_count);
262
263 if (!anv_multialloc_alloc(&ma, &device->alloc,
264 VK_SYSTEM_ALLOCATION_SCOPE_DEVICE))
265 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
266
267 memset(set_layout, 0, sizeof(*set_layout));
268 set_layout->ref_cnt = 1;
269 set_layout->binding_count = max_binding + 1;
270
271 for (uint32_t b = 0; b <= max_binding; b++) {
272 /* Initialize all binding_layout entries to -1 */
273 memset(&set_layout->binding[b], -1, sizeof(set_layout->binding[b]));
274
275 set_layout->binding[b].data = 0;
276 set_layout->binding[b].array_size = 0;
277 set_layout->binding[b].immutable_samplers = NULL;
278 }
279
280 /* Initialize all samplers to 0 */
281 memset(samplers, 0, immutable_sampler_count * sizeof(*samplers));
282
283 uint32_t buffer_view_count = 0;
284 uint32_t dynamic_offset_count = 0;
285 uint32_t descriptor_buffer_size = 0;
286
287 for (uint32_t j = 0; j < pCreateInfo->bindingCount; j++) {
288 const VkDescriptorSetLayoutBinding *binding = &pCreateInfo->pBindings[j];
289 uint32_t b = binding->binding;
290 /* We temporarily store the pointer to the binding in the
291 * immutable_samplers pointer. This provides us with a quick-and-dirty
292 * way to sort the bindings by binding number.
293 */
294 set_layout->binding[b].immutable_samplers = (void *)binding;
295 }
296
297 for (uint32_t b = 0; b <= max_binding; b++) {
298 const VkDescriptorSetLayoutBinding *binding =
299 (void *)set_layout->binding[b].immutable_samplers;
300
301 if (binding == NULL)
302 continue;
303
304 /* We temporarily stashed the pointer to the binding in the
305 * immutable_samplers pointer. Now that we've pulled it back out
306 * again, we reset immutable_samplers to NULL.
307 */
308 set_layout->binding[b].immutable_samplers = NULL;
309
310 if (binding->descriptorCount == 0)
311 continue;
312
313 #ifndef NDEBUG
314 set_layout->binding[b].type = binding->descriptorType;
315 #endif
316 set_layout->binding[b].data =
317 anv_descriptor_data_for_type(&device->instance->physicalDevice,
318 binding->descriptorType);
319 set_layout->binding[b].array_size = binding->descriptorCount;
320 set_layout->binding[b].descriptor_index = set_layout->size;
321 set_layout->size += binding->descriptorCount;
322
323 if (set_layout->binding[b].data & ANV_DESCRIPTOR_BUFFER_VIEW) {
324 set_layout->binding[b].buffer_view_index = buffer_view_count;
325 buffer_view_count += binding->descriptorCount;
326 }
327
328 switch (binding->descriptorType) {
329 case VK_DESCRIPTOR_TYPE_SAMPLER:
330 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
331 if (binding->pImmutableSamplers) {
332 set_layout->binding[b].immutable_samplers = samplers;
333 samplers += binding->descriptorCount;
334
335 for (uint32_t i = 0; i < binding->descriptorCount; i++)
336 set_layout->binding[b].immutable_samplers[i] =
337 anv_sampler_from_handle(binding->pImmutableSamplers[i]);
338 }
339 break;
340 default:
341 break;
342 }
343
344 switch (binding->descriptorType) {
345 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
346 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
347 set_layout->binding[b].dynamic_offset_index = dynamic_offset_count;
348 dynamic_offset_count += binding->descriptorCount;
349 break;
350
351 default:
352 break;
353 }
354
355 if (binding->descriptorType ==
356 VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) {
357 /* Inline uniform blocks are specified to use the descriptor array
358 * size as the size in bytes of the block.
359 */
360 descriptor_buffer_size = align_u32(descriptor_buffer_size, 32);
361 set_layout->binding[b].descriptor_offset = descriptor_buffer_size;
362 descriptor_buffer_size += binding->descriptorCount;
363 } else {
364 set_layout->binding[b].descriptor_offset = descriptor_buffer_size;
365 descriptor_buffer_size += anv_descriptor_size(&set_layout->binding[b]) *
366 binding->descriptorCount;
367 }
368
369 set_layout->shader_stages |= binding->stageFlags;
370 }
371
372 set_layout->buffer_view_count = buffer_view_count;
373 set_layout->dynamic_offset_count = dynamic_offset_count;
374 set_layout->descriptor_buffer_size = descriptor_buffer_size;
375
376 *pSetLayout = anv_descriptor_set_layout_to_handle(set_layout);
377
378 return VK_SUCCESS;
379 }
380
381 void anv_DestroyDescriptorSetLayout(
382 VkDevice _device,
383 VkDescriptorSetLayout _set_layout,
384 const VkAllocationCallbacks* pAllocator)
385 {
386 ANV_FROM_HANDLE(anv_device, device, _device);
387 ANV_FROM_HANDLE(anv_descriptor_set_layout, set_layout, _set_layout);
388
389 if (!set_layout)
390 return;
391
392 anv_descriptor_set_layout_unref(device, set_layout);
393 }
394
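/* Hash the parts of a descriptor set layout that can affect generated
 * shaders. The result is folded into anv_pipeline_layout::sha1 below so
 * that pipeline hashing can account for the layout.
 */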
395 #define SHA1_UPDATE_VALUE(ctx, x) _mesa_sha1_update(ctx, &(x), sizeof(x));
396
397 static void
398 sha1_update_immutable_sampler(struct mesa_sha1 *ctx,
399 const struct anv_sampler *sampler)
400 {
401 if (!sampler->conversion)
402 return;
403
404 /* The only thing that affects the shader is ycbcr conversion */
405 _mesa_sha1_update(ctx, sampler->conversion,
406 sizeof(*sampler->conversion));
407 }
408
409 static void
410 sha1_update_descriptor_set_binding_layout(struct mesa_sha1 *ctx,
411 const struct anv_descriptor_set_binding_layout *layout)
412 {
413 SHA1_UPDATE_VALUE(ctx, layout->data);
414 SHA1_UPDATE_VALUE(ctx, layout->array_size);
415 SHA1_UPDATE_VALUE(ctx, layout->descriptor_index);
416 SHA1_UPDATE_VALUE(ctx, layout->dynamic_offset_index);
417 SHA1_UPDATE_VALUE(ctx, layout->buffer_view_index);
418 SHA1_UPDATE_VALUE(ctx, layout->descriptor_offset);
419
420 if (layout->immutable_samplers) {
421 for (uint16_t i = 0; i < layout->array_size; i++)
422 sha1_update_immutable_sampler(ctx, layout->immutable_samplers[i]);
423 }
424 }
425
426 static void
427 sha1_update_descriptor_set_layout(struct mesa_sha1 *ctx,
428 const struct anv_descriptor_set_layout *layout)
429 {
430 SHA1_UPDATE_VALUE(ctx, layout->binding_count);
431 SHA1_UPDATE_VALUE(ctx, layout->size);
432 SHA1_UPDATE_VALUE(ctx, layout->shader_stages);
433 SHA1_UPDATE_VALUE(ctx, layout->buffer_view_count);
434 SHA1_UPDATE_VALUE(ctx, layout->dynamic_offset_count);
435 SHA1_UPDATE_VALUE(ctx, layout->descriptor_buffer_size);
436
437 for (uint16_t i = 0; i < layout->binding_count; i++)
438 sha1_update_descriptor_set_binding_layout(ctx, &layout->binding[i]);
439 }
440
441 /*
442 * Pipeline layouts. These have nothing to do with the pipeline. They are
443 * just multiple descriptor set layouts pasted together
444 * just multiple descriptor set layouts pasted together.
445
446 VkResult anv_CreatePipelineLayout(
447 VkDevice _device,
448 const VkPipelineLayoutCreateInfo* pCreateInfo,
449 const VkAllocationCallbacks* pAllocator,
450 VkPipelineLayout* pPipelineLayout)
451 {
452 ANV_FROM_HANDLE(anv_device, device, _device);
453 struct anv_pipeline_layout *layout;
454
455 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO);
456
457 layout = vk_alloc2(&device->alloc, pAllocator, sizeof(*layout), 8,
458 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
459 if (layout == NULL)
460 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
461
462 layout->num_sets = pCreateInfo->setLayoutCount;
463
464 unsigned dynamic_offset_count = 0;
465
466 for (uint32_t set = 0; set < pCreateInfo->setLayoutCount; set++) {
467 ANV_FROM_HANDLE(anv_descriptor_set_layout, set_layout,
468 pCreateInfo->pSetLayouts[set]);
469 layout->set[set].layout = set_layout;
470 anv_descriptor_set_layout_ref(set_layout);
471
472 layout->set[set].dynamic_offset_start = dynamic_offset_count;
473 for (uint32_t b = 0; b < set_layout->binding_count; b++) {
474 if (set_layout->binding[b].dynamic_offset_index < 0)
475 continue;
476
477 dynamic_offset_count += set_layout->binding[b].array_size;
478 }
479 }
480
481 struct mesa_sha1 ctx;
482 _mesa_sha1_init(&ctx);
483 for (unsigned s = 0; s < layout->num_sets; s++) {
484 sha1_update_descriptor_set_layout(&ctx, layout->set[s].layout);
485 _mesa_sha1_update(&ctx, &layout->set[s].dynamic_offset_start,
486 sizeof(layout->set[s].dynamic_offset_start));
487 }
488 _mesa_sha1_update(&ctx, &layout->num_sets, sizeof(layout->num_sets));
489 _mesa_sha1_final(&ctx, layout->sha1);
490
491 *pPipelineLayout = anv_pipeline_layout_to_handle(layout);
492
493 return VK_SUCCESS;
494 }
495
496 void anv_DestroyPipelineLayout(
497 VkDevice _device,
498 VkPipelineLayout _pipelineLayout,
499 const VkAllocationCallbacks* pAllocator)
500 {
501 ANV_FROM_HANDLE(anv_device, device, _device);
502 ANV_FROM_HANDLE(anv_pipeline_layout, pipeline_layout, _pipelineLayout);
503
504 if (!pipeline_layout)
505 return;
506
507 for (uint32_t i = 0; i < pipeline_layout->num_sets; i++)
508 anv_descriptor_set_layout_unref(device, pipeline_layout->set[i].layout);
509
510 vk_free2(&device->alloc, pAllocator, pipeline_layout);
511 }
512
513 /*
514 * Descriptor pools.
515 *
516 * These are implemented using a big pool of memory with a free list for the
517 * host memory allocations, plus a state_stream and a free list for the
518 * buffer view surface state. The spec allows us to fail to allocate due to
519 * fragmentation in all cases but two: 1) after a pool reset, allocating up
520 * to the pool size with no freeing must succeed, and 2) allocating and
521 * freeing only descriptor sets with the same layout must succeed. Case 1) is
522 * easy enough, and the free lists let us recycle blocks for case 2).
523 */
524
525 /* The vma heap reserves 0 to mean NULL; we have to offset by some amount to
526 * ensure we can allocate the entire BO without hitting zero. The actual
527 * amount doesn't matter.
528 */
529 #define POOL_HEAP_OFFSET 64
530
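/* Sentinel for an empty free list. Offset 0 into pool->data is a valid set
 * location, so it cannot double as the end-of-list marker.
 */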
531 #define EMPTY 1
532
533 VkResult anv_CreateDescriptorPool(
534 VkDevice _device,
535 const VkDescriptorPoolCreateInfo* pCreateInfo,
536 const VkAllocationCallbacks* pAllocator,
537 VkDescriptorPool* pDescriptorPool)
538 {
539 ANV_FROM_HANDLE(anv_device, device, _device);
540 struct anv_descriptor_pool *pool;
541
542 const VkDescriptorPoolInlineUniformBlockCreateInfoEXT *inline_info =
543 vk_find_struct_const(pCreateInfo->pNext,
544 DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO_EXT);
545
546 uint32_t descriptor_count = 0;
547 uint32_t buffer_view_count = 0;
548 uint32_t descriptor_bo_size = 0;
549 for (uint32_t i = 0; i < pCreateInfo->poolSizeCount; i++) {
550 enum anv_descriptor_data desc_data =
551 anv_descriptor_data_for_type(&device->instance->physicalDevice,
552 pCreateInfo->pPoolSizes[i].type);
553
554 if (desc_data & ANV_DESCRIPTOR_BUFFER_VIEW)
555 buffer_view_count += pCreateInfo->pPoolSizes[i].descriptorCount;
556
557 unsigned desc_data_size = anv_descriptor_data_size(desc_data) *
558 pCreateInfo->pPoolSizes[i].descriptorCount;
559
560 if (pCreateInfo->pPoolSizes[i].type ==
561 VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) {
562 /* Inline uniform blocks are specified to use the descriptor array
563 * size as the size in bytes of the block.
564 */
565 assert(inline_info);
566 desc_data_size += pCreateInfo->pPoolSizes[i].descriptorCount;
567 }
568
569 descriptor_bo_size += desc_data_size;
570
571 descriptor_count += pCreateInfo->pPoolSizes[i].descriptorCount;
572 }
573 /* We have to align descriptor buffer allocations to 32B so that we can
574 * push descriptor buffers. This means that each descriptor buffer
575 * allocated may burn up to 32B of extra space to get the right alignment.
576 * (Technically, it's at most 28B because we're always going to start at
577 * least 4B aligned but we're being conservative here.) Allocate enough
578 * extra space that we can chop it into maxSets pieces and align each one
579 * of them to 32B.
580 */
581 descriptor_bo_size += 32 * pCreateInfo->maxSets;
582 descriptor_bo_size = ALIGN(descriptor_bo_size, 4096);
583 /* We align inline uniform blocks to 32B */
584 if (inline_info)
585 descriptor_bo_size += 32 * inline_info->maxInlineUniformBlockBindings;
586
587 const size_t pool_size =
588 pCreateInfo->maxSets * sizeof(struct anv_descriptor_set) +
589 descriptor_count * sizeof(struct anv_descriptor) +
590 buffer_view_count * sizeof(struct anv_buffer_view);
591 const size_t total_size = sizeof(*pool) + pool_size;
592
593 pool = vk_alloc2(&device->alloc, pAllocator, total_size, 8,
594 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
595 if (!pool)
596 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
597
598 pool->size = pool_size;
599 pool->next = 0;
600 pool->free_list = EMPTY;
601
602 if (descriptor_bo_size > 0) {
603 VkResult result = anv_bo_init_new(&pool->bo, device, descriptor_bo_size);
604 if (result != VK_SUCCESS) {
605 vk_free2(&device->alloc, pAllocator, pool);
606 return result;
607 }
608
609 anv_gem_set_caching(device, pool->bo.gem_handle, I915_CACHING_CACHED);
610
611 pool->bo.map = anv_gem_mmap(device, pool->bo.gem_handle, 0,
612 descriptor_bo_size, 0);
613 if (pool->bo.map == NULL) {
614 anv_gem_close(device, pool->bo.gem_handle);
615 vk_free2(&device->alloc, pAllocator, pool);
616 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
617 }
618
619 if (device->instance->physicalDevice.use_softpin) {
620 pool->bo.flags |= EXEC_OBJECT_PINNED;
621 anv_vma_alloc(device, &pool->bo);
622 }
623
624 util_vma_heap_init(&pool->bo_heap, POOL_HEAP_OFFSET, descriptor_bo_size);
625 } else {
626 pool->bo.size = 0;
627 }
628
629 anv_state_stream_init(&pool->surface_state_stream,
630 &device->surface_state_pool, 4096);
631 pool->surface_state_free_list = NULL;
632
633 list_inithead(&pool->desc_sets);
634
635 *pDescriptorPool = anv_descriptor_pool_to_handle(pool);
636
637 return VK_SUCCESS;
638 }
639
640 void anv_DestroyDescriptorPool(
641 VkDevice _device,
642 VkDescriptorPool _pool,
643 const VkAllocationCallbacks* pAllocator)
644 {
645 ANV_FROM_HANDLE(anv_device, device, _device);
646 ANV_FROM_HANDLE(anv_descriptor_pool, pool, _pool);
647
648 if (!pool)
649 return;
650
651 if (pool->bo.size) {
652 anv_gem_munmap(pool->bo.map, pool->bo.size);
653 anv_vma_free(device, &pool->bo);
654 anv_gem_close(device, pool->bo.gem_handle);
655 }
656 anv_state_stream_finish(&pool->surface_state_stream);
657
658 list_for_each_entry_safe(struct anv_descriptor_set, set,
659 &pool->desc_sets, pool_link) {
660 anv_descriptor_set_destroy(device, pool, set);
661 }
662
663 util_vma_heap_finish(&pool->bo_heap);
664
665 vk_free2(&device->alloc, pAllocator, pool);
666 }
667
668 VkResult anv_ResetDescriptorPool(
669 VkDevice _device,
670 VkDescriptorPool descriptorPool,
671 VkDescriptorPoolResetFlags flags)
672 {
673 ANV_FROM_HANDLE(anv_device, device, _device);
674 ANV_FROM_HANDLE(anv_descriptor_pool, pool, descriptorPool);
675
676 list_for_each_entry_safe(struct anv_descriptor_set, set,
677 &pool->desc_sets, pool_link) {
678 anv_descriptor_set_destroy(device, pool, set);
679 }
680
681 pool->next = 0;
682 pool->free_list = EMPTY;
683
684 if (pool->bo.size) {
685 util_vma_heap_finish(&pool->bo_heap);
686 util_vma_heap_init(&pool->bo_heap, POOL_HEAP_OFFSET, pool->bo.size);
687 }
688
689 anv_state_stream_finish(&pool->surface_state_stream);
690 anv_state_stream_init(&pool->surface_state_stream,
691 &device->surface_state_pool, 4096);
692 pool->surface_state_free_list = NULL;
693
694 return VK_SUCCESS;
695 }
696
697 struct pool_free_list_entry {
698 uint32_t next;
699 uint32_t size;
700 };
701
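/* Allocate host storage for a descriptor set. We first try to bump-allocate
 * from the unused tail of the pool and, failing that, walk the free list for
 * the first block that is big enough. If only too-small free blocks remain,
 * we report fragmentation; otherwise the pool is simply out of memory.
 */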
702 static VkResult
703 anv_descriptor_pool_alloc_set(struct anv_descriptor_pool *pool,
704 uint32_t size,
705 struct anv_descriptor_set **set)
706 {
707 if (size <= pool->size - pool->next) {
708 *set = (struct anv_descriptor_set *) (pool->data + pool->next);
709 pool->next += size;
710 return VK_SUCCESS;
711 } else {
712 struct pool_free_list_entry *entry;
713 uint32_t *link = &pool->free_list;
714 for (uint32_t f = pool->free_list; f != EMPTY; f = entry->next) {
715 entry = (struct pool_free_list_entry *) (pool->data + f);
716 if (size <= entry->size) {
717 *link = entry->next;
718 *set = (struct anv_descriptor_set *) entry;
719 return VK_SUCCESS;
720 }
721 link = &entry->next;
722 }
723
724 if (pool->free_list != EMPTY) {
725 return vk_error(VK_ERROR_FRAGMENTED_POOL);
726 } else {
727 return vk_error(VK_ERROR_OUT_OF_POOL_MEMORY);
728 }
729 }
730 }
731
732 static void
733 anv_descriptor_pool_free_set(struct anv_descriptor_pool *pool,
734 struct anv_descriptor_set *set)
735 {
736 /* Put the descriptor set allocation back on the free list. */
737 const uint32_t index = (char *) set - pool->data;
738 if (index + set->size == pool->next) {
739 pool->next = index;
740 } else {
741 struct pool_free_list_entry *entry = (struct pool_free_list_entry *) set;
742 entry->next = pool->free_list;
743 entry->size = set->size;
744 pool->free_list = (char *) entry - pool->data;
745 }
746
747 list_del(&set->pool_link);
748 }
749
750 struct surface_state_free_list_entry {
751 void *next;
752 struct anv_state state;
753 };
754
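/* Buffer view surface states are a fixed 64 bytes. Freed states are kept on
 * a free list threaded through their own maps so they can be recycled before
 * we carve new ones out of the pool's surface state stream.
 */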
755 static struct anv_state
756 anv_descriptor_pool_alloc_state(struct anv_descriptor_pool *pool)
757 {
758 struct surface_state_free_list_entry *entry =
759 pool->surface_state_free_list;
760
761 if (entry) {
762 struct anv_state state = entry->state;
763 pool->surface_state_free_list = entry->next;
764 assert(state.alloc_size == 64);
765 return state;
766 } else {
767 return anv_state_stream_alloc(&pool->surface_state_stream, 64, 64);
768 }
769 }
770
771 static void
772 anv_descriptor_pool_free_state(struct anv_descriptor_pool *pool,
773 struct anv_state state)
774 {
775 /* Put the buffer view surface state back on the free list. */
776 struct surface_state_free_list_entry *entry = state.map;
777 entry->next = pool->surface_state_free_list;
778 entry->state = state;
779 pool->surface_state_free_list = entry;
780 }
781
782 size_t
783 anv_descriptor_set_layout_size(const struct anv_descriptor_set_layout *layout)
784 {
785 return
786 sizeof(struct anv_descriptor_set) +
787 layout->size * sizeof(struct anv_descriptor) +
788 layout->buffer_view_count * sizeof(struct anv_buffer_view);
789 }
790
791 VkResult
792 anv_descriptor_set_create(struct anv_device *device,
793 struct anv_descriptor_pool *pool,
794 struct anv_descriptor_set_layout *layout,
795 struct anv_descriptor_set **out_set)
796 {
797 struct anv_descriptor_set *set;
798 const size_t size = anv_descriptor_set_layout_size(layout);
799
800 VkResult result = anv_descriptor_pool_alloc_set(pool, size, &set);
801 if (result != VK_SUCCESS)
802 return result;
803
804 if (layout->descriptor_buffer_size) {
805 /* Align the size to 32 so that alignment gaps don't cause extra holes
806 * in the heap which can lead to bad performance.
807 */
808 uint64_t pool_vma_offset =
809 util_vma_heap_alloc(&pool->bo_heap,
810 ALIGN(layout->descriptor_buffer_size, 32), 32);
811 if (pool_vma_offset == 0) {
812 anv_descriptor_pool_free_set(pool, set);
813 return vk_error(VK_ERROR_FRAGMENTED_POOL);
814 }
815 assert(pool_vma_offset >= POOL_HEAP_OFFSET &&
816 pool_vma_offset - POOL_HEAP_OFFSET <= INT32_MAX);
817 set->desc_mem.offset = pool_vma_offset - POOL_HEAP_OFFSET;
818 set->desc_mem.alloc_size = layout->descriptor_buffer_size;
819 set->desc_mem.map = pool->bo.map + set->desc_mem.offset;
820
821 set->desc_surface_state = anv_descriptor_pool_alloc_state(pool);
822 anv_fill_buffer_surface_state(device, set->desc_surface_state,
823 ISL_FORMAT_R32G32B32A32_FLOAT,
824 (struct anv_address) {
825 .bo = &pool->bo,
826 .offset = set->desc_mem.offset,
827 },
828 layout->descriptor_buffer_size, 1);
829 } else {
830 set->desc_mem = ANV_STATE_NULL;
831 set->desc_surface_state = ANV_STATE_NULL;
832 }
833
834 set->pool = pool;
835 set->layout = layout;
836 anv_descriptor_set_layout_ref(layout);
837
838 set->size = size;
839 set->buffer_views =
840 (struct anv_buffer_view *) &set->descriptors[layout->size];
841 set->buffer_view_count = layout->buffer_view_count;
842
843 /* By defining the descriptors to be zero now, we can later verify that
844 * a descriptor has not been populated with user data.
845 */
846 memset(set->descriptors, 0, sizeof(struct anv_descriptor) * layout->size);
847
848 /* Go through and fill out immutable samplers if we have any */
849 struct anv_descriptor *desc = set->descriptors;
850 for (uint32_t b = 0; b < layout->binding_count; b++) {
851 if (layout->binding[b].immutable_samplers) {
852 for (uint32_t i = 0; i < layout->binding[b].array_size; i++) {
853 /* The type will get changed to COMBINED_IMAGE_SAMPLER in
854 * UpdateDescriptorSets if needed. However, if the descriptor
855 * set has an immutable sampler, UpdateDescriptorSets may never
856 * touch it, so we need to make sure it's 100% valid now.
857 */
858 desc[i] = (struct anv_descriptor) {
859 .type = VK_DESCRIPTOR_TYPE_SAMPLER,
860 .sampler = layout->binding[b].immutable_samplers[i],
861 };
862 }
863 }
864 desc += layout->binding[b].array_size;
865 }
866
867 /* Allocate surface state for the buffer views. */
868 for (uint32_t b = 0; b < layout->buffer_view_count; b++) {
869 set->buffer_views[b].surface_state =
870 anv_descriptor_pool_alloc_state(pool);
871 }
872
873 *out_set = set;
874
875 return VK_SUCCESS;
876 }
877
878 void
879 anv_descriptor_set_destroy(struct anv_device *device,
880 struct anv_descriptor_pool *pool,
881 struct anv_descriptor_set *set)
882 {
883 anv_descriptor_set_layout_unref(device, set->layout);
884
885 if (set->desc_mem.alloc_size) {
886 util_vma_heap_free(&pool->bo_heap,
887 (uint64_t)set->desc_mem.offset + POOL_HEAP_OFFSET,
888 set->desc_mem.alloc_size);
889 anv_descriptor_pool_free_state(pool, set->desc_surface_state);
890 }
891
892 for (uint32_t b = 0; b < set->buffer_view_count; b++)
893 anv_descriptor_pool_free_state(pool, set->buffer_views[b].surface_state);
894
895 anv_descriptor_pool_free_set(pool, set);
896 }
897
898 VkResult anv_AllocateDescriptorSets(
899 VkDevice _device,
900 const VkDescriptorSetAllocateInfo* pAllocateInfo,
901 VkDescriptorSet* pDescriptorSets)
902 {
903 ANV_FROM_HANDLE(anv_device, device, _device);
904 ANV_FROM_HANDLE(anv_descriptor_pool, pool, pAllocateInfo->descriptorPool);
905
906 VkResult result = VK_SUCCESS;
907 struct anv_descriptor_set *set;
908 uint32_t i;
909
910 for (i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
911 ANV_FROM_HANDLE(anv_descriptor_set_layout, layout,
912 pAllocateInfo->pSetLayouts[i]);
913
914 result = anv_descriptor_set_create(device, pool, layout, &set);
915 if (result != VK_SUCCESS)
916 break;
917
918 list_addtail(&set->pool_link, &pool->desc_sets);
919
920 pDescriptorSets[i] = anv_descriptor_set_to_handle(set);
921 }
922
923 if (result != VK_SUCCESS)
924 anv_FreeDescriptorSets(_device, pAllocateInfo->descriptorPool,
925 i, pDescriptorSets);
926
927 return result;
928 }
929
930 VkResult anv_FreeDescriptorSets(
931 VkDevice _device,
932 VkDescriptorPool descriptorPool,
933 uint32_t count,
934 const VkDescriptorSet* pDescriptorSets)
935 {
936 ANV_FROM_HANDLE(anv_device, device, _device);
937 ANV_FROM_HANDLE(anv_descriptor_pool, pool, descriptorPool);
938
939 for (uint32_t i = 0; i < count; i++) {
940 ANV_FROM_HANDLE(anv_descriptor_set, set, pDescriptorSets[i]);
941
942 if (!set)
943 continue;
944
945 anv_descriptor_set_destroy(device, pool, set);
946 }
947
948 return VK_SUCCESS;
949 }
950
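/* Copy a brw_image_param into the descriptor buffer as dwords, laid out at
 * the BRW_IMAGE_PARAM_*_OFFSET offsets the back-end compiler expects.
 */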
951 static void
952 anv_descriptor_set_write_image_param(uint32_t *param_desc_map,
953 const struct brw_image_param *param)
954 {
955 #define WRITE_PARAM_FIELD(field, FIELD) \
956 for (unsigned i = 0; i < ARRAY_SIZE(param->field); i++) \
957 param_desc_map[BRW_IMAGE_PARAM_##FIELD##_OFFSET + i] = param->field[i]
958
959 WRITE_PARAM_FIELD(offset, OFFSET);
960 WRITE_PARAM_FIELD(size, SIZE);
961 WRITE_PARAM_FIELD(stride, STRIDE);
962 WRITE_PARAM_FIELD(tiling, TILING);
963 WRITE_PARAM_FIELD(swizzling, SWIZZLING);
964 WRITE_PARAM_FIELD(size, SIZE);
965
966 #undef WRITE_PARAM_FIELD
967 }
968
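/* Write one image/sampler descriptor. Immutable samplers from the layout
 * take precedence over whatever the application passed in, and on gens that
 * still need image params for storage images we also write those into the
 * descriptor buffer.
 */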
969 void
970 anv_descriptor_set_write_image_view(struct anv_device *device,
971 struct anv_descriptor_set *set,
972 const VkDescriptorImageInfo * const info,
973 VkDescriptorType type,
974 uint32_t binding,
975 uint32_t element)
976 {
977 const struct anv_descriptor_set_binding_layout *bind_layout =
978 &set->layout->binding[binding];
979 struct anv_descriptor *desc =
980 &set->descriptors[bind_layout->descriptor_index + element];
981 struct anv_image_view *image_view = NULL;
982 struct anv_sampler *sampler = NULL;
983
984 assert(type == bind_layout->type);
985
986 switch (type) {
987 case VK_DESCRIPTOR_TYPE_SAMPLER:
988 sampler = anv_sampler_from_handle(info->sampler);
989 break;
990
991 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
992 image_view = anv_image_view_from_handle(info->imageView);
993 sampler = anv_sampler_from_handle(info->sampler);
994 break;
995
996 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
997 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
998 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
999 image_view = anv_image_view_from_handle(info->imageView);
1000 break;
1001
1002 default:
1003 unreachable("invalid descriptor type");
1004 }
1005
1006 /* If this descriptor has an immutable sampler, we don't want to stomp on
1007 * it.
1008 */
1009 sampler = bind_layout->immutable_samplers ?
1010 bind_layout->immutable_samplers[element] :
1011 sampler;
1012
1013 *desc = (struct anv_descriptor) {
1014 .type = type,
1015 .layout = info->imageLayout,
1016 .image_view = image_view,
1017 .sampler = sampler,
1018 };
1019
1020 void *desc_map = set->desc_mem.map + bind_layout->descriptor_offset +
1021 element * anv_descriptor_size(bind_layout);
1022
1023 if (bind_layout->data & ANV_DESCRIPTOR_IMAGE_PARAM) {
1024 /* Storage images can only ever have one plane */
1025 assert(image_view->n_planes == 1);
1026 const struct brw_image_param *image_param =
1027 &image_view->planes[0].storage_image_param;
1028
1029 anv_descriptor_set_write_image_param(desc_map, image_param);
1030 }
1031 }
1032
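/* Write a texel buffer descriptor. The application-created buffer view is
 * stored as-is; storage texel buffers that need image params get them
 * written into the descriptor buffer as well.
 */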
1033 void
1034 anv_descriptor_set_write_buffer_view(struct anv_device *device,
1035 struct anv_descriptor_set *set,
1036 VkDescriptorType type,
1037 struct anv_buffer_view *buffer_view,
1038 uint32_t binding,
1039 uint32_t element)
1040 {
1041 const struct anv_descriptor_set_binding_layout *bind_layout =
1042 &set->layout->binding[binding];
1043 struct anv_descriptor *desc =
1044 &set->descriptors[bind_layout->descriptor_index + element];
1045
1046 assert(type == bind_layout->type);
1047
1048 *desc = (struct anv_descriptor) {
1049 .type = type,
1050 .buffer_view = buffer_view,
1051 };
1052
1053 void *desc_map = set->desc_mem.map + bind_layout->descriptor_offset +
1054 element * anv_descriptor_size(bind_layout);
1055
1056 if (bind_layout->data & ANV_DESCRIPTOR_IMAGE_PARAM) {
1057 anv_descriptor_set_write_image_param(desc_map,
1058 &buffer_view->storage_image_param);
1059 }
1060 }
1061
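/* Write a uniform/storage buffer descriptor. Dynamic buffers keep the raw
 * buffer/offset/range in the descriptor (their surface state is set up later,
 * once the dynamic offset is known); everything else gets one of the set's
 * buffer views with a surface state filled out here.
 */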
1062 void
1063 anv_descriptor_set_write_buffer(struct anv_device *device,
1064 struct anv_descriptor_set *set,
1065 struct anv_state_stream *alloc_stream,
1066 VkDescriptorType type,
1067 struct anv_buffer *buffer,
1068 uint32_t binding,
1069 uint32_t element,
1070 VkDeviceSize offset,
1071 VkDeviceSize range)
1072 {
1073 const struct anv_descriptor_set_binding_layout *bind_layout =
1074 &set->layout->binding[binding];
1075 struct anv_descriptor *desc =
1076 &set->descriptors[bind_layout->descriptor_index + element];
1077
1078 assert(type == bind_layout->type);
1079
1080 if (type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
1081 type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
1082 *desc = (struct anv_descriptor) {
1083 .type = type,
1084 .buffer = buffer,
1085 .offset = offset,
1086 .range = range,
1087 };
1088 } else {
1089 assert(bind_layout->data & ANV_DESCRIPTOR_BUFFER_VIEW);
1090 struct anv_buffer_view *bview =
1091 &set->buffer_views[bind_layout->buffer_view_index + element];
1092
1093 bview->format = anv_isl_format_for_descriptor_type(type);
1094 bview->range = anv_buffer_get_range(buffer, offset, range);
1095 bview->address = anv_address_add(buffer->address, offset);
1096
1097 /* If we're writing descriptors through a push command, we need to
1098 * allocate the surface state from the command buffer. Otherwise it will
1099 * be allocated by the descriptor pool when calling
1100 * vkAllocateDescriptorSets. */
1101 if (alloc_stream)
1102 bview->surface_state = anv_state_stream_alloc(alloc_stream, 64, 64);
1103
1104 anv_fill_buffer_surface_state(device, bview->surface_state,
1105 bview->format,
1106 bview->address, bview->range, 1);
1107
1108 *desc = (struct anv_descriptor) {
1109 .type = type,
1110 .buffer_view = bview,
1111 };
1112 }
1113 }
1114
1115 void
1116 anv_descriptor_set_write_inline_uniform_data(struct anv_device *device,
1117 struct anv_descriptor_set *set,
1118 uint32_t binding,
1119 const void *data,
1120 size_t offset,
1121 size_t size)
1122 {
1123 const struct anv_descriptor_set_binding_layout *bind_layout =
1124 &set->layout->binding[binding];
1125
1126 assert(bind_layout->data & ANV_DESCRIPTOR_INLINE_UNIFORM);
1127
1128 void *desc_map = set->desc_mem.map + bind_layout->descriptor_offset;
1129
1130 memcpy(desc_map + offset, data, size);
1131 }
1132
1133 void anv_UpdateDescriptorSets(
1134 VkDevice _device,
1135 uint32_t descriptorWriteCount,
1136 const VkWriteDescriptorSet* pDescriptorWrites,
1137 uint32_t descriptorCopyCount,
1138 const VkCopyDescriptorSet* pDescriptorCopies)
1139 {
1140 ANV_FROM_HANDLE(anv_device, device, _device);
1141
1142 for (uint32_t i = 0; i < descriptorWriteCount; i++) {
1143 const VkWriteDescriptorSet *write = &pDescriptorWrites[i];
1144 ANV_FROM_HANDLE(anv_descriptor_set, set, write->dstSet);
1145
1146 switch (write->descriptorType) {
1147 case VK_DESCRIPTOR_TYPE_SAMPLER:
1148 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1149 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
1150 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
1151 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
1152 for (uint32_t j = 0; j < write->descriptorCount; j++) {
1153 anv_descriptor_set_write_image_view(device, set,
1154 write->pImageInfo + j,
1155 write->descriptorType,
1156 write->dstBinding,
1157 write->dstArrayElement + j);
1158 }
1159 break;
1160
1161 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
1162 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
1163 for (uint32_t j = 0; j < write->descriptorCount; j++) {
1164 ANV_FROM_HANDLE(anv_buffer_view, bview,
1165 write->pTexelBufferView[j]);
1166
1167 anv_descriptor_set_write_buffer_view(device, set,
1168 write->descriptorType,
1169 bview,
1170 write->dstBinding,
1171 write->dstArrayElement + j);
1172 }
1173 break;
1174
1175 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
1176 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
1177 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1178 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1179 for (uint32_t j = 0; j < write->descriptorCount; j++) {
1180 assert(write->pBufferInfo[j].buffer);
1181 ANV_FROM_HANDLE(anv_buffer, buffer, write->pBufferInfo[j].buffer);
1182 assert(buffer);
1183
1184 anv_descriptor_set_write_buffer(device, set,
1185 NULL,
1186 write->descriptorType,
1187 buffer,
1188 write->dstBinding,
1189 write->dstArrayElement + j,
1190 write->pBufferInfo[j].offset,
1191 write->pBufferInfo[j].range);
1192 }
1193 break;
1194
1195 case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT: {
1196 const VkWriteDescriptorSetInlineUniformBlockEXT *inline_write =
1197 vk_find_struct_const(write->pNext,
1198 WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT);
1199 assert(inline_write->dataSize == write->descriptorCount);
1200 anv_descriptor_set_write_inline_uniform_data(device, set,
1201 write->dstBinding,
1202 inline_write->pData,
1203 write->dstArrayElement,
1204 inline_write->dataSize);
1205 break;
1206 }
1207
1208 default:
1209 break;
1210 }
1211 }
1212
1213 for (uint32_t i = 0; i < descriptorCopyCount; i++) {
1214 const VkCopyDescriptorSet *copy = &pDescriptorCopies[i];
1215 ANV_FROM_HANDLE(anv_descriptor_set, src, copy->srcSet);
1216 ANV_FROM_HANDLE(anv_descriptor_set, dst, copy->dstSet);
1217
1218 const struct anv_descriptor_set_binding_layout *src_layout =
1219 &src->layout->binding[copy->srcBinding];
1220 struct anv_descriptor *src_desc =
1221 &src->descriptors[src_layout->descriptor_index];
1222 src_desc += copy->srcArrayElement;
1223
1224 const struct anv_descriptor_set_binding_layout *dst_layout =
1225 &dst->layout->binding[copy->dstBinding];
1226 struct anv_descriptor *dst_desc =
1227 &dst->descriptors[dst_layout->descriptor_index];
1228 dst_desc += copy->dstArrayElement;
1229
1230 for (uint32_t j = 0; j < copy->descriptorCount; j++)
1231 dst_desc[j] = src_desc[j];
1232
1233 if (src_layout->data & ANV_DESCRIPTOR_INLINE_UNIFORM) {
1234 assert(src_layout->data == ANV_DESCRIPTOR_INLINE_UNIFORM);
1235 memcpy(dst->desc_mem.map + dst_layout->descriptor_offset +
1236 copy->dstArrayElement,
1237 src->desc_mem.map + src_layout->descriptor_offset +
1238 copy->srcArrayElement,
1239 copy->descriptorCount);
1240 } else {
1241 unsigned desc_size = anv_descriptor_size(src_layout);
1242 if (desc_size > 0) {
1243 assert(desc_size == anv_descriptor_size(dst_layout));
1244 memcpy(dst->desc_mem.map + dst_layout->descriptor_offset +
1245 copy->dstArrayElement * desc_size,
1246 src->desc_mem.map + src_layout->descriptor_offset +
1247 copy->srcArrayElement * desc_size,
1248 copy->descriptorCount * desc_size);
1249 }
1250 }
1251 }
1252 }
1253
1254 /*
1255 * Descriptor update templates.
1256 */
1257
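/* Apply a descriptor update template. Each entry describes a run of array
 * elements within one binding plus where in the caller's data blob (offset
 * and per-element stride) the corresponding Vk*Info structures or raw inline
 * uniform bytes are found.
 */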
1258 void
1259 anv_descriptor_set_write_template(struct anv_device *device,
1260 struct anv_descriptor_set *set,
1261 struct anv_state_stream *alloc_stream,
1262 const struct anv_descriptor_update_template *template,
1263 const void *data)
1264 {
1265 for (uint32_t i = 0; i < template->entry_count; i++) {
1266 const struct anv_descriptor_template_entry *entry =
1267 &template->entries[i];
1268
1269 switch (entry->type) {
1270 case VK_DESCRIPTOR_TYPE_SAMPLER:
1271 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1272 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
1273 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
1274 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
1275 for (uint32_t j = 0; j < entry->array_count; j++) {
1276 const VkDescriptorImageInfo *info =
1277 data + entry->offset + j * entry->stride;
1278 anv_descriptor_set_write_image_view(device, set,
1279 info, entry->type,
1280 entry->binding,
1281 entry->array_element + j);
1282 }
1283 break;
1284
1285 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
1286 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
1287 for (uint32_t j = 0; j < entry->array_count; j++) {
1288 const VkBufferView *_bview =
1289 data + entry->offset + j * entry->stride;
1290 ANV_FROM_HANDLE(anv_buffer_view, bview, *_bview);
1291
1292 anv_descriptor_set_write_buffer_view(device, set,
1293 entry->type,
1294 bview,
1295 entry->binding,
1296 entry->array_element + j);
1297 }
1298 break;
1299
1300 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
1301 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
1302 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1303 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1304 for (uint32_t j = 0; j < entry->array_count; j++) {
1305 const VkDescriptorBufferInfo *info =
1306 data + entry->offset + j * entry->stride;
1307 ANV_FROM_HANDLE(anv_buffer, buffer, info->buffer);
1308
1309 anv_descriptor_set_write_buffer(device, set,
1310 alloc_stream,
1311 entry->type,
1312 buffer,
1313 entry->binding,
1314 entry->array_element + j,
1315 info->offset, info->range);
1316 }
1317 break;
1318
1319 case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT:
1320 anv_descriptor_set_write_inline_uniform_data(device, set,
1321 entry->binding,
1322 data + entry->offset,
1323 entry->array_element,
1324 entry->array_count);
1325 break;
1326
1327 default:
1328 break;
1329 }
1330 }
1331 }
1332
1333 VkResult anv_CreateDescriptorUpdateTemplate(
1334 VkDevice _device,
1335 const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo,
1336 const VkAllocationCallbacks* pAllocator,
1337 VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate)
1338 {
1339 ANV_FROM_HANDLE(anv_device, device, _device);
1340 struct anv_descriptor_update_template *template;
1341
1342 size_t size = sizeof(*template) +
1343 pCreateInfo->descriptorUpdateEntryCount * sizeof(template->entries[0]);
1344 template = vk_alloc2(&device->alloc, pAllocator, size, 8,
1345 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1346 if (template == NULL)
1347 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1348
1349 template->bind_point = pCreateInfo->pipelineBindPoint;
1350
1351 if (pCreateInfo->templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET)
1352 template->set = pCreateInfo->set;
1353
1354 template->entry_count = pCreateInfo->descriptorUpdateEntryCount;
1355 for (uint32_t i = 0; i < template->entry_count; i++) {
1356 const VkDescriptorUpdateTemplateEntry *pEntry =
1357 &pCreateInfo->pDescriptorUpdateEntries[i];
1358
1359 template->entries[i] = (struct anv_descriptor_template_entry) {
1360 .type = pEntry->descriptorType,
1361 .binding = pEntry->dstBinding,
1362 .array_element = pEntry->dstArrayElement,
1363 .array_count = pEntry->descriptorCount,
1364 .offset = pEntry->offset,
1365 .stride = pEntry->stride,
1366 };
1367 }
1368
1369 *pDescriptorUpdateTemplate =
1370 anv_descriptor_update_template_to_handle(template);
1371
1372 return VK_SUCCESS;
1373 }
1374
1375 void anv_DestroyDescriptorUpdateTemplate(
1376 VkDevice _device,
1377 VkDescriptorUpdateTemplate descriptorUpdateTemplate,
1378 const VkAllocationCallbacks* pAllocator)
1379 {
1380 ANV_FROM_HANDLE(anv_device, device, _device);
1381 ANV_FROM_HANDLE(anv_descriptor_update_template, template,
1382 descriptorUpdateTemplate);
1383
1384 vk_free2(&device->alloc, pAllocator, template);
1385 }
1386
1387 void anv_UpdateDescriptorSetWithTemplate(
1388 VkDevice _device,
1389 VkDescriptorSet descriptorSet,
1390 VkDescriptorUpdateTemplate descriptorUpdateTemplate,
1391 const void* pData)
1392 {
1393 ANV_FROM_HANDLE(anv_device, device, _device);
1394 ANV_FROM_HANDLE(anv_descriptor_set, set, descriptorSet);
1395 ANV_FROM_HANDLE(anv_descriptor_update_template, template,
1396 descriptorUpdateTemplate);
1397
1398 anv_descriptor_set_write_template(device, set, NULL, template, pData);
1399 }