anv: Implement SSBOs bindings with GPU addresses in the descriptor BO
[mesa.git] / src / intel / vulkan / anv_descriptor_set.c
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <stdbool.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29
30 #include "util/mesa-sha1.h"
31 #include "vk_util.h"
32
33 #include "anv_private.h"
34
35 /*
36 * Descriptor set layouts.
37 */
38
39 static enum anv_descriptor_data
40 anv_descriptor_data_for_type(const struct anv_physical_device *device,
41 VkDescriptorType type)
42 {
43 enum anv_descriptor_data data = 0;
44
45 switch (type) {
46 case VK_DESCRIPTOR_TYPE_SAMPLER:
47 data = ANV_DESCRIPTOR_SAMPLER_STATE;
48 break;
49
50 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
51 data = ANV_DESCRIPTOR_SURFACE_STATE |
52 ANV_DESCRIPTOR_SAMPLER_STATE;
53 break;
54
55 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
56 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
57 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
58 data = ANV_DESCRIPTOR_SURFACE_STATE;
59 break;
60
61 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
62 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
63 data = ANV_DESCRIPTOR_SURFACE_STATE;
64 if (device->info.gen < 9)
65 data |= ANV_DESCRIPTOR_IMAGE_PARAM;
66 break;
67
68 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
69 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
70 data = ANV_DESCRIPTOR_SURFACE_STATE |
71 ANV_DESCRIPTOR_BUFFER_VIEW;
72 break;
73
74 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
75 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
76 data = ANV_DESCRIPTOR_SURFACE_STATE;
77 break;
78
79 case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT:
80 data = ANV_DESCRIPTOR_INLINE_UNIFORM;
81 break;
82
83 default:
84 unreachable("Unsupported descriptor type");
85 }
86
87 /* On gen8 and above when we have softpin enabled, we also need to push
88 * SSBO address ranges so that we can use A64 messages in the shader.
89 */
90 if (device->has_a64_buffer_access &&
91 (type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
92 type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC))
93 data |= ANV_DESCRIPTOR_ADDRESS_RANGE;
94
95 return data;
96 }
97
98 static unsigned
99 anv_descriptor_data_size(enum anv_descriptor_data data)
100 {
101 unsigned size = 0;
102
103 if (data & ANV_DESCRIPTOR_IMAGE_PARAM)
104 size += BRW_IMAGE_PARAM_SIZE * 4;
105
106 if (data & ANV_DESCRIPTOR_ADDRESS_RANGE)
107 size += sizeof(struct anv_address_range_descriptor);
108
109 return size;
110 }
111
112 /** Returns the size in bytes of each descriptor with the given layout */
113 unsigned
114 anv_descriptor_size(const struct anv_descriptor_set_binding_layout *layout)
115 {
116 if (layout->data & ANV_DESCRIPTOR_INLINE_UNIFORM) {
117 assert(layout->data == ANV_DESCRIPTOR_INLINE_UNIFORM);
118 return layout->array_size;
119 }
120
121 return anv_descriptor_data_size(layout->data);
122 }
123
124 /** Returns the size in bytes of each descriptor of the given type
125 *
126 * This version of the function does not have access to the entire layout so
127 * it may only work on certain descriptor types where the descriptor size is
128 * entirely determined by the descriptor type. Whenever possible, code should
129 * use anv_descriptor_size() instead.
130 */
131 unsigned
132 anv_descriptor_type_size(const struct anv_physical_device *pdevice,
133 VkDescriptorType type)
134 {
135 assert(type != VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT);
136 return anv_descriptor_data_size(anv_descriptor_data_for_type(pdevice, type));
137 }
138
139 static bool
140 anv_descriptor_data_supports_bindless(const struct anv_physical_device *pdevice,
141 enum anv_descriptor_data data,
142 bool sampler)
143 {
144 if (data & ANV_DESCRIPTOR_ADDRESS_RANGE) {
145 assert(pdevice->has_a64_buffer_access);
146 return true;
147 }
148
149 return false;
150 }
151
152 bool
153 anv_descriptor_supports_bindless(const struct anv_physical_device *pdevice,
154 const struct anv_descriptor_set_binding_layout *binding,
155 bool sampler)
156 {
157 return anv_descriptor_data_supports_bindless(pdevice, binding->data,
158 sampler);
159 }
160
161 bool
162 anv_descriptor_requires_bindless(const struct anv_physical_device *pdevice,
163 const struct anv_descriptor_set_binding_layout *binding,
164 bool sampler)
165 {
166 if (pdevice->always_use_bindless)
167 return anv_descriptor_supports_bindless(pdevice, binding, sampler);
168
169 return false;
170 }
171
172 void anv_GetDescriptorSetLayoutSupport(
173 VkDevice _device,
174 const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
175 VkDescriptorSetLayoutSupport* pSupport)
176 {
177 ANV_FROM_HANDLE(anv_device, device, _device);
178 const struct anv_physical_device *pdevice =
179 &device->instance->physicalDevice;
180
181 uint32_t surface_count[MESA_SHADER_STAGES] = { 0, };
182
183 for (uint32_t b = 0; b < pCreateInfo->bindingCount; b++) {
184 const VkDescriptorSetLayoutBinding *binding = &pCreateInfo->pBindings[b];
185
186 enum anv_descriptor_data desc_data =
187 anv_descriptor_data_for_type(pdevice, binding->descriptorType);
188
189 switch (binding->descriptorType) {
190 case VK_DESCRIPTOR_TYPE_SAMPLER:
191 /* There is no real limit on samplers */
192 break;
193
194 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
195 if (anv_descriptor_data_supports_bindless(pdevice, desc_data, false))
196 break;
197
198 if (binding->pImmutableSamplers) {
199 for (uint32_t i = 0; i < binding->descriptorCount; i++) {
200 ANV_FROM_HANDLE(anv_sampler, sampler,
201 binding->pImmutableSamplers[i]);
202 anv_foreach_stage(s, binding->stageFlags)
203 surface_count[s] += sampler->n_planes;
204 }
205 } else {
206 anv_foreach_stage(s, binding->stageFlags)
207 surface_count[s] += binding->descriptorCount;
208 }
209 break;
210
211 default:
212 if (anv_descriptor_data_supports_bindless(pdevice, desc_data, false))
213 break;
214
215 anv_foreach_stage(s, binding->stageFlags)
216 surface_count[s] += binding->descriptorCount;
217 break;
218 }
219 }
220
221 bool supported = true;
222 for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
223 /* Our maximum binding table size is 240 and we need to reserve 8 for
224 * render targets.
225 */
226 if (surface_count[s] >= MAX_BINDING_TABLE_SIZE - MAX_RTS)
227 supported = false;
228 }
229
230 pSupport->supported = supported;
231 }
232
233 VkResult anv_CreateDescriptorSetLayout(
234 VkDevice _device,
235 const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
236 const VkAllocationCallbacks* pAllocator,
237 VkDescriptorSetLayout* pSetLayout)
238 {
239 ANV_FROM_HANDLE(anv_device, device, _device);
240
241 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO);
242
243 uint32_t max_binding = 0;
244 uint32_t immutable_sampler_count = 0;
245 for (uint32_t j = 0; j < pCreateInfo->bindingCount; j++) {
246 max_binding = MAX2(max_binding, pCreateInfo->pBindings[j].binding);
247
248 /* From the Vulkan 1.1.97 spec for VkDescriptorSetLayoutBinding:
249 *
250 * "If descriptorType specifies a VK_DESCRIPTOR_TYPE_SAMPLER or
251 * VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER type descriptor, then
252 * pImmutableSamplers can be used to initialize a set of immutable
253 * samplers. [...] If descriptorType is not one of these descriptor
254 * types, then pImmutableSamplers is ignored.
255 *
256 * We need to be careful here and only parse pImmutableSamplers if we
257 * have one of the right descriptor types.
258 */
259 VkDescriptorType desc_type = pCreateInfo->pBindings[j].descriptorType;
260 if ((desc_type == VK_DESCRIPTOR_TYPE_SAMPLER ||
261 desc_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) &&
262 pCreateInfo->pBindings[j].pImmutableSamplers)
263 immutable_sampler_count += pCreateInfo->pBindings[j].descriptorCount;
264 }
265
266 struct anv_descriptor_set_layout *set_layout;
267 struct anv_descriptor_set_binding_layout *bindings;
268 struct anv_sampler **samplers;
269
270 /* We need to allocate decriptor set layouts off the device allocator
271 * with DEVICE scope because they are reference counted and may not be
272 * destroyed when vkDestroyDescriptorSetLayout is called.
273 */
274 ANV_MULTIALLOC(ma);
275 anv_multialloc_add(&ma, &set_layout, 1);
276 anv_multialloc_add(&ma, &bindings, max_binding + 1);
277 anv_multialloc_add(&ma, &samplers, immutable_sampler_count);
278
279 if (!anv_multialloc_alloc(&ma, &device->alloc,
280 VK_SYSTEM_ALLOCATION_SCOPE_DEVICE))
281 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
282
283 memset(set_layout, 0, sizeof(*set_layout));
284 set_layout->ref_cnt = 1;
285 set_layout->binding_count = max_binding + 1;
286
287 for (uint32_t b = 0; b <= max_binding; b++) {
288 /* Initialize all binding_layout entries to -1 */
289 memset(&set_layout->binding[b], -1, sizeof(set_layout->binding[b]));
290
291 set_layout->binding[b].data = 0;
292 set_layout->binding[b].array_size = 0;
293 set_layout->binding[b].immutable_samplers = NULL;
294 }
295
296 /* Initialize all samplers to 0 */
297 memset(samplers, 0, immutable_sampler_count * sizeof(*samplers));
298
299 uint32_t buffer_view_count = 0;
300 uint32_t dynamic_offset_count = 0;
301 uint32_t descriptor_buffer_size = 0;
302
303 for (uint32_t j = 0; j < pCreateInfo->bindingCount; j++) {
304 const VkDescriptorSetLayoutBinding *binding = &pCreateInfo->pBindings[j];
305 uint32_t b = binding->binding;
306 /* We temporarily store the pointer to the binding in the
307 * immutable_samplers pointer. This provides us with a quick-and-dirty
308 * way to sort the bindings by binding number.
309 */
310 set_layout->binding[b].immutable_samplers = (void *)binding;
311 }
312
313 for (uint32_t b = 0; b <= max_binding; b++) {
314 const VkDescriptorSetLayoutBinding *binding =
315 (void *)set_layout->binding[b].immutable_samplers;
316
317 if (binding == NULL)
318 continue;
319
320 /* We temporarily stashed the pointer to the binding in the
321 * immutable_samplers pointer. Now that we've pulled it back out
322 * again, we reset immutable_samplers to NULL.
323 */
324 set_layout->binding[b].immutable_samplers = NULL;
325
326 if (binding->descriptorCount == 0)
327 continue;
328
329 #ifndef NDEBUG
330 set_layout->binding[b].type = binding->descriptorType;
331 #endif
332 set_layout->binding[b].data =
333 anv_descriptor_data_for_type(&device->instance->physicalDevice,
334 binding->descriptorType);
335 set_layout->binding[b].array_size = binding->descriptorCount;
336 set_layout->binding[b].descriptor_index = set_layout->size;
337 set_layout->size += binding->descriptorCount;
338
339 if (set_layout->binding[b].data & ANV_DESCRIPTOR_BUFFER_VIEW) {
340 set_layout->binding[b].buffer_view_index = buffer_view_count;
341 buffer_view_count += binding->descriptorCount;
342 }
343
344 switch (binding->descriptorType) {
345 case VK_DESCRIPTOR_TYPE_SAMPLER:
346 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
347 if (binding->pImmutableSamplers) {
348 set_layout->binding[b].immutable_samplers = samplers;
349 samplers += binding->descriptorCount;
350
351 for (uint32_t i = 0; i < binding->descriptorCount; i++)
352 set_layout->binding[b].immutable_samplers[i] =
353 anv_sampler_from_handle(binding->pImmutableSamplers[i]);
354 }
355 break;
356 default:
357 break;
358 }
359
360 switch (binding->descriptorType) {
361 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
362 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
363 set_layout->binding[b].dynamic_offset_index = dynamic_offset_count;
364 dynamic_offset_count += binding->descriptorCount;
365 break;
366
367 default:
368 break;
369 }
370
371 if (binding->descriptorType ==
372 VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) {
373 /* Inline uniform blocks are specified to use the descriptor array
374 * size as the size in bytes of the block.
375 */
376 descriptor_buffer_size = align_u32(descriptor_buffer_size, 32);
377 set_layout->binding[b].descriptor_offset = descriptor_buffer_size;
378 descriptor_buffer_size += binding->descriptorCount;
379 } else {
380 set_layout->binding[b].descriptor_offset = descriptor_buffer_size;
381 descriptor_buffer_size += anv_descriptor_size(&set_layout->binding[b]) *
382 binding->descriptorCount;
383 }
384
385 set_layout->shader_stages |= binding->stageFlags;
386 }
387
388 set_layout->buffer_view_count = buffer_view_count;
389 set_layout->dynamic_offset_count = dynamic_offset_count;
390 set_layout->descriptor_buffer_size = descriptor_buffer_size;
391
392 *pSetLayout = anv_descriptor_set_layout_to_handle(set_layout);
393
394 return VK_SUCCESS;
395 }
396
397 void anv_DestroyDescriptorSetLayout(
398 VkDevice _device,
399 VkDescriptorSetLayout _set_layout,
400 const VkAllocationCallbacks* pAllocator)
401 {
402 ANV_FROM_HANDLE(anv_device, device, _device);
403 ANV_FROM_HANDLE(anv_descriptor_set_layout, set_layout, _set_layout);
404
405 if (!set_layout)
406 return;
407
408 anv_descriptor_set_layout_unref(device, set_layout);
409 }
410
411 #define SHA1_UPDATE_VALUE(ctx, x) _mesa_sha1_update(ctx, &(x), sizeof(x));
412
413 static void
414 sha1_update_immutable_sampler(struct mesa_sha1 *ctx,
415 const struct anv_sampler *sampler)
416 {
417 if (!sampler->conversion)
418 return;
419
420 /* The only thing that affects the shader is ycbcr conversion */
421 _mesa_sha1_update(ctx, sampler->conversion,
422 sizeof(*sampler->conversion));
423 }
424
425 static void
426 sha1_update_descriptor_set_binding_layout(struct mesa_sha1 *ctx,
427 const struct anv_descriptor_set_binding_layout *layout)
428 {
429 SHA1_UPDATE_VALUE(ctx, layout->data);
430 SHA1_UPDATE_VALUE(ctx, layout->array_size);
431 SHA1_UPDATE_VALUE(ctx, layout->descriptor_index);
432 SHA1_UPDATE_VALUE(ctx, layout->dynamic_offset_index);
433 SHA1_UPDATE_VALUE(ctx, layout->buffer_view_index);
434 SHA1_UPDATE_VALUE(ctx, layout->descriptor_offset);
435
436 if (layout->immutable_samplers) {
437 for (uint16_t i = 0; i < layout->array_size; i++)
438 sha1_update_immutable_sampler(ctx, layout->immutable_samplers[i]);
439 }
440 }
441
442 static void
443 sha1_update_descriptor_set_layout(struct mesa_sha1 *ctx,
444 const struct anv_descriptor_set_layout *layout)
445 {
446 SHA1_UPDATE_VALUE(ctx, layout->binding_count);
447 SHA1_UPDATE_VALUE(ctx, layout->size);
448 SHA1_UPDATE_VALUE(ctx, layout->shader_stages);
449 SHA1_UPDATE_VALUE(ctx, layout->buffer_view_count);
450 SHA1_UPDATE_VALUE(ctx, layout->dynamic_offset_count);
451 SHA1_UPDATE_VALUE(ctx, layout->descriptor_buffer_size);
452
453 for (uint16_t i = 0; i < layout->binding_count; i++)
454 sha1_update_descriptor_set_binding_layout(ctx, &layout->binding[i]);
455 }
456
457 /*
458 * Pipeline layouts. These have nothing to do with the pipeline. They are
459 * just multiple descriptor set layouts pasted together
460 */
461
462 VkResult anv_CreatePipelineLayout(
463 VkDevice _device,
464 const VkPipelineLayoutCreateInfo* pCreateInfo,
465 const VkAllocationCallbacks* pAllocator,
466 VkPipelineLayout* pPipelineLayout)
467 {
468 ANV_FROM_HANDLE(anv_device, device, _device);
469 struct anv_pipeline_layout *layout;
470
471 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO);
472
473 layout = vk_alloc2(&device->alloc, pAllocator, sizeof(*layout), 8,
474 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
475 if (layout == NULL)
476 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
477
478 layout->num_sets = pCreateInfo->setLayoutCount;
479
480 unsigned dynamic_offset_count = 0;
481
482 for (uint32_t set = 0; set < pCreateInfo->setLayoutCount; set++) {
483 ANV_FROM_HANDLE(anv_descriptor_set_layout, set_layout,
484 pCreateInfo->pSetLayouts[set]);
485 layout->set[set].layout = set_layout;
486 anv_descriptor_set_layout_ref(set_layout);
487
488 layout->set[set].dynamic_offset_start = dynamic_offset_count;
489 for (uint32_t b = 0; b < set_layout->binding_count; b++) {
490 if (set_layout->binding[b].dynamic_offset_index < 0)
491 continue;
492
493 dynamic_offset_count += set_layout->binding[b].array_size;
494 }
495 }
496
497 struct mesa_sha1 ctx;
498 _mesa_sha1_init(&ctx);
499 for (unsigned s = 0; s < layout->num_sets; s++) {
500 sha1_update_descriptor_set_layout(&ctx, layout->set[s].layout);
501 _mesa_sha1_update(&ctx, &layout->set[s].dynamic_offset_start,
502 sizeof(layout->set[s].dynamic_offset_start));
503 }
504 _mesa_sha1_update(&ctx, &layout->num_sets, sizeof(layout->num_sets));
505 _mesa_sha1_final(&ctx, layout->sha1);
506
507 *pPipelineLayout = anv_pipeline_layout_to_handle(layout);
508
509 return VK_SUCCESS;
510 }
511
512 void anv_DestroyPipelineLayout(
513 VkDevice _device,
514 VkPipelineLayout _pipelineLayout,
515 const VkAllocationCallbacks* pAllocator)
516 {
517 ANV_FROM_HANDLE(anv_device, device, _device);
518 ANV_FROM_HANDLE(anv_pipeline_layout, pipeline_layout, _pipelineLayout);
519
520 if (!pipeline_layout)
521 return;
522
523 for (uint32_t i = 0; i < pipeline_layout->num_sets; i++)
524 anv_descriptor_set_layout_unref(device, pipeline_layout->set[i].layout);
525
526 vk_free2(&device->alloc, pAllocator, pipeline_layout);
527 }
528
529 /*
530 * Descriptor pools.
531 *
532 * These are implemented using a big pool of memory and a free-list for the
533 * host memory allocations and a state_stream and a free list for the buffer
534 * view surface state. The spec allows us to fail to allocate due to
535 * fragmentation in all cases but two: 1) after pool reset, allocating up
536 * until the pool size with no freeing must succeed and 2) allocating and
537 * freeing only descriptor sets with the same layout. Case 1) is easy enogh,
538 * and the free lists lets us recycle blocks for case 2).
539 */
540
541 /* The vma heap reserves 0 to mean NULL; we have to offset by some ammount to
542 * ensure we can allocate the entire BO without hitting zero. The actual
543 * amount doesn't matter.
544 */
545 #define POOL_HEAP_OFFSET 64
546
547 #define EMPTY 1
548
549 VkResult anv_CreateDescriptorPool(
550 VkDevice _device,
551 const VkDescriptorPoolCreateInfo* pCreateInfo,
552 const VkAllocationCallbacks* pAllocator,
553 VkDescriptorPool* pDescriptorPool)
554 {
555 ANV_FROM_HANDLE(anv_device, device, _device);
556 struct anv_descriptor_pool *pool;
557
558 const VkDescriptorPoolInlineUniformBlockCreateInfoEXT *inline_info =
559 vk_find_struct_const(pCreateInfo->pNext,
560 DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO_EXT);
561
562 uint32_t descriptor_count = 0;
563 uint32_t buffer_view_count = 0;
564 uint32_t descriptor_bo_size = 0;
565 for (uint32_t i = 0; i < pCreateInfo->poolSizeCount; i++) {
566 enum anv_descriptor_data desc_data =
567 anv_descriptor_data_for_type(&device->instance->physicalDevice,
568 pCreateInfo->pPoolSizes[i].type);
569
570 if (desc_data & ANV_DESCRIPTOR_BUFFER_VIEW)
571 buffer_view_count += pCreateInfo->pPoolSizes[i].descriptorCount;
572
573 unsigned desc_data_size = anv_descriptor_data_size(desc_data) *
574 pCreateInfo->pPoolSizes[i].descriptorCount;
575
576 if (pCreateInfo->pPoolSizes[i].type ==
577 VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) {
578 /* Inline uniform blocks are specified to use the descriptor array
579 * size as the size in bytes of the block.
580 */
581 assert(inline_info);
582 desc_data_size += pCreateInfo->pPoolSizes[i].descriptorCount;
583 }
584
585 descriptor_bo_size += desc_data_size;
586
587 descriptor_count += pCreateInfo->pPoolSizes[i].descriptorCount;
588 }
589 /* We have to align descriptor buffer allocations to 32B so that we can
590 * push descriptor buffers. This means that each descriptor buffer
591 * allocated may burn up to 32B of extra space to get the right alignment.
592 * (Technically, it's at most 28B because we're always going to start at
593 * least 4B aligned but we're being conservative here.) Allocate enough
594 * extra space that we can chop it into maxSets pieces and align each one
595 * of them to 32B.
596 */
597 descriptor_bo_size += 32 * pCreateInfo->maxSets;
598 descriptor_bo_size = ALIGN(descriptor_bo_size, 4096);
599 /* We align inline uniform blocks to 32B */
600 if (inline_info)
601 descriptor_bo_size += 32 * inline_info->maxInlineUniformBlockBindings;
602
603 const size_t pool_size =
604 pCreateInfo->maxSets * sizeof(struct anv_descriptor_set) +
605 descriptor_count * sizeof(struct anv_descriptor) +
606 buffer_view_count * sizeof(struct anv_buffer_view);
607 const size_t total_size = sizeof(*pool) + pool_size;
608
609 pool = vk_alloc2(&device->alloc, pAllocator, total_size, 8,
610 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
611 if (!pool)
612 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
613
614 pool->size = pool_size;
615 pool->next = 0;
616 pool->free_list = EMPTY;
617
618 if (descriptor_bo_size > 0) {
619 VkResult result = anv_bo_init_new(&pool->bo, device, descriptor_bo_size);
620 if (result != VK_SUCCESS) {
621 vk_free2(&device->alloc, pAllocator, pool);
622 return result;
623 }
624
625 anv_gem_set_caching(device, pool->bo.gem_handle, I915_CACHING_CACHED);
626
627 pool->bo.map = anv_gem_mmap(device, pool->bo.gem_handle, 0,
628 descriptor_bo_size, 0);
629 if (pool->bo.map == NULL) {
630 anv_gem_close(device, pool->bo.gem_handle);
631 vk_free2(&device->alloc, pAllocator, pool);
632 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
633 }
634
635 if (device->instance->physicalDevice.use_softpin) {
636 pool->bo.flags |= EXEC_OBJECT_PINNED;
637 anv_vma_alloc(device, &pool->bo);
638 }
639
640 util_vma_heap_init(&pool->bo_heap, POOL_HEAP_OFFSET, descriptor_bo_size);
641 } else {
642 pool->bo.size = 0;
643 }
644
645 anv_state_stream_init(&pool->surface_state_stream,
646 &device->surface_state_pool, 4096);
647 pool->surface_state_free_list = NULL;
648
649 list_inithead(&pool->desc_sets);
650
651 *pDescriptorPool = anv_descriptor_pool_to_handle(pool);
652
653 return VK_SUCCESS;
654 }
655
656 void anv_DestroyDescriptorPool(
657 VkDevice _device,
658 VkDescriptorPool _pool,
659 const VkAllocationCallbacks* pAllocator)
660 {
661 ANV_FROM_HANDLE(anv_device, device, _device);
662 ANV_FROM_HANDLE(anv_descriptor_pool, pool, _pool);
663
664 if (!pool)
665 return;
666
667 if (pool->bo.size) {
668 anv_gem_munmap(pool->bo.map, pool->bo.size);
669 anv_vma_free(device, &pool->bo);
670 anv_gem_close(device, pool->bo.gem_handle);
671 }
672 anv_state_stream_finish(&pool->surface_state_stream);
673
674 list_for_each_entry_safe(struct anv_descriptor_set, set,
675 &pool->desc_sets, pool_link) {
676 anv_descriptor_set_destroy(device, pool, set);
677 }
678
679 util_vma_heap_finish(&pool->bo_heap);
680
681 vk_free2(&device->alloc, pAllocator, pool);
682 }
683
684 VkResult anv_ResetDescriptorPool(
685 VkDevice _device,
686 VkDescriptorPool descriptorPool,
687 VkDescriptorPoolResetFlags flags)
688 {
689 ANV_FROM_HANDLE(anv_device, device, _device);
690 ANV_FROM_HANDLE(anv_descriptor_pool, pool, descriptorPool);
691
692 list_for_each_entry_safe(struct anv_descriptor_set, set,
693 &pool->desc_sets, pool_link) {
694 anv_descriptor_set_destroy(device, pool, set);
695 }
696
697 pool->next = 0;
698 pool->free_list = EMPTY;
699
700 if (pool->bo.size) {
701 util_vma_heap_finish(&pool->bo_heap);
702 util_vma_heap_init(&pool->bo_heap, POOL_HEAP_OFFSET, pool->bo.size);
703 }
704
705 anv_state_stream_finish(&pool->surface_state_stream);
706 anv_state_stream_init(&pool->surface_state_stream,
707 &device->surface_state_pool, 4096);
708 pool->surface_state_free_list = NULL;
709
710 return VK_SUCCESS;
711 }
712
713 struct pool_free_list_entry {
714 uint32_t next;
715 uint32_t size;
716 };
717
718 static VkResult
719 anv_descriptor_pool_alloc_set(struct anv_descriptor_pool *pool,
720 uint32_t size,
721 struct anv_descriptor_set **set)
722 {
723 if (size <= pool->size - pool->next) {
724 *set = (struct anv_descriptor_set *) (pool->data + pool->next);
725 pool->next += size;
726 return VK_SUCCESS;
727 } else {
728 struct pool_free_list_entry *entry;
729 uint32_t *link = &pool->free_list;
730 for (uint32_t f = pool->free_list; f != EMPTY; f = entry->next) {
731 entry = (struct pool_free_list_entry *) (pool->data + f);
732 if (size <= entry->size) {
733 *link = entry->next;
734 *set = (struct anv_descriptor_set *) entry;
735 return VK_SUCCESS;
736 }
737 link = &entry->next;
738 }
739
740 if (pool->free_list != EMPTY) {
741 return vk_error(VK_ERROR_FRAGMENTED_POOL);
742 } else {
743 return vk_error(VK_ERROR_OUT_OF_POOL_MEMORY);
744 }
745 }
746 }
747
748 static void
749 anv_descriptor_pool_free_set(struct anv_descriptor_pool *pool,
750 struct anv_descriptor_set *set)
751 {
752 /* Put the descriptor set allocation back on the free list. */
753 const uint32_t index = (char *) set - pool->data;
754 if (index + set->size == pool->next) {
755 pool->next = index;
756 } else {
757 struct pool_free_list_entry *entry = (struct pool_free_list_entry *) set;
758 entry->next = pool->free_list;
759 entry->size = set->size;
760 pool->free_list = (char *) entry - pool->data;
761 }
762
763 list_del(&set->pool_link);
764 }
765
766 struct surface_state_free_list_entry {
767 void *next;
768 struct anv_state state;
769 };
770
771 static struct anv_state
772 anv_descriptor_pool_alloc_state(struct anv_descriptor_pool *pool)
773 {
774 struct surface_state_free_list_entry *entry =
775 pool->surface_state_free_list;
776
777 if (entry) {
778 struct anv_state state = entry->state;
779 pool->surface_state_free_list = entry->next;
780 assert(state.alloc_size == 64);
781 return state;
782 } else {
783 return anv_state_stream_alloc(&pool->surface_state_stream, 64, 64);
784 }
785 }
786
787 static void
788 anv_descriptor_pool_free_state(struct anv_descriptor_pool *pool,
789 struct anv_state state)
790 {
791 /* Put the buffer view surface state back on the free list. */
792 struct surface_state_free_list_entry *entry = state.map;
793 entry->next = pool->surface_state_free_list;
794 entry->state = state;
795 pool->surface_state_free_list = entry;
796 }
797
798 size_t
799 anv_descriptor_set_layout_size(const struct anv_descriptor_set_layout *layout)
800 {
801 return
802 sizeof(struct anv_descriptor_set) +
803 layout->size * sizeof(struct anv_descriptor) +
804 layout->buffer_view_count * sizeof(struct anv_buffer_view);
805 }
806
807 VkResult
808 anv_descriptor_set_create(struct anv_device *device,
809 struct anv_descriptor_pool *pool,
810 struct anv_descriptor_set_layout *layout,
811 struct anv_descriptor_set **out_set)
812 {
813 struct anv_descriptor_set *set;
814 const size_t size = anv_descriptor_set_layout_size(layout);
815
816 VkResult result = anv_descriptor_pool_alloc_set(pool, size, &set);
817 if (result != VK_SUCCESS)
818 return result;
819
820 if (layout->descriptor_buffer_size) {
821 /* Align the size to 32 so that alignment gaps don't cause extra holes
822 * in the heap which can lead to bad performance.
823 */
824 uint64_t pool_vma_offset =
825 util_vma_heap_alloc(&pool->bo_heap,
826 ALIGN(layout->descriptor_buffer_size, 32), 32);
827 if (pool_vma_offset == 0) {
828 anv_descriptor_pool_free_set(pool, set);
829 return vk_error(VK_ERROR_FRAGMENTED_POOL);
830 }
831 assert(pool_vma_offset >= POOL_HEAP_OFFSET &&
832 pool_vma_offset - POOL_HEAP_OFFSET <= INT32_MAX);
833 set->desc_mem.offset = pool_vma_offset - POOL_HEAP_OFFSET;
834 set->desc_mem.alloc_size = layout->descriptor_buffer_size;
835 set->desc_mem.map = pool->bo.map + set->desc_mem.offset;
836
837 set->desc_surface_state = anv_descriptor_pool_alloc_state(pool);
838 anv_fill_buffer_surface_state(device, set->desc_surface_state,
839 ISL_FORMAT_R32G32B32A32_FLOAT,
840 (struct anv_address) {
841 .bo = &pool->bo,
842 .offset = set->desc_mem.offset,
843 },
844 layout->descriptor_buffer_size, 1);
845 } else {
846 set->desc_mem = ANV_STATE_NULL;
847 set->desc_surface_state = ANV_STATE_NULL;
848 }
849
850 set->pool = pool;
851 set->layout = layout;
852 anv_descriptor_set_layout_ref(layout);
853
854 set->size = size;
855 set->buffer_views =
856 (struct anv_buffer_view *) &set->descriptors[layout->size];
857 set->buffer_view_count = layout->buffer_view_count;
858
859 /* By defining the descriptors to be zero now, we can later verify that
860 * a descriptor has not been populated with user data.
861 */
862 memset(set->descriptors, 0, sizeof(struct anv_descriptor) * layout->size);
863
864 /* Go through and fill out immutable samplers if we have any */
865 struct anv_descriptor *desc = set->descriptors;
866 for (uint32_t b = 0; b < layout->binding_count; b++) {
867 if (layout->binding[b].immutable_samplers) {
868 for (uint32_t i = 0; i < layout->binding[b].array_size; i++) {
869 /* The type will get changed to COMBINED_IMAGE_SAMPLER in
870 * UpdateDescriptorSets if needed. However, if the descriptor
871 * set has an immutable sampler, UpdateDescriptorSets may never
872 * touch it, so we need to make sure it's 100% valid now.
873 */
874 desc[i] = (struct anv_descriptor) {
875 .type = VK_DESCRIPTOR_TYPE_SAMPLER,
876 .sampler = layout->binding[b].immutable_samplers[i],
877 };
878 }
879 }
880 desc += layout->binding[b].array_size;
881 }
882
883 /* Allocate surface state for the buffer views. */
884 for (uint32_t b = 0; b < layout->buffer_view_count; b++) {
885 set->buffer_views[b].surface_state =
886 anv_descriptor_pool_alloc_state(pool);
887 }
888
889 *out_set = set;
890
891 return VK_SUCCESS;
892 }
893
894 void
895 anv_descriptor_set_destroy(struct anv_device *device,
896 struct anv_descriptor_pool *pool,
897 struct anv_descriptor_set *set)
898 {
899 anv_descriptor_set_layout_unref(device, set->layout);
900
901 if (set->desc_mem.alloc_size) {
902 util_vma_heap_free(&pool->bo_heap,
903 (uint64_t)set->desc_mem.offset + POOL_HEAP_OFFSET,
904 set->desc_mem.alloc_size);
905 anv_descriptor_pool_free_state(pool, set->desc_surface_state);
906 }
907
908 for (uint32_t b = 0; b < set->buffer_view_count; b++)
909 anv_descriptor_pool_free_state(pool, set->buffer_views[b].surface_state);
910
911 anv_descriptor_pool_free_set(pool, set);
912 }
913
914 VkResult anv_AllocateDescriptorSets(
915 VkDevice _device,
916 const VkDescriptorSetAllocateInfo* pAllocateInfo,
917 VkDescriptorSet* pDescriptorSets)
918 {
919 ANV_FROM_HANDLE(anv_device, device, _device);
920 ANV_FROM_HANDLE(anv_descriptor_pool, pool, pAllocateInfo->descriptorPool);
921
922 VkResult result = VK_SUCCESS;
923 struct anv_descriptor_set *set;
924 uint32_t i;
925
926 for (i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
927 ANV_FROM_HANDLE(anv_descriptor_set_layout, layout,
928 pAllocateInfo->pSetLayouts[i]);
929
930 result = anv_descriptor_set_create(device, pool, layout, &set);
931 if (result != VK_SUCCESS)
932 break;
933
934 list_addtail(&set->pool_link, &pool->desc_sets);
935
936 pDescriptorSets[i] = anv_descriptor_set_to_handle(set);
937 }
938
939 if (result != VK_SUCCESS)
940 anv_FreeDescriptorSets(_device, pAllocateInfo->descriptorPool,
941 i, pDescriptorSets);
942
943 return result;
944 }
945
946 VkResult anv_FreeDescriptorSets(
947 VkDevice _device,
948 VkDescriptorPool descriptorPool,
949 uint32_t count,
950 const VkDescriptorSet* pDescriptorSets)
951 {
952 ANV_FROM_HANDLE(anv_device, device, _device);
953 ANV_FROM_HANDLE(anv_descriptor_pool, pool, descriptorPool);
954
955 for (uint32_t i = 0; i < count; i++) {
956 ANV_FROM_HANDLE(anv_descriptor_set, set, pDescriptorSets[i]);
957
958 if (!set)
959 continue;
960
961 anv_descriptor_set_destroy(device, pool, set);
962 }
963
964 return VK_SUCCESS;
965 }
966
967 static void
968 anv_descriptor_set_write_image_param(uint32_t *param_desc_map,
969 const struct brw_image_param *param)
970 {
971 #define WRITE_PARAM_FIELD(field, FIELD) \
972 for (unsigned i = 0; i < ARRAY_SIZE(param->field); i++) \
973 param_desc_map[BRW_IMAGE_PARAM_##FIELD##_OFFSET + i] = param->field[i]
974
975 WRITE_PARAM_FIELD(offset, OFFSET);
976 WRITE_PARAM_FIELD(size, SIZE);
977 WRITE_PARAM_FIELD(stride, STRIDE);
978 WRITE_PARAM_FIELD(tiling, TILING);
979 WRITE_PARAM_FIELD(swizzling, SWIZZLING);
980 WRITE_PARAM_FIELD(size, SIZE);
981
982 #undef WRITE_PARAM_FIELD
983 }
984
985 void
986 anv_descriptor_set_write_image_view(struct anv_device *device,
987 struct anv_descriptor_set *set,
988 const VkDescriptorImageInfo * const info,
989 VkDescriptorType type,
990 uint32_t binding,
991 uint32_t element)
992 {
993 const struct anv_descriptor_set_binding_layout *bind_layout =
994 &set->layout->binding[binding];
995 struct anv_descriptor *desc =
996 &set->descriptors[bind_layout->descriptor_index + element];
997 struct anv_image_view *image_view = NULL;
998 struct anv_sampler *sampler = NULL;
999
1000 assert(type == bind_layout->type);
1001
1002 switch (type) {
1003 case VK_DESCRIPTOR_TYPE_SAMPLER:
1004 sampler = anv_sampler_from_handle(info->sampler);
1005 break;
1006
1007 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1008 image_view = anv_image_view_from_handle(info->imageView);
1009 sampler = anv_sampler_from_handle(info->sampler);
1010 break;
1011
1012 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
1013 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
1014 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
1015 image_view = anv_image_view_from_handle(info->imageView);
1016 break;
1017
1018 default:
1019 unreachable("invalid descriptor type");
1020 }
1021
1022 /* If this descriptor has an immutable sampler, we don't want to stomp on
1023 * it.
1024 */
1025 sampler = bind_layout->immutable_samplers ?
1026 bind_layout->immutable_samplers[element] :
1027 sampler;
1028
1029 *desc = (struct anv_descriptor) {
1030 .type = type,
1031 .layout = info->imageLayout,
1032 .image_view = image_view,
1033 .sampler = sampler,
1034 };
1035
1036 void *desc_map = set->desc_mem.map + bind_layout->descriptor_offset +
1037 element * anv_descriptor_size(bind_layout);
1038
1039 if (bind_layout->data & ANV_DESCRIPTOR_IMAGE_PARAM) {
1040 /* Storage images can only ever have one plane */
1041 assert(image_view->n_planes == 1);
1042 const struct brw_image_param *image_param =
1043 &image_view->planes[0].storage_image_param;
1044
1045 anv_descriptor_set_write_image_param(desc_map, image_param);
1046 }
1047 }
1048
1049 void
1050 anv_descriptor_set_write_buffer_view(struct anv_device *device,
1051 struct anv_descriptor_set *set,
1052 VkDescriptorType type,
1053 struct anv_buffer_view *buffer_view,
1054 uint32_t binding,
1055 uint32_t element)
1056 {
1057 const struct anv_descriptor_set_binding_layout *bind_layout =
1058 &set->layout->binding[binding];
1059 struct anv_descriptor *desc =
1060 &set->descriptors[bind_layout->descriptor_index + element];
1061
1062 assert(type == bind_layout->type);
1063
1064 *desc = (struct anv_descriptor) {
1065 .type = type,
1066 .buffer_view = buffer_view,
1067 };
1068
1069 void *desc_map = set->desc_mem.map + bind_layout->descriptor_offset +
1070 element * anv_descriptor_size(bind_layout);
1071
1072 if (bind_layout->data & ANV_DESCRIPTOR_IMAGE_PARAM) {
1073 anv_descriptor_set_write_image_param(desc_map,
1074 &buffer_view->storage_image_param);
1075 }
1076 }
1077
1078 void
1079 anv_descriptor_set_write_buffer(struct anv_device *device,
1080 struct anv_descriptor_set *set,
1081 struct anv_state_stream *alloc_stream,
1082 VkDescriptorType type,
1083 struct anv_buffer *buffer,
1084 uint32_t binding,
1085 uint32_t element,
1086 VkDeviceSize offset,
1087 VkDeviceSize range)
1088 {
1089 const struct anv_descriptor_set_binding_layout *bind_layout =
1090 &set->layout->binding[binding];
1091 struct anv_descriptor *desc =
1092 &set->descriptors[bind_layout->descriptor_index + element];
1093
1094 assert(type == bind_layout->type);
1095
1096 struct anv_address bind_addr = anv_address_add(buffer->address, offset);
1097 uint64_t bind_range = anv_buffer_get_range(buffer, offset, range);
1098
1099 if (type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
1100 type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
1101 *desc = (struct anv_descriptor) {
1102 .type = type,
1103 .buffer = buffer,
1104 .offset = offset,
1105 .range = range,
1106 };
1107 } else {
1108 assert(bind_layout->data & ANV_DESCRIPTOR_BUFFER_VIEW);
1109 struct anv_buffer_view *bview =
1110 &set->buffer_views[bind_layout->buffer_view_index + element];
1111
1112 bview->format = anv_isl_format_for_descriptor_type(type);
1113 bview->range = bind_range;
1114 bview->address = bind_addr;
1115
1116 /* If we're writing descriptors through a push command, we need to
1117 * allocate the surface state from the command buffer. Otherwise it will
1118 * be allocated by the descriptor pool when calling
1119 * vkAllocateDescriptorSets. */
1120 if (alloc_stream)
1121 bview->surface_state = anv_state_stream_alloc(alloc_stream, 64, 64);
1122
1123 anv_fill_buffer_surface_state(device, bview->surface_state,
1124 bview->format, bind_addr, bind_range, 1);
1125
1126 *desc = (struct anv_descriptor) {
1127 .type = type,
1128 .buffer_view = bview,
1129 };
1130 }
1131
1132 void *desc_map = set->desc_mem.map + bind_layout->descriptor_offset +
1133 element * anv_descriptor_size(bind_layout);
1134
1135 if (bind_layout->data & ANV_DESCRIPTOR_ADDRESS_RANGE) {
1136 struct anv_address_range_descriptor desc = {
1137 .address = anv_address_physical(bind_addr),
1138 .range = bind_range,
1139 };
1140 memcpy(desc_map, &desc, sizeof(desc));
1141 }
1142 }
1143
1144 void
1145 anv_descriptor_set_write_inline_uniform_data(struct anv_device *device,
1146 struct anv_descriptor_set *set,
1147 uint32_t binding,
1148 const void *data,
1149 size_t offset,
1150 size_t size)
1151 {
1152 const struct anv_descriptor_set_binding_layout *bind_layout =
1153 &set->layout->binding[binding];
1154
1155 assert(bind_layout->data & ANV_DESCRIPTOR_INLINE_UNIFORM);
1156
1157 void *desc_map = set->desc_mem.map + bind_layout->descriptor_offset;
1158
1159 memcpy(desc_map + offset, data, size);
1160 }
1161
1162 void anv_UpdateDescriptorSets(
1163 VkDevice _device,
1164 uint32_t descriptorWriteCount,
1165 const VkWriteDescriptorSet* pDescriptorWrites,
1166 uint32_t descriptorCopyCount,
1167 const VkCopyDescriptorSet* pDescriptorCopies)
1168 {
1169 ANV_FROM_HANDLE(anv_device, device, _device);
1170
1171 for (uint32_t i = 0; i < descriptorWriteCount; i++) {
1172 const VkWriteDescriptorSet *write = &pDescriptorWrites[i];
1173 ANV_FROM_HANDLE(anv_descriptor_set, set, write->dstSet);
1174
1175 switch (write->descriptorType) {
1176 case VK_DESCRIPTOR_TYPE_SAMPLER:
1177 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1178 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
1179 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
1180 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
1181 for (uint32_t j = 0; j < write->descriptorCount; j++) {
1182 anv_descriptor_set_write_image_view(device, set,
1183 write->pImageInfo + j,
1184 write->descriptorType,
1185 write->dstBinding,
1186 write->dstArrayElement + j);
1187 }
1188 break;
1189
1190 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
1191 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
1192 for (uint32_t j = 0; j < write->descriptorCount; j++) {
1193 ANV_FROM_HANDLE(anv_buffer_view, bview,
1194 write->pTexelBufferView[j]);
1195
1196 anv_descriptor_set_write_buffer_view(device, set,
1197 write->descriptorType,
1198 bview,
1199 write->dstBinding,
1200 write->dstArrayElement + j);
1201 }
1202 break;
1203
1204 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
1205 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
1206 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1207 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1208 for (uint32_t j = 0; j < write->descriptorCount; j++) {
1209 assert(write->pBufferInfo[j].buffer);
1210 ANV_FROM_HANDLE(anv_buffer, buffer, write->pBufferInfo[j].buffer);
1211 assert(buffer);
1212
1213 anv_descriptor_set_write_buffer(device, set,
1214 NULL,
1215 write->descriptorType,
1216 buffer,
1217 write->dstBinding,
1218 write->dstArrayElement + j,
1219 write->pBufferInfo[j].offset,
1220 write->pBufferInfo[j].range);
1221 }
1222 break;
1223
1224 case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT: {
1225 const VkWriteDescriptorSetInlineUniformBlockEXT *inline_write =
1226 vk_find_struct_const(write->pNext,
1227 WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT);
1228 assert(inline_write->dataSize == write->descriptorCount);
1229 anv_descriptor_set_write_inline_uniform_data(device, set,
1230 write->dstBinding,
1231 inline_write->pData,
1232 write->dstArrayElement,
1233 inline_write->dataSize);
1234 break;
1235 }
1236
1237 default:
1238 break;
1239 }
1240 }
1241
1242 for (uint32_t i = 0; i < descriptorCopyCount; i++) {
1243 const VkCopyDescriptorSet *copy = &pDescriptorCopies[i];
1244 ANV_FROM_HANDLE(anv_descriptor_set, src, copy->srcSet);
1245 ANV_FROM_HANDLE(anv_descriptor_set, dst, copy->dstSet);
1246
1247 const struct anv_descriptor_set_binding_layout *src_layout =
1248 &src->layout->binding[copy->srcBinding];
1249 struct anv_descriptor *src_desc =
1250 &src->descriptors[src_layout->descriptor_index];
1251 src_desc += copy->srcArrayElement;
1252
1253 const struct anv_descriptor_set_binding_layout *dst_layout =
1254 &dst->layout->binding[copy->dstBinding];
1255 struct anv_descriptor *dst_desc =
1256 &dst->descriptors[dst_layout->descriptor_index];
1257 dst_desc += copy->dstArrayElement;
1258
1259 for (uint32_t j = 0; j < copy->descriptorCount; j++)
1260 dst_desc[j] = src_desc[j];
1261
1262 if (src_layout->data & ANV_DESCRIPTOR_INLINE_UNIFORM) {
1263 assert(src_layout->data == ANV_DESCRIPTOR_INLINE_UNIFORM);
1264 memcpy(dst->desc_mem.map + dst_layout->descriptor_offset +
1265 copy->dstArrayElement,
1266 src->desc_mem.map + src_layout->descriptor_offset +
1267 copy->srcArrayElement,
1268 copy->descriptorCount);
1269 } else {
1270 unsigned desc_size = anv_descriptor_size(src_layout);
1271 if (desc_size > 0) {
1272 assert(desc_size == anv_descriptor_size(dst_layout));
1273 memcpy(dst->desc_mem.map + dst_layout->descriptor_offset +
1274 copy->dstArrayElement * desc_size,
1275 src->desc_mem.map + src_layout->descriptor_offset +
1276 copy->srcArrayElement * desc_size,
1277 copy->descriptorCount * desc_size);
1278 }
1279 }
1280 }
1281 }
1282
1283 /*
1284 * Descriptor update templates.
1285 */
1286
1287 void
1288 anv_descriptor_set_write_template(struct anv_device *device,
1289 struct anv_descriptor_set *set,
1290 struct anv_state_stream *alloc_stream,
1291 const struct anv_descriptor_update_template *template,
1292 const void *data)
1293 {
1294 for (uint32_t i = 0; i < template->entry_count; i++) {
1295 const struct anv_descriptor_template_entry *entry =
1296 &template->entries[i];
1297
1298 switch (entry->type) {
1299 case VK_DESCRIPTOR_TYPE_SAMPLER:
1300 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1301 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
1302 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
1303 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
1304 for (uint32_t j = 0; j < entry->array_count; j++) {
1305 const VkDescriptorImageInfo *info =
1306 data + entry->offset + j * entry->stride;
1307 anv_descriptor_set_write_image_view(device, set,
1308 info, entry->type,
1309 entry->binding,
1310 entry->array_element + j);
1311 }
1312 break;
1313
1314 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
1315 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
1316 for (uint32_t j = 0; j < entry->array_count; j++) {
1317 const VkBufferView *_bview =
1318 data + entry->offset + j * entry->stride;
1319 ANV_FROM_HANDLE(anv_buffer_view, bview, *_bview);
1320
1321 anv_descriptor_set_write_buffer_view(device, set,
1322 entry->type,
1323 bview,
1324 entry->binding,
1325 entry->array_element + j);
1326 }
1327 break;
1328
1329 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
1330 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
1331 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1332 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1333 for (uint32_t j = 0; j < entry->array_count; j++) {
1334 const VkDescriptorBufferInfo *info =
1335 data + entry->offset + j * entry->stride;
1336 ANV_FROM_HANDLE(anv_buffer, buffer, info->buffer);
1337
1338 anv_descriptor_set_write_buffer(device, set,
1339 alloc_stream,
1340 entry->type,
1341 buffer,
1342 entry->binding,
1343 entry->array_element + j,
1344 info->offset, info->range);
1345 }
1346 break;
1347
1348 case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT:
1349 anv_descriptor_set_write_inline_uniform_data(device, set,
1350 entry->binding,
1351 data + entry->offset,
1352 entry->array_element,
1353 entry->array_count);
1354 break;
1355
1356 default:
1357 break;
1358 }
1359 }
1360 }
1361
1362 VkResult anv_CreateDescriptorUpdateTemplate(
1363 VkDevice _device,
1364 const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo,
1365 const VkAllocationCallbacks* pAllocator,
1366 VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate)
1367 {
1368 ANV_FROM_HANDLE(anv_device, device, _device);
1369 struct anv_descriptor_update_template *template;
1370
1371 size_t size = sizeof(*template) +
1372 pCreateInfo->descriptorUpdateEntryCount * sizeof(template->entries[0]);
1373 template = vk_alloc2(&device->alloc, pAllocator, size, 8,
1374 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1375 if (template == NULL)
1376 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1377
1378 template->bind_point = pCreateInfo->pipelineBindPoint;
1379
1380 if (pCreateInfo->templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET)
1381 template->set = pCreateInfo->set;
1382
1383 template->entry_count = pCreateInfo->descriptorUpdateEntryCount;
1384 for (uint32_t i = 0; i < template->entry_count; i++) {
1385 const VkDescriptorUpdateTemplateEntry *pEntry =
1386 &pCreateInfo->pDescriptorUpdateEntries[i];
1387
1388 template->entries[i] = (struct anv_descriptor_template_entry) {
1389 .type = pEntry->descriptorType,
1390 .binding = pEntry->dstBinding,
1391 .array_element = pEntry->dstArrayElement,
1392 .array_count = pEntry->descriptorCount,
1393 .offset = pEntry->offset,
1394 .stride = pEntry->stride,
1395 };
1396 }
1397
1398 *pDescriptorUpdateTemplate =
1399 anv_descriptor_update_template_to_handle(template);
1400
1401 return VK_SUCCESS;
1402 }
1403
1404 void anv_DestroyDescriptorUpdateTemplate(
1405 VkDevice _device,
1406 VkDescriptorUpdateTemplate descriptorUpdateTemplate,
1407 const VkAllocationCallbacks* pAllocator)
1408 {
1409 ANV_FROM_HANDLE(anv_device, device, _device);
1410 ANV_FROM_HANDLE(anv_descriptor_update_template, template,
1411 descriptorUpdateTemplate);
1412
1413 vk_free2(&device->alloc, pAllocator, template);
1414 }
1415
1416 void anv_UpdateDescriptorSetWithTemplate(
1417 VkDevice _device,
1418 VkDescriptorSet descriptorSet,
1419 VkDescriptorUpdateTemplate descriptorUpdateTemplate,
1420 const void* pData)
1421 {
1422 ANV_FROM_HANDLE(anv_device, device, _device);
1423 ANV_FROM_HANDLE(anv_descriptor_set, set, descriptorSet);
1424 ANV_FROM_HANDLE(anv_descriptor_update_template, template,
1425 descriptorUpdateTemplate);
1426
1427 anv_descriptor_set_write_template(device, set, NULL, template, pData);
1428 }