anv: move to using vk_alloc helpers.
[mesa.git] / src / intel / vulkan / anv_descriptor_set.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

#include "util/mesa-sha1.h"

#include "anv_private.h"

/*
 * Descriptor set layouts.
 */

VkResult anv_CreateDescriptorSetLayout(
    VkDevice                                    _device,
    const VkDescriptorSetLayoutCreateInfo*      pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkDescriptorSetLayout*                      pSetLayout)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_descriptor_set_layout *set_layout;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO);

   uint32_t max_binding = 0;
   uint32_t immutable_sampler_count = 0;
   for (uint32_t j = 0; j < pCreateInfo->bindingCount; j++) {
      max_binding = MAX2(max_binding, pCreateInfo->pBindings[j].binding);
      if (pCreateInfo->pBindings[j].pImmutableSamplers)
         immutable_sampler_count += pCreateInfo->pBindings[j].descriptorCount;
   }

   size_t size = sizeof(struct anv_descriptor_set_layout) +
                 (max_binding + 1) * sizeof(set_layout->binding[0]) +
                 immutable_sampler_count * sizeof(struct anv_sampler *);

   set_layout = vk_alloc2(&device->alloc, pAllocator, size, 8,
                          VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!set_layout)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   /* We just allocate all the samplers at the end of the struct */
   struct anv_sampler **samplers =
      (struct anv_sampler **)&set_layout->binding[max_binding + 1];

   memset(set_layout, 0, sizeof(*set_layout));
   set_layout->binding_count = max_binding + 1;

   for (uint32_t b = 0; b <= max_binding; b++) {
      /* Initialize all binding_layout entries to -1 */
      memset(&set_layout->binding[b], -1, sizeof(set_layout->binding[b]));

      set_layout->binding[b].array_size = 0;
      set_layout->binding[b].immutable_samplers = NULL;
   }

   /* Initialize all samplers to 0 */
   memset(samplers, 0, immutable_sampler_count * sizeof(*samplers));

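   /* Running per-stage totals; each binding is assigned its starting
    * sampler/surface/image index from these in the loop below.
    */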
   uint32_t sampler_count[MESA_SHADER_STAGES] = { 0, };
   uint32_t surface_count[MESA_SHADER_STAGES] = { 0, };
   uint32_t image_count[MESA_SHADER_STAGES] = { 0, };
   uint32_t buffer_count = 0;
   uint32_t dynamic_offset_count = 0;

   for (uint32_t j = 0; j < pCreateInfo->bindingCount; j++) {
      const VkDescriptorSetLayoutBinding *binding = &pCreateInfo->pBindings[j];
      uint32_t b = binding->binding;
      /* We temporarily store the pointer to the binding in the
       * immutable_samplers pointer. This provides us with a quick-and-dirty
       * way to sort the bindings by binding number.
       */
      set_layout->binding[b].immutable_samplers = (void *)binding;
   }

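   /* Second pass: walk the bindings in binding-number order and carve out
    * descriptor, buffer, and per-stage index ranges for each one.
    */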
   for (uint32_t b = 0; b <= max_binding; b++) {
      const VkDescriptorSetLayoutBinding *binding =
         (void *)set_layout->binding[b].immutable_samplers;

      if (binding == NULL)
         continue;

      assert(binding->descriptorCount > 0);
#ifndef NDEBUG
      set_layout->binding[b].type = binding->descriptorType;
#endif
      set_layout->binding[b].array_size = binding->descriptorCount;
      set_layout->binding[b].descriptor_index = set_layout->size;
      set_layout->size += binding->descriptorCount;

      switch (binding->descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
         anv_foreach_stage(s, binding->stageFlags) {
            set_layout->binding[b].stage[s].sampler_index = sampler_count[s];
            sampler_count[s] += binding->descriptorCount;
         }
         break;
      default:
         break;
      }

      switch (binding->descriptorType) {
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         set_layout->binding[b].buffer_index = buffer_count;
         buffer_count += binding->descriptorCount;
         /* fall through */

      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         anv_foreach_stage(s, binding->stageFlags) {
            set_layout->binding[b].stage[s].surface_index = surface_count[s];
            surface_count[s] += binding->descriptorCount;
         }
         break;
      default:
         break;
      }

      switch (binding->descriptorType) {
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         set_layout->binding[b].dynamic_offset_index = dynamic_offset_count;
         dynamic_offset_count += binding->descriptorCount;
         break;
      default:
         break;
      }

      switch (binding->descriptorType) {
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
         anv_foreach_stage(s, binding->stageFlags) {
            set_layout->binding[b].stage[s].image_index = image_count[s];
            image_count[s] += binding->descriptorCount;
         }
         break;
      default:
         break;
      }

      if (binding->pImmutableSamplers) {
         set_layout->binding[b].immutable_samplers = samplers;
         samplers += binding->descriptorCount;

         for (uint32_t i = 0; i < binding->descriptorCount; i++)
            set_layout->binding[b].immutable_samplers[i] =
               anv_sampler_from_handle(binding->pImmutableSamplers[i]);
      } else {
         set_layout->binding[b].immutable_samplers = NULL;
      }

      set_layout->shader_stages |= binding->stageFlags;
   }

   set_layout->buffer_count = buffer_count;
   set_layout->dynamic_offset_count = dynamic_offset_count;

   *pSetLayout = anv_descriptor_set_layout_to_handle(set_layout);

   return VK_SUCCESS;
}

void anv_DestroyDescriptorSetLayout(
    VkDevice                                    _device,
    VkDescriptorSetLayout                       _set_layout,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_descriptor_set_layout, set_layout, _set_layout);

   vk_free2(&device->alloc, pAllocator, set_layout);
}

static void
sha1_update_descriptor_set_layout(struct mesa_sha1 *ctx,
                                  const struct anv_descriptor_set_layout *layout)
{
   size_t size = sizeof(*layout) +
                 sizeof(layout->binding[0]) * layout->binding_count;
   _mesa_sha1_update(ctx, layout, size);
}

/*
 * Pipeline layouts. These have nothing to do with the pipeline. They are
 * just multiple descriptor set layouts pasted together.
 */

VkResult anv_CreatePipelineLayout(
    VkDevice                                    _device,
    const VkPipelineLayoutCreateInfo*           pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkPipelineLayout*                           pPipelineLayout)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_pipeline_layout *layout;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO);

   layout = vk_alloc2(&device->alloc, pAllocator, sizeof(*layout), 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (layout == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   layout->num_sets = pCreateInfo->setLayoutCount;

   unsigned dynamic_offset_count = 0;

   memset(layout->stage, 0, sizeof(layout->stage));
   for (uint32_t set = 0; set < pCreateInfo->setLayoutCount; set++) {
      ANV_FROM_HANDLE(anv_descriptor_set_layout, set_layout,
                      pCreateInfo->pSetLayouts[set]);
      layout->set[set].layout = set_layout;

      layout->set[set].dynamic_offset_start = dynamic_offset_count;
      for (uint32_t b = 0; b < set_layout->binding_count; b++) {
         if (set_layout->binding[b].dynamic_offset_index < 0)
            continue;

         dynamic_offset_count += set_layout->binding[b].array_size;
         for (gl_shader_stage s = 0; s < MESA_SHADER_STAGES; s++) {
            if (set_layout->binding[b].stage[s].surface_index >= 0)
               layout->stage[s].has_dynamic_offsets = true;
         }
      }
   }

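   /* Hash everything that affects compiled shaders so the digest can serve
    * as a cheap identity for this layout (e.g. as a pipeline cache key).
    */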
   struct mesa_sha1 *ctx = _mesa_sha1_init();
   for (unsigned s = 0; s < layout->num_sets; s++) {
      sha1_update_descriptor_set_layout(ctx, layout->set[s].layout);
      _mesa_sha1_update(ctx, &layout->set[s].dynamic_offset_start,
                        sizeof(layout->set[s].dynamic_offset_start));
   }
   _mesa_sha1_update(ctx, &layout->num_sets, sizeof(layout->num_sets));
   for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
      _mesa_sha1_update(ctx, &layout->stage[s].has_dynamic_offsets,
                        sizeof(layout->stage[s].has_dynamic_offsets));
   }
   _mesa_sha1_final(ctx, layout->sha1);

   *pPipelineLayout = anv_pipeline_layout_to_handle(layout);

   return VK_SUCCESS;
}

void anv_DestroyPipelineLayout(
    VkDevice                                    _device,
    VkPipelineLayout                            _pipelineLayout,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_pipeline_layout, pipeline_layout, _pipelineLayout);

   vk_free2(&device->alloc, pAllocator, pipeline_layout);
}

/*
 * Descriptor pools.
 *
 * These are implemented using a big pool of memory and a free-list for the
 * host memory allocations and a state_stream and a free list for the buffer
 * view surface state. The spec allows us to fail to allocate due to
 * fragmentation in all cases but two: 1) after pool reset, allocating up
 * until the pool size with no freeing must succeed and 2) allocating and
 * freeing only descriptor sets with the same layout. Case 1) is easy enough,
 * and the free list lets us recycle blocks for case 2).
 */

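/* Sentinel for an empty free list. Free-list links are byte offsets into
 * pool->data; offset 0 is a legitimate allocation, and every allocation is
 * larger than one byte, so 1 can never be a real offset.
 */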
#define EMPTY 1

VkResult anv_CreateDescriptorPool(
    VkDevice                                    _device,
    const VkDescriptorPoolCreateInfo*           pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkDescriptorPool*                           pDescriptorPool)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_descriptor_pool *pool;

   uint32_t descriptor_count = 0;
   uint32_t buffer_count = 0;
   for (uint32_t i = 0; i < pCreateInfo->poolSizeCount; i++) {
      switch (pCreateInfo->pPoolSizes[i].type) {
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         buffer_count += pCreateInfo->pPoolSizes[i].descriptorCount;
         /* fall through: buffer descriptors also count as descriptors */
      default:
         descriptor_count += pCreateInfo->pPoolSizes[i].descriptorCount;
         break;
      }
   }

   const size_t size =
      sizeof(*pool) +
      pCreateInfo->maxSets * sizeof(struct anv_descriptor_set) +
      descriptor_count * sizeof(struct anv_descriptor) +
      buffer_count * sizeof(struct anv_buffer_view);

   pool = vk_alloc2(&device->alloc, pAllocator, size, 8,
                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!pool)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   pool->size = size;
   pool->next = 0;
   pool->free_list = EMPTY;

   anv_state_stream_init(&pool->surface_state_stream,
                         &device->surface_state_block_pool);
   pool->surface_state_free_list = NULL;

   *pDescriptorPool = anv_descriptor_pool_to_handle(pool);

   return VK_SUCCESS;
}

void anv_DestroyDescriptorPool(
    VkDevice                                    _device,
    VkDescriptorPool                            _pool,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_descriptor_pool, pool, _pool);

   anv_state_stream_finish(&pool->surface_state_stream);
   vk_free2(&device->alloc, pAllocator, pool);
}

VkResult anv_ResetDescriptorPool(
    VkDevice                                    _device,
    VkDescriptorPool                            descriptorPool,
    VkDescriptorPoolResetFlags                  flags)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_descriptor_pool, pool, descriptorPool);

   pool->next = 0;
   pool->free_list = EMPTY;
   anv_state_stream_finish(&pool->surface_state_stream);
   anv_state_stream_init(&pool->surface_state_stream,
                         &device->surface_state_block_pool);
   pool->surface_state_free_list = NULL;

   return VK_SUCCESS;
}

struct pool_free_list_entry {
   uint32_t next;
   uint32_t size;
};

static size_t
layout_size(const struct anv_descriptor_set_layout *layout)
{
   return
      sizeof(struct anv_descriptor_set) +
      layout->size * sizeof(struct anv_descriptor) +
      layout->buffer_count * sizeof(struct anv_buffer_view);
}

struct surface_state_free_list_entry {
   void *next;
   uint32_t offset;
};

VkResult
anv_descriptor_set_create(struct anv_device *device,
                          struct anv_descriptor_pool *pool,
                          const struct anv_descriptor_set_layout *layout,
                          struct anv_descriptor_set **out_set)
{
   struct anv_descriptor_set *set;
   const size_t size = layout_size(layout);

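   /* Try a bump allocation off the end of the pool first; if that doesn't
    * fit, fall back to a first-fit scan of the free list.
    */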
   set = NULL;
   if (size <= pool->size - pool->next) {
      set = (struct anv_descriptor_set *) (pool->data + pool->next);
      pool->next += size;
   } else {
      struct pool_free_list_entry *entry;
      uint32_t *link = &pool->free_list;
      for (uint32_t f = pool->free_list; f != EMPTY; f = entry->next) {
         entry = (struct pool_free_list_entry *) (pool->data + f);
         if (size <= entry->size) {
            *link = entry->next;
            set = (struct anv_descriptor_set *) entry;
            break;
         }
         link = &entry->next;
      }
   }

   if (set == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   set->size = size;
   set->layout = layout;
   set->buffer_views =
      (struct anv_buffer_view *) &set->descriptors[layout->size];
   set->buffer_count = layout->buffer_count;

   /* By defining the descriptors to be zero now, we can later verify that
    * a descriptor has not been populated with user data.
    */
   memset(set->descriptors, 0, sizeof(struct anv_descriptor) * layout->size);

   /* Go through and fill out immutable samplers if we have any */
   struct anv_descriptor *desc = set->descriptors;
   for (uint32_t b = 0; b < layout->binding_count; b++) {
      if (layout->binding[b].immutable_samplers) {
         for (uint32_t i = 0; i < layout->binding[b].array_size; i++) {
            /* The type will get changed to COMBINED_IMAGE_SAMPLER in
             * UpdateDescriptorSets if needed. However, if the descriptor
             * set has an immutable sampler, UpdateDescriptorSets may never
             * touch it, so we need to make sure it's 100% valid now.
             */
            desc[i] = (struct anv_descriptor) {
               .type = VK_DESCRIPTOR_TYPE_SAMPLER,
               .sampler = layout->binding[b].immutable_samplers[i],
            };
         }
      }
      desc += layout->binding[b].array_size;
   }

   /* Allocate surface state for the buffer views. */
   for (uint32_t b = 0; b < layout->buffer_count; b++) {
      struct surface_state_free_list_entry *entry =
         pool->surface_state_free_list;
      struct anv_state state;

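      /* A recycled free-list entry lives in the surface state's own CPU
       * mapping, so the entry address doubles as the state's map pointer.
       */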
      if (entry) {
         state.map = entry;
         state.offset = entry->offset;
         state.alloc_size = 64;
         pool->surface_state_free_list = entry->next;
      } else {
         state = anv_state_stream_alloc(&pool->surface_state_stream, 64, 64);
      }

      set->buffer_views[b].surface_state = state;
   }

   *out_set = set;

   return VK_SUCCESS;
}

void
anv_descriptor_set_destroy(struct anv_device *device,
                           struct anv_descriptor_pool *pool,
                           struct anv_descriptor_set *set)
{
   /* Put the buffer view surface state back on the free list. */
   for (uint32_t b = 0; b < set->buffer_count; b++) {
      struct surface_state_free_list_entry *entry =
         set->buffer_views[b].surface_state.map;
      entry->next = pool->surface_state_free_list;
      pool->surface_state_free_list = entry;
   }

   /* Put the descriptor set allocation back on the free list. */
   const uint32_t index = (char *) set - pool->data;
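   /* If this was the most recent allocation, just rewind the bump pointer
    * instead of adding a free-list entry.
    */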
   if (index + set->size == pool->next) {
      pool->next = index;
   } else {
      struct pool_free_list_entry *entry = (struct pool_free_list_entry *) set;
      entry->next = pool->free_list;
      entry->size = set->size;
      pool->free_list = (char *) entry - pool->data;
   }
}

VkResult anv_AllocateDescriptorSets(
    VkDevice                                    _device,
    const VkDescriptorSetAllocateInfo*          pAllocateInfo,
    VkDescriptorSet*                            pDescriptorSets)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_descriptor_pool, pool, pAllocateInfo->descriptorPool);

   VkResult result = VK_SUCCESS;
   struct anv_descriptor_set *set;
   uint32_t i;

   for (i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
      ANV_FROM_HANDLE(anv_descriptor_set_layout, layout,
                      pAllocateInfo->pSetLayouts[i]);

      result = anv_descriptor_set_create(device, pool, layout, &set);
      if (result != VK_SUCCESS)
         break;

      pDescriptorSets[i] = anv_descriptor_set_to_handle(set);
   }

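   /* On failure, free the sets we did manage to allocate so nothing leaks
    * out of the pool.
    */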
   if (result != VK_SUCCESS)
      anv_FreeDescriptorSets(_device, pAllocateInfo->descriptorPool,
                             i, pDescriptorSets);

   return result;
}

VkResult anv_FreeDescriptorSets(
    VkDevice                                    _device,
    VkDescriptorPool                            descriptorPool,
    uint32_t                                    count,
    const VkDescriptorSet*                      pDescriptorSets)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_descriptor_pool, pool, descriptorPool);

   for (uint32_t i = 0; i < count; i++) {
      ANV_FROM_HANDLE(anv_descriptor_set, set, pDescriptorSets[i]);

      anv_descriptor_set_destroy(device, pool, set);
   }

   return VK_SUCCESS;
}

void anv_UpdateDescriptorSets(
    VkDevice                                    _device,
    uint32_t                                    descriptorWriteCount,
    const VkWriteDescriptorSet*                 pDescriptorWrites,
    uint32_t                                    descriptorCopyCount,
    const VkCopyDescriptorSet*                  pDescriptorCopies)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   for (uint32_t i = 0; i < descriptorWriteCount; i++) {
      const VkWriteDescriptorSet *write = &pDescriptorWrites[i];
      ANV_FROM_HANDLE(anv_descriptor_set, set, write->dstSet);
      const struct anv_descriptor_set_binding_layout *bind_layout =
         &set->layout->binding[write->dstBinding];
      struct anv_descriptor *desc =
         &set->descriptors[bind_layout->descriptor_index];
      desc += write->dstArrayElement;

      assert(write->descriptorType == bind_layout->type);

      switch (write->descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            ANV_FROM_HANDLE(anv_sampler, sampler,
                            write->pImageInfo[j].sampler);

            desc[j] = (struct anv_descriptor) {
               .type = VK_DESCRIPTOR_TYPE_SAMPLER,
               .sampler = sampler,
            };
         }
         break;

      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            ANV_FROM_HANDLE(anv_image_view, iview,
                            write->pImageInfo[j].imageView);
            ANV_FROM_HANDLE(anv_sampler, sampler,
                            write->pImageInfo[j].sampler);

            desc[j].type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
            desc[j].image_view = iview;

            /* If this descriptor has an immutable sampler, we don't want
             * to stomp on it.
             */
            if (sampler)
               desc[j].sampler = sampler;
         }
         break;

      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            ANV_FROM_HANDLE(anv_image_view, iview,
                            write->pImageInfo[j].imageView);

            desc[j] = (struct anv_descriptor) {
               .type = write->descriptorType,
               .image_view = iview,
            };
         }
         break;

      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            ANV_FROM_HANDLE(anv_buffer_view, bview,
                            write->pTexelBufferView[j]);

            desc[j] = (struct anv_descriptor) {
               .type = write->descriptorType,
               .buffer_view = bview,
            };
         }
         break;

      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         anv_finishme("input attachments not implemented");
         break;

      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            assert(write->pBufferInfo[j].buffer);
            ANV_FROM_HANDLE(anv_buffer, buffer, write->pBufferInfo[j].buffer);
            assert(buffer);

            struct anv_buffer_view *view =
               &set->buffer_views[bind_layout->buffer_index];
            view += write->dstArrayElement + j;

            view->format =
               anv_isl_format_for_descriptor_type(write->descriptorType);
            view->bo = buffer->bo;
            view->offset = buffer->offset + write->pBufferInfo[j].offset;

            /* For buffers with dynamic offsets, we use the full possible
             * range in the surface state and do the actual range-checking
             * in the shader.
             */
            if (bind_layout->dynamic_offset_index >= 0 ||
                write->pBufferInfo[j].range == VK_WHOLE_SIZE)
               view->range = buffer->size - write->pBufferInfo[j].offset;
            else
               view->range = write->pBufferInfo[j].range;

            anv_fill_buffer_surface_state(device, view->surface_state,
                                          view->format,
                                          view->offset, view->range, 1);

            desc[j] = (struct anv_descriptor) {
               .type = write->descriptorType,
               .buffer_view = view,
            };
         }
         break;

      default:
         break;
      }
   }

   for (uint32_t i = 0; i < descriptorCopyCount; i++) {
      const VkCopyDescriptorSet *copy = &pDescriptorCopies[i];
      ANV_FROM_HANDLE(anv_descriptor_set, src, copy->srcSet);
      ANV_FROM_HANDLE(anv_descriptor_set, dst, copy->dstSet);

      const struct anv_descriptor_set_binding_layout *src_layout =
         &src->layout->binding[copy->srcBinding];
      struct anv_descriptor *src_desc =
         &src->descriptors[src_layout->descriptor_index];
      src_desc += copy->srcArrayElement;

      const struct anv_descriptor_set_binding_layout *dst_layout =
         &dst->layout->binding[copy->dstBinding];
      struct anv_descriptor *dst_desc =
         &dst->descriptors[dst_layout->descriptor_index];
      dst_desc += copy->dstArrayElement;

      for (uint32_t j = 0; j < copy->descriptorCount; j++)
         dst_desc[j] = src_desc[j];
   }
}