anv: Include the pipeline layout in the shader hash
mesa.git: src/intel/vulkan/anv_descriptor_set.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

#include "util/mesa-sha1.h"

#include "anv_private.h"

/*
 * Descriptor set layouts.
 */

VkResult anv_CreateDescriptorSetLayout(
    VkDevice                                    _device,
    const VkDescriptorSetLayoutCreateInfo*      pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkDescriptorSetLayout*                      pSetLayout)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_descriptor_set_layout *set_layout;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO);

   uint32_t max_binding = 0;
   uint32_t immutable_sampler_count = 0;
   for (uint32_t j = 0; j < pCreateInfo->bindingCount; j++) {
      max_binding = MAX2(max_binding, pCreateInfo->pBindings[j].binding);
      if (pCreateInfo->pBindings[j].pImmutableSamplers)
         immutable_sampler_count += pCreateInfo->pBindings[j].descriptorCount;
   }

   size_t size = sizeof(struct anv_descriptor_set_layout) +
                 (max_binding + 1) * sizeof(set_layout->binding[0]) +
                 immutable_sampler_count * sizeof(struct anv_sampler *);

   set_layout = anv_alloc2(&device->alloc, pAllocator, size, 8,
                           VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!set_layout)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   /* We just allocate all the samplers at the end of the struct */
   struct anv_sampler **samplers =
      (struct anv_sampler **)&set_layout->binding[max_binding + 1];

   set_layout->binding_count = max_binding + 1;
   set_layout->shader_stages = 0;
   set_layout->size = 0;

   for (uint32_t b = 0; b <= max_binding; b++) {
      /* Initialize all binding_layout entries to -1 */
      memset(&set_layout->binding[b], -1, sizeof(set_layout->binding[b]));

      set_layout->binding[b].array_size = 0;
      set_layout->binding[b].immutable_samplers = NULL;
   }

   /* Initialize all samplers to 0 */
   memset(samplers, 0, immutable_sampler_count * sizeof(*samplers));

   uint32_t sampler_count[MESA_SHADER_STAGES] = { 0, };
   uint32_t surface_count[MESA_SHADER_STAGES] = { 0, };
   uint32_t image_count[MESA_SHADER_STAGES] = { 0, };
   uint32_t buffer_count = 0;
   uint32_t dynamic_offset_count = 0;

   for (uint32_t j = 0; j < pCreateInfo->bindingCount; j++) {
      const VkDescriptorSetLayoutBinding *binding = &pCreateInfo->pBindings[j];
      uint32_t b = binding->binding;
      /* We temporarily store the pointer to the binding in the
       * immutable_samplers pointer. This provides us with a quick-and-dirty
       * way to sort the bindings by binding number.
       */
      set_layout->binding[b].immutable_samplers = (void *)binding;
   }

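   /* Second pass: walk the bindings in binding-number order, recovering the
    * VkDescriptorSetLayoutBinding pointers stashed above.
    */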
   for (uint32_t b = 0; b <= max_binding; b++) {
      const VkDescriptorSetLayoutBinding *binding =
         (void *)set_layout->binding[b].immutable_samplers;

      if (binding == NULL)
         continue;

      assert(binding->descriptorCount > 0);
#ifndef NDEBUG
      set_layout->binding[b].type = binding->descriptorType;
#endif
      set_layout->binding[b].array_size = binding->descriptorCount;
      set_layout->binding[b].descriptor_index = set_layout->size;
      set_layout->size += binding->descriptorCount;

      switch (binding->descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
         anv_foreach_stage(s, binding->stageFlags) {
            set_layout->binding[b].stage[s].sampler_index = sampler_count[s];
            sampler_count[s] += binding->descriptorCount;
         }
         break;
      default:
         break;
      }

      switch (binding->descriptorType) {
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         set_layout->binding[b].buffer_index = buffer_count;
         buffer_count += binding->descriptorCount;
         /* fall through */

      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         anv_foreach_stage(s, binding->stageFlags) {
            set_layout->binding[b].stage[s].surface_index = surface_count[s];
            surface_count[s] += binding->descriptorCount;
         }
         break;
      default:
         break;
      }

      switch (binding->descriptorType) {
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         set_layout->binding[b].dynamic_offset_index = dynamic_offset_count;
         dynamic_offset_count += binding->descriptorCount;
         break;
      default:
         break;
      }

      switch (binding->descriptorType) {
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
         anv_foreach_stage(s, binding->stageFlags) {
            set_layout->binding[b].stage[s].image_index = image_count[s];
            image_count[s] += binding->descriptorCount;
         }
         break;
      default:
         break;
      }

      if (binding->pImmutableSamplers) {
         set_layout->binding[b].immutable_samplers = samplers;
         samplers += binding->descriptorCount;

         for (uint32_t i = 0; i < binding->descriptorCount; i++)
            set_layout->binding[b].immutable_samplers[i] =
               anv_sampler_from_handle(binding->pImmutableSamplers[i]);
      } else {
         set_layout->binding[b].immutable_samplers = NULL;
      }

      set_layout->shader_stages |= binding->stageFlags;
   }

   set_layout->buffer_count = buffer_count;
   set_layout->dynamic_offset_count = dynamic_offset_count;

   *pSetLayout = anv_descriptor_set_layout_to_handle(set_layout);

   return VK_SUCCESS;
}

void anv_DestroyDescriptorSetLayout(
    VkDevice                                    _device,
    VkDescriptorSetLayout                       _set_layout,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_descriptor_set_layout, set_layout, _set_layout);

   anv_free2(&device->alloc, pAllocator, set_layout);
}

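/* Fold an entire descriptor set layout into a SHA-1 context.  The layout
 * header and its trailing binding array are allocated as one contiguous
 * block, so a single _mesa_sha1_update() over that block covers the whole
 * layout.
 */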
static void
sha1_update_descriptor_set_layout(struct mesa_sha1 *ctx,
                                  const struct anv_descriptor_set_layout *layout)
{
   size_t size = sizeof(*layout) +
                 sizeof(layout->binding[0]) * layout->binding_count;
   _mesa_sha1_update(ctx, layout, size);
}

/*
 * Pipeline layouts.  These have nothing to do with the pipeline.  They are
 * just multiple descriptor set layouts pasted together.
 */

VkResult anv_CreatePipelineLayout(
    VkDevice                                    _device,
    const VkPipelineLayoutCreateInfo*           pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkPipelineLayout*                           pPipelineLayout)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_pipeline_layout *layout;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO);

   layout = anv_alloc2(&device->alloc, pAllocator, sizeof(*layout), 8,
                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (layout == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   layout->num_sets = pCreateInfo->setLayoutCount;

   unsigned dynamic_offset_count = 0;

   memset(layout->stage, 0, sizeof(layout->stage));
   for (uint32_t set = 0; set < pCreateInfo->setLayoutCount; set++) {
      ANV_FROM_HANDLE(anv_descriptor_set_layout, set_layout,
                      pCreateInfo->pSetLayouts[set]);
      layout->set[set].layout = set_layout;

      layout->set[set].dynamic_offset_start = dynamic_offset_count;
      for (uint32_t b = 0; b < set_layout->binding_count; b++) {
         if (set_layout->binding[b].dynamic_offset_index < 0)
            continue;

         dynamic_offset_count += set_layout->binding[b].array_size;
         for (gl_shader_stage s = 0; s < MESA_SHADER_STAGES; s++) {
            if (set_layout->binding[b].stage[s].surface_index >= 0)
               layout->stage[s].has_dynamic_offsets = true;
         }
      }
   }

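   /* Hash everything in the pipeline layout that can affect shader
    * compilation: each set layout, where its dynamic offsets start, the
    * number of sets, and the per-stage dynamic-offset flags.  The result is
    * folded into the shader hash used to key the pipeline cache (see the
    * commit subject).
    */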
   struct mesa_sha1 *ctx = _mesa_sha1_init();
   for (unsigned s = 0; s < layout->num_sets; s++) {
      sha1_update_descriptor_set_layout(ctx, layout->set[s].layout);
      _mesa_sha1_update(ctx, &layout->set[s].dynamic_offset_start,
                        sizeof(layout->set[s].dynamic_offset_start));
   }
   _mesa_sha1_update(ctx, &layout->num_sets, sizeof(layout->num_sets));
   for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
      _mesa_sha1_update(ctx, &layout->stage[s].has_dynamic_offsets,
                        sizeof(layout->stage[s].has_dynamic_offsets));
   }
   _mesa_sha1_final(ctx, layout->sha1);

   *pPipelineLayout = anv_pipeline_layout_to_handle(layout);

   return VK_SUCCESS;
}

void anv_DestroyPipelineLayout(
    VkDevice                                    _device,
    VkPipelineLayout                            _pipelineLayout,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_pipeline_layout, pipeline_layout, _pipelineLayout);

   anv_free2(&device->alloc, pAllocator, pipeline_layout);
}

/*
 * Descriptor pools.
 *
 * These are implemented using a big pool of memory and a free list for the
 * host memory allocations, plus a state_stream and a free list for the
 * buffer view surface state.  The spec allows us to fail to allocate due to
 * fragmentation in all cases but two: 1) after pool reset, allocating up
 * until the pool size with no freeing must succeed and 2) allocating and
 * freeing only descriptor sets with the same layout.  Case 1) is easy enough,
 * and the free list lets us recycle blocks for case 2).
 */

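/* Sentinel for an empty free list.  Offset 0 into the pool's data block is a
 * valid allocation (the first set after a reset lives there), so 0 cannot
 * mean "empty"; the odd value 1 should never collide with a real, aligned
 * entry offset.
 */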
#define EMPTY 1

VkResult anv_CreateDescriptorPool(
    VkDevice                                    _device,
    const VkDescriptorPoolCreateInfo*           pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkDescriptorPool*                           pDescriptorPool)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_descriptor_pool *pool;

   uint32_t descriptor_count = 0;
   uint32_t buffer_count = 0;
   for (uint32_t i = 0; i < pCreateInfo->poolSizeCount; i++) {
      switch (pCreateInfo->pPoolSizes[i].type) {
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         buffer_count += pCreateInfo->pPoolSizes[i].descriptorCount;
         /* fall through */
      default:
         descriptor_count += pCreateInfo->pPoolSizes[i].descriptorCount;
         break;
      }
   }

   const size_t size =
      sizeof(*pool) +
      pCreateInfo->maxSets * sizeof(struct anv_descriptor_set) +
      descriptor_count * sizeof(struct anv_descriptor) +
      buffer_count * sizeof(struct anv_buffer_view);

   pool = anv_alloc2(&device->alloc, pAllocator, size, 8,
                     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!pool)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   pool->size = size;
   pool->next = 0;
   pool->free_list = EMPTY;

   anv_state_stream_init(&pool->surface_state_stream,
                         &device->surface_state_block_pool);
   pool->surface_state_free_list = NULL;

   *pDescriptorPool = anv_descriptor_pool_to_handle(pool);

   return VK_SUCCESS;
}

void anv_DestroyDescriptorPool(
    VkDevice                                    _device,
    VkDescriptorPool                            _pool,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_descriptor_pool, pool, _pool);

   anv_state_stream_finish(&pool->surface_state_stream);
   anv_free2(&device->alloc, pAllocator, pool);
}

VkResult anv_ResetDescriptorPool(
    VkDevice                                    _device,
    VkDescriptorPool                            descriptorPool,
    VkDescriptorPoolResetFlags                  flags)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_descriptor_pool, pool, descriptorPool);

   pool->next = 0;
   pool->free_list = EMPTY;
   anv_state_stream_finish(&pool->surface_state_stream);
   anv_state_stream_init(&pool->surface_state_stream,
                         &device->surface_state_block_pool);
   pool->surface_state_free_list = NULL;

   return VK_SUCCESS;
}

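/* Free-list bookkeeping lives inside the freed memory itself: a freed
 * descriptor set is reinterpreted as a pool_free_list_entry whose `next`
 * field holds the pool-relative offset of the next free block (or EMPTY).
 */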
struct pool_free_list_entry {
   uint32_t next;
   uint32_t size;
};

static size_t
layout_size(const struct anv_descriptor_set_layout *layout)
{
   return
      sizeof(struct anv_descriptor_set) +
      layout->size * sizeof(struct anv_descriptor) +
      layout->buffer_count * sizeof(struct anv_buffer_view);
}

struct surface_state_free_list_entry {
   void *next;
   uint32_t offset;
};

VkResult
anv_descriptor_set_create(struct anv_device *device,
                          struct anv_descriptor_pool *pool,
                          const struct anv_descriptor_set_layout *layout,
                          struct anv_descriptor_set **out_set)
{
   struct anv_descriptor_set *set;
   const size_t size = layout_size(layout);

   set = NULL;
   if (size <= pool->size - pool->next) {
      set = (struct anv_descriptor_set *) (pool->data + pool->next);
      pool->next += size;
   } else {
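      /* The bump allocator is exhausted; do a first-fit walk of the free
       * list and unlink the first block large enough to hold this set.
       */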
      struct pool_free_list_entry *entry;
      uint32_t *link = &pool->free_list;
      for (uint32_t f = pool->free_list; f != EMPTY; f = entry->next) {
         entry = (struct pool_free_list_entry *) (pool->data + f);
         if (size <= entry->size) {
            *link = entry->next;
            set = (struct anv_descriptor_set *) entry;
            break;
         }
         link = &entry->next;
      }
   }

   if (set == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   set->size = size;
   set->layout = layout;
   set->buffer_views =
      (struct anv_buffer_view *) &set->descriptors[layout->size];
   set->buffer_count = layout->buffer_count;

   /* By defining the descriptors to be zero now, we can later verify that
    * a descriptor has not been populated with user data.
    */
   memset(set->descriptors, 0, sizeof(struct anv_descriptor) * layout->size);

   /* Go through and fill out immutable samplers if we have any */
   struct anv_descriptor *desc = set->descriptors;
   for (uint32_t b = 0; b < layout->binding_count; b++) {
      if (layout->binding[b].immutable_samplers) {
         for (uint32_t i = 0; i < layout->binding[b].array_size; i++) {
            /* The type will get changed to COMBINED_IMAGE_SAMPLER in
             * UpdateDescriptorSets if needed.  However, if the descriptor
             * set has an immutable sampler, UpdateDescriptorSets may never
             * touch it, so we need to make sure it's 100% valid now.
             */
            desc[i] = (struct anv_descriptor) {
               .type = VK_DESCRIPTOR_TYPE_SAMPLER,
               .sampler = layout->binding[b].immutable_samplers[i],
            };
         }
      }
      desc += layout->binding[b].array_size;
   }

   /* Allocate surface state for the buffer views. */
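   /* Recycle a state from the pool's free list when one is available;
    * otherwise carve a fresh 64-byte, 64-byte-aligned state out of the
    * pool's surface state stream.
    */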
   for (uint32_t b = 0; b < layout->buffer_count; b++) {
      struct surface_state_free_list_entry *entry =
         pool->surface_state_free_list;
      struct anv_state state;

      if (entry) {
         state.map = entry;
         state.offset = entry->offset;
         state.alloc_size = 64;
         pool->surface_state_free_list = entry->next;
      } else {
         state = anv_state_stream_alloc(&pool->surface_state_stream, 64, 64);
      }

      set->buffer_views[b].surface_state = state;
   }

   *out_set = set;

   return VK_SUCCESS;
}

void
anv_descriptor_set_destroy(struct anv_device *device,
                           struct anv_descriptor_pool *pool,
                           struct anv_descriptor_set *set)
{
   /* Put the buffer view surface state back on the free list. */
   for (uint32_t b = 0; b < set->buffer_count; b++) {
      struct surface_state_free_list_entry *entry =
         set->buffer_views[b].surface_state.map;
      entry->next = pool->surface_state_free_list;
      pool->surface_state_free_list = entry;
   }

   /* Put the descriptor set allocation back on the free list. */
   const uint32_t index = (char *) set - pool->data;
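   /* If this set was the most recent bump allocation, just rewind the bump
    * pointer instead of growing the free list.
    */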
   if (index + set->size == pool->next) {
      pool->next = index;
   } else {
      struct pool_free_list_entry *entry = (struct pool_free_list_entry *) set;
      entry->next = pool->free_list;
      entry->size = set->size;
      pool->free_list = (char *) entry - pool->data;
   }
}

VkResult anv_AllocateDescriptorSets(
    VkDevice                                    _device,
    const VkDescriptorSetAllocateInfo*          pAllocateInfo,
    VkDescriptorSet*                            pDescriptorSets)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_descriptor_pool, pool, pAllocateInfo->descriptorPool);

   VkResult result = VK_SUCCESS;
   struct anv_descriptor_set *set;
   uint32_t i;

   for (i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
      ANV_FROM_HANDLE(anv_descriptor_set_layout, layout,
                      pAllocateInfo->pSetLayouts[i]);

      result = anv_descriptor_set_create(device, pool, layout, &set);
      if (result != VK_SUCCESS)
         break;

      pDescriptorSets[i] = anv_descriptor_set_to_handle(set);
   }

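   /* Clean up whatever we did manage to create so that a failed allocation
    * leaves the pool unchanged.
    */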
   if (result != VK_SUCCESS)
      anv_FreeDescriptorSets(_device, pAllocateInfo->descriptorPool,
                             i, pDescriptorSets);

   return result;
}

VkResult anv_FreeDescriptorSets(
    VkDevice                                    _device,
    VkDescriptorPool                            descriptorPool,
    uint32_t                                    count,
    const VkDescriptorSet*                      pDescriptorSets)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_descriptor_pool, pool, descriptorPool);

   for (uint32_t i = 0; i < count; i++) {
      ANV_FROM_HANDLE(anv_descriptor_set, set, pDescriptorSets[i]);

      anv_descriptor_set_destroy(device, pool, set);
   }

   return VK_SUCCESS;
}

void anv_UpdateDescriptorSets(
    VkDevice                                    _device,
    uint32_t                                    descriptorWriteCount,
    const VkWriteDescriptorSet*                 pDescriptorWrites,
    uint32_t                                    descriptorCopyCount,
    const VkCopyDescriptorSet*                  pDescriptorCopies)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   for (uint32_t i = 0; i < descriptorWriteCount; i++) {
      const VkWriteDescriptorSet *write = &pDescriptorWrites[i];
      ANV_FROM_HANDLE(anv_descriptor_set, set, write->dstSet);
      const struct anv_descriptor_set_binding_layout *bind_layout =
         &set->layout->binding[write->dstBinding];
      struct anv_descriptor *desc =
         &set->descriptors[bind_layout->descriptor_index];
      desc += write->dstArrayElement;

      assert(write->descriptorType == bind_layout->type);

      switch (write->descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            ANV_FROM_HANDLE(anv_sampler, sampler,
                            write->pImageInfo[j].sampler);

            desc[j] = (struct anv_descriptor) {
               .type = VK_DESCRIPTOR_TYPE_SAMPLER,
               .sampler = sampler,
            };
         }
         break;

      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            ANV_FROM_HANDLE(anv_image_view, iview,
                            write->pImageInfo[j].imageView);
            ANV_FROM_HANDLE(anv_sampler, sampler,
                            write->pImageInfo[j].sampler);

            desc[j].type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
            desc[j].image_view = iview;

            /* If this descriptor has an immutable sampler, we don't want
             * to stomp on it.
             */
            if (sampler)
               desc[j].sampler = sampler;
         }
         break;

      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            ANV_FROM_HANDLE(anv_image_view, iview,
                            write->pImageInfo[j].imageView);

            desc[j] = (struct anv_descriptor) {
               .type = write->descriptorType,
               .image_view = iview,
            };
         }
         break;

      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            ANV_FROM_HANDLE(anv_buffer_view, bview,
                            write->pTexelBufferView[j]);

            desc[j] = (struct anv_descriptor) {
               .type = write->descriptorType,
               .buffer_view = bview,
            };
         }
         break;

      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         anv_finishme("input attachments not implemented");
         break;

      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            assert(write->pBufferInfo[j].buffer);
            ANV_FROM_HANDLE(anv_buffer, buffer, write->pBufferInfo[j].buffer);
            assert(buffer);

            struct anv_buffer_view *view =
               &set->buffer_views[bind_layout->buffer_index];
            view += write->dstArrayElement + j;

            view->format =
               anv_isl_format_for_descriptor_type(write->descriptorType);
            view->bo = buffer->bo;
            view->offset = buffer->offset + write->pBufferInfo[j].offset;

            /* For buffers with dynamic offsets, we use the full possible
             * range in the surface state and do the actual range-checking
             * in the shader.
             */
            if (bind_layout->dynamic_offset_index >= 0 ||
                write->pBufferInfo[j].range == VK_WHOLE_SIZE)
               view->range = buffer->size - write->pBufferInfo[j].offset;
            else
               view->range = write->pBufferInfo[j].range;

            anv_fill_buffer_surface_state(device, view->surface_state,
                                          view->format,
                                          view->offset, view->range, 1);

            desc[j] = (struct anv_descriptor) {
               .type = write->descriptorType,
               .buffer_view = view,
            };
         }
         break;

      default:
         break;
      }
   }

   for (uint32_t i = 0; i < descriptorCopyCount; i++) {
      const VkCopyDescriptorSet *copy = &pDescriptorCopies[i];
      ANV_FROM_HANDLE(anv_descriptor_set, src, copy->srcSet);
      ANV_FROM_HANDLE(anv_descriptor_set, dst, copy->dstSet);

      const struct anv_descriptor_set_binding_layout *src_layout =
         &src->layout->binding[copy->srcBinding];
      struct anv_descriptor *src_desc =
         &src->descriptors[src_layout->descriptor_index];
      src_desc += copy->srcArrayElement;

      const struct anv_descriptor_set_binding_layout *dst_layout =
         &dst->layout->binding[copy->dstBinding];
      struct anv_descriptor *dst_desc =
         &dst->descriptors[dst_layout->descriptor_index];
      dst_desc += copy->dstArrayElement;

      for (uint32_t j = 0; j < copy->descriptorCount; j++)
         dst_desc[j] = src_desc[j];
   }
}