2 * Copyright © 2015 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
30 #include "anv_private.h"
33 * Descriptor set layouts.
36 VkResult
anv_CreateDescriptorSetLayout(
38 const VkDescriptorSetLayoutCreateInfo
* pCreateInfo
,
39 const VkAllocationCallbacks
* pAllocator
,
40 VkDescriptorSetLayout
* pSetLayout
)
42 ANV_FROM_HANDLE(anv_device
, device
, _device
);
43 struct anv_descriptor_set_layout
*set_layout
;
46 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO
);
48 uint32_t max_binding
= 0;
49 uint32_t immutable_sampler_count
= 0;
50 for (uint32_t j
= 0; j
< pCreateInfo
->bindingCount
; j
++) {
51 max_binding
= MAX2(max_binding
, pCreateInfo
->pBinding
[j
].binding
);
52 if (pCreateInfo
->pBinding
[j
].pImmutableSamplers
)
53 immutable_sampler_count
+= pCreateInfo
->pBinding
[j
].descriptorCount
;
56 size_t size
= sizeof(struct anv_descriptor_set_layout
) +
57 (max_binding
+ 1) * sizeof(set_layout
->binding
[0]) +
58 immutable_sampler_count
* sizeof(struct anv_sampler
*);
60 set_layout
= anv_alloc2(&device
->alloc
, pAllocator
, size
, 8,
61 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
63 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
65 /* We just allocate all the samplers at the end of the struct */
66 struct anv_sampler
**samplers
=
67 (struct anv_sampler
**)&set_layout
->binding
[max_binding
+ 1];
69 set_layout
->binding_count
= max_binding
+ 1;
70 set_layout
->shader_stages
= 0;
73 /* Initialize all binding_layout entries to -1 */
74 memset(set_layout
->binding
, -1,
75 (max_binding
+ 1) * sizeof(set_layout
->binding
[0]));
77 /* Initialize all samplers to 0 */
78 memset(samplers
, 0, immutable_sampler_count
* sizeof(*samplers
));
80 uint32_t sampler_count
[MESA_SHADER_STAGES
] = { 0, };
81 uint32_t surface_count
[MESA_SHADER_STAGES
] = { 0, };
82 uint32_t image_count
[MESA_SHADER_STAGES
] = { 0, };
83 uint32_t dynamic_offset_count
= 0;
85 for (uint32_t j
= 0; j
< pCreateInfo
->bindingCount
; j
++) {
86 const VkDescriptorSetLayoutBinding
*binding
= &pCreateInfo
->pBinding
[j
];
87 uint32_t b
= binding
->binding
;
89 assert(binding
->descriptorCount
> 0);
90 set_layout
->binding
[b
].array_size
= binding
->descriptorCount
;
91 set_layout
->binding
[b
].descriptor_index
= set_layout
->size
;
92 set_layout
->size
+= binding
->descriptorCount
;
94 switch (binding
->descriptorType
) {
95 case VK_DESCRIPTOR_TYPE_SAMPLER
:
96 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
:
97 for_each_bit(s
, binding
->stageFlags
) {
98 set_layout
->binding
[b
].stage
[s
].sampler_index
= sampler_count
[s
];
99 sampler_count
[s
] += binding
->descriptorCount
;
106 switch (binding
->descriptorType
) {
107 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
:
108 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE
:
109 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE
:
110 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER
:
111 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER
:
112 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER
:
113 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER
:
114 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC
:
115 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC
:
116 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT
:
117 for_each_bit(s
, binding
->stageFlags
) {
118 set_layout
->binding
[b
].stage
[s
].surface_index
= surface_count
[s
];
119 surface_count
[s
] += binding
->descriptorCount
;
126 switch (binding
->descriptorType
) {
127 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC
:
128 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC
:
129 set_layout
->binding
[b
].dynamic_offset_index
= dynamic_offset_count
;
130 dynamic_offset_count
+= binding
->descriptorCount
;
136 if (binding
->descriptorType
== VK_DESCRIPTOR_TYPE_STORAGE_IMAGE
) {
137 for_each_bit(s
, binding
->stageFlags
) {
138 set_layout
->binding
[b
].stage
[s
].image_index
= image_count
[s
];
139 image_count
[s
] += binding
->descriptorCount
;
143 if (binding
->pImmutableSamplers
) {
144 set_layout
->binding
[b
].immutable_samplers
= samplers
;
145 samplers
+= binding
->descriptorCount
;
147 for (uint32_t i
= 0; i
< binding
->descriptorCount
; i
++)
148 set_layout
->binding
[b
].immutable_samplers
[i
] =
149 anv_sampler_from_handle(binding
->pImmutableSamplers
[i
]);
151 set_layout
->binding
[b
].immutable_samplers
= NULL
;
154 set_layout
->shader_stages
|= binding
->stageFlags
;
157 set_layout
->dynamic_offset_count
= dynamic_offset_count
;
159 *pSetLayout
= anv_descriptor_set_layout_to_handle(set_layout
);
164 void anv_DestroyDescriptorSetLayout(
166 VkDescriptorSetLayout _set_layout
,
167 const VkAllocationCallbacks
* pAllocator
)
169 ANV_FROM_HANDLE(anv_device
, device
, _device
);
170 ANV_FROM_HANDLE(anv_descriptor_set_layout
, set_layout
, _set_layout
);
172 anv_free2(&device
->alloc
, pAllocator
, set_layout
);
/*
 * Pipeline layouts.  These have nothing to do with the pipeline.  They are
 * just multiple descriptor set layouts pasted together.
 */
180 VkResult
anv_CreatePipelineLayout(
182 const VkPipelineLayoutCreateInfo
* pCreateInfo
,
183 const VkAllocationCallbacks
* pAllocator
,
184 VkPipelineLayout
* pPipelineLayout
)
186 ANV_FROM_HANDLE(anv_device
, device
, _device
);
187 struct anv_pipeline_layout l
, *layout
;
189 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO
);
191 l
.num_sets
= pCreateInfo
->setLayoutCount
;
193 unsigned dynamic_offset_count
= 0;
195 memset(l
.stage
, 0, sizeof(l
.stage
));
196 for (uint32_t set
= 0; set
< pCreateInfo
->setLayoutCount
; set
++) {
197 ANV_FROM_HANDLE(anv_descriptor_set_layout
, set_layout
,
198 pCreateInfo
->pSetLayouts
[set
]);
199 l
.set
[set
].layout
= set_layout
;
201 l
.set
[set
].dynamic_offset_start
= dynamic_offset_count
;
202 for (uint32_t b
= 0; b
< set_layout
->binding_count
; b
++) {
203 if (set_layout
->binding
[b
].dynamic_offset_index
>= 0)
204 dynamic_offset_count
+= set_layout
->binding
[b
].array_size
;
207 for (gl_shader_stage s
= 0; s
< MESA_SHADER_STAGES
; s
++) {
208 l
.set
[set
].stage
[s
].surface_start
= l
.stage
[s
].surface_count
;
209 l
.set
[set
].stage
[s
].sampler_start
= l
.stage
[s
].sampler_count
;
210 l
.set
[set
].stage
[s
].image_start
= l
.stage
[s
].image_count
;
212 for (uint32_t b
= 0; b
< set_layout
->binding_count
; b
++) {
213 unsigned array_size
= set_layout
->binding
[b
].array_size
;
215 if (set_layout
->binding
[b
].stage
[s
].surface_index
>= 0) {
216 l
.stage
[s
].surface_count
+= array_size
;
218 if (set_layout
->binding
[b
].dynamic_offset_index
>= 0)
219 l
.stage
[s
].has_dynamic_offsets
= true;
222 if (set_layout
->binding
[b
].stage
[s
].sampler_index
>= 0)
223 l
.stage
[s
].sampler_count
+= array_size
;
225 if (set_layout
->binding
[b
].stage
[s
].image_index
>= 0)
226 l
.stage
[s
].image_count
+= array_size
;
231 unsigned num_bindings
= 0;
232 for (gl_shader_stage s
= 0; s
< MESA_SHADER_STAGES
; s
++) {
233 num_bindings
+= l
.stage
[s
].surface_count
+
234 l
.stage
[s
].sampler_count
+
235 l
.stage
[s
].image_count
;
238 size_t size
= sizeof(*layout
) + num_bindings
* sizeof(layout
->entries
[0]);
240 layout
= anv_alloc2(&device
->alloc
, pAllocator
, size
, 8,
241 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
243 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
245 /* Now we can actually build our surface and sampler maps */
246 struct anv_pipeline_binding
*entry
= layout
->entries
;
247 for (gl_shader_stage s
= 0; s
< MESA_SHADER_STAGES
; s
++) {
248 l
.stage
[s
].surface_to_descriptor
= entry
;
249 entry
+= l
.stage
[s
].surface_count
;
250 l
.stage
[s
].sampler_to_descriptor
= entry
;
251 entry
+= l
.stage
[s
].sampler_count
;
252 entry
+= l
.stage
[s
].image_count
;
256 for (uint32_t set
= 0; set
< pCreateInfo
->setLayoutCount
; set
++) {
257 struct anv_descriptor_set_layout
*set_layout
= l
.set
[set
].layout
;
259 for (uint32_t b
= 0; b
< set_layout
->binding_count
; b
++) {
260 unsigned array_size
= set_layout
->binding
[b
].array_size
;
261 unsigned set_offset
= set_layout
->binding
[b
].descriptor_index
;
263 if (set_layout
->binding
[b
].stage
[s
].surface_index
>= 0) {
264 assert(surface
== l
.set
[set
].stage
[s
].surface_start
+
265 set_layout
->binding
[b
].stage
[s
].surface_index
);
266 for (unsigned i
= 0; i
< array_size
; i
++) {
267 l
.stage
[s
].surface_to_descriptor
[surface
+ i
].set
= set
;
268 l
.stage
[s
].surface_to_descriptor
[surface
+ i
].offset
= set_offset
+ i
;
270 surface
+= array_size
;
273 if (set_layout
->binding
[b
].stage
[s
].sampler_index
>= 0) {
274 assert(sampler
== l
.set
[set
].stage
[s
].sampler_start
+
275 set_layout
->binding
[b
].stage
[s
].sampler_index
);
276 for (unsigned i
= 0; i
< array_size
; i
++) {
277 l
.stage
[s
].sampler_to_descriptor
[sampler
+ i
].set
= set
;
278 l
.stage
[s
].sampler_to_descriptor
[sampler
+ i
].offset
= set_offset
+ i
;
280 sampler
+= array_size
;
286 /* Finally, we're done setting it up, copy into the allocated version */
289 *pPipelineLayout
= anv_pipeline_layout_to_handle(layout
);
294 void anv_DestroyPipelineLayout(
296 VkPipelineLayout _pipelineLayout
,
297 const VkAllocationCallbacks
* pAllocator
)
299 ANV_FROM_HANDLE(anv_device
, device
, _device
);
300 ANV_FROM_HANDLE(anv_pipeline_layout
, pipeline_layout
, _pipelineLayout
);
302 anv_free2(&device
->alloc
, pAllocator
, pipeline_layout
);
306 * Descriptor pools. These are a no-op for now.
309 VkResult
anv_CreateDescriptorPool(
311 const VkDescriptorPoolCreateInfo
* pCreateInfo
,
312 const VkAllocationCallbacks
* pAllocator
,
313 VkDescriptorPool
* pDescriptorPool
)
315 anv_finishme("VkDescriptorPool is a stub");
316 *pDescriptorPool
= (VkDescriptorPool
)1;
320 void anv_DestroyDescriptorPool(
322 VkDescriptorPool _pool
,
323 const VkAllocationCallbacks
* pAllocator
)
325 anv_finishme("VkDescriptorPool is a stub: free the pool's descriptor sets");
328 VkResult
anv_ResetDescriptorPool(
330 VkDescriptorPool descriptorPool
,
331 VkDescriptorPoolResetFlags flags
)
333 anv_finishme("VkDescriptorPool is a stub: free the pool's descriptor sets");
338 anv_descriptor_set_create(struct anv_device
*device
,
339 const struct anv_descriptor_set_layout
*layout
,
340 struct anv_descriptor_set
**out_set
)
342 struct anv_descriptor_set
*set
;
343 size_t size
= sizeof(*set
) + layout
->size
* sizeof(set
->descriptors
[0]);
345 set
= anv_alloc(&device
->alloc
/* XXX: Use the pool */, size
, 8,
346 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
348 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
350 /* A descriptor set may not be 100% filled. Clear the set so we can can
351 * later detect holes in it.
353 memset(set
, 0, size
);
355 set
->layout
= layout
;
357 /* Go through and fill out immutable samplers if we have any */
358 struct anv_descriptor
*desc
= set
->descriptors
;
359 for (uint32_t b
= 0; b
< layout
->binding_count
; b
++) {
360 if (layout
->binding
[b
].immutable_samplers
) {
361 for (uint32_t i
= 0; i
< layout
->binding
[b
].array_size
; i
++)
362 desc
[i
].sampler
= layout
->binding
[b
].immutable_samplers
[i
];
364 desc
+= layout
->binding
[b
].array_size
;
373 anv_descriptor_set_destroy(struct anv_device
*device
,
374 struct anv_descriptor_set
*set
)
376 anv_free(&device
->alloc
/* XXX: Use the pool */, set
);
379 VkResult
anv_AllocateDescriptorSets(
381 const VkDescriptorSetAllocateInfo
* pAllocateInfo
,
382 VkDescriptorSet
* pDescriptorSets
)
384 ANV_FROM_HANDLE(anv_device
, device
, _device
);
386 VkResult result
= VK_SUCCESS
;
387 struct anv_descriptor_set
*set
;
390 for (i
= 0; i
< pAllocateInfo
->setLayoutCount
; i
++) {
391 ANV_FROM_HANDLE(anv_descriptor_set_layout
, layout
,
392 pAllocateInfo
->pSetLayouts
[i
]);
394 result
= anv_descriptor_set_create(device
, layout
, &set
);
395 if (result
!= VK_SUCCESS
)
398 pDescriptorSets
[i
] = anv_descriptor_set_to_handle(set
);
401 if (result
!= VK_SUCCESS
)
402 anv_FreeDescriptorSets(_device
, pAllocateInfo
->descriptorPool
,
408 VkResult
anv_FreeDescriptorSets(
410 VkDescriptorPool descriptorPool
,
412 const VkDescriptorSet
* pDescriptorSets
)
414 ANV_FROM_HANDLE(anv_device
, device
, _device
);
416 for (uint32_t i
= 0; i
< count
; i
++) {
417 ANV_FROM_HANDLE(anv_descriptor_set
, set
, pDescriptorSets
[i
]);
419 anv_descriptor_set_destroy(device
, set
);
425 void anv_UpdateDescriptorSets(
427 uint32_t descriptorWriteCount
,
428 const VkWriteDescriptorSet
* pDescriptorWrites
,
429 uint32_t descriptorCopyCount
,
430 const VkCopyDescriptorSet
* pDescriptorCopies
)
432 for (uint32_t i
= 0; i
< descriptorWriteCount
; i
++) {
433 const VkWriteDescriptorSet
*write
= &pDescriptorWrites
[i
];
434 ANV_FROM_HANDLE(anv_descriptor_set
, set
, write
->dstSet
);
435 const struct anv_descriptor_set_binding_layout
*bind_layout
=
436 &set
->layout
->binding
[write
->dstBinding
];
437 struct anv_descriptor
*desc
=
438 &set
->descriptors
[bind_layout
->descriptor_index
];
440 switch (write
->descriptorType
) {
441 case VK_DESCRIPTOR_TYPE_SAMPLER
:
442 for (uint32_t j
= 0; j
< write
->descriptorCount
; j
++) {
443 ANV_FROM_HANDLE(anv_sampler
, sampler
,
444 write
->pImageInfo
[j
].sampler
);
446 desc
[j
] = (struct anv_descriptor
) {
447 .type
= VK_DESCRIPTOR_TYPE_SAMPLER
,
453 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
:
454 for (uint32_t j
= 0; j
< write
->descriptorCount
; j
++) {
455 ANV_FROM_HANDLE(anv_image_view
, iview
,
456 write
->pImageInfo
[j
].imageView
);
457 ANV_FROM_HANDLE(anv_sampler
, sampler
,
458 write
->pImageInfo
[j
].sampler
);
460 desc
[j
].type
= VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
;
461 desc
[j
].image_view
= iview
;
463 /* If this descriptor has an immutable sampler, we don't want
467 desc
[j
].sampler
= sampler
;
471 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE
:
472 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE
:
473 for (uint32_t j
= 0; j
< write
->descriptorCount
; j
++) {
474 ANV_FROM_HANDLE(anv_image_view
, iview
,
475 write
->pImageInfo
[j
].imageView
);
477 desc
[j
] = (struct anv_descriptor
) {
478 .type
= write
->descriptorType
,
484 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER
:
485 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER
:
486 anv_finishme("texel buffers not implemented");
489 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT
:
490 anv_finishme("input attachments not implemented");
493 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER
:
494 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER
:
495 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC
:
496 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC
:
497 for (uint32_t j
= 0; j
< write
->descriptorCount
; j
++) {
498 assert(write
->pBufferInfo
[j
].buffer
);
499 ANV_FROM_HANDLE(anv_buffer
, buffer
, write
->pBufferInfo
[j
].buffer
);
502 desc
[j
] = (struct anv_descriptor
) {
503 .type
= write
->descriptorType
,
505 .offset
= write
->pBufferInfo
[j
].offset
,
506 .range
= write
->pBufferInfo
[j
].range
,
509 /* For buffers with dynamic offsets, we use the full possible
510 * range in the surface state and do the actual range-checking
513 if (bind_layout
->dynamic_offset_index
>= 0)
514 desc
[j
].range
= buffer
->size
- desc
[j
].offset
;
522 for (uint32_t i
= 0; i
< descriptorCopyCount
; i
++) {
523 const VkCopyDescriptorSet
*copy
= &pDescriptorCopies
[i
];
524 ANV_FROM_HANDLE(anv_descriptor_set
, src
, copy
->dstSet
);
525 ANV_FROM_HANDLE(anv_descriptor_set
, dest
, copy
->dstSet
);
526 for (uint32_t j
= 0; j
< copy
->descriptorCount
; j
++) {
527 dest
->descriptors
[copy
->dstBinding
+ j
] =
528 src
->descriptors
[copy
->srcBinding
+ j
];