/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

#include "anv_private.h"

#include "vk_format_info.h"
/** \file anv_cmd_buffer.c
 *
 * This file contains all of the stuff for emitting commands into a command
 * buffer.  This includes implementations of most of the vkCmd*
 * entrypoints.  This file is concerned entirely with state emission and
 * not with the command buffer data structure itself.  As far as this file
 * is concerned, most of anv_cmd_buffer is magic.
 */
/* TODO: These are taken from GLES.  We should check the Vulkan spec */
const struct anv_dynamic_state default_dynamic_state = {
   .viewport = {
      .count = 0,
   },
   .scissor = {
      .count = 0,
   },
   .line_width = 1.0f,
   .depth_bias = {
      .bias = 0.0f,
      .clamp = 0.0f,
      .slope = 0.0f,
   },
   .blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
   .depth_bounds = {
      .min = 0.0f,
      .max = 1.0f,
   },
   .stencil_compare_mask = {
      .front = ~0u,
      .back = ~0u,
   },
   .stencil_write_mask = {
      .front = ~0u,
      .back = ~0u,
   },
   .stencil_reference = {
      .front = 0u,
      .back = 0u,
   },
};
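
/* These defaults are installed into every freshly created or reset command
 * buffer by anv_cmd_state_init() below; the vkCmdSet* entrypoints and
 * pipeline binds then overwrite individual fields.
 */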
void
anv_dynamic_state_copy(struct anv_dynamic_state *dest,
                       const struct anv_dynamic_state *src,
                       uint32_t copy_mask)
{
   if (copy_mask & (1 << VK_DYNAMIC_STATE_VIEWPORT)) {
      dest->viewport.count = src->viewport.count;
      typed_memcpy(dest->viewport.viewports, src->viewport.viewports,
                   src->viewport.count);
   }

   if (copy_mask & (1 << VK_DYNAMIC_STATE_SCISSOR)) {
      dest->scissor.count = src->scissor.count;
      typed_memcpy(dest->scissor.scissors, src->scissor.scissors,
                   src->scissor.count);
   }

   if (copy_mask & (1 << VK_DYNAMIC_STATE_LINE_WIDTH))
      dest->line_width = src->line_width;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS))
      dest->depth_bias = src->depth_bias;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS))
      typed_memcpy(dest->blend_constants, src->blend_constants, 4);

   if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS))
      dest->depth_bounds = src->depth_bounds;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK))
      dest->stencil_compare_mask = src->stencil_compare_mask;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK))
      dest->stencil_write_mask = src->stencil_write_mask;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE))
      dest->stencil_reference = src->stencil_reference;
}
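
/* Illustrative use (a sketch): when a graphics pipeline is bound, only the
 * state the pipeline baked in (i.e. state not listed as dynamic at pipeline
 * creation time) is copied over the command buffer's dynamic state:
 *
 *    anv_dynamic_state_copy(&cmd_buffer->state.gfx.dynamic,
 *                           &pipeline->dynamic_state,
 *                           pipeline->dynamic_state_mask);
 */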
static void
anv_cmd_state_init(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_cmd_state *state = &cmd_buffer->state;

   memset(state, 0, sizeof(*state));

   state->current_pipeline = UINT32_MAX;
   state->restart_index = UINT32_MAX;
   state->gfx.dynamic = default_dynamic_state;
}
static void
anv_cmd_pipeline_state_finish(struct anv_cmd_buffer *cmd_buffer,
                              struct anv_cmd_pipeline_state *pipe_state)
{
   for (uint32_t i = 0; i < ARRAY_SIZE(pipe_state->push_descriptors); i++)
      vk_free(&cmd_buffer->pool->alloc, pipe_state->push_descriptors[i]);
}
static void
anv_cmd_state_finish(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_cmd_state *state = &cmd_buffer->state;

   anv_cmd_pipeline_state_finish(cmd_buffer, &state->gfx.base);
   anv_cmd_pipeline_state_finish(cmd_buffer, &state->compute.base);

   for (uint32_t i = 0; i < MESA_SHADER_STAGES; i++)
      vk_free(&cmd_buffer->pool->alloc, state->push_constants[i]);

   vk_free(&cmd_buffer->pool->alloc, state->attachments);
}
static void
anv_cmd_state_reset(struct anv_cmd_buffer *cmd_buffer)
{
   anv_cmd_state_finish(cmd_buffer);
   anv_cmd_state_init(cmd_buffer);
}
/**
 * This function updates the size of the push constant buffer we need to emit.
 * This is called in various parts of the driver to ensure that different
 * pieces of push constant data get emitted as needed.  However, it is
 * important that we never shrink the size of the buffer.  For example, a
 * compute shader dispatch will always call this for the base group id, which
 * has an offset in the push constant buffer that is smaller than the offset
 * for storage image data.  If the compute shader has storage images, we will
 * call this again with a larger size during binding table emission.  However,
 * if we dispatch the compute shader again without dirtying our descriptors,
 * we would still call this function with a smaller size for the base group
 * id, and not for the images, which would incorrectly shrink the size of the
 * push constant data we emit with that dispatch, making us drop the image
 * data.
 */
VkResult
anv_cmd_buffer_ensure_push_constants_size(struct anv_cmd_buffer *cmd_buffer,
                                          gl_shader_stage stage, uint32_t size)
{
   struct anv_push_constants **ptr = &cmd_buffer->state.push_constants[stage];

   if (*ptr == NULL) {
      *ptr = vk_alloc(&cmd_buffer->pool->alloc, size, 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (*ptr == NULL) {
         anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_HOST_MEMORY);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }
      (*ptr)->size = size;
   } else if ((*ptr)->size < size) {
      *ptr = vk_realloc(&cmd_buffer->pool->alloc, *ptr, size, 8,
                        VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (*ptr == NULL) {
         anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_HOST_MEMORY);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }
      (*ptr)->size = size;
   }

   return VK_SUCCESS;
}
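
/* A sketch of the grow-only behavior described above (the sizes here are
 * illustrative, not real driver offsets):
 *
 *    anv_cmd_buffer_ensure_push_constants_size(cmd_buffer, stage, 64);
 *    anv_cmd_buffer_ensure_push_constants_size(cmd_buffer, stage, 256);
 *    anv_cmd_buffer_ensure_push_constants_size(cmd_buffer, stage, 64);
 *
 * After the third call the buffer is still 256 bytes; shrinking it would
 * drop the data that lives at the higher offsets.
 */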
static VkResult anv_create_cmd_buffer(
    struct anv_device *                         device,
    struct anv_cmd_pool *                       pool,
    VkCommandBufferLevel                        level,
    VkCommandBuffer*                            pCommandBuffer)
{
   struct anv_cmd_buffer *cmd_buffer;
   VkResult result;

   cmd_buffer = vk_alloc(&pool->alloc, sizeof(*cmd_buffer), 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (cmd_buffer == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   cmd_buffer->batch.status = VK_SUCCESS;

   cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   cmd_buffer->device = device;
   cmd_buffer->pool = pool;
   cmd_buffer->level = level;

   result = anv_cmd_buffer_init_batch_bo_chain(cmd_buffer);
   if (result != VK_SUCCESS)
      goto fail;

   anv_state_stream_init(&cmd_buffer->surface_state_stream,
                         &device->surface_state_pool, 4096);
   anv_state_stream_init(&cmd_buffer->dynamic_state_stream,
                         &device->dynamic_state_pool, 16384);

   anv_cmd_state_init(cmd_buffer);

   if (pool) {
      list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
   } else {
      /* Init the pool_link so we can safely call list_del when we destroy
       * the command buffer
       */
      list_inithead(&cmd_buffer->pool_link);
   }

   *pCommandBuffer = anv_cmd_buffer_to_handle(cmd_buffer);

   return VK_SUCCESS;

 fail:
   vk_free(&cmd_buffer->pool->alloc, cmd_buffer);

   return result;
}
VkResult anv_AllocateCommandBuffers(
    VkDevice                                    _device,
    const VkCommandBufferAllocateInfo*          pAllocateInfo,
    VkCommandBuffer*                            pCommandBuffers)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_cmd_pool, pool, pAllocateInfo->commandPool);

   VkResult result = VK_SUCCESS;
   uint32_t i;

   for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
      result = anv_create_cmd_buffer(device, pool, pAllocateInfo->level,
                                     &pCommandBuffers[i]);
      if (result != VK_SUCCESS)
         break;
   }

   if (result != VK_SUCCESS) {
      anv_FreeCommandBuffers(_device, pAllocateInfo->commandPool,
                             i, pCommandBuffers);
      for (i = 0; i < pAllocateInfo->commandBufferCount; i++)
         pCommandBuffers[i] = VK_NULL_HANDLE;
   }

   return result;
}
static void
anv_cmd_buffer_destroy(struct anv_cmd_buffer *cmd_buffer)
{
   list_del(&cmd_buffer->pool_link);

   anv_cmd_buffer_fini_batch_bo_chain(cmd_buffer);

   anv_state_stream_finish(&cmd_buffer->surface_state_stream);
   anv_state_stream_finish(&cmd_buffer->dynamic_state_stream);

   anv_cmd_state_finish(cmd_buffer);

   vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
}
void anv_FreeCommandBuffers(
    VkDevice                                    device,
    VkCommandPool                               commandPool,
    uint32_t                                    commandBufferCount,
    const VkCommandBuffer*                      pCommandBuffers)
{
   for (uint32_t i = 0; i < commandBufferCount; i++) {
      ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, pCommandBuffers[i]);

      if (!cmd_buffer)
         continue;

      anv_cmd_buffer_destroy(cmd_buffer);
   }
}
VkResult
anv_cmd_buffer_reset(struct anv_cmd_buffer *cmd_buffer)
{
   cmd_buffer->usage_flags = 0;
   anv_cmd_buffer_reset_batch_bo_chain(cmd_buffer);
   anv_cmd_state_reset(cmd_buffer);

   anv_state_stream_finish(&cmd_buffer->surface_state_stream);
   anv_state_stream_init(&cmd_buffer->surface_state_stream,
                         &cmd_buffer->device->surface_state_pool, 4096);

   anv_state_stream_finish(&cmd_buffer->dynamic_state_stream);
   anv_state_stream_init(&cmd_buffer->dynamic_state_stream,
                         &cmd_buffer->device->dynamic_state_pool, 16384);

   return VK_SUCCESS;
}
VkResult anv_ResetCommandBuffer(
    VkCommandBuffer                             commandBuffer,
    VkCommandBufferResetFlags                   flags)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   return anv_cmd_buffer_reset(cmd_buffer);
}
#define anv_genX_call(devinfo, func, ...)             \
   switch ((devinfo)->gen) {                          \
   case 7:                                            \
      if ((devinfo)->is_haswell) {                    \
         gen75_##func(__VA_ARGS__);                   \
      } else {                                        \
         gen7_##func(__VA_ARGS__);                    \
      }                                               \
      break;                                          \
   case 8:                                            \
      gen8_##func(__VA_ARGS__);                       \
      break;                                          \
   case 9:                                            \
      gen9_##func(__VA_ARGS__);                       \
      break;                                          \
   case 10:                                           \
      gen10_##func(__VA_ARGS__);                      \
      break;                                          \
   case 11:                                           \
      gen11_##func(__VA_ARGS__);                      \
      break;                                          \
   default:                                           \
      assert(!"Unknown hardware generation");         \
   }
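
/* For example, on a gen9 (Skylake) device,
 *
 *    anv_genX_call(&device->info, cmd_buffer_emit_state_base_address,
 *                  cmd_buffer);
 *
 * dispatches to gen9_cmd_buffer_emit_state_base_address(cmd_buffer).
 */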
void
anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer)
{
   anv_genX_call(&cmd_buffer->device->info,
                 cmd_buffer_emit_state_base_address,
                 cmd_buffer);
}
void
anv_cmd_buffer_mark_image_written(struct anv_cmd_buffer *cmd_buffer,
                                  const struct anv_image *image,
                                  VkImageAspectFlagBits aspect,
                                  enum isl_aux_usage aux_usage,
                                  uint32_t level,
                                  uint32_t base_layer,
                                  uint32_t layer_count)
{
   anv_genX_call(&cmd_buffer->device->info,
                 cmd_buffer_mark_image_written,
                 cmd_buffer, image, aspect, aux_usage,
                 level, base_layer, layer_count);
}
void
anv_cmd_emit_conditional_render_predicate(struct anv_cmd_buffer *cmd_buffer)
{
   anv_genX_call(&cmd_buffer->device->info,
                 cmd_emit_conditional_render_predicate,
                 cmd_buffer);
}
void anv_CmdBindPipeline(
    VkCommandBuffer                             commandBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    VkPipeline                                  _pipeline)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);

   switch (pipelineBindPoint) {
   case VK_PIPELINE_BIND_POINT_COMPUTE:
      cmd_buffer->state.compute.base.pipeline = pipeline;
      cmd_buffer->state.compute.pipeline_dirty = true;
      cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
      cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
      break;

   case VK_PIPELINE_BIND_POINT_GRAPHICS:
      cmd_buffer->state.gfx.base.pipeline = pipeline;
      cmd_buffer->state.gfx.vb_dirty |= pipeline->vb_used;
      cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_PIPELINE;
      cmd_buffer->state.push_constants_dirty |= pipeline->active_stages;
      cmd_buffer->state.descriptors_dirty |= pipeline->active_stages;

      /* Apply the dynamic state from the pipeline */
      cmd_buffer->state.gfx.dirty |= pipeline->dynamic_state_mask;
      anv_dynamic_state_copy(&cmd_buffer->state.gfx.dynamic,
                             &pipeline->dynamic_state,
                             pipeline->dynamic_state_mask);
      break;

   default:
      assert(!"invalid bind point");
      break;
   }
}
void anv_CmdSetViewport(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    firstViewport,
    uint32_t                                    viewportCount,
    const VkViewport*                           pViewports)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   const uint32_t total_count = firstViewport + viewportCount;
   if (cmd_buffer->state.gfx.dynamic.viewport.count < total_count)
      cmd_buffer->state.gfx.dynamic.viewport.count = total_count;

   memcpy(cmd_buffer->state.gfx.dynamic.viewport.viewports + firstViewport,
          pViewports, viewportCount * sizeof(*pViewports));

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_VIEWPORT;
}
void anv_CmdSetScissor(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    firstScissor,
    uint32_t                                    scissorCount,
    const VkRect2D*                             pScissors)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   const uint32_t total_count = firstScissor + scissorCount;
   if (cmd_buffer->state.gfx.dynamic.scissor.count < total_count)
      cmd_buffer->state.gfx.dynamic.scissor.count = total_count;

   memcpy(cmd_buffer->state.gfx.dynamic.scissor.scissors + firstScissor,
          pScissors, scissorCount * sizeof(*pScissors));

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_SCISSOR;
}
void anv_CmdSetLineWidth(
    VkCommandBuffer                             commandBuffer,
    float                                       lineWidth)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.gfx.dynamic.line_width = lineWidth;
   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
}
void anv_CmdSetDepthBias(
    VkCommandBuffer                             commandBuffer,
    float                                       depthBiasConstantFactor,
    float                                       depthBiasClamp,
    float                                       depthBiasSlopeFactor)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.gfx.dynamic.depth_bias.bias = depthBiasConstantFactor;
   cmd_buffer->state.gfx.dynamic.depth_bias.clamp = depthBiasClamp;
   cmd_buffer->state.gfx.dynamic.depth_bias.slope = depthBiasSlopeFactor;

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
}
void anv_CmdSetBlendConstants(
    VkCommandBuffer                             commandBuffer,
    const float                                 blendConstants[4])
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   memcpy(cmd_buffer->state.gfx.dynamic.blend_constants,
          blendConstants, sizeof(float) * 4);

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS;
}
void anv_CmdSetDepthBounds(
    VkCommandBuffer                             commandBuffer,
    float                                       minDepthBounds,
    float                                       maxDepthBounds)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.gfx.dynamic.depth_bounds.min = minDepthBounds;
   cmd_buffer->state.gfx.dynamic.depth_bounds.max = maxDepthBounds;

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS;
}
void anv_CmdSetStencilCompareMask(
    VkCommandBuffer                             commandBuffer,
    VkStencilFaceFlags                          faceMask,
    uint32_t                                    compareMask)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd_buffer->state.gfx.dynamic.stencil_compare_mask.front = compareMask;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd_buffer->state.gfx.dynamic.stencil_compare_mask.back = compareMask;

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
}
void anv_CmdSetStencilWriteMask(
    VkCommandBuffer                             commandBuffer,
    VkStencilFaceFlags                          faceMask,
    uint32_t                                    writeMask)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd_buffer->state.gfx.dynamic.stencil_write_mask.front = writeMask;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd_buffer->state.gfx.dynamic.stencil_write_mask.back = writeMask;

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
}
void anv_CmdSetStencilReference(
    VkCommandBuffer                             commandBuffer,
    VkStencilFaceFlags                          faceMask,
    uint32_t                                    reference)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd_buffer->state.gfx.dynamic.stencil_reference.front = reference;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd_buffer->state.gfx.dynamic.stencil_reference.back = reference;

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
}
void
anv_cmd_buffer_bind_descriptor_set(struct anv_cmd_buffer *cmd_buffer,
                                   VkPipelineBindPoint bind_point,
                                   struct anv_pipeline_layout *layout,
                                   uint32_t set_index,
                                   struct anv_descriptor_set *set,
                                   uint32_t *dynamic_offset_count,
                                   const uint32_t **dynamic_offsets)
{
   struct anv_descriptor_set_layout *set_layout =
      layout->set[set_index].layout;

   struct anv_cmd_pipeline_state *pipe_state;
   if (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE) {
      pipe_state = &cmd_buffer->state.compute.base;
   } else {
      assert(bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS);
      pipe_state = &cmd_buffer->state.gfx.base;
   }
   pipe_state->descriptors[set_index] = set;

   if (dynamic_offsets) {
      if (set_layout->dynamic_offset_count > 0) {
         uint32_t dynamic_offset_start =
            layout->set[set_index].dynamic_offset_start;

         /* Assert that everything is in range */
         assert(set_layout->dynamic_offset_count <= *dynamic_offset_count);
         assert(dynamic_offset_start + set_layout->dynamic_offset_count <=
                ARRAY_SIZE(pipe_state->dynamic_offsets));

         typed_memcpy(&pipe_state->dynamic_offsets[dynamic_offset_start],
                      *dynamic_offsets, set_layout->dynamic_offset_count);

         *dynamic_offsets += set_layout->dynamic_offset_count;
         *dynamic_offset_count -= set_layout->dynamic_offset_count;
      }
   }

   if (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE) {
      cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
   } else {
      assert(bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS);
      cmd_buffer->state.descriptors_dirty |=
         set_layout->shader_stages & VK_SHADER_STAGE_ALL_GRAPHICS;
   }

   /* Pipeline layout objects are required to live at least while any command
    * buffers that use them are in recording state.  We need to grab a
    * reference to the pipeline layout being bound here so we can compute
    * correct dynamic offsets for VK_DESCRIPTOR_TYPE_*_DYNAMIC in
    * dynamic_offset_for_binding() when we record draw commands that come
    * after this.
    */
   pipe_state->layout = layout;
}
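
/* Note on the dynamic offset handling above: vkCmdBindDescriptorSets hands
 * us one flat array of dynamic offsets covering all of the sets being
 * bound, so *dynamic_offsets and *dynamic_offset_count are advanced in
 * place and each set consumes its set_layout->dynamic_offset_count entries
 * from the front, leaving the remainder for the sets that follow.
 */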
void anv_CmdBindDescriptorSets(
    VkCommandBuffer                             commandBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    VkPipelineLayout                            _layout,
    uint32_t                                    firstSet,
    uint32_t                                    descriptorSetCount,
    const VkDescriptorSet*                      pDescriptorSets,
    uint32_t                                    dynamicOffsetCount,
    const uint32_t*                             pDynamicOffsets)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_pipeline_layout, layout, _layout);

   assert(firstSet + descriptorSetCount <= MAX_SETS);

   for (uint32_t i = 0; i < descriptorSetCount; i++) {
      ANV_FROM_HANDLE(anv_descriptor_set, set, pDescriptorSets[i]);
      anv_cmd_buffer_bind_descriptor_set(cmd_buffer, pipelineBindPoint,
                                         layout, firstSet + i, set,
                                         &dynamicOffsetCount,
                                         &pDynamicOffsets);
   }
}
void anv_CmdBindVertexBuffers(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    firstBinding,
    uint32_t                                    bindingCount,
    const VkBuffer*                             pBuffers,
    const VkDeviceSize*                         pOffsets)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   struct anv_vertex_binding *vb = cmd_buffer->state.vertex_bindings;

   /* We have to defer setting up the vertex buffers since we need the
    * buffer stride from the pipeline.
    */

   assert(firstBinding + bindingCount <= MAX_VBS);
   for (uint32_t i = 0; i < bindingCount; i++) {
      vb[firstBinding + i].buffer = anv_buffer_from_handle(pBuffers[i]);
      vb[firstBinding + i].offset = pOffsets[i];
      cmd_buffer->state.gfx.vb_dirty |= 1 << (firstBinding + i);
   }
}
void anv_CmdBindTransformFeedbackBuffersEXT(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    firstBinding,
    uint32_t                                    bindingCount,
    const VkBuffer*                             pBuffers,
    const VkDeviceSize*                         pOffsets,
    const VkDeviceSize*                         pSizes)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   struct anv_xfb_binding *xfb = cmd_buffer->state.xfb_bindings;

   /* We have to defer setting up the transform feedback buffers since we
    * need the buffer stride from the pipeline.
    */

   assert(firstBinding + bindingCount <= MAX_XFB_BUFFERS);
   for (uint32_t i = 0; i < bindingCount; i++) {
      if (pBuffers[i] == VK_NULL_HANDLE) {
         xfb[firstBinding + i].buffer = NULL;
      } else {
         ANV_FROM_HANDLE(anv_buffer, buffer, pBuffers[i]);
         xfb[firstBinding + i].buffer = buffer;
         xfb[firstBinding + i].offset = pOffsets[i];
         xfb[firstBinding + i].size =
            anv_buffer_get_range(buffer, pOffsets[i],
                                 pSizes ? pSizes[i] : VK_WHOLE_SIZE);
      }
   }
}
enum isl_format
anv_isl_format_for_descriptor_type(VkDescriptorType type)
{
   switch (type) {
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      return ISL_FORMAT_R32G32B32A32_FLOAT;

   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
      return ISL_FORMAT_RAW;

   default:
      unreachable("Invalid descriptor type");
   }
}
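
/* Uniform buffers are read through a typed RGBA32F surface, while storage
 * buffers get a raw surface so that the untyped read/write messages can
 * address the buffer at byte granularity.
 */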
struct anv_state
anv_cmd_buffer_emit_dynamic(struct anv_cmd_buffer *cmd_buffer,
                            const void *data, uint32_t size, uint32_t alignment)
{
   struct anv_state state;

   state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, alignment);
   memcpy(state.map, data, size);

   VG(VALGRIND_CHECK_MEM_IS_DEFINED(state.map, size));

   return state;
}
struct anv_state
anv_cmd_buffer_merge_dynamic(struct anv_cmd_buffer *cmd_buffer,
                             uint32_t *a, uint32_t *b,
                             uint32_t dwords, uint32_t alignment)
{
   struct anv_state state;
   uint32_t *p;

   state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                              dwords * 4, alignment);
   p = state.map;
   for (uint32_t i = 0; i < dwords; i++)
      p[i] = a[i] | b[i];

   VG(VALGRIND_CHECK_MEM_IS_DEFINED(p, dwords * 4));

   return state;
}
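
/* Illustrative use (a sketch): the gen-specific emit paths pack one copy of
 * a hardware packet from the pipeline and another from dynamic state, then
 * OR the two DWORD arrays together here to form the packet that actually
 * gets emitted.
 */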
static uint32_t
anv_push_constant_value(struct anv_push_constants *data, uint32_t param)
{
   if (BRW_PARAM_IS_BUILTIN(param)) {
      switch (param) {
      case BRW_PARAM_BUILTIN_ZERO:
         return 0;
      case BRW_PARAM_BUILTIN_BASE_WORK_GROUP_ID_X:
         return data->base_work_group_id[0];
      case BRW_PARAM_BUILTIN_BASE_WORK_GROUP_ID_Y:
         return data->base_work_group_id[1];
      case BRW_PARAM_BUILTIN_BASE_WORK_GROUP_ID_Z:
         return data->base_work_group_id[2];
      default:
         unreachable("Invalid param builtin");
      }
   } else {
      uint32_t offset = ANV_PARAM_PUSH_OFFSET(param);
      assert(offset % sizeof(uint32_t) == 0);
      if (offset < data->size)
         return *(uint32_t *)((uint8_t *)data + offset);
      else
         return 0;
   }
}
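
/* Each param is either a BRW_PARAM_BUILTIN_* token, resolved directly
 * above, or encodes a byte offset into the push constant data (extracted
 * with ANV_PARAM_PUSH_OFFSET).  Out-of-range offsets read as zero, so a
 * shader compiled with more params than the client has pushed still sees
 * defined values.
 */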
struct anv_state
anv_cmd_buffer_push_constants(struct anv_cmd_buffer *cmd_buffer,
                              gl_shader_stage stage)
{
   struct anv_pipeline *pipeline = cmd_buffer->state.gfx.base.pipeline;

   /* If we don't have this stage, bail. */
   if (!anv_pipeline_has_stage(pipeline, stage))
      return (struct anv_state) { .offset = 0 };

   struct anv_push_constants *data =
      cmd_buffer->state.push_constants[stage];
   const struct brw_stage_prog_data *prog_data =
      pipeline->shaders[stage]->prog_data;

   /* If we don't actually have any push constants, bail. */
   if (data == NULL || prog_data == NULL || prog_data->nr_params == 0)
      return (struct anv_state) { .offset = 0 };

   struct anv_state state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                         prog_data->nr_params * sizeof(float),
                                         32 /* bottom 5 bits MBZ */);

   /* Walk through the param array and fill the buffer with data */
   uint32_t *u32_map = state.map;
   for (unsigned i = 0; i < prog_data->nr_params; i++)
      u32_map[i] = anv_push_constant_value(data, prog_data->param[i]);

   return state;
}
struct anv_state
anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_push_constants *data =
      cmd_buffer->state.push_constants[MESA_SHADER_COMPUTE];
   struct anv_pipeline *pipeline = cmd_buffer->state.compute.base.pipeline;
   const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
   const struct brw_stage_prog_data *prog_data = &cs_prog_data->base;

   /* If we don't actually have any push constants, bail. */
   if (cs_prog_data->push.total.size == 0)
      return (struct anv_state) { .offset = 0 };

   const unsigned push_constant_alignment =
      cmd_buffer->device->info.gen < 8 ? 32 : 64;
   const unsigned aligned_total_push_constants_size =
      ALIGN(cs_prog_data->push.total.size, push_constant_alignment);
   struct anv_state state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                         aligned_total_push_constants_size,
                                         push_constant_alignment);

   /* Walk through the param array and fill the buffer with data */
   uint32_t *u32_map = state.map;

   if (cs_prog_data->push.cross_thread.size > 0) {
      for (unsigned i = 0;
           i < cs_prog_data->push.cross_thread.dwords;
           i++) {
         assert(prog_data->param[i] != BRW_PARAM_BUILTIN_SUBGROUP_ID);
         u32_map[i] = anv_push_constant_value(data, prog_data->param[i]);
      }
   }

   if (cs_prog_data->push.per_thread.size > 0) {
      for (unsigned t = 0; t < cs_prog_data->threads; t++) {
         unsigned dst =
            8 * (cs_prog_data->push.per_thread.regs * t +
                 cs_prog_data->push.cross_thread.regs);
         unsigned src = cs_prog_data->push.cross_thread.dwords;
         for ( ; src < prog_data->nr_params; src++, dst++) {
            if (prog_data->param[src] == BRW_PARAM_BUILTIN_SUBGROUP_ID) {
               u32_map[dst] = t;
            } else {
               u32_map[dst] =
                  anv_push_constant_value(data, prog_data->param[src]);
            }
         }
      }
   }

   return state;
}
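
/* Layout sketch for the compute push constant buffer: the cross-thread
 * constants come first as a single shared copy, followed by one per-thread
 * block per HW thread.  `dst` above is a DWORD index, hence the factor of
 * 8 * regs (one 256-bit register holds 8 DWORDs).
 * BRW_PARAM_BUILTIN_SUBGROUP_ID is the one param that must live in the
 * per-thread block, since it takes a different value (t) in each thread's
 * copy.
 */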
void anv_CmdPushConstants(
    VkCommandBuffer                             commandBuffer,
    VkPipelineLayout                            layout,
    VkShaderStageFlags                          stageFlags,
    uint32_t                                    offset,
    uint32_t                                    size,
    const void*                                 pValues)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   anv_foreach_stage(stage, stageFlags) {
      VkResult result =
         anv_cmd_buffer_ensure_push_constant_field(cmd_buffer,
                                                   stage, client_data);
      if (result != VK_SUCCESS)
         continue;

      memcpy(cmd_buffer->state.push_constants[stage]->client_data + offset,
             pValues, size);
   }

   cmd_buffer->state.push_constants_dirty |= stageFlags;
}
VkResult anv_CreateCommandPool(
    VkDevice                                    _device,
    const VkCommandPoolCreateInfo*              pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkCommandPool*                              pCmdPool)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_cmd_pool *pool;

   pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (pool == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   if (pAllocator)
      pool->alloc = *pAllocator;
   else
      pool->alloc = device->alloc;

   list_inithead(&pool->cmd_buffers);

   *pCmdPool = anv_cmd_pool_to_handle(pool);

   return VK_SUCCESS;
}
void anv_DestroyCommandPool(
    VkDevice                                    _device,
    VkCommandPool                               commandPool,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_cmd_pool, pool, commandPool);

   if (!pool)
      return;

   list_for_each_entry_safe(struct anv_cmd_buffer, cmd_buffer,
                            &pool->cmd_buffers, pool_link) {
      anv_cmd_buffer_destroy(cmd_buffer);
   }

   vk_free2(&device->alloc, pAllocator, pool);
}
VkResult anv_ResetCommandPool(
    VkDevice                                    device,
    VkCommandPool                               commandPool,
    VkCommandPoolResetFlags                     flags)
{
   ANV_FROM_HANDLE(anv_cmd_pool, pool, commandPool);

   list_for_each_entry(struct anv_cmd_buffer, cmd_buffer,
                       &pool->cmd_buffers, pool_link) {
      anv_cmd_buffer_reset(cmd_buffer);
   }

   return VK_SUCCESS;
}
void anv_TrimCommandPool(
    VkDevice                                    device,
    VkCommandPool                               commandPool,
    VkCommandPoolTrimFlags                      flags)
{
   /* Nothing for us to do here.  Our pools stay pretty tidy. */
}
/**
 * Return NULL if the current subpass has no depthstencil attachment.
 */
const struct anv_image_view *
anv_cmd_buffer_get_depth_stencil_view(const struct anv_cmd_buffer *cmd_buffer)
{
   const struct anv_subpass *subpass = cmd_buffer->state.subpass;
   const struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;

   if (subpass->depth_stencil_attachment == NULL)
      return NULL;

   const struct anv_image_view *iview =
      fb->attachments[subpass->depth_stencil_attachment->attachment];

   assert(iview->aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT |
                                VK_IMAGE_ASPECT_STENCIL_BIT));

   return iview;
}
static struct anv_push_descriptor_set *
anv_cmd_buffer_get_push_descriptor_set(struct anv_cmd_buffer *cmd_buffer,
                                       VkPipelineBindPoint bind_point,
                                       uint32_t set)
{
   struct anv_cmd_pipeline_state *pipe_state;
   if (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE) {
      pipe_state = &cmd_buffer->state.compute.base;
   } else {
      assert(bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS);
      pipe_state = &cmd_buffer->state.gfx.base;
   }

   struct anv_push_descriptor_set **push_set =
      &pipe_state->push_descriptors[set];

   if (*push_set == NULL) {
      *push_set = vk_alloc(&cmd_buffer->pool->alloc,
                           sizeof(struct anv_push_descriptor_set), 8,
                           VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (*push_set == NULL) {
         anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_HOST_MEMORY);
         return NULL;
      }
   }

   return *push_set;
}
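
/* The push descriptor set is allocated lazily per (bind point, set index)
 * pair and reused by subsequent pushes; it is freed by
 * anv_cmd_pipeline_state_finish() when the command buffer is reset or
 * destroyed.
 */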
void anv_CmdPushDescriptorSetKHR(
    VkCommandBuffer                             commandBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    VkPipelineLayout                            _layout,
    uint32_t                                    _set,
    uint32_t                                    descriptorWriteCount,
    const VkWriteDescriptorSet*                 pDescriptorWrites)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_pipeline_layout, layout, _layout);

   assert(_set < MAX_SETS);

   struct anv_descriptor_set_layout *set_layout = layout->set[_set].layout;

   struct anv_push_descriptor_set *push_set =
      anv_cmd_buffer_get_push_descriptor_set(cmd_buffer,
                                             pipelineBindPoint, _set);
   if (!push_set)
      return;

   struct anv_descriptor_set *set = &push_set->set;

   set->layout = set_layout;
   set->size = anv_descriptor_set_layout_size(set_layout);
   set->buffer_count = set_layout->buffer_count;
   set->buffer_views = push_set->buffer_views;

   /* Go through the user supplied descriptors. */
   for (uint32_t i = 0; i < descriptorWriteCount; i++) {
      const VkWriteDescriptorSet *write = &pDescriptorWrites[i];

      switch (write->descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            anv_descriptor_set_write_image_view(cmd_buffer->device, set,
                                                write->pImageInfo + j,
                                                write->descriptorType,
                                                write->dstBinding,
                                                write->dstArrayElement + j);
         }
         break;

      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            ANV_FROM_HANDLE(anv_buffer_view, bview,
                            write->pTexelBufferView[j]);

            anv_descriptor_set_write_buffer_view(cmd_buffer->device, set,
                                                 write->descriptorType,
                                                 bview,
                                                 write->dstBinding,
                                                 write->dstArrayElement + j);
         }
         break;

      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            assert(write->pBufferInfo[j].buffer);
            ANV_FROM_HANDLE(anv_buffer, buffer, write->pBufferInfo[j].buffer);

            anv_descriptor_set_write_buffer(cmd_buffer->device, set,
                                            &cmd_buffer->surface_state_stream,
                                            write->descriptorType,
                                            buffer,
                                            write->dstBinding,
                                            write->dstArrayElement + j,
                                            write->pBufferInfo[j].offset,
                                            write->pBufferInfo[j].range);
         }
         break;

      default:
         break;
      }
   }

   anv_cmd_buffer_bind_descriptor_set(cmd_buffer, pipelineBindPoint,
                                      layout, _set, set, NULL, NULL);
}
void anv_CmdPushDescriptorSetWithTemplateKHR(
    VkCommandBuffer                             commandBuffer,
    VkDescriptorUpdateTemplate                  descriptorUpdateTemplate,
    VkPipelineLayout                            _layout,
    uint32_t                                    _set,
    const void*                                 pData)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_descriptor_update_template, template,
                   descriptorUpdateTemplate);
   ANV_FROM_HANDLE(anv_pipeline_layout, layout, _layout);

   assert(_set < MAX_PUSH_DESCRIPTORS);

   struct anv_descriptor_set_layout *set_layout = layout->set[_set].layout;

   struct anv_push_descriptor_set *push_set =
      anv_cmd_buffer_get_push_descriptor_set(cmd_buffer,
                                             template->bind_point, _set);
   if (!push_set)
      return;

   struct anv_descriptor_set *set = &push_set->set;

   set->layout = set_layout;
   set->size = anv_descriptor_set_layout_size(set_layout);
   set->buffer_count = set_layout->buffer_count;
   set->buffer_views = push_set->buffer_views;

   anv_descriptor_set_write_template(cmd_buffer->device, set,
                                     &cmd_buffer->surface_state_stream,
                                     template,
                                     pData);

   anv_cmd_buffer_bind_descriptor_set(cmd_buffer, template->bind_point,
                                      layout, _set, set, NULL, NULL);
}
void anv_CmdSetDeviceMask(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    deviceMask)
{
   /* No-op */
}