/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_private.h"

#include "vk_format_info.h"

/** \file anv_cmd_buffer.c
 *
 * This file contains all of the stuff for emitting commands into a command
 * buffer. This includes implementations of most of the vkCmd*
 * entrypoints. This file is concerned entirely with state emission and
 * not with the command buffer data structure itself. As far as this file
 * is concerned, most of anv_cmd_buffer is magic.
 */

/* TODO: These are taken from GLES. We should check the Vulkan spec */
const struct anv_dynamic_state default_dynamic_state = {
   .viewport = {
      .count = 0,
   },
   .scissor = {
      .count = 0,
   },
   .line_width = 1.0f,
   .depth_bias = {
      .bias = 0.0f,
      .clamp = 0.0f,
      .slope = 0.0f,
   },
   .blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
   .depth_bounds = {
      .min = 0.0f,
      .max = 1.0f,
   },
   .stencil_compare_mask = {
      .front = ~0u,
      .back = ~0u,
   },
   .stencil_write_mask = {
      .front = ~0u,
      .back = ~0u,
   },
   .stencil_reference = {
      .front = 0u,
      .back = 0u,
   },
};
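
/* Copy the pieces of dynamic state selected by copy_mask (a bitfield of
 * 1 << VK_DYNAMIC_STATE_* values) from src to dest, leaving the unselected
 * fields of dest untouched.
 */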
void
anv_dynamic_state_copy(struct anv_dynamic_state *dest,
                       const struct anv_dynamic_state *src,
                       uint32_t copy_mask)
{
   if (copy_mask & (1 << VK_DYNAMIC_STATE_VIEWPORT)) {
      dest->viewport.count = src->viewport.count;
      typed_memcpy(dest->viewport.viewports, src->viewport.viewports,
                   src->viewport.count);
   }

   if (copy_mask & (1 << VK_DYNAMIC_STATE_SCISSOR)) {
      dest->scissor.count = src->scissor.count;
      typed_memcpy(dest->scissor.scissors, src->scissor.scissors,
                   src->scissor.count);
   }

   if (copy_mask & (1 << VK_DYNAMIC_STATE_LINE_WIDTH))
      dest->line_width = src->line_width;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS))
      dest->depth_bias = src->depth_bias;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS))
      typed_memcpy(dest->blend_constants, src->blend_constants, 4);

   if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS))
      dest->depth_bounds = src->depth_bounds;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK))
      dest->stencil_compare_mask = src->stencil_compare_mask;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK))
      dest->stencil_write_mask = src->stencil_write_mask;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE))
      dest->stencil_reference = src->stencil_reference;
}
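
/* Wipe all command buffer state back to its freshly-allocated defaults.
 * current_pipeline and restart_index start out as UINT32_MAX so that the
 * first value actually programmed is never mistaken for what the hardware
 * already has.
 */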
static void
anv_cmd_state_init(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_cmd_state *state = &cmd_buffer->state;

   memset(state, 0, sizeof(*state));

   state->current_pipeline = UINT32_MAX;
   state->restart_index = UINT32_MAX;
   state->gfx.dynamic = default_dynamic_state;
}

static void
anv_cmd_pipeline_state_finish(struct anv_cmd_buffer *cmd_buffer,
                              struct anv_cmd_pipeline_state *pipe_state)
{
   for (uint32_t i = 0; i < ARRAY_SIZE(pipe_state->push_descriptors); i++) {
      if (pipe_state->push_descriptors[i]) {
         anv_descriptor_set_layout_unref(cmd_buffer->device,
                                         pipe_state->push_descriptors[i]->set.layout);
         vk_free(&cmd_buffer->pool->alloc, pipe_state->push_descriptors[i]);
      }
   }
}

static void
anv_cmd_state_finish(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_cmd_state *state = &cmd_buffer->state;

   anv_cmd_pipeline_state_finish(cmd_buffer, &state->gfx.base);
   anv_cmd_pipeline_state_finish(cmd_buffer, &state->compute.base);

   vk_free(&cmd_buffer->pool->alloc, state->attachments);
}

static void
anv_cmd_state_reset(struct anv_cmd_buffer *cmd_buffer)
{
   anv_cmd_state_finish(cmd_buffer);
   anv_cmd_state_init(cmd_buffer);
}
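
/* Allocate and initialize one command buffer: its batch BO chain, the
 * surface and dynamic state streams it sub-allocates from, and its link in
 * the pool's list of command buffers.
 */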
static VkResult anv_create_cmd_buffer(
    struct anv_device *                         device,
    struct anv_cmd_pool *                       pool,
    VkCommandBufferLevel                        level,
    VkCommandBuffer*                            pCommandBuffer)
{
   struct anv_cmd_buffer *cmd_buffer;
   VkResult result;

   cmd_buffer = vk_alloc(&pool->alloc, sizeof(*cmd_buffer), 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (cmd_buffer == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   cmd_buffer->batch.status = VK_SUCCESS;

   cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   cmd_buffer->device = device;
   cmd_buffer->pool = pool;
   cmd_buffer->level = level;

   result = anv_cmd_buffer_init_batch_bo_chain(cmd_buffer);
   if (result != VK_SUCCESS)
      goto fail;

   anv_state_stream_init(&cmd_buffer->surface_state_stream,
                         &device->surface_state_pool, 4096);
   anv_state_stream_init(&cmd_buffer->dynamic_state_stream,
                         &device->dynamic_state_pool, 16384);

   anv_cmd_state_init(cmd_buffer);

   if (pool) {
      list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
   } else {
      /* Init the pool_link so we can safely call list_del when we destroy
       * the command buffer
       */
      list_inithead(&cmd_buffer->pool_link);
   }

   *pCommandBuffer = anv_cmd_buffer_to_handle(cmd_buffer);

   return VK_SUCCESS;

 fail:
   vk_free(&cmd_buffer->pool->alloc, cmd_buffer);

   return result;
}

VkResult anv_AllocateCommandBuffers(
    VkDevice                                    _device,
    const VkCommandBufferAllocateInfo*          pAllocateInfo,
    VkCommandBuffer*                            pCommandBuffers)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_cmd_pool, pool, pAllocateInfo->commandPool);

   VkResult result = VK_SUCCESS;
   uint32_t i;

   for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
      result = anv_create_cmd_buffer(device, pool, pAllocateInfo->level,
                                     &pCommandBuffers[i]);
      if (result != VK_SUCCESS)
         break;
   }

   if (result != VK_SUCCESS) {
      anv_FreeCommandBuffers(_device, pAllocateInfo->commandPool,
                             i, pCommandBuffers);
      for (i = 0; i < pAllocateInfo->commandBufferCount; i++)
         pCommandBuffers[i] = VK_NULL_HANDLE;
   }

   return result;
}

static void
anv_cmd_buffer_destroy(struct anv_cmd_buffer *cmd_buffer)
{
   list_del(&cmd_buffer->pool_link);

   anv_cmd_buffer_fini_batch_bo_chain(cmd_buffer);

   anv_state_stream_finish(&cmd_buffer->surface_state_stream);
   anv_state_stream_finish(&cmd_buffer->dynamic_state_stream);

   anv_cmd_state_finish(cmd_buffer);

   vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
}

void anv_FreeCommandBuffers(
    VkDevice                                    device,
    VkCommandPool                               commandPool,
    uint32_t                                    commandBufferCount,
    const VkCommandBuffer*                      pCommandBuffers)
{
   for (uint32_t i = 0; i < commandBufferCount; i++) {
      ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, pCommandBuffers[i]);

      if (!cmd_buffer)
         continue;

      anv_cmd_buffer_destroy(cmd_buffer);
   }
}

VkResult
anv_cmd_buffer_reset(struct anv_cmd_buffer *cmd_buffer)
{
   cmd_buffer->usage_flags = 0;
   anv_cmd_buffer_reset_batch_bo_chain(cmd_buffer);
   anv_cmd_state_reset(cmd_buffer);

   anv_state_stream_finish(&cmd_buffer->surface_state_stream);
   anv_state_stream_init(&cmd_buffer->surface_state_stream,
                         &cmd_buffer->device->surface_state_pool, 4096);

   anv_state_stream_finish(&cmd_buffer->dynamic_state_stream);
   anv_state_stream_init(&cmd_buffer->dynamic_state_stream,
                         &cmd_buffer->device->dynamic_state_pool, 16384);

   return VK_SUCCESS;
}

VkResult anv_ResetCommandBuffer(
    VkCommandBuffer                             commandBuffer,
    VkCommandBufferResetFlags                   flags)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   return anv_cmd_buffer_reset(cmd_buffer);
}
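
/* Dispatch to the per-generation implementation of func. For example,
 * anv_genX_call(devinfo, foo, bar) calls gen9_foo(bar) on a gen9 part;
 * Haswell is the one gen7 variant with its own prefix (gen75).
 */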
#define anv_genX_call(devinfo, func, ...)          \
   switch ((devinfo)->gen) {                       \
   case 7:                                         \
      if ((devinfo)->is_haswell) {                 \
         gen75_##func(__VA_ARGS__);                \
      } else {                                     \
         gen7_##func(__VA_ARGS__);                 \
      }                                            \
      break;                                       \
   case 8:                                         \
      gen8_##func(__VA_ARGS__);                    \
      break;                                       \
   case 9:                                         \
      gen9_##func(__VA_ARGS__);                    \
      break;                                       \
   case 10:                                        \
      gen10_##func(__VA_ARGS__);                   \
      break;                                       \
   case 11:                                        \
      gen11_##func(__VA_ARGS__);                   \
      break;                                       \
   default:                                        \
      assert(!"Unknown hardware generation");      \
   }

void
anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer)
{
   anv_genX_call(&cmd_buffer->device->info,
                 cmd_buffer_emit_state_base_address,
                 cmd_buffer);
}

void
anv_cmd_buffer_mark_image_written(struct anv_cmd_buffer *cmd_buffer,
                                  const struct anv_image *image,
                                  VkImageAspectFlagBits aspect,
                                  enum isl_aux_usage aux_usage,
                                  uint32_t level,
                                  uint32_t base_layer,
                                  uint32_t layer_count)
{
   anv_genX_call(&cmd_buffer->device->info,
                 cmd_buffer_mark_image_written,
                 cmd_buffer, image, aspect, aux_usage,
                 level, base_layer, layer_count);
}

void
anv_cmd_emit_conditional_render_predicate(struct anv_cmd_buffer *cmd_buffer)
{
   anv_genX_call(&cmd_buffer->device->info,
                 cmd_emit_conditional_render_predicate,
                 cmd_buffer);
}

void anv_CmdBindPipeline(
    VkCommandBuffer                             commandBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    VkPipeline                                  _pipeline)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);

   switch (pipelineBindPoint) {
   case VK_PIPELINE_BIND_POINT_COMPUTE:
      cmd_buffer->state.compute.base.pipeline = pipeline;
      cmd_buffer->state.compute.pipeline_dirty = true;
      cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
      cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
      break;

   case VK_PIPELINE_BIND_POINT_GRAPHICS:
      cmd_buffer->state.gfx.base.pipeline = pipeline;
      cmd_buffer->state.gfx.vb_dirty |= pipeline->vb_used;
      cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_PIPELINE;
      cmd_buffer->state.push_constants_dirty |= pipeline->active_stages;
      cmd_buffer->state.descriptors_dirty |= pipeline->active_stages;

      /* Apply the dynamic state from the pipeline */
      cmd_buffer->state.gfx.dirty |= pipeline->dynamic_state_mask;
      anv_dynamic_state_copy(&cmd_buffer->state.gfx.dynamic,
                             &pipeline->dynamic_state,
                             pipeline->dynamic_state_mask);
      break;

   default:
      assert(!"invalid bind point");
      break;
   }
}
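
/* The vkCmdSet* entrypoints below all follow the same pattern: stash the
 * new values in cmd_buffer->state.gfx.dynamic and flag the matching
 * ANV_CMD_DIRTY_DYNAMIC_* bit so the state gets re-emitted at the next
 * draw.
 */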
void anv_CmdSetViewport(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    firstViewport,
    uint32_t                                    viewportCount,
    const VkViewport*                           pViewports)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   const uint32_t total_count = firstViewport + viewportCount;
   if (cmd_buffer->state.gfx.dynamic.viewport.count < total_count)
      cmd_buffer->state.gfx.dynamic.viewport.count = total_count;

   memcpy(cmd_buffer->state.gfx.dynamic.viewport.viewports + firstViewport,
          pViewports, viewportCount * sizeof(*pViewports));

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_VIEWPORT;
}

void anv_CmdSetScissor(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    firstScissor,
    uint32_t                                    scissorCount,
    const VkRect2D*                             pScissors)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   const uint32_t total_count = firstScissor + scissorCount;
   if (cmd_buffer->state.gfx.dynamic.scissor.count < total_count)
      cmd_buffer->state.gfx.dynamic.scissor.count = total_count;

   memcpy(cmd_buffer->state.gfx.dynamic.scissor.scissors + firstScissor,
          pScissors, scissorCount * sizeof(*pScissors));

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_SCISSOR;
}

void anv_CmdSetLineWidth(
    VkCommandBuffer                             commandBuffer,
    float                                       lineWidth)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.gfx.dynamic.line_width = lineWidth;
   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
}

void anv_CmdSetDepthBias(
    VkCommandBuffer                             commandBuffer,
    float                                       depthBiasConstantFactor,
    float                                       depthBiasClamp,
    float                                       depthBiasSlopeFactor)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.gfx.dynamic.depth_bias.bias = depthBiasConstantFactor;
   cmd_buffer->state.gfx.dynamic.depth_bias.clamp = depthBiasClamp;
   cmd_buffer->state.gfx.dynamic.depth_bias.slope = depthBiasSlopeFactor;

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
}

void anv_CmdSetBlendConstants(
    VkCommandBuffer                             commandBuffer,
    const float                                 blendConstants[4])
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   memcpy(cmd_buffer->state.gfx.dynamic.blend_constants,
          blendConstants, sizeof(float) * 4);

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS;
}

void anv_CmdSetDepthBounds(
    VkCommandBuffer                             commandBuffer,
    float                                       minDepthBounds,
    float                                       maxDepthBounds)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.gfx.dynamic.depth_bounds.min = minDepthBounds;
   cmd_buffer->state.gfx.dynamic.depth_bounds.max = maxDepthBounds;

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS;
}

void anv_CmdSetStencilCompareMask(
    VkCommandBuffer                             commandBuffer,
    VkStencilFaceFlags                          faceMask,
    uint32_t                                    compareMask)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd_buffer->state.gfx.dynamic.stencil_compare_mask.front = compareMask;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd_buffer->state.gfx.dynamic.stencil_compare_mask.back = compareMask;

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
}

void anv_CmdSetStencilWriteMask(
    VkCommandBuffer                             commandBuffer,
    VkStencilFaceFlags                          faceMask,
    uint32_t                                    writeMask)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd_buffer->state.gfx.dynamic.stencil_write_mask.front = writeMask;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd_buffer->state.gfx.dynamic.stencil_write_mask.back = writeMask;

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
}

void anv_CmdSetStencilReference(
    VkCommandBuffer                             commandBuffer,
    VkStencilFaceFlags                          faceMask,
    uint32_t                                    reference)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd_buffer->state.gfx.dynamic.stencil_reference.front = reference;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd_buffer->state.gfx.dynamic.stencil_reference.back = reference;

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
}
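
/* Bind a descriptor set to either the compute or the graphics pipeline
 * state and, if dynamic offsets were provided, consume this set's worth of
 * them from the front of the (dynamic_offset_count, dynamic_offsets) pair.
 */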
void
anv_cmd_buffer_bind_descriptor_set(struct anv_cmd_buffer *cmd_buffer,
                                   VkPipelineBindPoint bind_point,
                                   struct anv_pipeline_layout *layout,
                                   uint32_t set_index,
                                   struct anv_descriptor_set *set,
                                   uint32_t *dynamic_offset_count,
                                   const uint32_t **dynamic_offsets)
{
   struct anv_descriptor_set_layout *set_layout =
      layout->set[set_index].layout;

   struct anv_cmd_pipeline_state *pipe_state;
   if (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE) {
      pipe_state = &cmd_buffer->state.compute.base;
   } else {
      assert(bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS);
      pipe_state = &cmd_buffer->state.gfx.base;
   }
   pipe_state->descriptors[set_index] = set;

   if (dynamic_offsets) {
      if (set_layout->dynamic_offset_count > 0) {
         uint32_t dynamic_offset_start =
            layout->set[set_index].dynamic_offset_start;

         /* Assert that everything is in range */
         assert(set_layout->dynamic_offset_count <= *dynamic_offset_count);
         assert(dynamic_offset_start + set_layout->dynamic_offset_count <=
                ARRAY_SIZE(pipe_state->dynamic_offsets));

         typed_memcpy(&pipe_state->dynamic_offsets[dynamic_offset_start],
                      *dynamic_offsets, set_layout->dynamic_offset_count);

         *dynamic_offsets += set_layout->dynamic_offset_count;
         *dynamic_offset_count -= set_layout->dynamic_offset_count;

         if (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE) {
            cmd_buffer->state.push_constants_dirty |=
               VK_SHADER_STAGE_COMPUTE_BIT;
         } else {
            cmd_buffer->state.push_constants_dirty |=
               VK_SHADER_STAGE_ALL_GRAPHICS;
         }
      }
   }

   if (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE) {
      cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
   } else {
      assert(bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS);
      cmd_buffer->state.descriptors_dirty |=
         set_layout->shader_stages & VK_SHADER_STAGE_ALL_GRAPHICS;
   }

   /* Pipeline layout objects are required to live at least while any command
    * buffers that use them are in recording state. We need to grab a reference
    * to the pipeline layout being bound here so we can compute correct dynamic
    * offsets for VK_DESCRIPTOR_TYPE_*_DYNAMIC in dynamic_offset_for_binding()
    * when we record draw commands that come after this.
    */
   pipe_state->layout = layout;
}

void anv_CmdBindDescriptorSets(
    VkCommandBuffer                             commandBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    VkPipelineLayout                            _layout,
    uint32_t                                    firstSet,
    uint32_t                                    descriptorSetCount,
    const VkDescriptorSet*                      pDescriptorSets,
    uint32_t                                    dynamicOffsetCount,
    const uint32_t*                             pDynamicOffsets)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_pipeline_layout, layout, _layout);

   assert(firstSet + descriptorSetCount <= MAX_SETS);

   for (uint32_t i = 0; i < descriptorSetCount; i++) {
      ANV_FROM_HANDLE(anv_descriptor_set, set, pDescriptorSets[i]);
      anv_cmd_buffer_bind_descriptor_set(cmd_buffer, pipelineBindPoint,
                                         layout, firstSet + i, set,
                                         &dynamicOffsetCount,
                                         &pDynamicOffsets);
   }
}

void anv_CmdBindVertexBuffers(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    firstBinding,
    uint32_t                                    bindingCount,
    const VkBuffer*                             pBuffers,
    const VkDeviceSize*                         pOffsets)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   struct anv_vertex_binding *vb = cmd_buffer->state.vertex_bindings;

   /* We have to defer setting up the vertex buffers since we need the
    * buffer stride from the pipeline.
    */

   assert(firstBinding + bindingCount <= MAX_VBS);
   for (uint32_t i = 0; i < bindingCount; i++) {
      vb[firstBinding + i].buffer = anv_buffer_from_handle(pBuffers[i]);
      vb[firstBinding + i].offset = pOffsets[i];
      cmd_buffer->state.gfx.vb_dirty |= 1 << (firstBinding + i);
   }
}

void anv_CmdBindTransformFeedbackBuffersEXT(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    firstBinding,
    uint32_t                                    bindingCount,
    const VkBuffer*                             pBuffers,
    const VkDeviceSize*                         pOffsets,
    const VkDeviceSize*                         pSizes)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   struct anv_xfb_binding *xfb = cmd_buffer->state.xfb_bindings;

   /* We have to defer setting up the transform feedback buffers since we
    * need the buffer stride from the pipeline.
    */

   assert(firstBinding + bindingCount <= MAX_XFB_BUFFERS);
   for (uint32_t i = 0; i < bindingCount; i++) {
      if (pBuffers[i] == VK_NULL_HANDLE) {
         xfb[firstBinding + i].buffer = NULL;
      } else {
         ANV_FROM_HANDLE(anv_buffer, buffer, pBuffers[i]);
         xfb[firstBinding + i].buffer = buffer;
         xfb[firstBinding + i].offset = pOffsets[i];
         xfb[firstBinding + i].size =
            anv_buffer_get_range(buffer, pOffsets[i],
                                 pSizes ? pSizes[i] : VK_WHOLE_SIZE);
      }
   }
}
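
/* Uniform buffers go through a typed RGBA32F surface; storage buffers use a
 * RAW (untyped) surface so they can be read and written at arbitrary byte
 * offsets.
 */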
enum isl_format
anv_isl_format_for_descriptor_type(VkDescriptorType type)
{
   switch (type) {
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      return ISL_FORMAT_R32G32B32A32_FLOAT;

   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
      return ISL_FORMAT_RAW;

   default:
      unreachable("Invalid descriptor type");
   }
}

struct anv_state
anv_cmd_buffer_emit_dynamic(struct anv_cmd_buffer *cmd_buffer,
                            const void *data, uint32_t size, uint32_t alignment)
{
   struct anv_state state;

   state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, alignment);
   memcpy(state.map, data, size);

   VG(VALGRIND_CHECK_MEM_IS_DEFINED(state.map, size));

   return state;
}

struct anv_state
anv_cmd_buffer_merge_dynamic(struct anv_cmd_buffer *cmd_buffer,
                             uint32_t *a, uint32_t *b,
                             uint32_t dwords, uint32_t alignment)
{
   struct anv_state state;
   uint32_t *p;

   state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                              dwords * 4, alignment);
   p = state.map;
   for (uint32_t i = 0; i < dwords; i++)
      p[i] = a[i] | b[i];

   VG(VALGRIND_CHECK_MEM_IS_DEFINED(p, dwords * 4));

   return state;
}
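
/* Resolve a single push constant param handle to its 32-bit value: a
 * builtin (zero or a base workgroup id component), a dword of the client's
 * push constant data, or a dynamic buffer offset captured at bind time.
 */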
static uint32_t
anv_push_constant_value(const struct anv_cmd_pipeline_state *state,
                        const struct anv_push_constants *data, uint32_t param)
{
   if (BRW_PARAM_IS_BUILTIN(param)) {
      switch (param) {
      case BRW_PARAM_BUILTIN_ZERO:
         return 0;
      case BRW_PARAM_BUILTIN_BASE_WORK_GROUP_ID_X:
         return data->base_work_group_id[0];
      case BRW_PARAM_BUILTIN_BASE_WORK_GROUP_ID_Y:
         return data->base_work_group_id[1];
      case BRW_PARAM_BUILTIN_BASE_WORK_GROUP_ID_Z:
         return data->base_work_group_id[2];
      default:
         unreachable("Invalid param builtin");
      }
   } else if (ANV_PARAM_IS_PUSH(param)) {
      uint32_t offset = ANV_PARAM_PUSH_OFFSET(param);
      assert(offset % sizeof(uint32_t) == 0);
      if (offset < sizeof(data->client_data))
         return *(uint32_t *)((uint8_t *)data + offset);
      else
         return 0;
   } else if (ANV_PARAM_IS_DYN_OFFSET(param)) {
      unsigned idx = ANV_PARAM_DYN_OFFSET_IDX(param);
      assert(idx < MAX_DYNAMIC_BUFFERS);
      return state->dynamic_offsets[idx];
   }

   assert(!"Invalid param");
   return 0;
}

struct anv_state
anv_cmd_buffer_push_constants(struct anv_cmd_buffer *cmd_buffer,
                              gl_shader_stage stage)
{
   struct anv_cmd_pipeline_state *pipeline_state = &cmd_buffer->state.gfx.base;
   struct anv_pipeline *pipeline = cmd_buffer->state.gfx.base.pipeline;

   /* If we don't have this stage, bail. */
   if (!anv_pipeline_has_stage(pipeline, stage))
      return (struct anv_state) { .offset = 0 };

   struct anv_push_constants *data =
      &cmd_buffer->state.push_constants[stage];
   const struct brw_stage_prog_data *prog_data =
      pipeline->shaders[stage]->prog_data;

   /* If we don't actually have any push constants, bail. */
   if (prog_data == NULL || prog_data->nr_params == 0)
      return (struct anv_state) { .offset = 0 };

   struct anv_state state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                         prog_data->nr_params * sizeof(float),
                                         32 /* bottom 5 bits MBZ */);

   /* Walk through the param array and fill the buffer with data */
   uint32_t *u32_map = state.map;
   for (unsigned i = 0; i < prog_data->nr_params; i++) {
      u32_map[i] = anv_push_constant_value(pipeline_state, data,
                                           prog_data->param[i]);
   }

   return state;
}
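
/* Compute push constants are laid out differently from the graphics stages:
 * a cross-thread block read by every thread comes first, followed by one
 * per-thread block per hardware thread in which BRW_PARAM_BUILTIN_SUBGROUP_ID
 * is patched to that thread's index.
 */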
struct anv_state
anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_cmd_pipeline_state *pipeline_state = &cmd_buffer->state.compute.base;
   struct anv_push_constants *data =
      &cmd_buffer->state.push_constants[MESA_SHADER_COMPUTE];
   struct anv_pipeline *pipeline = cmd_buffer->state.compute.base.pipeline;
   const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
   const struct brw_stage_prog_data *prog_data = &cs_prog_data->base;

   /* If we don't actually have any push constants, bail. */
   if (cs_prog_data->push.total.size == 0)
      return (struct anv_state) { .offset = 0 };

   const unsigned push_constant_alignment =
      cmd_buffer->device->info.gen < 8 ? 32 : 64;
   const unsigned aligned_total_push_constants_size =
      ALIGN(cs_prog_data->push.total.size, push_constant_alignment);
   struct anv_state state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                         aligned_total_push_constants_size,
                                         push_constant_alignment);

   /* Walk through the param array and fill the buffer with data */
   uint32_t *u32_map = state.map;

   if (cs_prog_data->push.cross_thread.size > 0) {
      for (unsigned i = 0;
           i < cs_prog_data->push.cross_thread.dwords;
           i++) {
         assert(prog_data->param[i] != BRW_PARAM_BUILTIN_SUBGROUP_ID);
         u32_map[i] = anv_push_constant_value(pipeline_state, data,
                                              prog_data->param[i]);
      }
   }

   if (cs_prog_data->push.per_thread.size > 0) {
      for (unsigned t = 0; t < cs_prog_data->threads; t++) {
         unsigned dst =
            8 * (cs_prog_data->push.per_thread.regs * t +
                 cs_prog_data->push.cross_thread.regs);
         unsigned src = cs_prog_data->push.cross_thread.dwords;
         for ( ; src < prog_data->nr_params; src++, dst++) {
            if (prog_data->param[src] == BRW_PARAM_BUILTIN_SUBGROUP_ID) {
               u32_map[dst] = t;
            } else {
               u32_map[dst] = anv_push_constant_value(pipeline_state, data,
                                                      prog_data->param[src]);
            }
         }
      }
   }

   return state;
}
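
/* vkCmdPushConstants only stages the client data in CPU memory; the values
 * are uploaded by anv_cmd_buffer_push_constants() and friends when the
 * dirty bits are flushed before a draw or dispatch.
 */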
void anv_CmdPushConstants(
    VkCommandBuffer                             commandBuffer,
    VkPipelineLayout                            layout,
    VkShaderStageFlags                          stageFlags,
    uint32_t                                    offset,
    uint32_t                                    size,
    const void*                                 pValues)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   anv_foreach_stage(stage, stageFlags) {
      memcpy(cmd_buffer->state.push_constants[stage].client_data + offset,
             pValues, size);
   }

   cmd_buffer->state.push_constants_dirty |= stageFlags;
}

VkResult anv_CreateCommandPool(
    VkDevice                                    _device,
    const VkCommandPoolCreateInfo*              pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkCommandPool*                              pCmdPool)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_cmd_pool *pool;

   pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (pool == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   if (pAllocator)
      pool->alloc = *pAllocator;
   else
      pool->alloc = device->alloc;

   list_inithead(&pool->cmd_buffers);

   *pCmdPool = anv_cmd_pool_to_handle(pool);

   return VK_SUCCESS;
}

void anv_DestroyCommandPool(
    VkDevice                                    _device,
    VkCommandPool                               commandPool,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_cmd_pool, pool, commandPool);

   if (!pool)
      return;

   list_for_each_entry_safe(struct anv_cmd_buffer, cmd_buffer,
                            &pool->cmd_buffers, pool_link) {
      anv_cmd_buffer_destroy(cmd_buffer);
   }

   vk_free2(&device->alloc, pAllocator, pool);
}

VkResult anv_ResetCommandPool(
    VkDevice                                    device,
    VkCommandPool                               commandPool,
    VkCommandPoolResetFlags                     flags)
{
   ANV_FROM_HANDLE(anv_cmd_pool, pool, commandPool);

   list_for_each_entry(struct anv_cmd_buffer, cmd_buffer,
                       &pool->cmd_buffers, pool_link) {
      anv_cmd_buffer_reset(cmd_buffer);
   }

   return VK_SUCCESS;
}

void anv_TrimCommandPool(
    VkDevice                                    device,
    VkCommandPool                               commandPool,
    VkCommandPoolTrimFlags                      flags)
{
   /* Nothing for us to do here. Our pools stay pretty tidy. */
}

/**
 * Return NULL if the current subpass has no depth/stencil attachment.
 */
const struct anv_image_view *
anv_cmd_buffer_get_depth_stencil_view(const struct anv_cmd_buffer *cmd_buffer)
{
   const struct anv_subpass *subpass = cmd_buffer->state.subpass;

   if (subpass->depth_stencil_attachment == NULL)
      return NULL;

   const struct anv_image_view *iview =
      cmd_buffer->state.attachments[subpass->depth_stencil_attachment->attachment].image_view;

   assert(iview->aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT |
                                VK_IMAGE_ASPECT_STENCIL_BIT));

   return iview;
}
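
/* Get or create the push descriptor set for the given bind point and set
 * index. Push descriptors are driver-owned: their descriptor buffer and
 * surface state are sub-allocated from the command buffer's own state
 * streams rather than from a descriptor pool.
 */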
static struct anv_descriptor_set *
anv_cmd_buffer_push_descriptor_set(struct anv_cmd_buffer *cmd_buffer,
                                   VkPipelineBindPoint bind_point,
                                   struct anv_descriptor_set_layout *layout,
                                   uint32_t _set)
{
   struct anv_cmd_pipeline_state *pipe_state;
   if (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE) {
      pipe_state = &cmd_buffer->state.compute.base;
   } else {
      assert(bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS);
      pipe_state = &cmd_buffer->state.gfx.base;
   }

   struct anv_push_descriptor_set **push_set =
      &pipe_state->push_descriptors[_set];

   if (*push_set == NULL) {
      *push_set = vk_zalloc(&cmd_buffer->pool->alloc,
                            sizeof(struct anv_push_descriptor_set), 8,
                            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (*push_set == NULL) {
         anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_HOST_MEMORY);
         return NULL;
      }
   }

   struct anv_descriptor_set *set = &(*push_set)->set;

   if (set->layout != layout) {
      if (set->layout)
         anv_descriptor_set_layout_unref(cmd_buffer->device, set->layout);
      anv_descriptor_set_layout_ref(layout);
      set->layout = layout;
   }
   set->size = anv_descriptor_set_layout_size(layout);
   set->buffer_view_count = layout->buffer_view_count;
   set->buffer_views = (*push_set)->buffer_views;

   if (layout->descriptor_buffer_size &&
       ((*push_set)->set_used_on_gpu ||
        set->desc_mem.alloc_size < layout->descriptor_buffer_size)) {
      /* The previous buffer is either actively used by some GPU command (so
       * we can't modify it) or is too small. Allocate a new one.
       */
      struct anv_state desc_mem =
         anv_state_stream_alloc(&cmd_buffer->dynamic_state_stream,
                                layout->descriptor_buffer_size, 32);
      if (set->desc_mem.alloc_size) {
         /* TODO: Do we really need to copy all the time? */
         memcpy(desc_mem.map, set->desc_mem.map,
                MIN2(desc_mem.alloc_size, set->desc_mem.alloc_size));
      }
      set->desc_mem = desc_mem;

      struct anv_address addr = {
         .bo = cmd_buffer->dynamic_state_stream.state_pool->block_pool.bo,
         .offset = set->desc_mem.offset,
      };

      const struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;
      set->desc_surface_state =
         anv_state_stream_alloc(&cmd_buffer->surface_state_stream,
                                isl_dev->ss.size, isl_dev->ss.align);
      anv_fill_buffer_surface_state(cmd_buffer->device,
                                    set->desc_surface_state,
                                    ISL_FORMAT_R32G32B32A32_FLOAT,
                                    addr, layout->descriptor_buffer_size, 1);
   }

   return set;
}

void anv_CmdPushDescriptorSetKHR(
    VkCommandBuffer                             commandBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    VkPipelineLayout                            _layout,
    uint32_t                                    _set,
    uint32_t                                    descriptorWriteCount,
    const VkWriteDescriptorSet*                 pDescriptorWrites)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_pipeline_layout, layout, _layout);

   assert(_set < MAX_SETS);

   struct anv_descriptor_set_layout *set_layout = layout->set[_set].layout;

   struct anv_descriptor_set *set =
      anv_cmd_buffer_push_descriptor_set(cmd_buffer, pipelineBindPoint,
                                         set_layout, _set);
   if (!set)
      return;

   /* Go through the user supplied descriptors. */
   for (uint32_t i = 0; i < descriptorWriteCount; i++) {
      const VkWriteDescriptorSet *write = &pDescriptorWrites[i];

      switch (write->descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            anv_descriptor_set_write_image_view(cmd_buffer->device, set,
                                                write->pImageInfo + j,
                                                write->descriptorType,
                                                write->dstBinding,
                                                write->dstArrayElement + j);
         }
         break;

      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            ANV_FROM_HANDLE(anv_buffer_view, bview,
                            write->pTexelBufferView[j]);

            anv_descriptor_set_write_buffer_view(cmd_buffer->device, set,
                                                 write->descriptorType,
                                                 bview,
                                                 write->dstBinding,
                                                 write->dstArrayElement + j);
         }
         break;

      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            assert(write->pBufferInfo[j].buffer);
            ANV_FROM_HANDLE(anv_buffer, buffer, write->pBufferInfo[j].buffer);

            anv_descriptor_set_write_buffer(cmd_buffer->device, set,
                                            &cmd_buffer->surface_state_stream,
                                            write->descriptorType,
                                            buffer,
                                            write->dstBinding,
                                            write->dstArrayElement + j,
                                            write->pBufferInfo[j].offset,
                                            write->pBufferInfo[j].range);
         }
         break;

      default:
         break;
      }
   }

   anv_cmd_buffer_bind_descriptor_set(cmd_buffer, pipelineBindPoint,
                                      layout, _set, set, NULL, NULL);
}

void anv_CmdPushDescriptorSetWithTemplateKHR(
    VkCommandBuffer                             commandBuffer,
    VkDescriptorUpdateTemplate                  descriptorUpdateTemplate,
    VkPipelineLayout                            _layout,
    uint32_t                                    _set,
    const void*                                 pData)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_descriptor_update_template, template,
                   descriptorUpdateTemplate);
   ANV_FROM_HANDLE(anv_pipeline_layout, layout, _layout);

   assert(_set < MAX_PUSH_DESCRIPTORS);

   struct anv_descriptor_set_layout *set_layout = layout->set[_set].layout;

   struct anv_descriptor_set *set =
      anv_cmd_buffer_push_descriptor_set(cmd_buffer, template->bind_point,
                                         set_layout, _set);
   if (!set)
      return;

   anv_descriptor_set_write_template(cmd_buffer->device, set,
                                     &cmd_buffer->surface_state_stream,
                                     template,
                                     pData);

   anv_cmd_buffer_bind_descriptor_set(cmd_buffer, template->bind_point,
                                      layout, _set, set, NULL, NULL);
}

void anv_CmdSetDeviceMask(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    deviceMask)
{
   /* No-op */
}