/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <assert.h>
#include <stdbool.h>
#include <string.h>

#include "anv_private.h"

#include "vk_format_info.h"
/** \file anv_cmd_buffer.c
 *
 * This file contains all of the stuff for emitting commands into a command
 * buffer.  This includes implementations of most of the vkCmd* entrypoints.
 * This file is concerned entirely with state emission and not with the
 * command buffer data structure itself.  As far as this file is concerned,
 * most of anv_cmd_buffer is magic.
 */
/* TODO: These are taken from GLES.  We should check the Vulkan spec */
const struct anv_dynamic_state default_dynamic_state = {
   .viewport = { .count = 0 },
   .scissor = { .count = 0 },
   .line_width = 1.0f,
   .depth_bias = { .bias = 0.0f, .clamp = 0.0f, .slope = 0.0f },
   .blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
   .depth_bounds = { .min = 0.0f, .max = 1.0f },
   .stencil_compare_mask = { .front = ~0u, .back = ~0u },
   .stencil_write_mask = { .front = ~0u, .back = ~0u },
   .stencil_reference = { .front = 0u, .back = 0u },
   .line_stipple = { .factor = 0u, .pattern = 0u },
};
/**
 * Copy the dynamic state from src to dest based on the copy_mask.
 *
 * Avoid copying states that have not changed, except for VIEWPORT, SCISSOR
 * and BLEND_CONSTANTS (always copy them if they are in the copy_mask).
 *
 * Returns a mask of the states which changed.
 */
anv_cmd_dirty_mask_t
anv_dynamic_state_copy(struct anv_dynamic_state *dest,
                       const struct anv_dynamic_state *src,
                       anv_cmd_dirty_mask_t copy_mask)
{
   anv_cmd_dirty_mask_t changed = 0;

   if (copy_mask & ANV_CMD_DIRTY_DYNAMIC_VIEWPORT) {
      dest->viewport.count = src->viewport.count;
      typed_memcpy(dest->viewport.viewports, src->viewport.viewports,
                   src->viewport.count);
      changed |= ANV_CMD_DIRTY_DYNAMIC_VIEWPORT;
   }

   if (copy_mask & ANV_CMD_DIRTY_DYNAMIC_SCISSOR) {
      dest->scissor.count = src->scissor.count;
      typed_memcpy(dest->scissor.scissors, src->scissor.scissors,
                   src->scissor.count);
      changed |= ANV_CMD_DIRTY_DYNAMIC_SCISSOR;
   }

   if (copy_mask & ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS) {
      typed_memcpy(dest->blend_constants, src->blend_constants, 4);
      changed |= ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS;
   }

#define ANV_CMP_COPY(field, flag)                                 \
   if (copy_mask & flag) {                                        \
      if (dest->field != src->field) {                            \
         dest->field = src->field;                                \
         changed |= flag;                                         \
      }                                                           \
   }

   ANV_CMP_COPY(line_width, ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH);

   ANV_CMP_COPY(depth_bias.bias, ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS);
   ANV_CMP_COPY(depth_bias.clamp, ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS);
   ANV_CMP_COPY(depth_bias.slope, ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS);

   ANV_CMP_COPY(depth_bounds.min, ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS);
   ANV_CMP_COPY(depth_bounds.max, ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS);

   ANV_CMP_COPY(stencil_compare_mask.front, ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK);
   ANV_CMP_COPY(stencil_compare_mask.back, ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK);

   ANV_CMP_COPY(stencil_write_mask.front, ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK);
   ANV_CMP_COPY(stencil_write_mask.back, ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK);

   ANV_CMP_COPY(stencil_reference.front, ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE);
   ANV_CMP_COPY(stencil_reference.back, ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE);

   ANV_CMP_COPY(line_stipple.factor, ANV_CMD_DIRTY_DYNAMIC_LINE_STIPPLE);
   ANV_CMP_COPY(line_stipple.pattern, ANV_CMD_DIRTY_DYNAMIC_LINE_STIPPLE);

#undef ANV_CMP_COPY

   return changed;
}
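/* A sketch of the intended use: the caller ORs the returned mask into its
 * dirty bits so that only state which actually changed gets re-emitted.
 * This is exactly what anv_CmdBindPipeline does below:
 *
 *    cmd_buffer->state.gfx.dirty |=
 *       anv_dynamic_state_copy(&cmd_buffer->state.gfx.dynamic,
 *                              &pipeline->dynamic_state,
 *                              pipeline->dynamic_state_mask);
 */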
static void
anv_cmd_state_init(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_cmd_state *state = &cmd_buffer->state;

   memset(state, 0, sizeof(*state));

   state->current_pipeline = UINT32_MAX;
   state->restart_index = UINT32_MAX;
   state->gfx.dynamic = default_dynamic_state;
}
static void
anv_cmd_pipeline_state_finish(struct anv_cmd_buffer *cmd_buffer,
                              struct anv_cmd_pipeline_state *pipe_state)
{
   for (uint32_t i = 0; i < ARRAY_SIZE(pipe_state->push_descriptors); i++) {
      if (pipe_state->push_descriptors[i]) {
         anv_descriptor_set_layout_unref(cmd_buffer->device,
                                         pipe_state->push_descriptors[i]->set.layout);
         vk_free(&cmd_buffer->pool->alloc, pipe_state->push_descriptors[i]);
      }
   }
}
static void
anv_cmd_state_finish(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_cmd_state *state = &cmd_buffer->state;

   anv_cmd_pipeline_state_finish(cmd_buffer, &state->gfx.base);
   anv_cmd_pipeline_state_finish(cmd_buffer, &state->compute.base);

   vk_free(&cmd_buffer->pool->alloc, state->attachments);
}
static void
anv_cmd_state_reset(struct anv_cmd_buffer *cmd_buffer)
{
   anv_cmd_state_finish(cmd_buffer);
   anv_cmd_state_init(cmd_buffer);
}
static VkResult anv_create_cmd_buffer(
    struct anv_device *                         device,
    struct anv_cmd_pool *                       pool,
    VkCommandBufferLevel                        level,
    VkCommandBuffer*                            pCommandBuffer)
{
   struct anv_cmd_buffer *cmd_buffer;
   VkResult result;

   cmd_buffer = vk_alloc(&pool->alloc, sizeof(*cmd_buffer), 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (cmd_buffer == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   cmd_buffer->batch.status = VK_SUCCESS;

   cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   cmd_buffer->device = device;
   cmd_buffer->pool = pool;
   cmd_buffer->level = level;

   result = anv_cmd_buffer_init_batch_bo_chain(cmd_buffer);
   if (result != VK_SUCCESS)
      goto fail;

   anv_state_stream_init(&cmd_buffer->surface_state_stream,
                         &device->surface_state_pool, 4096);
   anv_state_stream_init(&cmd_buffer->dynamic_state_stream,
                         &device->dynamic_state_pool, 16384);

   anv_cmd_state_init(cmd_buffer);

   if (pool) {
      list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
   } else {
      /* Init the pool_link so we can safely call list_del when we destroy
       * the command buffer
       */
      list_inithead(&cmd_buffer->pool_link);
   }

   *pCommandBuffer = anv_cmd_buffer_to_handle(cmd_buffer);

   return VK_SUCCESS;

 fail:
   vk_free(&cmd_buffer->pool->alloc, cmd_buffer);

   return result;
}
VkResult anv_AllocateCommandBuffers(
    VkDevice                                    _device,
    const VkCommandBufferAllocateInfo*          pAllocateInfo,
    VkCommandBuffer*                            pCommandBuffers)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_cmd_pool, pool, pAllocateInfo->commandPool);

   VkResult result = VK_SUCCESS;
   uint32_t i;

   for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
      result = anv_create_cmd_buffer(device, pool, pAllocateInfo->level,
                                     &pCommandBuffers[i]);
      if (result != VK_SUCCESS)
         break;
   }

   if (result != VK_SUCCESS) {
      anv_FreeCommandBuffers(_device, pAllocateInfo->commandPool,
                             i, pCommandBuffers);
      for (i = 0; i < pAllocateInfo->commandBufferCount; i++)
         pCommandBuffers[i] = VK_NULL_HANDLE;
   }

   return result;
}
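/* The failure path above follows the spec requirement that a failed
 * vkAllocateCommandBuffers call free anything it created and set every
 * entry of pCommandBuffers to VK_NULL_HANDLE, not just the entries that
 * were successfully allocated.
 */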
static void
anv_cmd_buffer_destroy(struct anv_cmd_buffer *cmd_buffer)
{
   list_del(&cmd_buffer->pool_link);

   anv_cmd_buffer_fini_batch_bo_chain(cmd_buffer);

   anv_state_stream_finish(&cmd_buffer->surface_state_stream);
   anv_state_stream_finish(&cmd_buffer->dynamic_state_stream);

   anv_cmd_state_finish(cmd_buffer);

   vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
}
void anv_FreeCommandBuffers(
    VkDevice                                    device,
    VkCommandPool                               commandPool,
    uint32_t                                    commandBufferCount,
    const VkCommandBuffer*                      pCommandBuffers)
{
   for (uint32_t i = 0; i < commandBufferCount; i++) {
      ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, pCommandBuffers[i]);

      if (!cmd_buffer)
         continue;

      anv_cmd_buffer_destroy(cmd_buffer);
   }
}
VkResult
anv_cmd_buffer_reset(struct anv_cmd_buffer *cmd_buffer)
{
   cmd_buffer->usage_flags = 0;
   anv_cmd_buffer_reset_batch_bo_chain(cmd_buffer);
   anv_cmd_state_reset(cmd_buffer);

   anv_state_stream_finish(&cmd_buffer->surface_state_stream);
   anv_state_stream_init(&cmd_buffer->surface_state_stream,
                         &cmd_buffer->device->surface_state_pool, 4096);

   anv_state_stream_finish(&cmd_buffer->dynamic_state_stream);
   anv_state_stream_init(&cmd_buffer->dynamic_state_stream,
                         &cmd_buffer->device->dynamic_state_pool, 16384);

   return VK_SUCCESS;
}
VkResult anv_ResetCommandBuffer(
    VkCommandBuffer                             commandBuffer,
    VkCommandBufferResetFlags                   flags)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   return anv_cmd_buffer_reset(cmd_buffer);
}
#define anv_genX_call(devinfo, func, ...)          \
   switch ((devinfo)->gen) {                       \
   case 7:                                         \
      if ((devinfo)->is_haswell) {                 \
         gen75_##func(__VA_ARGS__);                \
      } else {                                     \
         gen7_##func(__VA_ARGS__);                 \
      }                                            \
      break;                                       \
   case 8:                                         \
      gen8_##func(__VA_ARGS__);                    \
      break;                                       \
   case 9:                                         \
      gen9_##func(__VA_ARGS__);                    \
      break;                                       \
   case 10:                                        \
      gen10_##func(__VA_ARGS__);                   \
      break;                                       \
   case 11:                                        \
      gen11_##func(__VA_ARGS__);                   \
      break;                                       \
   default:                                        \
      assert(!"Unknown hardware generation");      \
   }
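/* Each genX_cmd_buffer.c is compiled once per hardware generation with its
 * symbols prefixed (gen7_..., gen75_..., gen8_..., and so on), so the macro
 * above is the runtime dispatch point from generation-independent code into
 * the generation-specific implementations.
 */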
void
anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer)
{
   anv_genX_call(&cmd_buffer->device->info,
                 cmd_buffer_emit_state_base_address,
                 cmd_buffer);
}
void
anv_cmd_buffer_mark_image_written(struct anv_cmd_buffer *cmd_buffer,
                                  const struct anv_image *image,
                                  VkImageAspectFlagBits aspect,
                                  enum isl_aux_usage aux_usage,
                                  uint32_t level,
                                  uint32_t base_layer,
                                  uint32_t layer_count)
{
   anv_genX_call(&cmd_buffer->device->info,
                 cmd_buffer_mark_image_written,
                 cmd_buffer, image, aspect, aux_usage,
                 level, base_layer, layer_count);
}
void
anv_cmd_emit_conditional_render_predicate(struct anv_cmd_buffer *cmd_buffer)
{
   anv_genX_call(&cmd_buffer->device->info,
                 cmd_emit_conditional_render_predicate,
                 cmd_buffer);
}
void anv_CmdBindPipeline(
    VkCommandBuffer                             commandBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    VkPipeline                                  _pipeline)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);

   switch (pipelineBindPoint) {
   case VK_PIPELINE_BIND_POINT_COMPUTE:
      cmd_buffer->state.compute.base.pipeline = pipeline;
      cmd_buffer->state.compute.pipeline_dirty = true;
      cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
      cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
      break;

   case VK_PIPELINE_BIND_POINT_GRAPHICS:
      cmd_buffer->state.gfx.base.pipeline = pipeline;
      cmd_buffer->state.gfx.vb_dirty |= pipeline->vb_used;
      cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_PIPELINE;
      cmd_buffer->state.push_constants_dirty |= pipeline->active_stages;
      cmd_buffer->state.descriptors_dirty |= pipeline->active_stages;

      /* Apply the dynamic state from the pipeline */
      cmd_buffer->state.gfx.dirty |=
         anv_dynamic_state_copy(&cmd_buffer->state.gfx.dynamic,
                                &pipeline->dynamic_state,
                                pipeline->dynamic_state_mask);
      break;

   default:
      assert(!"invalid bind point");
      break;
   }
}
void anv_CmdSetViewport(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    firstViewport,
    uint32_t                                    viewportCount,
    const VkViewport*                           pViewports)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   const uint32_t total_count = firstViewport + viewportCount;
   if (cmd_buffer->state.gfx.dynamic.viewport.count < total_count)
      cmd_buffer->state.gfx.dynamic.viewport.count = total_count;

   memcpy(cmd_buffer->state.gfx.dynamic.viewport.viewports + firstViewport,
          pViewports, viewportCount * sizeof(*pViewports));

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_VIEWPORT;
}
void anv_CmdSetScissor(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    firstScissor,
    uint32_t                                    scissorCount,
    const VkRect2D*                             pScissors)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   const uint32_t total_count = firstScissor + scissorCount;
   if (cmd_buffer->state.gfx.dynamic.scissor.count < total_count)
      cmd_buffer->state.gfx.dynamic.scissor.count = total_count;

   memcpy(cmd_buffer->state.gfx.dynamic.scissor.scissors + firstScissor,
          pScissors, scissorCount * sizeof(*pScissors));

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_SCISSOR;
}
void anv_CmdSetLineWidth(
    VkCommandBuffer                             commandBuffer,
    float                                       lineWidth)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.gfx.dynamic.line_width = lineWidth;
   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
}
void anv_CmdSetDepthBias(
    VkCommandBuffer                             commandBuffer,
    float                                       depthBiasConstantFactor,
    float                                       depthBiasClamp,
    float                                       depthBiasSlopeFactor)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.gfx.dynamic.depth_bias.bias = depthBiasConstantFactor;
   cmd_buffer->state.gfx.dynamic.depth_bias.clamp = depthBiasClamp;
   cmd_buffer->state.gfx.dynamic.depth_bias.slope = depthBiasSlopeFactor;

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
}
void anv_CmdSetBlendConstants(
    VkCommandBuffer                             commandBuffer,
    const float                                 blendConstants[4])
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   memcpy(cmd_buffer->state.gfx.dynamic.blend_constants,
          blendConstants, sizeof(float) * 4);

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS;
}
void anv_CmdSetDepthBounds(
    VkCommandBuffer                             commandBuffer,
    float                                       minDepthBounds,
    float                                       maxDepthBounds)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.gfx.dynamic.depth_bounds.min = minDepthBounds;
   cmd_buffer->state.gfx.dynamic.depth_bounds.max = maxDepthBounds;

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS;
}
void anv_CmdSetStencilCompareMask(
    VkCommandBuffer                             commandBuffer,
    VkStencilFaceFlags                          faceMask,
    uint32_t                                    compareMask)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd_buffer->state.gfx.dynamic.stencil_compare_mask.front = compareMask;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd_buffer->state.gfx.dynamic.stencil_compare_mask.back = compareMask;

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
}
void anv_CmdSetStencilWriteMask(
    VkCommandBuffer                             commandBuffer,
    VkStencilFaceFlags                          faceMask,
    uint32_t                                    writeMask)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd_buffer->state.gfx.dynamic.stencil_write_mask.front = writeMask;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd_buffer->state.gfx.dynamic.stencil_write_mask.back = writeMask;

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
}
void anv_CmdSetStencilReference(
    VkCommandBuffer                             commandBuffer,
    VkStencilFaceFlags                          faceMask,
    uint32_t                                    reference)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd_buffer->state.gfx.dynamic.stencil_reference.front = reference;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd_buffer->state.gfx.dynamic.stencil_reference.back = reference;

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
}
void anv_CmdSetLineStippleEXT(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    lineStippleFactor,
    uint16_t                                    lineStipplePattern)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.gfx.dynamic.line_stipple.factor = lineStippleFactor;
   cmd_buffer->state.gfx.dynamic.line_stipple.pattern = lineStipplePattern;

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_LINE_STIPPLE;
}
static void
anv_cmd_buffer_bind_descriptor_set(struct anv_cmd_buffer *cmd_buffer,
                                   VkPipelineBindPoint bind_point,
                                   struct anv_pipeline_layout *layout,
                                   uint32_t set_index,
                                   struct anv_descriptor_set *set,
                                   uint32_t *dynamic_offset_count,
                                   const uint32_t **dynamic_offsets)
{
   struct anv_descriptor_set_layout *set_layout =
      layout->set[set_index].layout;

   struct anv_cmd_pipeline_state *pipe_state;
   if (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE) {
      pipe_state = &cmd_buffer->state.compute.base;
   } else {
      assert(bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS);
      pipe_state = &cmd_buffer->state.gfx.base;
   }
   pipe_state->descriptors[set_index] = set;

   if (dynamic_offsets) {
      if (set_layout->dynamic_offset_count > 0) {
         uint32_t dynamic_offset_start =
            layout->set[set_index].dynamic_offset_start;

         anv_foreach_stage(stage, set_layout->shader_stages) {
            struct anv_push_constants *push =
               &cmd_buffer->state.push_constants[stage];

            /* Assert that everything is in range */
            assert(set_layout->dynamic_offset_count <= *dynamic_offset_count);
            assert(dynamic_offset_start + set_layout->dynamic_offset_count <=
                   ARRAY_SIZE(push->dynamic_offsets));

            typed_memcpy(&push->dynamic_offsets[dynamic_offset_start],
                         *dynamic_offsets, set_layout->dynamic_offset_count);
         }

         *dynamic_offsets += set_layout->dynamic_offset_count;
         *dynamic_offset_count -= set_layout->dynamic_offset_count;

         if (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE) {
            cmd_buffer->state.push_constants_dirty |=
               VK_SHADER_STAGE_COMPUTE_BIT;
         } else {
            cmd_buffer->state.push_constants_dirty |=
               VK_SHADER_STAGE_ALL_GRAPHICS;
         }
      }
   }

   if (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE) {
      cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
   } else {
      assert(bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS);
      cmd_buffer->state.descriptors_dirty |=
         set_layout->shader_stages & VK_SHADER_STAGE_ALL_GRAPHICS;
   }
}
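/* Note how dynamic offsets are consumed above: each bound set takes
 * set_layout->dynamic_offset_count entries off the front of the array and
 * advances the pointer, so the single pDynamicOffsets array passed to
 * anv_CmdBindDescriptorSets below is split across all the sets being
 * bound, in set order.
 */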
void anv_CmdBindDescriptorSets(
    VkCommandBuffer                             commandBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    VkPipelineLayout                            _layout,
    uint32_t                                    firstSet,
    uint32_t                                    descriptorSetCount,
    const VkDescriptorSet*                      pDescriptorSets,
    uint32_t                                    dynamicOffsetCount,
    const uint32_t*                             pDynamicOffsets)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_pipeline_layout, layout, _layout);

   assert(firstSet + descriptorSetCount <= MAX_SETS);

   for (uint32_t i = 0; i < descriptorSetCount; i++) {
      ANV_FROM_HANDLE(anv_descriptor_set, set, pDescriptorSets[i]);
      anv_cmd_buffer_bind_descriptor_set(cmd_buffer, pipelineBindPoint,
                                         layout, firstSet + i, set,
                                         &dynamicOffsetCount,
                                         &pDynamicOffsets);
   }
}
void anv_CmdBindVertexBuffers(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    firstBinding,
    uint32_t                                    bindingCount,
    const VkBuffer*                             pBuffers,
    const VkDeviceSize*                         pOffsets)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   struct anv_vertex_binding *vb = cmd_buffer->state.vertex_bindings;

   /* We have to defer setting up the vertex buffers since we need the
    * buffer stride from the pipeline.
    */

   assert(firstBinding + bindingCount <= MAX_VBS);
   for (uint32_t i = 0; i < bindingCount; i++) {
      vb[firstBinding + i].buffer = anv_buffer_from_handle(pBuffers[i]);
      vb[firstBinding + i].offset = pOffsets[i];
      cmd_buffer->state.gfx.vb_dirty |= 1 << (firstBinding + i);
   }
}
void anv_CmdBindTransformFeedbackBuffersEXT(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    firstBinding,
    uint32_t                                    bindingCount,
    const VkBuffer*                             pBuffers,
    const VkDeviceSize*                         pOffsets,
    const VkDeviceSize*                         pSizes)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   struct anv_xfb_binding *xfb = cmd_buffer->state.xfb_bindings;

   /* We have to defer setting up the buffers since we need the stride
    * information from the pipeline.
    */

   assert(firstBinding + bindingCount <= MAX_XFB_BUFFERS);
   for (uint32_t i = 0; i < bindingCount; i++) {
      if (pBuffers[i] == VK_NULL_HANDLE) {
         xfb[firstBinding + i].buffer = NULL;
      } else {
         ANV_FROM_HANDLE(anv_buffer, buffer, pBuffers[i]);
         xfb[firstBinding + i].buffer = buffer;
         xfb[firstBinding + i].offset = pOffsets[i];
         xfb[firstBinding + i].size =
            anv_buffer_get_range(buffer, pOffsets[i],
                                 pSizes ? pSizes[i] : VK_WHOLE_SIZE);
      }
   }
}
enum isl_format
anv_isl_format_for_descriptor_type(VkDescriptorType type)
{
   switch (type) {
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      return ISL_FORMAT_R32G32B32A32_FLOAT;

   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
      return ISL_FORMAT_RAW;

   default:
      unreachable("Invalid descriptor type");
   }
}
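/* The split above mirrors how the two buffer kinds are accessed on the
 * hardware side: uniform buffers can be read through typed surface reads,
 * so they get a real RGBA32-float surface format, while storage buffers
 * need byte-addressable untyped access, which is what an ISL_FORMAT_RAW
 * surface provides.
 */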
struct anv_state
anv_cmd_buffer_emit_dynamic(struct anv_cmd_buffer *cmd_buffer,
                            const void *data, uint32_t size, uint32_t alignment)
{
   struct anv_state state;

   state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, alignment);
   memcpy(state.map, data, size);

   VG(VALGRIND_CHECK_MEM_IS_DEFINED(state.map, size));

   return state;
}
struct anv_state
anv_cmd_buffer_merge_dynamic(struct anv_cmd_buffer *cmd_buffer,
                             uint32_t *a, uint32_t *b,
                             uint32_t dwords, uint32_t alignment)
{
   struct anv_state state;
   uint32_t *p;

   state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                              dwords * 4, alignment);
   p = state.map;
   for (uint32_t i = 0; i < dwords; i++)
      p[i] = a[i] | b[i];

   VG(VALGRIND_CHECK_MEM_IS_DEFINED(p, dwords * 4));

   return state;
}
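/* This OR-merge is the usual trick for combining pipeline-baked state with
 * dynamically-set state: both sides pack the same hardware packet, each
 * filling in only its own fields and leaving zeroes elsewhere, so OR-ing
 * the two DWORD arrays yields the fully populated packet.
 */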
static uint32_t
anv_push_constant_value(const struct anv_cmd_pipeline_state *state,
                        const struct anv_push_constants *data, uint32_t param)
{
   if (BRW_PARAM_IS_BUILTIN(param)) {
      switch (param) {
      case BRW_PARAM_BUILTIN_ZERO:
         return 0;
      case BRW_PARAM_BUILTIN_BASE_WORK_GROUP_ID_X:
         return data->cs.base_work_group_id[0];
      case BRW_PARAM_BUILTIN_BASE_WORK_GROUP_ID_Y:
         return data->cs.base_work_group_id[1];
      case BRW_PARAM_BUILTIN_BASE_WORK_GROUP_ID_Z:
         return data->cs.base_work_group_id[2];
      default:
         unreachable("Invalid param builtin");
      }
   } else if (ANV_PARAM_IS_PUSH(param)) {
      uint32_t offset = ANV_PARAM_PUSH_OFFSET(param);
      assert(offset % sizeof(uint32_t) == 0);
      if (offset < sizeof(data->client_data))
         return *(uint32_t *)((uint8_t *)data + offset);
      else
         return 0;
   } else if (ANV_PARAM_IS_DYN_OFFSET(param)) {
      unsigned idx = ANV_PARAM_DYN_OFFSET_IDX(param);
      assert(idx < MAX_DYNAMIC_BUFFERS);
      return data->dynamic_offsets[idx];
   }

   assert(!"Invalid param");
   return 0;
}
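/* A push constant "param" is a small tagged handle: BRW builtins name
 * values synthesized by the compiler (such as the base workgroup ID), push
 * params encode a byte offset into the client-visible push constant data,
 * and dynamic-offset params index the dynamic buffer offsets captured at
 * vkCmdBindDescriptorSets time.
 */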
struct anv_state
anv_cmd_buffer_push_constants(struct anv_cmd_buffer *cmd_buffer,
                              gl_shader_stage stage)
{
   struct anv_cmd_pipeline_state *pipeline_state = &cmd_buffer->state.gfx.base;
   struct anv_pipeline *pipeline = cmd_buffer->state.gfx.base.pipeline;

   /* If we don't have this stage, bail. */
   if (!anv_pipeline_has_stage(pipeline, stage))
      return (struct anv_state) { .offset = 0 };

   struct anv_push_constants *data =
      &cmd_buffer->state.push_constants[stage];
   const struct brw_stage_prog_data *prog_data =
      pipeline->shaders[stage]->prog_data;

   /* If we don't actually have any push constants, bail. */
   if (prog_data == NULL || prog_data->nr_params == 0)
      return (struct anv_state) { .offset = 0 };

   struct anv_state state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                         prog_data->nr_params * sizeof(float),
                                         32 /* bottom 5 bits MBZ */);

   /* Walk through the param array and fill the buffer with data */
   uint32_t *u32_map = state.map;
   for (unsigned i = 0; i < prog_data->nr_params; i++) {
      u32_map[i] = anv_push_constant_value(pipeline_state, data,
                                           prog_data->param[i]);
   }

   return state;
}
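/* The 32-byte alignment requested above matches the "bottom 5 bits MBZ"
 * note: the constant buffer pointers programmed into the hardware's
 * 3DSTATE_CONSTANT_* packets do not store the low five address bits, so
 * anything handed to them must be 32-byte aligned.
 */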
struct anv_state
anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_cmd_pipeline_state *pipeline_state = &cmd_buffer->state.compute.base;
   struct anv_push_constants *data =
      &cmd_buffer->state.push_constants[MESA_SHADER_COMPUTE];
   struct anv_pipeline *pipeline = cmd_buffer->state.compute.base.pipeline;
   const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
   const struct brw_stage_prog_data *prog_data = &cs_prog_data->base;

   /* If we don't actually have any push constants, bail. */
   if (cs_prog_data->push.total.size == 0)
      return (struct anv_state) { .offset = 0 };

   const unsigned push_constant_alignment =
      cmd_buffer->device->info.gen < 8 ? 32 : 64;
   const unsigned aligned_total_push_constants_size =
      ALIGN(cs_prog_data->push.total.size, push_constant_alignment);
   struct anv_state state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                         aligned_total_push_constants_size,
                                         push_constant_alignment);

   /* Walk through the param array and fill the buffer with data */
   uint32_t *u32_map = state.map;

   if (cs_prog_data->push.cross_thread.size > 0) {
      for (unsigned i = 0;
           i < cs_prog_data->push.cross_thread.dwords;
           i++) {
         assert(prog_data->param[i] != BRW_PARAM_BUILTIN_SUBGROUP_ID);
         u32_map[i] = anv_push_constant_value(pipeline_state, data,
                                              prog_data->param[i]);
      }
   }

   if (cs_prog_data->push.per_thread.size > 0) {
      for (unsigned t = 0; t < cs_prog_data->threads; t++) {
         unsigned dst =
            8 * (cs_prog_data->push.per_thread.regs * t +
                 cs_prog_data->push.cross_thread.regs);
         unsigned src = cs_prog_data->push.cross_thread.dwords;
         for ( ; src < prog_data->nr_params; src++, dst++) {
            if (prog_data->param[src] == BRW_PARAM_BUILTIN_SUBGROUP_ID) {
               u32_map[dst] = t;
            } else {
               u32_map[dst] = anv_push_constant_value(pipeline_state, data,
                                                      prog_data->param[src]);
            }
         }
      }
   }

   return state;
}
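/* CS push constants are laid out as one cross-thread block that every
 * hardware thread reads, followed by a per-thread block for each thread.
 * The per-thread copy above is also where BRW_PARAM_BUILTIN_SUBGROUP_ID
 * gets its value: each thread's block is stamped with its own index t.
 * "dst" is counted in DWORDs, hence the factor of 8 (a 32-byte register
 * holds 8 DWORDs).
 */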
void anv_CmdPushConstants(
    VkCommandBuffer                             commandBuffer,
    VkPipelineLayout                            layout,
    VkShaderStageFlags                          stageFlags,
    uint32_t                                    offset,
    uint32_t                                    size,
    const void*                                 pValues)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   anv_foreach_stage(stage, stageFlags) {
      memcpy(cmd_buffer->state.push_constants[stage].client_data + offset,
             pValues, size);
   }

   cmd_buffer->state.push_constants_dirty |= stageFlags;
}
VkResult anv_CreateCommandPool(
    VkDevice                                    _device,
    const VkCommandPoolCreateInfo*              pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkCommandPool*                              pCmdPool)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_cmd_pool *pool;

   pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (pool == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   if (pAllocator)
      pool->alloc = *pAllocator;
   else
      pool->alloc = device->alloc;

   list_inithead(&pool->cmd_buffers);

   *pCmdPool = anv_cmd_pool_to_handle(pool);

   return VK_SUCCESS;
}
void anv_DestroyCommandPool(
    VkDevice                                    _device,
    VkCommandPool                               commandPool,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_cmd_pool, pool, commandPool);

   if (!pool)
      return;

   list_for_each_entry_safe(struct anv_cmd_buffer, cmd_buffer,
                            &pool->cmd_buffers, pool_link) {
      anv_cmd_buffer_destroy(cmd_buffer);
   }

   vk_free2(&device->alloc, pAllocator, pool);
}
VkResult anv_ResetCommandPool(
    VkDevice                                    device,
    VkCommandPool                               commandPool,
    VkCommandPoolResetFlags                     flags)
{
   ANV_FROM_HANDLE(anv_cmd_pool, pool, commandPool);

   list_for_each_entry(struct anv_cmd_buffer, cmd_buffer,
                       &pool->cmd_buffers, pool_link) {
      anv_cmd_buffer_reset(cmd_buffer);
   }

   return VK_SUCCESS;
}
void anv_TrimCommandPool(
    VkDevice                                    device,
    VkCommandPool                               commandPool,
    VkCommandPoolTrimFlags                      flags)
{
   /* Nothing for us to do here.  Our pools stay pretty tidy. */
}
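/* Trimming can be a no-op here because the pool itself owns very little:
 * batch BOs and state-stream blocks belong to the individual command
 * buffers and are returned to the device-level pools when a command buffer
 * is reset or destroyed.
 */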
/**
 * Return NULL if the current subpass has no depth/stencil attachment.
 */
const struct anv_image_view *
anv_cmd_buffer_get_depth_stencil_view(const struct anv_cmd_buffer *cmd_buffer)
{
   const struct anv_subpass *subpass = cmd_buffer->state.subpass;

   if (subpass->depth_stencil_attachment == NULL)
      return NULL;

   const struct anv_image_view *iview =
      cmd_buffer->state.attachments[subpass->depth_stencil_attachment->attachment].image_view;

   assert(iview->aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT |
                                VK_IMAGE_ASPECT_STENCIL_BIT));

   return iview;
}
static struct anv_descriptor_set *
anv_cmd_buffer_push_descriptor_set(struct anv_cmd_buffer *cmd_buffer,
                                   VkPipelineBindPoint bind_point,
                                   struct anv_descriptor_set_layout *layout,
                                   uint32_t _set)
{
   struct anv_cmd_pipeline_state *pipe_state;
   if (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE) {
      pipe_state = &cmd_buffer->state.compute.base;
   } else {
      assert(bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS);
      pipe_state = &cmd_buffer->state.gfx.base;
   }

   struct anv_push_descriptor_set **push_set =
      &pipe_state->push_descriptors[_set];

   if (*push_set == NULL) {
      *push_set = vk_zalloc(&cmd_buffer->pool->alloc,
                            sizeof(struct anv_push_descriptor_set), 8,
                            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (*push_set == NULL) {
         anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_HOST_MEMORY);
         return NULL;
      }
   }

   struct anv_descriptor_set *set = &(*push_set)->set;

   if (set->layout != layout) {
      if (set->layout)
         anv_descriptor_set_layout_unref(cmd_buffer->device, set->layout);
      anv_descriptor_set_layout_ref(layout);
      set->layout = layout;
   }
   set->size = anv_descriptor_set_layout_size(layout);
   set->buffer_view_count = layout->buffer_view_count;
   set->buffer_views = (*push_set)->buffer_views;

   if (layout->descriptor_buffer_size &&
       ((*push_set)->set_used_on_gpu ||
        set->desc_mem.alloc_size < layout->descriptor_buffer_size)) {
      /* The previous buffer is either actively used by some GPU command (so
       * we can't modify it) or is too small.  Allocate a new one.
       */
      struct anv_state desc_mem =
         anv_state_stream_alloc(&cmd_buffer->dynamic_state_stream,
                                layout->descriptor_buffer_size, 32);
      if (set->desc_mem.alloc_size) {
         /* TODO: Do we really need to copy all the time? */
         memcpy(desc_mem.map, set->desc_mem.map,
                MIN2(desc_mem.alloc_size, set->desc_mem.alloc_size));
      }
      set->desc_mem = desc_mem;

      struct anv_address addr = {
         .bo = cmd_buffer->dynamic_state_stream.state_pool->block_pool.bo,
         .offset = set->desc_mem.offset,
      };

      const struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;
      set->desc_surface_state =
         anv_state_stream_alloc(&cmd_buffer->surface_state_stream,
                                isl_dev->ss.size, isl_dev->ss.align);
      anv_fill_buffer_surface_state(cmd_buffer->device,
                                    set->desc_surface_state,
                                    ISL_FORMAT_R32G32B32A32_FLOAT,
                                    addr, layout->descriptor_buffer_size, 1);
   }

   return set;
}
void anv_CmdPushDescriptorSetKHR(
    VkCommandBuffer                             commandBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    VkPipelineLayout                            _layout,
    uint32_t                                    _set,
    uint32_t                                    descriptorWriteCount,
    const VkWriteDescriptorSet*                 pDescriptorWrites)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_pipeline_layout, layout, _layout);

   assert(_set < MAX_SETS);

   struct anv_descriptor_set_layout *set_layout = layout->set[_set].layout;

   struct anv_descriptor_set *set =
      anv_cmd_buffer_push_descriptor_set(cmd_buffer, pipelineBindPoint,
                                         set_layout, _set);
   if (!set)
      return;

   /* Go through the user supplied descriptors. */
   for (uint32_t i = 0; i < descriptorWriteCount; i++) {
      const VkWriteDescriptorSet *write = &pDescriptorWrites[i];

      switch (write->descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            anv_descriptor_set_write_image_view(cmd_buffer->device, set,
                                                write->pImageInfo + j,
                                                write->descriptorType,
                                                write->dstBinding,
                                                write->dstArrayElement + j);
         }
         break;

      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            ANV_FROM_HANDLE(anv_buffer_view, bview,
                            write->pTexelBufferView[j]);

            anv_descriptor_set_write_buffer_view(cmd_buffer->device, set,
                                                 write->descriptorType,
                                                 bview,
                                                 write->dstBinding,
                                                 write->dstArrayElement + j);
         }
         break;

      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            assert(write->pBufferInfo[j].buffer);
            ANV_FROM_HANDLE(anv_buffer, buffer, write->pBufferInfo[j].buffer);
            assert(buffer);

            anv_descriptor_set_write_buffer(cmd_buffer->device, set,
                                            &cmd_buffer->surface_state_stream,
                                            write->descriptorType,
                                            buffer,
                                            write->dstBinding,
                                            write->dstArrayElement + j,
                                            write->pBufferInfo[j].offset,
                                            write->pBufferInfo[j].range);
         }
         break;

      default:
         break;
      }
   }

   anv_cmd_buffer_bind_descriptor_set(cmd_buffer, pipelineBindPoint,
                                      layout, _set, set, NULL, NULL);
}
void anv_CmdPushDescriptorSetWithTemplateKHR(
    VkCommandBuffer                             commandBuffer,
    VkDescriptorUpdateTemplate                  descriptorUpdateTemplate,
    VkPipelineLayout                            _layout,
    uint32_t                                    _set,
    const void*                                 pData)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_descriptor_update_template, template,
                   descriptorUpdateTemplate);
   ANV_FROM_HANDLE(anv_pipeline_layout, layout, _layout);

   assert(_set < MAX_PUSH_DESCRIPTORS);

   struct anv_descriptor_set_layout *set_layout = layout->set[_set].layout;

   struct anv_descriptor_set *set =
      anv_cmd_buffer_push_descriptor_set(cmd_buffer, template->bind_point,
                                         set_layout, _set);
   if (!set)
      return;

   anv_descriptor_set_write_template(cmd_buffer->device, set,
                                     &cmd_buffer->surface_state_stream,
                                     template,
                                     pData);

   anv_cmd_buffer_bind_descriptor_set(cmd_buffer, template->bind_point,
                                      layout, _set, set, NULL, NULL);
}
void anv_CmdSetDeviceMask(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    deviceMask)
{
   /* No-op */
}