/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_private.h"

/** \file anv_cmd_buffer.c
 *
 * This file contains all of the stuff for emitting commands into a command
 * buffer.  This includes implementations of most of the vkCmd*
 * entrypoints.  This file is concerned entirely with state emission and
 * not with the command buffer data structure itself.  As far as this file
 * is concerned, most of anv_cmd_buffer is magic.
 */

/* TODO: These are taken from GLES.  We should check the Vulkan spec */
const struct anv_dynamic_state default_dynamic_state = {
   .blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
   .stencil_compare_mask = {
      .front = ~0u,
      .back = ~0u,
   },
   .stencil_write_mask = {
      .front = ~0u,
      .back = ~0u,
   },
   .stencil_reference = {
      .front = 0u,
      .back = 0u,
   },
};

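/* Copies the pieces of dynamic state selected by copy_mask from src to
 * dest; each bit in copy_mask corresponds to a VK_DYNAMIC_STATE_* enum
 * value.  For example, a copy_mask of (1 << VK_DYNAMIC_STATE_LINE_WIDTH)
 * copies only the line width.
 */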
void
anv_dynamic_state_copy(struct anv_dynamic_state *dest,
                       const struct anv_dynamic_state *src,
                       uint32_t copy_mask)
{
   if (copy_mask & (1 << VK_DYNAMIC_STATE_VIEWPORT)) {
      dest->viewport.count = src->viewport.count;
      typed_memcpy(dest->viewport.viewports, src->viewport.viewports,
                   src->viewport.count);
   }

   if (copy_mask & (1 << VK_DYNAMIC_STATE_SCISSOR)) {
      dest->scissor.count = src->scissor.count;
      typed_memcpy(dest->scissor.scissors, src->scissor.scissors,
                   src->scissor.count);
   }

   if (copy_mask & (1 << VK_DYNAMIC_STATE_LINE_WIDTH))
      dest->line_width = src->line_width;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS))
      dest->depth_bias = src->depth_bias;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS))
      typed_memcpy(dest->blend_constants, src->blend_constants, 4);

   if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS))
      dest->depth_bounds = src->depth_bounds;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK))
      dest->stencil_compare_mask = src->stencil_compare_mask;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK))
      dest->stencil_write_mask = src->stencil_write_mask;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE))
      dest->stencil_reference = src->stencil_reference;
}

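/* Resets the CPU-side tracking in anv_cmd_state back to its defaults.
 * Called from anv_ResetCommandBuffer, and therefore also by the implicit
 * reset that anv_BeginCommandBuffer performs.
 */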
static void
anv_cmd_state_reset(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_cmd_state *state = &cmd_buffer->state;

   memset(&state->descriptors, 0, sizeof(state->descriptors));
   memset(&state->push_constants, 0, sizeof(state->push_constants));
   memset(state->binding_tables, 0, sizeof(state->binding_tables));
   memset(state->samplers, 0, sizeof(state->samplers));

   /* 0 isn't a valid config.  This ensures that we always configure L3$. */
   cmd_buffer->state.current_l3_config = 0;

   state->descriptors_dirty = 0;
   state->push_constants_dirty = 0;
   state->pipeline = NULL;
   state->restart_index = UINT32_MAX;
   state->dynamic = default_dynamic_state;
   state->need_query_wa = true;

   if (state->attachments != NULL) {
      anv_free(&cmd_buffer->pool->alloc, state->attachments);
      state->attachments = NULL;
   }

   state->gen7.index_buffer = NULL;
}

/**
 * Setup anv_cmd_state::attachments for vkCmdBeginRenderPass.
 */
void
anv_cmd_state_setup_attachments(struct anv_cmd_buffer *cmd_buffer,
                                const VkRenderPassBeginInfo *info)
{
   struct anv_cmd_state *state = &cmd_buffer->state;
   ANV_FROM_HANDLE(anv_render_pass, pass, info->renderPass);

   anv_free(&cmd_buffer->pool->alloc, state->attachments);

   if (pass->attachment_count == 0) {
      state->attachments = NULL;
      return;
   }

   state->attachments = anv_alloc(&cmd_buffer->pool->alloc,
                                  pass->attachment_count *
                                       sizeof(state->attachments[0]),
                                  8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (state->attachments == NULL) {
      /* FIXME: Propagate VK_ERROR_OUT_OF_HOST_MEMORY to vkEndCommandBuffer */
      abort();
   }

   for (uint32_t i = 0; i < pass->attachment_count; ++i) {
      struct anv_render_pass_attachment *att = &pass->attachments[i];
      VkImageAspectFlags clear_aspects = 0;

      if (anv_format_is_color(att->format)) {
         /* color attachment */
         if (att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
            clear_aspects |= VK_IMAGE_ASPECT_COLOR_BIT;
         }
      } else {
         /* depthstencil attachment */
         if (att->format->depth_format &&
             att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
            clear_aspects |= VK_IMAGE_ASPECT_DEPTH_BIT;
         }
         if (att->format->has_stencil &&
             att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
            clear_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
         }
      }

      state->attachments[i].pending_clear_aspects = clear_aspects;
      if (clear_aspects) {
         assert(info->clearValueCount > i);
         state->attachments[i].clear_value = info->pClearValues[i];
      }
   }
}

static VkResult
anv_cmd_buffer_ensure_push_constants_size(struct anv_cmd_buffer *cmd_buffer,
                                          gl_shader_stage stage, uint32_t size)
{
   struct anv_push_constants **ptr = &cmd_buffer->state.push_constants[stage];

   if (*ptr == NULL) {
      *ptr = anv_alloc(&cmd_buffer->pool->alloc, size, 8,
                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (*ptr == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   } else if ((*ptr)->size < size) {
      *ptr = anv_realloc(&cmd_buffer->pool->alloc, *ptr, size, 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (*ptr == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   }
   (*ptr)->size = size;

   return VK_SUCCESS;
}

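/* Convenience wrapper around anv_cmd_buffer_ensure_push_constants_size():
 * sizes the allocation just large enough to cover `field`, i.e.
 * offsetof(field) + sizeof(field).  A typical use, as seen later in this
 * file, is
 *
 *    anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, dynamic);
 */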
#define anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, field) \
   anv_cmd_buffer_ensure_push_constants_size(cmd_buffer, stage, \
      (offsetof(struct anv_push_constants, field) + \
       sizeof(cmd_buffer->state.push_constants[0]->field)))

static VkResult anv_create_cmd_buffer(
    struct anv_device *                         device,
    struct anv_cmd_pool *                       pool,
    VkCommandBufferLevel                        level,
    VkCommandBuffer*                            pCommandBuffer)
{
   struct anv_cmd_buffer *cmd_buffer;
   VkResult result;

   cmd_buffer = anv_alloc(&pool->alloc, sizeof(*cmd_buffer), 8,
                          VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (cmd_buffer == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   cmd_buffer->device = device;
   cmd_buffer->pool = pool;
   cmd_buffer->level = level;
   cmd_buffer->state.attachments = NULL;

   result = anv_cmd_buffer_init_batch_bo_chain(cmd_buffer);
   if (result != VK_SUCCESS)
      goto fail;

   anv_state_stream_init(&cmd_buffer->surface_state_stream,
                         &device->surface_state_block_pool);
   anv_state_stream_init(&cmd_buffer->dynamic_state_stream,
                         &device->dynamic_state_block_pool);

   if (pool) {
      list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
   } else {
      /* Init the pool_link so we can safely call list_del when we destroy
       * the command buffer
       */
      list_inithead(&cmd_buffer->pool_link);
   }

   *pCommandBuffer = anv_cmd_buffer_to_handle(cmd_buffer);

   return VK_SUCCESS;

 fail:
   anv_free(&cmd_buffer->pool->alloc, cmd_buffer);

   return result;
}

VkResult anv_AllocateCommandBuffers(
    VkDevice                                    _device,
    const VkCommandBufferAllocateInfo*          pAllocateInfo,
    VkCommandBuffer*                            pCommandBuffers)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_cmd_pool, pool, pAllocateInfo->commandPool);

   VkResult result = VK_SUCCESS;
   uint32_t i;

   for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
      result = anv_create_cmd_buffer(device, pool, pAllocateInfo->level,
                                     &pCommandBuffers[i]);
      if (result != VK_SUCCESS)
         break;
   }

   if (result != VK_SUCCESS)
      anv_FreeCommandBuffers(_device, pAllocateInfo->commandPool,
                             i, pCommandBuffers);

   return result;
}

static void
anv_cmd_buffer_destroy(struct anv_cmd_buffer *cmd_buffer)
{
   list_del(&cmd_buffer->pool_link);

   anv_cmd_buffer_fini_batch_bo_chain(cmd_buffer);

   anv_state_stream_finish(&cmd_buffer->surface_state_stream);
   anv_state_stream_finish(&cmd_buffer->dynamic_state_stream);

   anv_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);
   anv_free(&cmd_buffer->pool->alloc, cmd_buffer);
}

void anv_FreeCommandBuffers(
    VkDevice                                    device,
    VkCommandPool                               commandPool,
    uint32_t                                    commandBufferCount,
    const VkCommandBuffer*                      pCommandBuffers)
{
   for (uint32_t i = 0; i < commandBufferCount; i++) {
      ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, pCommandBuffers[i]);

      anv_cmd_buffer_destroy(cmd_buffer);
   }
}

VkResult anv_ResetCommandBuffer(
    VkCommandBuffer                             commandBuffer,
    VkCommandBufferResetFlags                   flags)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->usage_flags = 0;
   cmd_buffer->state.current_pipeline = UINT32_MAX;
   anv_cmd_buffer_reset_batch_bo_chain(cmd_buffer);
   anv_cmd_state_reset(cmd_buffer);

   return VK_SUCCESS;
}

void
anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer)
{
   switch (cmd_buffer->device->info.gen) {
   case 7:
      if (cmd_buffer->device->info.is_haswell)
         return gen75_cmd_buffer_emit_state_base_address(cmd_buffer);
      else
         return gen7_cmd_buffer_emit_state_base_address(cmd_buffer);
   case 8:
      return gen8_cmd_buffer_emit_state_base_address(cmd_buffer);
   case 9:
      return gen9_cmd_buffer_emit_state_base_address(cmd_buffer);
   default:
      unreachable("unsupported gen\n");
   }
}

VkResult anv_BeginCommandBuffer(
    VkCommandBuffer                             commandBuffer,
    const VkCommandBufferBeginInfo*             pBeginInfo)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   /* If this is the first vkBeginCommandBuffer, we must *initialize* the
    * command buffer's state. Otherwise, we must *reset* its state. In both
    * cases we reset it.
    *
    * From the Vulkan 1.0 spec:
    *
    *    If a command buffer is in the executable state and the command buffer
    *    was allocated from a command pool with the
    *    VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT flag set, then
    *    vkBeginCommandBuffer implicitly resets the command buffer, behaving
    *    as if vkResetCommandBuffer had been called with
    *    VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT not set. It then puts
    *    the command buffer in the recording state.
    */
   anv_ResetCommandBuffer(commandBuffer, /*flags*/ 0);

   cmd_buffer->usage_flags = pBeginInfo->flags;

   assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY ||
          !(cmd_buffer->usage_flags &
            VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT));

   anv_cmd_buffer_emit_state_base_address(cmd_buffer);

   if (cmd_buffer->usage_flags &
       VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
      cmd_buffer->state.framebuffer =
         anv_framebuffer_from_handle(pBeginInfo->pInheritanceInfo->framebuffer);
      cmd_buffer->state.pass =
         anv_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);

      struct anv_subpass *subpass =
         &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];

      anv_cmd_buffer_set_subpass(cmd_buffer, subpass);
   }

   return VK_SUCCESS;
}

VkResult anv_EndCommandBuffer(
    VkCommandBuffer                             commandBuffer)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   struct anv_device *device = cmd_buffer->device;

   anv_cmd_buffer_end_batch_buffer(cmd_buffer);

   if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
      /* The algorithm used to compute the validate list is not threadsafe as
       * it uses the bo->index field.  We have to lock the device around it.
       * Fortunately, the chances for contention here are probably very low.
       */
      pthread_mutex_lock(&device->mutex);
      anv_cmd_buffer_prepare_execbuf(cmd_buffer);
      pthread_mutex_unlock(&device->mutex);
   }

   return VK_SUCCESS;
}

void anv_CmdBindPipeline(
    VkCommandBuffer                             commandBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    VkPipeline                                  _pipeline)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);

   switch (pipelineBindPoint) {
   case VK_PIPELINE_BIND_POINT_COMPUTE:
      cmd_buffer->state.compute_pipeline = pipeline;
      cmd_buffer->state.compute_dirty |= ANV_CMD_DIRTY_PIPELINE;
      cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
      break;

   case VK_PIPELINE_BIND_POINT_GRAPHICS:
      cmd_buffer->state.pipeline = pipeline;
      cmd_buffer->state.vb_dirty |= pipeline->vb_used;
      cmd_buffer->state.dirty |= ANV_CMD_DIRTY_PIPELINE;
      cmd_buffer->state.push_constants_dirty |= pipeline->active_stages;

      /* Apply the dynamic state from the pipeline */
      cmd_buffer->state.dirty |= pipeline->dynamic_state_mask;
      anv_dynamic_state_copy(&cmd_buffer->state.dynamic,
                             &pipeline->dynamic_state,
                             pipeline->dynamic_state_mask);
      break;

   default:
      assert(!"invalid bind point");
      break;
   }
}

void anv_CmdSetViewport(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    firstViewport,
    uint32_t                                    viewportCount,
    const VkViewport*                           pViewports)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   const uint32_t total_count = firstViewport + viewportCount;
   if (cmd_buffer->state.dynamic.viewport.count < total_count)
      cmd_buffer->state.dynamic.viewport.count = total_count;

   memcpy(cmd_buffer->state.dynamic.viewport.viewports + firstViewport,
          pViewports, viewportCount * sizeof(*pViewports));

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_VIEWPORT;
}

void anv_CmdSetScissor(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    firstScissor,
    uint32_t                                    scissorCount,
    const VkRect2D*                             pScissors)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   const uint32_t total_count = firstScissor + scissorCount;
   if (cmd_buffer->state.dynamic.scissor.count < total_count)
      cmd_buffer->state.dynamic.scissor.count = total_count;

   memcpy(cmd_buffer->state.dynamic.scissor.scissors + firstScissor,
          pScissors, scissorCount * sizeof(*pScissors));

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_SCISSOR;
}

void anv_CmdSetLineWidth(
    VkCommandBuffer                             commandBuffer,
    float                                       lineWidth)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.dynamic.line_width = lineWidth;
   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
}

void anv_CmdSetDepthBias(
    VkCommandBuffer                             commandBuffer,
    float                                       depthBiasConstantFactor,
    float                                       depthBiasClamp,
    float                                       depthBiasSlopeFactor)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.dynamic.depth_bias.bias = depthBiasConstantFactor;
   cmd_buffer->state.dynamic.depth_bias.clamp = depthBiasClamp;
   cmd_buffer->state.dynamic.depth_bias.slope = depthBiasSlopeFactor;

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
}

void anv_CmdSetBlendConstants(
    VkCommandBuffer                             commandBuffer,
    const float                                 blendConstants[4])
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   memcpy(cmd_buffer->state.dynamic.blend_constants,
          blendConstants, sizeof(float) * 4);

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS;
}

void anv_CmdSetDepthBounds(
    VkCommandBuffer                             commandBuffer,
    float                                       minDepthBounds,
    float                                       maxDepthBounds)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.dynamic.depth_bounds.min = minDepthBounds;
   cmd_buffer->state.dynamic.depth_bounds.max = maxDepthBounds;

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS;
}

void anv_CmdSetStencilCompareMask(
    VkCommandBuffer                             commandBuffer,
    VkStencilFaceFlags                          faceMask,
    uint32_t                                    compareMask)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd_buffer->state.dynamic.stencil_compare_mask.front = compareMask;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd_buffer->state.dynamic.stencil_compare_mask.back = compareMask;

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
}

void anv_CmdSetStencilWriteMask(
    VkCommandBuffer                             commandBuffer,
    VkStencilFaceFlags                          faceMask,
    uint32_t                                    writeMask)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd_buffer->state.dynamic.stencil_write_mask.front = writeMask;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd_buffer->state.dynamic.stencil_write_mask.back = writeMask;

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
}

void anv_CmdSetStencilReference(
    VkCommandBuffer                             commandBuffer,
    VkStencilFaceFlags                          faceMask,
    uint32_t                                    reference)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd_buffer->state.dynamic.stencil_reference.front = reference;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd_buffer->state.dynamic.stencil_reference.back = reference;

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
}

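/* Binding a descriptor set does two things: it points the per-stage
 * descriptor table at the set, and, for sets that contain dynamic buffers,
 * it snapshots the dynamic offsets (and buffer-view ranges) into the
 * per-stage push constant block, since those are resolved at bind time
 * rather than stored in the set itself.
 */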
void anv_CmdBindDescriptorSets(
    VkCommandBuffer                             commandBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    VkPipelineLayout                            _layout,
    uint32_t                                    firstSet,
    uint32_t                                    descriptorSetCount,
    const VkDescriptorSet*                      pDescriptorSets,
    uint32_t                                    dynamicOffsetCount,
    const uint32_t*                             pDynamicOffsets)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_pipeline_layout, layout, _layout);
   struct anv_descriptor_set_layout *set_layout;

   assert(firstSet + descriptorSetCount < MAX_SETS);

   uint32_t dynamic_slot = 0;
   for (uint32_t i = 0; i < descriptorSetCount; i++) {
      ANV_FROM_HANDLE(anv_descriptor_set, set, pDescriptorSets[i]);
      set_layout = layout->set[firstSet + i].layout;

      if (cmd_buffer->state.descriptors[firstSet + i] != set) {
         cmd_buffer->state.descriptors[firstSet + i] = set;
         cmd_buffer->state.descriptors_dirty |= set_layout->shader_stages;
      }

      if (set_layout->dynamic_offset_count > 0) {
         anv_foreach_stage(s, set_layout->shader_stages) {
            anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, s, dynamic);

            struct anv_push_constants *push =
               cmd_buffer->state.push_constants[s];

            unsigned d = layout->set[firstSet + i].dynamic_offset_start;
            const uint32_t *offsets = pDynamicOffsets + dynamic_slot;
            struct anv_descriptor *desc = set->descriptors;

            for (unsigned b = 0; b < set_layout->binding_count; b++) {
               if (set_layout->binding[b].dynamic_offset_index < 0)
                  continue;

               unsigned array_size = set_layout->binding[b].array_size;
               for (unsigned j = 0; j < array_size; j++) {
                  uint32_t range = 0;
                  if (desc->buffer_view)
                     range = desc->buffer_view->range;
                  push->dynamic[d].offset = *(offsets++);
                  push->dynamic[d].range = range;
                  desc++;
                  d++;
               }
            }
         }
         cmd_buffer->state.push_constants_dirty |= set_layout->shader_stages;
      }
   }
}

void anv_CmdBindVertexBuffers(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    firstBinding,
    uint32_t                                    bindingCount,
    const VkBuffer*                             pBuffers,
    const VkDeviceSize*                         pOffsets)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   struct anv_vertex_binding *vb = cmd_buffer->state.vertex_bindings;

   /* We have to defer setting up vertex buffer since we need the buffer
    * stride from the pipeline. */

   assert(firstBinding + bindingCount < MAX_VBS);
   for (uint32_t i = 0; i < bindingCount; i++) {
      vb[firstBinding + i].buffer = anv_buffer_from_handle(pBuffers[i]);
      vb[firstBinding + i].offset = pOffsets[i];
      cmd_buffer->state.vb_dirty |= 1 << (firstBinding + i);
   }
}

static void
add_surface_state_reloc(struct anv_cmd_buffer *cmd_buffer,
                        struct anv_state state,
                        struct anv_bo *bo, uint32_t offset)
{
   /* The address goes in SURFACE_STATE dword 1 for gens < 8 and dwords 8 and
    * 9 for gen8+.  We only write the first dword for gen8+ here and rely on
    * the initial state to set the high bits to 0. */

   const uint32_t dword = cmd_buffer->device->info.gen < 8 ? 1 : 8;

   anv_reloc_list_add(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc,
                      state.offset + dword * 4, bo, offset);
}

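/* Picks the surface format used when building buffer surface states for a
 * descriptor: a typed RGBA32_SFLOAT view for uniform buffers and an
 * untyped (VK_FORMAT_UNDEFINED) view for storage buffers.
 */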
const struct anv_format *
anv_format_for_descriptor_type(VkDescriptorType type)
{
   switch (type) {
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      return anv_format_for_vk_format(VK_FORMAT_R32G32B32A32_SFLOAT);

   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
      return anv_format_for_vk_format(VK_FORMAT_UNDEFINED);

   default:
      unreachable("Invalid descriptor type");
   }
}

VkResult
anv_cmd_buffer_emit_binding_table(struct anv_cmd_buffer *cmd_buffer,
                                  gl_shader_stage stage,
                                  struct anv_state *bt_state)
{
   struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
   struct anv_subpass *subpass = cmd_buffer->state.subpass;
   struct anv_pipeline_layout *layout;
   uint32_t color_count, bias, state_offset;

   switch (stage) {
   case MESA_SHADER_FRAGMENT:
      layout = cmd_buffer->state.pipeline->layout;
      bias = MAX_RTS;
      color_count = subpass->color_count;
      break;
   case MESA_SHADER_COMPUTE:
      layout = cmd_buffer->state.compute_pipeline->layout;
      bias = 1;
      color_count = 0;
      break;
   default:
      layout = cmd_buffer->state.pipeline->layout;
      bias = 0;
      color_count = 0;
      break;
   }

   /* This is a little awkward: layout can be NULL but we still have to
    * allocate and set a binding table for the PS stage for render
    * targets. */
   uint32_t surface_count = layout ? layout->stage[stage].surface_count : 0;

   if (color_count + surface_count == 0) {
      *bt_state = (struct anv_state) { 0, };
      return VK_SUCCESS;
   }

   *bt_state = anv_cmd_buffer_alloc_binding_table(cmd_buffer,
                                                  bias + surface_count,
                                                  &state_offset);
   uint32_t *bt_map = bt_state->map;

   if (bt_state->map == NULL)
      return VK_ERROR_OUT_OF_DEVICE_MEMORY;

   for (uint32_t a = 0; a < color_count; a++) {
      const struct anv_image_view *iview =
         fb->attachments[subpass->color_attachments[a]];

      assert(iview->color_rt_surface_state.alloc_size);
      bt_map[a] = iview->color_rt_surface_state.offset + state_offset;
      add_surface_state_reloc(cmd_buffer, iview->color_rt_surface_state,
                              iview->bo, iview->offset);
   }

   if (stage == MESA_SHADER_COMPUTE &&
       cmd_buffer->state.compute_pipeline->cs_prog_data.uses_num_work_groups) {
      struct anv_bo *bo = cmd_buffer->state.num_workgroups_bo;
      uint32_t bo_offset = cmd_buffer->state.num_workgroups_offset;

      struct anv_state surface_state;
      surface_state =
         anv_cmd_buffer_alloc_surface_state(cmd_buffer);

      const struct anv_format *format =
         anv_format_for_descriptor_type(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
      anv_fill_buffer_surface_state(cmd_buffer->device, surface_state,
                                    format->surface_format, bo_offset, 12, 1);

      bt_map[0] = surface_state.offset + state_offset;
      add_surface_state_reloc(cmd_buffer, surface_state, bo, bo_offset);
   }

   if (layout == NULL)
      goto out;

   if (layout->stage[stage].image_count > 0) {
      VkResult result =
         anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, images);
      if (result != VK_SUCCESS)
         return result;

      cmd_buffer->state.push_constants_dirty |= 1 << stage;
   }

   uint32_t image = 0;
   for (uint32_t s = 0; s < layout->stage[stage].surface_count; s++) {
      struct anv_pipeline_binding *binding =
         &layout->stage[stage].surface_to_descriptor[s];
      struct anv_descriptor_set *set =
         cmd_buffer->state.descriptors[binding->set];
      struct anv_descriptor *desc = &set->descriptors[binding->offset];

      struct anv_state surface_state;
      struct anv_bo *bo;
      uint32_t bo_offset;

      switch (desc->type) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
         /* Nothing for us to do here */
         continue;

      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         surface_state = desc->image_view->sampler_surface_state;
         assert(surface_state.alloc_size);
         bo = desc->image_view->bo;
         bo_offset = desc->image_view->offset;
         break;

      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
         surface_state = desc->image_view->storage_surface_state;
         assert(surface_state.alloc_size);
         bo = desc->image_view->bo;
         bo_offset = desc->image_view->offset;

         struct brw_image_param *image_param =
            &cmd_buffer->state.push_constants[stage]->images[image++];

         anv_image_view_fill_image_param(cmd_buffer->device, desc->image_view,
                                         image_param);
         image_param->surface_idx = bias + s;
         break;
      }

      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
         surface_state = desc->buffer_view->surface_state;
         assert(surface_state.alloc_size);
         bo = desc->buffer_view->bo;
         bo_offset = desc->buffer_view->offset;
         break;

      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: {
         surface_state = desc->buffer_view->storage_surface_state;
         assert(surface_state.alloc_size);
         bo = desc->buffer_view->bo;
         bo_offset = desc->buffer_view->offset;

         struct brw_image_param *image_param =
            &cmd_buffer->state.push_constants[stage]->images[image++];

         anv_buffer_view_fill_image_param(cmd_buffer->device, desc->buffer_view,
                                          image_param);
         image_param->surface_idx = bias + s;
         break;
      }

      default:
         assert(!"Invalid descriptor type");
         continue;
      }

      bt_map[bias + s] = surface_state.offset + state_offset;
      add_surface_state_reloc(cmd_buffer, surface_state, bo, bo_offset);
   }
   assert(image == layout->stage[stage].image_count);

 out:
   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(*bt_state);

   return VK_SUCCESS;
}

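/* Emits the SAMPLER_STATE table for one stage.  Each sampler occupies 16
 * bytes (four dwords) of dynamic state, indexed to line up with the
 * stage's sampler_to_descriptor map.
 */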
VkResult
anv_cmd_buffer_emit_samplers(struct anv_cmd_buffer *cmd_buffer,
                             gl_shader_stage stage, struct anv_state *state)
{
   struct anv_pipeline_layout *layout;
   uint32_t sampler_count;

   if (stage == MESA_SHADER_COMPUTE)
      layout = cmd_buffer->state.compute_pipeline->layout;
   else
      layout = cmd_buffer->state.pipeline->layout;

   sampler_count = layout ? layout->stage[stage].sampler_count : 0;
   if (sampler_count == 0) {
      *state = (struct anv_state) { 0, };
      return VK_SUCCESS;
   }

   uint32_t size = sampler_count * 16;
   *state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, 32);

   if (state->map == NULL)
      return VK_ERROR_OUT_OF_DEVICE_MEMORY;

   for (uint32_t s = 0; s < layout->stage[stage].sampler_count; s++) {
      struct anv_pipeline_binding *binding =
         &layout->stage[stage].sampler_to_descriptor[s];
      struct anv_descriptor_set *set =
         cmd_buffer->state.descriptors[binding->set];
      struct anv_descriptor *desc = &set->descriptors[binding->offset];

      if (desc->type != VK_DESCRIPTOR_TYPE_SAMPLER &&
          desc->type != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
         continue;

      struct anv_sampler *sampler = desc->sampler;

      /* This can happen if we have an unfilled slot since TYPE_SAMPLER
       * happens to be zero.
       */
      if (sampler == NULL)
         continue;

      memcpy(state->map + (s * 16),
             sampler->state, sizeof(sampler->state));
   }

   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(*state);

   return VK_SUCCESS;
}

struct anv_state
anv_cmd_buffer_emit_dynamic(struct anv_cmd_buffer *cmd_buffer,
                            const void *data, uint32_t size, uint32_t alignment)
{
   struct anv_state state;

   state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, alignment);
   memcpy(state.map, data, size);

   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(state);

   VG(VALGRIND_CHECK_MEM_IS_DEFINED(state.map, size));

   return state;
}

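/* ORs two dword streams into freshly allocated dynamic state.  The gen
 * backends use this pattern to merge a pipeline's pre-baked packet dwords
 * with dwords that depend on dynamic state, though any two equal-length
 * streams work.
 */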
struct anv_state
anv_cmd_buffer_merge_dynamic(struct anv_cmd_buffer *cmd_buffer,
                             uint32_t *a, uint32_t *b,
                             uint32_t dwords, uint32_t alignment)
{
   struct anv_state state;
   uint32_t *p;

   state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                              dwords * 4, alignment);
   p = state.map;
   for (uint32_t i = 0; i < dwords; i++)
      p[i] = a[i] | b[i];

   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(state);

   VG(VALGRIND_CHECK_MEM_IS_DEFINED(p, dwords * 4));

   return state;
}

/**
 * @brief Setup the command buffer for recording commands inside the given
 * subpass.
 *
 * This does not record all commands needed for starting the subpass.
 * Starting the subpass may require additional commands.
 *
 * Note that vkCmdBeginRenderPass, vkCmdNextSubpass, and vkBeginCommandBuffer
 * with VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT, all setup the
 * command buffer for recording commands for some subpass.  But only the first
 * two, vkCmdBeginRenderPass and vkCmdNextSubpass, can start a subpass.
 */
void
anv_cmd_buffer_set_subpass(struct anv_cmd_buffer *cmd_buffer,
                           struct anv_subpass *subpass)
{
   switch (cmd_buffer->device->info.gen) {
   case 7:
      gen7_cmd_buffer_set_subpass(cmd_buffer, subpass);
      break;
   case 8:
      gen8_cmd_buffer_set_subpass(cmd_buffer, subpass);
      break;
   case 9:
      gen9_cmd_buffer_set_subpass(cmd_buffer, subpass);
      break;
   default:
      unreachable("unsupported gen\n");
   }
}

struct anv_state
anv_cmd_buffer_push_constants(struct anv_cmd_buffer *cmd_buffer,
                              gl_shader_stage stage)
{
   struct anv_push_constants *data =
      cmd_buffer->state.push_constants[stage];
   struct brw_stage_prog_data *prog_data =
      cmd_buffer->state.pipeline->prog_data[stage];

   /* If we don't actually have any push constants, bail. */
   if (data == NULL || prog_data->nr_params == 0)
      return (struct anv_state) { .offset = 0 };

   struct anv_state state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                         prog_data->nr_params * sizeof(float),
                                         32 /* bottom 5 bits MBZ */);

   /* Walk through the param array and fill the buffer with data */
   uint32_t *u32_map = state.map;
   for (unsigned i = 0; i < prog_data->nr_params; i++) {
      uint32_t offset = (uintptr_t)prog_data->param[i];
      u32_map[i] = *(uint32_t *)((uint8_t *)data + offset);
   }

   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(state);

   return state;
}

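/* The compute path differs from the 3D path above: each hardware thread
 * gets its own register-aligned block holding that thread's local
 * invocation IDs followed by a copy of the uniform data, which is why the
 * allocation below is scaled by cs_thread_width_max.
 */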
struct anv_state
anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_push_constants *data =
      cmd_buffer->state.push_constants[MESA_SHADER_COMPUTE];
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   const struct brw_cs_prog_data *cs_prog_data = &pipeline->cs_prog_data;
   const struct brw_stage_prog_data *prog_data = &cs_prog_data->base;

   const unsigned local_id_dwords = cs_prog_data->local_invocation_id_regs * 8;
   const unsigned push_constant_data_size =
      (local_id_dwords + prog_data->nr_params) * 4;
   const unsigned reg_aligned_constant_size = ALIGN(push_constant_data_size, 32);
   const unsigned param_aligned_count =
      reg_aligned_constant_size / sizeof(uint32_t);

   /* If we don't actually have any push constants, bail. */
   if (reg_aligned_constant_size == 0)
      return (struct anv_state) { .offset = 0 };

   const unsigned threads = pipeline->cs_thread_width_max;
   const unsigned total_push_constants_size =
      reg_aligned_constant_size * threads;
   const unsigned push_constant_alignment =
      cmd_buffer->device->info.gen < 8 ? 32 : 64;
   const unsigned aligned_total_push_constants_size =
      ALIGN(total_push_constants_size, push_constant_alignment);
   struct anv_state state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                         aligned_total_push_constants_size,
                                         push_constant_alignment);

   /* Walk through the param array and fill the buffer with data */
   uint32_t *u32_map = state.map;

   brw_cs_fill_local_id_payload(cs_prog_data, u32_map, threads,
                                reg_aligned_constant_size);

   /* Setup uniform data for the first thread */
   for (unsigned i = 0; i < prog_data->nr_params; i++) {
      uint32_t offset = (uintptr_t)prog_data->param[i];
      u32_map[local_id_dwords + i] = *(uint32_t *)((uint8_t *)data + offset);
   }

   /* Copy uniform data from the first thread to every other thread */
   const size_t uniform_data_size = prog_data->nr_params * sizeof(uint32_t);
   for (unsigned t = 1; t < threads; t++) {
      memcpy(&u32_map[t * param_aligned_count + local_id_dwords],
             &u32_map[local_id_dwords],
             uniform_data_size);
   }

   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(state);

   return state;
}

void anv_CmdPushConstants(
    VkCommandBuffer                             commandBuffer,
    VkPipelineLayout                            layout,
    VkShaderStageFlags                          stageFlags,
    uint32_t                                    offset,
    uint32_t                                    size,
    const void*                                 pValues)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   anv_foreach_stage(stage, stageFlags) {
      anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, client_data);

      memcpy(cmd_buffer->state.push_constants[stage]->client_data + offset,
             pValues, size);
   }

   cmd_buffer->state.push_constants_dirty |= stageFlags;
}

void anv_CmdExecuteCommands(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    commandBufferCount,
    const VkCommandBuffer*                      pCmdBuffers)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, primary, commandBuffer);

   assert(primary->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);

   for (uint32_t i = 0; i < commandBufferCount; i++) {
      ANV_FROM_HANDLE(anv_cmd_buffer, secondary, pCmdBuffers[i]);

      assert(secondary->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);

      anv_cmd_buffer_add_secondary(primary, secondary);
   }
}

VkResult anv_CreateCommandPool(
    VkDevice                                    _device,
    const VkCommandPoolCreateInfo*              pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkCommandPool*                              pCmdPool)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_cmd_pool *pool;

   pool = anv_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
                     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (pool == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   if (pAllocator)
      pool->alloc = *pAllocator;
   else
      pool->alloc = device->alloc;

   list_inithead(&pool->cmd_buffers);

   *pCmdPool = anv_cmd_pool_to_handle(pool);

   return VK_SUCCESS;
}

void anv_DestroyCommandPool(
    VkDevice                                    _device,
    VkCommandPool                               commandPool,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_cmd_pool, pool, commandPool);

   anv_ResetCommandPool(_device, commandPool, 0);

   anv_free2(&device->alloc, pAllocator, pool);
}

VkResult anv_ResetCommandPool(
    VkDevice                                    device,
    VkCommandPool                               commandPool,
    VkCommandPoolResetFlags                     flags)
{
   ANV_FROM_HANDLE(anv_cmd_pool, pool, commandPool);

   /* FIXME: vkResetCommandPool must not destroy its command buffers. The
    * Vulkan 1.0 spec requires that it only reset them:
    *
    *    Resetting a command pool recycles all of the resources from all of
    *    the command buffers allocated from the command pool back to the
    *    command pool. All command buffers that have been allocated from the
    *    command pool are put in the initial state.
    */
   list_for_each_entry_safe(struct anv_cmd_buffer, cmd_buffer,
                            &pool->cmd_buffers, pool_link) {
      anv_cmd_buffer_destroy(cmd_buffer);
   }

   return VK_SUCCESS;
}

/**
 * Return NULL if the current subpass has no depthstencil attachment.
 */
const struct anv_image_view *
anv_cmd_buffer_get_depth_stencil_view(const struct anv_cmd_buffer *cmd_buffer)
{
   const struct anv_subpass *subpass = cmd_buffer->state.subpass;
   const struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;

   if (subpass->depth_stencil_attachment == VK_ATTACHMENT_UNUSED)
      return NULL;

   const struct anv_image_view *iview =
      fb->attachments[subpass->depth_stencil_attachment];

   assert(iview->aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT |
                                VK_IMAGE_ASPECT_STENCIL_BIT));

   return iview;
}