2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
5 * based in part on anv driver which is:
6 * Copyright © 2015 Intel Corporation
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
28 #include "tu_private.h"
30 #include "vk_format.h"
31 #include "adreno_pm4.xml.h"
35 tu_bo_list_init(struct tu_bo_list
*list
)
37 list
->count
= list
->capacity
= 0;
38 list
->bo_infos
= NULL
;
42 tu_bo_list_destroy(struct tu_bo_list
*list
)
48 tu_bo_list_reset(struct tu_bo_list
*list
)
54 * \a flags consists of MSM_SUBMIT_BO_FLAGS.
57 tu_bo_list_add_info(struct tu_bo_list
*list
,
58 const struct drm_msm_gem_submit_bo
*bo_info
)
60 for (uint32_t i
= 0; i
< list
->count
; ++i
) {
61 if (list
->bo_infos
[i
].handle
== bo_info
->handle
) {
62 assert(list
->bo_infos
[i
].presumed
== bo_info
->presumed
);
63 list
->bo_infos
[i
].flags
|= bo_info
->flags
;
68 /* grow list->bo_infos if needed */
69 if (list
->count
== list
->capacity
) {
70 uint32_t new_capacity
= MAX2(2 * list
->count
, 16);
71 struct drm_msm_gem_submit_bo
*new_bo_infos
= realloc(
72 list
->bo_infos
, new_capacity
* sizeof(struct drm_msm_gem_submit_bo
));
74 return TU_BO_LIST_FAILED
;
75 list
->bo_infos
= new_bo_infos
;
76 list
->capacity
= new_capacity
;
79 list
->bo_infos
[list
->count
] = *bo_info
;
84 tu_bo_list_add(struct tu_bo_list
*list
,
85 const struct tu_bo
*bo
,
88 return tu_bo_list_add_info(list
, &(struct drm_msm_gem_submit_bo
) {
90 .handle
= bo
->gem_handle
,
96 tu_bo_list_merge(struct tu_bo_list
*list
, const struct tu_bo_list
*other
)
98 for (uint32_t i
= 0; i
< other
->count
; i
++) {
99 if (tu_bo_list_add_info(list
, other
->bo_infos
+ i
) == TU_BO_LIST_FAILED
)
100 return VK_ERROR_OUT_OF_HOST_MEMORY
;
106 const struct tu_dynamic_state default_dynamic_state
= {
122 .blend_constants
= { 0.0f
, 0.0f
, 0.0f
, 0.0f
},
128 .stencil_compare_mask
=
133 .stencil_write_mask
=
145 static void UNUSED
/* FINISHME */
146 tu_bind_dynamic_state(struct tu_cmd_buffer
*cmd_buffer
,
147 const struct tu_dynamic_state
*src
)
149 struct tu_dynamic_state
*dest
= &cmd_buffer
->state
.dynamic
;
150 uint32_t copy_mask
= src
->mask
;
151 uint32_t dest_mask
= 0;
153 tu_use_args(cmd_buffer
); /* FINISHME */
155 /* Make sure to copy the number of viewports/scissors because they can
156 * only be specified at pipeline creation time.
158 dest
->viewport
.count
= src
->viewport
.count
;
159 dest
->scissor
.count
= src
->scissor
.count
;
160 dest
->discard_rectangle
.count
= src
->discard_rectangle
.count
;
162 if (copy_mask
& TU_DYNAMIC_VIEWPORT
) {
163 if (memcmp(&dest
->viewport
.viewports
, &src
->viewport
.viewports
,
164 src
->viewport
.count
* sizeof(VkViewport
))) {
165 typed_memcpy(dest
->viewport
.viewports
, src
->viewport
.viewports
,
166 src
->viewport
.count
);
167 dest_mask
|= TU_DYNAMIC_VIEWPORT
;
171 if (copy_mask
& TU_DYNAMIC_SCISSOR
) {
172 if (memcmp(&dest
->scissor
.scissors
, &src
->scissor
.scissors
,
173 src
->scissor
.count
* sizeof(VkRect2D
))) {
174 typed_memcpy(dest
->scissor
.scissors
, src
->scissor
.scissors
,
176 dest_mask
|= TU_DYNAMIC_SCISSOR
;
180 if (copy_mask
& TU_DYNAMIC_LINE_WIDTH
) {
181 if (dest
->line_width
!= src
->line_width
) {
182 dest
->line_width
= src
->line_width
;
183 dest_mask
|= TU_DYNAMIC_LINE_WIDTH
;
187 if (copy_mask
& TU_DYNAMIC_DEPTH_BIAS
) {
188 if (memcmp(&dest
->depth_bias
, &src
->depth_bias
,
189 sizeof(src
->depth_bias
))) {
190 dest
->depth_bias
= src
->depth_bias
;
191 dest_mask
|= TU_DYNAMIC_DEPTH_BIAS
;
195 if (copy_mask
& TU_DYNAMIC_BLEND_CONSTANTS
) {
196 if (memcmp(&dest
->blend_constants
, &src
->blend_constants
,
197 sizeof(src
->blend_constants
))) {
198 typed_memcpy(dest
->blend_constants
, src
->blend_constants
, 4);
199 dest_mask
|= TU_DYNAMIC_BLEND_CONSTANTS
;
203 if (copy_mask
& TU_DYNAMIC_DEPTH_BOUNDS
) {
204 if (memcmp(&dest
->depth_bounds
, &src
->depth_bounds
,
205 sizeof(src
->depth_bounds
))) {
206 dest
->depth_bounds
= src
->depth_bounds
;
207 dest_mask
|= TU_DYNAMIC_DEPTH_BOUNDS
;
211 if (copy_mask
& TU_DYNAMIC_STENCIL_COMPARE_MASK
) {
212 if (memcmp(&dest
->stencil_compare_mask
, &src
->stencil_compare_mask
,
213 sizeof(src
->stencil_compare_mask
))) {
214 dest
->stencil_compare_mask
= src
->stencil_compare_mask
;
215 dest_mask
|= TU_DYNAMIC_STENCIL_COMPARE_MASK
;
219 if (copy_mask
& TU_DYNAMIC_STENCIL_WRITE_MASK
) {
220 if (memcmp(&dest
->stencil_write_mask
, &src
->stencil_write_mask
,
221 sizeof(src
->stencil_write_mask
))) {
222 dest
->stencil_write_mask
= src
->stencil_write_mask
;
223 dest_mask
|= TU_DYNAMIC_STENCIL_WRITE_MASK
;
227 if (copy_mask
& TU_DYNAMIC_STENCIL_REFERENCE
) {
228 if (memcmp(&dest
->stencil_reference
, &src
->stencil_reference
,
229 sizeof(src
->stencil_reference
))) {
230 dest
->stencil_reference
= src
->stencil_reference
;
231 dest_mask
|= TU_DYNAMIC_STENCIL_REFERENCE
;
235 if (copy_mask
& TU_DYNAMIC_DISCARD_RECTANGLE
) {
236 if (memcmp(&dest
->discard_rectangle
.rectangles
,
237 &src
->discard_rectangle
.rectangles
,
238 src
->discard_rectangle
.count
* sizeof(VkRect2D
))) {
239 typed_memcpy(dest
->discard_rectangle
.rectangles
,
240 src
->discard_rectangle
.rectangles
,
241 src
->discard_rectangle
.count
);
242 dest_mask
|= TU_DYNAMIC_DISCARD_RECTANGLE
;
248 tu_create_cmd_buffer(struct tu_device
*device
,
249 struct tu_cmd_pool
*pool
,
250 VkCommandBufferLevel level
,
251 VkCommandBuffer
*pCommandBuffer
)
253 struct tu_cmd_buffer
*cmd_buffer
;
254 cmd_buffer
= vk_zalloc(&pool
->alloc
, sizeof(*cmd_buffer
), 8,
255 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
256 if (cmd_buffer
== NULL
)
257 return vk_error(device
->instance
, VK_ERROR_OUT_OF_HOST_MEMORY
);
259 cmd_buffer
->_loader_data
.loaderMagic
= ICD_LOADER_MAGIC
;
260 cmd_buffer
->device
= device
;
261 cmd_buffer
->pool
= pool
;
262 cmd_buffer
->level
= level
;
265 list_addtail(&cmd_buffer
->pool_link
, &pool
->cmd_buffers
);
266 cmd_buffer
->queue_family_index
= pool
->queue_family_index
;
269 /* Init the pool_link so we can safely call list_del when we destroy
272 list_inithead(&cmd_buffer
->pool_link
);
273 cmd_buffer
->queue_family_index
= TU_QUEUE_GENERAL
;
276 tu_bo_list_init(&cmd_buffer
->bo_list
);
277 tu_cs_init(&cmd_buffer
->cs
);
279 *pCommandBuffer
= tu_cmd_buffer_to_handle(cmd_buffer
);
281 list_inithead(&cmd_buffer
->upload
.list
);
287 tu_cmd_buffer_destroy(struct tu_cmd_buffer
*cmd_buffer
)
289 list_del(&cmd_buffer
->pool_link
);
291 for (unsigned i
= 0; i
< VK_PIPELINE_BIND_POINT_RANGE_SIZE
; i
++)
292 free(cmd_buffer
->descriptors
[i
].push_set
.set
.mapped_ptr
);
294 tu_cs_finish(cmd_buffer
->device
, &cmd_buffer
->cs
);
295 tu_bo_list_destroy(&cmd_buffer
->bo_list
);
296 vk_free(&cmd_buffer
->pool
->alloc
, cmd_buffer
);
300 tu_reset_cmd_buffer(struct tu_cmd_buffer
*cmd_buffer
)
302 cmd_buffer
->record_result
= VK_SUCCESS
;
304 tu_bo_list_reset(&cmd_buffer
->bo_list
);
305 tu_cs_reset(cmd_buffer
->device
, &cmd_buffer
->cs
);
307 for (unsigned i
= 0; i
< VK_PIPELINE_BIND_POINT_RANGE_SIZE
; i
++) {
308 cmd_buffer
->descriptors
[i
].dirty
= 0;
309 cmd_buffer
->descriptors
[i
].valid
= 0;
310 cmd_buffer
->descriptors
[i
].push_dirty
= false;
313 cmd_buffer
->status
= TU_CMD_BUFFER_STATUS_INITIAL
;
315 return cmd_buffer
->record_result
;
319 tu_AllocateCommandBuffers(VkDevice _device
,
320 const VkCommandBufferAllocateInfo
*pAllocateInfo
,
321 VkCommandBuffer
*pCommandBuffers
)
323 TU_FROM_HANDLE(tu_device
, device
, _device
);
324 TU_FROM_HANDLE(tu_cmd_pool
, pool
, pAllocateInfo
->commandPool
);
326 VkResult result
= VK_SUCCESS
;
329 for (i
= 0; i
< pAllocateInfo
->commandBufferCount
; i
++) {
331 if (!list_empty(&pool
->free_cmd_buffers
)) {
332 struct tu_cmd_buffer
*cmd_buffer
= list_first_entry(
333 &pool
->free_cmd_buffers
, struct tu_cmd_buffer
, pool_link
);
335 list_del(&cmd_buffer
->pool_link
);
336 list_addtail(&cmd_buffer
->pool_link
, &pool
->cmd_buffers
);
338 result
= tu_reset_cmd_buffer(cmd_buffer
);
339 cmd_buffer
->_loader_data
.loaderMagic
= ICD_LOADER_MAGIC
;
340 cmd_buffer
->level
= pAllocateInfo
->level
;
342 pCommandBuffers
[i
] = tu_cmd_buffer_to_handle(cmd_buffer
);
344 result
= tu_create_cmd_buffer(device
, pool
, pAllocateInfo
->level
,
345 &pCommandBuffers
[i
]);
347 if (result
!= VK_SUCCESS
)
351 if (result
!= VK_SUCCESS
) {
352 tu_FreeCommandBuffers(_device
, pAllocateInfo
->commandPool
, i
,
355 /* From the Vulkan 1.0.66 spec:
357 * "vkAllocateCommandBuffers can be used to create multiple
358 * command buffers. If the creation of any of those command
359 * buffers fails, the implementation must destroy all
360 * successfully created command buffer objects from this
361 * command, set all entries of the pCommandBuffers array to
362 * NULL and return the error."
364 memset(pCommandBuffers
, 0,
365 sizeof(*pCommandBuffers
) * pAllocateInfo
->commandBufferCount
);
372 tu_FreeCommandBuffers(VkDevice device
,
373 VkCommandPool commandPool
,
374 uint32_t commandBufferCount
,
375 const VkCommandBuffer
*pCommandBuffers
)
377 for (uint32_t i
= 0; i
< commandBufferCount
; i
++) {
378 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, pCommandBuffers
[i
]);
381 if (cmd_buffer
->pool
) {
382 list_del(&cmd_buffer
->pool_link
);
383 list_addtail(&cmd_buffer
->pool_link
,
384 &cmd_buffer
->pool
->free_cmd_buffers
);
386 tu_cmd_buffer_destroy(cmd_buffer
);
392 tu_ResetCommandBuffer(VkCommandBuffer commandBuffer
,
393 VkCommandBufferResetFlags flags
)
395 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
396 return tu_reset_cmd_buffer(cmd_buffer
);
400 tu_BeginCommandBuffer(VkCommandBuffer commandBuffer
,
401 const VkCommandBufferBeginInfo
*pBeginInfo
)
403 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
404 VkResult result
= VK_SUCCESS
;
406 if (cmd_buffer
->status
!= TU_CMD_BUFFER_STATUS_INITIAL
) {
407 /* If the command buffer has already been resetted with
408 * vkResetCommandBuffer, no need to do it again.
410 result
= tu_reset_cmd_buffer(cmd_buffer
);
411 if (result
!= VK_SUCCESS
)
415 memset(&cmd_buffer
->state
, 0, sizeof(cmd_buffer
->state
));
416 cmd_buffer
->usage_flags
= pBeginInfo
->flags
;
418 /* setup initial configuration into command buffer */
419 if (cmd_buffer
->level
== VK_COMMAND_BUFFER_LEVEL_PRIMARY
) {
420 switch (cmd_buffer
->queue_family_index
) {
421 case TU_QUEUE_GENERAL
:
429 result
= tu_cs_begin(cmd_buffer
->device
, &cmd_buffer
->cs
, 4096);
430 if (result
!= VK_SUCCESS
)
433 cmd_buffer
->status
= TU_CMD_BUFFER_STATUS_RECORDING
;
435 /* Put some stuff in so we do not have empty command buffers. */
436 tu_cs_emit_pkt7(&cmd_buffer
->cs
, CP_NOP
, 4);
437 tu_cs_emit(&cmd_buffer
->cs
, 0);
438 tu_cs_emit(&cmd_buffer
->cs
, 0);
439 tu_cs_emit(&cmd_buffer
->cs
, 0);
440 tu_cs_emit(&cmd_buffer
->cs
, 0);
446 tu_CmdBindVertexBuffers(VkCommandBuffer commandBuffer
,
447 uint32_t firstBinding
,
448 uint32_t bindingCount
,
449 const VkBuffer
*pBuffers
,
450 const VkDeviceSize
*pOffsets
)
455 tu_CmdBindIndexBuffer(VkCommandBuffer commandBuffer
,
458 VkIndexType indexType
)
463 tu_CmdBindDescriptorSets(VkCommandBuffer commandBuffer
,
464 VkPipelineBindPoint pipelineBindPoint
,
465 VkPipelineLayout _layout
,
467 uint32_t descriptorSetCount
,
468 const VkDescriptorSet
*pDescriptorSets
,
469 uint32_t dynamicOffsetCount
,
470 const uint32_t *pDynamicOffsets
)
475 tu_CmdPushConstants(VkCommandBuffer commandBuffer
,
476 VkPipelineLayout layout
,
477 VkShaderStageFlags stageFlags
,
485 tu_EndCommandBuffer(VkCommandBuffer commandBuffer
)
487 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
489 VkResult result
= tu_cs_end(&cmd_buffer
->cs
);
490 if (result
!= VK_SUCCESS
)
491 cmd_buffer
->record_result
= result
;
493 cmd_buffer
->status
= TU_CMD_BUFFER_STATUS_EXECUTABLE
;
495 return cmd_buffer
->record_result
;
499 tu_CmdBindPipeline(VkCommandBuffer commandBuffer
,
500 VkPipelineBindPoint pipelineBindPoint
,
501 VkPipeline _pipeline
)
506 tu_CmdSetViewport(VkCommandBuffer commandBuffer
,
507 uint32_t firstViewport
,
508 uint32_t viewportCount
,
509 const VkViewport
*pViewports
)
514 tu_CmdSetScissor(VkCommandBuffer commandBuffer
,
515 uint32_t firstScissor
,
516 uint32_t scissorCount
,
517 const VkRect2D
*pScissors
)
522 tu_CmdSetLineWidth(VkCommandBuffer commandBuffer
, float lineWidth
)
527 tu_CmdSetDepthBias(VkCommandBuffer commandBuffer
,
528 float depthBiasConstantFactor
,
529 float depthBiasClamp
,
530 float depthBiasSlopeFactor
)
535 tu_CmdSetBlendConstants(VkCommandBuffer commandBuffer
,
536 const float blendConstants
[4])
541 tu_CmdSetDepthBounds(VkCommandBuffer commandBuffer
,
542 float minDepthBounds
,
543 float maxDepthBounds
)
548 tu_CmdSetStencilCompareMask(VkCommandBuffer commandBuffer
,
549 VkStencilFaceFlags faceMask
,
550 uint32_t compareMask
)
555 tu_CmdSetStencilWriteMask(VkCommandBuffer commandBuffer
,
556 VkStencilFaceFlags faceMask
,
562 tu_CmdSetStencilReference(VkCommandBuffer commandBuffer
,
563 VkStencilFaceFlags faceMask
,
569 tu_CmdExecuteCommands(VkCommandBuffer commandBuffer
,
570 uint32_t commandBufferCount
,
571 const VkCommandBuffer
*pCmdBuffers
)
576 tu_CreateCommandPool(VkDevice _device
,
577 const VkCommandPoolCreateInfo
*pCreateInfo
,
578 const VkAllocationCallbacks
*pAllocator
,
579 VkCommandPool
*pCmdPool
)
581 TU_FROM_HANDLE(tu_device
, device
, _device
);
582 struct tu_cmd_pool
*pool
;
584 pool
= vk_alloc2(&device
->alloc
, pAllocator
, sizeof(*pool
), 8,
585 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
587 return vk_error(device
->instance
, VK_ERROR_OUT_OF_HOST_MEMORY
);
590 pool
->alloc
= *pAllocator
;
592 pool
->alloc
= device
->alloc
;
594 list_inithead(&pool
->cmd_buffers
);
595 list_inithead(&pool
->free_cmd_buffers
);
597 pool
->queue_family_index
= pCreateInfo
->queueFamilyIndex
;
599 *pCmdPool
= tu_cmd_pool_to_handle(pool
);
605 tu_DestroyCommandPool(VkDevice _device
,
606 VkCommandPool commandPool
,
607 const VkAllocationCallbacks
*pAllocator
)
609 TU_FROM_HANDLE(tu_device
, device
, _device
);
610 TU_FROM_HANDLE(tu_cmd_pool
, pool
, commandPool
);
615 list_for_each_entry_safe(struct tu_cmd_buffer
, cmd_buffer
,
616 &pool
->cmd_buffers
, pool_link
)
618 tu_cmd_buffer_destroy(cmd_buffer
);
621 list_for_each_entry_safe(struct tu_cmd_buffer
, cmd_buffer
,
622 &pool
->free_cmd_buffers
, pool_link
)
624 tu_cmd_buffer_destroy(cmd_buffer
);
627 vk_free2(&device
->alloc
, pAllocator
, pool
);
631 tu_ResetCommandPool(VkDevice device
,
632 VkCommandPool commandPool
,
633 VkCommandPoolResetFlags flags
)
635 TU_FROM_HANDLE(tu_cmd_pool
, pool
, commandPool
);
638 list_for_each_entry(struct tu_cmd_buffer
, cmd_buffer
, &pool
->cmd_buffers
,
641 result
= tu_reset_cmd_buffer(cmd_buffer
);
642 if (result
!= VK_SUCCESS
)
650 tu_TrimCommandPool(VkDevice device
,
651 VkCommandPool commandPool
,
652 VkCommandPoolTrimFlagsKHR flags
)
654 TU_FROM_HANDLE(tu_cmd_pool
, pool
, commandPool
);
659 list_for_each_entry_safe(struct tu_cmd_buffer
, cmd_buffer
,
660 &pool
->free_cmd_buffers
, pool_link
)
662 tu_cmd_buffer_destroy(cmd_buffer
);
667 tu_CmdBeginRenderPass(VkCommandBuffer commandBuffer
,
668 const VkRenderPassBeginInfo
*pRenderPassBegin
,
669 VkSubpassContents contents
)
674 tu_CmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer
,
675 const VkRenderPassBeginInfo
*pRenderPassBeginInfo
,
676 const VkSubpassBeginInfoKHR
*pSubpassBeginInfo
)
678 tu_CmdBeginRenderPass(commandBuffer
, pRenderPassBeginInfo
,
679 pSubpassBeginInfo
->contents
);
683 tu_CmdNextSubpass(VkCommandBuffer commandBuffer
, VkSubpassContents contents
)
688 tu_CmdNextSubpass2KHR(VkCommandBuffer commandBuffer
,
689 const VkSubpassBeginInfoKHR
*pSubpassBeginInfo
,
690 const VkSubpassEndInfoKHR
*pSubpassEndInfo
)
692 tu_CmdNextSubpass(commandBuffer
, pSubpassBeginInfo
->contents
);
/* Unified description of a draw call, shared by all vkCmdDraw* entry
 * points and consumed by tu_draw().
 */
struct tu_draw_info
{
   /**
    * Number of vertices.
    */
   uint32_t count;

   /**
    * Index of the first vertex.
    */
   int32_t vertex_offset;

   /**
    * First instance id.
    */
   uint32_t first_instance;

   /**
    * Number of instances.
    */
   uint32_t instance_count;

   /**
    * First index (indexed draws only).
    */
   uint32_t first_index;

   /**
    * Whether it's an indexed draw.
    */
   bool indexed;

   /**
    * Indirect draw parameters resource.
    */
   struct tu_buffer *indirect;
   uint64_t indirect_offset;
   uint32_t stride;

   /**
    * Draw count parameters resource.
    */
   struct tu_buffer *count_buffer;
   uint64_t count_buffer_offset;
};
/* Central draw emission — not implemented yet; all vkCmdDraw* funnel here. */
static void
tu_draw(struct tu_cmd_buffer *cmd_buffer, const struct tu_draw_info *info)
{
}
747 tu_CmdDraw(VkCommandBuffer commandBuffer
,
748 uint32_t vertexCount
,
749 uint32_t instanceCount
,
750 uint32_t firstVertex
,
751 uint32_t firstInstance
)
753 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
754 struct tu_draw_info info
= {};
756 info
.count
= vertexCount
;
757 info
.instance_count
= instanceCount
;
758 info
.first_instance
= firstInstance
;
759 info
.vertex_offset
= firstVertex
;
761 tu_draw(cmd_buffer
, &info
);
765 tu_CmdDrawIndexed(VkCommandBuffer commandBuffer
,
767 uint32_t instanceCount
,
769 int32_t vertexOffset
,
770 uint32_t firstInstance
)
772 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
773 struct tu_draw_info info
= {};
776 info
.count
= indexCount
;
777 info
.instance_count
= instanceCount
;
778 info
.first_index
= firstIndex
;
779 info
.vertex_offset
= vertexOffset
;
780 info
.first_instance
= firstInstance
;
782 tu_draw(cmd_buffer
, &info
);
786 tu_CmdDrawIndirect(VkCommandBuffer commandBuffer
,
792 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
793 TU_FROM_HANDLE(tu_buffer
, buffer
, _buffer
);
794 struct tu_draw_info info
= {};
796 info
.count
= drawCount
;
797 info
.indirect
= buffer
;
798 info
.indirect_offset
= offset
;
799 info
.stride
= stride
;
801 tu_draw(cmd_buffer
, &info
);
805 tu_CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer
,
811 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
812 TU_FROM_HANDLE(tu_buffer
, buffer
, _buffer
);
813 struct tu_draw_info info
= {};
816 info
.count
= drawCount
;
817 info
.indirect
= buffer
;
818 info
.indirect_offset
= offset
;
819 info
.stride
= stride
;
821 tu_draw(cmd_buffer
, &info
);
/* Unified description of a compute dispatch, shared by all vkCmdDispatch*
 * entry points and consumed by tu_dispatch().
 */
struct tu_dispatch_info
{
   /**
    * Determine the layout of the grid (in block units) to be used.
    */
   uint32_t blocks[3];

   /**
    * A starting offset for the grid. If unaligned is set, the offset
    * must still be aligned.
    */
   uint32_t offsets[3];

   /**
    * Whether it's an unaligned compute dispatch.
    */
   bool unaligned;

   /**
    * Indirect compute parameters resource.
    */
   struct tu_buffer *indirect;
   uint64_t indirect_offset;
};
/* Central dispatch emission — not implemented yet; all vkCmdDispatch*
 * funnel here.
 */
static void
tu_dispatch(struct tu_cmd_buffer *cmd_buffer,
            const struct tu_dispatch_info *info)
{
}
855 tu_CmdDispatchBase(VkCommandBuffer commandBuffer
,
863 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
864 struct tu_dispatch_info info
= {};
870 info
.offsets
[0] = base_x
;
871 info
.offsets
[1] = base_y
;
872 info
.offsets
[2] = base_z
;
873 tu_dispatch(cmd_buffer
, &info
);
877 tu_CmdDispatch(VkCommandBuffer commandBuffer
,
882 tu_CmdDispatchBase(commandBuffer
, 0, 0, 0, x
, y
, z
);
886 tu_CmdDispatchIndirect(VkCommandBuffer commandBuffer
,
890 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
891 TU_FROM_HANDLE(tu_buffer
, buffer
, _buffer
);
892 struct tu_dispatch_info info
= {};
894 info
.indirect
= buffer
;
895 info
.indirect_offset
= offset
;
897 tu_dispatch(cmd_buffer
, &info
);
901 tu_CmdEndRenderPass(VkCommandBuffer commandBuffer
)
906 tu_CmdEndRenderPass2KHR(VkCommandBuffer commandBuffer
,
907 const VkSubpassEndInfoKHR
*pSubpassEndInfo
)
909 tu_CmdEndRenderPass(commandBuffer
);
912 struct tu_barrier_info
915 const VkEvent
*pEvents
;
916 VkPipelineStageFlags srcStageMask
;
920 tu_barrier(struct tu_cmd_buffer
*cmd_buffer
,
921 uint32_t memoryBarrierCount
,
922 const VkMemoryBarrier
*pMemoryBarriers
,
923 uint32_t bufferMemoryBarrierCount
,
924 const VkBufferMemoryBarrier
*pBufferMemoryBarriers
,
925 uint32_t imageMemoryBarrierCount
,
926 const VkImageMemoryBarrier
*pImageMemoryBarriers
,
927 const struct tu_barrier_info
*info
)
932 tu_CmdPipelineBarrier(VkCommandBuffer commandBuffer
,
933 VkPipelineStageFlags srcStageMask
,
934 VkPipelineStageFlags destStageMask
,
936 uint32_t memoryBarrierCount
,
937 const VkMemoryBarrier
*pMemoryBarriers
,
938 uint32_t bufferMemoryBarrierCount
,
939 const VkBufferMemoryBarrier
*pBufferMemoryBarriers
,
940 uint32_t imageMemoryBarrierCount
,
941 const VkImageMemoryBarrier
*pImageMemoryBarriers
)
943 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
944 struct tu_barrier_info info
;
948 info
.srcStageMask
= srcStageMask
;
950 tu_barrier(cmd_buffer
, memoryBarrierCount
, pMemoryBarriers
,
951 bufferMemoryBarrierCount
, pBufferMemoryBarriers
,
952 imageMemoryBarrierCount
, pImageMemoryBarriers
, &info
);
956 write_event(struct tu_cmd_buffer
*cmd_buffer
,
957 struct tu_event
*event
,
958 VkPipelineStageFlags stageMask
,
964 tu_CmdSetEvent(VkCommandBuffer commandBuffer
,
966 VkPipelineStageFlags stageMask
)
968 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
969 TU_FROM_HANDLE(tu_event
, event
, _event
);
971 write_event(cmd_buffer
, event
, stageMask
, 1);
975 tu_CmdResetEvent(VkCommandBuffer commandBuffer
,
977 VkPipelineStageFlags stageMask
)
979 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
980 TU_FROM_HANDLE(tu_event
, event
, _event
);
982 write_event(cmd_buffer
, event
, stageMask
, 0);
986 tu_CmdWaitEvents(VkCommandBuffer commandBuffer
,
988 const VkEvent
*pEvents
,
989 VkPipelineStageFlags srcStageMask
,
990 VkPipelineStageFlags dstStageMask
,
991 uint32_t memoryBarrierCount
,
992 const VkMemoryBarrier
*pMemoryBarriers
,
993 uint32_t bufferMemoryBarrierCount
,
994 const VkBufferMemoryBarrier
*pBufferMemoryBarriers
,
995 uint32_t imageMemoryBarrierCount
,
996 const VkImageMemoryBarrier
*pImageMemoryBarriers
)
998 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
999 struct tu_barrier_info info
;
1001 info
.eventCount
= eventCount
;
1002 info
.pEvents
= pEvents
;
1003 info
.srcStageMask
= 0;
1005 tu_barrier(cmd_buffer
, memoryBarrierCount
, pMemoryBarriers
,
1006 bufferMemoryBarrierCount
, pBufferMemoryBarriers
,
1007 imageMemoryBarrierCount
, pImageMemoryBarriers
, &info
);
1011 tu_CmdSetDeviceMask(VkCommandBuffer commandBuffer
, uint32_t deviceMask
)