/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "tu_private.h"

#include "vk_format.h"
#include "adreno_pm4.xml.h"
void
tu_bo_list_init(struct tu_bo_list *list)
{
   list->count = list->capacity = 0;
   list->bo_infos = NULL;
}
void
tu_bo_list_destroy(struct tu_bo_list *list)
{
   free(list->bo_infos);
}

void
tu_bo_list_reset(struct tu_bo_list *list)
{
   list->count = 0;
}
/**
 * \a flags consists of MSM_SUBMIT_BO_FLAGS.
 */
uint32_t
tu_bo_list_add(struct tu_bo_list *list,
               const struct tu_bo *bo,
               uint32_t flags)
{
   uint32_t handle = bo->gem_handle;
   for (uint32_t i = 0; i < list->count; ++i) {
      /* already tracked: just accumulate the access flags */
      if (list->bo_infos[i].handle == handle) {
         list->bo_infos[i].flags |= flags;
         return i;
      }
   }

   /* grow list->bo_infos if needed */
   if (list->count == list->capacity) {
      uint32_t new_capacity = MAX2(2 * list->count, 16);
      struct drm_msm_gem_submit_bo *new_bo_infos = realloc(
         list->bo_infos, new_capacity * sizeof(struct drm_msm_gem_submit_bo));
      if (!new_bo_infos)
         return TU_BO_LIST_FAILED;
      list->bo_infos = new_bo_infos;
      list->capacity = new_capacity;
   }

   uint32_t ret = list->count;
   list->bo_infos[list->count] = (struct drm_msm_gem_submit_bo) {
      .flags = flags,
      .handle = bo->gem_handle,
   };
   ++list->count;

   return ret;
}
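
/*
 * Usage sketch (hedged; the call below is illustrative, not code from this
 * file): commands that read or write a buffer object are expected to record
 * it in the command buffer's BO list so the kernel can pin and fence it at
 * submit time, roughly:
 *
 *    uint32_t idx = tu_bo_list_add(&cmd_buffer->bo_list, &buf->bo,
 *                                  MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
 *
 * MSM_SUBMIT_BO_READ/MSM_SUBMIT_BO_WRITE come from the msm DRM UAPI; buf is
 * a hypothetical tu_buffer used only for the example.
 */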
const struct tu_dynamic_state default_dynamic_state = {
   .viewport = { .count = 0 },
   .scissor = { .count = 0 },
   .line_width = 1.0f,
   .depth_bias = { .bias = 0.0f, .clamp = 0.0f, .slope = 0.0f },
   .blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
   .depth_bounds = { .min = 0.0f, .max = 1.0f },
   .stencil_compare_mask = { .front = ~0u, .back = ~0u },
   .stencil_write_mask = { .front = ~0u, .back = ~0u },
   .stencil_reference = { .front = 0u, .back = 0u },
};
static void UNUSED /* FINISHME */
tu_bind_dynamic_state(struct tu_cmd_buffer *cmd_buffer,
                      const struct tu_dynamic_state *src)
{
   struct tu_dynamic_state *dest = &cmd_buffer->state.dynamic;
   uint32_t copy_mask = src->mask;
   uint32_t dest_mask = 0;

   tu_use_args(cmd_buffer); /* FINISHME */

   /* Make sure to copy the number of viewports/scissors because they can
    * only be specified at pipeline creation time.
    */
   dest->viewport.count = src->viewport.count;
   dest->scissor.count = src->scissor.count;
   dest->discard_rectangle.count = src->discard_rectangle.count;

   if (copy_mask & TU_DYNAMIC_VIEWPORT) {
      if (memcmp(&dest->viewport.viewports, &src->viewport.viewports,
                 src->viewport.count * sizeof(VkViewport))) {
         typed_memcpy(dest->viewport.viewports, src->viewport.viewports,
                      src->viewport.count);
         dest_mask |= TU_DYNAMIC_VIEWPORT;
      }
   }

   if (copy_mask & TU_DYNAMIC_SCISSOR) {
      if (memcmp(&dest->scissor.scissors, &src->scissor.scissors,
                 src->scissor.count * sizeof(VkRect2D))) {
         typed_memcpy(dest->scissor.scissors, src->scissor.scissors,
                      src->scissor.count);
         dest_mask |= TU_DYNAMIC_SCISSOR;
      }
   }

   if (copy_mask & TU_DYNAMIC_LINE_WIDTH) {
      if (dest->line_width != src->line_width) {
         dest->line_width = src->line_width;
         dest_mask |= TU_DYNAMIC_LINE_WIDTH;
      }
   }

   if (copy_mask & TU_DYNAMIC_DEPTH_BIAS) {
      if (memcmp(&dest->depth_bias, &src->depth_bias,
                 sizeof(src->depth_bias))) {
         dest->depth_bias = src->depth_bias;
         dest_mask |= TU_DYNAMIC_DEPTH_BIAS;
      }
   }

   if (copy_mask & TU_DYNAMIC_BLEND_CONSTANTS) {
      if (memcmp(&dest->blend_constants, &src->blend_constants,
                 sizeof(src->blend_constants))) {
         typed_memcpy(dest->blend_constants, src->blend_constants, 4);
         dest_mask |= TU_DYNAMIC_BLEND_CONSTANTS;
      }
   }

   if (copy_mask & TU_DYNAMIC_DEPTH_BOUNDS) {
      if (memcmp(&dest->depth_bounds, &src->depth_bounds,
                 sizeof(src->depth_bounds))) {
         dest->depth_bounds = src->depth_bounds;
         dest_mask |= TU_DYNAMIC_DEPTH_BOUNDS;
      }
   }

   if (copy_mask & TU_DYNAMIC_STENCIL_COMPARE_MASK) {
      if (memcmp(&dest->stencil_compare_mask, &src->stencil_compare_mask,
                 sizeof(src->stencil_compare_mask))) {
         dest->stencil_compare_mask = src->stencil_compare_mask;
         dest_mask |= TU_DYNAMIC_STENCIL_COMPARE_MASK;
      }
   }

   if (copy_mask & TU_DYNAMIC_STENCIL_WRITE_MASK) {
      if (memcmp(&dest->stencil_write_mask, &src->stencil_write_mask,
                 sizeof(src->stencil_write_mask))) {
         dest->stencil_write_mask = src->stencil_write_mask;
         dest_mask |= TU_DYNAMIC_STENCIL_WRITE_MASK;
      }
   }

   if (copy_mask & TU_DYNAMIC_STENCIL_REFERENCE) {
      if (memcmp(&dest->stencil_reference, &src->stencil_reference,
                 sizeof(src->stencil_reference))) {
         dest->stencil_reference = src->stencil_reference;
         dest_mask |= TU_DYNAMIC_STENCIL_REFERENCE;
      }
   }

   if (copy_mask & TU_DYNAMIC_DISCARD_RECTANGLE) {
      if (memcmp(&dest->discard_rectangle.rectangles,
                 &src->discard_rectangle.rectangles,
                 src->discard_rectangle.count * sizeof(VkRect2D))) {
         typed_memcpy(dest->discard_rectangle.rectangles,
                      src->discard_rectangle.rectangles,
                      src->discard_rectangle.count);
         dest_mask |= TU_DYNAMIC_DISCARD_RECTANGLE;
      }
   }
}
static VkResult
tu_create_cmd_buffer(struct tu_device *device,
                     struct tu_cmd_pool *pool,
                     VkCommandBufferLevel level,
                     VkCommandBuffer *pCommandBuffer)
{
   struct tu_cmd_buffer *cmd_buffer;
   cmd_buffer = vk_zalloc(&pool->alloc, sizeof(*cmd_buffer), 8,
                          VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (cmd_buffer == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   cmd_buffer->device = device;
   cmd_buffer->pool = pool;
   cmd_buffer->level = level;

   if (pool) {
      list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
      cmd_buffer->queue_family_index = pool->queue_family_index;
   } else {
      /* Init the pool_link so we can safely call list_del when we destroy
       * the command buffer even though it is not in a pool.
       */
      list_inithead(&cmd_buffer->pool_link);
      cmd_buffer->queue_family_index = TU_QUEUE_GENERAL;
   }

   tu_bo_list_init(&cmd_buffer->bo_list);
   tu_cs_init(&cmd_buffer->cs);

   *pCommandBuffer = tu_cmd_buffer_to_handle(cmd_buffer);

   list_inithead(&cmd_buffer->upload.list);

   return VK_SUCCESS;
}
static void
tu_cmd_buffer_destroy(struct tu_cmd_buffer *cmd_buffer)
{
   list_del(&cmd_buffer->pool_link);

   for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++)
      free(cmd_buffer->descriptors[i].push_set.set.mapped_ptr);

   tu_cs_finish(cmd_buffer->device, &cmd_buffer->cs);
   tu_bo_list_destroy(&cmd_buffer->bo_list);
   vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
}
static VkResult
tu_reset_cmd_buffer(struct tu_cmd_buffer *cmd_buffer)
{
   cmd_buffer->record_result = VK_SUCCESS;

   tu_bo_list_reset(&cmd_buffer->bo_list);
   tu_cs_reset(cmd_buffer->device, &cmd_buffer->cs);

   for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++) {
      cmd_buffer->descriptors[i].dirty = 0;
      cmd_buffer->descriptors[i].valid = 0;
      cmd_buffer->descriptors[i].push_dirty = false;
   }

   cmd_buffer->status = TU_CMD_BUFFER_STATUS_INITIAL;

   return cmd_buffer->record_result;
}
VkResult
tu_AllocateCommandBuffers(VkDevice _device,
                          const VkCommandBufferAllocateInfo *pAllocateInfo,
                          VkCommandBuffer *pCommandBuffers)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_cmd_pool, pool, pAllocateInfo->commandPool);

   VkResult result = VK_SUCCESS;
   uint32_t i;

   for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
      /* Reuse a command buffer from the pool's free list when possible. */
      if (!list_empty(&pool->free_cmd_buffers)) {
         struct tu_cmd_buffer *cmd_buffer = list_first_entry(
            &pool->free_cmd_buffers, struct tu_cmd_buffer, pool_link);

         list_del(&cmd_buffer->pool_link);
         list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);

         result = tu_reset_cmd_buffer(cmd_buffer);
         cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
         cmd_buffer->level = pAllocateInfo->level;

         pCommandBuffers[i] = tu_cmd_buffer_to_handle(cmd_buffer);
      } else {
         result = tu_create_cmd_buffer(device, pool, pAllocateInfo->level,
                                       &pCommandBuffers[i]);
      }
      if (result != VK_SUCCESS)
         break;
   }

   if (result != VK_SUCCESS) {
      tu_FreeCommandBuffers(_device, pAllocateInfo->commandPool, i,
                            pCommandBuffers);

      /* From the Vulkan 1.0.66 spec:
       *
       * "vkAllocateCommandBuffers can be used to create multiple
       *  command buffers. If the creation of any of those command
       *  buffers fails, the implementation must destroy all
       *  successfully created command buffer objects from this
       *  command, set all entries of the pCommandBuffers array to
       *  NULL and return the error."
       */
      memset(pCommandBuffers, 0,
             sizeof(*pCommandBuffers) * pAllocateInfo->commandBufferCount);
   }

   return result;
}
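
/*
 * Application-side usage sketch (hedged; not part of this driver file): a
 * typical allocation that exercises the free-list reuse above looks like
 *
 *    VkCommandBufferAllocateInfo alloc_info = {
 *       .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
 *       .commandPool = pool,
 *       .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
 *       .commandBufferCount = 1,
 *    };
 *    vkAllocateCommandBuffers(device, &alloc_info, &cmd);
 */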
void
tu_FreeCommandBuffers(VkDevice device,
                      VkCommandPool commandPool,
                      uint32_t commandBufferCount,
                      const VkCommandBuffer *pCommandBuffers)
{
   for (uint32_t i = 0; i < commandBufferCount; i++) {
      TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, pCommandBuffers[i]);

      if (cmd_buffer) {
         /* Pooled command buffers go back onto the pool's free list;
          * pool-less ones are destroyed outright.
          */
         if (cmd_buffer->pool) {
            list_del(&cmd_buffer->pool_link);
            list_addtail(&cmd_buffer->pool_link,
                         &cmd_buffer->pool->free_cmd_buffers);
         } else
            tu_cmd_buffer_destroy(cmd_buffer);
      }
   }
}
VkResult
tu_ResetCommandBuffer(VkCommandBuffer commandBuffer,
                      VkCommandBufferResetFlags flags)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   return tu_reset_cmd_buffer(cmd_buffer);
}
VkResult
tu_BeginCommandBuffer(VkCommandBuffer commandBuffer,
                      const VkCommandBufferBeginInfo *pBeginInfo)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   VkResult result = VK_SUCCESS;

   if (cmd_buffer->status != TU_CMD_BUFFER_STATUS_INITIAL) {
      /* If the command buffer has already been reset with
       * vkResetCommandBuffer, no need to do it again.
       */
      result = tu_reset_cmd_buffer(cmd_buffer);
      if (result != VK_SUCCESS)
         return result;
   }

   memset(&cmd_buffer->state, 0, sizeof(cmd_buffer->state));
   cmd_buffer->usage_flags = pBeginInfo->flags;

   /* setup initial configuration into command buffer */
   if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
      switch (cmd_buffer->queue_family_index) {
      case TU_QUEUE_GENERAL:
         /* nothing to emit yet */
         break;
      default:
         break;
      }
   }

   result = tu_cs_begin(cmd_buffer->device, &cmd_buffer->cs, 4096);
   if (result != VK_SUCCESS)
      return result;

   cmd_buffer->status = TU_CMD_BUFFER_STATUS_RECORDING;

   /* Put some stuff in so we do not have empty command buffers. */
   tu_cs_emit_pkt7(&cmd_buffer->cs, CP_NOP, 4);
   tu_cs_emit(&cmd_buffer->cs, 0);
   tu_cs_emit(&cmd_buffer->cs, 0);
   tu_cs_emit(&cmd_buffer->cs, 0);
   tu_cs_emit(&cmd_buffer->cs, 0);

   return VK_SUCCESS;
}
void
tu_CmdBindVertexBuffers(VkCommandBuffer commandBuffer,
                        uint32_t firstBinding,
                        uint32_t bindingCount,
                        const VkBuffer *pBuffers,
                        const VkDeviceSize *pOffsets)
{
}

void
tu_CmdBindIndexBuffer(VkCommandBuffer commandBuffer,
                      VkBuffer buffer,
                      VkDeviceSize offset,
                      VkIndexType indexType)
{
}

void
tu_CmdBindDescriptorSets(VkCommandBuffer commandBuffer,
                         VkPipelineBindPoint pipelineBindPoint,
                         VkPipelineLayout _layout,
                         uint32_t firstSet,
                         uint32_t descriptorSetCount,
                         const VkDescriptorSet *pDescriptorSets,
                         uint32_t dynamicOffsetCount,
                         const uint32_t *pDynamicOffsets)
{
}

void
tu_CmdPushConstants(VkCommandBuffer commandBuffer,
                    VkPipelineLayout layout,
                    VkShaderStageFlags stageFlags,
                    uint32_t offset,
                    uint32_t size,
                    const void *pValues)
{
}
VkResult
tu_EndCommandBuffer(VkCommandBuffer commandBuffer)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);

   VkResult result = tu_cs_end(&cmd_buffer->cs);
   if (result != VK_SUCCESS)
      cmd_buffer->record_result = result;

   cmd_buffer->status = TU_CMD_BUFFER_STATUS_EXECUTABLE;

   return cmd_buffer->record_result;
}
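
/*
 * Application-side usage sketch (hedged; not part of this driver file):
 * tu_BeginCommandBuffer / tu_EndCommandBuffer back the standard Vulkan
 * recording sequence and drive the INITIAL -> RECORDING -> EXECUTABLE
 * status transitions above, roughly:
 *
 *    VkCommandBufferBeginInfo begin_info = {
 *       .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
 *       .flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT,
 *    };
 *    vkBeginCommandBuffer(cmd, &begin_info);   -> tu_BeginCommandBuffer
 *    ... record draws, dispatches, barriers ...
 *    vkEndCommandBuffer(cmd);                  -> tu_EndCommandBuffer
 */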
void
tu_CmdBindPipeline(VkCommandBuffer commandBuffer,
                   VkPipelineBindPoint pipelineBindPoint,
                   VkPipeline _pipeline)
{
}

void
tu_CmdSetViewport(VkCommandBuffer commandBuffer,
                  uint32_t firstViewport,
                  uint32_t viewportCount,
                  const VkViewport *pViewports)
{
}

void
tu_CmdSetScissor(VkCommandBuffer commandBuffer,
                 uint32_t firstScissor,
                 uint32_t scissorCount,
                 const VkRect2D *pScissors)
{
}

void
tu_CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth)
{
}

void
tu_CmdSetDepthBias(VkCommandBuffer commandBuffer,
                   float depthBiasConstantFactor,
                   float depthBiasClamp,
                   float depthBiasSlopeFactor)
{
}

void
tu_CmdSetBlendConstants(VkCommandBuffer commandBuffer,
                        const float blendConstants[4])
{
}

void
tu_CmdSetDepthBounds(VkCommandBuffer commandBuffer,
                     float minDepthBounds,
                     float maxDepthBounds)
{
}

void
tu_CmdSetStencilCompareMask(VkCommandBuffer commandBuffer,
                            VkStencilFaceFlags faceMask,
                            uint32_t compareMask)
{
}

void
tu_CmdSetStencilWriteMask(VkCommandBuffer commandBuffer,
                          VkStencilFaceFlags faceMask,
                          uint32_t writeMask)
{
}

void
tu_CmdSetStencilReference(VkCommandBuffer commandBuffer,
                          VkStencilFaceFlags faceMask,
                          uint32_t reference)
{
}

void
tu_CmdExecuteCommands(VkCommandBuffer commandBuffer,
                      uint32_t commandBufferCount,
                      const VkCommandBuffer *pCmdBuffers)
{
}
VkResult
tu_CreateCommandPool(VkDevice _device,
                     const VkCommandPoolCreateInfo *pCreateInfo,
                     const VkAllocationCallbacks *pAllocator,
                     VkCommandPool *pCmdPool)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_cmd_pool *pool;

   pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (pool == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   if (pAllocator)
      pool->alloc = *pAllocator;
   else
      pool->alloc = device->alloc;

   list_inithead(&pool->cmd_buffers);
   list_inithead(&pool->free_cmd_buffers);

   pool->queue_family_index = pCreateInfo->queueFamilyIndex;

   *pCmdPool = tu_cmd_pool_to_handle(pool);

   return VK_SUCCESS;
}
void
tu_DestroyCommandPool(VkDevice _device,
                      VkCommandPool commandPool,
                      const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);

   if (!pool)
      return;

   list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
                            &pool->cmd_buffers, pool_link)
   {
      tu_cmd_buffer_destroy(cmd_buffer);
   }

   list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
                            &pool->free_cmd_buffers, pool_link)
   {
      tu_cmd_buffer_destroy(cmd_buffer);
   }

   vk_free2(&device->alloc, pAllocator, pool);
}
VkResult
tu_ResetCommandPool(VkDevice device,
                    VkCommandPool commandPool,
                    VkCommandPoolResetFlags flags)
{
   TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);
   VkResult result;

   list_for_each_entry(struct tu_cmd_buffer, cmd_buffer, &pool->cmd_buffers,
                       pool_link)
   {
      result = tu_reset_cmd_buffer(cmd_buffer);
      if (result != VK_SUCCESS)
         return result;
   }

   return VK_SUCCESS;
}
void
tu_TrimCommandPool(VkDevice device,
                   VkCommandPool commandPool,
                   VkCommandPoolTrimFlagsKHR flags)
{
   TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);

   if (!pool)
      return;

   /* Trimming releases command buffers sitting on the free list. */
   list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
                            &pool->free_cmd_buffers, pool_link)
   {
      tu_cmd_buffer_destroy(cmd_buffer);
   }
}
void
tu_CmdBeginRenderPass(VkCommandBuffer commandBuffer,
                      const VkRenderPassBeginInfo *pRenderPassBegin,
                      VkSubpassContents contents)
{
}

void
tu_CmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer,
                          const VkRenderPassBeginInfo *pRenderPassBeginInfo,
                          const VkSubpassBeginInfoKHR *pSubpassBeginInfo)
{
   tu_CmdBeginRenderPass(commandBuffer, pRenderPassBeginInfo,
                         pSubpassBeginInfo->contents);
}

void
tu_CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents)
{
}

void
tu_CmdNextSubpass2KHR(VkCommandBuffer commandBuffer,
                      const VkSubpassBeginInfoKHR *pSubpassBeginInfo,
                      const VkSubpassEndInfoKHR *pSubpassEndInfo)
{
   tu_CmdNextSubpass(commandBuffer, pSubpassBeginInfo->contents);
}
struct tu_draw_info
{
   /**
    * Number of vertices.
    */
   uint32_t count;

   /**
    * Index of the first vertex.
    */
   int32_t vertex_offset;

   /**
    * First instance id.
    */
   uint32_t first_instance;

   /**
    * Number of instances.
    */
   uint32_t instance_count;

   /**
    * First index (indexed draws only).
    */
   uint32_t first_index;

   /**
    * Whether it's an indexed draw.
    */
   bool indexed;

   /**
    * Indirect draw parameters resource.
    */
   struct tu_buffer *indirect;
   uint64_t indirect_offset;
   uint32_t stride;

   /**
    * Draw count parameters resource.
    */
   struct tu_buffer *count_buffer;
   uint64_t count_buffer_offset;
};
static void
tu_draw(struct tu_cmd_buffer *cmd_buffer, const struct tu_draw_info *info)
{
}
void
tu_CmdDraw(VkCommandBuffer commandBuffer,
           uint32_t vertexCount,
           uint32_t instanceCount,
           uint32_t firstVertex,
           uint32_t firstInstance)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_draw_info info = {};

   info.count = vertexCount;
   info.instance_count = instanceCount;
   info.first_instance = firstInstance;
   info.vertex_offset = firstVertex;

   tu_draw(cmd_buffer, &info);
}
void
tu_CmdDrawIndexed(VkCommandBuffer commandBuffer,
                  uint32_t indexCount,
                  uint32_t instanceCount,
                  uint32_t firstIndex,
                  int32_t vertexOffset,
                  uint32_t firstInstance)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_draw_info info = {};

   info.indexed = true;
   info.count = indexCount;
   info.instance_count = instanceCount;
   info.first_index = firstIndex;
   info.vertex_offset = vertexOffset;
   info.first_instance = firstInstance;

   tu_draw(cmd_buffer, &info);
}
void
tu_CmdDrawIndirect(VkCommandBuffer commandBuffer,
                   VkBuffer _buffer,
                   VkDeviceSize offset,
                   uint32_t drawCount,
                   uint32_t stride)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
   struct tu_draw_info info = {};

   info.count = drawCount;
   info.indirect = buffer;
   info.indirect_offset = offset;
   info.stride = stride;

   tu_draw(cmd_buffer, &info);
}
void
tu_CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer,
                          VkBuffer _buffer,
                          VkDeviceSize offset,
                          uint32_t drawCount,
                          uint32_t stride)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
   struct tu_draw_info info = {};

   info.indexed = true;
   info.count = drawCount;
   info.indirect = buffer;
   info.indirect_offset = offset;
   info.stride = stride;

   tu_draw(cmd_buffer, &info);
}
struct tu_dispatch_info
{
   /**
    * Determine the layout of the grid (in block units) to be used.
    */
   uint32_t blocks[3];

   /**
    * A starting offset for the grid. If unaligned is set, the offset
    * must still be aligned.
    */
   uint32_t offsets[3];

   /* Whether it's an unaligned compute dispatch. */
   bool unaligned;

   /* Indirect compute parameters resource. */
   struct tu_buffer *indirect;
   uint64_t indirect_offset;
};
static void
tu_dispatch(struct tu_cmd_buffer *cmd_buffer,
            const struct tu_dispatch_info *info)
{
}
void
tu_CmdDispatchBase(VkCommandBuffer commandBuffer,
                   uint32_t base_x,
                   uint32_t base_y,
                   uint32_t base_z,
                   uint32_t x,
                   uint32_t y,
                   uint32_t z)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_dispatch_info info = {};

   info.blocks[0] = x;
   info.blocks[1] = y;
   info.blocks[2] = z;

   info.offsets[0] = base_x;
   info.offsets[1] = base_y;
   info.offsets[2] = base_z;
   tu_dispatch(cmd_buffer, &info);
}
void
tu_CmdDispatch(VkCommandBuffer commandBuffer,
               uint32_t x,
               uint32_t y,
               uint32_t z)
{
   tu_CmdDispatchBase(commandBuffer, 0, 0, 0, x, y, z);
}
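
/*
 * Hedged note (not from this file): vkCmdDispatch(cmd, x, y, z) launches an
 * x * y * z grid of workgroups, which is why it simply forwards to
 * tu_CmdDispatchBase with a zero grid origin; the base_* offsets follow the
 * Vulkan 1.1 / VK_KHR_device_group vkCmdDispatchBase semantics.
 */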
void
tu_CmdDispatchIndirect(VkCommandBuffer commandBuffer,
                       VkBuffer _buffer,
                       VkDeviceSize offset)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
   struct tu_dispatch_info info = {};

   info.indirect = buffer;
   info.indirect_offset = offset;

   tu_dispatch(cmd_buffer, &info);
}
void
tu_CmdEndRenderPass(VkCommandBuffer commandBuffer)
{
}

void
tu_CmdEndRenderPass2KHR(VkCommandBuffer commandBuffer,
                        const VkSubpassEndInfoKHR *pSubpassEndInfo)
{
   tu_CmdEndRenderPass(commandBuffer);
}
struct tu_barrier_info
{
   uint32_t eventCount;
   const VkEvent *pEvents;
   VkPipelineStageFlags srcStageMask;
};

static void
tu_barrier(struct tu_cmd_buffer *cmd_buffer,
           uint32_t memoryBarrierCount,
           const VkMemoryBarrier *pMemoryBarriers,
           uint32_t bufferMemoryBarrierCount,
           const VkBufferMemoryBarrier *pBufferMemoryBarriers,
           uint32_t imageMemoryBarrierCount,
           const VkImageMemoryBarrier *pImageMemoryBarriers,
           const struct tu_barrier_info *info)
{
}
void
tu_CmdPipelineBarrier(VkCommandBuffer commandBuffer,
                      VkPipelineStageFlags srcStageMask,
                      VkPipelineStageFlags destStageMask,
                      VkDependencyFlags dependencyFlags,
                      uint32_t memoryBarrierCount,
                      const VkMemoryBarrier *pMemoryBarriers,
                      uint32_t bufferMemoryBarrierCount,
                      const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                      uint32_t imageMemoryBarrierCount,
                      const VkImageMemoryBarrier *pImageMemoryBarriers)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_barrier_info info;

   info.eventCount = 0;
   info.pEvents = NULL;
   info.srcStageMask = srcStageMask;

   tu_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
              bufferMemoryBarrierCount, pBufferMemoryBarriers,
              imageMemoryBarrierCount, pImageMemoryBarriers, &info);
}
static void
write_event(struct tu_cmd_buffer *cmd_buffer,
            struct tu_event *event,
            VkPipelineStageFlags stageMask,
            unsigned value)
{
}
void
tu_CmdSetEvent(VkCommandBuffer commandBuffer,
               VkEvent _event,
               VkPipelineStageFlags stageMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_event, event, _event);

   write_event(cmd_buffer, event, stageMask, 1);
}

void
tu_CmdResetEvent(VkCommandBuffer commandBuffer,
                 VkEvent _event,
                 VkPipelineStageFlags stageMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_event, event, _event);

   write_event(cmd_buffer, event, stageMask, 0);
}
void
tu_CmdWaitEvents(VkCommandBuffer commandBuffer,
                 uint32_t eventCount,
                 const VkEvent *pEvents,
                 VkPipelineStageFlags srcStageMask,
                 VkPipelineStageFlags dstStageMask,
                 uint32_t memoryBarrierCount,
                 const VkMemoryBarrier *pMemoryBarriers,
                 uint32_t bufferMemoryBarrierCount,
                 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                 uint32_t imageMemoryBarrierCount,
                 const VkImageMemoryBarrier *pImageMemoryBarriers)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_barrier_info info;

   info.eventCount = eventCount;
   info.pEvents = pEvents;
   info.srcStageMask = 0;

   tu_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
              bufferMemoryBarrierCount, pBufferMemoryBarriers,
              imageMemoryBarrierCount, pImageMemoryBarriers, &info);
}
void
tu_CmdSetDeviceMask(VkCommandBuffer commandBuffer, uint32_t deviceMask)
{
}