/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
28 #include "tu_private.h"
30 #include "vk_format.h"
31 #include "adreno_pm4.xml.h"
35 tu_bo_list_init(struct tu_bo_list
*list
)
37 list
->count
= list
->capacity
= 0;
42 tu_bo_list_destroy(struct tu_bo_list
*list
)
48 tu_bo_list_reset(struct tu_bo_list
*list
)
54 tu_bo_list_add(struct tu_bo_list
*list
,
55 const struct tu_bo
*bo
)
57 uint32_t handle
= bo
->gem_handle
;
58 for (uint32_t i
= 0; i
< list
->count
; ++i
) {
59 if (list
->handles
[i
] == handle
)
63 if (list
->count
== list
->capacity
) {
64 uint32_t new_capacity
= MAX2(2 * list
->count
, 16);
65 uint32_t *new_handles
= realloc(list
->handles
, new_capacity
* sizeof(uint32_t));
68 list
->handles
= new_handles
;
69 list
->capacity
= new_capacity
;
72 uint32_t ret
= list
->count
;
73 list
->handles
[list
->count
] = handle
;
79 const struct tu_dynamic_state default_dynamic_state
= {
95 .blend_constants
= { 0.0f
, 0.0f
, 0.0f
, 0.0f
},
101 .stencil_compare_mask
=
106 .stencil_write_mask
=
118 static void UNUSED
/* FINISHME */
119 tu_bind_dynamic_state(struct tu_cmd_buffer
*cmd_buffer
,
120 const struct tu_dynamic_state
*src
)
122 struct tu_dynamic_state
*dest
= &cmd_buffer
->state
.dynamic
;
123 uint32_t copy_mask
= src
->mask
;
124 uint32_t dest_mask
= 0;
126 tu_use_args(cmd_buffer
); /* FINISHME */
128 /* Make sure to copy the number of viewports/scissors because they can
129 * only be specified at pipeline creation time.
131 dest
->viewport
.count
= src
->viewport
.count
;
132 dest
->scissor
.count
= src
->scissor
.count
;
133 dest
->discard_rectangle
.count
= src
->discard_rectangle
.count
;
135 if (copy_mask
& TU_DYNAMIC_VIEWPORT
) {
136 if (memcmp(&dest
->viewport
.viewports
, &src
->viewport
.viewports
,
137 src
->viewport
.count
* sizeof(VkViewport
))) {
138 typed_memcpy(dest
->viewport
.viewports
, src
->viewport
.viewports
,
139 src
->viewport
.count
);
140 dest_mask
|= TU_DYNAMIC_VIEWPORT
;
144 if (copy_mask
& TU_DYNAMIC_SCISSOR
) {
145 if (memcmp(&dest
->scissor
.scissors
, &src
->scissor
.scissors
,
146 src
->scissor
.count
* sizeof(VkRect2D
))) {
147 typed_memcpy(dest
->scissor
.scissors
, src
->scissor
.scissors
,
149 dest_mask
|= TU_DYNAMIC_SCISSOR
;
153 if (copy_mask
& TU_DYNAMIC_LINE_WIDTH
) {
154 if (dest
->line_width
!= src
->line_width
) {
155 dest
->line_width
= src
->line_width
;
156 dest_mask
|= TU_DYNAMIC_LINE_WIDTH
;
160 if (copy_mask
& TU_DYNAMIC_DEPTH_BIAS
) {
161 if (memcmp(&dest
->depth_bias
, &src
->depth_bias
,
162 sizeof(src
->depth_bias
))) {
163 dest
->depth_bias
= src
->depth_bias
;
164 dest_mask
|= TU_DYNAMIC_DEPTH_BIAS
;
168 if (copy_mask
& TU_DYNAMIC_BLEND_CONSTANTS
) {
169 if (memcmp(&dest
->blend_constants
, &src
->blend_constants
,
170 sizeof(src
->blend_constants
))) {
171 typed_memcpy(dest
->blend_constants
, src
->blend_constants
, 4);
172 dest_mask
|= TU_DYNAMIC_BLEND_CONSTANTS
;
176 if (copy_mask
& TU_DYNAMIC_DEPTH_BOUNDS
) {
177 if (memcmp(&dest
->depth_bounds
, &src
->depth_bounds
,
178 sizeof(src
->depth_bounds
))) {
179 dest
->depth_bounds
= src
->depth_bounds
;
180 dest_mask
|= TU_DYNAMIC_DEPTH_BOUNDS
;
184 if (copy_mask
& TU_DYNAMIC_STENCIL_COMPARE_MASK
) {
185 if (memcmp(&dest
->stencil_compare_mask
, &src
->stencil_compare_mask
,
186 sizeof(src
->stencil_compare_mask
))) {
187 dest
->stencil_compare_mask
= src
->stencil_compare_mask
;
188 dest_mask
|= TU_DYNAMIC_STENCIL_COMPARE_MASK
;
192 if (copy_mask
& TU_DYNAMIC_STENCIL_WRITE_MASK
) {
193 if (memcmp(&dest
->stencil_write_mask
, &src
->stencil_write_mask
,
194 sizeof(src
->stencil_write_mask
))) {
195 dest
->stencil_write_mask
= src
->stencil_write_mask
;
196 dest_mask
|= TU_DYNAMIC_STENCIL_WRITE_MASK
;
200 if (copy_mask
& TU_DYNAMIC_STENCIL_REFERENCE
) {
201 if (memcmp(&dest
->stencil_reference
, &src
->stencil_reference
,
202 sizeof(src
->stencil_reference
))) {
203 dest
->stencil_reference
= src
->stencil_reference
;
204 dest_mask
|= TU_DYNAMIC_STENCIL_REFERENCE
;
208 if (copy_mask
& TU_DYNAMIC_DISCARD_RECTANGLE
) {
209 if (memcmp(&dest
->discard_rectangle
.rectangles
,
210 &src
->discard_rectangle
.rectangles
,
211 src
->discard_rectangle
.count
* sizeof(VkRect2D
))) {
212 typed_memcpy(dest
->discard_rectangle
.rectangles
,
213 src
->discard_rectangle
.rectangles
,
214 src
->discard_rectangle
.count
);
215 dest_mask
|= TU_DYNAMIC_DISCARD_RECTANGLE
;
221 tu_create_cmd_buffer(struct tu_device
*device
,
222 struct tu_cmd_pool
*pool
,
223 VkCommandBufferLevel level
,
224 VkCommandBuffer
*pCommandBuffer
)
226 struct tu_cmd_buffer
*cmd_buffer
;
227 cmd_buffer
= vk_zalloc(&pool
->alloc
, sizeof(*cmd_buffer
), 8,
228 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
229 if (cmd_buffer
== NULL
)
230 return vk_error(device
->instance
, VK_ERROR_OUT_OF_HOST_MEMORY
);
232 cmd_buffer
->_loader_data
.loaderMagic
= ICD_LOADER_MAGIC
;
233 cmd_buffer
->device
= device
;
234 cmd_buffer
->pool
= pool
;
235 cmd_buffer
->level
= level
;
238 list_addtail(&cmd_buffer
->pool_link
, &pool
->cmd_buffers
);
239 cmd_buffer
->queue_family_index
= pool
->queue_family_index
;
242 /* Init the pool_link so we can safely call list_del when we destroy
245 list_inithead(&cmd_buffer
->pool_link
);
246 cmd_buffer
->queue_family_index
= TU_QUEUE_GENERAL
;
249 tu_bo_list_init(&cmd_buffer
->bo_list
);
250 tu_cs_init(&cmd_buffer
->cs
);
252 *pCommandBuffer
= tu_cmd_buffer_to_handle(cmd_buffer
);
254 list_inithead(&cmd_buffer
->upload
.list
);
260 tu_cmd_buffer_destroy(struct tu_cmd_buffer
*cmd_buffer
)
262 list_del(&cmd_buffer
->pool_link
);
264 for (unsigned i
= 0; i
< VK_PIPELINE_BIND_POINT_RANGE_SIZE
; i
++)
265 free(cmd_buffer
->descriptors
[i
].push_set
.set
.mapped_ptr
);
267 tu_cs_finish(cmd_buffer
->device
, &cmd_buffer
->cs
);
268 tu_bo_list_destroy(&cmd_buffer
->bo_list
);
269 vk_free(&cmd_buffer
->pool
->alloc
, cmd_buffer
);
273 tu_reset_cmd_buffer(struct tu_cmd_buffer
*cmd_buffer
)
275 cmd_buffer
->record_result
= VK_SUCCESS
;
277 tu_bo_list_reset(&cmd_buffer
->bo_list
);
278 tu_cs_reset(cmd_buffer
->device
, &cmd_buffer
->cs
);
280 for (unsigned i
= 0; i
< VK_PIPELINE_BIND_POINT_RANGE_SIZE
; i
++) {
281 cmd_buffer
->descriptors
[i
].dirty
= 0;
282 cmd_buffer
->descriptors
[i
].valid
= 0;
283 cmd_buffer
->descriptors
[i
].push_dirty
= false;
286 cmd_buffer
->status
= TU_CMD_BUFFER_STATUS_INITIAL
;
288 return cmd_buffer
->record_result
;
292 tu_AllocateCommandBuffers(VkDevice _device
,
293 const VkCommandBufferAllocateInfo
*pAllocateInfo
,
294 VkCommandBuffer
*pCommandBuffers
)
296 TU_FROM_HANDLE(tu_device
, device
, _device
);
297 TU_FROM_HANDLE(tu_cmd_pool
, pool
, pAllocateInfo
->commandPool
);
299 VkResult result
= VK_SUCCESS
;
302 for (i
= 0; i
< pAllocateInfo
->commandBufferCount
; i
++) {
304 if (!list_empty(&pool
->free_cmd_buffers
)) {
305 struct tu_cmd_buffer
*cmd_buffer
= list_first_entry(
306 &pool
->free_cmd_buffers
, struct tu_cmd_buffer
, pool_link
);
308 list_del(&cmd_buffer
->pool_link
);
309 list_addtail(&cmd_buffer
->pool_link
, &pool
->cmd_buffers
);
311 result
= tu_reset_cmd_buffer(cmd_buffer
);
312 cmd_buffer
->_loader_data
.loaderMagic
= ICD_LOADER_MAGIC
;
313 cmd_buffer
->level
= pAllocateInfo
->level
;
315 pCommandBuffers
[i
] = tu_cmd_buffer_to_handle(cmd_buffer
);
317 result
= tu_create_cmd_buffer(device
, pool
, pAllocateInfo
->level
,
318 &pCommandBuffers
[i
]);
320 if (result
!= VK_SUCCESS
)
324 if (result
!= VK_SUCCESS
) {
325 tu_FreeCommandBuffers(_device
, pAllocateInfo
->commandPool
, i
,
328 /* From the Vulkan 1.0.66 spec:
330 * "vkAllocateCommandBuffers can be used to create multiple
331 * command buffers. If the creation of any of those command
332 * buffers fails, the implementation must destroy all
333 * successfully created command buffer objects from this
334 * command, set all entries of the pCommandBuffers array to
335 * NULL and return the error."
337 memset(pCommandBuffers
, 0,
338 sizeof(*pCommandBuffers
) * pAllocateInfo
->commandBufferCount
);
345 tu_FreeCommandBuffers(VkDevice device
,
346 VkCommandPool commandPool
,
347 uint32_t commandBufferCount
,
348 const VkCommandBuffer
*pCommandBuffers
)
350 for (uint32_t i
= 0; i
< commandBufferCount
; i
++) {
351 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, pCommandBuffers
[i
]);
354 if (cmd_buffer
->pool
) {
355 list_del(&cmd_buffer
->pool_link
);
356 list_addtail(&cmd_buffer
->pool_link
,
357 &cmd_buffer
->pool
->free_cmd_buffers
);
359 tu_cmd_buffer_destroy(cmd_buffer
);
365 tu_ResetCommandBuffer(VkCommandBuffer commandBuffer
,
366 VkCommandBufferResetFlags flags
)
368 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
369 return tu_reset_cmd_buffer(cmd_buffer
);
373 tu_BeginCommandBuffer(VkCommandBuffer commandBuffer
,
374 const VkCommandBufferBeginInfo
*pBeginInfo
)
376 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
377 VkResult result
= VK_SUCCESS
;
379 if (cmd_buffer
->status
!= TU_CMD_BUFFER_STATUS_INITIAL
) {
380 /* If the command buffer has already been resetted with
381 * vkResetCommandBuffer, no need to do it again.
383 result
= tu_reset_cmd_buffer(cmd_buffer
);
384 if (result
!= VK_SUCCESS
)
388 memset(&cmd_buffer
->state
, 0, sizeof(cmd_buffer
->state
));
389 cmd_buffer
->usage_flags
= pBeginInfo
->flags
;
391 /* setup initial configuration into command buffer */
392 if (cmd_buffer
->level
== VK_COMMAND_BUFFER_LEVEL_PRIMARY
) {
393 switch (cmd_buffer
->queue_family_index
) {
394 case TU_QUEUE_GENERAL
:
402 result
= tu_cs_begin(cmd_buffer
->device
, &cmd_buffer
->cs
, 4096);
403 if (result
!= VK_SUCCESS
)
406 cmd_buffer
->status
= TU_CMD_BUFFER_STATUS_RECORDING
;
408 /* Put some stuff in so we do not have empty command buffers. */
409 tu_cs_emit_pkt7(&cmd_buffer
->cs
, CP_NOP
, 4);
410 tu_cs_emit(&cmd_buffer
->cs
, 0);
411 tu_cs_emit(&cmd_buffer
->cs
, 0);
412 tu_cs_emit(&cmd_buffer
->cs
, 0);
413 tu_cs_emit(&cmd_buffer
->cs
, 0);
419 tu_CmdBindVertexBuffers(VkCommandBuffer commandBuffer
,
420 uint32_t firstBinding
,
421 uint32_t bindingCount
,
422 const VkBuffer
*pBuffers
,
423 const VkDeviceSize
*pOffsets
)
428 tu_CmdBindIndexBuffer(VkCommandBuffer commandBuffer
,
431 VkIndexType indexType
)
436 tu_CmdBindDescriptorSets(VkCommandBuffer commandBuffer
,
437 VkPipelineBindPoint pipelineBindPoint
,
438 VkPipelineLayout _layout
,
440 uint32_t descriptorSetCount
,
441 const VkDescriptorSet
*pDescriptorSets
,
442 uint32_t dynamicOffsetCount
,
443 const uint32_t *pDynamicOffsets
)
448 tu_CmdPushConstants(VkCommandBuffer commandBuffer
,
449 VkPipelineLayout layout
,
450 VkShaderStageFlags stageFlags
,
458 tu_EndCommandBuffer(VkCommandBuffer commandBuffer
)
460 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
462 tu_cs_end(&cmd_buffer
->cs
);
463 cmd_buffer
->status
= TU_CMD_BUFFER_STATUS_EXECUTABLE
;
465 return cmd_buffer
->record_result
;
469 tu_CmdBindPipeline(VkCommandBuffer commandBuffer
,
470 VkPipelineBindPoint pipelineBindPoint
,
471 VkPipeline _pipeline
)
476 tu_CmdSetViewport(VkCommandBuffer commandBuffer
,
477 uint32_t firstViewport
,
478 uint32_t viewportCount
,
479 const VkViewport
*pViewports
)
484 tu_CmdSetScissor(VkCommandBuffer commandBuffer
,
485 uint32_t firstScissor
,
486 uint32_t scissorCount
,
487 const VkRect2D
*pScissors
)
492 tu_CmdSetLineWidth(VkCommandBuffer commandBuffer
, float lineWidth
)
497 tu_CmdSetDepthBias(VkCommandBuffer commandBuffer
,
498 float depthBiasConstantFactor
,
499 float depthBiasClamp
,
500 float depthBiasSlopeFactor
)
505 tu_CmdSetBlendConstants(VkCommandBuffer commandBuffer
,
506 const float blendConstants
[4])
511 tu_CmdSetDepthBounds(VkCommandBuffer commandBuffer
,
512 float minDepthBounds
,
513 float maxDepthBounds
)
518 tu_CmdSetStencilCompareMask(VkCommandBuffer commandBuffer
,
519 VkStencilFaceFlags faceMask
,
520 uint32_t compareMask
)
525 tu_CmdSetStencilWriteMask(VkCommandBuffer commandBuffer
,
526 VkStencilFaceFlags faceMask
,
532 tu_CmdSetStencilReference(VkCommandBuffer commandBuffer
,
533 VkStencilFaceFlags faceMask
,
539 tu_CmdExecuteCommands(VkCommandBuffer commandBuffer
,
540 uint32_t commandBufferCount
,
541 const VkCommandBuffer
*pCmdBuffers
)
546 tu_CreateCommandPool(VkDevice _device
,
547 const VkCommandPoolCreateInfo
*pCreateInfo
,
548 const VkAllocationCallbacks
*pAllocator
,
549 VkCommandPool
*pCmdPool
)
551 TU_FROM_HANDLE(tu_device
, device
, _device
);
552 struct tu_cmd_pool
*pool
;
554 pool
= vk_alloc2(&device
->alloc
, pAllocator
, sizeof(*pool
), 8,
555 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
557 return vk_error(device
->instance
, VK_ERROR_OUT_OF_HOST_MEMORY
);
560 pool
->alloc
= *pAllocator
;
562 pool
->alloc
= device
->alloc
;
564 list_inithead(&pool
->cmd_buffers
);
565 list_inithead(&pool
->free_cmd_buffers
);
567 pool
->queue_family_index
= pCreateInfo
->queueFamilyIndex
;
569 *pCmdPool
= tu_cmd_pool_to_handle(pool
);
575 tu_DestroyCommandPool(VkDevice _device
,
576 VkCommandPool commandPool
,
577 const VkAllocationCallbacks
*pAllocator
)
579 TU_FROM_HANDLE(tu_device
, device
, _device
);
580 TU_FROM_HANDLE(tu_cmd_pool
, pool
, commandPool
);
585 list_for_each_entry_safe(struct tu_cmd_buffer
, cmd_buffer
,
586 &pool
->cmd_buffers
, pool_link
)
588 tu_cmd_buffer_destroy(cmd_buffer
);
591 list_for_each_entry_safe(struct tu_cmd_buffer
, cmd_buffer
,
592 &pool
->free_cmd_buffers
, pool_link
)
594 tu_cmd_buffer_destroy(cmd_buffer
);
597 vk_free2(&device
->alloc
, pAllocator
, pool
);
601 tu_ResetCommandPool(VkDevice device
,
602 VkCommandPool commandPool
,
603 VkCommandPoolResetFlags flags
)
605 TU_FROM_HANDLE(tu_cmd_pool
, pool
, commandPool
);
608 list_for_each_entry(struct tu_cmd_buffer
, cmd_buffer
, &pool
->cmd_buffers
,
611 result
= tu_reset_cmd_buffer(cmd_buffer
);
612 if (result
!= VK_SUCCESS
)
620 tu_TrimCommandPool(VkDevice device
,
621 VkCommandPool commandPool
,
622 VkCommandPoolTrimFlagsKHR flags
)
624 TU_FROM_HANDLE(tu_cmd_pool
, pool
, commandPool
);
629 list_for_each_entry_safe(struct tu_cmd_buffer
, cmd_buffer
,
630 &pool
->free_cmd_buffers
, pool_link
)
632 tu_cmd_buffer_destroy(cmd_buffer
);
637 tu_CmdBeginRenderPass(VkCommandBuffer commandBuffer
,
638 const VkRenderPassBeginInfo
*pRenderPassBegin
,
639 VkSubpassContents contents
)
644 tu_CmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer
,
645 const VkRenderPassBeginInfo
*pRenderPassBeginInfo
,
646 const VkSubpassBeginInfoKHR
*pSubpassBeginInfo
)
648 tu_CmdBeginRenderPass(commandBuffer
, pRenderPassBeginInfo
,
649 pSubpassBeginInfo
->contents
);
653 tu_CmdNextSubpass(VkCommandBuffer commandBuffer
, VkSubpassContents contents
)
658 tu_CmdNextSubpass2KHR(VkCommandBuffer commandBuffer
,
659 const VkSubpassBeginInfoKHR
*pSubpassBeginInfo
,
660 const VkSubpassEndInfoKHR
*pSubpassEndInfo
)
662 tu_CmdNextSubpass(commandBuffer
, pSubpassBeginInfo
->contents
);
/* Normalized parameters for all vkCmdDraw* variants, consumed by tu_draw().
 * Field set reconstructed from the surviving comments and the tu_CmdDraw*
 * bodies below.
 */
struct tu_draw_info
{
   /**
    * Number of vertices.
    */
   uint32_t count;

   /**
    * Index of the first vertex.
    */
   int32_t vertex_offset;

   /**
    * First instance id.
    */
   uint32_t first_instance;

   /**
    * Number of instances.
    */
   uint32_t instance_count;

   /**
    * First index (indexed draws only).
    */
   uint32_t first_index;

   /**
    * Whether it's an indexed draw.
    */
   bool indexed;

   /**
    * Indirect draw parameters resource.
    */
   struct tu_buffer *indirect;
   uint64_t indirect_offset;
   uint32_t stride;

   /**
    * Draw count parameters resource.
    */
   struct tu_buffer *count_buffer;
   uint64_t count_buffer_offset;
};
/* Common draw path for all vkCmdDraw* variants — stub (FINISHME). */
static void
tu_draw(struct tu_cmd_buffer *cmd_buffer, const struct tu_draw_info *info)
{
}
717 tu_CmdDraw(VkCommandBuffer commandBuffer
,
718 uint32_t vertexCount
,
719 uint32_t instanceCount
,
720 uint32_t firstVertex
,
721 uint32_t firstInstance
)
723 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
724 struct tu_draw_info info
= {};
726 info
.count
= vertexCount
;
727 info
.instance_count
= instanceCount
;
728 info
.first_instance
= firstInstance
;
729 info
.vertex_offset
= firstVertex
;
731 tu_draw(cmd_buffer
, &info
);
735 tu_CmdDrawIndexed(VkCommandBuffer commandBuffer
,
737 uint32_t instanceCount
,
739 int32_t vertexOffset
,
740 uint32_t firstInstance
)
742 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
743 struct tu_draw_info info
= {};
746 info
.count
= indexCount
;
747 info
.instance_count
= instanceCount
;
748 info
.first_index
= firstIndex
;
749 info
.vertex_offset
= vertexOffset
;
750 info
.first_instance
= firstInstance
;
752 tu_draw(cmd_buffer
, &info
);
756 tu_CmdDrawIndirect(VkCommandBuffer commandBuffer
,
762 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
763 TU_FROM_HANDLE(tu_buffer
, buffer
, _buffer
);
764 struct tu_draw_info info
= {};
766 info
.count
= drawCount
;
767 info
.indirect
= buffer
;
768 info
.indirect_offset
= offset
;
769 info
.stride
= stride
;
771 tu_draw(cmd_buffer
, &info
);
775 tu_CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer
,
781 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
782 TU_FROM_HANDLE(tu_buffer
, buffer
, _buffer
);
783 struct tu_draw_info info
= {};
786 info
.count
= drawCount
;
787 info
.indirect
= buffer
;
788 info
.indirect_offset
= offset
;
789 info
.stride
= stride
;
791 tu_draw(cmd_buffer
, &info
);
/* Normalized parameters for all vkCmdDispatch* variants, consumed by
 * tu_dispatch(). Field set reconstructed from the surviving comments and
 * the tu_CmdDispatch* bodies below.
 */
struct tu_dispatch_info
{
   /**
    * Determine the layout of the grid (in block units) to be used.
    */
   uint32_t blocks[3];

   /**
    * A starting offset for the grid. If unaligned is set, the offset
    * must still be aligned.
    */
   uint32_t offsets[3];

   /**
    * Whether it's an unaligned compute dispatch.
    */
   bool unaligned;

   /**
    * Indirect compute parameters resource.
    */
   struct tu_buffer *indirect;
   uint64_t indirect_offset;
};
/* Common dispatch path for all vkCmdDispatch* variants — stub (FINISHME). */
static void
tu_dispatch(struct tu_cmd_buffer *cmd_buffer,
            const struct tu_dispatch_info *info)
{
}
825 tu_CmdDispatchBase(VkCommandBuffer commandBuffer
,
833 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
834 struct tu_dispatch_info info
= {};
840 info
.offsets
[0] = base_x
;
841 info
.offsets
[1] = base_y
;
842 info
.offsets
[2] = base_z
;
843 tu_dispatch(cmd_buffer
, &info
);
847 tu_CmdDispatch(VkCommandBuffer commandBuffer
,
852 tu_CmdDispatchBase(commandBuffer
, 0, 0, 0, x
, y
, z
);
856 tu_CmdDispatchIndirect(VkCommandBuffer commandBuffer
,
860 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
861 TU_FROM_HANDLE(tu_buffer
, buffer
, _buffer
);
862 struct tu_dispatch_info info
= {};
864 info
.indirect
= buffer
;
865 info
.indirect_offset
= offset
;
867 tu_dispatch(cmd_buffer
, &info
);
871 tu_CmdEndRenderPass(VkCommandBuffer commandBuffer
)
876 tu_CmdEndRenderPass2KHR(VkCommandBuffer commandBuffer
,
877 const VkSubpassEndInfoKHR
*pSubpassEndInfo
)
879 tu_CmdEndRenderPass(commandBuffer
);
882 struct tu_barrier_info
885 const VkEvent
*pEvents
;
886 VkPipelineStageFlags srcStageMask
;
890 tu_barrier(struct tu_cmd_buffer
*cmd_buffer
,
891 uint32_t memoryBarrierCount
,
892 const VkMemoryBarrier
*pMemoryBarriers
,
893 uint32_t bufferMemoryBarrierCount
,
894 const VkBufferMemoryBarrier
*pBufferMemoryBarriers
,
895 uint32_t imageMemoryBarrierCount
,
896 const VkImageMemoryBarrier
*pImageMemoryBarriers
,
897 const struct tu_barrier_info
*info
)
902 tu_CmdPipelineBarrier(VkCommandBuffer commandBuffer
,
903 VkPipelineStageFlags srcStageMask
,
904 VkPipelineStageFlags destStageMask
,
906 uint32_t memoryBarrierCount
,
907 const VkMemoryBarrier
*pMemoryBarriers
,
908 uint32_t bufferMemoryBarrierCount
,
909 const VkBufferMemoryBarrier
*pBufferMemoryBarriers
,
910 uint32_t imageMemoryBarrierCount
,
911 const VkImageMemoryBarrier
*pImageMemoryBarriers
)
913 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
914 struct tu_barrier_info info
;
918 info
.srcStageMask
= srcStageMask
;
920 tu_barrier(cmd_buffer
, memoryBarrierCount
, pMemoryBarriers
,
921 bufferMemoryBarrierCount
, pBufferMemoryBarriers
,
922 imageMemoryBarrierCount
, pImageMemoryBarriers
, &info
);
926 write_event(struct tu_cmd_buffer
*cmd_buffer
,
927 struct tu_event
*event
,
928 VkPipelineStageFlags stageMask
,
934 tu_CmdSetEvent(VkCommandBuffer commandBuffer
,
936 VkPipelineStageFlags stageMask
)
938 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
939 TU_FROM_HANDLE(tu_event
, event
, _event
);
941 write_event(cmd_buffer
, event
, stageMask
, 1);
945 tu_CmdResetEvent(VkCommandBuffer commandBuffer
,
947 VkPipelineStageFlags stageMask
)
949 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
950 TU_FROM_HANDLE(tu_event
, event
, _event
);
952 write_event(cmd_buffer
, event
, stageMask
, 0);
956 tu_CmdWaitEvents(VkCommandBuffer commandBuffer
,
958 const VkEvent
*pEvents
,
959 VkPipelineStageFlags srcStageMask
,
960 VkPipelineStageFlags dstStageMask
,
961 uint32_t memoryBarrierCount
,
962 const VkMemoryBarrier
*pMemoryBarriers
,
963 uint32_t bufferMemoryBarrierCount
,
964 const VkBufferMemoryBarrier
*pBufferMemoryBarriers
,
965 uint32_t imageMemoryBarrierCount
,
966 const VkImageMemoryBarrier
*pImageMemoryBarriers
)
968 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
969 struct tu_barrier_info info
;
971 info
.eventCount
= eventCount
;
972 info
.pEvents
= pEvents
;
973 info
.srcStageMask
= 0;
975 tu_barrier(cmd_buffer
, memoryBarrierCount
, pMemoryBarriers
,
976 bufferMemoryBarrierCount
, pBufferMemoryBarriers
,
977 imageMemoryBarrierCount
, pImageMemoryBarriers
, &info
);
981 tu_CmdSetDeviceMask(VkCommandBuffer commandBuffer
, uint32_t deviceMask
)