/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "tu_private.h"

#include "vk_format.h"
const struct tu_dynamic_state default_dynamic_state = {
   .blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
   .stencil_compare_mask =
     {
       .front = ~0u,
       .back = ~0u,
     },
};
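/* Bind the dynamic state baked into a pipeline to the command buffer state,
 * recording which pieces actually changed so only those need to be
 * re-emitted. A hypothetical caller (sketch only, not necessarily the real
 * tu_CmdBindPipeline path) would look roughly like:
 *
 *    tu_bind_dynamic_state(cmd_buffer, &pipeline->dynamic_state);
 */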
static void
tu_bind_dynamic_state(struct tu_cmd_buffer *cmd_buffer,
                      const struct tu_dynamic_state *src)
{
   struct tu_dynamic_state *dest = &cmd_buffer->state.dynamic;
   uint32_t copy_mask = src->mask;
   uint32_t dest_mask = 0;

   /* Make sure to copy the number of viewports/scissors because they can
    * only be specified at pipeline creation time.
    */
   dest->viewport.count = src->viewport.count;
   dest->scissor.count = src->scissor.count;
   dest->discard_rectangle.count = src->discard_rectangle.count;

   if (copy_mask & TU_DYNAMIC_VIEWPORT) {
      if (memcmp(&dest->viewport.viewports, &src->viewport.viewports,
                 src->viewport.count * sizeof(VkViewport))) {
         typed_memcpy(dest->viewport.viewports, src->viewport.viewports,
                      src->viewport.count);
         dest_mask |= TU_DYNAMIC_VIEWPORT;
      }
   }

   if (copy_mask & TU_DYNAMIC_SCISSOR) {
      if (memcmp(&dest->scissor.scissors, &src->scissor.scissors,
                 src->scissor.count * sizeof(VkRect2D))) {
         typed_memcpy(dest->scissor.scissors, src->scissor.scissors,
                      src->scissor.count);
         dest_mask |= TU_DYNAMIC_SCISSOR;
      }
   }

   if (copy_mask & TU_DYNAMIC_LINE_WIDTH) {
      if (dest->line_width != src->line_width) {
         dest->line_width = src->line_width;
         dest_mask |= TU_DYNAMIC_LINE_WIDTH;
      }
   }

   if (copy_mask & TU_DYNAMIC_DEPTH_BIAS) {
      if (memcmp(&dest->depth_bias, &src->depth_bias,
                 sizeof(src->depth_bias))) {
         dest->depth_bias = src->depth_bias;
         dest_mask |= TU_DYNAMIC_DEPTH_BIAS;
      }
   }

   if (copy_mask & TU_DYNAMIC_BLEND_CONSTANTS) {
      if (memcmp(&dest->blend_constants, &src->blend_constants,
                 sizeof(src->blend_constants))) {
         typed_memcpy(dest->blend_constants, src->blend_constants, 4);
         dest_mask |= TU_DYNAMIC_BLEND_CONSTANTS;
      }
   }

   if (copy_mask & TU_DYNAMIC_DEPTH_BOUNDS) {
      if (memcmp(&dest->depth_bounds, &src->depth_bounds,
                 sizeof(src->depth_bounds))) {
         dest->depth_bounds = src->depth_bounds;
         dest_mask |= TU_DYNAMIC_DEPTH_BOUNDS;
      }
   }

   if (copy_mask & TU_DYNAMIC_STENCIL_COMPARE_MASK) {
      if (memcmp(&dest->stencil_compare_mask, &src->stencil_compare_mask,
                 sizeof(src->stencil_compare_mask))) {
         dest->stencil_compare_mask = src->stencil_compare_mask;
         dest_mask |= TU_DYNAMIC_STENCIL_COMPARE_MASK;
      }
   }

   if (copy_mask & TU_DYNAMIC_STENCIL_WRITE_MASK) {
      if (memcmp(&dest->stencil_write_mask, &src->stencil_write_mask,
                 sizeof(src->stencil_write_mask))) {
         dest->stencil_write_mask = src->stencil_write_mask;
         dest_mask |= TU_DYNAMIC_STENCIL_WRITE_MASK;
      }
   }

   if (copy_mask & TU_DYNAMIC_STENCIL_REFERENCE) {
      if (memcmp(&dest->stencil_reference, &src->stencil_reference,
                 sizeof(src->stencil_reference))) {
         dest->stencil_reference = src->stencil_reference;
         dest_mask |= TU_DYNAMIC_STENCIL_REFERENCE;
      }
   }

   if (copy_mask & TU_DYNAMIC_DISCARD_RECTANGLE) {
      if (memcmp(&dest->discard_rectangle.rectangles,
                 &src->discard_rectangle.rectangles,
                 src->discard_rectangle.count * sizeof(VkRect2D))) {
         typed_memcpy(dest->discard_rectangle.rectangles,
                      src->discard_rectangle.rectangles,
                      src->discard_rectangle.count);
         dest_mask |= TU_DYNAMIC_DISCARD_RECTANGLE;
      }
   }
}
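/* Allocate and initialize one command buffer. When a pool is given, the
 * buffer is linked into the pool's list and inherits its queue family;
 * otherwise the pool_link is self-initialized so that list_del() in
 * tu_cmd_buffer_destroy() stays safe.
 */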
static VkResult
tu_create_cmd_buffer(struct tu_device *device,
                     struct tu_cmd_pool *pool,
                     VkCommandBufferLevel level,
                     VkCommandBuffer *pCommandBuffer)
{
   struct tu_cmd_buffer *cmd_buffer;
   cmd_buffer = vk_zalloc(
     &pool->alloc, sizeof(*cmd_buffer), 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (cmd_buffer == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   cmd_buffer->device = device;
   cmd_buffer->pool = pool;
   cmd_buffer->level = level;

   if (pool) {
      list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
      cmd_buffer->queue_family_index = pool->queue_family_index;
   } else {
      /* Init the pool_link so we can safely call list_del when we destroy
       * the command buffer.
       */
      list_inithead(&cmd_buffer->pool_link);
      cmd_buffer->queue_family_index = TU_QUEUE_GENERAL;
   }

   *pCommandBuffer = tu_cmd_buffer_to_handle(cmd_buffer);

   list_inithead(&cmd_buffer->upload.list);

   return VK_SUCCESS;
}
static void
tu_cmd_buffer_destroy(struct tu_cmd_buffer *cmd_buffer)
{
   list_del(&cmd_buffer->pool_link);

   for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++)
      free(cmd_buffer->descriptors[i].push_set.set.mapped_ptr);

   vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
}
static VkResult
tu_reset_cmd_buffer(struct tu_cmd_buffer *cmd_buffer)
{
   cmd_buffer->record_result = VK_SUCCESS;

   for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++) {
      cmd_buffer->descriptors[i].dirty = 0;
      cmd_buffer->descriptors[i].valid = 0;
      cmd_buffer->descriptors[i].push_dirty = false;
   }

   cmd_buffer->status = TU_CMD_BUFFER_STATUS_INITIAL;

   return cmd_buffer->record_result;
}
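/* vkAllocateCommandBuffers: command buffers are recycled from the pool's
 * free list when possible and only allocated fresh when that list is empty.
 * On failure, every buffer created so far is freed again and the whole
 * pCommandBuffers array is zeroed, as the spec quoted below requires.
 */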
VkResult
tu_AllocateCommandBuffers(VkDevice _device,
                          const VkCommandBufferAllocateInfo *pAllocateInfo,
                          VkCommandBuffer *pCommandBuffers)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_cmd_pool, pool, pAllocateInfo->commandPool);

   VkResult result = VK_SUCCESS;
   uint32_t i;

   for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {

      if (!list_empty(&pool->free_cmd_buffers)) {
         struct tu_cmd_buffer *cmd_buffer = list_first_entry(
           &pool->free_cmd_buffers, struct tu_cmd_buffer, pool_link);

         list_del(&cmd_buffer->pool_link);
         list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);

         result = tu_reset_cmd_buffer(cmd_buffer);
         cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
         cmd_buffer->level = pAllocateInfo->level;

         pCommandBuffers[i] = tu_cmd_buffer_to_handle(cmd_buffer);
      } else {
         result = tu_create_cmd_buffer(
           device, pool, pAllocateInfo->level, &pCommandBuffers[i]);
      }
      if (result != VK_SUCCESS)
         break;
   }

   if (result != VK_SUCCESS) {
      tu_FreeCommandBuffers(
        _device, pAllocateInfo->commandPool, i, pCommandBuffers);

      /* From the Vulkan 1.0.66 spec:
       *
       * "vkAllocateCommandBuffers can be used to create multiple
       *  command buffers. If the creation of any of those command
       *  buffers fails, the implementation must destroy all
       *  successfully created command buffer objects from this
       *  command, set all entries of the pCommandBuffers array to
       *  NULL and return the error."
       */
      memset(pCommandBuffers, 0,
             sizeof(*pCommandBuffers) * pAllocateInfo->commandBufferCount);
   }

   return result;
}
void
tu_FreeCommandBuffers(VkDevice device,
                      VkCommandPool commandPool,
                      uint32_t commandBufferCount,
                      const VkCommandBuffer *pCommandBuffers)
{
   for (uint32_t i = 0; i < commandBufferCount; i++) {
      TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, pCommandBuffers[i]);

      if (cmd_buffer->pool) {
         list_del(&cmd_buffer->pool_link);
         list_addtail(&cmd_buffer->pool_link,
                      &cmd_buffer->pool->free_cmd_buffers);
      } else {
         tu_cmd_buffer_destroy(cmd_buffer);
      }
   }
}

VkResult
tu_ResetCommandBuffer(VkCommandBuffer commandBuffer,
                      VkCommandBufferResetFlags flags)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   return tu_reset_cmd_buffer(cmd_buffer);
}
VkResult
tu_BeginCommandBuffer(VkCommandBuffer commandBuffer,
                      const VkCommandBufferBeginInfo *pBeginInfo)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   VkResult result = VK_SUCCESS;

   if (cmd_buffer->status != TU_CMD_BUFFER_STATUS_INITIAL) {
      /* If the command buffer has already been reset with
       * vkResetCommandBuffer, there is no need to do it again.
       */
      result = tu_reset_cmd_buffer(cmd_buffer);
      if (result != VK_SUCCESS)
         return result;
   }

   memset(&cmd_buffer->state, 0, sizeof(cmd_buffer->state));
   cmd_buffer->usage_flags = pBeginInfo->flags;

   /* Set up the initial configuration for the command buffer. */
   if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
      switch (cmd_buffer->queue_family_index) {
      case TU_QUEUE_GENERAL:
         break;
      default:
         break;
      }
   }

   cmd_buffer->status = TU_CMD_BUFFER_STATUS_RECORDING;

   return result;
}
void
tu_CmdBindVertexBuffers(VkCommandBuffer commandBuffer,
                        uint32_t firstBinding,
                        uint32_t bindingCount,
                        const VkBuffer *pBuffers,
                        const VkDeviceSize *pOffsets)
{
}

void
tu_CmdBindIndexBuffer(VkCommandBuffer commandBuffer,
                      VkBuffer buffer,
                      VkDeviceSize offset,
                      VkIndexType indexType)
{
}

void
tu_CmdBindDescriptorSets(VkCommandBuffer commandBuffer,
                         VkPipelineBindPoint pipelineBindPoint,
                         VkPipelineLayout _layout,
                         uint32_t firstSet,
                         uint32_t descriptorSetCount,
                         const VkDescriptorSet *pDescriptorSets,
                         uint32_t dynamicOffsetCount,
                         const uint32_t *pDynamicOffsets)
{
}

void
tu_CmdPushConstants(VkCommandBuffer commandBuffer,
                    VkPipelineLayout layout,
                    VkShaderStageFlags stageFlags,
                    uint32_t offset,
                    uint32_t size,
                    const void *pValues)
{
}

VkResult
tu_EndCommandBuffer(VkCommandBuffer commandBuffer)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->status = TU_CMD_BUFFER_STATUS_EXECUTABLE;

   return cmd_buffer->record_result;
}
void
tu_CmdBindPipeline(VkCommandBuffer commandBuffer,
                   VkPipelineBindPoint pipelineBindPoint,
                   VkPipeline _pipeline)
{
}

void
tu_CmdSetViewport(VkCommandBuffer commandBuffer,
                  uint32_t firstViewport,
                  uint32_t viewportCount,
                  const VkViewport *pViewports)
{
}

void
tu_CmdSetScissor(VkCommandBuffer commandBuffer,
                 uint32_t firstScissor,
                 uint32_t scissorCount,
                 const VkRect2D *pScissors)
{
}

void
tu_CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth)
{
}

void
tu_CmdSetDepthBias(VkCommandBuffer commandBuffer,
                   float depthBiasConstantFactor,
                   float depthBiasClamp,
                   float depthBiasSlopeFactor)
{
}

void
tu_CmdSetBlendConstants(VkCommandBuffer commandBuffer,
                        const float blendConstants[4])
{
}

void
tu_CmdSetDepthBounds(VkCommandBuffer commandBuffer,
                     float minDepthBounds,
                     float maxDepthBounds)
{
}

void
tu_CmdSetStencilCompareMask(VkCommandBuffer commandBuffer,
                            VkStencilFaceFlags faceMask,
                            uint32_t compareMask)
{
}

void
tu_CmdSetStencilWriteMask(VkCommandBuffer commandBuffer,
                          VkStencilFaceFlags faceMask,
                          uint32_t writeMask)
{
}

void
tu_CmdSetStencilReference(VkCommandBuffer commandBuffer,
                          VkStencilFaceFlags faceMask,
                          uint32_t reference)
{
}

void
tu_CmdExecuteCommands(VkCommandBuffer commandBuffer,
                      uint32_t commandBufferCount,
                      const VkCommandBuffer *pCmdBuffers)
{
}
VkResult
tu_CreateCommandPool(VkDevice _device,
                     const VkCommandPoolCreateInfo *pCreateInfo,
                     const VkAllocationCallbacks *pAllocator,
                     VkCommandPool *pCmdPool)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_cmd_pool *pool;

   pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (pool == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   if (pAllocator)
      pool->alloc = *pAllocator;
   else
      pool->alloc = device->alloc;

   list_inithead(&pool->cmd_buffers);
   list_inithead(&pool->free_cmd_buffers);

   pool->queue_family_index = pCreateInfo->queueFamilyIndex;

   *pCmdPool = tu_cmd_pool_to_handle(pool);

   return VK_SUCCESS;
}
void
tu_DestroyCommandPool(VkDevice _device,
                      VkCommandPool commandPool,
                      const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);

   if (!pool)
      return;

   list_for_each_entry_safe(
     struct tu_cmd_buffer, cmd_buffer, &pool->cmd_buffers, pool_link)
   {
      tu_cmd_buffer_destroy(cmd_buffer);
   }

   list_for_each_entry_safe(
     struct tu_cmd_buffer, cmd_buffer, &pool->free_cmd_buffers, pool_link)
   {
      tu_cmd_buffer_destroy(cmd_buffer);
   }

   vk_free2(&device->alloc, pAllocator, pool);
}
VkResult
tu_ResetCommandPool(VkDevice device,
                    VkCommandPool commandPool,
                    VkCommandPoolResetFlags flags)
{
   TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);
   VkResult result;

   list_for_each_entry(
     struct tu_cmd_buffer, cmd_buffer, &pool->cmd_buffers, pool_link)
   {
      result = tu_reset_cmd_buffer(cmd_buffer);
      if (result != VK_SUCCESS)
         return result;
   }

   return VK_SUCCESS;
}

void
tu_TrimCommandPool(VkDevice device,
                   VkCommandPool commandPool,
                   VkCommandPoolTrimFlagsKHR flags)
{
   TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);

   if (!pool)
      return;

   list_for_each_entry_safe(
     struct tu_cmd_buffer, cmd_buffer, &pool->free_cmd_buffers, pool_link)
   {
      tu_cmd_buffer_destroy(cmd_buffer);
   }
}
void
tu_CmdBeginRenderPass(VkCommandBuffer commandBuffer,
                      const VkRenderPassBeginInfo *pRenderPassBegin,
                      VkSubpassContents contents)
{
}

void
tu_CmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer,
                          const VkRenderPassBeginInfo *pRenderPassBeginInfo,
                          const VkSubpassBeginInfoKHR *pSubpassBeginInfo)
{
   tu_CmdBeginRenderPass(
     commandBuffer, pRenderPassBeginInfo, pSubpassBeginInfo->contents);
}

void
tu_CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents)
{
}

void
tu_CmdNextSubpass2KHR(VkCommandBuffer commandBuffer,
                      const VkSubpassBeginInfoKHR *pSubpassBeginInfo,
                      const VkSubpassEndInfoKHR *pSubpassEndInfo)
{
   tu_CmdNextSubpass(commandBuffer, pSubpassBeginInfo->contents);
}
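/* Draw parameters gathered by the vkCmdDraw* entry points below and handed
 * to the common tu_draw() path.
 */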
struct tu_draw_info
{
   /**
    * Number of vertices.
    */
   uint32_t count;

   /**
    * Index of the first vertex.
    */
   int32_t vertex_offset;

   /**
    * First instance id.
    */
   uint32_t first_instance;

   /**
    * Number of instances.
    */
   uint32_t instance_count;

   /**
    * First index (indexed draws only).
    */
   uint32_t first_index;

   /**
    * Whether it's an indexed draw.
    */
   bool indexed;

   /**
    * Indirect draw parameters resource.
    */
   struct tu_buffer *indirect;
   uint64_t indirect_offset;
   uint32_t stride;

   /**
    * Draw count parameters resource.
    */
   struct tu_buffer *count_buffer;
   uint64_t count_buffer_offset;
};

static void
tu_draw(struct tu_cmd_buffer *cmd_buffer, const struct tu_draw_info *info)
{
}
void
tu_CmdDraw(VkCommandBuffer commandBuffer,
           uint32_t vertexCount,
           uint32_t instanceCount,
           uint32_t firstVertex,
           uint32_t firstInstance)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_draw_info info = {};

   info.count = vertexCount;
   info.instance_count = instanceCount;
   info.first_instance = firstInstance;
   info.vertex_offset = firstVertex;

   tu_draw(cmd_buffer, &info);
}

void
tu_CmdDrawIndexed(VkCommandBuffer commandBuffer,
                  uint32_t indexCount,
                  uint32_t instanceCount,
                  uint32_t firstIndex,
                  int32_t vertexOffset,
                  uint32_t firstInstance)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_draw_info info = {};

   info.count = indexCount;
   info.instance_count = instanceCount;
   info.first_index = firstIndex;
   info.vertex_offset = vertexOffset;
   info.first_instance = firstInstance;

   tu_draw(cmd_buffer, &info);
}
void
tu_CmdDrawIndirect(VkCommandBuffer commandBuffer,
                   VkBuffer _buffer,
                   VkDeviceSize offset,
                   uint32_t drawCount,
                   uint32_t stride)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
   struct tu_draw_info info = {};

   info.count = drawCount;
   info.indirect = buffer;
   info.indirect_offset = offset;
   info.stride = stride;

   tu_draw(cmd_buffer, &info);
}

void
tu_CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer,
                          VkBuffer _buffer,
                          VkDeviceSize offset,
                          uint32_t drawCount,
                          uint32_t stride)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
   struct tu_draw_info info = {};

   info.count = drawCount;
   info.indirect = buffer;
   info.indirect_offset = offset;
   info.stride = stride;

   tu_draw(cmd_buffer, &info);
}
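/* Dispatch parameters gathered by the vkCmdDispatch* entry points below and
 * handed to the common tu_dispatch() path.
 */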
struct tu_dispatch_info
{
   /**
    * Determine the layout of the grid (in block units) to be used.
    */
   uint32_t blocks[3];

   /**
    * A starting offset for the grid. If unaligned is set, the offset
    * must still be aligned.
    */
   uint32_t offsets[3];

   /**
    * Whether it's an unaligned compute dispatch.
    */
   bool unaligned;

   /**
    * Indirect compute parameters resource.
    */
   struct tu_buffer *indirect;
   uint64_t indirect_offset;
};

static void
tu_dispatch(struct tu_cmd_buffer *cmd_buffer,
            const struct tu_dispatch_info *info)
{
}
void
tu_CmdDispatchBase(VkCommandBuffer commandBuffer,
                   uint32_t base_x,
                   uint32_t base_y,
                   uint32_t base_z,
                   uint32_t x,
                   uint32_t y,
                   uint32_t z)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_dispatch_info info = {};

   info.blocks[0] = x;
   info.blocks[1] = y;
   info.blocks[2] = z;

   info.offsets[0] = base_x;
   info.offsets[1] = base_y;
   info.offsets[2] = base_z;
   tu_dispatch(cmd_buffer, &info);
}

void
tu_CmdDispatch(VkCommandBuffer commandBuffer,
               uint32_t x,
               uint32_t y,
               uint32_t z)
{
   tu_CmdDispatchBase(commandBuffer, 0, 0, 0, x, y, z);
}

void
tu_CmdDispatchIndirect(VkCommandBuffer commandBuffer,
                       VkBuffer _buffer,
                       VkDeviceSize offset)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
   struct tu_dispatch_info info = {};

   info.indirect = buffer;
   info.indirect_offset = offset;

   tu_dispatch(cmd_buffer, &info);
}
void
tu_CmdEndRenderPass(VkCommandBuffer commandBuffer)
{
}

void
tu_CmdEndRenderPass2KHR(VkCommandBuffer commandBuffer,
                        const VkSubpassEndInfoKHR *pSubpassEndInfo)
{
   tu_CmdEndRenderPass(commandBuffer);
}
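/* Barrier parameters shared by vkCmdPipelineBarrier and vkCmdWaitEvents;
 * both entry points funnel into the common tu_barrier() path.
 */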
struct tu_barrier_info
{
   uint32_t eventCount;
   const VkEvent *pEvents;
   VkPipelineStageFlags srcStageMask;
};

static void
tu_barrier(struct tu_cmd_buffer *cmd_buffer,
           uint32_t memoryBarrierCount,
           const VkMemoryBarrier *pMemoryBarriers,
           uint32_t bufferMemoryBarrierCount,
           const VkBufferMemoryBarrier *pBufferMemoryBarriers,
           uint32_t imageMemoryBarrierCount,
           const VkImageMemoryBarrier *pImageMemoryBarriers,
           const struct tu_barrier_info *info)
{
}
void
tu_CmdPipelineBarrier(VkCommandBuffer commandBuffer,
                      VkPipelineStageFlags srcStageMask,
                      VkPipelineStageFlags destStageMask,
                      VkDependencyFlags dependencyFlags,
                      uint32_t memoryBarrierCount,
                      const VkMemoryBarrier *pMemoryBarriers,
                      uint32_t bufferMemoryBarrierCount,
                      const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                      uint32_t imageMemoryBarrierCount,
                      const VkImageMemoryBarrier *pImageMemoryBarriers)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_barrier_info info;

   info.eventCount = 0;
   info.pEvents = NULL;
   info.srcStageMask = srcStageMask;

   tu_barrier(cmd_buffer,
              memoryBarrierCount,
              pMemoryBarriers,
              bufferMemoryBarrierCount,
              pBufferMemoryBarriers,
              imageMemoryBarrierCount,
              pImageMemoryBarriers,
              &info);
}
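/* Intended to record a command that sets (value = 1) or resets (value = 0)
 * the given event once the stages in stageMask have completed; used by
 * vkCmdSetEvent and vkCmdResetEvent below.
 */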
static void
write_event(struct tu_cmd_buffer *cmd_buffer,
            struct tu_event *event,
            VkPipelineStageFlags stageMask,
            unsigned value)
{
}

void
tu_CmdSetEvent(VkCommandBuffer commandBuffer,
               VkEvent _event,
               VkPipelineStageFlags stageMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_event, event, _event);

   write_event(cmd_buffer, event, stageMask, 1);
}

void
tu_CmdResetEvent(VkCommandBuffer commandBuffer,
                 VkEvent _event,
                 VkPipelineStageFlags stageMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_event, event, _event);

   write_event(cmd_buffer, event, stageMask, 0);
}
void
tu_CmdWaitEvents(VkCommandBuffer commandBuffer,
                 uint32_t eventCount,
                 const VkEvent *pEvents,
                 VkPipelineStageFlags srcStageMask,
                 VkPipelineStageFlags dstStageMask,
                 uint32_t memoryBarrierCount,
                 const VkMemoryBarrier *pMemoryBarriers,
                 uint32_t bufferMemoryBarrierCount,
                 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                 uint32_t imageMemoryBarrierCount,
                 const VkImageMemoryBarrier *pImageMemoryBarriers)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_barrier_info info;

   info.eventCount = eventCount;
   info.pEvents = pEvents;
   info.srcStageMask = 0;

   tu_barrier(cmd_buffer,
              memoryBarrierCount,
              pMemoryBarriers,
              bufferMemoryBarrierCount,
              pBufferMemoryBarriers,
              imageMemoryBarrierCount,
              pImageMemoryBarriers,
              &info);
}

void
tu_CmdSetDeviceMask(VkCommandBuffer commandBuffer, uint32_t deviceMask)
{
}