2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
5 * based in part on anv driver which is:
6 * Copyright © 2015 Intel Corporation
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
28 #include "tu_private.h"
30 #include "vk_format.h"
31 #include "adreno_pm4.xml.h"
35 tu_bo_list_init(struct tu_bo_list
*list
)
37 list
->count
= list
->capacity
= 0;
42 tu_bo_list_destroy(struct tu_bo_list
*list
)
48 tu_bo_list_reset(struct tu_bo_list
*list
)
54 tu_bo_list_add(struct tu_bo_list
*list
,
55 const struct tu_bo
*bo
)
57 uint32_t handle
= bo
->gem_handle
;
58 for (uint32_t i
= 0; i
< list
->count
; ++i
) {
59 if (list
->handles
[i
] == handle
)
63 if (list
->count
== list
->capacity
) {
64 uint32_t new_capacity
= MAX2(2 * list
->count
, 16);
65 uint32_t *new_handles
= realloc(list
->handles
, new_capacity
* sizeof(uint32_t));
68 list
->handles
= new_handles
;
69 list
->capacity
= new_capacity
;
72 uint32_t ret
= list
->count
;
73 list
->handles
[list
->count
] = handle
;
80 tu_cmd_stream_init(struct tu_cmd_stream
*stream
)
82 stream
->start
= stream
->cur
= stream
->end
= NULL
;
84 stream
->entry_count
= stream
->entry_capacity
= 0;
85 stream
->entries
= NULL
;
87 stream
->bo_count
= stream
->bo_capacity
= 0;
92 tu_cmd_stream_finish(struct tu_device
*dev
,
93 struct tu_cmd_stream
*stream
)
95 for (uint32_t i
= 0; i
< stream
->bo_count
; ++i
) {
96 tu_bo_finish(dev
, stream
->bos
[i
]);
100 free(stream
->entries
);
105 tu_cmd_stream_begin(struct tu_device
*dev
,
106 struct tu_cmd_stream
*stream
,
107 uint32_t reserve_size
)
109 assert(reserve_size
);
111 if (stream
->end
- stream
->cur
< reserve_size
) {
112 if (stream
->bo_count
== stream
->bo_capacity
) {
113 uint32_t new_capacity
= MAX2(4, 2 * stream
->bo_capacity
);
114 struct tu_bo
**new_bos
= realloc(stream
->bos
,
115 new_capacity
* sizeof(struct tu_bo
*));
119 stream
->bo_capacity
= new_capacity
;
120 stream
->bos
= new_bos
;
123 uint32_t new_size
= MAX2(16384, reserve_size
* sizeof(uint32_t));
124 if (stream
->bo_count
)
125 new_size
= MAX2(new_size
, stream
->bos
[stream
->bo_count
- 1]->size
* 2);
127 struct tu_bo
*new_bo
= malloc(sizeof(struct tu_bo
));
131 VkResult result
= tu_bo_init_new(dev
, new_bo
, new_size
);
132 if (result
!= VK_SUCCESS
) {
137 result
= tu_bo_map(dev
, new_bo
);
138 if (result
!= VK_SUCCESS
) {
139 tu_bo_finish(dev
, new_bo
);
144 stream
->bos
[stream
->bo_count
] = new_bo
;
147 stream
->start
= stream
->cur
= (uint32_t*)new_bo
->map
;
148 stream
->end
= stream
->start
+ new_bo
->size
/ sizeof(uint32_t);
150 stream
->start
= stream
->cur
;
156 tu_cmd_stream_end(struct tu_cmd_stream
*stream
)
158 if (stream
->start
== stream
->cur
)
161 if (stream
->entry_capacity
== stream
->entry_count
) {
162 uint32_t new_capacity
= MAX2(stream
->entry_capacity
* 2, 4);
163 struct tu_cmd_stream_entry
*new_entries
=
164 realloc(stream
->entries
, new_capacity
* sizeof(struct tu_cmd_stream_entry
));
168 stream
->entries
= new_entries
;
169 stream
->entry_capacity
= new_capacity
;
172 assert (stream
->bo_count
);
174 struct tu_cmd_stream_entry entry
;
175 entry
.bo
= stream
->bos
[stream
->bo_count
- 1];
176 entry
.size
= (stream
->cur
- stream
->start
) * sizeof(uint32_t);
177 entry
.offset
= (stream
->start
- (uint32_t*)entry
.bo
->map
) * sizeof(uint32_t);
179 stream
->entries
[stream
->entry_count
] = entry
;
180 ++stream
->entry_count
;
186 tu_cmd_stream_reset(struct tu_device
*dev
,
187 struct tu_cmd_stream
*stream
)
189 for (uint32_t i
= 0; i
+ 1 < stream
->bo_count
; ++i
) {
190 tu_bo_finish(dev
, stream
->bos
[i
]);
191 free(stream
->bos
[i
]);
194 if (stream
->bo_count
) {
195 stream
->bos
[0] = stream
->bos
[stream
->bo_count
- 1];
196 stream
->bo_count
= 1;
198 stream
->start
= stream
->cur
= (uint32_t*)stream
->bos
[0]->map
;
199 stream
->end
= stream
->start
+ stream
->bos
[0]->size
/ sizeof(uint32_t);
202 stream
->entry_count
= 0;
206 tu_cs_check_space(struct tu_device
*dev
,
207 struct tu_cmd_stream
*stream
,
210 if (stream
->end
- stream
->cur
>= size
)
213 VkResult result
= tu_cmd_stream_end(stream
);
214 if (result
!= VK_SUCCESS
)
217 return tu_cmd_stream_begin(dev
, stream
, size
);
220 const struct tu_dynamic_state default_dynamic_state
= {
236 .blend_constants
= { 0.0f
, 0.0f
, 0.0f
, 0.0f
},
242 .stencil_compare_mask
=
247 .stencil_write_mask
=
259 static void UNUSED
/* FINISHME */
260 tu_bind_dynamic_state(struct tu_cmd_buffer
*cmd_buffer
,
261 const struct tu_dynamic_state
*src
)
263 struct tu_dynamic_state
*dest
= &cmd_buffer
->state
.dynamic
;
264 uint32_t copy_mask
= src
->mask
;
265 uint32_t dest_mask
= 0;
267 tu_use_args(cmd_buffer
); /* FINISHME */
269 /* Make sure to copy the number of viewports/scissors because they can
270 * only be specified at pipeline creation time.
272 dest
->viewport
.count
= src
->viewport
.count
;
273 dest
->scissor
.count
= src
->scissor
.count
;
274 dest
->discard_rectangle
.count
= src
->discard_rectangle
.count
;
276 if (copy_mask
& TU_DYNAMIC_VIEWPORT
) {
277 if (memcmp(&dest
->viewport
.viewports
, &src
->viewport
.viewports
,
278 src
->viewport
.count
* sizeof(VkViewport
))) {
279 typed_memcpy(dest
->viewport
.viewports
, src
->viewport
.viewports
,
280 src
->viewport
.count
);
281 dest_mask
|= TU_DYNAMIC_VIEWPORT
;
285 if (copy_mask
& TU_DYNAMIC_SCISSOR
) {
286 if (memcmp(&dest
->scissor
.scissors
, &src
->scissor
.scissors
,
287 src
->scissor
.count
* sizeof(VkRect2D
))) {
288 typed_memcpy(dest
->scissor
.scissors
, src
->scissor
.scissors
,
290 dest_mask
|= TU_DYNAMIC_SCISSOR
;
294 if (copy_mask
& TU_DYNAMIC_LINE_WIDTH
) {
295 if (dest
->line_width
!= src
->line_width
) {
296 dest
->line_width
= src
->line_width
;
297 dest_mask
|= TU_DYNAMIC_LINE_WIDTH
;
301 if (copy_mask
& TU_DYNAMIC_DEPTH_BIAS
) {
302 if (memcmp(&dest
->depth_bias
, &src
->depth_bias
,
303 sizeof(src
->depth_bias
))) {
304 dest
->depth_bias
= src
->depth_bias
;
305 dest_mask
|= TU_DYNAMIC_DEPTH_BIAS
;
309 if (copy_mask
& TU_DYNAMIC_BLEND_CONSTANTS
) {
310 if (memcmp(&dest
->blend_constants
, &src
->blend_constants
,
311 sizeof(src
->blend_constants
))) {
312 typed_memcpy(dest
->blend_constants
, src
->blend_constants
, 4);
313 dest_mask
|= TU_DYNAMIC_BLEND_CONSTANTS
;
317 if (copy_mask
& TU_DYNAMIC_DEPTH_BOUNDS
) {
318 if (memcmp(&dest
->depth_bounds
, &src
->depth_bounds
,
319 sizeof(src
->depth_bounds
))) {
320 dest
->depth_bounds
= src
->depth_bounds
;
321 dest_mask
|= TU_DYNAMIC_DEPTH_BOUNDS
;
325 if (copy_mask
& TU_DYNAMIC_STENCIL_COMPARE_MASK
) {
326 if (memcmp(&dest
->stencil_compare_mask
, &src
->stencil_compare_mask
,
327 sizeof(src
->stencil_compare_mask
))) {
328 dest
->stencil_compare_mask
= src
->stencil_compare_mask
;
329 dest_mask
|= TU_DYNAMIC_STENCIL_COMPARE_MASK
;
333 if (copy_mask
& TU_DYNAMIC_STENCIL_WRITE_MASK
) {
334 if (memcmp(&dest
->stencil_write_mask
, &src
->stencil_write_mask
,
335 sizeof(src
->stencil_write_mask
))) {
336 dest
->stencil_write_mask
= src
->stencil_write_mask
;
337 dest_mask
|= TU_DYNAMIC_STENCIL_WRITE_MASK
;
341 if (copy_mask
& TU_DYNAMIC_STENCIL_REFERENCE
) {
342 if (memcmp(&dest
->stencil_reference
, &src
->stencil_reference
,
343 sizeof(src
->stencil_reference
))) {
344 dest
->stencil_reference
= src
->stencil_reference
;
345 dest_mask
|= TU_DYNAMIC_STENCIL_REFERENCE
;
349 if (copy_mask
& TU_DYNAMIC_DISCARD_RECTANGLE
) {
350 if (memcmp(&dest
->discard_rectangle
.rectangles
,
351 &src
->discard_rectangle
.rectangles
,
352 src
->discard_rectangle
.count
* sizeof(VkRect2D
))) {
353 typed_memcpy(dest
->discard_rectangle
.rectangles
,
354 src
->discard_rectangle
.rectangles
,
355 src
->discard_rectangle
.count
);
356 dest_mask
|= TU_DYNAMIC_DISCARD_RECTANGLE
;
362 tu_create_cmd_buffer(struct tu_device
*device
,
363 struct tu_cmd_pool
*pool
,
364 VkCommandBufferLevel level
,
365 VkCommandBuffer
*pCommandBuffer
)
367 struct tu_cmd_buffer
*cmd_buffer
;
368 cmd_buffer
= vk_zalloc(&pool
->alloc
, sizeof(*cmd_buffer
), 8,
369 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
370 if (cmd_buffer
== NULL
)
371 return vk_error(device
->instance
, VK_ERROR_OUT_OF_HOST_MEMORY
);
373 cmd_buffer
->_loader_data
.loaderMagic
= ICD_LOADER_MAGIC
;
374 cmd_buffer
->device
= device
;
375 cmd_buffer
->pool
= pool
;
376 cmd_buffer
->level
= level
;
379 list_addtail(&cmd_buffer
->pool_link
, &pool
->cmd_buffers
);
380 cmd_buffer
->queue_family_index
= pool
->queue_family_index
;
383 /* Init the pool_link so we can safely call list_del when we destroy
386 list_inithead(&cmd_buffer
->pool_link
);
387 cmd_buffer
->queue_family_index
= TU_QUEUE_GENERAL
;
390 tu_bo_list_init(&cmd_buffer
->bo_list
);
391 tu_cmd_stream_init(&cmd_buffer
->cs
);
393 *pCommandBuffer
= tu_cmd_buffer_to_handle(cmd_buffer
);
395 list_inithead(&cmd_buffer
->upload
.list
);
401 tu_cmd_buffer_destroy(struct tu_cmd_buffer
*cmd_buffer
)
403 list_del(&cmd_buffer
->pool_link
);
405 for (unsigned i
= 0; i
< VK_PIPELINE_BIND_POINT_RANGE_SIZE
; i
++)
406 free(cmd_buffer
->descriptors
[i
].push_set
.set
.mapped_ptr
);
408 tu_cmd_stream_finish(cmd_buffer
->device
, &cmd_buffer
->cs
);
409 tu_bo_list_destroy(&cmd_buffer
->bo_list
);
410 vk_free(&cmd_buffer
->pool
->alloc
, cmd_buffer
);
414 tu_reset_cmd_buffer(struct tu_cmd_buffer
*cmd_buffer
)
416 cmd_buffer
->record_result
= VK_SUCCESS
;
418 tu_bo_list_reset(&cmd_buffer
->bo_list
);
419 tu_cmd_stream_reset(cmd_buffer
->device
, &cmd_buffer
->cs
);
421 for (unsigned i
= 0; i
< VK_PIPELINE_BIND_POINT_RANGE_SIZE
; i
++) {
422 cmd_buffer
->descriptors
[i
].dirty
= 0;
423 cmd_buffer
->descriptors
[i
].valid
= 0;
424 cmd_buffer
->descriptors
[i
].push_dirty
= false;
427 cmd_buffer
->status
= TU_CMD_BUFFER_STATUS_INITIAL
;
429 return cmd_buffer
->record_result
;
433 tu_AllocateCommandBuffers(VkDevice _device
,
434 const VkCommandBufferAllocateInfo
*pAllocateInfo
,
435 VkCommandBuffer
*pCommandBuffers
)
437 TU_FROM_HANDLE(tu_device
, device
, _device
);
438 TU_FROM_HANDLE(tu_cmd_pool
, pool
, pAllocateInfo
->commandPool
);
440 VkResult result
= VK_SUCCESS
;
443 for (i
= 0; i
< pAllocateInfo
->commandBufferCount
; i
++) {
445 if (!list_empty(&pool
->free_cmd_buffers
)) {
446 struct tu_cmd_buffer
*cmd_buffer
= list_first_entry(
447 &pool
->free_cmd_buffers
, struct tu_cmd_buffer
, pool_link
);
449 list_del(&cmd_buffer
->pool_link
);
450 list_addtail(&cmd_buffer
->pool_link
, &pool
->cmd_buffers
);
452 result
= tu_reset_cmd_buffer(cmd_buffer
);
453 cmd_buffer
->_loader_data
.loaderMagic
= ICD_LOADER_MAGIC
;
454 cmd_buffer
->level
= pAllocateInfo
->level
;
456 pCommandBuffers
[i
] = tu_cmd_buffer_to_handle(cmd_buffer
);
458 result
= tu_create_cmd_buffer(device
, pool
, pAllocateInfo
->level
,
459 &pCommandBuffers
[i
]);
461 if (result
!= VK_SUCCESS
)
465 if (result
!= VK_SUCCESS
) {
466 tu_FreeCommandBuffers(_device
, pAllocateInfo
->commandPool
, i
,
469 /* From the Vulkan 1.0.66 spec:
471 * "vkAllocateCommandBuffers can be used to create multiple
472 * command buffers. If the creation of any of those command
473 * buffers fails, the implementation must destroy all
474 * successfully created command buffer objects from this
475 * command, set all entries of the pCommandBuffers array to
476 * NULL and return the error."
478 memset(pCommandBuffers
, 0,
479 sizeof(*pCommandBuffers
) * pAllocateInfo
->commandBufferCount
);
486 tu_FreeCommandBuffers(VkDevice device
,
487 VkCommandPool commandPool
,
488 uint32_t commandBufferCount
,
489 const VkCommandBuffer
*pCommandBuffers
)
491 for (uint32_t i
= 0; i
< commandBufferCount
; i
++) {
492 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, pCommandBuffers
[i
]);
495 if (cmd_buffer
->pool
) {
496 list_del(&cmd_buffer
->pool_link
);
497 list_addtail(&cmd_buffer
->pool_link
,
498 &cmd_buffer
->pool
->free_cmd_buffers
);
500 tu_cmd_buffer_destroy(cmd_buffer
);
506 tu_ResetCommandBuffer(VkCommandBuffer commandBuffer
,
507 VkCommandBufferResetFlags flags
)
509 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
510 return tu_reset_cmd_buffer(cmd_buffer
);
514 tu_BeginCommandBuffer(VkCommandBuffer commandBuffer
,
515 const VkCommandBufferBeginInfo
*pBeginInfo
)
517 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
518 VkResult result
= VK_SUCCESS
;
520 if (cmd_buffer
->status
!= TU_CMD_BUFFER_STATUS_INITIAL
) {
521 /* If the command buffer has already been resetted with
522 * vkResetCommandBuffer, no need to do it again.
524 result
= tu_reset_cmd_buffer(cmd_buffer
);
525 if (result
!= VK_SUCCESS
)
529 memset(&cmd_buffer
->state
, 0, sizeof(cmd_buffer
->state
));
530 cmd_buffer
->usage_flags
= pBeginInfo
->flags
;
532 /* setup initial configuration into command buffer */
533 if (cmd_buffer
->level
== VK_COMMAND_BUFFER_LEVEL_PRIMARY
) {
534 switch (cmd_buffer
->queue_family_index
) {
535 case TU_QUEUE_GENERAL
:
543 cmd_buffer
->status
= TU_CMD_BUFFER_STATUS_RECORDING
;
545 result
= tu_cmd_stream_begin(cmd_buffer
->device
,
546 &cmd_buffer
->cs
, 4096);
548 /* Put some stuff in so we do not have empty command buffers. */
549 tu_cs_emit_pkt7(&cmd_buffer
->cs
, CP_NOP
, 4);
550 tu_cs_emit(&cmd_buffer
->cs
, 0);
551 tu_cs_emit(&cmd_buffer
->cs
, 0);
552 tu_cs_emit(&cmd_buffer
->cs
, 0);
553 tu_cs_emit(&cmd_buffer
->cs
, 0);
559 tu_CmdBindVertexBuffers(VkCommandBuffer commandBuffer
,
560 uint32_t firstBinding
,
561 uint32_t bindingCount
,
562 const VkBuffer
*pBuffers
,
563 const VkDeviceSize
*pOffsets
)
568 tu_CmdBindIndexBuffer(VkCommandBuffer commandBuffer
,
571 VkIndexType indexType
)
576 tu_CmdBindDescriptorSets(VkCommandBuffer commandBuffer
,
577 VkPipelineBindPoint pipelineBindPoint
,
578 VkPipelineLayout _layout
,
580 uint32_t descriptorSetCount
,
581 const VkDescriptorSet
*pDescriptorSets
,
582 uint32_t dynamicOffsetCount
,
583 const uint32_t *pDynamicOffsets
)
588 tu_CmdPushConstants(VkCommandBuffer commandBuffer
,
589 VkPipelineLayout layout
,
590 VkShaderStageFlags stageFlags
,
598 tu_EndCommandBuffer(VkCommandBuffer commandBuffer
)
600 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
602 tu_cmd_stream_end(&cmd_buffer
->cs
);
603 cmd_buffer
->status
= TU_CMD_BUFFER_STATUS_EXECUTABLE
;
605 return cmd_buffer
->record_result
;
609 tu_CmdBindPipeline(VkCommandBuffer commandBuffer
,
610 VkPipelineBindPoint pipelineBindPoint
,
611 VkPipeline _pipeline
)
616 tu_CmdSetViewport(VkCommandBuffer commandBuffer
,
617 uint32_t firstViewport
,
618 uint32_t viewportCount
,
619 const VkViewport
*pViewports
)
624 tu_CmdSetScissor(VkCommandBuffer commandBuffer
,
625 uint32_t firstScissor
,
626 uint32_t scissorCount
,
627 const VkRect2D
*pScissors
)
632 tu_CmdSetLineWidth(VkCommandBuffer commandBuffer
, float lineWidth
)
637 tu_CmdSetDepthBias(VkCommandBuffer commandBuffer
,
638 float depthBiasConstantFactor
,
639 float depthBiasClamp
,
640 float depthBiasSlopeFactor
)
645 tu_CmdSetBlendConstants(VkCommandBuffer commandBuffer
,
646 const float blendConstants
[4])
651 tu_CmdSetDepthBounds(VkCommandBuffer commandBuffer
,
652 float minDepthBounds
,
653 float maxDepthBounds
)
658 tu_CmdSetStencilCompareMask(VkCommandBuffer commandBuffer
,
659 VkStencilFaceFlags faceMask
,
660 uint32_t compareMask
)
665 tu_CmdSetStencilWriteMask(VkCommandBuffer commandBuffer
,
666 VkStencilFaceFlags faceMask
,
672 tu_CmdSetStencilReference(VkCommandBuffer commandBuffer
,
673 VkStencilFaceFlags faceMask
,
679 tu_CmdExecuteCommands(VkCommandBuffer commandBuffer
,
680 uint32_t commandBufferCount
,
681 const VkCommandBuffer
*pCmdBuffers
)
686 tu_CreateCommandPool(VkDevice _device
,
687 const VkCommandPoolCreateInfo
*pCreateInfo
,
688 const VkAllocationCallbacks
*pAllocator
,
689 VkCommandPool
*pCmdPool
)
691 TU_FROM_HANDLE(tu_device
, device
, _device
);
692 struct tu_cmd_pool
*pool
;
694 pool
= vk_alloc2(&device
->alloc
, pAllocator
, sizeof(*pool
), 8,
695 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
697 return vk_error(device
->instance
, VK_ERROR_OUT_OF_HOST_MEMORY
);
700 pool
->alloc
= *pAllocator
;
702 pool
->alloc
= device
->alloc
;
704 list_inithead(&pool
->cmd_buffers
);
705 list_inithead(&pool
->free_cmd_buffers
);
707 pool
->queue_family_index
= pCreateInfo
->queueFamilyIndex
;
709 *pCmdPool
= tu_cmd_pool_to_handle(pool
);
715 tu_DestroyCommandPool(VkDevice _device
,
716 VkCommandPool commandPool
,
717 const VkAllocationCallbacks
*pAllocator
)
719 TU_FROM_HANDLE(tu_device
, device
, _device
);
720 TU_FROM_HANDLE(tu_cmd_pool
, pool
, commandPool
);
725 list_for_each_entry_safe(struct tu_cmd_buffer
, cmd_buffer
,
726 &pool
->cmd_buffers
, pool_link
)
728 tu_cmd_buffer_destroy(cmd_buffer
);
731 list_for_each_entry_safe(struct tu_cmd_buffer
, cmd_buffer
,
732 &pool
->free_cmd_buffers
, pool_link
)
734 tu_cmd_buffer_destroy(cmd_buffer
);
737 vk_free2(&device
->alloc
, pAllocator
, pool
);
741 tu_ResetCommandPool(VkDevice device
,
742 VkCommandPool commandPool
,
743 VkCommandPoolResetFlags flags
)
745 TU_FROM_HANDLE(tu_cmd_pool
, pool
, commandPool
);
748 list_for_each_entry(struct tu_cmd_buffer
, cmd_buffer
, &pool
->cmd_buffers
,
751 result
= tu_reset_cmd_buffer(cmd_buffer
);
752 if (result
!= VK_SUCCESS
)
760 tu_TrimCommandPool(VkDevice device
,
761 VkCommandPool commandPool
,
762 VkCommandPoolTrimFlagsKHR flags
)
764 TU_FROM_HANDLE(tu_cmd_pool
, pool
, commandPool
);
769 list_for_each_entry_safe(struct tu_cmd_buffer
, cmd_buffer
,
770 &pool
->free_cmd_buffers
, pool_link
)
772 tu_cmd_buffer_destroy(cmd_buffer
);
777 tu_CmdBeginRenderPass(VkCommandBuffer commandBuffer
,
778 const VkRenderPassBeginInfo
*pRenderPassBegin
,
779 VkSubpassContents contents
)
784 tu_CmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer
,
785 const VkRenderPassBeginInfo
*pRenderPassBeginInfo
,
786 const VkSubpassBeginInfoKHR
*pSubpassBeginInfo
)
788 tu_CmdBeginRenderPass(commandBuffer
, pRenderPassBeginInfo
,
789 pSubpassBeginInfo
->contents
);
793 tu_CmdNextSubpass(VkCommandBuffer commandBuffer
, VkSubpassContents contents
)
798 tu_CmdNextSubpass2KHR(VkCommandBuffer commandBuffer
,
799 const VkSubpassBeginInfoKHR
*pSubpassBeginInfo
,
800 const VkSubpassEndInfoKHR
*pSubpassEndInfo
)
802 tu_CmdNextSubpass(commandBuffer
, pSubpassBeginInfo
->contents
);
/* Unified description of a direct or indirect draw, filled in by the
 * vkCmdDraw* entry points and consumed by tu_draw().
 */
struct tu_draw_info
{
   /**
    * Number of vertices.
    */
   uint32_t count;

   /**
    * Index of the first vertex.
    */
   int32_t vertex_offset;

   /**
    * First instance id.
    */
   uint32_t first_instance;

   /**
    * Number of instances.
    */
   uint32_t instance_count;

   /**
    * First index (indexed draws only).
    */
   uint32_t first_index;

   /**
    * Whether it's an indexed draw.
    */
   bool indexed;

   /**
    * Indirect draw parameters resource.
    */
   struct tu_buffer *indirect;
   uint64_t indirect_offset;
   uint32_t stride;

   /**
    * Draw count parameters resource.
    */
   struct tu_buffer *count_buffer;
   uint64_t count_buffer_offset;
};
/* Common draw emission path: unimplemented stub (FINISHME). */
static void
tu_draw(struct tu_cmd_buffer *cmd_buffer, const struct tu_draw_info *info)
{
}
857 tu_CmdDraw(VkCommandBuffer commandBuffer
,
858 uint32_t vertexCount
,
859 uint32_t instanceCount
,
860 uint32_t firstVertex
,
861 uint32_t firstInstance
)
863 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
864 struct tu_draw_info info
= {};
866 info
.count
= vertexCount
;
867 info
.instance_count
= instanceCount
;
868 info
.first_instance
= firstInstance
;
869 info
.vertex_offset
= firstVertex
;
871 tu_draw(cmd_buffer
, &info
);
875 tu_CmdDrawIndexed(VkCommandBuffer commandBuffer
,
877 uint32_t instanceCount
,
879 int32_t vertexOffset
,
880 uint32_t firstInstance
)
882 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
883 struct tu_draw_info info
= {};
886 info
.count
= indexCount
;
887 info
.instance_count
= instanceCount
;
888 info
.first_index
= firstIndex
;
889 info
.vertex_offset
= vertexOffset
;
890 info
.first_instance
= firstInstance
;
892 tu_draw(cmd_buffer
, &info
);
896 tu_CmdDrawIndirect(VkCommandBuffer commandBuffer
,
902 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
903 TU_FROM_HANDLE(tu_buffer
, buffer
, _buffer
);
904 struct tu_draw_info info
= {};
906 info
.count
= drawCount
;
907 info
.indirect
= buffer
;
908 info
.indirect_offset
= offset
;
909 info
.stride
= stride
;
911 tu_draw(cmd_buffer
, &info
);
915 tu_CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer
,
921 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
922 TU_FROM_HANDLE(tu_buffer
, buffer
, _buffer
);
923 struct tu_draw_info info
= {};
926 info
.count
= drawCount
;
927 info
.indirect
= buffer
;
928 info
.indirect_offset
= offset
;
929 info
.stride
= stride
;
931 tu_draw(cmd_buffer
, &info
);
/* Unified description of a direct or indirect compute dispatch, filled
 * in by the vkCmdDispatch* entry points and consumed by tu_dispatch().
 */
struct tu_dispatch_info
{
   /**
    * Determine the layout of the grid (in block units) to be used.
    */
   uint32_t blocks[3];

   /**
    * A starting offset for the grid. If unaligned is set, the offset
    * must still be aligned.
    */
   uint32_t offsets[3];

   /**
    * Whether it's an unaligned compute dispatch.
    */
   bool unaligned;

   /**
    * Indirect compute parameters resource.
    */
   struct tu_buffer *indirect;
   uint64_t indirect_offset;
};
/* Common dispatch emission path: unimplemented stub (FINISHME). */
static void
tu_dispatch(struct tu_cmd_buffer *cmd_buffer,
            const struct tu_dispatch_info *info)
{
}
965 tu_CmdDispatchBase(VkCommandBuffer commandBuffer
,
973 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
974 struct tu_dispatch_info info
= {};
980 info
.offsets
[0] = base_x
;
981 info
.offsets
[1] = base_y
;
982 info
.offsets
[2] = base_z
;
983 tu_dispatch(cmd_buffer
, &info
);
987 tu_CmdDispatch(VkCommandBuffer commandBuffer
,
992 tu_CmdDispatchBase(commandBuffer
, 0, 0, 0, x
, y
, z
);
996 tu_CmdDispatchIndirect(VkCommandBuffer commandBuffer
,
1000 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
1001 TU_FROM_HANDLE(tu_buffer
, buffer
, _buffer
);
1002 struct tu_dispatch_info info
= {};
1004 info
.indirect
= buffer
;
1005 info
.indirect_offset
= offset
;
1007 tu_dispatch(cmd_buffer
, &info
);
1011 tu_CmdEndRenderPass(VkCommandBuffer commandBuffer
)
1016 tu_CmdEndRenderPass2KHR(VkCommandBuffer commandBuffer
,
1017 const VkSubpassEndInfoKHR
*pSubpassEndInfo
)
1019 tu_CmdEndRenderPass(commandBuffer
);
1022 struct tu_barrier_info
1024 uint32_t eventCount
;
1025 const VkEvent
*pEvents
;
1026 VkPipelineStageFlags srcStageMask
;
1030 tu_barrier(struct tu_cmd_buffer
*cmd_buffer
,
1031 uint32_t memoryBarrierCount
,
1032 const VkMemoryBarrier
*pMemoryBarriers
,
1033 uint32_t bufferMemoryBarrierCount
,
1034 const VkBufferMemoryBarrier
*pBufferMemoryBarriers
,
1035 uint32_t imageMemoryBarrierCount
,
1036 const VkImageMemoryBarrier
*pImageMemoryBarriers
,
1037 const struct tu_barrier_info
*info
)
1042 tu_CmdPipelineBarrier(VkCommandBuffer commandBuffer
,
1043 VkPipelineStageFlags srcStageMask
,
1044 VkPipelineStageFlags destStageMask
,
1046 uint32_t memoryBarrierCount
,
1047 const VkMemoryBarrier
*pMemoryBarriers
,
1048 uint32_t bufferMemoryBarrierCount
,
1049 const VkBufferMemoryBarrier
*pBufferMemoryBarriers
,
1050 uint32_t imageMemoryBarrierCount
,
1051 const VkImageMemoryBarrier
*pImageMemoryBarriers
)
1053 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
1054 struct tu_barrier_info info
;
1056 info
.eventCount
= 0;
1057 info
.pEvents
= NULL
;
1058 info
.srcStageMask
= srcStageMask
;
1060 tu_barrier(cmd_buffer
, memoryBarrierCount
, pMemoryBarriers
,
1061 bufferMemoryBarrierCount
, pBufferMemoryBarriers
,
1062 imageMemoryBarrierCount
, pImageMemoryBarriers
, &info
);
1066 write_event(struct tu_cmd_buffer
*cmd_buffer
,
1067 struct tu_event
*event
,
1068 VkPipelineStageFlags stageMask
,
1074 tu_CmdSetEvent(VkCommandBuffer commandBuffer
,
1076 VkPipelineStageFlags stageMask
)
1078 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
1079 TU_FROM_HANDLE(tu_event
, event
, _event
);
1081 write_event(cmd_buffer
, event
, stageMask
, 1);
1085 tu_CmdResetEvent(VkCommandBuffer commandBuffer
,
1087 VkPipelineStageFlags stageMask
)
1089 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
1090 TU_FROM_HANDLE(tu_event
, event
, _event
);
1092 write_event(cmd_buffer
, event
, stageMask
, 0);
1096 tu_CmdWaitEvents(VkCommandBuffer commandBuffer
,
1097 uint32_t eventCount
,
1098 const VkEvent
*pEvents
,
1099 VkPipelineStageFlags srcStageMask
,
1100 VkPipelineStageFlags dstStageMask
,
1101 uint32_t memoryBarrierCount
,
1102 const VkMemoryBarrier
*pMemoryBarriers
,
1103 uint32_t bufferMemoryBarrierCount
,
1104 const VkBufferMemoryBarrier
*pBufferMemoryBarriers
,
1105 uint32_t imageMemoryBarrierCount
,
1106 const VkImageMemoryBarrier
*pImageMemoryBarriers
)
1108 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
1109 struct tu_barrier_info info
;
1111 info
.eventCount
= eventCount
;
1112 info
.pEvents
= pEvents
;
1113 info
.srcStageMask
= 0;
1115 tu_barrier(cmd_buffer
, memoryBarrierCount
, pMemoryBarriers
,
1116 bufferMemoryBarrierCount
, pBufferMemoryBarriers
,
1117 imageMemoryBarrierCount
, pImageMemoryBarriers
, &info
);
1121 tu_CmdSetDeviceMask(VkCommandBuffer commandBuffer
, uint32_t deviceMask
)