2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
5 * based in part on anv driver which is:
6 * Copyright © 2015 Intel Corporation
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
28 #include "tu_private.h"
30 #include "vk_format.h"
33 tu_bo_list_init(struct tu_bo_list
*list
)
35 list
->count
= list
->capacity
= 0;
40 tu_bo_list_destroy(struct tu_bo_list
*list
)
46 tu_bo_list_reset(struct tu_bo_list
*list
)
52 tu_bo_list_add(struct tu_bo_list
*list
,
53 const struct tu_bo
*bo
)
55 uint32_t handle
= bo
->gem_handle
;
56 for (uint32_t i
= 0; i
< list
->count
; ++i
) {
57 if (list
->handles
[i
] == handle
)
61 if (list
->count
== list
->capacity
) {
62 uint32_t new_capacity
= MAX2(2 * list
->count
, 16);
63 uint32_t *new_handles
= realloc(list
->handles
, new_capacity
* sizeof(uint32_t));
66 list
->handles
= new_handles
;
67 list
->capacity
= new_capacity
;
70 uint32_t ret
= list
->count
;
71 list
->handles
[list
->count
] = handle
;
78 tu_cmd_stream_init(struct tu_cmd_stream
*stream
)
80 stream
->start
= stream
->cur
= stream
->end
= NULL
;
82 stream
->entry_count
= stream
->entry_capacity
= 0;
83 stream
->entries
= NULL
;
85 stream
->bo_count
= stream
->bo_capacity
= 0;
90 tu_cmd_stream_finish(struct tu_device
*dev
,
91 struct tu_cmd_stream
*stream
)
93 for (uint32_t i
= 0; i
< stream
->bo_count
; ++i
) {
94 tu_bo_finish(dev
, stream
->bos
[i
]);
98 free(stream
->entries
);
103 tu_cmd_stream_begin(struct tu_device
*dev
,
104 struct tu_cmd_stream
*stream
,
105 uint32_t reserve_size
)
107 assert(reserve_size
);
109 if (stream
->end
- stream
->cur
< reserve_size
) {
110 if (stream
->bo_count
== stream
->bo_capacity
) {
111 uint32_t new_capacity
= MAX2(4, 2 * stream
->bo_capacity
);
112 struct tu_bo
**new_bos
= realloc(stream
->bos
,
113 new_capacity
* sizeof(struct tu_bo
*));
117 stream
->bo_capacity
= new_capacity
;
118 stream
->bos
= new_bos
;
121 uint32_t new_size
= MAX2(16384, reserve_size
* sizeof(uint32_t));
122 if (stream
->bo_count
)
123 new_size
= MAX2(new_size
, stream
->bos
[stream
->bo_count
- 1]->size
* 2);
125 struct tu_bo
*new_bo
= malloc(sizeof(struct tu_bo
));
129 VkResult result
= tu_bo_init_new(dev
, new_bo
, new_size
);
130 if (result
!= VK_SUCCESS
) {
135 result
= tu_bo_map(dev
, new_bo
);
136 if (result
!= VK_SUCCESS
) {
137 tu_bo_finish(dev
, new_bo
);
142 stream
->bos
[stream
->bo_count
] = new_bo
;
145 stream
->start
= stream
->cur
= (uint32_t*)new_bo
->map
;
146 stream
->end
= stream
->start
+ new_bo
->size
/ sizeof(uint32_t);
148 stream
->start
= stream
->cur
;
154 tu_cmd_stream_end(struct tu_cmd_stream
*stream
)
156 if (stream
->start
== stream
->cur
)
159 if (stream
->entry_capacity
== stream
->entry_count
) {
160 uint32_t new_capacity
= MAX2(stream
->entry_capacity
* 2, 4);
161 struct tu_cmd_stream_entry
*new_entries
=
162 realloc(stream
->entries
, new_capacity
* sizeof(struct tu_cmd_stream_entry
));
166 stream
->entries
= new_entries
;
167 stream
->entry_capacity
= new_capacity
;
170 assert (stream
->bo_count
);
172 struct tu_cmd_stream_entry entry
;
173 entry
.bo
= stream
->bos
[stream
->bo_count
- 1];
174 entry
.size
= (stream
->cur
- stream
->start
) * sizeof(uint32_t);
175 entry
.offset
= (stream
->start
- (uint32_t*)entry
.bo
->map
) * sizeof(uint32_t);
177 stream
->entries
[stream
->entry_count
] = entry
;
178 ++stream
->entry_count
;
184 tu_cmd_stream_reset(struct tu_device
*dev
,
185 struct tu_cmd_stream
*stream
)
187 for (uint32_t i
= 0; i
+ 1 < stream
->bo_count
; ++i
) {
188 tu_bo_finish(dev
, stream
->bos
[i
]);
189 free(stream
->bos
[i
]);
192 if (stream
->bo_count
) {
193 stream
->bos
[0] = stream
->bos
[stream
->bo_count
- 1];
194 stream
->bo_count
= 1;
196 stream
->start
= stream
->cur
= (uint32_t*)stream
->bos
[0]->map
;
197 stream
->end
= stream
->start
+ stream
->bos
[0]->size
/ sizeof(uint32_t);
200 stream
->entry_count
= 0;
203 const struct tu_dynamic_state default_dynamic_state
= {
219 .blend_constants
= { 0.0f
, 0.0f
, 0.0f
, 0.0f
},
225 .stencil_compare_mask
=
230 .stencil_write_mask
=
242 static void UNUSED
/* FINISHME */
243 tu_bind_dynamic_state(struct tu_cmd_buffer
*cmd_buffer
,
244 const struct tu_dynamic_state
*src
)
246 struct tu_dynamic_state
*dest
= &cmd_buffer
->state
.dynamic
;
247 uint32_t copy_mask
= src
->mask
;
248 uint32_t dest_mask
= 0;
250 tu_use_args(cmd_buffer
); /* FINISHME */
252 /* Make sure to copy the number of viewports/scissors because they can
253 * only be specified at pipeline creation time.
255 dest
->viewport
.count
= src
->viewport
.count
;
256 dest
->scissor
.count
= src
->scissor
.count
;
257 dest
->discard_rectangle
.count
= src
->discard_rectangle
.count
;
259 if (copy_mask
& TU_DYNAMIC_VIEWPORT
) {
260 if (memcmp(&dest
->viewport
.viewports
, &src
->viewport
.viewports
,
261 src
->viewport
.count
* sizeof(VkViewport
))) {
262 typed_memcpy(dest
->viewport
.viewports
, src
->viewport
.viewports
,
263 src
->viewport
.count
);
264 dest_mask
|= TU_DYNAMIC_VIEWPORT
;
268 if (copy_mask
& TU_DYNAMIC_SCISSOR
) {
269 if (memcmp(&dest
->scissor
.scissors
, &src
->scissor
.scissors
,
270 src
->scissor
.count
* sizeof(VkRect2D
))) {
271 typed_memcpy(dest
->scissor
.scissors
, src
->scissor
.scissors
,
273 dest_mask
|= TU_DYNAMIC_SCISSOR
;
277 if (copy_mask
& TU_DYNAMIC_LINE_WIDTH
) {
278 if (dest
->line_width
!= src
->line_width
) {
279 dest
->line_width
= src
->line_width
;
280 dest_mask
|= TU_DYNAMIC_LINE_WIDTH
;
284 if (copy_mask
& TU_DYNAMIC_DEPTH_BIAS
) {
285 if (memcmp(&dest
->depth_bias
, &src
->depth_bias
,
286 sizeof(src
->depth_bias
))) {
287 dest
->depth_bias
= src
->depth_bias
;
288 dest_mask
|= TU_DYNAMIC_DEPTH_BIAS
;
292 if (copy_mask
& TU_DYNAMIC_BLEND_CONSTANTS
) {
293 if (memcmp(&dest
->blend_constants
, &src
->blend_constants
,
294 sizeof(src
->blend_constants
))) {
295 typed_memcpy(dest
->blend_constants
, src
->blend_constants
, 4);
296 dest_mask
|= TU_DYNAMIC_BLEND_CONSTANTS
;
300 if (copy_mask
& TU_DYNAMIC_DEPTH_BOUNDS
) {
301 if (memcmp(&dest
->depth_bounds
, &src
->depth_bounds
,
302 sizeof(src
->depth_bounds
))) {
303 dest
->depth_bounds
= src
->depth_bounds
;
304 dest_mask
|= TU_DYNAMIC_DEPTH_BOUNDS
;
308 if (copy_mask
& TU_DYNAMIC_STENCIL_COMPARE_MASK
) {
309 if (memcmp(&dest
->stencil_compare_mask
, &src
->stencil_compare_mask
,
310 sizeof(src
->stencil_compare_mask
))) {
311 dest
->stencil_compare_mask
= src
->stencil_compare_mask
;
312 dest_mask
|= TU_DYNAMIC_STENCIL_COMPARE_MASK
;
316 if (copy_mask
& TU_DYNAMIC_STENCIL_WRITE_MASK
) {
317 if (memcmp(&dest
->stencil_write_mask
, &src
->stencil_write_mask
,
318 sizeof(src
->stencil_write_mask
))) {
319 dest
->stencil_write_mask
= src
->stencil_write_mask
;
320 dest_mask
|= TU_DYNAMIC_STENCIL_WRITE_MASK
;
324 if (copy_mask
& TU_DYNAMIC_STENCIL_REFERENCE
) {
325 if (memcmp(&dest
->stencil_reference
, &src
->stencil_reference
,
326 sizeof(src
->stencil_reference
))) {
327 dest
->stencil_reference
= src
->stencil_reference
;
328 dest_mask
|= TU_DYNAMIC_STENCIL_REFERENCE
;
332 if (copy_mask
& TU_DYNAMIC_DISCARD_RECTANGLE
) {
333 if (memcmp(&dest
->discard_rectangle
.rectangles
,
334 &src
->discard_rectangle
.rectangles
,
335 src
->discard_rectangle
.count
* sizeof(VkRect2D
))) {
336 typed_memcpy(dest
->discard_rectangle
.rectangles
,
337 src
->discard_rectangle
.rectangles
,
338 src
->discard_rectangle
.count
);
339 dest_mask
|= TU_DYNAMIC_DISCARD_RECTANGLE
;
345 tu_create_cmd_buffer(struct tu_device
*device
,
346 struct tu_cmd_pool
*pool
,
347 VkCommandBufferLevel level
,
348 VkCommandBuffer
*pCommandBuffer
)
350 struct tu_cmd_buffer
*cmd_buffer
;
351 cmd_buffer
= vk_zalloc(&pool
->alloc
, sizeof(*cmd_buffer
), 8,
352 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
353 if (cmd_buffer
== NULL
)
354 return vk_error(device
->instance
, VK_ERROR_OUT_OF_HOST_MEMORY
);
356 cmd_buffer
->_loader_data
.loaderMagic
= ICD_LOADER_MAGIC
;
357 cmd_buffer
->device
= device
;
358 cmd_buffer
->pool
= pool
;
359 cmd_buffer
->level
= level
;
362 list_addtail(&cmd_buffer
->pool_link
, &pool
->cmd_buffers
);
363 cmd_buffer
->queue_family_index
= pool
->queue_family_index
;
366 /* Init the pool_link so we can safely call list_del when we destroy
369 list_inithead(&cmd_buffer
->pool_link
);
370 cmd_buffer
->queue_family_index
= TU_QUEUE_GENERAL
;
373 tu_bo_list_init(&cmd_buffer
->bo_list
);
374 tu_cmd_stream_init(&cmd_buffer
->primary_cmd_stream
);
376 *pCommandBuffer
= tu_cmd_buffer_to_handle(cmd_buffer
);
378 list_inithead(&cmd_buffer
->upload
.list
);
384 tu_cmd_buffer_destroy(struct tu_cmd_buffer
*cmd_buffer
)
386 list_del(&cmd_buffer
->pool_link
);
388 for (unsigned i
= 0; i
< VK_PIPELINE_BIND_POINT_RANGE_SIZE
; i
++)
389 free(cmd_buffer
->descriptors
[i
].push_set
.set
.mapped_ptr
);
391 tu_cmd_stream_finish(cmd_buffer
->device
, &cmd_buffer
->primary_cmd_stream
);
392 tu_bo_list_destroy(&cmd_buffer
->bo_list
);
393 vk_free(&cmd_buffer
->pool
->alloc
, cmd_buffer
);
397 tu_reset_cmd_buffer(struct tu_cmd_buffer
*cmd_buffer
)
399 cmd_buffer
->record_result
= VK_SUCCESS
;
401 tu_bo_list_reset(&cmd_buffer
->bo_list
);
402 tu_cmd_stream_reset(cmd_buffer
->device
, &cmd_buffer
->primary_cmd_stream
);
404 for (unsigned i
= 0; i
< VK_PIPELINE_BIND_POINT_RANGE_SIZE
; i
++) {
405 cmd_buffer
->descriptors
[i
].dirty
= 0;
406 cmd_buffer
->descriptors
[i
].valid
= 0;
407 cmd_buffer
->descriptors
[i
].push_dirty
= false;
410 cmd_buffer
->status
= TU_CMD_BUFFER_STATUS_INITIAL
;
412 return cmd_buffer
->record_result
;
416 tu_AllocateCommandBuffers(VkDevice _device
,
417 const VkCommandBufferAllocateInfo
*pAllocateInfo
,
418 VkCommandBuffer
*pCommandBuffers
)
420 TU_FROM_HANDLE(tu_device
, device
, _device
);
421 TU_FROM_HANDLE(tu_cmd_pool
, pool
, pAllocateInfo
->commandPool
);
423 VkResult result
= VK_SUCCESS
;
426 for (i
= 0; i
< pAllocateInfo
->commandBufferCount
; i
++) {
428 if (!list_empty(&pool
->free_cmd_buffers
)) {
429 struct tu_cmd_buffer
*cmd_buffer
= list_first_entry(
430 &pool
->free_cmd_buffers
, struct tu_cmd_buffer
, pool_link
);
432 list_del(&cmd_buffer
->pool_link
);
433 list_addtail(&cmd_buffer
->pool_link
, &pool
->cmd_buffers
);
435 result
= tu_reset_cmd_buffer(cmd_buffer
);
436 cmd_buffer
->_loader_data
.loaderMagic
= ICD_LOADER_MAGIC
;
437 cmd_buffer
->level
= pAllocateInfo
->level
;
439 pCommandBuffers
[i
] = tu_cmd_buffer_to_handle(cmd_buffer
);
441 result
= tu_create_cmd_buffer(device
, pool
, pAllocateInfo
->level
,
442 &pCommandBuffers
[i
]);
444 if (result
!= VK_SUCCESS
)
448 if (result
!= VK_SUCCESS
) {
449 tu_FreeCommandBuffers(_device
, pAllocateInfo
->commandPool
, i
,
452 /* From the Vulkan 1.0.66 spec:
454 * "vkAllocateCommandBuffers can be used to create multiple
455 * command buffers. If the creation of any of those command
456 * buffers fails, the implementation must destroy all
457 * successfully created command buffer objects from this
458 * command, set all entries of the pCommandBuffers array to
459 * NULL and return the error."
461 memset(pCommandBuffers
, 0,
462 sizeof(*pCommandBuffers
) * pAllocateInfo
->commandBufferCount
);
469 tu_FreeCommandBuffers(VkDevice device
,
470 VkCommandPool commandPool
,
471 uint32_t commandBufferCount
,
472 const VkCommandBuffer
*pCommandBuffers
)
474 for (uint32_t i
= 0; i
< commandBufferCount
; i
++) {
475 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, pCommandBuffers
[i
]);
478 if (cmd_buffer
->pool
) {
479 list_del(&cmd_buffer
->pool_link
);
480 list_addtail(&cmd_buffer
->pool_link
,
481 &cmd_buffer
->pool
->free_cmd_buffers
);
483 tu_cmd_buffer_destroy(cmd_buffer
);
489 tu_ResetCommandBuffer(VkCommandBuffer commandBuffer
,
490 VkCommandBufferResetFlags flags
)
492 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
493 return tu_reset_cmd_buffer(cmd_buffer
);
497 tu_BeginCommandBuffer(VkCommandBuffer commandBuffer
,
498 const VkCommandBufferBeginInfo
*pBeginInfo
)
500 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
501 VkResult result
= VK_SUCCESS
;
503 if (cmd_buffer
->status
!= TU_CMD_BUFFER_STATUS_INITIAL
) {
504 /* If the command buffer has already been resetted with
505 * vkResetCommandBuffer, no need to do it again.
507 result
= tu_reset_cmd_buffer(cmd_buffer
);
508 if (result
!= VK_SUCCESS
)
512 memset(&cmd_buffer
->state
, 0, sizeof(cmd_buffer
->state
));
513 cmd_buffer
->usage_flags
= pBeginInfo
->flags
;
515 /* setup initial configuration into command buffer */
516 if (cmd_buffer
->level
== VK_COMMAND_BUFFER_LEVEL_PRIMARY
) {
517 switch (cmd_buffer
->queue_family_index
) {
518 case TU_QUEUE_GENERAL
:
526 cmd_buffer
->status
= TU_CMD_BUFFER_STATUS_RECORDING
;
532 tu_CmdBindVertexBuffers(VkCommandBuffer commandBuffer
,
533 uint32_t firstBinding
,
534 uint32_t bindingCount
,
535 const VkBuffer
*pBuffers
,
536 const VkDeviceSize
*pOffsets
)
541 tu_CmdBindIndexBuffer(VkCommandBuffer commandBuffer
,
544 VkIndexType indexType
)
549 tu_CmdBindDescriptorSets(VkCommandBuffer commandBuffer
,
550 VkPipelineBindPoint pipelineBindPoint
,
551 VkPipelineLayout _layout
,
553 uint32_t descriptorSetCount
,
554 const VkDescriptorSet
*pDescriptorSets
,
555 uint32_t dynamicOffsetCount
,
556 const uint32_t *pDynamicOffsets
)
561 tu_CmdPushConstants(VkCommandBuffer commandBuffer
,
562 VkPipelineLayout layout
,
563 VkShaderStageFlags stageFlags
,
571 tu_EndCommandBuffer(VkCommandBuffer commandBuffer
)
573 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
575 cmd_buffer
->status
= TU_CMD_BUFFER_STATUS_EXECUTABLE
;
577 return cmd_buffer
->record_result
;
581 tu_CmdBindPipeline(VkCommandBuffer commandBuffer
,
582 VkPipelineBindPoint pipelineBindPoint
,
583 VkPipeline _pipeline
)
588 tu_CmdSetViewport(VkCommandBuffer commandBuffer
,
589 uint32_t firstViewport
,
590 uint32_t viewportCount
,
591 const VkViewport
*pViewports
)
596 tu_CmdSetScissor(VkCommandBuffer commandBuffer
,
597 uint32_t firstScissor
,
598 uint32_t scissorCount
,
599 const VkRect2D
*pScissors
)
604 tu_CmdSetLineWidth(VkCommandBuffer commandBuffer
, float lineWidth
)
609 tu_CmdSetDepthBias(VkCommandBuffer commandBuffer
,
610 float depthBiasConstantFactor
,
611 float depthBiasClamp
,
612 float depthBiasSlopeFactor
)
617 tu_CmdSetBlendConstants(VkCommandBuffer commandBuffer
,
618 const float blendConstants
[4])
623 tu_CmdSetDepthBounds(VkCommandBuffer commandBuffer
,
624 float minDepthBounds
,
625 float maxDepthBounds
)
630 tu_CmdSetStencilCompareMask(VkCommandBuffer commandBuffer
,
631 VkStencilFaceFlags faceMask
,
632 uint32_t compareMask
)
637 tu_CmdSetStencilWriteMask(VkCommandBuffer commandBuffer
,
638 VkStencilFaceFlags faceMask
,
644 tu_CmdSetStencilReference(VkCommandBuffer commandBuffer
,
645 VkStencilFaceFlags faceMask
,
651 tu_CmdExecuteCommands(VkCommandBuffer commandBuffer
,
652 uint32_t commandBufferCount
,
653 const VkCommandBuffer
*pCmdBuffers
)
658 tu_CreateCommandPool(VkDevice _device
,
659 const VkCommandPoolCreateInfo
*pCreateInfo
,
660 const VkAllocationCallbacks
*pAllocator
,
661 VkCommandPool
*pCmdPool
)
663 TU_FROM_HANDLE(tu_device
, device
, _device
);
664 struct tu_cmd_pool
*pool
;
666 pool
= vk_alloc2(&device
->alloc
, pAllocator
, sizeof(*pool
), 8,
667 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
669 return vk_error(device
->instance
, VK_ERROR_OUT_OF_HOST_MEMORY
);
672 pool
->alloc
= *pAllocator
;
674 pool
->alloc
= device
->alloc
;
676 list_inithead(&pool
->cmd_buffers
);
677 list_inithead(&pool
->free_cmd_buffers
);
679 pool
->queue_family_index
= pCreateInfo
->queueFamilyIndex
;
681 *pCmdPool
= tu_cmd_pool_to_handle(pool
);
687 tu_DestroyCommandPool(VkDevice _device
,
688 VkCommandPool commandPool
,
689 const VkAllocationCallbacks
*pAllocator
)
691 TU_FROM_HANDLE(tu_device
, device
, _device
);
692 TU_FROM_HANDLE(tu_cmd_pool
, pool
, commandPool
);
697 list_for_each_entry_safe(struct tu_cmd_buffer
, cmd_buffer
,
698 &pool
->cmd_buffers
, pool_link
)
700 tu_cmd_buffer_destroy(cmd_buffer
);
703 list_for_each_entry_safe(struct tu_cmd_buffer
, cmd_buffer
,
704 &pool
->free_cmd_buffers
, pool_link
)
706 tu_cmd_buffer_destroy(cmd_buffer
);
709 vk_free2(&device
->alloc
, pAllocator
, pool
);
713 tu_ResetCommandPool(VkDevice device
,
714 VkCommandPool commandPool
,
715 VkCommandPoolResetFlags flags
)
717 TU_FROM_HANDLE(tu_cmd_pool
, pool
, commandPool
);
720 list_for_each_entry(struct tu_cmd_buffer
, cmd_buffer
, &pool
->cmd_buffers
,
723 result
= tu_reset_cmd_buffer(cmd_buffer
);
724 if (result
!= VK_SUCCESS
)
732 tu_TrimCommandPool(VkDevice device
,
733 VkCommandPool commandPool
,
734 VkCommandPoolTrimFlagsKHR flags
)
736 TU_FROM_HANDLE(tu_cmd_pool
, pool
, commandPool
);
741 list_for_each_entry_safe(struct tu_cmd_buffer
, cmd_buffer
,
742 &pool
->free_cmd_buffers
, pool_link
)
744 tu_cmd_buffer_destroy(cmd_buffer
);
749 tu_CmdBeginRenderPass(VkCommandBuffer commandBuffer
,
750 const VkRenderPassBeginInfo
*pRenderPassBegin
,
751 VkSubpassContents contents
)
756 tu_CmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer
,
757 const VkRenderPassBeginInfo
*pRenderPassBeginInfo
,
758 const VkSubpassBeginInfoKHR
*pSubpassBeginInfo
)
760 tu_CmdBeginRenderPass(commandBuffer
, pRenderPassBeginInfo
,
761 pSubpassBeginInfo
->contents
);
765 tu_CmdNextSubpass(VkCommandBuffer commandBuffer
, VkSubpassContents contents
)
770 tu_CmdNextSubpass2KHR(VkCommandBuffer commandBuffer
,
771 const VkSubpassBeginInfoKHR
*pSubpassBeginInfo
,
772 const VkSubpassEndInfoKHR
*pSubpassEndInfo
)
774 tu_CmdNextSubpass(commandBuffer
, pSubpassBeginInfo
->contents
);
/* Parameters common to all draw entry points, normalized before being
 * handed to tu_draw(). NOTE(review): the struct header and some field
 * declarations fell in gaps of the damaged source; reconstructed from the
 * visible field comments and the assignments in tu_CmdDraw*(). */
struct tu_draw_info
{
   /**
    * Number of vertices.
    */
   uint32_t count;

   /**
    * Index of the first vertex.
    */
   int32_t vertex_offset;

   /**
    * First instance id.
    */
   uint32_t first_instance;

   /**
    * Number of instances.
    */
   uint32_t instance_count;

   /**
    * First index (indexed draws only).
    */
   uint32_t first_index;

   /**
    * Whether it's an indexed draw.
    */
   bool indexed;

   /**
    * Indirect draw parameters resource.
    */
   struct tu_buffer *indirect;
   uint64_t indirect_offset;
   uint32_t stride;

   /**
    * Draw count parameters resource.
    */
   struct tu_buffer *count_buffer;
   uint64_t count_buffer_offset;
};
/* Central draw dispatch — not yet implemented in this skeleton.
 * NOTE(review): body not visible in the damaged source; restored as an
 * empty stub. */
static void
tu_draw(struct tu_cmd_buffer *cmd_buffer, const struct tu_draw_info *info)
{
}
829 tu_CmdDraw(VkCommandBuffer commandBuffer
,
830 uint32_t vertexCount
,
831 uint32_t instanceCount
,
832 uint32_t firstVertex
,
833 uint32_t firstInstance
)
835 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
836 struct tu_draw_info info
= {};
838 info
.count
= vertexCount
;
839 info
.instance_count
= instanceCount
;
840 info
.first_instance
= firstInstance
;
841 info
.vertex_offset
= firstVertex
;
843 tu_draw(cmd_buffer
, &info
);
847 tu_CmdDrawIndexed(VkCommandBuffer commandBuffer
,
849 uint32_t instanceCount
,
851 int32_t vertexOffset
,
852 uint32_t firstInstance
)
854 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
855 struct tu_draw_info info
= {};
858 info
.count
= indexCount
;
859 info
.instance_count
= instanceCount
;
860 info
.first_index
= firstIndex
;
861 info
.vertex_offset
= vertexOffset
;
862 info
.first_instance
= firstInstance
;
864 tu_draw(cmd_buffer
, &info
);
868 tu_CmdDrawIndirect(VkCommandBuffer commandBuffer
,
874 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
875 TU_FROM_HANDLE(tu_buffer
, buffer
, _buffer
);
876 struct tu_draw_info info
= {};
878 info
.count
= drawCount
;
879 info
.indirect
= buffer
;
880 info
.indirect_offset
= offset
;
881 info
.stride
= stride
;
883 tu_draw(cmd_buffer
, &info
);
887 tu_CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer
,
893 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
894 TU_FROM_HANDLE(tu_buffer
, buffer
, _buffer
);
895 struct tu_draw_info info
= {};
898 info
.count
= drawCount
;
899 info
.indirect
= buffer
;
900 info
.indirect_offset
= offset
;
901 info
.stride
= stride
;
903 tu_draw(cmd_buffer
, &info
);
/* Parameters common to all dispatch entry points. NOTE(review): the
 * blocks/offsets/unaligned fields fell in gaps of the damaged source;
 * reconstructed from the visible field comments and the assignments in
 * tu_CmdDispatchBase(). */
struct tu_dispatch_info
{
   /**
    * Determine the layout of the grid (in block units) to be used.
    */
   uint32_t blocks[3];

   /**
    * A starting offset for the grid. If unaligned is set, the offset
    * must still be aligned.
    */
   uint32_t offsets[3];

   /**
    * Whether it's an unaligned compute dispatch.
    */
   bool unaligned;

   /**
    * Indirect compute parameters resource.
    */
   struct tu_buffer *indirect;
   uint64_t indirect_offset;
};
/* Central compute dispatch — not yet implemented in this skeleton.
 * NOTE(review): body not visible in the damaged source; restored as an
 * empty stub. */
static void
tu_dispatch(struct tu_cmd_buffer *cmd_buffer,
            const struct tu_dispatch_info *info)
{
}
937 tu_CmdDispatchBase(VkCommandBuffer commandBuffer
,
945 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
946 struct tu_dispatch_info info
= {};
952 info
.offsets
[0] = base_x
;
953 info
.offsets
[1] = base_y
;
954 info
.offsets
[2] = base_z
;
955 tu_dispatch(cmd_buffer
, &info
);
959 tu_CmdDispatch(VkCommandBuffer commandBuffer
,
964 tu_CmdDispatchBase(commandBuffer
, 0, 0, 0, x
, y
, z
);
968 tu_CmdDispatchIndirect(VkCommandBuffer commandBuffer
,
972 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
973 TU_FROM_HANDLE(tu_buffer
, buffer
, _buffer
);
974 struct tu_dispatch_info info
= {};
976 info
.indirect
= buffer
;
977 info
.indirect_offset
= offset
;
979 tu_dispatch(cmd_buffer
, &info
);
983 tu_CmdEndRenderPass(VkCommandBuffer commandBuffer
)
988 tu_CmdEndRenderPass2KHR(VkCommandBuffer commandBuffer
,
989 const VkSubpassEndInfoKHR
*pSubpassEndInfo
)
991 tu_CmdEndRenderPass(commandBuffer
);
994 struct tu_barrier_info
997 const VkEvent
*pEvents
;
998 VkPipelineStageFlags srcStageMask
;
1002 tu_barrier(struct tu_cmd_buffer
*cmd_buffer
,
1003 uint32_t memoryBarrierCount
,
1004 const VkMemoryBarrier
*pMemoryBarriers
,
1005 uint32_t bufferMemoryBarrierCount
,
1006 const VkBufferMemoryBarrier
*pBufferMemoryBarriers
,
1007 uint32_t imageMemoryBarrierCount
,
1008 const VkImageMemoryBarrier
*pImageMemoryBarriers
,
1009 const struct tu_barrier_info
*info
)
1014 tu_CmdPipelineBarrier(VkCommandBuffer commandBuffer
,
1015 VkPipelineStageFlags srcStageMask
,
1016 VkPipelineStageFlags destStageMask
,
1018 uint32_t memoryBarrierCount
,
1019 const VkMemoryBarrier
*pMemoryBarriers
,
1020 uint32_t bufferMemoryBarrierCount
,
1021 const VkBufferMemoryBarrier
*pBufferMemoryBarriers
,
1022 uint32_t imageMemoryBarrierCount
,
1023 const VkImageMemoryBarrier
*pImageMemoryBarriers
)
1025 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
1026 struct tu_barrier_info info
;
1028 info
.eventCount
= 0;
1029 info
.pEvents
= NULL
;
1030 info
.srcStageMask
= srcStageMask
;
1032 tu_barrier(cmd_buffer
, memoryBarrierCount
, pMemoryBarriers
,
1033 bufferMemoryBarrierCount
, pBufferMemoryBarriers
,
1034 imageMemoryBarrierCount
, pImageMemoryBarriers
, &info
);
1038 write_event(struct tu_cmd_buffer
*cmd_buffer
,
1039 struct tu_event
*event
,
1040 VkPipelineStageFlags stageMask
,
1046 tu_CmdSetEvent(VkCommandBuffer commandBuffer
,
1048 VkPipelineStageFlags stageMask
)
1050 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
1051 TU_FROM_HANDLE(tu_event
, event
, _event
);
1053 write_event(cmd_buffer
, event
, stageMask
, 1);
1057 tu_CmdResetEvent(VkCommandBuffer commandBuffer
,
1059 VkPipelineStageFlags stageMask
)
1061 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
1062 TU_FROM_HANDLE(tu_event
, event
, _event
);
1064 write_event(cmd_buffer
, event
, stageMask
, 0);
1068 tu_CmdWaitEvents(VkCommandBuffer commandBuffer
,
1069 uint32_t eventCount
,
1070 const VkEvent
*pEvents
,
1071 VkPipelineStageFlags srcStageMask
,
1072 VkPipelineStageFlags dstStageMask
,
1073 uint32_t memoryBarrierCount
,
1074 const VkMemoryBarrier
*pMemoryBarriers
,
1075 uint32_t bufferMemoryBarrierCount
,
1076 const VkBufferMemoryBarrier
*pBufferMemoryBarriers
,
1077 uint32_t imageMemoryBarrierCount
,
1078 const VkImageMemoryBarrier
*pImageMemoryBarriers
)
1080 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
1081 struct tu_barrier_info info
;
1083 info
.eventCount
= eventCount
;
1084 info
.pEvents
= pEvents
;
1085 info
.srcStageMask
= 0;
1087 tu_barrier(cmd_buffer
, memoryBarrierCount
, pMemoryBarriers
,
1088 bufferMemoryBarrierCount
, pBufferMemoryBarriers
,
1089 imageMemoryBarrierCount
, pImageMemoryBarriers
, &info
);
1093 tu_CmdSetDeviceMask(VkCommandBuffer commandBuffer
, uint32_t deviceMask
)