2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
5 * based in part on anv driver which is:
6 * Copyright © 2015 Intel Corporation
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
28 #include "tu_private.h"
30 #include "vk_format.h"
31 #include "adreno_pm4.xml.h"
33 tu_bo_list_init(struct tu_bo_list
*list
)
35 list
->count
= list
->capacity
= 0;
40 tu_bo_list_destroy(struct tu_bo_list
*list
)
46 tu_bo_list_reset(struct tu_bo_list
*list
)
52 tu_bo_list_add(struct tu_bo_list
*list
,
53 const struct tu_bo
*bo
)
55 uint32_t handle
= bo
->gem_handle
;
56 for (uint32_t i
= 0; i
< list
->count
; ++i
) {
57 if (list
->handles
[i
] == handle
)
61 if (list
->count
== list
->capacity
) {
62 uint32_t new_capacity
= MAX2(2 * list
->count
, 16);
63 uint32_t *new_handles
= realloc(list
->handles
, new_capacity
* sizeof(uint32_t));
66 list
->handles
= new_handles
;
67 list
->capacity
= new_capacity
;
70 uint32_t ret
= list
->count
;
71 list
->handles
[list
->count
] = handle
;
78 tu_cmd_stream_init(struct tu_cmd_stream
*stream
)
80 stream
->start
= stream
->cur
= stream
->end
= NULL
;
82 stream
->entry_count
= stream
->entry_capacity
= 0;
83 stream
->entries
= NULL
;
85 stream
->bo_count
= stream
->bo_capacity
= 0;
90 tu_cmd_stream_finish(struct tu_device
*dev
,
91 struct tu_cmd_stream
*stream
)
93 for (uint32_t i
= 0; i
< stream
->bo_count
; ++i
) {
94 tu_bo_finish(dev
, stream
->bos
[i
]);
98 free(stream
->entries
);
103 tu_cmd_stream_begin(struct tu_device
*dev
,
104 struct tu_cmd_stream
*stream
,
105 uint32_t reserve_size
)
107 assert(reserve_size
);
109 if (stream
->end
- stream
->cur
< reserve_size
) {
110 if (stream
->bo_count
== stream
->bo_capacity
) {
111 uint32_t new_capacity
= MAX2(4, 2 * stream
->bo_capacity
);
112 struct tu_bo
**new_bos
= realloc(stream
->bos
,
113 new_capacity
* sizeof(struct tu_bo
*));
117 stream
->bo_capacity
= new_capacity
;
118 stream
->bos
= new_bos
;
121 uint32_t new_size
= MAX2(16384, reserve_size
* sizeof(uint32_t));
122 if (stream
->bo_count
)
123 new_size
= MAX2(new_size
, stream
->bos
[stream
->bo_count
- 1]->size
* 2);
125 struct tu_bo
*new_bo
= malloc(sizeof(struct tu_bo
));
129 VkResult result
= tu_bo_init_new(dev
, new_bo
, new_size
);
130 if (result
!= VK_SUCCESS
) {
135 result
= tu_bo_map(dev
, new_bo
);
136 if (result
!= VK_SUCCESS
) {
137 tu_bo_finish(dev
, new_bo
);
142 stream
->bos
[stream
->bo_count
] = new_bo
;
145 stream
->start
= stream
->cur
= (uint32_t*)new_bo
->map
;
146 stream
->end
= stream
->start
+ new_bo
->size
/ sizeof(uint32_t);
148 stream
->start
= stream
->cur
;
154 tu_cmd_stream_end(struct tu_cmd_stream
*stream
)
156 if (stream
->start
== stream
->cur
)
159 if (stream
->entry_capacity
== stream
->entry_count
) {
160 uint32_t new_capacity
= MAX2(stream
->entry_capacity
* 2, 4);
161 struct tu_cmd_stream_entry
*new_entries
=
162 realloc(stream
->entries
, new_capacity
* sizeof(struct tu_cmd_stream_entry
));
166 stream
->entries
= new_entries
;
167 stream
->entry_capacity
= new_capacity
;
170 assert (stream
->bo_count
);
172 struct tu_cmd_stream_entry entry
;
173 entry
.bo
= stream
->bos
[stream
->bo_count
- 1];
174 entry
.size
= (stream
->cur
- stream
->start
) * sizeof(uint32_t);
175 entry
.offset
= (stream
->start
- (uint32_t*)entry
.bo
->map
) * sizeof(uint32_t);
177 stream
->entries
[stream
->entry_count
] = entry
;
178 ++stream
->entry_count
;
184 tu_cmd_stream_reset(struct tu_device
*dev
,
185 struct tu_cmd_stream
*stream
)
187 for (uint32_t i
= 0; i
+ 1 < stream
->bo_count
; ++i
) {
188 tu_bo_finish(dev
, stream
->bos
[i
]);
189 free(stream
->bos
[i
]);
192 if (stream
->bo_count
) {
193 stream
->bos
[0] = stream
->bos
[stream
->bo_count
- 1];
194 stream
->bo_count
= 1;
196 stream
->start
= stream
->cur
= (uint32_t*)stream
->bos
[0]->map
;
197 stream
->end
= stream
->start
+ stream
->bos
[0]->size
/ sizeof(uint32_t);
200 stream
->entry_count
= 0;
/**
 * Return the parity bit needed to give `val` odd parity, i.e. 1 when `val`
 * has an even number of set bits and 0 when it has an odd number.
 *
 * Folds the word down to 4 bits and indexes the classic 16-entry parity
 * lookup constant 0x6996; the constant is inverted because we want odd
 * (not even) parity.
 *
 * See: http://graphics.stanford.edu/~seander/bithacks.html#ParityParallel
 */
static unsigned
_odd_parity_bit(unsigned val)
{
   val ^= val >> 16;
   val ^= val >> 8;
   val ^= val >> 4;
   val &= 0xf;
   return (~0x6996 >> val) & 1;
}
217 OUT_PKT7(struct tu_cmd_stream
*stream
, uint8_t opcode
, uint16_t cnt
)
219 *stream
->cur
++ = CP_TYPE7_PKT
| cnt
|
220 (_odd_parity_bit(cnt
) << 15) |
221 ((opcode
& 0x7f) << 16) |
222 ((_odd_parity_bit(opcode
) << 23));
227 const struct tu_dynamic_state default_dynamic_state
= {
243 .blend_constants
= { 0.0f
, 0.0f
, 0.0f
, 0.0f
},
249 .stencil_compare_mask
=
254 .stencil_write_mask
=
266 static void UNUSED
/* FINISHME */
267 tu_bind_dynamic_state(struct tu_cmd_buffer
*cmd_buffer
,
268 const struct tu_dynamic_state
*src
)
270 struct tu_dynamic_state
*dest
= &cmd_buffer
->state
.dynamic
;
271 uint32_t copy_mask
= src
->mask
;
272 uint32_t dest_mask
= 0;
274 tu_use_args(cmd_buffer
); /* FINISHME */
276 /* Make sure to copy the number of viewports/scissors because they can
277 * only be specified at pipeline creation time.
279 dest
->viewport
.count
= src
->viewport
.count
;
280 dest
->scissor
.count
= src
->scissor
.count
;
281 dest
->discard_rectangle
.count
= src
->discard_rectangle
.count
;
283 if (copy_mask
& TU_DYNAMIC_VIEWPORT
) {
284 if (memcmp(&dest
->viewport
.viewports
, &src
->viewport
.viewports
,
285 src
->viewport
.count
* sizeof(VkViewport
))) {
286 typed_memcpy(dest
->viewport
.viewports
, src
->viewport
.viewports
,
287 src
->viewport
.count
);
288 dest_mask
|= TU_DYNAMIC_VIEWPORT
;
292 if (copy_mask
& TU_DYNAMIC_SCISSOR
) {
293 if (memcmp(&dest
->scissor
.scissors
, &src
->scissor
.scissors
,
294 src
->scissor
.count
* sizeof(VkRect2D
))) {
295 typed_memcpy(dest
->scissor
.scissors
, src
->scissor
.scissors
,
297 dest_mask
|= TU_DYNAMIC_SCISSOR
;
301 if (copy_mask
& TU_DYNAMIC_LINE_WIDTH
) {
302 if (dest
->line_width
!= src
->line_width
) {
303 dest
->line_width
= src
->line_width
;
304 dest_mask
|= TU_DYNAMIC_LINE_WIDTH
;
308 if (copy_mask
& TU_DYNAMIC_DEPTH_BIAS
) {
309 if (memcmp(&dest
->depth_bias
, &src
->depth_bias
,
310 sizeof(src
->depth_bias
))) {
311 dest
->depth_bias
= src
->depth_bias
;
312 dest_mask
|= TU_DYNAMIC_DEPTH_BIAS
;
316 if (copy_mask
& TU_DYNAMIC_BLEND_CONSTANTS
) {
317 if (memcmp(&dest
->blend_constants
, &src
->blend_constants
,
318 sizeof(src
->blend_constants
))) {
319 typed_memcpy(dest
->blend_constants
, src
->blend_constants
, 4);
320 dest_mask
|= TU_DYNAMIC_BLEND_CONSTANTS
;
324 if (copy_mask
& TU_DYNAMIC_DEPTH_BOUNDS
) {
325 if (memcmp(&dest
->depth_bounds
, &src
->depth_bounds
,
326 sizeof(src
->depth_bounds
))) {
327 dest
->depth_bounds
= src
->depth_bounds
;
328 dest_mask
|= TU_DYNAMIC_DEPTH_BOUNDS
;
332 if (copy_mask
& TU_DYNAMIC_STENCIL_COMPARE_MASK
) {
333 if (memcmp(&dest
->stencil_compare_mask
, &src
->stencil_compare_mask
,
334 sizeof(src
->stencil_compare_mask
))) {
335 dest
->stencil_compare_mask
= src
->stencil_compare_mask
;
336 dest_mask
|= TU_DYNAMIC_STENCIL_COMPARE_MASK
;
340 if (copy_mask
& TU_DYNAMIC_STENCIL_WRITE_MASK
) {
341 if (memcmp(&dest
->stencil_write_mask
, &src
->stencil_write_mask
,
342 sizeof(src
->stencil_write_mask
))) {
343 dest
->stencil_write_mask
= src
->stencil_write_mask
;
344 dest_mask
|= TU_DYNAMIC_STENCIL_WRITE_MASK
;
348 if (copy_mask
& TU_DYNAMIC_STENCIL_REFERENCE
) {
349 if (memcmp(&dest
->stencil_reference
, &src
->stencil_reference
,
350 sizeof(src
->stencil_reference
))) {
351 dest
->stencil_reference
= src
->stencil_reference
;
352 dest_mask
|= TU_DYNAMIC_STENCIL_REFERENCE
;
356 if (copy_mask
& TU_DYNAMIC_DISCARD_RECTANGLE
) {
357 if (memcmp(&dest
->discard_rectangle
.rectangles
,
358 &src
->discard_rectangle
.rectangles
,
359 src
->discard_rectangle
.count
* sizeof(VkRect2D
))) {
360 typed_memcpy(dest
->discard_rectangle
.rectangles
,
361 src
->discard_rectangle
.rectangles
,
362 src
->discard_rectangle
.count
);
363 dest_mask
|= TU_DYNAMIC_DISCARD_RECTANGLE
;
369 tu_create_cmd_buffer(struct tu_device
*device
,
370 struct tu_cmd_pool
*pool
,
371 VkCommandBufferLevel level
,
372 VkCommandBuffer
*pCommandBuffer
)
374 struct tu_cmd_buffer
*cmd_buffer
;
375 cmd_buffer
= vk_zalloc(&pool
->alloc
, sizeof(*cmd_buffer
), 8,
376 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
377 if (cmd_buffer
== NULL
)
378 return vk_error(device
->instance
, VK_ERROR_OUT_OF_HOST_MEMORY
);
380 cmd_buffer
->_loader_data
.loaderMagic
= ICD_LOADER_MAGIC
;
381 cmd_buffer
->device
= device
;
382 cmd_buffer
->pool
= pool
;
383 cmd_buffer
->level
= level
;
386 list_addtail(&cmd_buffer
->pool_link
, &pool
->cmd_buffers
);
387 cmd_buffer
->queue_family_index
= pool
->queue_family_index
;
390 /* Init the pool_link so we can safely call list_del when we destroy
393 list_inithead(&cmd_buffer
->pool_link
);
394 cmd_buffer
->queue_family_index
= TU_QUEUE_GENERAL
;
397 tu_bo_list_init(&cmd_buffer
->bo_list
);
398 tu_cmd_stream_init(&cmd_buffer
->primary_cmd_stream
);
400 *pCommandBuffer
= tu_cmd_buffer_to_handle(cmd_buffer
);
402 list_inithead(&cmd_buffer
->upload
.list
);
408 tu_cmd_buffer_destroy(struct tu_cmd_buffer
*cmd_buffer
)
410 list_del(&cmd_buffer
->pool_link
);
412 for (unsigned i
= 0; i
< VK_PIPELINE_BIND_POINT_RANGE_SIZE
; i
++)
413 free(cmd_buffer
->descriptors
[i
].push_set
.set
.mapped_ptr
);
415 tu_cmd_stream_finish(cmd_buffer
->device
, &cmd_buffer
->primary_cmd_stream
);
416 tu_bo_list_destroy(&cmd_buffer
->bo_list
);
417 vk_free(&cmd_buffer
->pool
->alloc
, cmd_buffer
);
421 tu_reset_cmd_buffer(struct tu_cmd_buffer
*cmd_buffer
)
423 cmd_buffer
->record_result
= VK_SUCCESS
;
425 tu_bo_list_reset(&cmd_buffer
->bo_list
);
426 tu_cmd_stream_reset(cmd_buffer
->device
, &cmd_buffer
->primary_cmd_stream
);
428 for (unsigned i
= 0; i
< VK_PIPELINE_BIND_POINT_RANGE_SIZE
; i
++) {
429 cmd_buffer
->descriptors
[i
].dirty
= 0;
430 cmd_buffer
->descriptors
[i
].valid
= 0;
431 cmd_buffer
->descriptors
[i
].push_dirty
= false;
434 cmd_buffer
->status
= TU_CMD_BUFFER_STATUS_INITIAL
;
436 return cmd_buffer
->record_result
;
440 tu_AllocateCommandBuffers(VkDevice _device
,
441 const VkCommandBufferAllocateInfo
*pAllocateInfo
,
442 VkCommandBuffer
*pCommandBuffers
)
444 TU_FROM_HANDLE(tu_device
, device
, _device
);
445 TU_FROM_HANDLE(tu_cmd_pool
, pool
, pAllocateInfo
->commandPool
);
447 VkResult result
= VK_SUCCESS
;
450 for (i
= 0; i
< pAllocateInfo
->commandBufferCount
; i
++) {
452 if (!list_empty(&pool
->free_cmd_buffers
)) {
453 struct tu_cmd_buffer
*cmd_buffer
= list_first_entry(
454 &pool
->free_cmd_buffers
, struct tu_cmd_buffer
, pool_link
);
456 list_del(&cmd_buffer
->pool_link
);
457 list_addtail(&cmd_buffer
->pool_link
, &pool
->cmd_buffers
);
459 result
= tu_reset_cmd_buffer(cmd_buffer
);
460 cmd_buffer
->_loader_data
.loaderMagic
= ICD_LOADER_MAGIC
;
461 cmd_buffer
->level
= pAllocateInfo
->level
;
463 pCommandBuffers
[i
] = tu_cmd_buffer_to_handle(cmd_buffer
);
465 result
= tu_create_cmd_buffer(device
, pool
, pAllocateInfo
->level
,
466 &pCommandBuffers
[i
]);
468 if (result
!= VK_SUCCESS
)
472 if (result
!= VK_SUCCESS
) {
473 tu_FreeCommandBuffers(_device
, pAllocateInfo
->commandPool
, i
,
476 /* From the Vulkan 1.0.66 spec:
478 * "vkAllocateCommandBuffers can be used to create multiple
479 * command buffers. If the creation of any of those command
480 * buffers fails, the implementation must destroy all
481 * successfully created command buffer objects from this
482 * command, set all entries of the pCommandBuffers array to
483 * NULL and return the error."
485 memset(pCommandBuffers
, 0,
486 sizeof(*pCommandBuffers
) * pAllocateInfo
->commandBufferCount
);
493 tu_FreeCommandBuffers(VkDevice device
,
494 VkCommandPool commandPool
,
495 uint32_t commandBufferCount
,
496 const VkCommandBuffer
*pCommandBuffers
)
498 for (uint32_t i
= 0; i
< commandBufferCount
; i
++) {
499 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, pCommandBuffers
[i
]);
502 if (cmd_buffer
->pool
) {
503 list_del(&cmd_buffer
->pool_link
);
504 list_addtail(&cmd_buffer
->pool_link
,
505 &cmd_buffer
->pool
->free_cmd_buffers
);
507 tu_cmd_buffer_destroy(cmd_buffer
);
513 tu_ResetCommandBuffer(VkCommandBuffer commandBuffer
,
514 VkCommandBufferResetFlags flags
)
516 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
517 return tu_reset_cmd_buffer(cmd_buffer
);
521 tu_BeginCommandBuffer(VkCommandBuffer commandBuffer
,
522 const VkCommandBufferBeginInfo
*pBeginInfo
)
524 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
525 VkResult result
= VK_SUCCESS
;
527 if (cmd_buffer
->status
!= TU_CMD_BUFFER_STATUS_INITIAL
) {
528 /* If the command buffer has already been resetted with
529 * vkResetCommandBuffer, no need to do it again.
531 result
= tu_reset_cmd_buffer(cmd_buffer
);
532 if (result
!= VK_SUCCESS
)
536 memset(&cmd_buffer
->state
, 0, sizeof(cmd_buffer
->state
));
537 cmd_buffer
->usage_flags
= pBeginInfo
->flags
;
539 /* setup initial configuration into command buffer */
540 if (cmd_buffer
->level
== VK_COMMAND_BUFFER_LEVEL_PRIMARY
) {
541 switch (cmd_buffer
->queue_family_index
) {
542 case TU_QUEUE_GENERAL
:
550 cmd_buffer
->status
= TU_CMD_BUFFER_STATUS_RECORDING
;
552 result
= tu_cmd_stream_begin(cmd_buffer
->device
,
553 &cmd_buffer
->primary_cmd_stream
, 4096);
555 /* Put some stuff in so we do not have empty command buffers. */
556 OUT_PKT7(&cmd_buffer
->primary_cmd_stream
, CP_NOP
, 4);
557 *cmd_buffer
->primary_cmd_stream
.cur
++ = 0;
558 *cmd_buffer
->primary_cmd_stream
.cur
++ = 0;
559 *cmd_buffer
->primary_cmd_stream
.cur
++ = 0;
560 *cmd_buffer
->primary_cmd_stream
.cur
++ = 0;
565 tu_CmdBindVertexBuffers(VkCommandBuffer commandBuffer
,
566 uint32_t firstBinding
,
567 uint32_t bindingCount
,
568 const VkBuffer
*pBuffers
,
569 const VkDeviceSize
*pOffsets
)
574 tu_CmdBindIndexBuffer(VkCommandBuffer commandBuffer
,
577 VkIndexType indexType
)
582 tu_CmdBindDescriptorSets(VkCommandBuffer commandBuffer
,
583 VkPipelineBindPoint pipelineBindPoint
,
584 VkPipelineLayout _layout
,
586 uint32_t descriptorSetCount
,
587 const VkDescriptorSet
*pDescriptorSets
,
588 uint32_t dynamicOffsetCount
,
589 const uint32_t *pDynamicOffsets
)
594 tu_CmdPushConstants(VkCommandBuffer commandBuffer
,
595 VkPipelineLayout layout
,
596 VkShaderStageFlags stageFlags
,
604 tu_EndCommandBuffer(VkCommandBuffer commandBuffer
)
606 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
608 tu_cmd_stream_end(&cmd_buffer
->primary_cmd_stream
);
609 cmd_buffer
->status
= TU_CMD_BUFFER_STATUS_EXECUTABLE
;
611 return cmd_buffer
->record_result
;
615 tu_CmdBindPipeline(VkCommandBuffer commandBuffer
,
616 VkPipelineBindPoint pipelineBindPoint
,
617 VkPipeline _pipeline
)
622 tu_CmdSetViewport(VkCommandBuffer commandBuffer
,
623 uint32_t firstViewport
,
624 uint32_t viewportCount
,
625 const VkViewport
*pViewports
)
630 tu_CmdSetScissor(VkCommandBuffer commandBuffer
,
631 uint32_t firstScissor
,
632 uint32_t scissorCount
,
633 const VkRect2D
*pScissors
)
638 tu_CmdSetLineWidth(VkCommandBuffer commandBuffer
, float lineWidth
)
643 tu_CmdSetDepthBias(VkCommandBuffer commandBuffer
,
644 float depthBiasConstantFactor
,
645 float depthBiasClamp
,
646 float depthBiasSlopeFactor
)
651 tu_CmdSetBlendConstants(VkCommandBuffer commandBuffer
,
652 const float blendConstants
[4])
657 tu_CmdSetDepthBounds(VkCommandBuffer commandBuffer
,
658 float minDepthBounds
,
659 float maxDepthBounds
)
664 tu_CmdSetStencilCompareMask(VkCommandBuffer commandBuffer
,
665 VkStencilFaceFlags faceMask
,
666 uint32_t compareMask
)
671 tu_CmdSetStencilWriteMask(VkCommandBuffer commandBuffer
,
672 VkStencilFaceFlags faceMask
,
678 tu_CmdSetStencilReference(VkCommandBuffer commandBuffer
,
679 VkStencilFaceFlags faceMask
,
685 tu_CmdExecuteCommands(VkCommandBuffer commandBuffer
,
686 uint32_t commandBufferCount
,
687 const VkCommandBuffer
*pCmdBuffers
)
692 tu_CreateCommandPool(VkDevice _device
,
693 const VkCommandPoolCreateInfo
*pCreateInfo
,
694 const VkAllocationCallbacks
*pAllocator
,
695 VkCommandPool
*pCmdPool
)
697 TU_FROM_HANDLE(tu_device
, device
, _device
);
698 struct tu_cmd_pool
*pool
;
700 pool
= vk_alloc2(&device
->alloc
, pAllocator
, sizeof(*pool
), 8,
701 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
703 return vk_error(device
->instance
, VK_ERROR_OUT_OF_HOST_MEMORY
);
706 pool
->alloc
= *pAllocator
;
708 pool
->alloc
= device
->alloc
;
710 list_inithead(&pool
->cmd_buffers
);
711 list_inithead(&pool
->free_cmd_buffers
);
713 pool
->queue_family_index
= pCreateInfo
->queueFamilyIndex
;
715 *pCmdPool
= tu_cmd_pool_to_handle(pool
);
721 tu_DestroyCommandPool(VkDevice _device
,
722 VkCommandPool commandPool
,
723 const VkAllocationCallbacks
*pAllocator
)
725 TU_FROM_HANDLE(tu_device
, device
, _device
);
726 TU_FROM_HANDLE(tu_cmd_pool
, pool
, commandPool
);
731 list_for_each_entry_safe(struct tu_cmd_buffer
, cmd_buffer
,
732 &pool
->cmd_buffers
, pool_link
)
734 tu_cmd_buffer_destroy(cmd_buffer
);
737 list_for_each_entry_safe(struct tu_cmd_buffer
, cmd_buffer
,
738 &pool
->free_cmd_buffers
, pool_link
)
740 tu_cmd_buffer_destroy(cmd_buffer
);
743 vk_free2(&device
->alloc
, pAllocator
, pool
);
747 tu_ResetCommandPool(VkDevice device
,
748 VkCommandPool commandPool
,
749 VkCommandPoolResetFlags flags
)
751 TU_FROM_HANDLE(tu_cmd_pool
, pool
, commandPool
);
754 list_for_each_entry(struct tu_cmd_buffer
, cmd_buffer
, &pool
->cmd_buffers
,
757 result
= tu_reset_cmd_buffer(cmd_buffer
);
758 if (result
!= VK_SUCCESS
)
766 tu_TrimCommandPool(VkDevice device
,
767 VkCommandPool commandPool
,
768 VkCommandPoolTrimFlagsKHR flags
)
770 TU_FROM_HANDLE(tu_cmd_pool
, pool
, commandPool
);
775 list_for_each_entry_safe(struct tu_cmd_buffer
, cmd_buffer
,
776 &pool
->free_cmd_buffers
, pool_link
)
778 tu_cmd_buffer_destroy(cmd_buffer
);
783 tu_CmdBeginRenderPass(VkCommandBuffer commandBuffer
,
784 const VkRenderPassBeginInfo
*pRenderPassBegin
,
785 VkSubpassContents contents
)
790 tu_CmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer
,
791 const VkRenderPassBeginInfo
*pRenderPassBeginInfo
,
792 const VkSubpassBeginInfoKHR
*pSubpassBeginInfo
)
794 tu_CmdBeginRenderPass(commandBuffer
, pRenderPassBeginInfo
,
795 pSubpassBeginInfo
->contents
);
799 tu_CmdNextSubpass(VkCommandBuffer commandBuffer
, VkSubpassContents contents
)
804 tu_CmdNextSubpass2KHR(VkCommandBuffer commandBuffer
,
805 const VkSubpassBeginInfoKHR
*pSubpassBeginInfo
,
806 const VkSubpassEndInfoKHR
*pSubpassEndInfo
)
808 tu_CmdNextSubpass(commandBuffer
, pSubpassBeginInfo
->contents
);
struct tu_draw_info
{
   /**
    * Number of vertices (or indices, for indexed draws).
    */
   uint32_t count;

   /**
    * Index of the first vertex.
    */
   int32_t vertex_offset;

   /**
    * First instance id.
    */
   uint32_t first_instance;

   /**
    * Number of instances.
    */
   uint32_t instance_count;

   /**
    * First index (indexed draws only).
    */
   uint32_t first_index;

   /**
    * Whether it's an indexed draw.
    */
   bool indexed;

   /**
    * Indirect draw parameters resource.
    */
   struct tu_buffer *indirect;
   uint64_t indirect_offset;
   uint32_t stride;

   /**
    * Draw count parameters resource.
    */
   struct tu_buffer *count_buffer;
   uint64_t count_buffer_offset;
};
/**
 * Common draw dispatcher for all vkCmdDraw* entry points — not implemented
 * yet.
 */
static void
tu_draw(struct tu_cmd_buffer *cmd_buffer, const struct tu_draw_info *info)
{
}
863 tu_CmdDraw(VkCommandBuffer commandBuffer
,
864 uint32_t vertexCount
,
865 uint32_t instanceCount
,
866 uint32_t firstVertex
,
867 uint32_t firstInstance
)
869 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
870 struct tu_draw_info info
= {};
872 info
.count
= vertexCount
;
873 info
.instance_count
= instanceCount
;
874 info
.first_instance
= firstInstance
;
875 info
.vertex_offset
= firstVertex
;
877 tu_draw(cmd_buffer
, &info
);
881 tu_CmdDrawIndexed(VkCommandBuffer commandBuffer
,
883 uint32_t instanceCount
,
885 int32_t vertexOffset
,
886 uint32_t firstInstance
)
888 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
889 struct tu_draw_info info
= {};
892 info
.count
= indexCount
;
893 info
.instance_count
= instanceCount
;
894 info
.first_index
= firstIndex
;
895 info
.vertex_offset
= vertexOffset
;
896 info
.first_instance
= firstInstance
;
898 tu_draw(cmd_buffer
, &info
);
902 tu_CmdDrawIndirect(VkCommandBuffer commandBuffer
,
908 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
909 TU_FROM_HANDLE(tu_buffer
, buffer
, _buffer
);
910 struct tu_draw_info info
= {};
912 info
.count
= drawCount
;
913 info
.indirect
= buffer
;
914 info
.indirect_offset
= offset
;
915 info
.stride
= stride
;
917 tu_draw(cmd_buffer
, &info
);
921 tu_CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer
,
927 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
928 TU_FROM_HANDLE(tu_buffer
, buffer
, _buffer
);
929 struct tu_draw_info info
= {};
932 info
.count
= drawCount
;
933 info
.indirect
= buffer
;
934 info
.indirect_offset
= offset
;
935 info
.stride
= stride
;
937 tu_draw(cmd_buffer
, &info
);
struct tu_dispatch_info
{
   /**
    * Determine the layout of the grid (in block units) to be used.
    */
   uint32_t blocks[3];

   /**
    * A starting offset for the grid. If unaligned is set, the offset
    * must still be aligned.
    */
   uint32_t offsets[3];

   /**
    * Whether it's an unaligned compute dispatch.
    */
   bool unaligned;

   /**
    * Indirect compute parameters resource.
    */
   struct tu_buffer *indirect;
   uint64_t indirect_offset;
};
/**
 * Common compute dispatcher for all vkCmdDispatch* entry points — not
 * implemented yet.
 */
static void
tu_dispatch(struct tu_cmd_buffer *cmd_buffer,
            const struct tu_dispatch_info *info)
{
}
971 tu_CmdDispatchBase(VkCommandBuffer commandBuffer
,
979 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
980 struct tu_dispatch_info info
= {};
986 info
.offsets
[0] = base_x
;
987 info
.offsets
[1] = base_y
;
988 info
.offsets
[2] = base_z
;
989 tu_dispatch(cmd_buffer
, &info
);
993 tu_CmdDispatch(VkCommandBuffer commandBuffer
,
998 tu_CmdDispatchBase(commandBuffer
, 0, 0, 0, x
, y
, z
);
1002 tu_CmdDispatchIndirect(VkCommandBuffer commandBuffer
,
1004 VkDeviceSize offset
)
1006 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
1007 TU_FROM_HANDLE(tu_buffer
, buffer
, _buffer
);
1008 struct tu_dispatch_info info
= {};
1010 info
.indirect
= buffer
;
1011 info
.indirect_offset
= offset
;
1013 tu_dispatch(cmd_buffer
, &info
);
1017 tu_CmdEndRenderPass(VkCommandBuffer commandBuffer
)
1022 tu_CmdEndRenderPass2KHR(VkCommandBuffer commandBuffer
,
1023 const VkSubpassEndInfoKHR
*pSubpassEndInfo
)
1025 tu_CmdEndRenderPass(commandBuffer
);
1028 struct tu_barrier_info
1030 uint32_t eventCount
;
1031 const VkEvent
*pEvents
;
1032 VkPipelineStageFlags srcStageMask
;
1036 tu_barrier(struct tu_cmd_buffer
*cmd_buffer
,
1037 uint32_t memoryBarrierCount
,
1038 const VkMemoryBarrier
*pMemoryBarriers
,
1039 uint32_t bufferMemoryBarrierCount
,
1040 const VkBufferMemoryBarrier
*pBufferMemoryBarriers
,
1041 uint32_t imageMemoryBarrierCount
,
1042 const VkImageMemoryBarrier
*pImageMemoryBarriers
,
1043 const struct tu_barrier_info
*info
)
1048 tu_CmdPipelineBarrier(VkCommandBuffer commandBuffer
,
1049 VkPipelineStageFlags srcStageMask
,
1050 VkPipelineStageFlags destStageMask
,
1052 uint32_t memoryBarrierCount
,
1053 const VkMemoryBarrier
*pMemoryBarriers
,
1054 uint32_t bufferMemoryBarrierCount
,
1055 const VkBufferMemoryBarrier
*pBufferMemoryBarriers
,
1056 uint32_t imageMemoryBarrierCount
,
1057 const VkImageMemoryBarrier
*pImageMemoryBarriers
)
1059 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
1060 struct tu_barrier_info info
;
1062 info
.eventCount
= 0;
1063 info
.pEvents
= NULL
;
1064 info
.srcStageMask
= srcStageMask
;
1066 tu_barrier(cmd_buffer
, memoryBarrierCount
, pMemoryBarriers
,
1067 bufferMemoryBarrierCount
, pBufferMemoryBarriers
,
1068 imageMemoryBarrierCount
, pImageMemoryBarriers
, &info
);
1072 write_event(struct tu_cmd_buffer
*cmd_buffer
,
1073 struct tu_event
*event
,
1074 VkPipelineStageFlags stageMask
,
1080 tu_CmdSetEvent(VkCommandBuffer commandBuffer
,
1082 VkPipelineStageFlags stageMask
)
1084 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
1085 TU_FROM_HANDLE(tu_event
, event
, _event
);
1087 write_event(cmd_buffer
, event
, stageMask
, 1);
1091 tu_CmdResetEvent(VkCommandBuffer commandBuffer
,
1093 VkPipelineStageFlags stageMask
)
1095 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
1096 TU_FROM_HANDLE(tu_event
, event
, _event
);
1098 write_event(cmd_buffer
, event
, stageMask
, 0);
1102 tu_CmdWaitEvents(VkCommandBuffer commandBuffer
,
1103 uint32_t eventCount
,
1104 const VkEvent
*pEvents
,
1105 VkPipelineStageFlags srcStageMask
,
1106 VkPipelineStageFlags dstStageMask
,
1107 uint32_t memoryBarrierCount
,
1108 const VkMemoryBarrier
*pMemoryBarriers
,
1109 uint32_t bufferMemoryBarrierCount
,
1110 const VkBufferMemoryBarrier
*pBufferMemoryBarriers
,
1111 uint32_t imageMemoryBarrierCount
,
1112 const VkImageMemoryBarrier
*pImageMemoryBarriers
)
1114 TU_FROM_HANDLE(tu_cmd_buffer
, cmd_buffer
, commandBuffer
);
1115 struct tu_barrier_info info
;
1117 info
.eventCount
= eventCount
;
1118 info
.pEvents
= pEvents
;
1119 info
.srcStageMask
= 0;
1121 tu_barrier(cmd_buffer
, memoryBarrierCount
, pMemoryBarriers
,
1122 bufferMemoryBarrierCount
, pBufferMemoryBarriers
,
1123 imageMemoryBarrierCount
, pImageMemoryBarriers
, &info
);
1127 tu_CmdSetDeviceMask(VkCommandBuffer commandBuffer
, uint32_t deviceMask
)