turnip: add tu_cs_add_bo
[mesa.git] src/freedreno/vulkan/tu_cmd_buffer.c
/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "tu_private.h"

#include "vk_format.h"
#include "adreno_pm4.xml.h"
#include "tu_cs.h"

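/*
 * The BO list collects the GEM handle of every buffer object referenced by
 * the command buffer so that they can be handed to the kernel when the
 * buffer is submitted.
 */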
void
tu_bo_list_init(struct tu_bo_list *list)
{
   list->count = list->capacity = 0;
   list->handles = NULL;
}

void
tu_bo_list_destroy(struct tu_bo_list *list)
{
   free(list->handles);
}

void
tu_bo_list_reset(struct tu_bo_list *list)
{
   list->count = 0;
}

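/*
 * Add a BO to the list, deduplicating by GEM handle. Returns the index of
 * the handle in the list, or ~0 if the list could not be grown.
 */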
uint32_t
tu_bo_list_add(struct tu_bo_list *list,
               const struct tu_bo *bo)
{
   uint32_t handle = bo->gem_handle;
   for (uint32_t i = 0; i < list->count; ++i) {
      if (list->handles[i] == handle)
         return i;
   }

   if (list->count == list->capacity) {
      uint32_t new_capacity = MAX2(2 * list->count, 16);
      uint32_t *new_handles =
         realloc(list->handles, new_capacity * sizeof(uint32_t));
      if (!new_handles)
         return ~0;
      list->handles = new_handles;
      list->capacity = new_capacity;
   }

   uint32_t ret = list->count;
   list->handles[list->count] = handle;
   ++list->count;

   return ret;
}

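/*
 * Default values for dynamic state, used until the state is set either at
 * pipeline creation or via the vkCmdSet* entry points.
 */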
const struct tu_dynamic_state default_dynamic_state = {
   .viewport =
      {
         .count = 0,
      },
   .scissor =
      {
         .count = 0,
      },
   .line_width = 1.0f,
   .depth_bias =
      {
         .bias = 0.0f,
         .clamp = 0.0f,
         .slope = 0.0f,
      },
   .blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
   .depth_bounds =
      {
         .min = 0.0f,
         .max = 1.0f,
      },
   .stencil_compare_mask =
      {
         .front = ~0u,
         .back = ~0u,
      },
   .stencil_write_mask =
      {
         .front = ~0u,
         .back = ~0u,
      },
   .stencil_reference =
      {
         .front = 0u,
         .back = 0u,
      },
};

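/*
 * Copy the dynamic state in \a src into the command buffer, recording in
 * dest_mask which fields actually changed so that redundant state emission
 * can be avoided later.
 */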
static void UNUSED /* FINISHME */
tu_bind_dynamic_state(struct tu_cmd_buffer *cmd_buffer,
                      const struct tu_dynamic_state *src)
{
   struct tu_dynamic_state *dest = &cmd_buffer->state.dynamic;
   uint32_t copy_mask = src->mask;
   uint32_t dest_mask = 0;

   tu_use_args(cmd_buffer); /* FINISHME */

   /* Make sure to copy the number of viewports/scissors because they can
    * only be specified at pipeline creation time.
    */
   dest->viewport.count = src->viewport.count;
   dest->scissor.count = src->scissor.count;
   dest->discard_rectangle.count = src->discard_rectangle.count;

   if (copy_mask & TU_DYNAMIC_VIEWPORT) {
      if (memcmp(&dest->viewport.viewports, &src->viewport.viewports,
                 src->viewport.count * sizeof(VkViewport))) {
         typed_memcpy(dest->viewport.viewports, src->viewport.viewports,
                      src->viewport.count);
         dest_mask |= TU_DYNAMIC_VIEWPORT;
      }
   }

   if (copy_mask & TU_DYNAMIC_SCISSOR) {
      if (memcmp(&dest->scissor.scissors, &src->scissor.scissors,
                 src->scissor.count * sizeof(VkRect2D))) {
         typed_memcpy(dest->scissor.scissors, src->scissor.scissors,
                      src->scissor.count);
         dest_mask |= TU_DYNAMIC_SCISSOR;
      }
   }

   if (copy_mask & TU_DYNAMIC_LINE_WIDTH) {
      if (dest->line_width != src->line_width) {
         dest->line_width = src->line_width;
         dest_mask |= TU_DYNAMIC_LINE_WIDTH;
      }
   }

   if (copy_mask & TU_DYNAMIC_DEPTH_BIAS) {
      if (memcmp(&dest->depth_bias, &src->depth_bias,
                 sizeof(src->depth_bias))) {
         dest->depth_bias = src->depth_bias;
         dest_mask |= TU_DYNAMIC_DEPTH_BIAS;
      }
   }

   if (copy_mask & TU_DYNAMIC_BLEND_CONSTANTS) {
      if (memcmp(&dest->blend_constants, &src->blend_constants,
                 sizeof(src->blend_constants))) {
         typed_memcpy(dest->blend_constants, src->blend_constants, 4);
         dest_mask |= TU_DYNAMIC_BLEND_CONSTANTS;
      }
   }

   if (copy_mask & TU_DYNAMIC_DEPTH_BOUNDS) {
      if (memcmp(&dest->depth_bounds, &src->depth_bounds,
                 sizeof(src->depth_bounds))) {
         dest->depth_bounds = src->depth_bounds;
         dest_mask |= TU_DYNAMIC_DEPTH_BOUNDS;
      }
   }

   if (copy_mask & TU_DYNAMIC_STENCIL_COMPARE_MASK) {
      if (memcmp(&dest->stencil_compare_mask, &src->stencil_compare_mask,
                 sizeof(src->stencil_compare_mask))) {
         dest->stencil_compare_mask = src->stencil_compare_mask;
         dest_mask |= TU_DYNAMIC_STENCIL_COMPARE_MASK;
      }
   }

   if (copy_mask & TU_DYNAMIC_STENCIL_WRITE_MASK) {
      if (memcmp(&dest->stencil_write_mask, &src->stencil_write_mask,
                 sizeof(src->stencil_write_mask))) {
         dest->stencil_write_mask = src->stencil_write_mask;
         dest_mask |= TU_DYNAMIC_STENCIL_WRITE_MASK;
      }
   }

   if (copy_mask & TU_DYNAMIC_STENCIL_REFERENCE) {
      if (memcmp(&dest->stencil_reference, &src->stencil_reference,
                 sizeof(src->stencil_reference))) {
         dest->stencil_reference = src->stencil_reference;
         dest_mask |= TU_DYNAMIC_STENCIL_REFERENCE;
      }
   }

   if (copy_mask & TU_DYNAMIC_DISCARD_RECTANGLE) {
      if (memcmp(&dest->discard_rectangle.rectangles,
                 &src->discard_rectangle.rectangles,
                 src->discard_rectangle.count * sizeof(VkRect2D))) {
         typed_memcpy(dest->discard_rectangle.rectangles,
                      src->discard_rectangle.rectangles,
                      src->discard_rectangle.count);
         dest_mask |= TU_DYNAMIC_DISCARD_RECTANGLE;
      }
   }
}

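/*
 * Allocate and initialize a new command buffer and, when a pool is given,
 * link it into the pool's list of command buffers.
 */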
static VkResult
tu_create_cmd_buffer(struct tu_device *device,
                     struct tu_cmd_pool *pool,
                     VkCommandBufferLevel level,
                     VkCommandBuffer *pCommandBuffer)
{
   struct tu_cmd_buffer *cmd_buffer;
   cmd_buffer = vk_zalloc(&pool->alloc, sizeof(*cmd_buffer), 8,
                          VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (cmd_buffer == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   cmd_buffer->device = device;
   cmd_buffer->pool = pool;
   cmd_buffer->level = level;

   if (pool) {
      list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
      cmd_buffer->queue_family_index = pool->queue_family_index;
   } else {
      /* Init the pool_link so we can safely call list_del when we destroy
       * the command buffer
       */
      list_inithead(&cmd_buffer->pool_link);
      cmd_buffer->queue_family_index = TU_QUEUE_GENERAL;
   }

   tu_bo_list_init(&cmd_buffer->bo_list);
   tu_cs_init(&cmd_buffer->cs);

   *pCommandBuffer = tu_cmd_buffer_to_handle(cmd_buffer);

   list_inithead(&cmd_buffer->upload.list);

   return VK_SUCCESS;
}

static void
tu_cmd_buffer_destroy(struct tu_cmd_buffer *cmd_buffer)
{
   list_del(&cmd_buffer->pool_link);

   for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++)
      free(cmd_buffer->descriptors[i].push_set.set.mapped_ptr);

   tu_cs_finish(cmd_buffer->device, &cmd_buffer->cs);
   tu_bo_list_destroy(&cmd_buffer->bo_list);
   vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
}

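/*
 * Return a command buffer to the initial state so that it can be recorded
 * again.
 */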
static VkResult
tu_reset_cmd_buffer(struct tu_cmd_buffer *cmd_buffer)
{
   cmd_buffer->record_result = VK_SUCCESS;

   tu_bo_list_reset(&cmd_buffer->bo_list);
   tu_cs_reset(cmd_buffer->device, &cmd_buffer->cs);

   for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++) {
      cmd_buffer->descriptors[i].dirty = 0;
      cmd_buffer->descriptors[i].valid = 0;
      cmd_buffer->descriptors[i].push_dirty = false;
   }

   cmd_buffer->status = TU_CMD_BUFFER_STATUS_INITIAL;

   return cmd_buffer->record_result;
}

VkResult
tu_AllocateCommandBuffers(VkDevice _device,
                          const VkCommandBufferAllocateInfo *pAllocateInfo,
                          VkCommandBuffer *pCommandBuffers)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_cmd_pool, pool, pAllocateInfo->commandPool);

   VkResult result = VK_SUCCESS;
   uint32_t i;

   for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {

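      /* Recycle a command buffer from the pool's free list when possible. */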
      if (!list_empty(&pool->free_cmd_buffers)) {
         struct tu_cmd_buffer *cmd_buffer = list_first_entry(
            &pool->free_cmd_buffers, struct tu_cmd_buffer, pool_link);

         list_del(&cmd_buffer->pool_link);
         list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);

         result = tu_reset_cmd_buffer(cmd_buffer);
         cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
         cmd_buffer->level = pAllocateInfo->level;

         pCommandBuffers[i] = tu_cmd_buffer_to_handle(cmd_buffer);
      } else {
         result = tu_create_cmd_buffer(device, pool, pAllocateInfo->level,
                                       &pCommandBuffers[i]);
      }
      if (result != VK_SUCCESS)
         break;
   }

   if (result != VK_SUCCESS) {
      tu_FreeCommandBuffers(_device, pAllocateInfo->commandPool, i,
                            pCommandBuffers);

      /* From the Vulkan 1.0.66 spec:
       *
       *    "vkAllocateCommandBuffers can be used to create multiple
       *     command buffers. If the creation of any of those command
       *     buffers fails, the implementation must destroy all
       *     successfully created command buffer objects from this
       *     command, set all entries of the pCommandBuffers array to
       *     NULL and return the error."
       */
      memset(pCommandBuffers, 0,
             sizeof(*pCommandBuffers) * pAllocateInfo->commandBufferCount);
   }

   return result;
}

void
tu_FreeCommandBuffers(VkDevice device,
                      VkCommandPool commandPool,
                      uint32_t commandBufferCount,
                      const VkCommandBuffer *pCommandBuffers)
{
   for (uint32_t i = 0; i < commandBufferCount; i++) {
      TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, pCommandBuffers[i]);

      if (cmd_buffer) {
         if (cmd_buffer->pool) {
            list_del(&cmd_buffer->pool_link);
            list_addtail(&cmd_buffer->pool_link,
                         &cmd_buffer->pool->free_cmd_buffers);
         } else
            tu_cmd_buffer_destroy(cmd_buffer);
      }
   }
}

VkResult
tu_ResetCommandBuffer(VkCommandBuffer commandBuffer,
                      VkCommandBufferResetFlags flags)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   return tu_reset_cmd_buffer(cmd_buffer);
}

VkResult
tu_BeginCommandBuffer(VkCommandBuffer commandBuffer,
                      const VkCommandBufferBeginInfo *pBeginInfo)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   VkResult result = VK_SUCCESS;

   if (cmd_buffer->status != TU_CMD_BUFFER_STATUS_INITIAL) {
      /* If the command buffer has already been reset with
       * vkResetCommandBuffer, there is no need to do it again.
       */
      result = tu_reset_cmd_buffer(cmd_buffer);
      if (result != VK_SUCCESS)
         return result;
   }

   memset(&cmd_buffer->state, 0, sizeof(cmd_buffer->state));
   cmd_buffer->usage_flags = pBeginInfo->flags;

   /* Set up the initial configuration for the command buffer. */
   if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
      switch (cmd_buffer->queue_family_index) {
      case TU_QUEUE_GENERAL:
         /* init */
         break;
      default:
         break;
      }
   }

   result = tu_cs_begin(cmd_buffer->device, &cmd_buffer->cs, 4096);
   if (result != VK_SUCCESS)
      return result;

   cmd_buffer->status = TU_CMD_BUFFER_STATUS_RECORDING;

   /* Put some stuff in so we do not have empty command buffers. */
   tu_cs_emit_pkt7(&cmd_buffer->cs, CP_NOP, 4);
   tu_cs_emit(&cmd_buffer->cs, 0);
   tu_cs_emit(&cmd_buffer->cs, 0);
   tu_cs_emit(&cmd_buffer->cs, 0);
   tu_cs_emit(&cmd_buffer->cs, 0);

   return VK_SUCCESS;
}

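/* FINISHME: binding of vertex buffers, index buffers, descriptor sets and
 * push constants is not tracked yet; these entry points are stubs.
 */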
void
tu_CmdBindVertexBuffers(VkCommandBuffer commandBuffer,
                        uint32_t firstBinding,
                        uint32_t bindingCount,
                        const VkBuffer *pBuffers,
                        const VkDeviceSize *pOffsets)
{
}

void
tu_CmdBindIndexBuffer(VkCommandBuffer commandBuffer,
                      VkBuffer buffer,
                      VkDeviceSize offset,
                      VkIndexType indexType)
{
}

void
tu_CmdBindDescriptorSets(VkCommandBuffer commandBuffer,
                         VkPipelineBindPoint pipelineBindPoint,
                         VkPipelineLayout _layout,
                         uint32_t firstSet,
                         uint32_t descriptorSetCount,
                         const VkDescriptorSet *pDescriptorSets,
                         uint32_t dynamicOffsetCount,
                         const uint32_t *pDynamicOffsets)
{
}

void
tu_CmdPushConstants(VkCommandBuffer commandBuffer,
                    VkPipelineLayout layout,
                    VkShaderStageFlags stageFlags,
                    uint32_t offset,
                    uint32_t size,
                    const void *pValues)
{
}

VkResult
tu_EndCommandBuffer(VkCommandBuffer commandBuffer)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);

   tu_cs_end(&cmd_buffer->cs);
   cmd_buffer->status = TU_CMD_BUFFER_STATUS_EXECUTABLE;

   return cmd_buffer->record_result;
}

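/* FINISHME: pipeline binding and the dynamic state setters below are stubs
 * for now.
 */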
void
tu_CmdBindPipeline(VkCommandBuffer commandBuffer,
                   VkPipelineBindPoint pipelineBindPoint,
                   VkPipeline _pipeline)
{
}

void
tu_CmdSetViewport(VkCommandBuffer commandBuffer,
                  uint32_t firstViewport,
                  uint32_t viewportCount,
                  const VkViewport *pViewports)
{
}

void
tu_CmdSetScissor(VkCommandBuffer commandBuffer,
                 uint32_t firstScissor,
                 uint32_t scissorCount,
                 const VkRect2D *pScissors)
{
}

void
tu_CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth)
{
}

void
tu_CmdSetDepthBias(VkCommandBuffer commandBuffer,
                   float depthBiasConstantFactor,
                   float depthBiasClamp,
                   float depthBiasSlopeFactor)
{
}

void
tu_CmdSetBlendConstants(VkCommandBuffer commandBuffer,
                        const float blendConstants[4])
{
}

void
tu_CmdSetDepthBounds(VkCommandBuffer commandBuffer,
                     float minDepthBounds,
                     float maxDepthBounds)
{
}

void
tu_CmdSetStencilCompareMask(VkCommandBuffer commandBuffer,
                            VkStencilFaceFlags faceMask,
                            uint32_t compareMask)
{
}

void
tu_CmdSetStencilWriteMask(VkCommandBuffer commandBuffer,
                          VkStencilFaceFlags faceMask,
                          uint32_t writeMask)
{
}

void
tu_CmdSetStencilReference(VkCommandBuffer commandBuffer,
                          VkStencilFaceFlags faceMask,
                          uint32_t reference)
{
}

void
tu_CmdExecuteCommands(VkCommandBuffer commandBuffer,
                      uint32_t commandBufferCount,
                      const VkCommandBuffer *pCmdBuffers)
{
}

VkResult
tu_CreateCommandPool(VkDevice _device,
                     const VkCommandPoolCreateInfo *pCreateInfo,
                     const VkAllocationCallbacks *pAllocator,
                     VkCommandPool *pCmdPool)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_cmd_pool *pool;

   pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (pool == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   if (pAllocator)
      pool->alloc = *pAllocator;
   else
      pool->alloc = device->alloc;

   list_inithead(&pool->cmd_buffers);
   list_inithead(&pool->free_cmd_buffers);

   pool->queue_family_index = pCreateInfo->queueFamilyIndex;

   *pCmdPool = tu_cmd_pool_to_handle(pool);

   return VK_SUCCESS;
}

void
tu_DestroyCommandPool(VkDevice _device,
                      VkCommandPool commandPool,
                      const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);

   if (!pool)
      return;

   list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
                            &pool->cmd_buffers, pool_link)
   {
      tu_cmd_buffer_destroy(cmd_buffer);
   }

   list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
                            &pool->free_cmd_buffers, pool_link)
   {
      tu_cmd_buffer_destroy(cmd_buffer);
   }

   vk_free2(&device->alloc, pAllocator, pool);
}

VkResult
tu_ResetCommandPool(VkDevice device,
                    VkCommandPool commandPool,
                    VkCommandPoolResetFlags flags)
{
   TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);
   VkResult result;

   list_for_each_entry(struct tu_cmd_buffer, cmd_buffer, &pool->cmd_buffers,
                       pool_link)
   {
      result = tu_reset_cmd_buffer(cmd_buffer);
      if (result != VK_SUCCESS)
         return result;
   }

   return VK_SUCCESS;
}

void
tu_TrimCommandPool(VkDevice device,
                   VkCommandPool commandPool,
                   VkCommandPoolTrimFlagsKHR flags)
{
   TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);

   if (!pool)
      return;

   list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
                            &pool->free_cmd_buffers, pool_link)
   {
      tu_cmd_buffer_destroy(cmd_buffer);
   }
}

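/* FINISHME: render passes are not implemented yet; begin/next/end are
 * stubs.
 */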
void
tu_CmdBeginRenderPass(VkCommandBuffer commandBuffer,
                      const VkRenderPassBeginInfo *pRenderPassBegin,
                      VkSubpassContents contents)
{
}

void
tu_CmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer,
                          const VkRenderPassBeginInfo *pRenderPassBeginInfo,
                          const VkSubpassBeginInfoKHR *pSubpassBeginInfo)
{
   tu_CmdBeginRenderPass(commandBuffer, pRenderPassBeginInfo,
                         pSubpassBeginInfo->contents);
}

void
tu_CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents)
{
}

void
tu_CmdNextSubpass2KHR(VkCommandBuffer commandBuffer,
                      const VkSubpassBeginInfoKHR *pSubpassBeginInfo,
                      const VkSubpassEndInfoKHR *pSubpassEndInfo)
{
   tu_CmdNextSubpass(commandBuffer, pSubpassBeginInfo->contents);
}

struct tu_draw_info
{
   /**
    * Number of vertices.
    */
   uint32_t count;

   /**
    * Index of the first vertex.
    */
   int32_t vertex_offset;

   /**
    * First instance id.
    */
   uint32_t first_instance;

   /**
    * Number of instances.
    */
   uint32_t instance_count;

   /**
    * First index (indexed draws only).
    */
   uint32_t first_index;

   /**
    * Whether it's an indexed draw.
    */
   bool indexed;

   /**
    * Indirect draw parameters resource.
    */
   struct tu_buffer *indirect;
   uint64_t indirect_offset;
   uint32_t stride;

   /**
    * Draw count parameters resource.
    */
   struct tu_buffer *count_buffer;
   uint64_t count_buffer_offset;
};

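/* FINISHME: draw emission is a stub; all vkCmdDraw* entry points funnel
 * their parameters into tu_draw_info and end up here.
 */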
static void
tu_draw(struct tu_cmd_buffer *cmd_buffer, const struct tu_draw_info *info)
{
}

void
tu_CmdDraw(VkCommandBuffer commandBuffer,
           uint32_t vertexCount,
           uint32_t instanceCount,
           uint32_t firstVertex,
           uint32_t firstInstance)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_draw_info info = {};

   info.count = vertexCount;
   info.instance_count = instanceCount;
   info.first_instance = firstInstance;
   info.vertex_offset = firstVertex;

   tu_draw(cmd_buffer, &info);
}

void
tu_CmdDrawIndexed(VkCommandBuffer commandBuffer,
                  uint32_t indexCount,
                  uint32_t instanceCount,
                  uint32_t firstIndex,
                  int32_t vertexOffset,
                  uint32_t firstInstance)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_draw_info info = {};

   info.indexed = true;
   info.count = indexCount;
   info.instance_count = instanceCount;
   info.first_index = firstIndex;
   info.vertex_offset = vertexOffset;
   info.first_instance = firstInstance;

   tu_draw(cmd_buffer, &info);
}

void
tu_CmdDrawIndirect(VkCommandBuffer commandBuffer,
                   VkBuffer _buffer,
                   VkDeviceSize offset,
                   uint32_t drawCount,
                   uint32_t stride)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
   struct tu_draw_info info = {};

   info.count = drawCount;
   info.indirect = buffer;
   info.indirect_offset = offset;
   info.stride = stride;

   tu_draw(cmd_buffer, &info);
}

void
tu_CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer,
                          VkBuffer _buffer,
                          VkDeviceSize offset,
                          uint32_t drawCount,
                          uint32_t stride)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
   struct tu_draw_info info = {};

   info.indexed = true;
   info.count = drawCount;
   info.indirect = buffer;
   info.indirect_offset = offset;
   info.stride = stride;

   tu_draw(cmd_buffer, &info);
}

struct tu_dispatch_info
{
   /**
    * Determine the layout of the grid (in block units) to be used.
    */
   uint32_t blocks[3];

   /**
    * A starting offset for the grid. If unaligned is set, the offset
    * must still be aligned.
    */
   uint32_t offsets[3];

   /**
    * Whether it's an unaligned compute dispatch.
    */
   bool unaligned;

   /**
    * Indirect compute parameters resource.
    */
   struct tu_buffer *indirect;
   uint64_t indirect_offset;
};

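/* FINISHME: dispatch emission is a stub; the vkCmdDispatch* entry points
 * funnel their parameters into tu_dispatch_info and end up here.
 */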
static void
tu_dispatch(struct tu_cmd_buffer *cmd_buffer,
            const struct tu_dispatch_info *info)
{
}

void
tu_CmdDispatchBase(VkCommandBuffer commandBuffer,
                   uint32_t base_x,
                   uint32_t base_y,
                   uint32_t base_z,
                   uint32_t x,
                   uint32_t y,
                   uint32_t z)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_dispatch_info info = {};

   info.blocks[0] = x;
   info.blocks[1] = y;
   info.blocks[2] = z;

   info.offsets[0] = base_x;
   info.offsets[1] = base_y;
   info.offsets[2] = base_z;

   tu_dispatch(cmd_buffer, &info);
}

void
tu_CmdDispatch(VkCommandBuffer commandBuffer,
               uint32_t x,
               uint32_t y,
               uint32_t z)
{
   tu_CmdDispatchBase(commandBuffer, 0, 0, 0, x, y, z);
}

void
tu_CmdDispatchIndirect(VkCommandBuffer commandBuffer,
                       VkBuffer _buffer,
                       VkDeviceSize offset)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
   struct tu_dispatch_info info = {};

   info.indirect = buffer;
   info.indirect_offset = offset;

   tu_dispatch(cmd_buffer, &info);
}

void
tu_CmdEndRenderPass(VkCommandBuffer commandBuffer)
{
}

void
tu_CmdEndRenderPass2KHR(VkCommandBuffer commandBuffer,
                        const VkSubpassEndInfoKHR *pSubpassEndInfo)
{
   tu_CmdEndRenderPass(commandBuffer);
}

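/* Parameters shared by vkCmdPipelineBarrier and vkCmdWaitEvents. */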
struct tu_barrier_info
{
   uint32_t eventCount;
   const VkEvent *pEvents;
   VkPipelineStageFlags srcStageMask;
};

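/* FINISHME: barriers are not emitted yet. */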
static void
tu_barrier(struct tu_cmd_buffer *cmd_buffer,
           uint32_t memoryBarrierCount,
           const VkMemoryBarrier *pMemoryBarriers,
           uint32_t bufferMemoryBarrierCount,
           const VkBufferMemoryBarrier *pBufferMemoryBarriers,
           uint32_t imageMemoryBarrierCount,
           const VkImageMemoryBarrier *pImageMemoryBarriers,
           const struct tu_barrier_info *info)
{
}

void
tu_CmdPipelineBarrier(VkCommandBuffer commandBuffer,
                      VkPipelineStageFlags srcStageMask,
                      VkPipelineStageFlags destStageMask,
                      VkBool32 byRegion,
                      uint32_t memoryBarrierCount,
                      const VkMemoryBarrier *pMemoryBarriers,
                      uint32_t bufferMemoryBarrierCount,
                      const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                      uint32_t imageMemoryBarrierCount,
                      const VkImageMemoryBarrier *pImageMemoryBarriers)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_barrier_info info;

   info.eventCount = 0;
   info.pEvents = NULL;
   info.srcStageMask = srcStageMask;

   tu_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
              bufferMemoryBarrierCount, pBufferMemoryBarriers,
              imageMemoryBarrierCount, pImageMemoryBarriers, &info);
}

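/* FINISHME: events are not implemented yet; write_event is a stub. */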
static void
write_event(struct tu_cmd_buffer *cmd_buffer,
            struct tu_event *event,
            VkPipelineStageFlags stageMask,
            unsigned value)
{
}

void
tu_CmdSetEvent(VkCommandBuffer commandBuffer,
               VkEvent _event,
               VkPipelineStageFlags stageMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_event, event, _event);

   write_event(cmd_buffer, event, stageMask, 1);
}

void
tu_CmdResetEvent(VkCommandBuffer commandBuffer,
                 VkEvent _event,
                 VkPipelineStageFlags stageMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_event, event, _event);

   write_event(cmd_buffer, event, stageMask, 0);
}

void
tu_CmdWaitEvents(VkCommandBuffer commandBuffer,
                 uint32_t eventCount,
                 const VkEvent *pEvents,
                 VkPipelineStageFlags srcStageMask,
                 VkPipelineStageFlags dstStageMask,
                 uint32_t memoryBarrierCount,
                 const VkMemoryBarrier *pMemoryBarriers,
                 uint32_t bufferMemoryBarrierCount,
                 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                 uint32_t imageMemoryBarrierCount,
                 const VkImageMemoryBarrier *pImageMemoryBarriers)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_barrier_info info;

   info.eventCount = eventCount;
   info.pEvents = pEvents;
   info.srcStageMask = 0;

   tu_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
              bufferMemoryBarrierCount, pBufferMemoryBarriers,
              imageMemoryBarrierCount, pImageMemoryBarriers, &info);
}

void
tu_CmdSetDeviceMask(VkCommandBuffer commandBuffer, uint32_t deviceMask)
{
   /* No-op */
}