turnip: build drm_msm_gem_submit_bo array directly
[mesa.git] / src / freedreno / vulkan / tu_cmd_buffer.c
/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "tu_private.h"

#include "vk_format.h"
#include "adreno_pm4.xml.h"
#include "tu_cs.h"

void
tu_bo_list_init(struct tu_bo_list *list)
{
   list->count = list->capacity = 0;
   list->bo_infos = NULL;
}

void
tu_bo_list_destroy(struct tu_bo_list *list)
{
   free(list->bo_infos);
}

void
tu_bo_list_reset(struct tu_bo_list *list)
{
   list->count = 0;
}

/**
 * \a flags consists of MSM_SUBMIT_BO_FLAGS. Returns the index of \a bo in
 * the list, or ~0 when growing the list fails. Adding a BO that is already
 * in the list ORs the new flags into the existing entry.
 */
uint32_t
tu_bo_list_add(struct tu_bo_list *list,
               const struct tu_bo *bo,
               uint32_t flags)
{
   uint32_t handle = bo->gem_handle;
   for (uint32_t i = 0; i < list->count; ++i) {
      if (list->bo_infos[i].handle == handle) {
         list->bo_infos[i].flags |= flags;
         return i;
      }
   }

   /* grow list->bo_infos if needed */
   if (list->count == list->capacity) {
      uint32_t new_capacity = MAX2(2 * list->count, 16);
      struct drm_msm_gem_submit_bo *new_bo_infos = realloc(
         list->bo_infos, new_capacity * sizeof(struct drm_msm_gem_submit_bo));
      if (!new_bo_infos)
         return ~0;
      list->bo_infos = new_bo_infos;
      list->capacity = new_capacity;
   }

   uint32_t ret = list->count;
   list->bo_infos[list->count] = (struct drm_msm_gem_submit_bo) {
      .flags = flags,
      .handle = bo->gem_handle,
      .presumed = bo->iova,
   };
   ++list->count;

   return ret;
}
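
/*
 * Because tu_bo_list stores struct drm_msm_gem_submit_bo entries directly,
 * queue submission can point the kernel at the array as-is, with no
 * per-submit conversion pass. A minimal sketch of the submit side, assuming
 * the msm_drm.h UAPI field names (the actual submit code lives outside this
 * file):
 *
 *    struct drm_msm_gem_submit req = {
 *       .nr_bos = cmd_buffer->bo_list.count,
 *       .bos = (uint64_t) (uintptr_t) cmd_buffer->bo_list.bo_infos,
 *    };
 *
 * Callers tag each BO with how the GPU will access it, for example:
 *
 *    tu_bo_list_add(&cmd_buffer->bo_list, &buffer->bo,
 *                   MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE);
 */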

const struct tu_dynamic_state default_dynamic_state = {
   .viewport =
      {
         .count = 0,
      },
   .scissor =
      {
         .count = 0,
      },
   .line_width = 1.0f,
   .depth_bias =
      {
         .bias = 0.0f,
         .clamp = 0.0f,
         .slope = 0.0f,
      },
   .blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
   .depth_bounds =
      {
         .min = 0.0f,
         .max = 1.0f,
      },
   .stencil_compare_mask =
      {
         .front = ~0u,
         .back = ~0u,
      },
   .stencil_write_mask =
      {
         .front = ~0u,
         .back = ~0u,
      },
   .stencil_reference =
      {
         .front = 0u,
         .back = 0u,
      },
};

static void UNUSED /* FINISHME */
tu_bind_dynamic_state(struct tu_cmd_buffer *cmd_buffer,
                      const struct tu_dynamic_state *src)
{
   struct tu_dynamic_state *dest = &cmd_buffer->state.dynamic;
   uint32_t copy_mask = src->mask;
   uint32_t dest_mask = 0;

   tu_use_args(cmd_buffer); /* FINISHME */

   /* Make sure to copy the number of viewports/scissors because they can
    * only be specified at pipeline creation time.
    */
   dest->viewport.count = src->viewport.count;
   dest->scissor.count = src->scissor.count;
   dest->discard_rectangle.count = src->discard_rectangle.count;

   if (copy_mask & TU_DYNAMIC_VIEWPORT) {
      if (memcmp(&dest->viewport.viewports, &src->viewport.viewports,
                 src->viewport.count * sizeof(VkViewport))) {
         typed_memcpy(dest->viewport.viewports, src->viewport.viewports,
                      src->viewport.count);
         dest_mask |= TU_DYNAMIC_VIEWPORT;
      }
   }

   if (copy_mask & TU_DYNAMIC_SCISSOR) {
      if (memcmp(&dest->scissor.scissors, &src->scissor.scissors,
                 src->scissor.count * sizeof(VkRect2D))) {
         typed_memcpy(dest->scissor.scissors, src->scissor.scissors,
                      src->scissor.count);
         dest_mask |= TU_DYNAMIC_SCISSOR;
      }
   }

   if (copy_mask & TU_DYNAMIC_LINE_WIDTH) {
      if (dest->line_width != src->line_width) {
         dest->line_width = src->line_width;
         dest_mask |= TU_DYNAMIC_LINE_WIDTH;
      }
   }

   if (copy_mask & TU_DYNAMIC_DEPTH_BIAS) {
      if (memcmp(&dest->depth_bias, &src->depth_bias,
                 sizeof(src->depth_bias))) {
         dest->depth_bias = src->depth_bias;
         dest_mask |= TU_DYNAMIC_DEPTH_BIAS;
      }
   }

   if (copy_mask & TU_DYNAMIC_BLEND_CONSTANTS) {
      if (memcmp(&dest->blend_constants, &src->blend_constants,
                 sizeof(src->blend_constants))) {
         typed_memcpy(dest->blend_constants, src->blend_constants, 4);
         dest_mask |= TU_DYNAMIC_BLEND_CONSTANTS;
      }
   }

   if (copy_mask & TU_DYNAMIC_DEPTH_BOUNDS) {
      if (memcmp(&dest->depth_bounds, &src->depth_bounds,
                 sizeof(src->depth_bounds))) {
         dest->depth_bounds = src->depth_bounds;
         dest_mask |= TU_DYNAMIC_DEPTH_BOUNDS;
      }
   }

   if (copy_mask & TU_DYNAMIC_STENCIL_COMPARE_MASK) {
      if (memcmp(&dest->stencil_compare_mask, &src->stencil_compare_mask,
                 sizeof(src->stencil_compare_mask))) {
         dest->stencil_compare_mask = src->stencil_compare_mask;
         dest_mask |= TU_DYNAMIC_STENCIL_COMPARE_MASK;
      }
   }

   if (copy_mask & TU_DYNAMIC_STENCIL_WRITE_MASK) {
      if (memcmp(&dest->stencil_write_mask, &src->stencil_write_mask,
                 sizeof(src->stencil_write_mask))) {
         dest->stencil_write_mask = src->stencil_write_mask;
         dest_mask |= TU_DYNAMIC_STENCIL_WRITE_MASK;
      }
   }

   if (copy_mask & TU_DYNAMIC_STENCIL_REFERENCE) {
      if (memcmp(&dest->stencil_reference, &src->stencil_reference,
                 sizeof(src->stencil_reference))) {
         dest->stencil_reference = src->stencil_reference;
         dest_mask |= TU_DYNAMIC_STENCIL_REFERENCE;
      }
   }

   if (copy_mask & TU_DYNAMIC_DISCARD_RECTANGLE) {
      if (memcmp(&dest->discard_rectangle.rectangles,
                 &src->discard_rectangle.rectangles,
                 src->discard_rectangle.count * sizeof(VkRect2D))) {
         typed_memcpy(dest->discard_rectangle.rectangles,
                      src->discard_rectangle.rectangles,
                      src->discard_rectangle.count);
         dest_mask |= TU_DYNAMIC_DISCARD_RECTANGLE;
      }
   }
}

static VkResult
tu_create_cmd_buffer(struct tu_device *device,
                     struct tu_cmd_pool *pool,
                     VkCommandBufferLevel level,
                     VkCommandBuffer *pCommandBuffer)
{
   struct tu_cmd_buffer *cmd_buffer;
   cmd_buffer = vk_zalloc(&pool->alloc, sizeof(*cmd_buffer), 8,
                          VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (cmd_buffer == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   cmd_buffer->device = device;
   cmd_buffer->pool = pool;
   cmd_buffer->level = level;

   if (pool) {
      list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
      cmd_buffer->queue_family_index = pool->queue_family_index;
   } else {
      /* Init the pool_link so we can safely call list_del when we destroy
       * the command buffer
       */
      list_inithead(&cmd_buffer->pool_link);
      cmd_buffer->queue_family_index = TU_QUEUE_GENERAL;
   }

   tu_bo_list_init(&cmd_buffer->bo_list);
   tu_cs_init(&cmd_buffer->cs);

   *pCommandBuffer = tu_cmd_buffer_to_handle(cmd_buffer);

   list_inithead(&cmd_buffer->upload.list);

   return VK_SUCCESS;
}

static void
tu_cmd_buffer_destroy(struct tu_cmd_buffer *cmd_buffer)
{
   list_del(&cmd_buffer->pool_link);

   for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++)
      free(cmd_buffer->descriptors[i].push_set.set.mapped_ptr);

   tu_cs_finish(cmd_buffer->device, &cmd_buffer->cs);
   tu_bo_list_destroy(&cmd_buffer->bo_list);
   vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
}

static VkResult
tu_reset_cmd_buffer(struct tu_cmd_buffer *cmd_buffer)
{
   cmd_buffer->record_result = VK_SUCCESS;

   tu_bo_list_reset(&cmd_buffer->bo_list);
   tu_cs_reset(cmd_buffer->device, &cmd_buffer->cs);

   for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++) {
      cmd_buffer->descriptors[i].dirty = 0;
      cmd_buffer->descriptors[i].valid = 0;
      cmd_buffer->descriptors[i].push_dirty = false;
   }

   cmd_buffer->status = TU_CMD_BUFFER_STATUS_INITIAL;

   return cmd_buffer->record_result;
}

VkResult
tu_AllocateCommandBuffers(VkDevice _device,
                          const VkCommandBufferAllocateInfo *pAllocateInfo,
                          VkCommandBuffer *pCommandBuffers)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_cmd_pool, pool, pAllocateInfo->commandPool);

   VkResult result = VK_SUCCESS;
   uint32_t i;

   for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
      if (!list_empty(&pool->free_cmd_buffers)) {
         struct tu_cmd_buffer *cmd_buffer = list_first_entry(
            &pool->free_cmd_buffers, struct tu_cmd_buffer, pool_link);

         list_del(&cmd_buffer->pool_link);
         list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);

         result = tu_reset_cmd_buffer(cmd_buffer);
         cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
         cmd_buffer->level = pAllocateInfo->level;

         pCommandBuffers[i] = tu_cmd_buffer_to_handle(cmd_buffer);
      } else {
         result = tu_create_cmd_buffer(device, pool, pAllocateInfo->level,
                                       &pCommandBuffers[i]);
      }
      if (result != VK_SUCCESS)
         break;
   }

   if (result != VK_SUCCESS) {
      tu_FreeCommandBuffers(_device, pAllocateInfo->commandPool, i,
                            pCommandBuffers);

      /* From the Vulkan 1.0.66 spec:
       *
       *    "vkAllocateCommandBuffers can be used to create multiple
       *    command buffers. If the creation of any of those command
       *    buffers fails, the implementation must destroy all
       *    successfully created command buffer objects from this
       *    command, set all entries of the pCommandBuffers array to
       *    NULL and return the error."
       */
      memset(pCommandBuffers, 0,
             sizeof(*pCommandBuffers) * pAllocateInfo->commandBufferCount);
   }

   return result;
}

void
tu_FreeCommandBuffers(VkDevice device,
                      VkCommandPool commandPool,
                      uint32_t commandBufferCount,
                      const VkCommandBuffer *pCommandBuffers)
{
   for (uint32_t i = 0; i < commandBufferCount; i++) {
      TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, pCommandBuffers[i]);

      if (cmd_buffer) {
         if (cmd_buffer->pool) {
            list_del(&cmd_buffer->pool_link);
            list_addtail(&cmd_buffer->pool_link,
                         &cmd_buffer->pool->free_cmd_buffers);
         } else
            tu_cmd_buffer_destroy(cmd_buffer);
      }
   }
}

VkResult
tu_ResetCommandBuffer(VkCommandBuffer commandBuffer,
                      VkCommandBufferResetFlags flags)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   return tu_reset_cmd_buffer(cmd_buffer);
}

VkResult
tu_BeginCommandBuffer(VkCommandBuffer commandBuffer,
                      const VkCommandBufferBeginInfo *pBeginInfo)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   VkResult result = VK_SUCCESS;

   if (cmd_buffer->status != TU_CMD_BUFFER_STATUS_INITIAL) {
      /* If the command buffer has already been reset with
       * vkResetCommandBuffer, there is no need to do it again.
       */
      result = tu_reset_cmd_buffer(cmd_buffer);
      if (result != VK_SUCCESS)
         return result;
   }

   memset(&cmd_buffer->state, 0, sizeof(cmd_buffer->state));
   cmd_buffer->usage_flags = pBeginInfo->flags;

   /* setup initial configuration into command buffer */
   if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
      switch (cmd_buffer->queue_family_index) {
      case TU_QUEUE_GENERAL:
         /* init */
         break;
      default:
         break;
      }
   }

   result = tu_cs_begin(cmd_buffer->device, &cmd_buffer->cs, 4096);
   if (result != VK_SUCCESS)
      return result;

   cmd_buffer->status = TU_CMD_BUFFER_STATUS_RECORDING;

   /* Put some stuff in so we do not have empty command buffers. */
   tu_cs_emit_pkt7(&cmd_buffer->cs, CP_NOP, 4);
   tu_cs_emit(&cmd_buffer->cs, 0);
   tu_cs_emit(&cmd_buffer->cs, 0);
   tu_cs_emit(&cmd_buffer->cs, 0);
   tu_cs_emit(&cmd_buffer->cs, 0);

   return VK_SUCCESS;
}

void
tu_CmdBindVertexBuffers(VkCommandBuffer commandBuffer,
                        uint32_t firstBinding,
                        uint32_t bindingCount,
                        const VkBuffer *pBuffers,
                        const VkDeviceSize *pOffsets)
{
}

void
tu_CmdBindIndexBuffer(VkCommandBuffer commandBuffer,
                      VkBuffer buffer,
                      VkDeviceSize offset,
                      VkIndexType indexType)
{
}

void
tu_CmdBindDescriptorSets(VkCommandBuffer commandBuffer,
                         VkPipelineBindPoint pipelineBindPoint,
                         VkPipelineLayout _layout,
                         uint32_t firstSet,
                         uint32_t descriptorSetCount,
                         const VkDescriptorSet *pDescriptorSets,
                         uint32_t dynamicOffsetCount,
                         const uint32_t *pDynamicOffsets)
{
}

void
tu_CmdPushConstants(VkCommandBuffer commandBuffer,
                    VkPipelineLayout layout,
                    VkShaderStageFlags stageFlags,
                    uint32_t offset,
                    uint32_t size,
                    const void *pValues)
{
}

VkResult
tu_EndCommandBuffer(VkCommandBuffer commandBuffer)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);

   VkResult result = tu_cs_end(&cmd_buffer->cs);
   if (result != VK_SUCCESS)
      cmd_buffer->record_result = result;

   cmd_buffer->status = TU_CMD_BUFFER_STATUS_EXECUTABLE;

   return cmd_buffer->record_result;
}

void
tu_CmdBindPipeline(VkCommandBuffer commandBuffer,
                   VkPipelineBindPoint pipelineBindPoint,
                   VkPipeline _pipeline)
{
}

void
tu_CmdSetViewport(VkCommandBuffer commandBuffer,
                  uint32_t firstViewport,
                  uint32_t viewportCount,
                  const VkViewport *pViewports)
{
}

void
tu_CmdSetScissor(VkCommandBuffer commandBuffer,
                 uint32_t firstScissor,
                 uint32_t scissorCount,
                 const VkRect2D *pScissors)
{
}

void
tu_CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth)
{
}

void
tu_CmdSetDepthBias(VkCommandBuffer commandBuffer,
                   float depthBiasConstantFactor,
                   float depthBiasClamp,
                   float depthBiasSlopeFactor)
{
}

void
tu_CmdSetBlendConstants(VkCommandBuffer commandBuffer,
                        const float blendConstants[4])
{
}

void
tu_CmdSetDepthBounds(VkCommandBuffer commandBuffer,
                     float minDepthBounds,
                     float maxDepthBounds)
{
}

void
tu_CmdSetStencilCompareMask(VkCommandBuffer commandBuffer,
                            VkStencilFaceFlags faceMask,
                            uint32_t compareMask)
{
}

void
tu_CmdSetStencilWriteMask(VkCommandBuffer commandBuffer,
                          VkStencilFaceFlags faceMask,
                          uint32_t writeMask)
{
}

void
tu_CmdSetStencilReference(VkCommandBuffer commandBuffer,
                          VkStencilFaceFlags faceMask,
                          uint32_t reference)
{
}

void
tu_CmdExecuteCommands(VkCommandBuffer commandBuffer,
                      uint32_t commandBufferCount,
                      const VkCommandBuffer *pCmdBuffers)
{
}

VkResult
tu_CreateCommandPool(VkDevice _device,
                     const VkCommandPoolCreateInfo *pCreateInfo,
                     const VkAllocationCallbacks *pAllocator,
                     VkCommandPool *pCmdPool)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_cmd_pool *pool;

   pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (pool == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   if (pAllocator)
      pool->alloc = *pAllocator;
   else
      pool->alloc = device->alloc;

   list_inithead(&pool->cmd_buffers);
   list_inithead(&pool->free_cmd_buffers);

   pool->queue_family_index = pCreateInfo->queueFamilyIndex;

   *pCmdPool = tu_cmd_pool_to_handle(pool);

   return VK_SUCCESS;
}

void
tu_DestroyCommandPool(VkDevice _device,
                      VkCommandPool commandPool,
                      const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);

   if (!pool)
      return;

   list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
                            &pool->cmd_buffers, pool_link)
   {
      tu_cmd_buffer_destroy(cmd_buffer);
   }

   list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
                            &pool->free_cmd_buffers, pool_link)
   {
      tu_cmd_buffer_destroy(cmd_buffer);
   }

   vk_free2(&device->alloc, pAllocator, pool);
}

VkResult
tu_ResetCommandPool(VkDevice device,
                    VkCommandPool commandPool,
                    VkCommandPoolResetFlags flags)
{
   TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);
   VkResult result;

   list_for_each_entry(struct tu_cmd_buffer, cmd_buffer, &pool->cmd_buffers,
                       pool_link)
   {
      result = tu_reset_cmd_buffer(cmd_buffer);
      if (result != VK_SUCCESS)
         return result;
   }

   return VK_SUCCESS;
}

void
tu_TrimCommandPool(VkDevice device,
                   VkCommandPool commandPool,
                   VkCommandPoolTrimFlagsKHR flags)
{
   TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);

   if (!pool)
      return;

   list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
                            &pool->free_cmd_buffers, pool_link)
   {
      tu_cmd_buffer_destroy(cmd_buffer);
   }
}

void
tu_CmdBeginRenderPass(VkCommandBuffer commandBuffer,
                      const VkRenderPassBeginInfo *pRenderPassBegin,
                      VkSubpassContents contents)
{
}

void
tu_CmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer,
                          const VkRenderPassBeginInfo *pRenderPassBeginInfo,
                          const VkSubpassBeginInfoKHR *pSubpassBeginInfo)
{
   tu_CmdBeginRenderPass(commandBuffer, pRenderPassBeginInfo,
                         pSubpassBeginInfo->contents);
}

void
tu_CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents)
{
}

void
tu_CmdNextSubpass2KHR(VkCommandBuffer commandBuffer,
                      const VkSubpassBeginInfoKHR *pSubpassBeginInfo,
                      const VkSubpassEndInfoKHR *pSubpassEndInfo)
{
   tu_CmdNextSubpass(commandBuffer, pSubpassBeginInfo->contents);
}

struct tu_draw_info
{
   /**
    * Number of vertices.
    */
   uint32_t count;

   /**
    * Index of the first vertex.
    */
   int32_t vertex_offset;

   /**
    * First instance id.
    */
   uint32_t first_instance;

   /**
    * Number of instances.
    */
   uint32_t instance_count;

   /**
    * First index (indexed draws only).
    */
   uint32_t first_index;

   /**
    * Whether it's an indexed draw.
    */
   bool indexed;

   /**
    * Indirect draw parameters resource.
    */
   struct tu_buffer *indirect;
   uint64_t indirect_offset;
   uint32_t stride;

   /**
    * Draw count parameters resource.
    */
   struct tu_buffer *count_buffer;
   uint64_t count_buffer_offset;
};

static void
tu_draw(struct tu_cmd_buffer *cmd_buffer, const struct tu_draw_info *info)
{
}

void
tu_CmdDraw(VkCommandBuffer commandBuffer,
           uint32_t vertexCount,
           uint32_t instanceCount,
           uint32_t firstVertex,
           uint32_t firstInstance)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_draw_info info = {};

   info.count = vertexCount;
   info.instance_count = instanceCount;
   info.first_instance = firstInstance;
   info.vertex_offset = firstVertex;

   tu_draw(cmd_buffer, &info);
}

void
tu_CmdDrawIndexed(VkCommandBuffer commandBuffer,
                  uint32_t indexCount,
                  uint32_t instanceCount,
                  uint32_t firstIndex,
                  int32_t vertexOffset,
                  uint32_t firstInstance)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_draw_info info = {};

   info.indexed = true;
   info.count = indexCount;
   info.instance_count = instanceCount;
   info.first_index = firstIndex;
   info.vertex_offset = vertexOffset;
   info.first_instance = firstInstance;

   tu_draw(cmd_buffer, &info);
}

void
tu_CmdDrawIndirect(VkCommandBuffer commandBuffer,
                   VkBuffer _buffer,
                   VkDeviceSize offset,
                   uint32_t drawCount,
                   uint32_t stride)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
   struct tu_draw_info info = {};

   info.count = drawCount;
   info.indirect = buffer;
   info.indirect_offset = offset;
   info.stride = stride;

   tu_draw(cmd_buffer, &info);
}

void
tu_CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer,
                          VkBuffer _buffer,
                          VkDeviceSize offset,
                          uint32_t drawCount,
                          uint32_t stride)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
   struct tu_draw_info info = {};

   info.indexed = true;
   info.count = drawCount;
   info.indirect = buffer;
   info.indirect_offset = offset;
   info.stride = stride;

   tu_draw(cmd_buffer, &info);
}

struct tu_dispatch_info
{
   /**
    * Determine the layout of the grid (in block units) to be used.
    */
   uint32_t blocks[3];

   /**
    * A starting offset for the grid. If unaligned is set, the offset
    * must still be aligned.
    */
   uint32_t offsets[3];

   /**
    * Whether it's an unaligned compute dispatch.
    */
   bool unaligned;

   /**
    * Indirect compute parameters resource.
    */
   struct tu_buffer *indirect;
   uint64_t indirect_offset;
};

static void
tu_dispatch(struct tu_cmd_buffer *cmd_buffer,
            const struct tu_dispatch_info *info)
{
}

void
tu_CmdDispatchBase(VkCommandBuffer commandBuffer,
                   uint32_t base_x,
                   uint32_t base_y,
                   uint32_t base_z,
                   uint32_t x,
                   uint32_t y,
                   uint32_t z)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_dispatch_info info = {};

   info.blocks[0] = x;
   info.blocks[1] = y;
   info.blocks[2] = z;

   info.offsets[0] = base_x;
   info.offsets[1] = base_y;
   info.offsets[2] = base_z;
   tu_dispatch(cmd_buffer, &info);
}

void
tu_CmdDispatch(VkCommandBuffer commandBuffer,
               uint32_t x,
               uint32_t y,
               uint32_t z)
{
   tu_CmdDispatchBase(commandBuffer, 0, 0, 0, x, y, z);
}

void
tu_CmdDispatchIndirect(VkCommandBuffer commandBuffer,
                       VkBuffer _buffer,
                       VkDeviceSize offset)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
   struct tu_dispatch_info info = {};

   info.indirect = buffer;
   info.indirect_offset = offset;

   tu_dispatch(cmd_buffer, &info);
}

void
tu_CmdEndRenderPass(VkCommandBuffer commandBuffer)
{
}

void
tu_CmdEndRenderPass2KHR(VkCommandBuffer commandBuffer,
                        const VkSubpassEndInfoKHR *pSubpassEndInfo)
{
   tu_CmdEndRenderPass(commandBuffer);
}

struct tu_barrier_info
{
   uint32_t eventCount;
   const VkEvent *pEvents;
   VkPipelineStageFlags srcStageMask;
};

static void
tu_barrier(struct tu_cmd_buffer *cmd_buffer,
           uint32_t memoryBarrierCount,
           const VkMemoryBarrier *pMemoryBarriers,
           uint32_t bufferMemoryBarrierCount,
           const VkBufferMemoryBarrier *pBufferMemoryBarriers,
           uint32_t imageMemoryBarrierCount,
           const VkImageMemoryBarrier *pImageMemoryBarriers,
           const struct tu_barrier_info *info)
{
}

void
tu_CmdPipelineBarrier(VkCommandBuffer commandBuffer,
                      VkPipelineStageFlags srcStageMask,
                      VkPipelineStageFlags destStageMask,
                      VkBool32 byRegion,
                      uint32_t memoryBarrierCount,
                      const VkMemoryBarrier *pMemoryBarriers,
                      uint32_t bufferMemoryBarrierCount,
                      const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                      uint32_t imageMemoryBarrierCount,
                      const VkImageMemoryBarrier *pImageMemoryBarriers)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_barrier_info info;

   info.eventCount = 0;
   info.pEvents = NULL;
   info.srcStageMask = srcStageMask;

   tu_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
              bufferMemoryBarrierCount, pBufferMemoryBarriers,
              imageMemoryBarrierCount, pImageMemoryBarriers, &info);
}

static void
write_event(struct tu_cmd_buffer *cmd_buffer,
            struct tu_event *event,
            VkPipelineStageFlags stageMask,
            unsigned value)
{
}

void
tu_CmdSetEvent(VkCommandBuffer commandBuffer,
               VkEvent _event,
               VkPipelineStageFlags stageMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_event, event, _event);

   write_event(cmd_buffer, event, stageMask, 1);
}

void
tu_CmdResetEvent(VkCommandBuffer commandBuffer,
                 VkEvent _event,
                 VkPipelineStageFlags stageMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_event, event, _event);

   write_event(cmd_buffer, event, stageMask, 0);
}

void
tu_CmdWaitEvents(VkCommandBuffer commandBuffer,
                 uint32_t eventCount,
                 const VkEvent *pEvents,
                 VkPipelineStageFlags srcStageMask,
                 VkPipelineStageFlags dstStageMask,
                 uint32_t memoryBarrierCount,
                 const VkMemoryBarrier *pMemoryBarriers,
                 uint32_t bufferMemoryBarrierCount,
                 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                 uint32_t imageMemoryBarrierCount,
                 const VkImageMemoryBarrier *pImageMemoryBarriers)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_barrier_info info;

   info.eventCount = eventCount;
   info.pEvents = pEvents;
   info.srcStageMask = 0;

   tu_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
              bufferMemoryBarrierCount, pBufferMemoryBarriers,
              imageMemoryBarrierCount, pImageMemoryBarriers, &info);
}

void
tu_CmdSetDeviceMask(VkCommandBuffer commandBuffer, uint32_t deviceMask)
{
   /* No-op */
}