turnip: add tu_bo_list_merge
[mesa.git] / src / freedreno / vulkan / tu_cmd_buffer.c
1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * based in part on anv driver which is:
6 * Copyright © 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
26 */
27
28 #include "tu_private.h"
29
30 #include "vk_format.h"
31 #include "adreno_pm4.xml.h"
32 #include "tu_cs.h"
33
34 void
35 tu_bo_list_init(struct tu_bo_list *list)
36 {
37 list->count = list->capacity = 0;
38 list->bo_infos = NULL;
39 }
40
41 void
42 tu_bo_list_destroy(struct tu_bo_list *list)
43 {
44 free(list->bo_infos);
45 }
46
/* Forget all tracked BOs but keep the allocated storage for reuse. */
void
tu_bo_list_reset(struct tu_bo_list *list)
{
   list->count = 0;
}
52
53 /**
54 * \a flags consists of MSM_SUBMIT_BO_FLAGS.
55 */
56 static uint32_t
57 tu_bo_list_add_info(struct tu_bo_list *list,
58 const struct drm_msm_gem_submit_bo *bo_info)
59 {
60 for (uint32_t i = 0; i < list->count; ++i) {
61 if (list->bo_infos[i].handle == bo_info->handle) {
62 assert(list->bo_infos[i].presumed == bo_info->presumed);
63 list->bo_infos[i].flags |= bo_info->flags;
64 return i;
65 }
66 }
67
68 /* grow list->bo_infos if needed */
69 if (list->count == list->capacity) {
70 uint32_t new_capacity = MAX2(2 * list->count, 16);
71 struct drm_msm_gem_submit_bo *new_bo_infos = realloc(
72 list->bo_infos, new_capacity * sizeof(struct drm_msm_gem_submit_bo));
73 if (!new_bo_infos)
74 return TU_BO_LIST_FAILED;
75 list->bo_infos = new_bo_infos;
76 list->capacity = new_capacity;
77 }
78
79 list->bo_infos[list->count] = *bo_info;
80 return list->count++;
81 }
82
83 uint32_t
84 tu_bo_list_add(struct tu_bo_list *list,
85 const struct tu_bo *bo,
86 uint32_t flags)
87 {
88 return tu_bo_list_add_info(list, &(struct drm_msm_gem_submit_bo) {
89 .flags = flags,
90 .handle = bo->gem_handle,
91 .presumed = bo->iova,
92 });
93 }
94
95 VkResult
96 tu_bo_list_merge(struct tu_bo_list *list, const struct tu_bo_list *other)
97 {
98 for (uint32_t i = 0; i < other->count; i++) {
99 if (tu_bo_list_add_info(list, other->bo_infos + i) == TU_BO_LIST_FAILED)
100 return VK_ERROR_OUT_OF_HOST_MEMORY;
101 }
102
103 return VK_SUCCESS;
104 }
105
/* Default values for all Vulkan dynamic state, used to initialize a command
 * buffer's dynamic state before any pipeline or vkCmdSet* call overrides it.
 * Values follow the Vulkan defaults (line width 1.0, depth bounds [0, 1],
 * all stencil masks enabled).
 */
const struct tu_dynamic_state default_dynamic_state = {
   .viewport =
      {
         .count = 0,
      },
   .scissor =
      {
         .count = 0,
      },
   .line_width = 1.0f,
   .depth_bias =
      {
         .bias = 0.0f,
         .clamp = 0.0f,
         .slope = 0.0f,
      },
   .blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
   .depth_bounds =
      {
         .min = 0.0f,
         .max = 1.0f,
      },
   .stencil_compare_mask =
      {
         .front = ~0u,
         .back = ~0u,
      },
   .stencil_write_mask =
      {
         .front = ~0u,
         .back = ~0u,
      },
   .stencil_reference =
      {
         .front = 0u,
         .back = 0u,
      },
};
144
/* Copy the dynamic state selected by src->mask from \a src into the command
 * buffer's current dynamic state, skipping pieces that already match.
 *
 * NOTE(review): dest_mask accumulates which pieces actually changed but is
 * never consumed here — presumably it will feed dirty-state tracking once
 * the FINISHME is resolved; confirm when this function is wired up.
 */
static void UNUSED /* FINISHME */
tu_bind_dynamic_state(struct tu_cmd_buffer *cmd_buffer,
                      const struct tu_dynamic_state *src)
{
   struct tu_dynamic_state *dest = &cmd_buffer->state.dynamic;
   uint32_t copy_mask = src->mask;
   uint32_t dest_mask = 0;

   tu_use_args(cmd_buffer); /* FINISHME */

   /* Make sure to copy the number of viewports/scissors because they can
    * only be specified at pipeline creation time.
    */
   dest->viewport.count = src->viewport.count;
   dest->scissor.count = src->scissor.count;
   dest->discard_rectangle.count = src->discard_rectangle.count;

   /* Each piece below: memcmp first so dest_mask only records real changes. */
   if (copy_mask & TU_DYNAMIC_VIEWPORT) {
      if (memcmp(&dest->viewport.viewports, &src->viewport.viewports,
                 src->viewport.count * sizeof(VkViewport))) {
         typed_memcpy(dest->viewport.viewports, src->viewport.viewports,
                      src->viewport.count);
         dest_mask |= TU_DYNAMIC_VIEWPORT;
      }
   }

   if (copy_mask & TU_DYNAMIC_SCISSOR) {
      if (memcmp(&dest->scissor.scissors, &src->scissor.scissors,
                 src->scissor.count * sizeof(VkRect2D))) {
         typed_memcpy(dest->scissor.scissors, src->scissor.scissors,
                      src->scissor.count);
         dest_mask |= TU_DYNAMIC_SCISSOR;
      }
   }

   if (copy_mask & TU_DYNAMIC_LINE_WIDTH) {
      if (dest->line_width != src->line_width) {
         dest->line_width = src->line_width;
         dest_mask |= TU_DYNAMIC_LINE_WIDTH;
      }
   }

   if (copy_mask & TU_DYNAMIC_DEPTH_BIAS) {
      if (memcmp(&dest->depth_bias, &src->depth_bias,
                 sizeof(src->depth_bias))) {
         dest->depth_bias = src->depth_bias;
         dest_mask |= TU_DYNAMIC_DEPTH_BIAS;
      }
   }

   if (copy_mask & TU_DYNAMIC_BLEND_CONSTANTS) {
      if (memcmp(&dest->blend_constants, &src->blend_constants,
                 sizeof(src->blend_constants))) {
         typed_memcpy(dest->blend_constants, src->blend_constants, 4);
         dest_mask |= TU_DYNAMIC_BLEND_CONSTANTS;
      }
   }

   if (copy_mask & TU_DYNAMIC_DEPTH_BOUNDS) {
      if (memcmp(&dest->depth_bounds, &src->depth_bounds,
                 sizeof(src->depth_bounds))) {
         dest->depth_bounds = src->depth_bounds;
         dest_mask |= TU_DYNAMIC_DEPTH_BOUNDS;
      }
   }

   if (copy_mask & TU_DYNAMIC_STENCIL_COMPARE_MASK) {
      if (memcmp(&dest->stencil_compare_mask, &src->stencil_compare_mask,
                 sizeof(src->stencil_compare_mask))) {
         dest->stencil_compare_mask = src->stencil_compare_mask;
         dest_mask |= TU_DYNAMIC_STENCIL_COMPARE_MASK;
      }
   }

   if (copy_mask & TU_DYNAMIC_STENCIL_WRITE_MASK) {
      if (memcmp(&dest->stencil_write_mask, &src->stencil_write_mask,
                 sizeof(src->stencil_write_mask))) {
         dest->stencil_write_mask = src->stencil_write_mask;
         dest_mask |= TU_DYNAMIC_STENCIL_WRITE_MASK;
      }
   }

   if (copy_mask & TU_DYNAMIC_STENCIL_REFERENCE) {
      if (memcmp(&dest->stencil_reference, &src->stencil_reference,
                 sizeof(src->stencil_reference))) {
         dest->stencil_reference = src->stencil_reference;
         dest_mask |= TU_DYNAMIC_STENCIL_REFERENCE;
      }
   }

   if (copy_mask & TU_DYNAMIC_DISCARD_RECTANGLE) {
      if (memcmp(&dest->discard_rectangle.rectangles,
                 &src->discard_rectangle.rectangles,
                 src->discard_rectangle.count * sizeof(VkRect2D))) {
         typed_memcpy(dest->discard_rectangle.rectangles,
                      src->discard_rectangle.rectangles,
                      src->discard_rectangle.count);
         dest_mask |= TU_DYNAMIC_DISCARD_RECTANGLE;
      }
   }
}
246
247 static VkResult
248 tu_create_cmd_buffer(struct tu_device *device,
249 struct tu_cmd_pool *pool,
250 VkCommandBufferLevel level,
251 VkCommandBuffer *pCommandBuffer)
252 {
253 struct tu_cmd_buffer *cmd_buffer;
254 cmd_buffer = vk_zalloc(&pool->alloc, sizeof(*cmd_buffer), 8,
255 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
256 if (cmd_buffer == NULL)
257 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
258
259 cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
260 cmd_buffer->device = device;
261 cmd_buffer->pool = pool;
262 cmd_buffer->level = level;
263
264 if (pool) {
265 list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
266 cmd_buffer->queue_family_index = pool->queue_family_index;
267
268 } else {
269 /* Init the pool_link so we can safely call list_del when we destroy
270 * the command buffer
271 */
272 list_inithead(&cmd_buffer->pool_link);
273 cmd_buffer->queue_family_index = TU_QUEUE_GENERAL;
274 }
275
276 tu_bo_list_init(&cmd_buffer->bo_list);
277 tu_cs_init(&cmd_buffer->cs);
278
279 *pCommandBuffer = tu_cmd_buffer_to_handle(cmd_buffer);
280
281 list_inithead(&cmd_buffer->upload.list);
282
283 return VK_SUCCESS;
284 }
285
286 static void
287 tu_cmd_buffer_destroy(struct tu_cmd_buffer *cmd_buffer)
288 {
289 list_del(&cmd_buffer->pool_link);
290
291 for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++)
292 free(cmd_buffer->descriptors[i].push_set.set.mapped_ptr);
293
294 tu_cs_finish(cmd_buffer->device, &cmd_buffer->cs);
295 tu_bo_list_destroy(&cmd_buffer->bo_list);
296 vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
297 }
298
299 static VkResult
300 tu_reset_cmd_buffer(struct tu_cmd_buffer *cmd_buffer)
301 {
302 cmd_buffer->record_result = VK_SUCCESS;
303
304 tu_bo_list_reset(&cmd_buffer->bo_list);
305 tu_cs_reset(cmd_buffer->device, &cmd_buffer->cs);
306
307 for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++) {
308 cmd_buffer->descriptors[i].dirty = 0;
309 cmd_buffer->descriptors[i].valid = 0;
310 cmd_buffer->descriptors[i].push_dirty = false;
311 }
312
313 cmd_buffer->status = TU_CMD_BUFFER_STATUS_INITIAL;
314
315 return cmd_buffer->record_result;
316 }
317
/* vkAllocateCommandBuffers: hand out command buffers, recycling entries from
 * the pool's free list before allocating fresh ones.  On any failure, all
 * buffers created by this call are released and the output array is zeroed,
 * as the spec requires.
 */
VkResult
tu_AllocateCommandBuffers(VkDevice _device,
                          const VkCommandBufferAllocateInfo *pAllocateInfo,
                          VkCommandBuffer *pCommandBuffers)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_cmd_pool, pool, pAllocateInfo->commandPool);

   VkResult result = VK_SUCCESS;
   uint32_t i;

   for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {

      if (!list_empty(&pool->free_cmd_buffers)) {
         /* Recycle: move a previously-freed buffer back to the live list
          * and reset it instead of allocating a new one.
          */
         struct tu_cmd_buffer *cmd_buffer = list_first_entry(
            &pool->free_cmd_buffers, struct tu_cmd_buffer, pool_link);

         list_del(&cmd_buffer->pool_link);
         list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);

         result = tu_reset_cmd_buffer(cmd_buffer);
         cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
         cmd_buffer->level = pAllocateInfo->level;

         pCommandBuffers[i] = tu_cmd_buffer_to_handle(cmd_buffer);
      } else {
         result = tu_create_cmd_buffer(device, pool, pAllocateInfo->level,
                                       &pCommandBuffers[i]);
      }
      if (result != VK_SUCCESS)
         break;
   }

   if (result != VK_SUCCESS) {
      /* Release the i buffers created before the failure. */
      tu_FreeCommandBuffers(_device, pAllocateInfo->commandPool, i,
                            pCommandBuffers);

      /* From the Vulkan 1.0.66 spec:
       *
       *    "vkAllocateCommandBuffers can be used to create multiple
       *     command buffers. If the creation of any of those command
       *     buffers fails, the implementation must destroy all
       *     successfully created command buffer objects from this
       *     command, set all entries of the pCommandBuffers array to
       *     NULL and return the error."
       */
      memset(pCommandBuffers, 0,
             sizeof(*pCommandBuffers) * pAllocateInfo->commandBufferCount);
   }

   return result;
}
370
371 void
372 tu_FreeCommandBuffers(VkDevice device,
373 VkCommandPool commandPool,
374 uint32_t commandBufferCount,
375 const VkCommandBuffer *pCommandBuffers)
376 {
377 for (uint32_t i = 0; i < commandBufferCount; i++) {
378 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, pCommandBuffers[i]);
379
380 if (cmd_buffer) {
381 if (cmd_buffer->pool) {
382 list_del(&cmd_buffer->pool_link);
383 list_addtail(&cmd_buffer->pool_link,
384 &cmd_buffer->pool->free_cmd_buffers);
385 } else
386 tu_cmd_buffer_destroy(cmd_buffer);
387 }
388 }
389 }
390
/* vkResetCommandBuffer.
 *
 * NOTE(review): \a flags (e.g. RELEASE_RESOURCES_BIT) is currently ignored;
 * the reset always releases/reuses internal storage the same way.
 */
VkResult
tu_ResetCommandBuffer(VkCommandBuffer commandBuffer,
                      VkCommandBufferResetFlags flags)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   return tu_reset_cmd_buffer(cmd_buffer);
}
398
/* vkBeginCommandBuffer: reset the buffer if needed, clear recorded state,
 * start the command stream, and emit a small NOP packet so the IB is never
 * empty.
 */
VkResult
tu_BeginCommandBuffer(VkCommandBuffer commandBuffer,
                      const VkCommandBufferBeginInfo *pBeginInfo)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   VkResult result = VK_SUCCESS;

   if (cmd_buffer->status != TU_CMD_BUFFER_STATUS_INITIAL) {
      /* If the command buffer has already been reset with
       * vkResetCommandBuffer, no need to do it again.
       */
      result = tu_reset_cmd_buffer(cmd_buffer);
      if (result != VK_SUCCESS)
         return result;
   }

   memset(&cmd_buffer->state, 0, sizeof(cmd_buffer->state));
   cmd_buffer->usage_flags = pBeginInfo->flags;

   /* setup initial configuration into command buffer */
   if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
      switch (cmd_buffer->queue_family_index) {
      case TU_QUEUE_GENERAL:
         /* init */
         break;
      default:
         break;
      }
   }

   /* Reserve an initial chunk of command-stream space. */
   result = tu_cs_begin(cmd_buffer->device, &cmd_buffer->cs, 4096);
   if (result != VK_SUCCESS)
      return result;

   cmd_buffer->status = TU_CMD_BUFFER_STATUS_RECORDING;

   /* Put some stuff in so we do not have empty command buffers. */
   tu_cs_emit_pkt7(&cmd_buffer->cs, CP_NOP, 4);
   tu_cs_emit(&cmd_buffer->cs, 0);
   tu_cs_emit(&cmd_buffer->cs, 0);
   tu_cs_emit(&cmd_buffer->cs, 0);
   tu_cs_emit(&cmd_buffer->cs, 0);

   return VK_SUCCESS;
}
444
/* vkCmdBindVertexBuffers — FINISHME: stub, not yet implemented. */
void
tu_CmdBindVertexBuffers(VkCommandBuffer commandBuffer,
                        uint32_t firstBinding,
                        uint32_t bindingCount,
                        const VkBuffer *pBuffers,
                        const VkDeviceSize *pOffsets)
{
}
453
/* vkCmdBindIndexBuffer — FINISHME: stub, not yet implemented. */
void
tu_CmdBindIndexBuffer(VkCommandBuffer commandBuffer,
                      VkBuffer buffer,
                      VkDeviceSize offset,
                      VkIndexType indexType)
{
}
461
/* vkCmdBindDescriptorSets — FINISHME: stub, not yet implemented. */
void
tu_CmdBindDescriptorSets(VkCommandBuffer commandBuffer,
                         VkPipelineBindPoint pipelineBindPoint,
                         VkPipelineLayout _layout,
                         uint32_t firstSet,
                         uint32_t descriptorSetCount,
                         const VkDescriptorSet *pDescriptorSets,
                         uint32_t dynamicOffsetCount,
                         const uint32_t *pDynamicOffsets)
{
}
473
/* vkCmdPushConstants — FINISHME: stub, not yet implemented. */
void
tu_CmdPushConstants(VkCommandBuffer commandBuffer,
                    VkPipelineLayout layout,
                    VkShaderStageFlags stageFlags,
                    uint32_t offset,
                    uint32_t size,
                    const void *pValues)
{
}
483
484 VkResult
485 tu_EndCommandBuffer(VkCommandBuffer commandBuffer)
486 {
487 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
488
489 VkResult result = tu_cs_end(&cmd_buffer->cs);
490 if (result != VK_SUCCESS)
491 cmd_buffer->record_result = result;
492
493 cmd_buffer->status = TU_CMD_BUFFER_STATUS_EXECUTABLE;
494
495 return cmd_buffer->record_result;
496 }
497
/* vkCmdBindPipeline — FINISHME: stub, not yet implemented. */
void
tu_CmdBindPipeline(VkCommandBuffer commandBuffer,
                   VkPipelineBindPoint pipelineBindPoint,
                   VkPipeline _pipeline)
{
}
504
/* vkCmdSetViewport — FINISHME: stub, not yet implemented. */
void
tu_CmdSetViewport(VkCommandBuffer commandBuffer,
                  uint32_t firstViewport,
                  uint32_t viewportCount,
                  const VkViewport *pViewports)
{
}
512
/* vkCmdSetScissor — FINISHME: stub, not yet implemented. */
void
tu_CmdSetScissor(VkCommandBuffer commandBuffer,
                 uint32_t firstScissor,
                 uint32_t scissorCount,
                 const VkRect2D *pScissors)
{
}
520
/* vkCmdSetLineWidth — FINISHME: stub, not yet implemented. */
void
tu_CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth)
{
}
525
/* vkCmdSetDepthBias — FINISHME: stub, not yet implemented. */
void
tu_CmdSetDepthBias(VkCommandBuffer commandBuffer,
                   float depthBiasConstantFactor,
                   float depthBiasClamp,
                   float depthBiasSlopeFactor)
{
}
533
/* vkCmdSetBlendConstants — FINISHME: stub, not yet implemented. */
void
tu_CmdSetBlendConstants(VkCommandBuffer commandBuffer,
                        const float blendConstants[4])
{
}
539
/* vkCmdSetDepthBounds — FINISHME: stub, not yet implemented. */
void
tu_CmdSetDepthBounds(VkCommandBuffer commandBuffer,
                     float minDepthBounds,
                     float maxDepthBounds)
{
}
546
/* vkCmdSetStencilCompareMask — FINISHME: stub, not yet implemented. */
void
tu_CmdSetStencilCompareMask(VkCommandBuffer commandBuffer,
                            VkStencilFaceFlags faceMask,
                            uint32_t compareMask)
{
}
553
/* vkCmdSetStencilWriteMask — FINISHME: stub, not yet implemented. */
void
tu_CmdSetStencilWriteMask(VkCommandBuffer commandBuffer,
                          VkStencilFaceFlags faceMask,
                          uint32_t writeMask)
{
}
560
/* vkCmdSetStencilReference — FINISHME: stub, not yet implemented. */
void
tu_CmdSetStencilReference(VkCommandBuffer commandBuffer,
                          VkStencilFaceFlags faceMask,
                          uint32_t reference)
{
}
567
/* vkCmdExecuteCommands — FINISHME: stub, secondary command buffers are not
 * yet executed.
 */
void
tu_CmdExecuteCommands(VkCommandBuffer commandBuffer,
                      uint32_t commandBufferCount,
                      const VkCommandBuffer *pCmdBuffers)
{
}
574
575 VkResult
576 tu_CreateCommandPool(VkDevice _device,
577 const VkCommandPoolCreateInfo *pCreateInfo,
578 const VkAllocationCallbacks *pAllocator,
579 VkCommandPool *pCmdPool)
580 {
581 TU_FROM_HANDLE(tu_device, device, _device);
582 struct tu_cmd_pool *pool;
583
584 pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
585 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
586 if (pool == NULL)
587 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
588
589 if (pAllocator)
590 pool->alloc = *pAllocator;
591 else
592 pool->alloc = device->alloc;
593
594 list_inithead(&pool->cmd_buffers);
595 list_inithead(&pool->free_cmd_buffers);
596
597 pool->queue_family_index = pCreateInfo->queueFamilyIndex;
598
599 *pCmdPool = tu_cmd_pool_to_handle(pool);
600
601 return VK_SUCCESS;
602 }
603
/* vkDestroyCommandPool: destroy every command buffer still owned by the pool
 * (both live and recycled) and then free the pool itself.  Destroying a NULL
 * pool is a no-op, per the spec.
 */
void
tu_DestroyCommandPool(VkDevice _device,
                      VkCommandPool commandPool,
                      const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);

   if (!pool)
      return;

   /* _safe variants: tu_cmd_buffer_destroy unlinks entries as we iterate. */
   list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
                            &pool->cmd_buffers, pool_link)
   {
      tu_cmd_buffer_destroy(cmd_buffer);
   }

   list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
                            &pool->free_cmd_buffers, pool_link)
   {
      tu_cmd_buffer_destroy(cmd_buffer);
   }

   vk_free2(&device->alloc, pAllocator, pool);
}
629
/* vkResetCommandPool: reset every live command buffer in the pool back to
 * the INITIAL state, stopping at the first failure.
 *
 * NOTE(review): \a flags (RELEASE_RESOURCES_BIT) is currently ignored.
 */
VkResult
tu_ResetCommandPool(VkDevice device,
                    VkCommandPool commandPool,
                    VkCommandPoolResetFlags flags)
{
   TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);
   VkResult result;

   list_for_each_entry(struct tu_cmd_buffer, cmd_buffer, &pool->cmd_buffers,
                       pool_link)
   {
      result = tu_reset_cmd_buffer(cmd_buffer);
      if (result != VK_SUCCESS)
         return result;
   }

   return VK_SUCCESS;
}
648
/* vkTrimCommandPoolKHR: release the memory of recycled (freed) command
 * buffers; live command buffers are untouched.  NULL pool is a no-op.
 */
void
tu_TrimCommandPool(VkDevice device,
                   VkCommandPool commandPool,
                   VkCommandPoolTrimFlagsKHR flags)
{
   TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);

   if (!pool)
      return;

   /* _safe variant: tu_cmd_buffer_destroy unlinks entries as we iterate. */
   list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
                            &pool->free_cmd_buffers, pool_link)
   {
      tu_cmd_buffer_destroy(cmd_buffer);
   }
}
665
/* vkCmdBeginRenderPass — FINISHME: stub, not yet implemented. */
void
tu_CmdBeginRenderPass(VkCommandBuffer commandBuffer,
                      const VkRenderPassBeginInfo *pRenderPassBegin,
                      VkSubpassContents contents)
{
}
672
/* vkCmdBeginRenderPass2KHR: thin wrapper that forwards to the 1.0
 * entry point using the contents from pSubpassBeginInfo.
 */
void
tu_CmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer,
                          const VkRenderPassBeginInfo *pRenderPassBeginInfo,
                          const VkSubpassBeginInfoKHR *pSubpassBeginInfo)
{
   tu_CmdBeginRenderPass(commandBuffer, pRenderPassBeginInfo,
                         pSubpassBeginInfo->contents);
}
681
/* vkCmdNextSubpass — FINISHME: stub, not yet implemented. */
void
tu_CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents)
{
}
686
/* vkCmdNextSubpass2KHR: thin wrapper forwarding to the 1.0 entry point. */
void
tu_CmdNextSubpass2KHR(VkCommandBuffer commandBuffer,
                      const VkSubpassBeginInfoKHR *pSubpassBeginInfo,
                      const VkSubpassEndInfoKHR *pSubpassEndInfo)
{
   tu_CmdNextSubpass(commandBuffer, pSubpassBeginInfo->contents);
}
694
/* Unified parameter block for all vkCmdDraw* variants, consumed by tu_draw. */
struct tu_draw_info
{
   /**
    * Number of vertices (or indices for indexed draws, or records for
    * indirect draws).
    */
   uint32_t count;

   /**
    * Index of the first vertex.
    */
   int32_t vertex_offset;

   /**
    * First instance id.
    */
   uint32_t first_instance;

   /**
    * Number of instances.
    */
   uint32_t instance_count;

   /**
    * First index (indexed draws only).
    */
   uint32_t first_index;

   /**
    * Whether it's an indexed draw.
    */
   bool indexed;

   /**
    * Indirect draw parameters resource.
    */
   struct tu_buffer *indirect;
   uint64_t indirect_offset;
   uint32_t stride;

   /**
    * Draw count parameters resource.
    */
   struct tu_buffer *count_buffer;
   uint64_t count_buffer_offset;
};
740
/* Common draw path for all vkCmdDraw* entry points — FINISHME: stub, no
 * commands are emitted yet.
 */
static void
tu_draw(struct tu_cmd_buffer *cmd_buffer, const struct tu_draw_info *info)
{
}
745
746 void
747 tu_CmdDraw(VkCommandBuffer commandBuffer,
748 uint32_t vertexCount,
749 uint32_t instanceCount,
750 uint32_t firstVertex,
751 uint32_t firstInstance)
752 {
753 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
754 struct tu_draw_info info = {};
755
756 info.count = vertexCount;
757 info.instance_count = instanceCount;
758 info.first_instance = firstInstance;
759 info.vertex_offset = firstVertex;
760
761 tu_draw(cmd_buffer, &info);
762 }
763
764 void
765 tu_CmdDrawIndexed(VkCommandBuffer commandBuffer,
766 uint32_t indexCount,
767 uint32_t instanceCount,
768 uint32_t firstIndex,
769 int32_t vertexOffset,
770 uint32_t firstInstance)
771 {
772 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
773 struct tu_draw_info info = {};
774
775 info.indexed = true;
776 info.count = indexCount;
777 info.instance_count = instanceCount;
778 info.first_index = firstIndex;
779 info.vertex_offset = vertexOffset;
780 info.first_instance = firstInstance;
781
782 tu_draw(cmd_buffer, &info);
783 }
784
785 void
786 tu_CmdDrawIndirect(VkCommandBuffer commandBuffer,
787 VkBuffer _buffer,
788 VkDeviceSize offset,
789 uint32_t drawCount,
790 uint32_t stride)
791 {
792 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
793 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
794 struct tu_draw_info info = {};
795
796 info.count = drawCount;
797 info.indirect = buffer;
798 info.indirect_offset = offset;
799 info.stride = stride;
800
801 tu_draw(cmd_buffer, &info);
802 }
803
804 void
805 tu_CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer,
806 VkBuffer _buffer,
807 VkDeviceSize offset,
808 uint32_t drawCount,
809 uint32_t stride)
810 {
811 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
812 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
813 struct tu_draw_info info = {};
814
815 info.indexed = true;
816 info.count = drawCount;
817 info.indirect = buffer;
818 info.indirect_offset = offset;
819 info.stride = stride;
820
821 tu_draw(cmd_buffer, &info);
822 }
823
/* Unified parameter block for all vkCmdDispatch* variants, consumed by
 * tu_dispatch.
 */
struct tu_dispatch_info
{
   /**
    * Determine the layout of the grid (in block units) to be used.
    */
   uint32_t blocks[3];

   /**
    * A starting offset for the grid. If unaligned is set, the offset
    * must still be aligned.
    */
   uint32_t offsets[3];
   /**
    * Whether it's an unaligned compute dispatch.
    */
   bool unaligned;

   /**
    * Indirect compute parameters resource.
    */
   struct tu_buffer *indirect;
   uint64_t indirect_offset;
};
847
/* Common dispatch path for all vkCmdDispatch* entry points — FINISHME:
 * stub, no commands are emitted yet.
 */
static void
tu_dispatch(struct tu_cmd_buffer *cmd_buffer,
            const struct tu_dispatch_info *info)
{
}
853
854 void
855 tu_CmdDispatchBase(VkCommandBuffer commandBuffer,
856 uint32_t base_x,
857 uint32_t base_y,
858 uint32_t base_z,
859 uint32_t x,
860 uint32_t y,
861 uint32_t z)
862 {
863 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
864 struct tu_dispatch_info info = {};
865
866 info.blocks[0] = x;
867 info.blocks[1] = y;
868 info.blocks[2] = z;
869
870 info.offsets[0] = base_x;
871 info.offsets[1] = base_y;
872 info.offsets[2] = base_z;
873 tu_dispatch(cmd_buffer, &info);
874 }
875
/* vkCmdDispatch: thin wrapper over vkCmdDispatchBase with zero offsets. */
void
tu_CmdDispatch(VkCommandBuffer commandBuffer,
               uint32_t x,
               uint32_t y,
               uint32_t z)
{
   tu_CmdDispatchBase(commandBuffer, 0, 0, 0, x, y, z);
}
884
885 void
886 tu_CmdDispatchIndirect(VkCommandBuffer commandBuffer,
887 VkBuffer _buffer,
888 VkDeviceSize offset)
889 {
890 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
891 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
892 struct tu_dispatch_info info = {};
893
894 info.indirect = buffer;
895 info.indirect_offset = offset;
896
897 tu_dispatch(cmd_buffer, &info);
898 }
899
/* vkCmdEndRenderPass — FINISHME: stub, not yet implemented. */
void
tu_CmdEndRenderPass(VkCommandBuffer commandBuffer)
{
}
904
/* vkCmdEndRenderPass2KHR: thin wrapper forwarding to the 1.0 entry point. */
void
tu_CmdEndRenderPass2KHR(VkCommandBuffer commandBuffer,
                        const VkSubpassEndInfoKHR *pSubpassEndInfo)
{
   tu_CmdEndRenderPass(commandBuffer);
}
911
/* Extra barrier parameters shared by vkCmdPipelineBarrier (no events) and
 * vkCmdWaitEvents (no source stage mask), consumed by tu_barrier.
 */
struct tu_barrier_info
{
   uint32_t eventCount;
   const VkEvent *pEvents;
   VkPipelineStageFlags srcStageMask;
};
918
/* Common barrier path for vkCmdPipelineBarrier / vkCmdWaitEvents —
 * FINISHME: stub, no synchronization commands are emitted yet.
 */
static void
tu_barrier(struct tu_cmd_buffer *cmd_buffer,
           uint32_t memoryBarrierCount,
           const VkMemoryBarrier *pMemoryBarriers,
           uint32_t bufferMemoryBarrierCount,
           const VkBufferMemoryBarrier *pBufferMemoryBarriers,
           uint32_t imageMemoryBarrierCount,
           const VkImageMemoryBarrier *pImageMemoryBarriers,
           const struct tu_barrier_info *info)
{
}
930
931 void
932 tu_CmdPipelineBarrier(VkCommandBuffer commandBuffer,
933 VkPipelineStageFlags srcStageMask,
934 VkPipelineStageFlags destStageMask,
935 VkBool32 byRegion,
936 uint32_t memoryBarrierCount,
937 const VkMemoryBarrier *pMemoryBarriers,
938 uint32_t bufferMemoryBarrierCount,
939 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
940 uint32_t imageMemoryBarrierCount,
941 const VkImageMemoryBarrier *pImageMemoryBarriers)
942 {
943 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
944 struct tu_barrier_info info;
945
946 info.eventCount = 0;
947 info.pEvents = NULL;
948 info.srcStageMask = srcStageMask;
949
950 tu_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
951 bufferMemoryBarrierCount, pBufferMemoryBarriers,
952 imageMemoryBarrierCount, pImageMemoryBarriers, &info);
953 }
954
/* Emit commands to set an event object to \a value after \a stageMask
 * completes — FINISHME: stub, not yet implemented.
 */
static void
write_event(struct tu_cmd_buffer *cmd_buffer,
            struct tu_event *event,
            VkPipelineStageFlags stageMask,
            unsigned value)
{
}
962
/* vkCmdSetEvent: signal the event (value 1) once stageMask completes. */
void
tu_CmdSetEvent(VkCommandBuffer commandBuffer,
               VkEvent _event,
               VkPipelineStageFlags stageMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_event, event, _event);

   write_event(cmd_buffer, event, stageMask, 1);
}
973
/* vkCmdResetEvent: unsignal the event (value 0) once stageMask completes. */
void
tu_CmdResetEvent(VkCommandBuffer commandBuffer,
                 VkEvent _event,
                 VkPipelineStageFlags stageMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_event, event, _event);

   write_event(cmd_buffer, event, stageMask, 0);
}
984
985 void
986 tu_CmdWaitEvents(VkCommandBuffer commandBuffer,
987 uint32_t eventCount,
988 const VkEvent *pEvents,
989 VkPipelineStageFlags srcStageMask,
990 VkPipelineStageFlags dstStageMask,
991 uint32_t memoryBarrierCount,
992 const VkMemoryBarrier *pMemoryBarriers,
993 uint32_t bufferMemoryBarrierCount,
994 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
995 uint32_t imageMemoryBarrierCount,
996 const VkImageMemoryBarrier *pImageMemoryBarriers)
997 {
998 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
999 struct tu_barrier_info info;
1000
1001 info.eventCount = eventCount;
1002 info.pEvents = pEvents;
1003 info.srcStageMask = 0;
1004
1005 tu_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
1006 bufferMemoryBarrierCount, pBufferMemoryBarriers,
1007 imageMemoryBarrierCount, pImageMemoryBarriers, &info);
1008 }
1009
/* vkCmdSetDeviceMask: no-op — single-GPU implementation, device mask has no
 * effect.
 */
void
tu_CmdSetDeviceMask(VkCommandBuffer commandBuffer, uint32_t deviceMask)
{
   /* No-op */
}