turnip: Add emit functions in a header.
[mesa.git] src/freedreno/vulkan/tu_cmd_buffer.c
/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "tu_private.h"

#include "vk_format.h"
#include "adreno_pm4.xml.h"
#include "tu_cs.h"

void
tu_bo_list_init(struct tu_bo_list *list)
{
   list->count = list->capacity = 0;
   list->handles = NULL;
}

void
tu_bo_list_destroy(struct tu_bo_list *list)
{
   free(list->handles);
}

void
tu_bo_list_reset(struct tu_bo_list *list)
{
   list->count = 0;
}

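/*
 * Add a BO to the list, deduplicating by GEM handle with a linear scan.
 * Returns the index of the handle in the list, or ~0 if the backing array
 * could not be grown. The array grows geometrically (doubling, with a
 * floor of 16 entries).
 */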
uint32_t
tu_bo_list_add(struct tu_bo_list *list,
               const struct tu_bo *bo)
{
   uint32_t handle = bo->gem_handle;
   for (uint32_t i = 0; i < list->count; ++i) {
      if (list->handles[i] == handle)
         return i;
   }

   if (list->count == list->capacity) {
      uint32_t new_capacity = MAX2(2 * list->count, 16);
      uint32_t *new_handles =
         realloc(list->handles, new_capacity * sizeof(uint32_t));
      if (!new_handles)
         return ~0;
      list->handles = new_handles;
      list->capacity = new_capacity;
   }

   uint32_t ret = list->count;
   list->handles[list->count] = handle;
   ++list->count;

   return ret;
}

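/*
 * A tu_cmd_stream accumulates command dwords across a growable array of
 * BOs. start/cur/end point into the map of the most recently allocated
 * BO; finished [start, cur) ranges are recorded as entries (see
 * tu_cmd_stream_end below).
 */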
static void
tu_cmd_stream_init(struct tu_cmd_stream *stream)
{
   stream->start = stream->cur = stream->end = NULL;

   stream->entry_count = stream->entry_capacity = 0;
   stream->entries = NULL;

   stream->bo_count = stream->bo_capacity = 0;
   stream->bos = NULL;
}

static void
tu_cmd_stream_finish(struct tu_device *dev,
                     struct tu_cmd_stream *stream)
{
   for (uint32_t i = 0; i < stream->bo_count; ++i) {
      tu_bo_finish(dev, stream->bos[i]);
      free(stream->bos[i]);
   }

   free(stream->entries);
   free(stream->bos);
}

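/*
 * Make sure at least reserve_size dwords can be written to the current
 * BO. If there is not enough room, a new BO is allocated and mapped; new
 * BOs are at least 16 KiB and at least double the size of the previous
 * one, so the number of allocations stays logarithmic in the total
 * stream size.
 */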
static VkResult
tu_cmd_stream_begin(struct tu_device *dev,
                    struct tu_cmd_stream *stream,
                    uint32_t reserve_size)
{
   assert(reserve_size);

   if (stream->end - stream->cur < reserve_size) {
      if (stream->bo_count == stream->bo_capacity) {
         uint32_t new_capacity = MAX2(4, 2 * stream->bo_capacity);
         struct tu_bo **new_bos =
            realloc(stream->bos, new_capacity * sizeof(struct tu_bo *));
         if (!new_bos)
            abort(); /* TODO */

         stream->bo_capacity = new_capacity;
         stream->bos = new_bos;
      }

      uint32_t new_size = MAX2(16384, reserve_size * sizeof(uint32_t));
      if (stream->bo_count)
         new_size = MAX2(new_size, stream->bos[stream->bo_count - 1]->size * 2);

      struct tu_bo *new_bo = malloc(sizeof(struct tu_bo));
      if (!new_bo)
         abort(); /* TODO */

      VkResult result = tu_bo_init_new(dev, new_bo, new_size);
      if (result != VK_SUCCESS) {
         free(new_bo);
         return result;
      }

      result = tu_bo_map(dev, new_bo);
      if (result != VK_SUCCESS) {
         tu_bo_finish(dev, new_bo);
         free(new_bo);
         return result;
      }

      stream->bos[stream->bo_count] = new_bo;
      ++stream->bo_count;

      stream->start = stream->cur = (uint32_t *) new_bo->map;
      stream->end = stream->start + new_bo->size / sizeof(uint32_t);
   }
   stream->start = stream->cur;

   return VK_SUCCESS;
}

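/*
 * End the current chunk by recording an entry describing the finished
 * [start, cur) range of the most recent BO. An empty chunk records
 * nothing.
 */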
static VkResult
tu_cmd_stream_end(struct tu_cmd_stream *stream)
{
   if (stream->start == stream->cur)
      return VK_SUCCESS;

   if (stream->entry_capacity == stream->entry_count) {
      uint32_t new_capacity = MAX2(stream->entry_capacity * 2, 4);
      struct tu_cmd_stream_entry *new_entries =
         realloc(stream->entries,
                 new_capacity * sizeof(struct tu_cmd_stream_entry));
      if (!new_entries)
         abort(); /* TODO */

      stream->entries = new_entries;
      stream->entry_capacity = new_capacity;
   }

   assert(stream->bo_count);

   struct tu_cmd_stream_entry entry;
   entry.bo = stream->bos[stream->bo_count - 1];
   entry.size = (stream->cur - stream->start) * sizeof(uint32_t);
   entry.offset = (stream->start - (uint32_t *) entry.bo->map) * sizeof(uint32_t);

   stream->entries[stream->entry_count] = entry;
   ++stream->entry_count;

   return VK_SUCCESS;
}

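/*
 * Reset the stream for reuse: free all but the most recently allocated
 * BO (the largest one, given the doubling growth above, so keeping it
 * avoids reallocating on the next recording) and drop all entries.
 */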
static void
tu_cmd_stream_reset(struct tu_device *dev,
                    struct tu_cmd_stream *stream)
{
   for (uint32_t i = 0; i + 1 < stream->bo_count; ++i) {
      tu_bo_finish(dev, stream->bos[i]);
      free(stream->bos[i]);
   }

   if (stream->bo_count) {
      stream->bos[0] = stream->bos[stream->bo_count - 1];
      stream->bo_count = 1;

      stream->start = stream->cur = (uint32_t *) stream->bos[0]->map;
      stream->end = stream->start + stream->bos[0]->size / sizeof(uint32_t);
   }

   stream->entry_count = 0;
}

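/*
 * Reserve space for an emit of size dwords. If the current BO cannot fit
 * them, the current chunk is ended and a new one begun.
 */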
VkResult
tu_cs_check_space(struct tu_device *dev,
                  struct tu_cmd_stream *stream,
                  size_t size)
{
   if (stream->end - stream->cur >= size)
      return VK_SUCCESS;

   VkResult result = tu_cmd_stream_end(stream);
   if (result != VK_SUCCESS)
      return result;

   return tu_cmd_stream_begin(dev, stream, size);
}

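/*
 * A minimal sketch of the intended emit pattern with the helpers from
 * tu_cs.h, mirroring the CP_NOP packet written in tu_BeginCommandBuffer
 * below: reserve room for the whole packet (1 header dword plus the
 * payload), then emit the header and payload dwords.
 *
 *    VkResult result = tu_cs_check_space(dev, cs, 5);
 *    if (result != VK_SUCCESS)
 *       return result;
 *
 *    tu_cs_emit_pkt7(cs, CP_NOP, 4);
 *    for (unsigned i = 0; i < 4; i++)
 *       tu_cs_emit(cs, 0);
 */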
const struct tu_dynamic_state default_dynamic_state = {
   .viewport =
      {
         .count = 0,
      },
   .scissor =
      {
         .count = 0,
      },
   .line_width = 1.0f,
   .depth_bias =
      {
         .bias = 0.0f,
         .clamp = 0.0f,
         .slope = 0.0f,
      },
   .blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
   .depth_bounds =
      {
         .min = 0.0f,
         .max = 1.0f,
      },
   .stencil_compare_mask =
      {
         .front = ~0u,
         .back = ~0u,
      },
   .stencil_write_mask =
      {
         .front = ~0u,
         .back = ~0u,
      },
   .stencil_reference =
      {
         .front = 0u,
         .back = 0u,
      },
};

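/*
 * Copy dynamic state from a pipeline into the command buffer state,
 * comparing each field first so that only fields that actually changed
 * end up in dest_mask. The mask is computed but not yet consumed
 * (FINISHME).
 */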
static void UNUSED /* FINISHME */
tu_bind_dynamic_state(struct tu_cmd_buffer *cmd_buffer,
                      const struct tu_dynamic_state *src)
{
   struct tu_dynamic_state *dest = &cmd_buffer->state.dynamic;
   uint32_t copy_mask = src->mask;
   uint32_t dest_mask = 0;

   tu_use_args(cmd_buffer); /* FINISHME */

   /* Make sure to copy the number of viewports/scissors because they can
    * only be specified at pipeline creation time.
    */
   dest->viewport.count = src->viewport.count;
   dest->scissor.count = src->scissor.count;
   dest->discard_rectangle.count = src->discard_rectangle.count;

   if (copy_mask & TU_DYNAMIC_VIEWPORT) {
      if (memcmp(&dest->viewport.viewports, &src->viewport.viewports,
                 src->viewport.count * sizeof(VkViewport))) {
         typed_memcpy(dest->viewport.viewports, src->viewport.viewports,
                      src->viewport.count);
         dest_mask |= TU_DYNAMIC_VIEWPORT;
      }
   }

   if (copy_mask & TU_DYNAMIC_SCISSOR) {
      if (memcmp(&dest->scissor.scissors, &src->scissor.scissors,
                 src->scissor.count * sizeof(VkRect2D))) {
         typed_memcpy(dest->scissor.scissors, src->scissor.scissors,
                      src->scissor.count);
         dest_mask |= TU_DYNAMIC_SCISSOR;
      }
   }

   if (copy_mask & TU_DYNAMIC_LINE_WIDTH) {
      if (dest->line_width != src->line_width) {
         dest->line_width = src->line_width;
         dest_mask |= TU_DYNAMIC_LINE_WIDTH;
      }
   }

   if (copy_mask & TU_DYNAMIC_DEPTH_BIAS) {
      if (memcmp(&dest->depth_bias, &src->depth_bias,
                 sizeof(src->depth_bias))) {
         dest->depth_bias = src->depth_bias;
         dest_mask |= TU_DYNAMIC_DEPTH_BIAS;
      }
   }

   if (copy_mask & TU_DYNAMIC_BLEND_CONSTANTS) {
      if (memcmp(&dest->blend_constants, &src->blend_constants,
                 sizeof(src->blend_constants))) {
         typed_memcpy(dest->blend_constants, src->blend_constants, 4);
         dest_mask |= TU_DYNAMIC_BLEND_CONSTANTS;
      }
   }

   if (copy_mask & TU_DYNAMIC_DEPTH_BOUNDS) {
      if (memcmp(&dest->depth_bounds, &src->depth_bounds,
                 sizeof(src->depth_bounds))) {
         dest->depth_bounds = src->depth_bounds;
         dest_mask |= TU_DYNAMIC_DEPTH_BOUNDS;
      }
   }

   if (copy_mask & TU_DYNAMIC_STENCIL_COMPARE_MASK) {
      if (memcmp(&dest->stencil_compare_mask, &src->stencil_compare_mask,
                 sizeof(src->stencil_compare_mask))) {
         dest->stencil_compare_mask = src->stencil_compare_mask;
         dest_mask |= TU_DYNAMIC_STENCIL_COMPARE_MASK;
      }
   }

   if (copy_mask & TU_DYNAMIC_STENCIL_WRITE_MASK) {
      if (memcmp(&dest->stencil_write_mask, &src->stencil_write_mask,
                 sizeof(src->stencil_write_mask))) {
         dest->stencil_write_mask = src->stencil_write_mask;
         dest_mask |= TU_DYNAMIC_STENCIL_WRITE_MASK;
      }
   }

   if (copy_mask & TU_DYNAMIC_STENCIL_REFERENCE) {
      if (memcmp(&dest->stencil_reference, &src->stencil_reference,
                 sizeof(src->stencil_reference))) {
         dest->stencil_reference = src->stencil_reference;
         dest_mask |= TU_DYNAMIC_STENCIL_REFERENCE;
      }
   }

   if (copy_mask & TU_DYNAMIC_DISCARD_RECTANGLE) {
      if (memcmp(&dest->discard_rectangle.rectangles,
                 &src->discard_rectangle.rectangles,
                 src->discard_rectangle.count * sizeof(VkRect2D))) {
         typed_memcpy(dest->discard_rectangle.rectangles,
                      src->discard_rectangle.rectangles,
                      src->discard_rectangle.count);
         dest_mask |= TU_DYNAMIC_DISCARD_RECTANGLE;
      }
   }
}

static VkResult
tu_create_cmd_buffer(struct tu_device *device,
                     struct tu_cmd_pool *pool,
                     VkCommandBufferLevel level,
                     VkCommandBuffer *pCommandBuffer)
{
   struct tu_cmd_buffer *cmd_buffer;
   cmd_buffer = vk_zalloc(&pool->alloc, sizeof(*cmd_buffer), 8,
                          VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (cmd_buffer == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   cmd_buffer->device = device;
   cmd_buffer->pool = pool;
   cmd_buffer->level = level;

   if (pool) {
      list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
      cmd_buffer->queue_family_index = pool->queue_family_index;
   } else {
      /* Init the pool_link so we can safely call list_del when we destroy
       * the command buffer
       */
      list_inithead(&cmd_buffer->pool_link);
      cmd_buffer->queue_family_index = TU_QUEUE_GENERAL;
   }

   tu_bo_list_init(&cmd_buffer->bo_list);
   tu_cmd_stream_init(&cmd_buffer->cs);

   *pCommandBuffer = tu_cmd_buffer_to_handle(cmd_buffer);

   list_inithead(&cmd_buffer->upload.list);

   return VK_SUCCESS;
}

static void
tu_cmd_buffer_destroy(struct tu_cmd_buffer *cmd_buffer)
{
   list_del(&cmd_buffer->pool_link);

   for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++)
      free(cmd_buffer->descriptors[i].push_set.set.mapped_ptr);

   tu_cmd_stream_finish(cmd_buffer->device, &cmd_buffer->cs);
   tu_bo_list_destroy(&cmd_buffer->bo_list);
   vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
}

static VkResult
tu_reset_cmd_buffer(struct tu_cmd_buffer *cmd_buffer)
{
   cmd_buffer->record_result = VK_SUCCESS;

   tu_bo_list_reset(&cmd_buffer->bo_list);
   tu_cmd_stream_reset(cmd_buffer->device, &cmd_buffer->cs);

   for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++) {
      cmd_buffer->descriptors[i].dirty = 0;
      cmd_buffer->descriptors[i].valid = 0;
      cmd_buffer->descriptors[i].push_dirty = false;
   }

   cmd_buffer->status = TU_CMD_BUFFER_STATUS_INITIAL;

   return cmd_buffer->record_result;
}

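/*
 * Allocation first recycles command buffers that tu_FreeCommandBuffers
 * returned to the pool's free list; a new command buffer is created only
 * when the free list is empty.
 */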
VkResult
tu_AllocateCommandBuffers(VkDevice _device,
                          const VkCommandBufferAllocateInfo *pAllocateInfo,
                          VkCommandBuffer *pCommandBuffers)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_cmd_pool, pool, pAllocateInfo->commandPool);

   VkResult result = VK_SUCCESS;
   uint32_t i;

   for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
      if (!list_empty(&pool->free_cmd_buffers)) {
         struct tu_cmd_buffer *cmd_buffer = list_first_entry(
            &pool->free_cmd_buffers, struct tu_cmd_buffer, pool_link);

         list_del(&cmd_buffer->pool_link);
         list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);

         result = tu_reset_cmd_buffer(cmd_buffer);
         cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
         cmd_buffer->level = pAllocateInfo->level;

         pCommandBuffers[i] = tu_cmd_buffer_to_handle(cmd_buffer);
      } else {
         result = tu_create_cmd_buffer(device, pool, pAllocateInfo->level,
                                       &pCommandBuffers[i]);
      }
      if (result != VK_SUCCESS)
         break;
   }

   if (result != VK_SUCCESS) {
      tu_FreeCommandBuffers(_device, pAllocateInfo->commandPool, i,
                            pCommandBuffers);

      /* From the Vulkan 1.0.66 spec:
       *
       *    "vkAllocateCommandBuffers can be used to create multiple
       *    command buffers. If the creation of any of those command
       *    buffers fails, the implementation must destroy all
       *    successfully created command buffer objects from this
       *    command, set all entries of the pCommandBuffers array to
       *    NULL and return the error."
       */
      memset(pCommandBuffers, 0,
             sizeof(*pCommandBuffers) * pAllocateInfo->commandBufferCount);
   }

   return result;
}

void
tu_FreeCommandBuffers(VkDevice device,
                      VkCommandPool commandPool,
                      uint32_t commandBufferCount,
                      const VkCommandBuffer *pCommandBuffers)
{
   for (uint32_t i = 0; i < commandBufferCount; i++) {
      TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, pCommandBuffers[i]);

      if (cmd_buffer) {
         if (cmd_buffer->pool) {
            list_del(&cmd_buffer->pool_link);
            list_addtail(&cmd_buffer->pool_link,
                         &cmd_buffer->pool->free_cmd_buffers);
         } else
            tu_cmd_buffer_destroy(cmd_buffer);
      }
   }
}

VkResult
tu_ResetCommandBuffer(VkCommandBuffer commandBuffer,
                      VkCommandBufferResetFlags flags)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   return tu_reset_cmd_buffer(cmd_buffer);
}

VkResult
tu_BeginCommandBuffer(VkCommandBuffer commandBuffer,
                      const VkCommandBufferBeginInfo *pBeginInfo)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   VkResult result = VK_SUCCESS;

   if (cmd_buffer->status != TU_CMD_BUFFER_STATUS_INITIAL) {
      /* If the command buffer has already been reset with
       * vkResetCommandBuffer, no need to do it again.
       */
      result = tu_reset_cmd_buffer(cmd_buffer);
      if (result != VK_SUCCESS)
         return result;
   }

   memset(&cmd_buffer->state, 0, sizeof(cmd_buffer->state));
   cmd_buffer->usage_flags = pBeginInfo->flags;

   /* setup initial configuration into command buffer */
   if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
      switch (cmd_buffer->queue_family_index) {
      case TU_QUEUE_GENERAL:
         /* init */
         break;
      default:
         break;
      }
   }

   cmd_buffer->status = TU_CMD_BUFFER_STATUS_RECORDING;

   result = tu_cmd_stream_begin(cmd_buffer->device, &cmd_buffer->cs, 4096);
   if (result != VK_SUCCESS)
      return result;

   /* Put some stuff in so we do not have empty command buffers. */
   tu_cs_emit_pkt7(&cmd_buffer->cs, CP_NOP, 4);
   tu_cs_emit(&cmd_buffer->cs, 0);
   tu_cs_emit(&cmd_buffer->cs, 0);
   tu_cs_emit(&cmd_buffer->cs, 0);
   tu_cs_emit(&cmd_buffer->cs, 0);

   return VK_SUCCESS;
}

void
tu_CmdBindVertexBuffers(VkCommandBuffer commandBuffer,
                        uint32_t firstBinding,
                        uint32_t bindingCount,
                        const VkBuffer *pBuffers,
                        const VkDeviceSize *pOffsets)
{
}

void
tu_CmdBindIndexBuffer(VkCommandBuffer commandBuffer,
                      VkBuffer buffer,
                      VkDeviceSize offset,
                      VkIndexType indexType)
{
}

void
tu_CmdBindDescriptorSets(VkCommandBuffer commandBuffer,
                         VkPipelineBindPoint pipelineBindPoint,
                         VkPipelineLayout _layout,
                         uint32_t firstSet,
                         uint32_t descriptorSetCount,
                         const VkDescriptorSet *pDescriptorSets,
                         uint32_t dynamicOffsetCount,
                         const uint32_t *pDynamicOffsets)
{
}

void
tu_CmdPushConstants(VkCommandBuffer commandBuffer,
                    VkPipelineLayout layout,
                    VkShaderStageFlags stageFlags,
                    uint32_t offset,
                    uint32_t size,
                    const void *pValues)
{
}

VkResult
tu_EndCommandBuffer(VkCommandBuffer commandBuffer)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);

   tu_cmd_stream_end(&cmd_buffer->cs);
   cmd_buffer->status = TU_CMD_BUFFER_STATUS_EXECUTABLE;

   return cmd_buffer->record_result;
}

void
tu_CmdBindPipeline(VkCommandBuffer commandBuffer,
                   VkPipelineBindPoint pipelineBindPoint,
                   VkPipeline _pipeline)
{
}

void
tu_CmdSetViewport(VkCommandBuffer commandBuffer,
                  uint32_t firstViewport,
                  uint32_t viewportCount,
                  const VkViewport *pViewports)
{
}

void
tu_CmdSetScissor(VkCommandBuffer commandBuffer,
                 uint32_t firstScissor,
                 uint32_t scissorCount,
                 const VkRect2D *pScissors)
{
}

void
tu_CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth)
{
}

void
tu_CmdSetDepthBias(VkCommandBuffer commandBuffer,
                   float depthBiasConstantFactor,
                   float depthBiasClamp,
                   float depthBiasSlopeFactor)
{
}

void
tu_CmdSetBlendConstants(VkCommandBuffer commandBuffer,
                        const float blendConstants[4])
{
}

void
tu_CmdSetDepthBounds(VkCommandBuffer commandBuffer,
                     float minDepthBounds,
                     float maxDepthBounds)
{
}

void
tu_CmdSetStencilCompareMask(VkCommandBuffer commandBuffer,
                            VkStencilFaceFlags faceMask,
                            uint32_t compareMask)
{
}

void
tu_CmdSetStencilWriteMask(VkCommandBuffer commandBuffer,
                          VkStencilFaceFlags faceMask,
                          uint32_t writeMask)
{
}

void
tu_CmdSetStencilReference(VkCommandBuffer commandBuffer,
                          VkStencilFaceFlags faceMask,
                          uint32_t reference)
{
}

void
tu_CmdExecuteCommands(VkCommandBuffer commandBuffer,
                      uint32_t commandBufferCount,
                      const VkCommandBuffer *pCmdBuffers)
{
}

VkResult
tu_CreateCommandPool(VkDevice _device,
                     const VkCommandPoolCreateInfo *pCreateInfo,
                     const VkAllocationCallbacks *pAllocator,
                     VkCommandPool *pCmdPool)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   struct tu_cmd_pool *pool;

   pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (pool == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   if (pAllocator)
      pool->alloc = *pAllocator;
   else
      pool->alloc = device->alloc;

   list_inithead(&pool->cmd_buffers);
   list_inithead(&pool->free_cmd_buffers);

   pool->queue_family_index = pCreateInfo->queueFamilyIndex;

   *pCmdPool = tu_cmd_pool_to_handle(pool);

   return VK_SUCCESS;
}

void
tu_DestroyCommandPool(VkDevice _device,
                      VkCommandPool commandPool,
                      const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);

   if (!pool)
      return;

   list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
                            &pool->cmd_buffers, pool_link)
   {
      tu_cmd_buffer_destroy(cmd_buffer);
   }

   list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
                            &pool->free_cmd_buffers, pool_link)
   {
      tu_cmd_buffer_destroy(cmd_buffer);
   }

   vk_free2(&device->alloc, pAllocator, pool);
}

VkResult
tu_ResetCommandPool(VkDevice device,
                    VkCommandPool commandPool,
                    VkCommandPoolResetFlags flags)
{
   TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);
   VkResult result;

   list_for_each_entry(struct tu_cmd_buffer, cmd_buffer, &pool->cmd_buffers,
                       pool_link)
   {
      result = tu_reset_cmd_buffer(cmd_buffer);
      if (result != VK_SUCCESS)
         return result;
   }

   return VK_SUCCESS;
}

void
tu_TrimCommandPool(VkDevice device,
                   VkCommandPool commandPool,
                   VkCommandPoolTrimFlagsKHR flags)
{
   TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);

   if (!pool)
      return;

   list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
                            &pool->free_cmd_buffers, pool_link)
   {
      tu_cmd_buffer_destroy(cmd_buffer);
   }
}

void
tu_CmdBeginRenderPass(VkCommandBuffer commandBuffer,
                      const VkRenderPassBeginInfo *pRenderPassBegin,
                      VkSubpassContents contents)
{
}

void
tu_CmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer,
                          const VkRenderPassBeginInfo *pRenderPassBeginInfo,
                          const VkSubpassBeginInfoKHR *pSubpassBeginInfo)
{
   tu_CmdBeginRenderPass(commandBuffer, pRenderPassBeginInfo,
                         pSubpassBeginInfo->contents);
}

void
tu_CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents)
{
}

void
tu_CmdNextSubpass2KHR(VkCommandBuffer commandBuffer,
                      const VkSubpassBeginInfoKHR *pSubpassBeginInfo,
                      const VkSubpassEndInfoKHR *pSubpassEndInfo)
{
   tu_CmdNextSubpass(commandBuffer, pSubpassBeginInfo->contents);
}

struct tu_draw_info
{
   /**
    * Number of vertices.
    */
   uint32_t count;

   /**
    * Index of the first vertex.
    */
   int32_t vertex_offset;

   /**
    * First instance id.
    */
   uint32_t first_instance;

   /**
    * Number of instances.
    */
   uint32_t instance_count;

   /**
    * First index (indexed draws only).
    */
   uint32_t first_index;

   /**
    * Whether it's an indexed draw.
    */
   bool indexed;

   /**
    * Indirect draw parameters resource.
    */
   struct tu_buffer *indirect;
   uint64_t indirect_offset;
   uint32_t stride;

   /**
    * Draw count parameters resource.
    */
   struct tu_buffer *count_buffer;
   uint64_t count_buffer_offset;
};

static void
tu_draw(struct tu_cmd_buffer *cmd_buffer, const struct tu_draw_info *info)
{
}

void
tu_CmdDraw(VkCommandBuffer commandBuffer,
           uint32_t vertexCount,
           uint32_t instanceCount,
           uint32_t firstVertex,
           uint32_t firstInstance)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_draw_info info = {};

   info.count = vertexCount;
   info.instance_count = instanceCount;
   info.first_instance = firstInstance;
   info.vertex_offset = firstVertex;

   tu_draw(cmd_buffer, &info);
}

void
tu_CmdDrawIndexed(VkCommandBuffer commandBuffer,
                  uint32_t indexCount,
                  uint32_t instanceCount,
                  uint32_t firstIndex,
                  int32_t vertexOffset,
                  uint32_t firstInstance)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_draw_info info = {};

   info.indexed = true;
   info.count = indexCount;
   info.instance_count = instanceCount;
   info.first_index = firstIndex;
   info.vertex_offset = vertexOffset;
   info.first_instance = firstInstance;

   tu_draw(cmd_buffer, &info);
}

void
tu_CmdDrawIndirect(VkCommandBuffer commandBuffer,
                   VkBuffer _buffer,
                   VkDeviceSize offset,
                   uint32_t drawCount,
                   uint32_t stride)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
   struct tu_draw_info info = {};

   info.count = drawCount;
   info.indirect = buffer;
   info.indirect_offset = offset;
   info.stride = stride;

   tu_draw(cmd_buffer, &info);
}

void
tu_CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer,
                          VkBuffer _buffer,
                          VkDeviceSize offset,
                          uint32_t drawCount,
                          uint32_t stride)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
   struct tu_draw_info info = {};

   info.indexed = true;
   info.count = drawCount;
   info.indirect = buffer;
   info.indirect_offset = offset;
   info.stride = stride;

   tu_draw(cmd_buffer, &info);
}

struct tu_dispatch_info
{
   /**
    * Determine the layout of the grid (in block units) to be used.
    */
   uint32_t blocks[3];

   /**
    * A starting offset for the grid. If unaligned is set, the offset
    * must still be aligned.
    */
   uint32_t offsets[3];

   /**
    * Whether it's an unaligned compute dispatch.
    */
   bool unaligned;

   /**
    * Indirect compute parameters resource.
    */
   struct tu_buffer *indirect;
   uint64_t indirect_offset;
};

static void
tu_dispatch(struct tu_cmd_buffer *cmd_buffer,
            const struct tu_dispatch_info *info)
{
}

void
tu_CmdDispatchBase(VkCommandBuffer commandBuffer,
                   uint32_t base_x,
                   uint32_t base_y,
                   uint32_t base_z,
                   uint32_t x,
                   uint32_t y,
                   uint32_t z)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_dispatch_info info = {};

   info.blocks[0] = x;
   info.blocks[1] = y;
   info.blocks[2] = z;

   info.offsets[0] = base_x;
   info.offsets[1] = base_y;
   info.offsets[2] = base_z;
   tu_dispatch(cmd_buffer, &info);
}

void
tu_CmdDispatch(VkCommandBuffer commandBuffer,
               uint32_t x,
               uint32_t y,
               uint32_t z)
{
   tu_CmdDispatchBase(commandBuffer, 0, 0, 0, x, y, z);
}

void
tu_CmdDispatchIndirect(VkCommandBuffer commandBuffer,
                       VkBuffer _buffer,
                       VkDeviceSize offset)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
   struct tu_dispatch_info info = {};

   info.indirect = buffer;
   info.indirect_offset = offset;

   tu_dispatch(cmd_buffer, &info);
}

void
tu_CmdEndRenderPass(VkCommandBuffer commandBuffer)
{
}

void
tu_CmdEndRenderPass2KHR(VkCommandBuffer commandBuffer,
                        const VkSubpassEndInfoKHR *pSubpassEndInfo)
{
   tu_CmdEndRenderPass(commandBuffer);
}

struct tu_barrier_info
{
   uint32_t eventCount;
   const VkEvent *pEvents;
   VkPipelineStageFlags srcStageMask;
};

static void
tu_barrier(struct tu_cmd_buffer *cmd_buffer,
           uint32_t memoryBarrierCount,
           const VkMemoryBarrier *pMemoryBarriers,
           uint32_t bufferMemoryBarrierCount,
           const VkBufferMemoryBarrier *pBufferMemoryBarriers,
           uint32_t imageMemoryBarrierCount,
           const VkImageMemoryBarrier *pImageMemoryBarriers,
           const struct tu_barrier_info *info)
{
}

void
tu_CmdPipelineBarrier(VkCommandBuffer commandBuffer,
                      VkPipelineStageFlags srcStageMask,
                      VkPipelineStageFlags destStageMask,
                      VkBool32 byRegion,
                      uint32_t memoryBarrierCount,
                      const VkMemoryBarrier *pMemoryBarriers,
                      uint32_t bufferMemoryBarrierCount,
                      const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                      uint32_t imageMemoryBarrierCount,
                      const VkImageMemoryBarrier *pImageMemoryBarriers)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_barrier_info info;

   info.eventCount = 0;
   info.pEvents = NULL;
   info.srcStageMask = srcStageMask;

   tu_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
              bufferMemoryBarrierCount, pBufferMemoryBarriers,
              imageMemoryBarrierCount, pImageMemoryBarriers, &info);
}

static void
write_event(struct tu_cmd_buffer *cmd_buffer,
            struct tu_event *event,
            VkPipelineStageFlags stageMask,
            unsigned value)
{
}

void
tu_CmdSetEvent(VkCommandBuffer commandBuffer,
               VkEvent _event,
               VkPipelineStageFlags stageMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_event, event, _event);

   write_event(cmd_buffer, event, stageMask, 1);
}

void
tu_CmdResetEvent(VkCommandBuffer commandBuffer,
                 VkEvent _event,
                 VkPipelineStageFlags stageMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_event, event, _event);

   write_event(cmd_buffer, event, stageMask, 0);
}

void
tu_CmdWaitEvents(VkCommandBuffer commandBuffer,
                 uint32_t eventCount,
                 const VkEvent *pEvents,
                 VkPipelineStageFlags srcStageMask,
                 VkPipelineStageFlags dstStageMask,
                 uint32_t memoryBarrierCount,
                 const VkMemoryBarrier *pMemoryBarriers,
                 uint32_t bufferMemoryBarrierCount,
                 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                 uint32_t imageMemoryBarrierCount,
                 const VkImageMemoryBarrier *pImageMemoryBarriers)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   struct tu_barrier_info info;

   info.eventCount = eventCount;
   info.pEvents = pEvents;
   info.srcStageMask = 0;

   tu_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
              bufferMemoryBarrierCount, pBufferMemoryBarriers,
              imageMemoryBarrierCount, pImageMemoryBarriers, &info);
}

void
tu_CmdSetDeviceMask(VkCommandBuffer commandBuffer, uint32_t deviceMask)
{
   /* No-op */
}