turnip: Add a command stream.
[mesa.git] / src / freedreno / vulkan / tu_cmd_buffer.c
1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * based in part on anv driver which is:
6 * Copyright © 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
26 */
27
28 #include "tu_private.h"
29
30 #include "vk_format.h"
31
32 static void
33 tu_bo_list_init(struct tu_bo_list *list)
34 {
35 list->count = list->capacity = 0;
36 list->handles = NULL;
37 }
38
39 static void
40 tu_bo_list_destroy(struct tu_bo_list *list)
41 {
42 free(list->handles);
43 }
44
/* Empty the list while keeping its allocated storage for reuse. */
static void
tu_bo_list_reset(struct tu_bo_list *list)
{
   list->count = 0;
}
50
51 static uint32_t
52 tu_bo_list_add(struct tu_bo_list *list,
53 const struct tu_bo *bo)
54 {
55 uint32_t handle = bo->gem_handle;
56 for (uint32_t i = 0; i < list->count; ++i) {
57 if (list->handles[i] == handle)
58 return i;
59 }
60
61 if (list->count == list->capacity) {
62 uint32_t new_capacity = MAX2(2 * list->count, 16);
63 uint32_t *new_handles = realloc(list->handles, new_capacity * sizeof(uint32_t));
64 if (!new_handles)
65 return ~0;
66 list->handles = new_handles;
67 list->capacity = new_capacity;
68 }
69
70 uint32_t ret = list->count;
71 list->handles[list->count] = handle;
72 ++list->count;
73
74 return ret;
75 }
76
77 static void
78 tu_cmd_stream_init(struct tu_cmd_stream *stream)
79 {
80 stream->start = stream->cur = stream->end = NULL;
81
82 stream->entry_count = stream->entry_capacity = 0;
83 stream->entries = NULL;
84
85 stream->bo_count = stream->bo_capacity = 0;
86 stream->bos = NULL;
87 }
88
89 static void
90 tu_cmd_stream_finish(struct tu_device *dev,
91 struct tu_cmd_stream *stream)
92 {
93 for (uint32_t i = 0; i < stream->bo_count; ++i) {
94 tu_bo_finish(dev, stream->bos[i]);
95 free(stream->bos[i]);
96 }
97
98 free(stream->entries);
99 free(stream->bos);
100 }
101
102 static VkResult
103 tu_cmd_stream_begin(struct tu_device *dev,
104 struct tu_cmd_stream *stream,
105 uint32_t reserve_size)
106 {
107 assert(reserve_size);
108
109 if (stream->end - stream->cur < reserve_size) {
110 if (stream->bo_count == stream->bo_capacity) {
111 uint32_t new_capacity = MAX2(4, 2 * stream->bo_capacity);
112 struct tu_bo **new_bos = realloc(stream->bos,
113 new_capacity * sizeof(struct tu_bo*));
114 if (!new_bos)
115 abort();
116
117 stream->bo_capacity = new_capacity;
118 stream->bos = new_bos;
119 }
120
121 uint32_t new_size = MAX2(16384, reserve_size * sizeof(uint32_t));
122 if (stream->bo_count)
123 new_size = MAX2(new_size, stream->bos[stream->bo_count - 1]->size * 2);
124
125 struct tu_bo *new_bo = malloc(sizeof(struct tu_bo));
126 if (!new_bo)
127 abort();
128
129 VkResult result = tu_bo_init_new(dev, new_bo, new_size);
130 if (result != VK_SUCCESS) {
131 free(new_bo);
132 return result;
133 }
134
135 result = tu_bo_map(dev, new_bo);
136 if (result != VK_SUCCESS) {
137 tu_bo_finish(dev, new_bo);
138 free(new_bo);
139 return result;
140 }
141
142 stream->bos[stream->bo_count] = new_bo;
143 ++stream->bo_count;
144
145 stream->start = stream->cur = (uint32_t*)new_bo->map;
146 stream->end = stream->start + new_bo->size / sizeof(uint32_t);
147 }
148 stream->start = stream->cur;
149
150 return VK_SUCCESS;
151 }
152
153 static VkResult
154 tu_cmd_stream_end(struct tu_cmd_stream *stream)
155 {
156 if (stream->start == stream->cur)
157 return VK_SUCCESS;
158
159 if (stream->entry_capacity == stream->entry_count) {
160 uint32_t new_capacity = MAX2(stream->entry_capacity * 2, 4);
161 struct tu_cmd_stream_entry *new_entries =
162 realloc(stream->entries, new_capacity * sizeof(struct tu_cmd_stream_entry));
163 if (!new_entries)
164 abort(); /* TODO */
165
166 stream->entries = new_entries;
167 stream->entry_capacity = new_capacity;
168 }
169
170 assert (stream->bo_count);
171
172 struct tu_cmd_stream_entry entry;
173 entry.bo = stream->bos[stream->bo_count - 1];
174 entry.size = (stream->cur - stream->start) * sizeof(uint32_t);
175 entry.offset = (stream->start - (uint32_t*)entry.bo->map) * sizeof(uint32_t);
176
177 stream->entries[stream->entry_count] = entry;
178 ++stream->entry_count;
179
180 return VK_SUCCESS;
181 }
182
183 static void
184 tu_cmd_stream_reset(struct tu_device *dev,
185 struct tu_cmd_stream *stream)
186 {
187 for (uint32_t i = 0; i + 1 < stream->bo_count; ++i) {
188 tu_bo_finish(dev, stream->bos[i]);
189 free(stream->bos[i]);
190 }
191
192 if (stream->bo_count) {
193 stream->bos[0] = stream->bos[stream->bo_count - 1];
194 stream->bo_count = 1;
195
196 stream->start = stream->cur = (uint32_t*)stream->bos[0]->map;
197 stream->end = stream->start + stream->bos[0]->size / sizeof(uint32_t);
198 }
199
200 stream->entry_count = 0;
201 }
202
/* Dynamic state values a freshly begun command buffer starts from, matching
 * the Vulkan defaults (identity-ish: unit line width, zero depth bias,
 * full stencil masks, [0,1] depth bounds).
 */
const struct tu_dynamic_state default_dynamic_state = {
   .viewport =
     {
       .count = 0,
     },
   .scissor =
     {
       .count = 0,
     },
   .line_width = 1.0f,
   .depth_bias =
     {
       .bias = 0.0f,
       .clamp = 0.0f,
       .slope = 0.0f,
     },
   .blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
   .depth_bounds =
     {
       .min = 0.0f,
       .max = 1.0f,
     },
   .stencil_compare_mask =
     {
       .front = ~0u,
       .back = ~0u,
     },
   .stencil_write_mask =
     {
       .front = ~0u,
       .back = ~0u,
     },
   .stencil_reference =
     {
       .front = 0u,
       .back = 0u,
     },
};
241
/* Merge the dynamic state in @src into the command buffer's current state.
 *
 * For each piece of state selected by src->mask, the value is copied only
 * if it actually differs from the current value, and the corresponding bit
 * is set in dest_mask.  NOTE(review): dest_mask is computed but not yet
 * consumed — presumably it will later drive re-emission of dirty state
 * (hence the FINISHME markers and the UNUSED attribute on the function).
 */
static void UNUSED /* FINISHME */
tu_bind_dynamic_state(struct tu_cmd_buffer *cmd_buffer,
                      const struct tu_dynamic_state *src)
{
   struct tu_dynamic_state *dest = &cmd_buffer->state.dynamic;
   uint32_t copy_mask = src->mask;
   uint32_t dest_mask = 0;

   tu_use_args(cmd_buffer); /* FINISHME */

   /* Make sure to copy the number of viewports/scissors because they can
    * only be specified at pipeline creation time.
    */
   dest->viewport.count = src->viewport.count;
   dest->scissor.count = src->scissor.count;
   dest->discard_rectangle.count = src->discard_rectangle.count;

   if (copy_mask & TU_DYNAMIC_VIEWPORT) {
      if (memcmp(&dest->viewport.viewports, &src->viewport.viewports,
                 src->viewport.count * sizeof(VkViewport))) {
         typed_memcpy(dest->viewport.viewports, src->viewport.viewports,
                      src->viewport.count);
         dest_mask |= TU_DYNAMIC_VIEWPORT;
      }
   }

   if (copy_mask & TU_DYNAMIC_SCISSOR) {
      if (memcmp(&dest->scissor.scissors, &src->scissor.scissors,
                 src->scissor.count * sizeof(VkRect2D))) {
         typed_memcpy(dest->scissor.scissors, src->scissor.scissors,
                      src->scissor.count);
         dest_mask |= TU_DYNAMIC_SCISSOR;
      }
   }

   if (copy_mask & TU_DYNAMIC_LINE_WIDTH) {
      if (dest->line_width != src->line_width) {
         dest->line_width = src->line_width;
         dest_mask |= TU_DYNAMIC_LINE_WIDTH;
      }
   }

   if (copy_mask & TU_DYNAMIC_DEPTH_BIAS) {
      if (memcmp(&dest->depth_bias, &src->depth_bias,
                 sizeof(src->depth_bias))) {
         dest->depth_bias = src->depth_bias;
         dest_mask |= TU_DYNAMIC_DEPTH_BIAS;
      }
   }

   if (copy_mask & TU_DYNAMIC_BLEND_CONSTANTS) {
      if (memcmp(&dest->blend_constants, &src->blend_constants,
                 sizeof(src->blend_constants))) {
         typed_memcpy(dest->blend_constants, src->blend_constants, 4);
         dest_mask |= TU_DYNAMIC_BLEND_CONSTANTS;
      }
   }

   if (copy_mask & TU_DYNAMIC_DEPTH_BOUNDS) {
      if (memcmp(&dest->depth_bounds, &src->depth_bounds,
                 sizeof(src->depth_bounds))) {
         dest->depth_bounds = src->depth_bounds;
         dest_mask |= TU_DYNAMIC_DEPTH_BOUNDS;
      }
   }

   if (copy_mask & TU_DYNAMIC_STENCIL_COMPARE_MASK) {
      if (memcmp(&dest->stencil_compare_mask, &src->stencil_compare_mask,
                 sizeof(src->stencil_compare_mask))) {
         dest->stencil_compare_mask = src->stencil_compare_mask;
         dest_mask |= TU_DYNAMIC_STENCIL_COMPARE_MASK;
      }
   }

   if (copy_mask & TU_DYNAMIC_STENCIL_WRITE_MASK) {
      if (memcmp(&dest->stencil_write_mask, &src->stencil_write_mask,
                 sizeof(src->stencil_write_mask))) {
         dest->stencil_write_mask = src->stencil_write_mask;
         dest_mask |= TU_DYNAMIC_STENCIL_WRITE_MASK;
      }
   }

   if (copy_mask & TU_DYNAMIC_STENCIL_REFERENCE) {
      if (memcmp(&dest->stencil_reference, &src->stencil_reference,
                 sizeof(src->stencil_reference))) {
         dest->stencil_reference = src->stencil_reference;
         dest_mask |= TU_DYNAMIC_STENCIL_REFERENCE;
      }
   }

   if (copy_mask & TU_DYNAMIC_DISCARD_RECTANGLE) {
      if (memcmp(&dest->discard_rectangle.rectangles,
                 &src->discard_rectangle.rectangles,
                 src->discard_rectangle.count * sizeof(VkRect2D))) {
         typed_memcpy(dest->discard_rectangle.rectangles,
                      src->discard_rectangle.rectangles,
                      src->discard_rectangle.count);
         dest_mask |= TU_DYNAMIC_DISCARD_RECTANGLE;
      }
   }
}
343
344 static VkResult
345 tu_create_cmd_buffer(struct tu_device *device,
346 struct tu_cmd_pool *pool,
347 VkCommandBufferLevel level,
348 VkCommandBuffer *pCommandBuffer)
349 {
350 struct tu_cmd_buffer *cmd_buffer;
351 cmd_buffer = vk_zalloc(&pool->alloc, sizeof(*cmd_buffer), 8,
352 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
353 if (cmd_buffer == NULL)
354 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
355
356 cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
357 cmd_buffer->device = device;
358 cmd_buffer->pool = pool;
359 cmd_buffer->level = level;
360
361 if (pool) {
362 list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
363 cmd_buffer->queue_family_index = pool->queue_family_index;
364
365 } else {
366 /* Init the pool_link so we can safely call list_del when we destroy
367 * the command buffer
368 */
369 list_inithead(&cmd_buffer->pool_link);
370 cmd_buffer->queue_family_index = TU_QUEUE_GENERAL;
371 }
372
373 tu_bo_list_init(&cmd_buffer->bo_list);
374 tu_cmd_stream_init(&cmd_buffer->primary_cmd_stream);
375
376 *pCommandBuffer = tu_cmd_buffer_to_handle(cmd_buffer);
377
378 list_inithead(&cmd_buffer->upload.list);
379
380 return VK_SUCCESS;
381 }
382
383 static void
384 tu_cmd_buffer_destroy(struct tu_cmd_buffer *cmd_buffer)
385 {
386 list_del(&cmd_buffer->pool_link);
387
388 for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++)
389 free(cmd_buffer->descriptors[i].push_set.set.mapped_ptr);
390
391 tu_cmd_stream_finish(cmd_buffer->device, &cmd_buffer->primary_cmd_stream);
392 tu_bo_list_destroy(&cmd_buffer->bo_list);
393 vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
394 }
395
396 static VkResult
397 tu_reset_cmd_buffer(struct tu_cmd_buffer *cmd_buffer)
398 {
399 cmd_buffer->record_result = VK_SUCCESS;
400
401 tu_bo_list_reset(&cmd_buffer->bo_list);
402 tu_cmd_stream_reset(cmd_buffer->device, &cmd_buffer->primary_cmd_stream);
403
404 for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++) {
405 cmd_buffer->descriptors[i].dirty = 0;
406 cmd_buffer->descriptors[i].valid = 0;
407 cmd_buffer->descriptors[i].push_dirty = false;
408 }
409
410 cmd_buffer->status = TU_CMD_BUFFER_STATUS_INITIAL;
411
412 return cmd_buffer->record_result;
413 }
414
415 VkResult
416 tu_AllocateCommandBuffers(VkDevice _device,
417 const VkCommandBufferAllocateInfo *pAllocateInfo,
418 VkCommandBuffer *pCommandBuffers)
419 {
420 TU_FROM_HANDLE(tu_device, device, _device);
421 TU_FROM_HANDLE(tu_cmd_pool, pool, pAllocateInfo->commandPool);
422
423 VkResult result = VK_SUCCESS;
424 uint32_t i;
425
426 for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
427
428 if (!list_empty(&pool->free_cmd_buffers)) {
429 struct tu_cmd_buffer *cmd_buffer = list_first_entry(
430 &pool->free_cmd_buffers, struct tu_cmd_buffer, pool_link);
431
432 list_del(&cmd_buffer->pool_link);
433 list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
434
435 result = tu_reset_cmd_buffer(cmd_buffer);
436 cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
437 cmd_buffer->level = pAllocateInfo->level;
438
439 pCommandBuffers[i] = tu_cmd_buffer_to_handle(cmd_buffer);
440 } else {
441 result = tu_create_cmd_buffer(device, pool, pAllocateInfo->level,
442 &pCommandBuffers[i]);
443 }
444 if (result != VK_SUCCESS)
445 break;
446 }
447
448 if (result != VK_SUCCESS) {
449 tu_FreeCommandBuffers(_device, pAllocateInfo->commandPool, i,
450 pCommandBuffers);
451
452 /* From the Vulkan 1.0.66 spec:
453 *
454 * "vkAllocateCommandBuffers can be used to create multiple
455 * command buffers. If the creation of any of those command
456 * buffers fails, the implementation must destroy all
457 * successfully created command buffer objects from this
458 * command, set all entries of the pCommandBuffers array to
459 * NULL and return the error."
460 */
461 memset(pCommandBuffers, 0,
462 sizeof(*pCommandBuffers) * pAllocateInfo->commandBufferCount);
463 }
464
465 return result;
466 }
467
468 void
469 tu_FreeCommandBuffers(VkDevice device,
470 VkCommandPool commandPool,
471 uint32_t commandBufferCount,
472 const VkCommandBuffer *pCommandBuffers)
473 {
474 for (uint32_t i = 0; i < commandBufferCount; i++) {
475 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, pCommandBuffers[i]);
476
477 if (cmd_buffer) {
478 if (cmd_buffer->pool) {
479 list_del(&cmd_buffer->pool_link);
480 list_addtail(&cmd_buffer->pool_link,
481 &cmd_buffer->pool->free_cmd_buffers);
482 } else
483 tu_cmd_buffer_destroy(cmd_buffer);
484 }
485 }
486 }
487
/* vkResetCommandBuffer: return the buffer to the initial state.
 * The flags argument (RELEASE_RESOURCES) is currently ignored; the stream
 * reset always keeps its largest BO for reuse.
 */
VkResult
tu_ResetCommandBuffer(VkCommandBuffer commandBuffer,
                      VkCommandBufferResetFlags flags)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   return tu_reset_cmd_buffer(cmd_buffer);
}
495
496 VkResult
497 tu_BeginCommandBuffer(VkCommandBuffer commandBuffer,
498 const VkCommandBufferBeginInfo *pBeginInfo)
499 {
500 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
501 VkResult result = VK_SUCCESS;
502
503 if (cmd_buffer->status != TU_CMD_BUFFER_STATUS_INITIAL) {
504 /* If the command buffer has already been resetted with
505 * vkResetCommandBuffer, no need to do it again.
506 */
507 result = tu_reset_cmd_buffer(cmd_buffer);
508 if (result != VK_SUCCESS)
509 return result;
510 }
511
512 memset(&cmd_buffer->state, 0, sizeof(cmd_buffer->state));
513 cmd_buffer->usage_flags = pBeginInfo->flags;
514
515 /* setup initial configuration into command buffer */
516 if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
517 switch (cmd_buffer->queue_family_index) {
518 case TU_QUEUE_GENERAL:
519 /* init */
520 break;
521 default:
522 break;
523 }
524 }
525
526 cmd_buffer->status = TU_CMD_BUFFER_STATUS_RECORDING;
527
528 return result;
529 }
530
void
tu_CmdBindVertexBuffers(VkCommandBuffer commandBuffer,
                        uint32_t firstBinding,
                        uint32_t bindingCount,
                        const VkBuffer *pBuffers,
                        const VkDeviceSize *pOffsets)
{
   /* TODO: stub — vertex buffer bindings are not recorded yet. */
}
539
void
tu_CmdBindIndexBuffer(VkCommandBuffer commandBuffer,
                      VkBuffer buffer,
                      VkDeviceSize offset,
                      VkIndexType indexType)
{
   /* TODO: stub — index buffer binding is not recorded yet. */
}
547
void
tu_CmdBindDescriptorSets(VkCommandBuffer commandBuffer,
                         VkPipelineBindPoint pipelineBindPoint,
                         VkPipelineLayout _layout,
                         uint32_t firstSet,
                         uint32_t descriptorSetCount,
                         const VkDescriptorSet *pDescriptorSets,
                         uint32_t dynamicOffsetCount,
                         const uint32_t *pDynamicOffsets)
{
   /* TODO: stub — descriptor set binding is not recorded yet. */
}
559
void
tu_CmdPushConstants(VkCommandBuffer commandBuffer,
                    VkPipelineLayout layout,
                    VkShaderStageFlags stageFlags,
                    uint32_t offset,
                    uint32_t size,
                    const void *pValues)
{
   /* TODO: stub — push constants are not recorded yet. */
}
569
/* vkEndCommandBuffer: move to the EXECUTABLE state and report any error
 * recorded during command recording.
 */
VkResult
tu_EndCommandBuffer(VkCommandBuffer commandBuffer)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->status = TU_CMD_BUFFER_STATUS_EXECUTABLE;

   return cmd_buffer->record_result;
}
579
void
tu_CmdBindPipeline(VkCommandBuffer commandBuffer,
                   VkPipelineBindPoint pipelineBindPoint,
                   VkPipeline _pipeline)
{
   /* TODO: stub — pipeline binding is not recorded yet. */
}
586
void
tu_CmdSetViewport(VkCommandBuffer commandBuffer,
                  uint32_t firstViewport,
                  uint32_t viewportCount,
                  const VkViewport *pViewports)
{
   /* TODO: stub — dynamic viewport state is not recorded yet. */
}
594
void
tu_CmdSetScissor(VkCommandBuffer commandBuffer,
                 uint32_t firstScissor,
                 uint32_t scissorCount,
                 const VkRect2D *pScissors)
{
   /* TODO: stub — dynamic scissor state is not recorded yet. */
}
602
void
tu_CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth)
{
   /* TODO: stub — dynamic line width is not recorded yet. */
}
607
void
tu_CmdSetDepthBias(VkCommandBuffer commandBuffer,
                   float depthBiasConstantFactor,
                   float depthBiasClamp,
                   float depthBiasSlopeFactor)
{
   /* TODO: stub — dynamic depth bias is not recorded yet. */
}
615
void
tu_CmdSetBlendConstants(VkCommandBuffer commandBuffer,
                        const float blendConstants[4])
{
   /* TODO: stub — dynamic blend constants are not recorded yet. */
}
621
void
tu_CmdSetDepthBounds(VkCommandBuffer commandBuffer,
                     float minDepthBounds,
                     float maxDepthBounds)
{
   /* TODO: stub — dynamic depth bounds are not recorded yet. */
}
628
void
tu_CmdSetStencilCompareMask(VkCommandBuffer commandBuffer,
                            VkStencilFaceFlags faceMask,
                            uint32_t compareMask)
{
   /* TODO: stub — dynamic stencil compare mask is not recorded yet. */
}
635
void
tu_CmdSetStencilWriteMask(VkCommandBuffer commandBuffer,
                          VkStencilFaceFlags faceMask,
                          uint32_t writeMask)
{
   /* TODO: stub — dynamic stencil write mask is not recorded yet. */
}
642
void
tu_CmdSetStencilReference(VkCommandBuffer commandBuffer,
                          VkStencilFaceFlags faceMask,
                          uint32_t reference)
{
   /* TODO: stub — dynamic stencil reference is not recorded yet. */
}
649
void
tu_CmdExecuteCommands(VkCommandBuffer commandBuffer,
                      uint32_t commandBufferCount,
                      const VkCommandBuffer *pCmdBuffers)
{
   /* TODO: stub — secondary command buffer execution is not implemented. */
}
656
657 VkResult
658 tu_CreateCommandPool(VkDevice _device,
659 const VkCommandPoolCreateInfo *pCreateInfo,
660 const VkAllocationCallbacks *pAllocator,
661 VkCommandPool *pCmdPool)
662 {
663 TU_FROM_HANDLE(tu_device, device, _device);
664 struct tu_cmd_pool *pool;
665
666 pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
667 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
668 if (pool == NULL)
669 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
670
671 if (pAllocator)
672 pool->alloc = *pAllocator;
673 else
674 pool->alloc = device->alloc;
675
676 list_inithead(&pool->cmd_buffers);
677 list_inithead(&pool->free_cmd_buffers);
678
679 pool->queue_family_index = pCreateInfo->queueFamilyIndex;
680
681 *pCmdPool = tu_cmd_pool_to_handle(pool);
682
683 return VK_SUCCESS;
684 }
685
686 void
687 tu_DestroyCommandPool(VkDevice _device,
688 VkCommandPool commandPool,
689 const VkAllocationCallbacks *pAllocator)
690 {
691 TU_FROM_HANDLE(tu_device, device, _device);
692 TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);
693
694 if (!pool)
695 return;
696
697 list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
698 &pool->cmd_buffers, pool_link)
699 {
700 tu_cmd_buffer_destroy(cmd_buffer);
701 }
702
703 list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
704 &pool->free_cmd_buffers, pool_link)
705 {
706 tu_cmd_buffer_destroy(cmd_buffer);
707 }
708
709 vk_free2(&device->alloc, pAllocator, pool);
710 }
711
712 VkResult
713 tu_ResetCommandPool(VkDevice device,
714 VkCommandPool commandPool,
715 VkCommandPoolResetFlags flags)
716 {
717 TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);
718 VkResult result;
719
720 list_for_each_entry(struct tu_cmd_buffer, cmd_buffer, &pool->cmd_buffers,
721 pool_link)
722 {
723 result = tu_reset_cmd_buffer(cmd_buffer);
724 if (result != VK_SUCCESS)
725 return result;
726 }
727
728 return VK_SUCCESS;
729 }
730
731 void
732 tu_TrimCommandPool(VkDevice device,
733 VkCommandPool commandPool,
734 VkCommandPoolTrimFlagsKHR flags)
735 {
736 TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);
737
738 if (!pool)
739 return;
740
741 list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
742 &pool->free_cmd_buffers, pool_link)
743 {
744 tu_cmd_buffer_destroy(cmd_buffer);
745 }
746 }
747
void
tu_CmdBeginRenderPass(VkCommandBuffer commandBuffer,
                      const VkRenderPassBeginInfo *pRenderPassBegin,
                      VkSubpassContents contents)
{
   /* TODO: stub — render pass begin is not implemented yet. */
}
754
/* KHR2 entry point: forwards to the core implementation. */
void
tu_CmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer,
                          const VkRenderPassBeginInfo *pRenderPassBeginInfo,
                          const VkSubpassBeginInfoKHR *pSubpassBeginInfo)
{
   tu_CmdBeginRenderPass(commandBuffer, pRenderPassBeginInfo,
                         pSubpassBeginInfo->contents);
}
763
void
tu_CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents)
{
   /* TODO: stub — subpass transitions are not implemented yet. */
}
768
/* KHR2 entry point: forwards to the core implementation. */
void
tu_CmdNextSubpass2KHR(VkCommandBuffer commandBuffer,
                      const VkSubpassBeginInfoKHR *pSubpassBeginInfo,
                      const VkSubpassEndInfoKHR *pSubpassEndInfo)
{
   tu_CmdNextSubpass(commandBuffer, pSubpassBeginInfo->contents);
}
776
/* Unified parameter block for all draw variants (direct/indexed/indirect),
 * consumed by tu_draw().  Unused fields are zero.
 */
struct tu_draw_info
{
   /**
    * Number of vertices.
    */
   uint32_t count;

   /**
    * Index of the first vertex.
    */
   int32_t vertex_offset;

   /**
    * First instance id.
    */
   uint32_t first_instance;

   /**
    * Number of instances.
    */
   uint32_t instance_count;

   /**
    * First index (indexed draws only).
    */
   uint32_t first_index;

   /**
    * Whether it's an indexed draw.
    */
   bool indexed;

   /**
    * Indirect draw parameters resource.
    */
   struct tu_buffer *indirect;
   uint64_t indirect_offset;
   uint32_t stride;

   /**
    * Draw count parameters resource.
    */
   struct tu_buffer *count_buffer;
   uint64_t count_buffer_offset;
};
822
/* TODO: stub — common draw emission path, not implemented yet. */
static void
tu_draw(struct tu_cmd_buffer *cmd_buffer, const struct tu_draw_info *info)
{
}
827
828 void
829 tu_CmdDraw(VkCommandBuffer commandBuffer,
830 uint32_t vertexCount,
831 uint32_t instanceCount,
832 uint32_t firstVertex,
833 uint32_t firstInstance)
834 {
835 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
836 struct tu_draw_info info = {};
837
838 info.count = vertexCount;
839 info.instance_count = instanceCount;
840 info.first_instance = firstInstance;
841 info.vertex_offset = firstVertex;
842
843 tu_draw(cmd_buffer, &info);
844 }
845
846 void
847 tu_CmdDrawIndexed(VkCommandBuffer commandBuffer,
848 uint32_t indexCount,
849 uint32_t instanceCount,
850 uint32_t firstIndex,
851 int32_t vertexOffset,
852 uint32_t firstInstance)
853 {
854 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
855 struct tu_draw_info info = {};
856
857 info.indexed = true;
858 info.count = indexCount;
859 info.instance_count = instanceCount;
860 info.first_index = firstIndex;
861 info.vertex_offset = vertexOffset;
862 info.first_instance = firstInstance;
863
864 tu_draw(cmd_buffer, &info);
865 }
866
867 void
868 tu_CmdDrawIndirect(VkCommandBuffer commandBuffer,
869 VkBuffer _buffer,
870 VkDeviceSize offset,
871 uint32_t drawCount,
872 uint32_t stride)
873 {
874 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
875 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
876 struct tu_draw_info info = {};
877
878 info.count = drawCount;
879 info.indirect = buffer;
880 info.indirect_offset = offset;
881 info.stride = stride;
882
883 tu_draw(cmd_buffer, &info);
884 }
885
886 void
887 tu_CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer,
888 VkBuffer _buffer,
889 VkDeviceSize offset,
890 uint32_t drawCount,
891 uint32_t stride)
892 {
893 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
894 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
895 struct tu_draw_info info = {};
896
897 info.indexed = true;
898 info.count = drawCount;
899 info.indirect = buffer;
900 info.indirect_offset = offset;
901 info.stride = stride;
902
903 tu_draw(cmd_buffer, &info);
904 }
905
/* Unified parameter block for all compute dispatch variants, consumed by
 * tu_dispatch().  Unused fields are zero.
 */
struct tu_dispatch_info
{
   /**
    * Determine the layout of the grid (in block units) to be used.
    */
   uint32_t blocks[3];

   /**
    * A starting offset for the grid. If unaligned is set, the offset
    * must still be aligned.
    */
   uint32_t offsets[3];
   /**
    * Whether it's an unaligned compute dispatch.
    */
   bool unaligned;

   /**
    * Indirect compute parameters resource.
    */
   struct tu_buffer *indirect;
   uint64_t indirect_offset;
};
929
/* TODO: stub — common compute dispatch path, not implemented yet. */
static void
tu_dispatch(struct tu_cmd_buffer *cmd_buffer,
            const struct tu_dispatch_info *info)
{
}
935
936 void
937 tu_CmdDispatchBase(VkCommandBuffer commandBuffer,
938 uint32_t base_x,
939 uint32_t base_y,
940 uint32_t base_z,
941 uint32_t x,
942 uint32_t y,
943 uint32_t z)
944 {
945 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
946 struct tu_dispatch_info info = {};
947
948 info.blocks[0] = x;
949 info.blocks[1] = y;
950 info.blocks[2] = z;
951
952 info.offsets[0] = base_x;
953 info.offsets[1] = base_y;
954 info.offsets[2] = base_z;
955 tu_dispatch(cmd_buffer, &info);
956 }
957
/* Plain vkCmdDispatch is a dispatch with a zero base offset. */
void
tu_CmdDispatch(VkCommandBuffer commandBuffer,
               uint32_t x,
               uint32_t y,
               uint32_t z)
{
   tu_CmdDispatchBase(commandBuffer, 0, 0, 0, x, y, z);
}
966
967 void
968 tu_CmdDispatchIndirect(VkCommandBuffer commandBuffer,
969 VkBuffer _buffer,
970 VkDeviceSize offset)
971 {
972 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
973 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
974 struct tu_dispatch_info info = {};
975
976 info.indirect = buffer;
977 info.indirect_offset = offset;
978
979 tu_dispatch(cmd_buffer, &info);
980 }
981
void
tu_CmdEndRenderPass(VkCommandBuffer commandBuffer)
{
   /* TODO: stub — render pass end is not implemented yet. */
}
986
/* KHR2 entry point: forwards to the core implementation. */
void
tu_CmdEndRenderPass2KHR(VkCommandBuffer commandBuffer,
                        const VkSubpassEndInfoKHR *pSubpassEndInfo)
{
   tu_CmdEndRenderPass(commandBuffer);
}
993
/* Shared parameters for pipeline barriers and event waits; eventCount is
 * nonzero only for the vkCmdWaitEvents path.
 */
struct tu_barrier_info
{
   uint32_t eventCount;
   const VkEvent *pEvents;
   VkPipelineStageFlags srcStageMask;
};
1000
/* TODO: stub — common barrier emission path, not implemented yet. */
static void
tu_barrier(struct tu_cmd_buffer *cmd_buffer,
           uint32_t memoryBarrierCount,
           const VkMemoryBarrier *pMemoryBarriers,
           uint32_t bufferMemoryBarrierCount,
           const VkBufferMemoryBarrier *pBufferMemoryBarriers,
           uint32_t imageMemoryBarrierCount,
           const VkImageMemoryBarrier *pImageMemoryBarriers,
           const struct tu_barrier_info *info)
{
}
1012
1013 void
1014 tu_CmdPipelineBarrier(VkCommandBuffer commandBuffer,
1015 VkPipelineStageFlags srcStageMask,
1016 VkPipelineStageFlags destStageMask,
1017 VkBool32 byRegion,
1018 uint32_t memoryBarrierCount,
1019 const VkMemoryBarrier *pMemoryBarriers,
1020 uint32_t bufferMemoryBarrierCount,
1021 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
1022 uint32_t imageMemoryBarrierCount,
1023 const VkImageMemoryBarrier *pImageMemoryBarriers)
1024 {
1025 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
1026 struct tu_barrier_info info;
1027
1028 info.eventCount = 0;
1029 info.pEvents = NULL;
1030 info.srcStageMask = srcStageMask;
1031
1032 tu_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
1033 bufferMemoryBarrierCount, pBufferMemoryBarriers,
1034 imageMemoryBarrierCount, pImageMemoryBarriers, &info);
1035 }
1036
/* TODO: stub — shared path for set/reset event; `value` is 1 for set and
 * 0 for reset (see the callers below).
 */
static void
write_event(struct tu_cmd_buffer *cmd_buffer,
            struct tu_event *event,
            VkPipelineStageFlags stageMask,
            unsigned value)
{
}
1044
/* vkCmdSetEvent: signal the event (value 1) via the shared helper. */
void
tu_CmdSetEvent(VkCommandBuffer commandBuffer,
               VkEvent _event,
               VkPipelineStageFlags stageMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_event, event, _event);

   write_event(cmd_buffer, event, stageMask, 1);
}
1055
/* vkCmdResetEvent: unsignal the event (value 0) via the shared helper. */
void
tu_CmdResetEvent(VkCommandBuffer commandBuffer,
                 VkEvent _event,
                 VkPipelineStageFlags stageMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_event, event, _event);

   write_event(cmd_buffer, event, stageMask, 0);
}
1066
1067 void
1068 tu_CmdWaitEvents(VkCommandBuffer commandBuffer,
1069 uint32_t eventCount,
1070 const VkEvent *pEvents,
1071 VkPipelineStageFlags srcStageMask,
1072 VkPipelineStageFlags dstStageMask,
1073 uint32_t memoryBarrierCount,
1074 const VkMemoryBarrier *pMemoryBarriers,
1075 uint32_t bufferMemoryBarrierCount,
1076 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
1077 uint32_t imageMemoryBarrierCount,
1078 const VkImageMemoryBarrier *pImageMemoryBarriers)
1079 {
1080 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
1081 struct tu_barrier_info info;
1082
1083 info.eventCount = eventCount;
1084 info.pEvents = pEvents;
1085 info.srcStageMask = 0;
1086
1087 tu_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
1088 bufferMemoryBarrierCount, pBufferMemoryBarriers,
1089 imageMemoryBarrierCount, pImageMemoryBarriers, &info);
1090 }
1091
void
tu_CmdSetDeviceMask(VkCommandBuffer commandBuffer, uint32_t deviceMask)
{
   /* No-op */
   /* NOTE(review): presumably single-GPU only, so the mask never selects
    * a different device — confirm if device groups are ever supported. */
}