turnip: Fill command buffer
[mesa.git] / src / freedreno / vulkan / tu_cmd_buffer.c
1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * based in part on anv driver which is:
6 * Copyright © 2015 Intel Corporation
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
26 */
27
28 #include "tu_private.h"
29
30 #include "vk_format.h"
31 #include "adreno_pm4.xml.h"
32 void
33 tu_bo_list_init(struct tu_bo_list *list)
34 {
35 list->count = list->capacity = 0;
36 list->handles = NULL;
37 }
38
/* Release the handle array owned by the list; the list struct itself is
 * owned by the caller and is not freed.
 */
void
tu_bo_list_destroy(struct tu_bo_list *list)
{
   free(list->handles);
}
44
/* Empty the list for reuse while keeping the allocated handle array
 * (capacity is preserved).
 */
void
tu_bo_list_reset(struct tu_bo_list *list)
{
   list->count = 0;
}
50
51 uint32_t
52 tu_bo_list_add(struct tu_bo_list *list,
53 const struct tu_bo *bo)
54 {
55 uint32_t handle = bo->gem_handle;
56 for (uint32_t i = 0; i < list->count; ++i) {
57 if (list->handles[i] == handle)
58 return i;
59 }
60
61 if (list->count == list->capacity) {
62 uint32_t new_capacity = MAX2(2 * list->count, 16);
63 uint32_t *new_handles = realloc(list->handles, new_capacity * sizeof(uint32_t));
64 if (!new_handles)
65 return ~0;
66 list->handles = new_handles;
67 list->capacity = new_capacity;
68 }
69
70 uint32_t ret = list->count;
71 list->handles[list->count] = handle;
72 ++list->count;
73
74 return ret;
75 }
76
77 static void
78 tu_cmd_stream_init(struct tu_cmd_stream *stream)
79 {
80 stream->start = stream->cur = stream->end = NULL;
81
82 stream->entry_count = stream->entry_capacity = 0;
83 stream->entries = NULL;
84
85 stream->bo_count = stream->bo_capacity = 0;
86 stream->bos = NULL;
87 }
88
89 static void
90 tu_cmd_stream_finish(struct tu_device *dev,
91 struct tu_cmd_stream *stream)
92 {
93 for (uint32_t i = 0; i < stream->bo_count; ++i) {
94 tu_bo_finish(dev, stream->bos[i]);
95 free(stream->bos[i]);
96 }
97
98 free(stream->entries);
99 free(stream->bos);
100 }
101
102 static VkResult
103 tu_cmd_stream_begin(struct tu_device *dev,
104 struct tu_cmd_stream *stream,
105 uint32_t reserve_size)
106 {
107 assert(reserve_size);
108
109 if (stream->end - stream->cur < reserve_size) {
110 if (stream->bo_count == stream->bo_capacity) {
111 uint32_t new_capacity = MAX2(4, 2 * stream->bo_capacity);
112 struct tu_bo **new_bos = realloc(stream->bos,
113 new_capacity * sizeof(struct tu_bo*));
114 if (!new_bos)
115 abort();
116
117 stream->bo_capacity = new_capacity;
118 stream->bos = new_bos;
119 }
120
121 uint32_t new_size = MAX2(16384, reserve_size * sizeof(uint32_t));
122 if (stream->bo_count)
123 new_size = MAX2(new_size, stream->bos[stream->bo_count - 1]->size * 2);
124
125 struct tu_bo *new_bo = malloc(sizeof(struct tu_bo));
126 if (!new_bo)
127 abort();
128
129 VkResult result = tu_bo_init_new(dev, new_bo, new_size);
130 if (result != VK_SUCCESS) {
131 free(new_bo);
132 return result;
133 }
134
135 result = tu_bo_map(dev, new_bo);
136 if (result != VK_SUCCESS) {
137 tu_bo_finish(dev, new_bo);
138 free(new_bo);
139 return result;
140 }
141
142 stream->bos[stream->bo_count] = new_bo;
143 ++stream->bo_count;
144
145 stream->start = stream->cur = (uint32_t*)new_bo->map;
146 stream->end = stream->start + new_bo->size / sizeof(uint32_t);
147 }
148 stream->start = stream->cur;
149
150 return VK_SUCCESS;
151 }
152
153 static VkResult
154 tu_cmd_stream_end(struct tu_cmd_stream *stream)
155 {
156 if (stream->start == stream->cur)
157 return VK_SUCCESS;
158
159 if (stream->entry_capacity == stream->entry_count) {
160 uint32_t new_capacity = MAX2(stream->entry_capacity * 2, 4);
161 struct tu_cmd_stream_entry *new_entries =
162 realloc(stream->entries, new_capacity * sizeof(struct tu_cmd_stream_entry));
163 if (!new_entries)
164 abort(); /* TODO */
165
166 stream->entries = new_entries;
167 stream->entry_capacity = new_capacity;
168 }
169
170 assert (stream->bo_count);
171
172 struct tu_cmd_stream_entry entry;
173 entry.bo = stream->bos[stream->bo_count - 1];
174 entry.size = (stream->cur - stream->start) * sizeof(uint32_t);
175 entry.offset = (stream->start - (uint32_t*)entry.bo->map) * sizeof(uint32_t);
176
177 stream->entries[stream->entry_count] = entry;
178 ++stream->entry_count;
179
180 return VK_SUCCESS;
181 }
182
183 static void
184 tu_cmd_stream_reset(struct tu_device *dev,
185 struct tu_cmd_stream *stream)
186 {
187 for (uint32_t i = 0; i + 1 < stream->bo_count; ++i) {
188 tu_bo_finish(dev, stream->bos[i]);
189 free(stream->bos[i]);
190 }
191
192 if (stream->bo_count) {
193 stream->bos[0] = stream->bos[stream->bo_count - 1];
194 stream->bo_count = 1;
195
196 stream->start = stream->cur = (uint32_t*)stream->bos[0]->map;
197 stream->end = stream->start + stream->bos[0]->size / sizeof(uint32_t);
198 }
199
200 stream->entry_count = 0;
201 }
202
/*
 * Return the bit that makes the total population count of 'val' odd:
 * 1 when 'val' has an even number of set bits, 0 otherwise.
 */
static unsigned
_odd_parity_bit(unsigned val)
{
   unsigned ones = 0;

   /* Kernighan's trick: each iteration clears the lowest set bit. */
   while (val) {
      val &= val - 1;
      ones++;
   }

   return (ones & 1) ^ 1;
}
215
216 static void
217 OUT_PKT7(struct tu_cmd_stream *stream, uint8_t opcode, uint16_t cnt)
218 {
219 *stream->cur++ = CP_TYPE7_PKT | cnt |
220 (_odd_parity_bit(cnt) << 15) |
221 ((opcode & 0x7f) << 16) |
222 ((_odd_parity_bit(opcode) << 23));
223 }
224
225
226
/* Baseline dynamic state used when nothing has been bound yet.  Values
 * mirror the Vulkan defaults: identity-ish raster state, full stencil
 * masks, zero references.
 */
const struct tu_dynamic_state default_dynamic_state = {
   /* Viewport/scissor counts are 0 until a pipeline specifies them. */
   .viewport =
      {
         .count = 0,
      },
   .scissor =
      {
         .count = 0,
      },
   .line_width = 1.0f,
   .depth_bias =
      {
         .bias = 0.0f,
         .clamp = 0.0f,
         .slope = 0.0f,
      },
   .blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
   .depth_bounds =
      {
         .min = 0.0f,
         .max = 1.0f,
      },
   /* Stencil compare/write masks default to all bits enabled. */
   .stencil_compare_mask =
      {
         .front = ~0u,
         .back = ~0u,
      },
   .stencil_write_mask =
      {
         .front = ~0u,
         .back = ~0u,
      },
   .stencil_reference =
      {
         .front = 0u,
         .back = 0u,
      },
};
265
/* Merge dynamic state from 'src' into the command buffer's current state.
 * Each piece of state selected by src->mask is copied only when its value
 * actually differs; 'dest_mask' accumulates which pieces changed, but is
 * not yet consumed (FINISHME — presumably it will mark state dirty for
 * re-emission).
 */
static void UNUSED /* FINISHME */
tu_bind_dynamic_state(struct tu_cmd_buffer *cmd_buffer,
                      const struct tu_dynamic_state *src)
{
   struct tu_dynamic_state *dest = &cmd_buffer->state.dynamic;
   uint32_t copy_mask = src->mask;
   uint32_t dest_mask = 0;

   tu_use_args(cmd_buffer); /* FINISHME */

   /* Make sure to copy the number of viewports/scissors because they can
    * only be specified at pipeline creation time.
    */
   dest->viewport.count = src->viewport.count;
   dest->scissor.count = src->scissor.count;
   dest->discard_rectangle.count = src->discard_rectangle.count;

   /* For array-valued state, compare only the 'count' used elements. */
   if (copy_mask & TU_DYNAMIC_VIEWPORT) {
      if (memcmp(&dest->viewport.viewports, &src->viewport.viewports,
                 src->viewport.count * sizeof(VkViewport))) {
         typed_memcpy(dest->viewport.viewports, src->viewport.viewports,
                      src->viewport.count);
         dest_mask |= TU_DYNAMIC_VIEWPORT;
      }
   }

   if (copy_mask & TU_DYNAMIC_SCISSOR) {
      if (memcmp(&dest->scissor.scissors, &src->scissor.scissors,
                 src->scissor.count * sizeof(VkRect2D))) {
         typed_memcpy(dest->scissor.scissors, src->scissor.scissors,
                      src->scissor.count);
         dest_mask |= TU_DYNAMIC_SCISSOR;
      }
   }

   if (copy_mask & TU_DYNAMIC_LINE_WIDTH) {
      if (dest->line_width != src->line_width) {
         dest->line_width = src->line_width;
         dest_mask |= TU_DYNAMIC_LINE_WIDTH;
      }
   }

   if (copy_mask & TU_DYNAMIC_DEPTH_BIAS) {
      if (memcmp(&dest->depth_bias, &src->depth_bias,
                 sizeof(src->depth_bias))) {
         dest->depth_bias = src->depth_bias;
         dest_mask |= TU_DYNAMIC_DEPTH_BIAS;
      }
   }

   if (copy_mask & TU_DYNAMIC_BLEND_CONSTANTS) {
      if (memcmp(&dest->blend_constants, &src->blend_constants,
                 sizeof(src->blend_constants))) {
         typed_memcpy(dest->blend_constants, src->blend_constants, 4);
         dest_mask |= TU_DYNAMIC_BLEND_CONSTANTS;
      }
   }

   if (copy_mask & TU_DYNAMIC_DEPTH_BOUNDS) {
      if (memcmp(&dest->depth_bounds, &src->depth_bounds,
                 sizeof(src->depth_bounds))) {
         dest->depth_bounds = src->depth_bounds;
         dest_mask |= TU_DYNAMIC_DEPTH_BOUNDS;
      }
   }

   if (copy_mask & TU_DYNAMIC_STENCIL_COMPARE_MASK) {
      if (memcmp(&dest->stencil_compare_mask, &src->stencil_compare_mask,
                 sizeof(src->stencil_compare_mask))) {
         dest->stencil_compare_mask = src->stencil_compare_mask;
         dest_mask |= TU_DYNAMIC_STENCIL_COMPARE_MASK;
      }
   }

   if (copy_mask & TU_DYNAMIC_STENCIL_WRITE_MASK) {
      if (memcmp(&dest->stencil_write_mask, &src->stencil_write_mask,
                 sizeof(src->stencil_write_mask))) {
         dest->stencil_write_mask = src->stencil_write_mask;
         dest_mask |= TU_DYNAMIC_STENCIL_WRITE_MASK;
      }
   }

   if (copy_mask & TU_DYNAMIC_STENCIL_REFERENCE) {
      if (memcmp(&dest->stencil_reference, &src->stencil_reference,
                 sizeof(src->stencil_reference))) {
         dest->stencil_reference = src->stencil_reference;
         dest_mask |= TU_DYNAMIC_STENCIL_REFERENCE;
      }
   }

   if (copy_mask & TU_DYNAMIC_DISCARD_RECTANGLE) {
      if (memcmp(&dest->discard_rectangle.rectangles,
                 &src->discard_rectangle.rectangles,
                 src->discard_rectangle.count * sizeof(VkRect2D))) {
         typed_memcpy(dest->discard_rectangle.rectangles,
                      src->discard_rectangle.rectangles,
                      src->discard_rectangle.count);
         dest_mask |= TU_DYNAMIC_DISCARD_RECTANGLE;
      }
   }
}
367
368 static VkResult
369 tu_create_cmd_buffer(struct tu_device *device,
370 struct tu_cmd_pool *pool,
371 VkCommandBufferLevel level,
372 VkCommandBuffer *pCommandBuffer)
373 {
374 struct tu_cmd_buffer *cmd_buffer;
375 cmd_buffer = vk_zalloc(&pool->alloc, sizeof(*cmd_buffer), 8,
376 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
377 if (cmd_buffer == NULL)
378 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
379
380 cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
381 cmd_buffer->device = device;
382 cmd_buffer->pool = pool;
383 cmd_buffer->level = level;
384
385 if (pool) {
386 list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
387 cmd_buffer->queue_family_index = pool->queue_family_index;
388
389 } else {
390 /* Init the pool_link so we can safely call list_del when we destroy
391 * the command buffer
392 */
393 list_inithead(&cmd_buffer->pool_link);
394 cmd_buffer->queue_family_index = TU_QUEUE_GENERAL;
395 }
396
397 tu_bo_list_init(&cmd_buffer->bo_list);
398 tu_cmd_stream_init(&cmd_buffer->primary_cmd_stream);
399
400 *pCommandBuffer = tu_cmd_buffer_to_handle(cmd_buffer);
401
402 list_inithead(&cmd_buffer->upload.list);
403
404 return VK_SUCCESS;
405 }
406
407 static void
408 tu_cmd_buffer_destroy(struct tu_cmd_buffer *cmd_buffer)
409 {
410 list_del(&cmd_buffer->pool_link);
411
412 for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++)
413 free(cmd_buffer->descriptors[i].push_set.set.mapped_ptr);
414
415 tu_cmd_stream_finish(cmd_buffer->device, &cmd_buffer->primary_cmd_stream);
416 tu_bo_list_destroy(&cmd_buffer->bo_list);
417 vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
418 }
419
420 static VkResult
421 tu_reset_cmd_buffer(struct tu_cmd_buffer *cmd_buffer)
422 {
423 cmd_buffer->record_result = VK_SUCCESS;
424
425 tu_bo_list_reset(&cmd_buffer->bo_list);
426 tu_cmd_stream_reset(cmd_buffer->device, &cmd_buffer->primary_cmd_stream);
427
428 for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++) {
429 cmd_buffer->descriptors[i].dirty = 0;
430 cmd_buffer->descriptors[i].valid = 0;
431 cmd_buffer->descriptors[i].push_dirty = false;
432 }
433
434 cmd_buffer->status = TU_CMD_BUFFER_STATUS_INITIAL;
435
436 return cmd_buffer->record_result;
437 }
438
/* vkAllocateCommandBuffers: hand out command buffers, preferring recycled
 * ones from the pool's free list; on any failure, destroy everything
 * allocated so far and NULL the output array, per the spec.
 */
VkResult
tu_AllocateCommandBuffers(VkDevice _device,
                          const VkCommandBufferAllocateInfo *pAllocateInfo,
                          VkCommandBuffer *pCommandBuffers)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_cmd_pool, pool, pAllocateInfo->commandPool);

   VkResult result = VK_SUCCESS;
   uint32_t i;

   for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {

      /* Reuse a previously freed command buffer when one is available. */
      if (!list_empty(&pool->free_cmd_buffers)) {
         struct tu_cmd_buffer *cmd_buffer = list_first_entry(
            &pool->free_cmd_buffers, struct tu_cmd_buffer, pool_link);

         list_del(&cmd_buffer->pool_link);
         list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);

         result = tu_reset_cmd_buffer(cmd_buffer);
         cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
         cmd_buffer->level = pAllocateInfo->level;

         pCommandBuffers[i] = tu_cmd_buffer_to_handle(cmd_buffer);
      } else {
         result = tu_create_cmd_buffer(device, pool, pAllocateInfo->level,
                                       &pCommandBuffers[i]);
      }
      if (result != VK_SUCCESS)
         break;
   }

   if (result != VK_SUCCESS) {
      /* 'i' buffers were created before the failure; free exactly those. */
      tu_FreeCommandBuffers(_device, pAllocateInfo->commandPool, i,
                            pCommandBuffers);

      /* From the Vulkan 1.0.66 spec:
       *
       * "vkAllocateCommandBuffers can be used to create multiple
       *  command buffers. If the creation of any of those command
       *  buffers fails, the implementation must destroy all
       *  successfully created command buffer objects from this
       *  command, set all entries of the pCommandBuffers array to
       *  NULL and return the error."
       */
      memset(pCommandBuffers, 0,
             sizeof(*pCommandBuffers) * pAllocateInfo->commandBufferCount);
   }

   return result;
}
491
492 void
493 tu_FreeCommandBuffers(VkDevice device,
494 VkCommandPool commandPool,
495 uint32_t commandBufferCount,
496 const VkCommandBuffer *pCommandBuffers)
497 {
498 for (uint32_t i = 0; i < commandBufferCount; i++) {
499 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, pCommandBuffers[i]);
500
501 if (cmd_buffer) {
502 if (cmd_buffer->pool) {
503 list_del(&cmd_buffer->pool_link);
504 list_addtail(&cmd_buffer->pool_link,
505 &cmd_buffer->pool->free_cmd_buffers);
506 } else
507 tu_cmd_buffer_destroy(cmd_buffer);
508 }
509 }
510 }
511
512 VkResult
513 tu_ResetCommandBuffer(VkCommandBuffer commandBuffer,
514 VkCommandBufferResetFlags flags)
515 {
516 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
517 return tu_reset_cmd_buffer(cmd_buffer);
518 }
519
520 VkResult
521 tu_BeginCommandBuffer(VkCommandBuffer commandBuffer,
522 const VkCommandBufferBeginInfo *pBeginInfo)
523 {
524 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
525 VkResult result = VK_SUCCESS;
526
527 if (cmd_buffer->status != TU_CMD_BUFFER_STATUS_INITIAL) {
528 /* If the command buffer has already been resetted with
529 * vkResetCommandBuffer, no need to do it again.
530 */
531 result = tu_reset_cmd_buffer(cmd_buffer);
532 if (result != VK_SUCCESS)
533 return result;
534 }
535
536 memset(&cmd_buffer->state, 0, sizeof(cmd_buffer->state));
537 cmd_buffer->usage_flags = pBeginInfo->flags;
538
539 /* setup initial configuration into command buffer */
540 if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
541 switch (cmd_buffer->queue_family_index) {
542 case TU_QUEUE_GENERAL:
543 /* init */
544 break;
545 default:
546 break;
547 }
548 }
549
550 cmd_buffer->status = TU_CMD_BUFFER_STATUS_RECORDING;
551
552 result = tu_cmd_stream_begin(cmd_buffer->device,
553 &cmd_buffer->primary_cmd_stream, 4096);
554
555 /* Put some stuff in so we do not have empty command buffers. */
556 OUT_PKT7(&cmd_buffer->primary_cmd_stream, CP_NOP, 4);
557 *cmd_buffer->primary_cmd_stream.cur++ = 0;
558 *cmd_buffer->primary_cmd_stream.cur++ = 0;
559 *cmd_buffer->primary_cmd_stream.cur++ = 0;
560 *cmd_buffer->primary_cmd_stream.cur++ = 0;
561 return result;
562 }
563
/* TODO: stub — vertex buffer bindings are not recorded yet. */
void
tu_CmdBindVertexBuffers(VkCommandBuffer commandBuffer,
                        uint32_t firstBinding,
                        uint32_t bindingCount,
                        const VkBuffer *pBuffers,
                        const VkDeviceSize *pOffsets)
{
}
572
/* TODO: stub — index buffer binding is not recorded yet. */
void
tu_CmdBindIndexBuffer(VkCommandBuffer commandBuffer,
                      VkBuffer buffer,
                      VkDeviceSize offset,
                      VkIndexType indexType)
{
}
580
/* TODO: stub — descriptor set binding is not implemented yet. */
void
tu_CmdBindDescriptorSets(VkCommandBuffer commandBuffer,
                         VkPipelineBindPoint pipelineBindPoint,
                         VkPipelineLayout _layout,
                         uint32_t firstSet,
                         uint32_t descriptorSetCount,
                         const VkDescriptorSet *pDescriptorSets,
                         uint32_t dynamicOffsetCount,
                         const uint32_t *pDynamicOffsets)
{
}
592
/* TODO: stub — push constant updates are not recorded yet. */
void
tu_CmdPushConstants(VkCommandBuffer commandBuffer,
                    VkPipelineLayout layout,
                    VkShaderStageFlags stageFlags,
                    uint32_t offset,
                    uint32_t size,
                    const void *pValues)
{
}
602
603 VkResult
604 tu_EndCommandBuffer(VkCommandBuffer commandBuffer)
605 {
606 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
607
608 tu_cmd_stream_end(&cmd_buffer->primary_cmd_stream);
609 cmd_buffer->status = TU_CMD_BUFFER_STATUS_EXECUTABLE;
610
611 return cmd_buffer->record_result;
612 }
613
/* TODO: stub — pipeline binding is not implemented yet. */
void
tu_CmdBindPipeline(VkCommandBuffer commandBuffer,
                   VkPipelineBindPoint pipelineBindPoint,
                   VkPipeline _pipeline)
{
}
620
/* TODO: stub — dynamic viewport state is not recorded yet. */
void
tu_CmdSetViewport(VkCommandBuffer commandBuffer,
                  uint32_t firstViewport,
                  uint32_t viewportCount,
                  const VkViewport *pViewports)
{
}
628
/* TODO: stub — dynamic scissor state is not recorded yet. */
void
tu_CmdSetScissor(VkCommandBuffer commandBuffer,
                 uint32_t firstScissor,
                 uint32_t scissorCount,
                 const VkRect2D *pScissors)
{
}
636
/* TODO: stub — dynamic line width is not recorded yet. */
void
tu_CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth)
{
}
641
/* TODO: stub — dynamic depth bias is not recorded yet. */
void
tu_CmdSetDepthBias(VkCommandBuffer commandBuffer,
                   float depthBiasConstantFactor,
                   float depthBiasClamp,
                   float depthBiasSlopeFactor)
{
}
649
/* TODO: stub — dynamic blend constants are not recorded yet. */
void
tu_CmdSetBlendConstants(VkCommandBuffer commandBuffer,
                        const float blendConstants[4])
{
}
655
/* TODO: stub — dynamic depth bounds are not recorded yet. */
void
tu_CmdSetDepthBounds(VkCommandBuffer commandBuffer,
                     float minDepthBounds,
                     float maxDepthBounds)
{
}
662
/* TODO: stub — dynamic stencil compare mask is not recorded yet. */
void
tu_CmdSetStencilCompareMask(VkCommandBuffer commandBuffer,
                            VkStencilFaceFlags faceMask,
                            uint32_t compareMask)
{
}
669
/* TODO: stub — dynamic stencil write mask is not recorded yet. */
void
tu_CmdSetStencilWriteMask(VkCommandBuffer commandBuffer,
                          VkStencilFaceFlags faceMask,
                          uint32_t writeMask)
{
}
676
/* TODO: stub — dynamic stencil reference is not recorded yet. */
void
tu_CmdSetStencilReference(VkCommandBuffer commandBuffer,
                          VkStencilFaceFlags faceMask,
                          uint32_t reference)
{
}
683
/* TODO: stub — secondary command buffer execution is not implemented. */
void
tu_CmdExecuteCommands(VkCommandBuffer commandBuffer,
                      uint32_t commandBufferCount,
                      const VkCommandBuffer *pCmdBuffers)
{
}
690
691 VkResult
692 tu_CreateCommandPool(VkDevice _device,
693 const VkCommandPoolCreateInfo *pCreateInfo,
694 const VkAllocationCallbacks *pAllocator,
695 VkCommandPool *pCmdPool)
696 {
697 TU_FROM_HANDLE(tu_device, device, _device);
698 struct tu_cmd_pool *pool;
699
700 pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
701 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
702 if (pool == NULL)
703 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
704
705 if (pAllocator)
706 pool->alloc = *pAllocator;
707 else
708 pool->alloc = device->alloc;
709
710 list_inithead(&pool->cmd_buffers);
711 list_inithead(&pool->free_cmd_buffers);
712
713 pool->queue_family_index = pCreateInfo->queueFamilyIndex;
714
715 *pCmdPool = tu_cmd_pool_to_handle(pool);
716
717 return VK_SUCCESS;
718 }
719
720 void
721 tu_DestroyCommandPool(VkDevice _device,
722 VkCommandPool commandPool,
723 const VkAllocationCallbacks *pAllocator)
724 {
725 TU_FROM_HANDLE(tu_device, device, _device);
726 TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);
727
728 if (!pool)
729 return;
730
731 list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
732 &pool->cmd_buffers, pool_link)
733 {
734 tu_cmd_buffer_destroy(cmd_buffer);
735 }
736
737 list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
738 &pool->free_cmd_buffers, pool_link)
739 {
740 tu_cmd_buffer_destroy(cmd_buffer);
741 }
742
743 vk_free2(&device->alloc, pAllocator, pool);
744 }
745
746 VkResult
747 tu_ResetCommandPool(VkDevice device,
748 VkCommandPool commandPool,
749 VkCommandPoolResetFlags flags)
750 {
751 TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);
752 VkResult result;
753
754 list_for_each_entry(struct tu_cmd_buffer, cmd_buffer, &pool->cmd_buffers,
755 pool_link)
756 {
757 result = tu_reset_cmd_buffer(cmd_buffer);
758 if (result != VK_SUCCESS)
759 return result;
760 }
761
762 return VK_SUCCESS;
763 }
764
765 void
766 tu_TrimCommandPool(VkDevice device,
767 VkCommandPool commandPool,
768 VkCommandPoolTrimFlagsKHR flags)
769 {
770 TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);
771
772 if (!pool)
773 return;
774
775 list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
776 &pool->free_cmd_buffers, pool_link)
777 {
778 tu_cmd_buffer_destroy(cmd_buffer);
779 }
780 }
781
/* TODO: stub — render pass handling is not implemented yet. */
void
tu_CmdBeginRenderPass(VkCommandBuffer commandBuffer,
                      const VkRenderPassBeginInfo *pRenderPassBegin,
                      VkSubpassContents contents)
{
}
788
789 void
790 tu_CmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer,
791 const VkRenderPassBeginInfo *pRenderPassBeginInfo,
792 const VkSubpassBeginInfoKHR *pSubpassBeginInfo)
793 {
794 tu_CmdBeginRenderPass(commandBuffer, pRenderPassBeginInfo,
795 pSubpassBeginInfo->contents);
796 }
797
/* TODO: stub — subpass transitions are not implemented yet. */
void
tu_CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents)
{
}
802
803 void
804 tu_CmdNextSubpass2KHR(VkCommandBuffer commandBuffer,
805 const VkSubpassBeginInfoKHR *pSubpassBeginInfo,
806 const VkSubpassEndInfoKHR *pSubpassEndInfo)
807 {
808 tu_CmdNextSubpass(commandBuffer, pSubpassBeginInfo->contents);
809 }
810
/* Unified description of a draw, shared by the direct, indexed, and
 * indirect vkCmdDraw* entry points.
 */
struct tu_draw_info
{
   /**
    * Number of vertices (or indices for indexed draws, or draws for
    * indirect draws).
    */
   uint32_t count;

   /**
    * Index of the first vertex.
    */
   int32_t vertex_offset;

   /**
    * First instance id.
    */
   uint32_t first_instance;

   /**
    * Number of instances.
    */
   uint32_t instance_count;

   /**
    * First index (indexed draws only).
    */
   uint32_t first_index;

   /**
    * Whether it's an indexed draw.
    */
   bool indexed;

   /**
    * Indirect draw parameters resource (NULL for direct draws).
    */
   struct tu_buffer *indirect;
   uint64_t indirect_offset;
   uint32_t stride;

   /**
    * Draw count parameters resource.
    */
   struct tu_buffer *count_buffer;
   uint64_t count_buffer_offset;
};
856
/* TODO: stub — common draw emission path, not implemented yet. */
static void
tu_draw(struct tu_cmd_buffer *cmd_buffer, const struct tu_draw_info *info)
{
}
861
862 void
863 tu_CmdDraw(VkCommandBuffer commandBuffer,
864 uint32_t vertexCount,
865 uint32_t instanceCount,
866 uint32_t firstVertex,
867 uint32_t firstInstance)
868 {
869 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
870 struct tu_draw_info info = {};
871
872 info.count = vertexCount;
873 info.instance_count = instanceCount;
874 info.first_instance = firstInstance;
875 info.vertex_offset = firstVertex;
876
877 tu_draw(cmd_buffer, &info);
878 }
879
880 void
881 tu_CmdDrawIndexed(VkCommandBuffer commandBuffer,
882 uint32_t indexCount,
883 uint32_t instanceCount,
884 uint32_t firstIndex,
885 int32_t vertexOffset,
886 uint32_t firstInstance)
887 {
888 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
889 struct tu_draw_info info = {};
890
891 info.indexed = true;
892 info.count = indexCount;
893 info.instance_count = instanceCount;
894 info.first_index = firstIndex;
895 info.vertex_offset = vertexOffset;
896 info.first_instance = firstInstance;
897
898 tu_draw(cmd_buffer, &info);
899 }
900
901 void
902 tu_CmdDrawIndirect(VkCommandBuffer commandBuffer,
903 VkBuffer _buffer,
904 VkDeviceSize offset,
905 uint32_t drawCount,
906 uint32_t stride)
907 {
908 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
909 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
910 struct tu_draw_info info = {};
911
912 info.count = drawCount;
913 info.indirect = buffer;
914 info.indirect_offset = offset;
915 info.stride = stride;
916
917 tu_draw(cmd_buffer, &info);
918 }
919
920 void
921 tu_CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer,
922 VkBuffer _buffer,
923 VkDeviceSize offset,
924 uint32_t drawCount,
925 uint32_t stride)
926 {
927 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
928 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
929 struct tu_draw_info info = {};
930
931 info.indexed = true;
932 info.count = drawCount;
933 info.indirect = buffer;
934 info.indirect_offset = offset;
935 info.stride = stride;
936
937 tu_draw(cmd_buffer, &info);
938 }
939
/* Unified description of a compute dispatch, shared by the direct, base,
 * and indirect vkCmdDispatch* entry points.
 */
struct tu_dispatch_info
{
   /**
    * Determine the layout of the grid (in block units) to be used.
    */
   uint32_t blocks[3];

   /**
    * A starting offset for the grid. If unaligned is set, the offset
    * must still be aligned.
    */
   uint32_t offsets[3];
   /**
    * Whether it's an unaligned compute dispatch.
    */
   bool unaligned;

   /**
    * Indirect compute parameters resource (NULL for direct dispatch).
    */
   struct tu_buffer *indirect;
   uint64_t indirect_offset;
};
963
/* TODO: stub — common compute dispatch path, not implemented yet. */
static void
tu_dispatch(struct tu_cmd_buffer *cmd_buffer,
            const struct tu_dispatch_info *info)
{
}
969
970 void
971 tu_CmdDispatchBase(VkCommandBuffer commandBuffer,
972 uint32_t base_x,
973 uint32_t base_y,
974 uint32_t base_z,
975 uint32_t x,
976 uint32_t y,
977 uint32_t z)
978 {
979 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
980 struct tu_dispatch_info info = {};
981
982 info.blocks[0] = x;
983 info.blocks[1] = y;
984 info.blocks[2] = z;
985
986 info.offsets[0] = base_x;
987 info.offsets[1] = base_y;
988 info.offsets[2] = base_z;
989 tu_dispatch(cmd_buffer, &info);
990 }
991
/* vkCmdDispatch is just vkCmdDispatchBase with a zero base offset. */
void
tu_CmdDispatch(VkCommandBuffer commandBuffer,
               uint32_t x,
               uint32_t y,
               uint32_t z)
{
   tu_CmdDispatchBase(commandBuffer, 0, 0, 0, x, y, z);
}
1000
1001 void
1002 tu_CmdDispatchIndirect(VkCommandBuffer commandBuffer,
1003 VkBuffer _buffer,
1004 VkDeviceSize offset)
1005 {
1006 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
1007 TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
1008 struct tu_dispatch_info info = {};
1009
1010 info.indirect = buffer;
1011 info.indirect_offset = offset;
1012
1013 tu_dispatch(cmd_buffer, &info);
1014 }
1015
/* TODO: stub — render pass handling is not implemented yet. */
void
tu_CmdEndRenderPass(VkCommandBuffer commandBuffer)
{
}
1020
/* KHR2 variant forwards to the core entry point; the end-info struct is
 * unused here.
 */
void
tu_CmdEndRenderPass2KHR(VkCommandBuffer commandBuffer,
                        const VkSubpassEndInfoKHR *pSubpassEndInfo)
{
   tu_CmdEndRenderPass(commandBuffer);
}
1027
/* Common parameters shared by vkCmdPipelineBarrier and vkCmdWaitEvents. */
struct tu_barrier_info
{
   uint32_t eventCount;          /* events to wait on (vkCmdWaitEvents only) */
   const VkEvent *pEvents;
   VkPipelineStageFlags srcStageMask;
};
1034
/* TODO: stub — common barrier emission path, not implemented yet. */
static void
tu_barrier(struct tu_cmd_buffer *cmd_buffer,
           uint32_t memoryBarrierCount,
           const VkMemoryBarrier *pMemoryBarriers,
           uint32_t bufferMemoryBarrierCount,
           const VkBufferMemoryBarrier *pBufferMemoryBarriers,
           uint32_t imageMemoryBarrierCount,
           const VkImageMemoryBarrier *pImageMemoryBarriers,
           const struct tu_barrier_info *info)
{
}
1046
1047 void
1048 tu_CmdPipelineBarrier(VkCommandBuffer commandBuffer,
1049 VkPipelineStageFlags srcStageMask,
1050 VkPipelineStageFlags destStageMask,
1051 VkBool32 byRegion,
1052 uint32_t memoryBarrierCount,
1053 const VkMemoryBarrier *pMemoryBarriers,
1054 uint32_t bufferMemoryBarrierCount,
1055 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
1056 uint32_t imageMemoryBarrierCount,
1057 const VkImageMemoryBarrier *pImageMemoryBarriers)
1058 {
1059 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
1060 struct tu_barrier_info info;
1061
1062 info.eventCount = 0;
1063 info.pEvents = NULL;
1064 info.srcStageMask = srcStageMask;
1065
1066 tu_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
1067 bufferMemoryBarrierCount, pBufferMemoryBarriers,
1068 imageMemoryBarrierCount, pImageMemoryBarriers, &info);
1069 }
1070
/* TODO: stub — event set/reset emission, not implemented yet.
 * 'value' is 1 for set, 0 for reset (see the callers below).
 */
static void
write_event(struct tu_cmd_buffer *cmd_buffer,
            struct tu_event *event,
            VkPipelineStageFlags stageMask,
            unsigned value)
{
}
1078
/* vkCmdSetEvent: setting an event writes the value 1. */
void
tu_CmdSetEvent(VkCommandBuffer commandBuffer,
               VkEvent _event,
               VkPipelineStageFlags stageMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_event, event, _event);

   write_event(cmd_buffer, event, stageMask, 1);
}
1089
/* vkCmdResetEvent: resetting an event writes the value 0. */
void
tu_CmdResetEvent(VkCommandBuffer commandBuffer,
                 VkEvent _event,
                 VkPipelineStageFlags stageMask)
{
   TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
   TU_FROM_HANDLE(tu_event, event, _event);

   write_event(cmd_buffer, event, stageMask, 0);
}
1100
1101 void
1102 tu_CmdWaitEvents(VkCommandBuffer commandBuffer,
1103 uint32_t eventCount,
1104 const VkEvent *pEvents,
1105 VkPipelineStageFlags srcStageMask,
1106 VkPipelineStageFlags dstStageMask,
1107 uint32_t memoryBarrierCount,
1108 const VkMemoryBarrier *pMemoryBarriers,
1109 uint32_t bufferMemoryBarrierCount,
1110 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
1111 uint32_t imageMemoryBarrierCount,
1112 const VkImageMemoryBarrier *pImageMemoryBarriers)
1113 {
1114 TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
1115 struct tu_barrier_info info;
1116
1117 info.eventCount = eventCount;
1118 info.pEvents = pEvents;
1119 info.srcStageMask = 0;
1120
1121 tu_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
1122 bufferMemoryBarrierCount, pBufferMemoryBarriers,
1123 imageMemoryBarrierCount, pImageMemoryBarriers, &info);
1124 }
1125
/* vkCmdSetDeviceMask: single-physical-device implementation, so the mask
 * has nothing to select.
 */
void
tu_CmdSetDeviceMask(VkCommandBuffer commandBuffer, uint32_t deviceMask)
{
   /* No-op */
}