/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
29 #include "libresoc_private.h"
31 void libresoc_FreeCommandBuffers(
33 VkCommandPool commandPool
,
34 uint32_t commandBufferCount
,
35 const VkCommandBuffer
*pCommandBuffers
)
40 VkResult
libresoc_CreateCommandPool(
42 const VkCommandPoolCreateInfo
* pCreateInfo
,
43 const VkAllocationCallbacks
* pAllocator
,
44 VkCommandPool
* pCmdPool
)
46 LIBRESOC_FROM_HANDLE(libresoc_device
, device
, _device
);
47 struct libresoc_cmd_pool
*pool
;
49 pool
= vk_alloc2(&device
->vk
.alloc
, pAllocator
, sizeof(*pool
), 8,
50 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
52 return vk_error(device
->instance
, VK_ERROR_OUT_OF_HOST_MEMORY
);
54 vk_object_base_init(&device
->vk
, &pool
->base
,
55 VK_OBJECT_TYPE_COMMAND_POOL
);
58 pool
->alloc
= *pAllocator
;
60 pool
->alloc
= device
->vk
.alloc
;
62 list_inithead(&pool
->cmd_buffers
);
63 list_inithead(&pool
->free_cmd_buffers
);
65 pool
->queue_family_index
= pCreateInfo
->queueFamilyIndex
;
67 *pCmdPool
= libresoc_cmd_pool_to_handle(pool
);
73 void libresoc_DestroyCommandPool(
75 VkCommandPool commandPool
,
76 const VkAllocationCallbacks
* pAllocator
)
78 LIBRESOC_FROM_HANDLE(libresoc_device
, device
, _device
);
79 LIBRESOC_FROM_HANDLE(libresoc_cmd_pool
, pool
, commandPool
);
84 // list_for_each_entry_safe(struct libresoc_cmd_buffer, cmd_buffer,
85 // &pool->cmd_buffers, pool_link) {
86 // libresoc_destroy_cmd_buffer(cmd_buffer);
89 // list_for_each_entry_safe(struct libresoc_cmd_buffer, cmd_buffer,
90 // &pool->free_cmd_buffers, pool_link) {
91 // libresoc_destroy_cmd_buffer(cmd_buffer);
94 vk_object_base_finish(&pool
->base
);
95 vk_free2(&device
->vk
.alloc
, pAllocator
, pool
);
98 static VkResult
libresoc_create_cmd_buffer(
99 struct libresoc_device
* device
,
100 struct libresoc_cmd_pool
* pool
,
101 VkCommandBufferLevel level
,
102 VkCommandBuffer
* pCommandBuffer
)
104 struct libresoc_cmd_buffer
*cmd_buffer
;
106 cmd_buffer
= vk_zalloc(&pool
->alloc
, sizeof(*cmd_buffer
), 8,
107 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
108 if (cmd_buffer
== NULL
)
109 return vk_error(device
->instance
, VK_ERROR_OUT_OF_HOST_MEMORY
);
111 vk_object_base_init(&device
->vk
, &cmd_buffer
->base
,
112 VK_OBJECT_TYPE_COMMAND_BUFFER
);
114 cmd_buffer
->device
= device
;
115 cmd_buffer
->pool
= pool
;
116 cmd_buffer
->level
= level
;
118 list_addtail(&cmd_buffer
->pool_link
, &pool
->cmd_buffers
);
119 cmd_buffer
->queue_family_index
= pool
->queue_family_index
;
121 // ring = libresoc_queue_family_to_ring(cmd_buffer->queue_family_index);
123 // cmd_buffer->cs = device->ws->cs_create(device->ws, ring);
124 // if (!cmd_buffer->cs) {
125 // libresoc_destroy_cmd_buffer(cmd_buffer);
126 // return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
129 *pCommandBuffer
= libresoc_cmd_buffer_to_handle(cmd_buffer
);
131 list_inithead(&cmd_buffer
->upload
.list
);
136 VkResult
libresoc_AllocateCommandBuffers(
138 const VkCommandBufferAllocateInfo
*pAllocateInfo
,
139 VkCommandBuffer
*pCommandBuffers
)
141 LIBRESOC_FROM_HANDLE(libresoc_device
, device
, _device
);
142 LIBRESOC_FROM_HANDLE(libresoc_cmd_pool
, pool
, pAllocateInfo
->commandPool
);
144 VkResult result
= VK_SUCCESS
;
147 for (i
= 0; i
< pAllocateInfo
->commandBufferCount
; i
++) {
149 if (!list_is_empty(&pool
->free_cmd_buffers
)) {
150 struct libresoc_cmd_buffer
*cmd_buffer
= list_first_entry(&pool
->free_cmd_buffers
, struct libresoc_cmd_buffer
, pool_link
);
152 list_del(&cmd_buffer
->pool_link
);
153 list_addtail(&cmd_buffer
->pool_link
, &pool
->cmd_buffers
);
155 //result = libresoc_reset_cmd_buffer(cmd_buffer);
156 cmd_buffer
->level
= pAllocateInfo
->level
;
158 pCommandBuffers
[i
] = libresoc_cmd_buffer_to_handle(cmd_buffer
);
160 result
= libresoc_create_cmd_buffer(device
, pool
, pAllocateInfo
->level
,
161 &pCommandBuffers
[i
]);
163 if (result
!= VK_SUCCESS
)
167 // if (result != VK_SUCCESS) {
168 // libresoc_FreeCommandBuffers(_device, pAllocateInfo->commandPool,
169 // i, pCommandBuffers);
171 // /* From the Vulkan 1.0.66 spec:
173 // * "vkAllocateCommandBuffers can be used to create multiple
174 // * command buffers. If the creation of any of those command
175 // * buffers fails, the implementation must destroy all
176 // * successfully created command buffer objects from this
177 // * command, set all entries of the pCommandBuffers array to
178 // * NULL and return the error."
180 // memset(pCommandBuffers, 0,
181 // sizeof(*pCommandBuffers) * pAllocateInfo->commandBufferCount);
187 VkResult
libresoc_BeginCommandBuffer(
188 VkCommandBuffer commandBuffer
,
189 const VkCommandBufferBeginInfo
*pBeginInfo
)
191 LIBRESOC_FROM_HANDLE(libresoc_cmd_buffer
, cmd_buffer
, commandBuffer
);
192 VkResult result
= VK_SUCCESS
;
195 // memset(&cmd_buffer->state, 0, sizeof(cmd_buffer->state));
196 // cmd_buffer->state.last_primitive_reset_en = -1;
197 // cmd_buffer->state.last_index_type = -1;
198 // cmd_buffer->state.last_num_instances = -1;
199 // cmd_buffer->state.last_vertex_offset = -1;
200 // cmd_buffer->state.last_first_instance = -1;
201 // cmd_buffer->state.predication_type = -1;
202 // cmd_buffer->state.last_sx_ps_downconvert = -1;
203 // cmd_buffer->state.last_sx_blend_opt_epsilon = -1;
204 // cmd_buffer->state.last_sx_blend_opt_control = -1;
205 cmd_buffer
->usage_flags
= pBeginInfo
->flags
;
207 // if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY &&
208 // (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
209 // assert(pBeginInfo->pInheritanceInfo);
210 // cmd_buffer->state.framebuffer = libresoc_framebuffer_from_handle(pBeginInfo->pInheritanceInfo->framebuffer);
211 // cmd_buffer->state.pass = libresoc_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);
213 // struct libresoc_subpass *subpass =
214 // &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];
216 // if (cmd_buffer->state.framebuffer) {
217 // result = libresoc_cmd_state_setup_attachments(cmd_buffer, cmd_buffer->state.pass, NULL);
218 // if (result != VK_SUCCESS)
222 // cmd_buffer->state.inherited_pipeline_statistics =
223 // pBeginInfo->pInheritanceInfo->pipelineStatistics;
225 // libresoc_cmd_buffer_set_subpass(cmd_buffer, subpass);
228 // if (unlikely(cmd_buffer->device->trace_bo))
229 // libresoc_cmd_buffer_trace_emit(cmd_buffer);
231 // libresoc_describe_begin_cmd_buffer(cmd_buffer);
233 //cmd_buffer->status = LIBRESOC_CMD_BUFFER_STATUS_RECORDING;
238 void libresoc_CmdPipelineBarrier(
239 VkCommandBuffer commandBuffer
,
240 VkPipelineStageFlags srcStageMask
,
241 VkPipelineStageFlags destStageMask
,
243 uint32_t memoryBarrierCount
,
244 const VkMemoryBarrier
* pMemoryBarriers
,
245 uint32_t bufferMemoryBarrierCount
,
246 const VkBufferMemoryBarrier
* pBufferMemoryBarriers
,
247 uint32_t imageMemoryBarrierCount
,
248 const VkImageMemoryBarrier
* pImageMemoryBarriers
)
250 // LIBRESOC_FROM_HANDLE(libresoc_cmd_buffer, cmd_buffer, commandBuffer);
251 // struct libresoc_barrier_info info;
253 // info.reason = RGP_BARRIER_EXTERNAL_CMD_PIPELINE_BARRIER;
254 // info.eventCount = 0;
255 // info.pEvents = NULL;
256 // info.srcStageMask = srcStageMask;
257 // info.dstStageMask = destStageMask;
259 // libresoc_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
260 // bufferMemoryBarrierCount, pBufferMemoryBarriers,
261 // imageMemoryBarrierCount, pImageMemoryBarriers, &info);
264 VkResult
libresoc_EndCommandBuffer(
265 VkCommandBuffer commandBuffer
)
268 LIBRESOC_FROM_HANDLE(libresoc_cmd_buffer
, cmd_buffer
, commandBuffer
);
270 // if (cmd_buffer->queue_family_index != LIBRESOC_QUEUE_TRANSFER) {
271 // if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX6)
272 // cmd_buffer->state.flush_bits |= LIBRESOC_CMD_FLAG_CS_PARTIAL_FLUSH | LIBRESOC_CMD_FLAG_PS_PARTIAL_FLUSH | LIBRESOC_CMD_FLAG_WB_L2;
274 // /* Make sure to sync all pending active queries at the end of
277 // cmd_buffer->state.flush_bits |= cmd_buffer->active_query_flush_bits;
279 // /* Since NGG streamout uses GDS, we need to make GDS idle when
280 // * we leave the IB, otherwise another process might overwrite
281 // * it while our shaders are busy.
283 // if (cmd_buffer->gds_needed)
284 // cmd_buffer->state.flush_bits |= LIBRESOC_CMD_FLAG_PS_PARTIAL_FLUSH;
286 // si_emit_cache_flush(cmd_buffer);
289 // /* Make sure CP DMA is idle at the end of IBs because the kernel
290 // * doesn't wait for it.
292 // si_cp_dma_wait_for_idle(cmd_buffer);
294 // libresoc_describe_end_cmd_buffer(cmd_buffer);
296 // vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);
297 // vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.subpass_sample_locs);
299 // VkResult result = cmd_buffer->device->ws->cs_finalize(cmd_buffer->cs);
300 // if (result != VK_SUCCESS)
301 // return vk_error(cmd_buffer->device->instance, result);
303 // cmd_buffer->status = LIBRESOC_CMD_BUFFER_STATUS_EXECUTABLE;
305 return cmd_buffer
->record_result
;