Added a few more stubs so that control reaches DestroyDevice().
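For reference, a minimal host-side sequence that exercises the command-buffer entry points in this file and then lets teardown proceed to vkDestroyDevice() could look like the sketch below. This is only an illustration: the record_and_teardown() helper is hypothetical, and instance/device creation and queue-family selection are elided.

#include <vulkan/vulkan.h>

/* Hypothetical helper: runs the command-buffer lifecycle against an already
 * created VkDevice, then frees everything so the caller can go on to
 * vkDestroyDevice(). The stubs in this file make the recording calls no-ops
 * but let the sequence complete.
 */
VkResult record_and_teardown(VkDevice device, uint32_t queue_family)
{
        VkCommandPool pool;
        const VkCommandPoolCreateInfo pool_info = {
                .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
                .queueFamilyIndex = queue_family,
        };
        VkResult result = vkCreateCommandPool(device, &pool_info, NULL, &pool);
        if (result != VK_SUCCESS)
                return result;

        VkCommandBuffer cmd;
        const VkCommandBufferAllocateInfo alloc_info = {
                .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
                .commandPool = pool,
                .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
                .commandBufferCount = 1,
        };
        result = vkAllocateCommandBuffers(device, &alloc_info, &cmd);
        if (result == VK_SUCCESS) {
                const VkCommandBufferBeginInfo begin_info = {
                        .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
                };
                result = vkBeginCommandBuffer(cmd, &begin_info);
                if (result == VK_SUCCESS)
                        result = vkEndCommandBuffer(cmd); /* nothing recorded yet */

                vkFreeCommandBuffers(device, pool, 1, &cmd);
        }

        vkDestroyCommandPool(device, pool, NULL);
        return result; /* caller may now call vkDestroyDevice(device, NULL) */
}

Each call in the sketch should end up in the corresponding libresoc_* entry point in this file; since the stubs record nothing, the sequence only verifies that pool/buffer creation, allocation, and teardown succeed.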
[mesa.git] src/libre-soc/vulkan/libresoc_cmd_buffer.c
/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "libresoc_private.h"

/* TODO: stub */
void libresoc_CmdEndRenderPass(
        VkCommandBuffer commandBuffer)
{}

/* TODO: stub */
void libresoc_CmdDraw(
        VkCommandBuffer commandBuffer,
        uint32_t vertexCount,
        uint32_t instanceCount,
        uint32_t firstVertex,
        uint32_t firstInstance)
{
}

/* TODO: stub */
void libresoc_CmdBindPipeline(
        VkCommandBuffer commandBuffer,
        VkPipelineBindPoint pipelineBindPoint,
        VkPipeline _pipeline)
{
}

/* TODO: stub */
void libresoc_CmdBeginRenderPass(
        VkCommandBuffer commandBuffer,
        const VkRenderPassBeginInfo* pRenderPassBegin,
        VkSubpassContents contents)
{
}

void libresoc_FreeCommandBuffers(
        VkDevice device,
        VkCommandPool commandPool,
        uint32_t commandBufferCount,
        const VkCommandBuffer *pCommandBuffers)
{
        /* TODO: stub */
}

VkResult libresoc_CreateCommandPool(
        VkDevice _device,
        const VkCommandPoolCreateInfo* pCreateInfo,
        const VkAllocationCallbacks* pAllocator,
        VkCommandPool* pCmdPool)
{
        LIBRESOC_FROM_HANDLE(libresoc_device, device, _device);
        struct libresoc_cmd_pool *pool;

        pool = vk_alloc2(&device->vk.alloc, pAllocator, sizeof(*pool), 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
        if (pool == NULL)
                return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

        vk_object_base_init(&device->vk, &pool->base,
                            VK_OBJECT_TYPE_COMMAND_POOL);

        if (pAllocator)
                pool->alloc = *pAllocator;
        else
                pool->alloc = device->vk.alloc;

        list_inithead(&pool->cmd_buffers);
        list_inithead(&pool->free_cmd_buffers);

        pool->queue_family_index = pCreateInfo->queueFamilyIndex;

        *pCmdPool = libresoc_cmd_pool_to_handle(pool);

        return VK_SUCCESS;
}

void libresoc_DestroyCommandPool(
        VkDevice _device,
        VkCommandPool commandPool,
        const VkAllocationCallbacks* pAllocator)
{
        LIBRESOC_FROM_HANDLE(libresoc_device, device, _device);
        LIBRESOC_FROM_HANDLE(libresoc_cmd_pool, pool, commandPool);

        if (!pool)
                return;

        // list_for_each_entry_safe(struct libresoc_cmd_buffer, cmd_buffer,
        //                          &pool->cmd_buffers, pool_link) {
        //         libresoc_destroy_cmd_buffer(cmd_buffer);
        // }

        // list_for_each_entry_safe(struct libresoc_cmd_buffer, cmd_buffer,
        //                          &pool->free_cmd_buffers, pool_link) {
        //         libresoc_destroy_cmd_buffer(cmd_buffer);
        // }

        vk_object_base_finish(&pool->base);
        vk_free2(&device->vk.alloc, pAllocator, pool);
}

static VkResult libresoc_create_cmd_buffer(
        struct libresoc_device *device,
        struct libresoc_cmd_pool *pool,
        VkCommandBufferLevel level,
        VkCommandBuffer *pCommandBuffer)
{
        struct libresoc_cmd_buffer *cmd_buffer;

        cmd_buffer = vk_zalloc(&pool->alloc, sizeof(*cmd_buffer), 8,
                               VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
        if (cmd_buffer == NULL)
                return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

        vk_object_base_init(&device->vk, &cmd_buffer->base,
                            VK_OBJECT_TYPE_COMMAND_BUFFER);

        cmd_buffer->device = device;
        cmd_buffer->pool = pool;
        cmd_buffer->level = level;

        list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
        cmd_buffer->queue_family_index = pool->queue_family_index;

        /* TODO: pick the ring for this queue family and create the winsys
         * command stream.
         */
        // unsigned ring = libresoc_queue_family_to_ring(cmd_buffer->queue_family_index);

        // cmd_buffer->cs = device->ws->cs_create(device->ws, ring);
        // if (!cmd_buffer->cs) {
        //         libresoc_destroy_cmd_buffer(cmd_buffer);
        //         return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
        // }

        *pCommandBuffer = libresoc_cmd_buffer_to_handle(cmd_buffer);

        list_inithead(&cmd_buffer->upload.list);

        return VK_SUCCESS;
}

VkResult libresoc_AllocateCommandBuffers(
        VkDevice _device,
        const VkCommandBufferAllocateInfo *pAllocateInfo,
        VkCommandBuffer *pCommandBuffers)
{
        LIBRESOC_FROM_HANDLE(libresoc_device, device, _device);
        LIBRESOC_FROM_HANDLE(libresoc_cmd_pool, pool, pAllocateInfo->commandPool);

        VkResult result = VK_SUCCESS;
        uint32_t i;

        for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {

                if (!list_is_empty(&pool->free_cmd_buffers)) {
                        struct libresoc_cmd_buffer *cmd_buffer =
                                list_first_entry(&pool->free_cmd_buffers,
                                                 struct libresoc_cmd_buffer, pool_link);

                        list_del(&cmd_buffer->pool_link);
                        list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);

                        //result = libresoc_reset_cmd_buffer(cmd_buffer);
                        cmd_buffer->level = pAllocateInfo->level;

                        pCommandBuffers[i] = libresoc_cmd_buffer_to_handle(cmd_buffer);
                } else {
                        result = libresoc_create_cmd_buffer(device, pool, pAllocateInfo->level,
                                                            &pCommandBuffers[i]);
                }
                if (result != VK_SUCCESS)
                        break;
        }

        // if (result != VK_SUCCESS) {
        //         libresoc_FreeCommandBuffers(_device, pAllocateInfo->commandPool,
        //                                     i, pCommandBuffers);

        //         /* From the Vulkan 1.0.66 spec:
        //          *
        //          *    "vkAllocateCommandBuffers can be used to create multiple
        //          *     command buffers. If the creation of any of those command
        //          *     buffers fails, the implementation must destroy all
        //          *     successfully created command buffer objects from this
        //          *     command, set all entries of the pCommandBuffers array to
        //          *     NULL and return the error."
        //          */
        //         memset(pCommandBuffers, 0,
        //                sizeof(*pCommandBuffers) * pAllocateInfo->commandBufferCount);
        // }

        return result;
}

VkResult libresoc_BeginCommandBuffer(
        VkCommandBuffer commandBuffer,
        const VkCommandBufferBeginInfo *pBeginInfo)
{
        LIBRESOC_FROM_HANDLE(libresoc_cmd_buffer, cmd_buffer, commandBuffer);
        VkResult result = VK_SUCCESS;

        // memset(&cmd_buffer->state, 0, sizeof(cmd_buffer->state));
        // cmd_buffer->state.last_primitive_reset_en = -1;
        // cmd_buffer->state.last_index_type = -1;
        // cmd_buffer->state.last_num_instances = -1;
        // cmd_buffer->state.last_vertex_offset = -1;
        // cmd_buffer->state.last_first_instance = -1;
        // cmd_buffer->state.predication_type = -1;
        // cmd_buffer->state.last_sx_ps_downconvert = -1;
        // cmd_buffer->state.last_sx_blend_opt_epsilon = -1;
        // cmd_buffer->state.last_sx_blend_opt_control = -1;
        cmd_buffer->usage_flags = pBeginInfo->flags;

        // if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY &&
        //     (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
        //         assert(pBeginInfo->pInheritanceInfo);
        //         cmd_buffer->state.framebuffer = libresoc_framebuffer_from_handle(pBeginInfo->pInheritanceInfo->framebuffer);
        //         cmd_buffer->state.pass = libresoc_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);

        //         struct libresoc_subpass *subpass =
        //                 &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];

        //         if (cmd_buffer->state.framebuffer) {
        //                 result = libresoc_cmd_state_setup_attachments(cmd_buffer, cmd_buffer->state.pass, NULL);
        //                 if (result != VK_SUCCESS)
        //                         return result;
        //         }

        //         cmd_buffer->state.inherited_pipeline_statistics =
        //                 pBeginInfo->pInheritanceInfo->pipelineStatistics;

        //         libresoc_cmd_buffer_set_subpass(cmd_buffer, subpass);
        // }

        // if (unlikely(cmd_buffer->device->trace_bo))
        //         libresoc_cmd_buffer_trace_emit(cmd_buffer);

        // libresoc_describe_begin_cmd_buffer(cmd_buffer);

        //cmd_buffer->status = LIBRESOC_CMD_BUFFER_STATUS_RECORDING;

        return result;
}

void libresoc_CmdPipelineBarrier(
        VkCommandBuffer commandBuffer,
        VkPipelineStageFlags srcStageMask,
        VkPipelineStageFlags destStageMask,
        VkBool32 byRegion,
        uint32_t memoryBarrierCount,
        const VkMemoryBarrier* pMemoryBarriers,
        uint32_t bufferMemoryBarrierCount,
        const VkBufferMemoryBarrier* pBufferMemoryBarriers,
        uint32_t imageMemoryBarrierCount,
        const VkImageMemoryBarrier* pImageMemoryBarriers)
{
        // LIBRESOC_FROM_HANDLE(libresoc_cmd_buffer, cmd_buffer, commandBuffer);
        // struct libresoc_barrier_info info;

        // info.reason = RGP_BARRIER_EXTERNAL_CMD_PIPELINE_BARRIER;
        // info.eventCount = 0;
        // info.pEvents = NULL;
        // info.srcStageMask = srcStageMask;
        // info.dstStageMask = destStageMask;

        // libresoc_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
        //                  bufferMemoryBarrierCount, pBufferMemoryBarriers,
        //                  imageMemoryBarrierCount, pImageMemoryBarriers, &info);
}

VkResult libresoc_EndCommandBuffer(
        VkCommandBuffer commandBuffer)
{
        LIBRESOC_FROM_HANDLE(libresoc_cmd_buffer, cmd_buffer, commandBuffer);

        // if (cmd_buffer->queue_family_index != LIBRESOC_QUEUE_TRANSFER) {
        //         if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX6)
        //                 cmd_buffer->state.flush_bits |= LIBRESOC_CMD_FLAG_CS_PARTIAL_FLUSH | LIBRESOC_CMD_FLAG_PS_PARTIAL_FLUSH | LIBRESOC_CMD_FLAG_WB_L2;

        //         /* Make sure to sync all pending active queries at the end of
        //          * command buffer.
        //          */
        //         cmd_buffer->state.flush_bits |= cmd_buffer->active_query_flush_bits;

        //         /* Since NGG streamout uses GDS, we need to make GDS idle when
        //          * we leave the IB, otherwise another process might overwrite
        //          * it while our shaders are busy.
        //          */
        //         if (cmd_buffer->gds_needed)
        //                 cmd_buffer->state.flush_bits |= LIBRESOC_CMD_FLAG_PS_PARTIAL_FLUSH;

        //         si_emit_cache_flush(cmd_buffer);
        // }

        // /* Make sure CP DMA is idle at the end of IBs because the kernel
        //  * doesn't wait for it.
        //  */
        // si_cp_dma_wait_for_idle(cmd_buffer);

        // libresoc_describe_end_cmd_buffer(cmd_buffer);

        // vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);
        // vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.subpass_sample_locs);

        // VkResult result = cmd_buffer->device->ws->cs_finalize(cmd_buffer->cs);
        // if (result != VK_SUCCESS)
        //         return vk_error(cmd_buffer->device->instance, result);

        // cmd_buffer->status = LIBRESOC_CMD_BUFFER_STATUS_EXECUTABLE;

        return cmd_buffer->record_result;
}