radv: use vk_error() everywhere an error is returned
[mesa.git] / src / amd / vulkan / radv_wsi.c
1 /*
2 * Copyright © 2016 Red Hat
3 * based on intel anv code:
4 * Copyright © 2015 Intel Corporation
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
23 * IN THE SOFTWARE.
24 */
25
26 #include "radv_private.h"
27 #include "radv_meta.h"
28 #include "wsi_common.h"
29 #include "vk_util.h"
30 #include "util/macros.h"
31
/* Callback table handed to the Wayland WSI backend (see radv_init_wsi);
 * only referenced when VK_USE_PLATFORM_WAYLAND_KHR is defined, hence
 * MAYBE_UNUSED. */
MAYBE_UNUSED static const struct wsi_callbacks wsi_cbs = {
	.get_phys_device_format_properties = radv_GetPhysicalDeviceFormatProperties,
};
35
/* Initialize every compiled-in window-system-integration backend for this
 * physical device.  Torn down again by radv_finish_wsi().
 *
 * Returns VK_SUCCESS, or the first backend's failure code.  On a partial
 * failure any backend that was already initialized is finished again, so
 * the wsi_device is left in a clean state. */
VkResult
radv_init_wsi(struct radv_physical_device *physical_device)
{
	VkResult result;

	/* Zero the interface table so platforms that are not compiled in
	 * stay NULL and can be detected at dispatch time. */
	memset(physical_device->wsi_device.wsi, 0, sizeof(physical_device->wsi_device.wsi));

#ifdef VK_USE_PLATFORM_XCB_KHR
	result = wsi_x11_init_wsi(&physical_device->wsi_device, &physical_device->instance->alloc);
	if (result != VK_SUCCESS)
		return result;
#endif

#ifdef VK_USE_PLATFORM_WAYLAND_KHR
	result = wsi_wl_init_wsi(&physical_device->wsi_device, &physical_device->instance->alloc,
				 radv_physical_device_to_handle(physical_device),
				 &wsi_cbs);
	if (result != VK_SUCCESS) {
#ifdef VK_USE_PLATFORM_XCB_KHR
		/* Roll back the X11 init so it isn't leaked on a Wayland failure. */
		wsi_x11_finish_wsi(&physical_device->wsi_device, &physical_device->instance->alloc);
#endif
		return result;
	}
#endif

	return VK_SUCCESS;
}
63
/* Tear down every WSI backend set up by radv_init_wsi(). */
void
radv_finish_wsi(struct radv_physical_device *physical_device)
{
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
	wsi_wl_finish_wsi(&physical_device->wsi_device, &physical_device->instance->alloc);
#endif
#ifdef VK_USE_PLATFORM_XCB_KHR
	wsi_x11_finish_wsi(&physical_device->wsi_device, &physical_device->instance->alloc);
#endif
}
74
/* vkDestroySurfaceKHR: the surface is a plain allocation with no other
 * driver-side resources, so destroying it is just freeing it. */
void radv_DestroySurfaceKHR(
	VkInstance                                   _instance,
	VkSurfaceKHR                                 _surface,
	const VkAllocationCallbacks*                 pAllocator)
{
	RADV_FROM_HANDLE(radv_instance, instance, _instance);
	ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);

	/* vk_free2 prefers pAllocator and falls back to the instance allocator. */
	vk_free2(&instance->alloc, pAllocator, surface);
}
85
86 VkResult radv_GetPhysicalDeviceSurfaceSupportKHR(
87 VkPhysicalDevice physicalDevice,
88 uint32_t queueFamilyIndex,
89 VkSurfaceKHR _surface,
90 VkBool32* pSupported)
91 {
92 RADV_FROM_HANDLE(radv_physical_device, device, physicalDevice);
93 ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
94 struct wsi_interface *iface = device->wsi_device.wsi[surface->platform];
95
96 return iface->get_support(surface, &device->wsi_device,
97 &device->instance->alloc,
98 queueFamilyIndex, device->local_fd, true, pSupported);
99 }
100
101 VkResult radv_GetPhysicalDeviceSurfaceCapabilitiesKHR(
102 VkPhysicalDevice physicalDevice,
103 VkSurfaceKHR _surface,
104 VkSurfaceCapabilitiesKHR* pSurfaceCapabilities)
105 {
106 RADV_FROM_HANDLE(radv_physical_device, device, physicalDevice);
107 ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
108 struct wsi_interface *iface = device->wsi_device.wsi[surface->platform];
109
110 return iface->get_capabilities(surface, pSurfaceCapabilities);
111 }
112
113 VkResult radv_GetPhysicalDeviceSurfaceFormatsKHR(
114 VkPhysicalDevice physicalDevice,
115 VkSurfaceKHR _surface,
116 uint32_t* pSurfaceFormatCount,
117 VkSurfaceFormatKHR* pSurfaceFormats)
118 {
119 RADV_FROM_HANDLE(radv_physical_device, device, physicalDevice);
120 ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
121 struct wsi_interface *iface = device->wsi_device.wsi[surface->platform];
122
123 return iface->get_formats(surface, &device->wsi_device, pSurfaceFormatCount,
124 pSurfaceFormats);
125 }
126
127 VkResult radv_GetPhysicalDeviceSurfacePresentModesKHR(
128 VkPhysicalDevice physicalDevice,
129 VkSurfaceKHR _surface,
130 uint32_t* pPresentModeCount,
131 VkPresentModeKHR* pPresentModes)
132 {
133 RADV_FROM_HANDLE(radv_physical_device, device, physicalDevice);
134 ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
135 struct wsi_interface *iface = device->wsi_device.wsi[surface->platform];
136
137 return iface->get_present_modes(surface, pPresentModeCount,
138 pPresentModes);
139 }
140
141 static VkResult
142 radv_wsi_image_create(VkDevice device_h,
143 const VkSwapchainCreateInfoKHR *pCreateInfo,
144 const VkAllocationCallbacks* pAllocator,
145 bool needs_linear_copy,
146 bool linear,
147 VkImage *image_p,
148 VkDeviceMemory *memory_p,
149 uint32_t *size,
150 uint32_t *offset,
151 uint32_t *row_pitch, int *fd_p)
152 {
153 VkResult result = VK_SUCCESS;
154 struct radeon_surf *surface;
155 VkImage image_h;
156 struct radv_image *image;
157 int fd;
158 RADV_FROM_HANDLE(radv_device, device, device_h);
159
160 result = radv_image_create(device_h,
161 &(struct radv_image_create_info) {
162 .vk_info =
163 &(VkImageCreateInfo) {
164 .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
165 .imageType = VK_IMAGE_TYPE_2D,
166 .format = pCreateInfo->imageFormat,
167 .extent = {
168 .width = pCreateInfo->imageExtent.width,
169 .height = pCreateInfo->imageExtent.height,
170 .depth = 1
171 },
172 .mipLevels = 1,
173 .arrayLayers = 1,
174 .samples = 1,
175 /* FIXME: Need a way to use X tiling to allow scanout */
176 .tiling = linear ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL,
177 .usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
178 .flags = 0,
179 },
180 .scanout = true},
181 NULL,
182 &image_h);
183 if (result != VK_SUCCESS)
184 return result;
185
186 image = radv_image_from_handle(image_h);
187
188 VkDeviceMemory memory_h;
189
190 const VkMemoryDedicatedAllocateInfoKHR ded_alloc = {
191 .sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR,
192 .pNext = NULL,
193 .buffer = VK_NULL_HANDLE,
194 .image = image_h
195 };
196
197 /* Find the first VRAM memory type, or GART for PRIME images. */
198 int memory_type_index = -1;
199 for (int i = 0; i < device->physical_device->memory_properties.memoryTypeCount; ++i) {
200 bool is_local = !!(device->physical_device->memory_properties.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
201 if ((linear && !is_local) || (!linear && is_local)) {
202 memory_type_index = i;
203 break;
204 }
205 }
206
207 /* fallback */
208 if (memory_type_index == -1)
209 memory_type_index = 0;
210
211 result = radv_alloc_memory(device_h,
212 &(VkMemoryAllocateInfo) {
213 .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
214 .pNext = &ded_alloc,
215 .allocationSize = image->size,
216 .memoryTypeIndex = memory_type_index,
217 },
218 NULL /* XXX: pAllocator */,
219 RADV_MEM_IMPLICIT_SYNC,
220 &memory_h);
221 if (result != VK_SUCCESS)
222 goto fail_create_image;
223
224 radv_BindImageMemory(device_h, image_h, memory_h, 0);
225
226 /*
227 * return the fd for the image in the no copy mode,
228 * or the fd for the linear image if a copy is required.
229 */
230 if (!needs_linear_copy || (needs_linear_copy && linear)) {
231 RADV_FROM_HANDLE(radv_device_memory, memory, memory_h);
232 if (!radv_get_memory_fd(device, memory, &fd))
233 goto fail_alloc_memory;
234 *fd_p = fd;
235 }
236
237 surface = &image->surface;
238
239 *image_p = image_h;
240 *memory_p = memory_h;
241 *size = image->size;
242 *offset = image->offset;
243
244 if (device->physical_device->rad_info.chip_class >= GFX9)
245 *row_pitch = surface->u.gfx9.surf_pitch * surface->bpe;
246 else
247 *row_pitch = surface->u.legacy.level[0].nblk_x * surface->bpe;
248 return VK_SUCCESS;
249 fail_alloc_memory:
250 radv_FreeMemory(device_h, memory_h, pAllocator);
251
252 fail_create_image:
253 radv_DestroyImage(device_h, image_h, pAllocator);
254
255 return result;
256 }
257
258 static void
259 radv_wsi_image_free(VkDevice device,
260 const VkAllocationCallbacks* pAllocator,
261 VkImage image_h,
262 VkDeviceMemory memory_h)
263 {
264 radv_DestroyImage(device, image_h, pAllocator);
265
266 radv_FreeMemory(device, memory_h, pAllocator);
267 }
268
/* Callback table handed to the common WSI code (via create_swapchain) so
 * it can create/destroy driver-side swapchain images. */
static const struct wsi_image_fns radv_wsi_image_fns = {
	.create_wsi_image = radv_wsi_image_create,
	.free_wsi_image = radv_wsi_image_free,
};
273
274 #define NUM_PRIME_POOLS RADV_QUEUE_TRANSFER
275 static void
276 radv_wsi_free_prime_command_buffers(struct radv_device *device,
277 struct wsi_swapchain *swapchain)
278 {
279 const int num_pools = NUM_PRIME_POOLS;
280 const int num_images = swapchain->image_count;
281 int i;
282 for (i = 0; i < num_pools; i++) {
283 radv_FreeCommandBuffers(radv_device_to_handle(device),
284 swapchain->cmd_pools[i],
285 swapchain->image_count,
286 &swapchain->cmd_buffers[i * num_images]);
287
288 radv_DestroyCommandPool(radv_device_to_handle(device),
289 swapchain->cmd_pools[i],
290 &swapchain->alloc);
291 }
292 }
293
294 static VkResult
295 radv_wsi_create_prime_command_buffers(struct radv_device *device,
296 const VkAllocationCallbacks *alloc,
297 struct wsi_swapchain *swapchain)
298 {
299 const int num_pools = NUM_PRIME_POOLS;
300 const int num_images = swapchain->image_count;
301 int num_cmd_buffers = num_images * num_pools; //TODO bump to MAX_QUEUE_FAMILIES
302 VkResult result;
303 int i, j;
304
305 swapchain->cmd_buffers = vk_alloc(alloc, (sizeof(VkCommandBuffer) * num_cmd_buffers), 8,
306 VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
307 if (!swapchain->cmd_buffers)
308 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
309
310 memset(swapchain->cmd_buffers, 0, sizeof(VkCommandBuffer) * num_cmd_buffers);
311 memset(swapchain->cmd_pools, 0, sizeof(VkCommandPool) * num_pools);
312 for (i = 0; i < num_pools; i++) {
313 VkCommandPoolCreateInfo pool_create_info;
314
315 pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
316 pool_create_info.pNext = NULL;
317 pool_create_info.flags = 0;
318 pool_create_info.queueFamilyIndex = i;
319
320 result = radv_CreateCommandPool(radv_device_to_handle(device),
321 &pool_create_info, alloc,
322 &swapchain->cmd_pools[i]);
323 if (result != VK_SUCCESS)
324 goto fail;
325
326 VkCommandBufferAllocateInfo cmd_buffer_info;
327 cmd_buffer_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
328 cmd_buffer_info.pNext = NULL;
329 cmd_buffer_info.commandPool = swapchain->cmd_pools[i];
330 cmd_buffer_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
331 cmd_buffer_info.commandBufferCount = num_images;
332
333 result = radv_AllocateCommandBuffers(radv_device_to_handle(device),
334 &cmd_buffer_info,
335 &swapchain->cmd_buffers[i * num_images]);
336 if (result != VK_SUCCESS)
337 goto fail;
338 for (j = 0; j < num_images; j++) {
339 VkImage image, linear_image;
340 int idx = (i * num_images) + j;
341
342 swapchain->get_image_and_linear(swapchain, j, &image, &linear_image);
343 VkCommandBufferBeginInfo begin_info = {0};
344
345 begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
346
347 radv_BeginCommandBuffer(swapchain->cmd_buffers[idx], &begin_info);
348
349 radv_blit_to_prime_linear(radv_cmd_buffer_from_handle(swapchain->cmd_buffers[idx]),
350 radv_image_from_handle(image),
351 radv_image_from_handle(linear_image));
352
353 radv_EndCommandBuffer(swapchain->cmd_buffers[idx]);
354 }
355 }
356 return VK_SUCCESS;
357 fail:
358 radv_wsi_free_prime_command_buffers(device, swapchain);
359 return result;
360 }
361
362 VkResult radv_CreateSwapchainKHR(
363 VkDevice _device,
364 const VkSwapchainCreateInfoKHR* pCreateInfo,
365 const VkAllocationCallbacks* pAllocator,
366 VkSwapchainKHR* pSwapchain)
367 {
368 RADV_FROM_HANDLE(radv_device, device, _device);
369 ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, pCreateInfo->surface);
370 struct wsi_interface *iface =
371 device->physical_device->wsi_device.wsi[surface->platform];
372 struct wsi_swapchain *swapchain;
373 const VkAllocationCallbacks *alloc;
374 if (pAllocator)
375 alloc = pAllocator;
376 else
377 alloc = &device->alloc;
378 VkResult result = iface->create_swapchain(surface, _device,
379 &device->physical_device->wsi_device,
380 device->physical_device->local_fd,
381 pCreateInfo,
382 alloc, &radv_wsi_image_fns,
383 &swapchain);
384 if (result != VK_SUCCESS)
385 return result;
386
387 if (pAllocator)
388 swapchain->alloc = *pAllocator;
389 else
390 swapchain->alloc = device->alloc;
391
392 for (unsigned i = 0; i < ARRAY_SIZE(swapchain->fences); i++)
393 swapchain->fences[i] = VK_NULL_HANDLE;
394
395 if (swapchain->needs_linear_copy) {
396 result = radv_wsi_create_prime_command_buffers(device, alloc,
397 swapchain);
398 if (result != VK_SUCCESS)
399 return result;
400 }
401
402 *pSwapchain = wsi_swapchain_to_handle(swapchain);
403
404 return VK_SUCCESS;
405 }
406
407 void radv_DestroySwapchainKHR(
408 VkDevice _device,
409 VkSwapchainKHR _swapchain,
410 const VkAllocationCallbacks* pAllocator)
411 {
412 RADV_FROM_HANDLE(radv_device, device, _device);
413 RADV_FROM_HANDLE(wsi_swapchain, swapchain, _swapchain);
414 const VkAllocationCallbacks *alloc;
415
416 if (!_swapchain)
417 return;
418
419 if (pAllocator)
420 alloc = pAllocator;
421 else
422 alloc = &device->alloc;
423
424 for (unsigned i = 0; i < ARRAY_SIZE(swapchain->fences); i++) {
425 if (swapchain->fences[i] != VK_NULL_HANDLE)
426 radv_DestroyFence(_device, swapchain->fences[i], pAllocator);
427 }
428
429 if (swapchain->needs_linear_copy)
430 radv_wsi_free_prime_command_buffers(device, swapchain);
431
432 swapchain->destroy(swapchain, alloc);
433 }
434
435 VkResult radv_GetSwapchainImagesKHR(
436 VkDevice device,
437 VkSwapchainKHR _swapchain,
438 uint32_t* pSwapchainImageCount,
439 VkImage* pSwapchainImages)
440 {
441 RADV_FROM_HANDLE(wsi_swapchain, swapchain, _swapchain);
442
443 return swapchain->get_images(swapchain, pSwapchainImageCount,
444 pSwapchainImages);
445 }
446
447 VkResult radv_AcquireNextImageKHR(
448 VkDevice device,
449 VkSwapchainKHR _swapchain,
450 uint64_t timeout,
451 VkSemaphore semaphore,
452 VkFence _fence,
453 uint32_t* pImageIndex)
454 {
455 RADV_FROM_HANDLE(wsi_swapchain, swapchain, _swapchain);
456 RADV_FROM_HANDLE(radv_fence, fence, _fence);
457
458 VkResult result = swapchain->acquire_next_image(swapchain, timeout, semaphore,
459 pImageIndex);
460
461 if (fence && (result == VK_SUCCESS || result == VK_SUBOPTIMAL_KHR)) {
462 fence->submitted = true;
463 fence->signalled = true;
464 }
465 return result;
466 }
467
468 VkResult radv_QueuePresentKHR(
469 VkQueue _queue,
470 const VkPresentInfoKHR* pPresentInfo)
471 {
472 RADV_FROM_HANDLE(radv_queue, queue, _queue);
473 VkResult result = VK_SUCCESS;
474 const VkPresentRegionsKHR *regions =
475 vk_find_struct_const(pPresentInfo->pNext, PRESENT_REGIONS_KHR);
476
477 for (uint32_t i = 0; i < pPresentInfo->swapchainCount; i++) {
478 RADV_FROM_HANDLE(wsi_swapchain, swapchain, pPresentInfo->pSwapchains[i]);
479 struct radeon_winsys_cs *cs;
480 const VkPresentRegionKHR *region = NULL;
481 VkResult item_result;
482 struct radv_winsys_sem_info sem_info;
483
484 item_result = radv_alloc_sem_info(&sem_info,
485 pPresentInfo->waitSemaphoreCount,
486 pPresentInfo->pWaitSemaphores,
487 0,
488 NULL);
489 if (pPresentInfo->pResults != NULL)
490 pPresentInfo->pResults[i] = item_result;
491 result = result == VK_SUCCESS ? item_result : result;
492 if (item_result != VK_SUCCESS) {
493 radv_free_sem_info(&sem_info);
494 continue;
495 }
496
497 assert(radv_device_from_handle(swapchain->device) == queue->device);
498 if (swapchain->fences[0] == VK_NULL_HANDLE) {
499 item_result = radv_CreateFence(radv_device_to_handle(queue->device),
500 &(VkFenceCreateInfo) {
501 .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
502 .flags = 0,
503 }, &swapchain->alloc, &swapchain->fences[0]);
504 if (pPresentInfo->pResults != NULL)
505 pPresentInfo->pResults[i] = item_result;
506 result = result == VK_SUCCESS ? item_result : result;
507 if (item_result != VK_SUCCESS) {
508 radv_free_sem_info(&sem_info);
509 continue;
510 }
511 } else {
512 radv_ResetFences(radv_device_to_handle(queue->device),
513 1, &swapchain->fences[0]);
514 }
515
516 if (swapchain->needs_linear_copy) {
517 int idx = (queue->queue_family_index * swapchain->image_count) + pPresentInfo->pImageIndices[i];
518 cs = radv_cmd_buffer_from_handle(swapchain->cmd_buffers[idx])->cs;
519 } else
520 cs = queue->device->empty_cs[queue->queue_family_index];
521 RADV_FROM_HANDLE(radv_fence, fence, swapchain->fences[0]);
522 struct radeon_winsys_fence *base_fence = fence->fence;
523 struct radeon_winsys_ctx *ctx = queue->hw_ctx;
524
525 queue->device->ws->cs_submit(ctx, queue->queue_idx,
526 &cs,
527 1, NULL, NULL,
528 &sem_info,
529 false, base_fence);
530 fence->submitted = true;
531
532 if (regions && regions->pRegions)
533 region = &regions->pRegions[i];
534
535 item_result = swapchain->queue_present(swapchain,
536 pPresentInfo->pImageIndices[i],
537 region);
538 /* TODO: What if one of them returns OUT_OF_DATE? */
539 if (pPresentInfo->pResults != NULL)
540 pPresentInfo->pResults[i] = item_result;
541 result = result == VK_SUCCESS ? item_result : result;
542 if (item_result != VK_SUCCESS) {
543 radv_free_sem_info(&sem_info);
544 continue;
545 }
546
547 VkFence last = swapchain->fences[2];
548 swapchain->fences[2] = swapchain->fences[1];
549 swapchain->fences[1] = swapchain->fences[0];
550 swapchain->fences[0] = last;
551
552 if (last != VK_NULL_HANDLE) {
553 radv_WaitForFences(radv_device_to_handle(queue->device),
554 1, &last, true, 1);
555 }
556
557 radv_free_sem_info(&sem_info);
558 }
559
560 return VK_SUCCESS;
561 }