vulkan/wsi: Add a wsi_device_init function
[mesa.git] / src / amd / vulkan / radv_wsi.c
1 /*
2 * Copyright © 2016 Red Hat
3 * based on intel anv code:
4 * Copyright © 2015 Intel Corporation
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
23 * IN THE SOFTWARE.
24 */
25
26 #include "radv_private.h"
27 #include "radv_meta.h"
28 #include "wsi_common.h"
29 #include "vk_util.h"
30 #include "util/macros.h"
31
/* Driver callback table handed to platform WSI backends that need to query
 * format support. Only the Wayland backend below takes it, so the table is
 * MAYBE_UNUSED when that platform is compiled out. */
#define WSI_CB(x) .x = radv_##x
MAYBE_UNUSED static const struct wsi_callbacks wsi_cbs = {
	WSI_CB(GetPhysicalDeviceFormatProperties),
};
36
/* Entrypoint resolver passed to wsi_device_init() so the common WSI code can
 * look up driver functions by name. radv has a single static dispatch table,
 * so the physical device argument is irrelevant here. */
static PFN_vkVoidFunction
radv_wsi_proc_addr(VkPhysicalDevice physicalDevice, const char *pName)
{
	return radv_lookup_entrypoint(pName);
}
42
/*
 * Initialize the common WSI state plus every platform backend compiled in.
 * If a later backend fails, the ones initialized before it are torn down
 * again so nothing is left half-initialized on error.
 */
VkResult
radv_init_wsi(struct radv_physical_device *physical_device)
{
	VkResult result;

	wsi_device_init(&physical_device->wsi_device,
			radv_physical_device_to_handle(physical_device),
			radv_wsi_proc_addr);

#ifdef VK_USE_PLATFORM_XCB_KHR
	result = wsi_x11_init_wsi(&physical_device->wsi_device, &physical_device->instance->alloc);
	if (result != VK_SUCCESS)
		return result;
#endif

#ifdef VK_USE_PLATFORM_WAYLAND_KHR
	result = wsi_wl_init_wsi(&physical_device->wsi_device, &physical_device->instance->alloc,
				 radv_physical_device_to_handle(physical_device),
				 &wsi_cbs);
	if (result != VK_SUCCESS) {
		/* Unwind the X11 backend initialized above. */
#ifdef VK_USE_PLATFORM_XCB_KHR
		wsi_x11_finish_wsi(&physical_device->wsi_device, &physical_device->instance->alloc);
#endif
		return result;
	}
#endif

	return VK_SUCCESS;
}
72
/* Tear down every platform WSI backend initialized by radv_init_wsi(),
 * in reverse order of initialization. */
void
radv_finish_wsi(struct radv_physical_device *physical_device)
{
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
	wsi_wl_finish_wsi(&physical_device->wsi_device, &physical_device->instance->alloc);
#endif
#ifdef VK_USE_PLATFORM_XCB_KHR
	wsi_x11_finish_wsi(&physical_device->wsi_device, &physical_device->instance->alloc);
#endif
}
83
/*
 * Vulkan entrypoint: destroy a surface. Surfaces are plain host allocations
 * made by the platform backends, so freeing the ICD base struct — through
 * the caller's allocator, falling back to the instance allocator — is all
 * that is required.
 */
void radv_DestroySurfaceKHR(
	VkInstance                                  _instance,
	VkSurfaceKHR                                _surface,
	const VkAllocationCallbacks*                pAllocator)
{
	RADV_FROM_HANDLE(radv_instance, instance, _instance);
	ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);

	vk_free2(&instance->alloc, pAllocator, surface);
}
94
95 VkResult radv_GetPhysicalDeviceSurfaceSupportKHR(
96 VkPhysicalDevice physicalDevice,
97 uint32_t queueFamilyIndex,
98 VkSurfaceKHR _surface,
99 VkBool32* pSupported)
100 {
101 RADV_FROM_HANDLE(radv_physical_device, device, physicalDevice);
102 ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
103 struct wsi_interface *iface = device->wsi_device.wsi[surface->platform];
104
105 return iface->get_support(surface, &device->wsi_device,
106 &device->instance->alloc,
107 queueFamilyIndex, device->local_fd, true, pSupported);
108 }
109
110 VkResult radv_GetPhysicalDeviceSurfaceCapabilitiesKHR(
111 VkPhysicalDevice physicalDevice,
112 VkSurfaceKHR _surface,
113 VkSurfaceCapabilitiesKHR* pSurfaceCapabilities)
114 {
115 RADV_FROM_HANDLE(radv_physical_device, device, physicalDevice);
116 ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
117 struct wsi_interface *iface = device->wsi_device.wsi[surface->platform];
118
119 return iface->get_capabilities(surface, pSurfaceCapabilities);
120 }
121
122 VkResult radv_GetPhysicalDeviceSurfaceFormatsKHR(
123 VkPhysicalDevice physicalDevice,
124 VkSurfaceKHR _surface,
125 uint32_t* pSurfaceFormatCount,
126 VkSurfaceFormatKHR* pSurfaceFormats)
127 {
128 RADV_FROM_HANDLE(radv_physical_device, device, physicalDevice);
129 ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
130 struct wsi_interface *iface = device->wsi_device.wsi[surface->platform];
131
132 return iface->get_formats(surface, &device->wsi_device, pSurfaceFormatCount,
133 pSurfaceFormats);
134 }
135
136 VkResult radv_GetPhysicalDeviceSurfacePresentModesKHR(
137 VkPhysicalDevice physicalDevice,
138 VkSurfaceKHR _surface,
139 uint32_t* pPresentModeCount,
140 VkPresentModeKHR* pPresentModes)
141 {
142 RADV_FROM_HANDLE(radv_physical_device, device, physicalDevice);
143 ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
144 struct wsi_interface *iface = device->wsi_device.wsi[surface->platform];
145
146 return iface->get_present_modes(surface, pPresentModeCount,
147 pPresentModes);
148 }
149
150 static VkResult
151 radv_wsi_image_create(VkDevice device_h,
152 const VkSwapchainCreateInfoKHR *pCreateInfo,
153 const VkAllocationCallbacks* pAllocator,
154 bool needs_linear_copy,
155 bool linear,
156 struct wsi_image *wsi_image)
157 {
158 VkResult result = VK_SUCCESS;
159 struct radeon_surf *surface;
160 VkImage image_h;
161 struct radv_image *image;
162 int fd;
163 RADV_FROM_HANDLE(radv_device, device, device_h);
164
165 result = radv_image_create(device_h,
166 &(struct radv_image_create_info) {
167 .vk_info =
168 &(VkImageCreateInfo) {
169 .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
170 .imageType = VK_IMAGE_TYPE_2D,
171 .format = pCreateInfo->imageFormat,
172 .extent = {
173 .width = pCreateInfo->imageExtent.width,
174 .height = pCreateInfo->imageExtent.height,
175 .depth = 1
176 },
177 .mipLevels = 1,
178 .arrayLayers = 1,
179 .samples = 1,
180 /* FIXME: Need a way to use X tiling to allow scanout */
181 .tiling = linear ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL,
182 .usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
183 .flags = 0,
184 },
185 .scanout = true},
186 NULL,
187 &image_h);
188 if (result != VK_SUCCESS)
189 return result;
190
191 image = radv_image_from_handle(image_h);
192
193 VkDeviceMemory memory_h;
194
195 const VkMemoryDedicatedAllocateInfoKHR ded_alloc = {
196 .sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR,
197 .pNext = NULL,
198 .buffer = VK_NULL_HANDLE,
199 .image = image_h
200 };
201
202 /* Find the first VRAM memory type, or GART for PRIME images. */
203 int memory_type_index = -1;
204 for (int i = 0; i < device->physical_device->memory_properties.memoryTypeCount; ++i) {
205 bool is_local = !!(device->physical_device->memory_properties.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
206 if ((linear && !is_local) || (!linear && is_local)) {
207 memory_type_index = i;
208 break;
209 }
210 }
211
212 /* fallback */
213 if (memory_type_index == -1)
214 memory_type_index = 0;
215
216 result = radv_alloc_memory(device_h,
217 &(VkMemoryAllocateInfo) {
218 .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
219 .pNext = &ded_alloc,
220 .allocationSize = image->size,
221 .memoryTypeIndex = memory_type_index,
222 },
223 NULL /* XXX: pAllocator */,
224 RADV_MEM_IMPLICIT_SYNC,
225 &memory_h);
226 if (result != VK_SUCCESS)
227 goto fail_create_image;
228
229 radv_BindImageMemory(device_h, image_h, memory_h, 0);
230
231 /*
232 * return the fd for the image in the no copy mode,
233 * or the fd for the linear image if a copy is required.
234 */
235 if (!needs_linear_copy || (needs_linear_copy && linear)) {
236 RADV_FROM_HANDLE(radv_device_memory, memory, memory_h);
237 if (!radv_get_memory_fd(device, memory, &fd))
238 goto fail_alloc_memory;
239 wsi_image->fd = fd;
240 }
241
242 surface = &image->surface;
243
244 wsi_image->image = image_h;
245 wsi_image->memory = memory_h;
246 wsi_image->size = image->size;
247 wsi_image->offset = image->offset;
248 if (device->physical_device->rad_info.chip_class >= GFX9)
249 wsi_image->row_pitch =
250 surface->u.gfx9.surf_pitch * surface->bpe;
251 else
252 wsi_image->row_pitch =
253 surface->u.legacy.level[0].nblk_x * surface->bpe;
254
255 return VK_SUCCESS;
256 fail_alloc_memory:
257 radv_FreeMemory(device_h, memory_h, pAllocator);
258
259 fail_create_image:
260 radv_DestroyImage(device_h, image_h, pAllocator);
261
262 return result;
263 }
264
265 static void
266 radv_wsi_image_free(VkDevice device,
267 const VkAllocationCallbacks* pAllocator,
268 struct wsi_image *wsi_image)
269 {
270 radv_DestroyImage(device, wsi_image->image, pAllocator);
271
272 radv_FreeMemory(device, wsi_image->memory, pAllocator);
273 }
274
/* Image create/destroy hooks handed to the common WSI swapchain code in
 * radv_CreateSwapchainKHR(). */
static const struct wsi_image_fns radv_wsi_image_fns = {
   .create_wsi_image = radv_wsi_image_create,
   .free_wsi_image = radv_wsi_image_free,
};
279
280 #define NUM_PRIME_POOLS RADV_QUEUE_TRANSFER
281 static void
282 radv_wsi_free_prime_command_buffers(struct radv_device *device,
283 struct wsi_swapchain *swapchain)
284 {
285 const int num_pools = NUM_PRIME_POOLS;
286 const int num_images = swapchain->image_count;
287 int i;
288 for (i = 0; i < num_pools; i++) {
289 radv_FreeCommandBuffers(radv_device_to_handle(device),
290 swapchain->cmd_pools[i],
291 swapchain->image_count,
292 &swapchain->cmd_buffers[i * num_images]);
293
294 radv_DestroyCommandPool(radv_device_to_handle(device),
295 swapchain->cmd_pools[i],
296 &swapchain->alloc);
297 }
298 }
299
/*
 * For PRIME swapchains, pre-record one command buffer per (queue family,
 * swapchain image) pair that blits the tiled image to its linear staging
 * copy. radv_QueuePresentKHR() submits the buffer matching the presenting
 * queue's family and the presented image.
 *
 * Layout of swapchain->cmd_buffers: [family * num_images + image].
 * On failure, all partially-created pools/buffers are released again and
 * the error is returned.
 */
static VkResult
radv_wsi_create_prime_command_buffers(struct radv_device *device,
				      const VkAllocationCallbacks *alloc,
				      struct wsi_swapchain *swapchain)
{
	const int num_pools = NUM_PRIME_POOLS;
	const int num_images = swapchain->image_count;
	int num_cmd_buffers = num_images * num_pools; //TODO bump to MAX_QUEUE_FAMILIES
	VkResult result;
	int i, j;

	swapchain->cmd_buffers = vk_alloc(alloc, (sizeof(VkCommandBuffer) * num_cmd_buffers), 8,
					  VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
	if (!swapchain->cmd_buffers)
		return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

	/* Zeroed handles let the failure path free/destroy unconditionally,
	 * even for pools/buffers that were never created. */
	memset(swapchain->cmd_buffers, 0, sizeof(VkCommandBuffer) * num_cmd_buffers);
	memset(swapchain->cmd_pools, 0, sizeof(VkCommandPool) * num_pools);
	for (i = 0; i < num_pools; i++) {
		VkCommandPoolCreateInfo pool_create_info;

		pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
		pool_create_info.pNext = NULL;
		pool_create_info.flags = 0;
		/* One pool per queue family index so the buffers can be
		 * submitted on whichever queue ends up presenting. */
		pool_create_info.queueFamilyIndex = i;

		result = radv_CreateCommandPool(radv_device_to_handle(device),
						&pool_create_info, alloc,
						&swapchain->cmd_pools[i]);
		if (result != VK_SUCCESS)
			goto fail;

		VkCommandBufferAllocateInfo cmd_buffer_info;
		cmd_buffer_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
		cmd_buffer_info.pNext = NULL;
		cmd_buffer_info.commandPool = swapchain->cmd_pools[i];
		cmd_buffer_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
		cmd_buffer_info.commandBufferCount = num_images;

		result = radv_AllocateCommandBuffers(radv_device_to_handle(device),
						     &cmd_buffer_info,
						     &swapchain->cmd_buffers[i * num_images]);
		if (result != VK_SUCCESS)
			goto fail;
		/* Record the tiled->linear blit for every image up front so
		 * present time only has to submit. */
		for (j = 0; j < num_images; j++) {
			VkImage image, linear_image;
			int idx = (i * num_images) + j;

			swapchain->get_image_and_linear(swapchain, j, &image, &linear_image);
			VkCommandBufferBeginInfo begin_info = {0};

			begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;

			radv_BeginCommandBuffer(swapchain->cmd_buffers[idx], &begin_info);

			radv_blit_to_prime_linear(radv_cmd_buffer_from_handle(swapchain->cmd_buffers[idx]),
						  radv_image_from_handle(image),
						  radv_image_from_handle(linear_image));

			radv_EndCommandBuffer(swapchain->cmd_buffers[idx]);
		}
	}
	return VK_SUCCESS;
fail:
	radv_wsi_free_prime_command_buffers(device, swapchain);
	return result;
}
367
368 VkResult radv_CreateSwapchainKHR(
369 VkDevice _device,
370 const VkSwapchainCreateInfoKHR* pCreateInfo,
371 const VkAllocationCallbacks* pAllocator,
372 VkSwapchainKHR* pSwapchain)
373 {
374 RADV_FROM_HANDLE(radv_device, device, _device);
375 ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, pCreateInfo->surface);
376 struct wsi_interface *iface =
377 device->physical_device->wsi_device.wsi[surface->platform];
378 struct wsi_swapchain *swapchain;
379 const VkAllocationCallbacks *alloc;
380 if (pAllocator)
381 alloc = pAllocator;
382 else
383 alloc = &device->alloc;
384 VkResult result = iface->create_swapchain(surface, _device,
385 &device->physical_device->wsi_device,
386 device->physical_device->local_fd,
387 pCreateInfo,
388 alloc, &radv_wsi_image_fns,
389 &swapchain);
390 if (result != VK_SUCCESS)
391 return result;
392
393 if (pAllocator)
394 swapchain->alloc = *pAllocator;
395 else
396 swapchain->alloc = device->alloc;
397
398 for (unsigned i = 0; i < ARRAY_SIZE(swapchain->fences); i++)
399 swapchain->fences[i] = VK_NULL_HANDLE;
400
401 if (swapchain->needs_linear_copy) {
402 result = radv_wsi_create_prime_command_buffers(device, alloc,
403 swapchain);
404 if (result != VK_SUCCESS)
405 return result;
406 }
407
408 *pSwapchain = wsi_swapchain_to_handle(swapchain);
409
410 return VK_SUCCESS;
411 }
412
413 void radv_DestroySwapchainKHR(
414 VkDevice _device,
415 VkSwapchainKHR _swapchain,
416 const VkAllocationCallbacks* pAllocator)
417 {
418 RADV_FROM_HANDLE(radv_device, device, _device);
419 RADV_FROM_HANDLE(wsi_swapchain, swapchain, _swapchain);
420 const VkAllocationCallbacks *alloc;
421
422 if (!_swapchain)
423 return;
424
425 if (pAllocator)
426 alloc = pAllocator;
427 else
428 alloc = &device->alloc;
429
430 for (unsigned i = 0; i < ARRAY_SIZE(swapchain->fences); i++) {
431 if (swapchain->fences[i] != VK_NULL_HANDLE)
432 radv_DestroyFence(_device, swapchain->fences[i], pAllocator);
433 }
434
435 if (swapchain->needs_linear_copy)
436 radv_wsi_free_prime_command_buffers(device, swapchain);
437
438 swapchain->destroy(swapchain, alloc);
439 }
440
/*
 * Vulkan entrypoint: return the swapchain's presentable images using the
 * standard two-call count/fill idiom, implemented by the platform backend.
 */
VkResult radv_GetSwapchainImagesKHR(
	VkDevice                                    device,
	VkSwapchainKHR                              _swapchain,
	uint32_t*                                   pSwapchainImageCount,
	VkImage*                                    pSwapchainImages)
{
	RADV_FROM_HANDLE(wsi_swapchain, swapchain, _swapchain);

	return swapchain->get_images(swapchain, pSwapchainImageCount,
				     pSwapchainImages);
}
452
453 VkResult radv_AcquireNextImageKHR(
454 VkDevice device,
455 VkSwapchainKHR _swapchain,
456 uint64_t timeout,
457 VkSemaphore semaphore,
458 VkFence _fence,
459 uint32_t* pImageIndex)
460 {
461 RADV_FROM_HANDLE(wsi_swapchain, swapchain, _swapchain);
462 RADV_FROM_HANDLE(radv_fence, fence, _fence);
463
464 VkResult result = swapchain->acquire_next_image(swapchain, timeout, semaphore,
465 pImageIndex);
466
467 if (fence && (result == VK_SUCCESS || result == VK_SUBOPTIMAL_KHR)) {
468 fence->submitted = true;
469 fence->signalled = true;
470 }
471 return result;
472 }
473
474 VkResult radv_QueuePresentKHR(
475 VkQueue _queue,
476 const VkPresentInfoKHR* pPresentInfo)
477 {
478 RADV_FROM_HANDLE(radv_queue, queue, _queue);
479 VkResult result = VK_SUCCESS;
480 const VkPresentRegionsKHR *regions =
481 vk_find_struct_const(pPresentInfo->pNext, PRESENT_REGIONS_KHR);
482
483 for (uint32_t i = 0; i < pPresentInfo->swapchainCount; i++) {
484 RADV_FROM_HANDLE(wsi_swapchain, swapchain, pPresentInfo->pSwapchains[i]);
485 struct radeon_winsys_cs *cs;
486 const VkPresentRegionKHR *region = NULL;
487 VkResult item_result;
488 struct radv_winsys_sem_info sem_info;
489
490 item_result = radv_alloc_sem_info(&sem_info,
491 pPresentInfo->waitSemaphoreCount,
492 pPresentInfo->pWaitSemaphores,
493 0,
494 NULL);
495 if (pPresentInfo->pResults != NULL)
496 pPresentInfo->pResults[i] = item_result;
497 result = result == VK_SUCCESS ? item_result : result;
498 if (item_result != VK_SUCCESS) {
499 radv_free_sem_info(&sem_info);
500 continue;
501 }
502
503 assert(radv_device_from_handle(swapchain->device) == queue->device);
504 if (swapchain->fences[0] == VK_NULL_HANDLE) {
505 item_result = radv_CreateFence(radv_device_to_handle(queue->device),
506 &(VkFenceCreateInfo) {
507 .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
508 .flags = 0,
509 }, &swapchain->alloc, &swapchain->fences[0]);
510 if (pPresentInfo->pResults != NULL)
511 pPresentInfo->pResults[i] = item_result;
512 result = result == VK_SUCCESS ? item_result : result;
513 if (item_result != VK_SUCCESS) {
514 radv_free_sem_info(&sem_info);
515 continue;
516 }
517 } else {
518 radv_ResetFences(radv_device_to_handle(queue->device),
519 1, &swapchain->fences[0]);
520 }
521
522 if (swapchain->needs_linear_copy) {
523 int idx = (queue->queue_family_index * swapchain->image_count) + pPresentInfo->pImageIndices[i];
524 cs = radv_cmd_buffer_from_handle(swapchain->cmd_buffers[idx])->cs;
525 } else
526 cs = queue->device->empty_cs[queue->queue_family_index];
527 RADV_FROM_HANDLE(radv_fence, fence, swapchain->fences[0]);
528 struct radeon_winsys_fence *base_fence = fence->fence;
529 struct radeon_winsys_ctx *ctx = queue->hw_ctx;
530
531 queue->device->ws->cs_submit(ctx, queue->queue_idx,
532 &cs,
533 1, NULL, NULL,
534 &sem_info,
535 false, base_fence);
536 fence->submitted = true;
537
538 if (regions && regions->pRegions)
539 region = &regions->pRegions[i];
540
541 item_result = swapchain->queue_present(swapchain,
542 pPresentInfo->pImageIndices[i],
543 region);
544 /* TODO: What if one of them returns OUT_OF_DATE? */
545 if (pPresentInfo->pResults != NULL)
546 pPresentInfo->pResults[i] = item_result;
547 result = result == VK_SUCCESS ? item_result : result;
548 if (item_result != VK_SUCCESS) {
549 radv_free_sem_info(&sem_info);
550 continue;
551 }
552
553 VkFence last = swapchain->fences[2];
554 swapchain->fences[2] = swapchain->fences[1];
555 swapchain->fences[1] = swapchain->fences[0];
556 swapchain->fences[0] = last;
557
558 if (last != VK_NULL_HANDLE) {
559 radv_WaitForFences(radv_device_to_handle(queue->device),
560 1, &last, true, 1);
561 }
562
563 radv_free_sem_info(&sem_info);
564 }
565
566 return VK_SUCCESS;
567 }