/*
 * Copyright © 2016 Red Hat
 * based on intel anv code:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <wayland-client.h>
#include <wayland-drm-client-protocol.h>

#include "radv_wsi.h"

#include "vk_format.h"
#include <util/hash_table.h>

#define MIN_NUM_IMAGES 2

struct wsi_wl_display {
	struct radv_physical_device *                physical_device;
	struct wl_display *                          display;
	struct wl_drm *                              drm;

	/* Vector of VkFormats supported */
	struct radv_vector                           formats;

	uint32_t                                     capabilities;
};

struct wsi_wayland {
	struct radv_wsi_interface                    base;

	struct radv_physical_device *                physical_device;

	pthread_mutex_t                              mutex;
	/* Hash table of wl_display -> wsi_wl_display mappings */
	struct hash_table *                          displays;
};

static void
wsi_wl_display_add_vk_format(struct wsi_wl_display *display, VkFormat format)
{
	/* Don't add a format that's already in the list */
	VkFormat *f;
	radv_vector_foreach(f, &display->formats)
		if (*f == format)
			return;

	/* Don't add formats that aren't renderable. */
	VkFormatProperties props;
	radv_GetPhysicalDeviceFormatProperties(
		radv_physical_device_to_handle(display->physical_device), format, &props);
	if (!(props.optimalTilingFeatures & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT))
		return;

	f = radv_vector_add(&display->formats);
	if (f)
		*f = format;
}

static void
drm_handle_device(void *data, struct wl_drm *drm, const char *name)
{
	fprintf(stderr, "wl_drm.device(%s)\n", name);
}

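/* Map a VkFormat onto the wl_drm (DRM fourcc) code the compositor expects.
 * The correspondence looks reversed at first glance: Vulkan's B8G8R8A8
 * formats name their components in memory byte order (B in byte 0), while
 * DRM fourccs such as ARGB8888 describe a little-endian packed 32-bit word
 * (A in bits 31:24, B in bits 7:0), which lands in memory as B, G, R, A.
 * So VK_FORMAT_B8G8R8A8_SRGB pairs with WL_DRM_FORMAT_ARGB8888 below, and
 * the X variants simply tell the compositor to ignore the alpha channel.
 */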
static uint32_t
wl_drm_format_for_vk_format(VkFormat vk_format, bool alpha)
{
	switch (vk_format) {
	/* TODO: Figure out what all the formats mean and make this table
	 * correct.
	 */
#if 0
	case VK_FORMAT_R4G4B4A4_UNORM:
		return alpha ? WL_DRM_FORMAT_ABGR4444 : WL_DRM_FORMAT_XBGR4444;
	case VK_FORMAT_R5G6B5_UNORM:
		return WL_DRM_FORMAT_BGR565;
	case VK_FORMAT_R5G5B5A1_UNORM:
		return alpha ? WL_DRM_FORMAT_ABGR1555 : WL_DRM_FORMAT_XBGR1555;
	case VK_FORMAT_R8G8B8_UNORM:
		return WL_DRM_FORMAT_XBGR8888;
	case VK_FORMAT_R8G8B8A8_UNORM:
		return alpha ? WL_DRM_FORMAT_ABGR8888 : WL_DRM_FORMAT_XBGR8888;
	case VK_FORMAT_R10G10B10A2_UNORM:
		return alpha ? WL_DRM_FORMAT_ABGR2101010 : WL_DRM_FORMAT_XBGR2101010;
	case VK_FORMAT_B4G4R4A4_UNORM:
		return alpha ? WL_DRM_FORMAT_ARGB4444 : WL_DRM_FORMAT_XRGB4444;
	case VK_FORMAT_B5G6R5_UNORM:
		return WL_DRM_FORMAT_RGB565;
	case VK_FORMAT_B5G5R5A1_UNORM:
		return alpha ? WL_DRM_FORMAT_ARGB1555 : WL_DRM_FORMAT_XRGB1555;
#endif
	case VK_FORMAT_B8G8R8_SRGB:
		return WL_DRM_FORMAT_BGRX8888;
	case VK_FORMAT_B8G8R8A8_SRGB:
		return alpha ? WL_DRM_FORMAT_ARGB8888 : WL_DRM_FORMAT_XRGB8888;
#if 0
	case VK_FORMAT_B10G10R10A2_UNORM:
		return alpha ? WL_DRM_FORMAT_ARGB2101010 : WL_DRM_FORMAT_XRGB2101010;
#endif

	default:
		assert(!"Unsupported Vulkan format");
		return 0;
	}
}

static void
drm_handle_format(void *data, struct wl_drm *drm, uint32_t wl_format)
{
	struct wsi_wl_display *display = data;

	switch (wl_format) {
#if 0
	case WL_DRM_FORMAT_ABGR4444:
	case WL_DRM_FORMAT_XBGR4444:
		wsi_wl_display_add_vk_format(display, VK_FORMAT_R4G4B4A4_UNORM);
		break;
	case WL_DRM_FORMAT_BGR565:
		wsi_wl_display_add_vk_format(display, VK_FORMAT_R5G6B5_UNORM);
		break;
	case WL_DRM_FORMAT_ABGR1555:
	case WL_DRM_FORMAT_XBGR1555:
		wsi_wl_display_add_vk_format(display, VK_FORMAT_R5G5B5A1_UNORM);
		break;
	case WL_DRM_FORMAT_XBGR8888:
		wsi_wl_display_add_vk_format(display, VK_FORMAT_R8G8B8_UNORM);
		/* fallthrough */
	case WL_DRM_FORMAT_ABGR8888:
		wsi_wl_display_add_vk_format(display, VK_FORMAT_R8G8B8A8_UNORM);
		break;
	case WL_DRM_FORMAT_ABGR2101010:
	case WL_DRM_FORMAT_XBGR2101010:
		wsi_wl_display_add_vk_format(display, VK_FORMAT_R10G10B10A2_UNORM);
		break;
	case WL_DRM_FORMAT_ARGB4444:
	case WL_DRM_FORMAT_XRGB4444:
		wsi_wl_display_add_vk_format(display, VK_FORMAT_B4G4R4A4_UNORM);
		break;
	case WL_DRM_FORMAT_RGB565:
		wsi_wl_display_add_vk_format(display, VK_FORMAT_B5G6R5_UNORM);
		break;
	case WL_DRM_FORMAT_ARGB1555:
	case WL_DRM_FORMAT_XRGB1555:
		wsi_wl_display_add_vk_format(display, VK_FORMAT_B5G5R5A1_UNORM);
		break;
#endif
	case WL_DRM_FORMAT_XRGB8888:
		wsi_wl_display_add_vk_format(display, VK_FORMAT_B8G8R8_SRGB);
		/* fallthrough */
	case WL_DRM_FORMAT_ARGB8888:
		wsi_wl_display_add_vk_format(display, VK_FORMAT_B8G8R8A8_SRGB);
		break;
#if 0
	case WL_DRM_FORMAT_ARGB2101010:
	case WL_DRM_FORMAT_XRGB2101010:
		wsi_wl_display_add_vk_format(display, VK_FORMAT_B10G10R10A2_UNORM);
		break;
#endif
	}
}

static void
drm_handle_authenticated(void *data, struct wl_drm *drm)
{
}

static void
drm_handle_capabilities(void *data, struct wl_drm *drm, uint32_t capabilities)
{
	struct wsi_wl_display *display = data;

	display->capabilities = capabilities;
}

static const struct wl_drm_listener drm_listener = {
	drm_handle_device,
	drm_handle_format,
	drm_handle_authenticated,
	drm_handle_capabilities,
};

static void
registry_handle_global(void *data, struct wl_registry *registry,
		       uint32_t name, const char *interface, uint32_t version)
{
	struct wsi_wl_display *display = data;

	if (strcmp(interface, "wl_drm") == 0) {
		assert(display->drm == NULL);

		assert(version >= 2);
		display->drm = wl_registry_bind(registry, name, &wl_drm_interface, 2);

		if (display->drm)
			wl_drm_add_listener(display->drm, &drm_listener, display);
	}
}

static void
registry_handle_global_remove(void *data, struct wl_registry *registry,
			      uint32_t name)
{ /* No-op */ }

static const struct wl_registry_listener registry_listener = {
	registry_handle_global,
	registry_handle_global_remove
};

static void
wsi_wl_display_destroy(struct wsi_wayland *wsi, struct wsi_wl_display *display)
{
	radv_vector_finish(&display->formats);
	if (display->drm)
		wl_drm_destroy(display->drm);
	radv_free(&wsi->physical_device->instance->alloc, display);
}

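/* wsi_wl_display_create below relies on two wl_display_roundtrip() calls:
 * the first guarantees the compositor has processed our wl_registry requests,
 * so registry_handle_global has run and (if the compositor advertises wl_drm)
 * the bind has been issued; the second guarantees the wl_drm.format and
 * wl_drm.capabilities events emitted in response to that bind have been
 * dispatched, so display->formats and display->capabilities are populated
 * before we check for PRIME support.
 */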
static struct wsi_wl_display *
wsi_wl_display_create(struct wsi_wayland *wsi, struct wl_display *wl_display)
{
	struct wsi_wl_display *display =
		radv_alloc(&wsi->physical_device->instance->alloc, sizeof(*display), 8,
			   VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
	if (!display)
		return NULL;

	memset(display, 0, sizeof(*display));

	display->display = wl_display;
	display->physical_device = wsi->physical_device;

	/* Keep the registry pointer initialized so the fail path below can
	 * safely check it even when we bail out before creating it.
	 */
	struct wl_registry *registry = NULL;

	if (!radv_vector_init(&display->formats, sizeof(VkFormat), 8))
		goto fail;

	registry = wl_display_get_registry(wl_display);
	if (!registry)
		goto fail;

	wl_registry_add_listener(registry, &registry_listener, display);

	/* Round-trip to get the wl_drm global */
	wl_display_roundtrip(wl_display);

	if (!display->drm)
		goto fail;

	/* Round-trip to get wl_drm formats and capabilities */
	wl_display_roundtrip(wl_display);

	/* We need prime support */
	if (!(display->capabilities & WL_DRM_CAPABILITY_PRIME))
		goto fail;

	/* We don't need this anymore */
	wl_registry_destroy(registry);

	return display;

fail:
	if (registry)
		wl_registry_destroy(registry);

	wsi_wl_display_destroy(wsi, display);
	return NULL;
}

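/* Look up (or lazily create) the wsi_wl_display for a wl_display. The mutex
 * only guards the hash table; it is dropped around wsi_wl_display_create()
 * because that function performs blocking roundtrips. Two threads may
 * therefore race to create the same display, which is why the table is
 * re-checked after re-acquiring the lock and the loser's copy is thrown away.
 */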
static struct wsi_wl_display *
wsi_wl_get_display(struct radv_physical_device *device,
		   struct wl_display *wl_display)
{
	struct wsi_wayland *wsi =
		(struct wsi_wayland *)device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND];

	pthread_mutex_lock(&wsi->mutex);

	struct hash_entry *entry = _mesa_hash_table_search(wsi->displays,
							   wl_display);
	if (!entry) {
		/* We're about to make a bunch of blocking calls. Let's drop the
		 * mutex for now so we don't block up too badly.
		 */
		pthread_mutex_unlock(&wsi->mutex);

		struct wsi_wl_display *display = wsi_wl_display_create(wsi, wl_display);

		pthread_mutex_lock(&wsi->mutex);

		entry = _mesa_hash_table_search(wsi->displays, wl_display);
		if (entry) {
			/* Oops, someone raced us to it */
			if (display)
				wsi_wl_display_destroy(wsi, display);
		} else {
			entry = _mesa_hash_table_insert(wsi->displays, wl_display, display);
		}
	}

	pthread_mutex_unlock(&wsi->mutex);

	return entry->data;
}

VkBool32 radv_GetPhysicalDeviceWaylandPresentationSupportKHR(
	VkPhysicalDevice                            physicalDevice,
	uint32_t                                    queueFamilyIndex,
	struct wl_display*                          display)
{
	RADV_FROM_HANDLE(radv_physical_device, physical_device, physicalDevice);

	return wsi_wl_get_display(physical_device, display) != NULL;
}

static VkResult
wsi_wl_surface_get_support(VkIcdSurfaceBase *surface,
			   struct radv_physical_device *device,
			   uint32_t queueFamilyIndex,
			   VkBool32* pSupported)
{
	*pSupported = true;

	return VK_SUCCESS;
}

static const VkPresentModeKHR present_modes[] = {
	VK_PRESENT_MODE_MAILBOX_KHR,
	VK_PRESENT_MODE_FIFO_KHR,
};

static VkResult
wsi_wl_surface_get_capabilities(VkIcdSurfaceBase *surface,
				struct radv_physical_device *device,
				VkSurfaceCapabilitiesKHR* caps)
{
	caps->minImageCount = MIN_NUM_IMAGES;
	caps->maxImageCount = 4;
	caps->currentExtent = (VkExtent2D) { -1, -1 };
	caps->minImageExtent = (VkExtent2D) { 1, 1 };
	caps->maxImageExtent = (VkExtent2D) { INT16_MAX, INT16_MAX };
	caps->supportedTransforms = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
	caps->currentTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
	caps->maxImageArrayLayers = 1;

	caps->supportedCompositeAlpha =
		VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR |
		VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR;

	caps->supportedUsageFlags =
		VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
		VK_IMAGE_USAGE_SAMPLED_BIT |
		VK_IMAGE_USAGE_TRANSFER_DST_BIT |
		VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;

	return VK_SUCCESS;
}

static VkResult
wsi_wl_surface_get_formats(VkIcdSurfaceBase *icd_surface,
			   struct radv_physical_device *device,
			   uint32_t* pSurfaceFormatCount,
			   VkSurfaceFormatKHR* pSurfaceFormats)
{
	VkIcdSurfaceWayland *surface = (VkIcdSurfaceWayland *)icd_surface;
	struct wsi_wl_display *display =
		wsi_wl_get_display(device, surface->display);

	uint32_t count = radv_vector_length(&display->formats);

	if (pSurfaceFormats == NULL) {
		*pSurfaceFormatCount = count;
		return VK_SUCCESS;
	}

	assert(*pSurfaceFormatCount >= count);
	*pSurfaceFormatCount = count;

	VkFormat *f;
	radv_vector_foreach(f, &display->formats) {
		*(pSurfaceFormats++) = (VkSurfaceFormatKHR) {
			.format = *f,
			/* TODO: We should get this from the compositor somehow */
			.colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR,
		};
	}

	return VK_SUCCESS;
}

static VkResult
wsi_wl_surface_get_present_modes(VkIcdSurfaceBase *surface,
				 struct radv_physical_device *device,
				 uint32_t* pPresentModeCount,
				 VkPresentModeKHR* pPresentModes)
{
	if (pPresentModes == NULL) {
		*pPresentModeCount = ARRAY_SIZE(present_modes);
		return VK_SUCCESS;
	}

	assert(*pPresentModeCount >= ARRAY_SIZE(present_modes));
	typed_memcpy(pPresentModes, present_modes, ARRAY_SIZE(present_modes));
	*pPresentModeCount = ARRAY_SIZE(present_modes);

	return VK_SUCCESS;
}

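/* The format and present-mode getters above follow the standard Vulkan
 * two-call idiom: pass NULL to query the element count, then call again with
 * a buffer at least that large. An application-side sketch (illustrative
 * only, not part of this driver; assumes a valid VkPhysicalDevice
 * physical_device and VkSurfaceKHR surface, error handling omitted):
 */
#if 0
	uint32_t count = 0;
	vkGetPhysicalDeviceSurfacePresentModesKHR(physical_device, surface,
						  &count, NULL);

	VkPresentModeKHR *modes = malloc(count * sizeof(*modes));
	vkGetPhysicalDeviceSurfacePresentModesKHR(physical_device, surface,
						  &count, modes);
	/* modes[] now holds VK_PRESENT_MODE_MAILBOX_KHR / _FIFO_KHR for this
	 * implementation; pick one and pass it as presentMode when creating
	 * the swapchain.
	 */
	free(modes);
#endif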
static VkResult
wsi_wl_surface_create_swapchain(VkIcdSurfaceBase *surface,
				struct radv_device *device,
				const VkSwapchainCreateInfoKHR* pCreateInfo,
				const VkAllocationCallbacks* pAllocator,
				struct radv_swapchain **swapchain);

VkResult radv_CreateWaylandSurfaceKHR(
	VkInstance                                  _instance,
	const VkWaylandSurfaceCreateInfoKHR*        pCreateInfo,
	const VkAllocationCallbacks*                pAllocator,
	VkSurfaceKHR*                               pSurface)
{
	RADV_FROM_HANDLE(radv_instance, instance, _instance);

	assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR);

	VkIcdSurfaceWayland *surface;

	surface = radv_alloc2(&instance->alloc, pAllocator, sizeof *surface, 8,
			      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (surface == NULL)
		return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

	surface->base.platform = VK_ICD_WSI_PLATFORM_WAYLAND;
	surface->display = pCreateInfo->display;
	surface->surface = pCreateInfo->surface;

	*pSurface = _VkIcdSurfaceBase_to_handle(&surface->base);

	return VK_SUCCESS;
}

struct wsi_wl_image {
	struct radv_image *                          image;
	struct radv_device_memory *                  memory;
	struct wl_buffer *                           buffer;
	bool                                         busy;
};

struct wsi_wl_swapchain {
	struct radv_swapchain                        base;

	struct wsi_wl_display *                      display;
	struct wl_event_queue *                      queue;
	struct wl_surface *                          surface;

	VkExtent2D                                   extent;
	VkFormat                                     vk_format;
	uint32_t                                     drm_format;

	VkPresentModeKHR                             present_mode;
	bool                                         fifo_ready;

	uint32_t                                     image_count;
	struct wsi_wl_image                          images[0];
};

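/* images[0] is an old-style flexible array member: wsi_wl_surface_create_swapchain
 * allocates sizeof(*chain) + num_images * sizeof(chain->images[0]) bytes so the
 * per-image slots live directly behind the swapchain header.
 */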
static VkResult
wsi_wl_swapchain_get_images(struct radv_swapchain *radv_chain,
			    uint32_t *pCount, VkImage *pSwapchainImages)
{
	struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)radv_chain;

	if (pSwapchainImages == NULL) {
		*pCount = chain->image_count;
		return VK_SUCCESS;
	}

	assert(chain->image_count <= *pCount);
	for (uint32_t i = 0; i < chain->image_count; i++)
		pSwapchainImages[i] = radv_image_to_handle(chain->images[i].image);

	*pCount = chain->image_count;

	return VK_SUCCESS;
}

static VkResult
wsi_wl_swapchain_acquire_next_image(struct radv_swapchain *radv_chain,
				    uint64_t timeout,
				    VkSemaphore semaphore,
				    uint32_t *image_index)
{
	struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)radv_chain;

	int ret = wl_display_dispatch_queue_pending(chain->display->display,
						    chain->queue);
	/* XXX: I'm not sure if out-of-date is the right error here. If
	 * wl_display_dispatch_queue_pending fails it most likely means we got
	 * kicked by the server so this seems more-or-less correct.
	 */
	if (ret < 0)
		return vk_error(VK_ERROR_OUT_OF_DATE_KHR);

	while (1) {
		for (uint32_t i = 0; i < chain->image_count; i++) {
			if (!chain->images[i].busy) {
				/* We found a non-busy image */
				*image_index = i;
				chain->images[i].busy = true;
				return VK_SUCCESS;
			}
		}

		/* This time we do a blocking dispatch because we can't go
		 * anywhere until we get an event.
		 */
		int ret = wl_display_roundtrip_queue(chain->display->display,
						     chain->queue);
		if (ret < 0)
			return vk_error(VK_ERROR_OUT_OF_DATE_KHR);
	}
}

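/* An image becomes non-busy again when the compositor sends wl_buffer.release
 * for its buffer (see buffer_handle_release() below); the blocking roundtrip
 * above is what gives that event a chance to arrive when every image is still
 * busy.
 */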
static void
frame_handle_done(void *data, struct wl_callback *callback, uint32_t serial)
{
	struct wsi_wl_swapchain *chain = data;

	chain->fifo_ready = true;

	wl_callback_destroy(callback);
}

static const struct wl_callback_listener frame_listener = {
	frame_handle_done,
};

static VkResult
wsi_wl_swapchain_queue_present(struct radv_swapchain *radv_chain,
			       struct radv_queue *queue,
			       uint32_t image_index)
{
	struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)radv_chain;

	if (chain->present_mode == VK_PRESENT_MODE_FIFO_KHR) {
		while (!chain->fifo_ready) {
			int ret = wl_display_dispatch_queue(chain->display->display,
							    chain->queue);
			if (ret < 0)
				return vk_error(VK_ERROR_OUT_OF_DATE_KHR);
		}
	}

	assert(image_index < chain->image_count);
	wl_surface_attach(chain->surface, chain->images[image_index].buffer, 0, 0);
	wl_surface_damage(chain->surface, 0, 0, INT32_MAX, INT32_MAX);

	if (chain->present_mode == VK_PRESENT_MODE_FIFO_KHR) {
		struct wl_callback *frame = wl_surface_frame(chain->surface);
		wl_proxy_set_queue((struct wl_proxy *)frame, chain->queue);
		wl_callback_add_listener(frame, &frame_listener, chain);
		chain->fifo_ready = false;
	}

	chain->images[image_index].busy = true;
	wl_surface_commit(chain->surface);
	wl_display_flush(chain->display->display);

	return VK_SUCCESS;
}

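/* In FIFO mode the frame callback requested above throttles presentation:
 * fifo_ready stays false until the compositor signals wl_callback.done for
 * the previous frame, so the next queue_present blocks in
 * wl_display_dispatch_queue() instead of outrunning the display. From the
 * application side, the acquire/present cycle served by these two entry
 * points looks roughly like the sketch below (illustrative only, assuming a
 * valid device, swapchain, queue and acquire_semaphore; synchronization with
 * rendering omitted):
 */
#if 0
	uint32_t image_index;
	vkAcquireNextImageKHR(device, swapchain, UINT64_MAX,
			      acquire_semaphore, VK_NULL_HANDLE, &image_index);

	/* ... record and submit rendering to the acquired image ... */

	VkPresentInfoKHR present_info = {
		.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,
		.swapchainCount = 1,
		.pSwapchains = &swapchain,
		.pImageIndices = &image_index,
	};
	vkQueuePresentKHR(queue, &present_info);
#endif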
static void
wsi_wl_image_finish(struct wsi_wl_swapchain *chain, struct wsi_wl_image *image,
		    const VkAllocationCallbacks* pAllocator)
{
	VkDevice vk_device = radv_device_to_handle(chain->base.device);
	radv_FreeMemory(vk_device, radv_device_memory_to_handle(image->memory),
			pAllocator);
	radv_DestroyImage(vk_device, radv_image_to_handle(image->image),
			  pAllocator);
}

static void
buffer_handle_release(void *data, struct wl_buffer *buffer)
{
	struct wsi_wl_image *image = data;

	assert(image->buffer == buffer);

	image->busy = false;
}

static const struct wl_buffer_listener buffer_listener = {
	buffer_handle_release,
};

static VkResult
wsi_wl_image_init(struct wsi_wl_swapchain *chain,
		  struct wsi_wl_image *image,
		  const VkSwapchainCreateInfoKHR *pCreateInfo,
		  const VkAllocationCallbacks* pAllocator)
{
	VkDevice vk_device = radv_device_to_handle(chain->base.device);
	VkResult result;
	bool bret;
	VkImage vk_image;
	struct radeon_surf *surface;
	int fd;

	result = radv_image_create(vk_device,
				   &(struct radv_image_create_info) {
					   .vk_info =
					   &(VkImageCreateInfo) {
						   .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
						   .imageType = VK_IMAGE_TYPE_2D,
						   .format = chain->vk_format,
						   .extent = {
							   .width = chain->extent.width,
							   .height = chain->extent.height,
							   .depth = 1
						   },
						   .mipLevels = 1,
						   .arrayLayers = 1,
						   .samples = 1,
						   /* FIXME: Need a way to use X tiling to allow scanout */
						   .tiling = VK_IMAGE_TILING_OPTIMAL,
						   .usage = (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
							     pCreateInfo->imageUsage),
						   .flags = 0,
					   },
					   .scanout = true},
				   pAllocator,
				   &vk_image);

	if (result != VK_SUCCESS)
		return result;

	image->image = radv_image_from_handle(vk_image);
	assert(vk_format_is_color(image->image->vk_format));

	VkDeviceMemory vk_memory;
	result = radv_AllocateMemory(vk_device,
				     &(VkMemoryAllocateInfo) {
					     .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
					     .allocationSize = image->image->size,
					     .memoryTypeIndex = 0,
				     },
				     pAllocator,
				     &vk_memory);

	if (result != VK_SUCCESS)
		goto fail_image;

	image->memory = radv_device_memory_from_handle(vk_memory);

	result = radv_BindImageMemory(vk_device, vk_image, vk_memory, 0);

	if (result != VK_SUCCESS)
		goto fail_mem;

	bret = chain->base.device->ws->buffer_get_fd(chain->base.device->ws,
						     image->memory->bo, &fd);
	if (bret == false)
		goto fail_mem;

	{
		struct radeon_bo_metadata metadata;
		radv_init_metadata(chain->base.device, image->image, &metadata);
		chain->base.device->ws->buffer_set_metadata(image->memory->bo, &metadata);
	}
	surface = &image->image->surface;

	image->buffer = wl_drm_create_prime_buffer(chain->display->drm,
						   fd, /* name */
						   chain->extent.width,
						   chain->extent.height,
						   chain->drm_format,
						   surface->level[0].offset,
						   surface->level[0].pitch_bytes,
						   0, 0, 0, 0 /* unused */);
	wl_display_roundtrip(chain->display->display);
	close(fd);

	wl_proxy_set_queue((struct wl_proxy *)image->buffer, chain->queue);
	wl_buffer_add_listener(image->buffer, &buffer_listener, image);

	return VK_SUCCESS;

fail_mem:
	radv_FreeMemory(vk_device, vk_memory, pAllocator);
fail_image:
	radv_DestroyImage(vk_device, vk_image, pAllocator);

	return result;
}

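/* wsi_wl_image_init above wires a VkImage up to the compositor: the image's
 * buffer object is exported as a dma-buf fd, tiling metadata is attached so
 * the other process interprets the pixels correctly, and the fd is handed
 * over via wl_drm.create_prime_buffer (hence the WL_DRM_CAPABILITY_PRIME
 * check at display creation). The roundtrip afterwards makes sure the request
 * and the fd it carries have been flushed to the compositor before we close
 * our copy of the fd and start using the resulting wl_buffer.
 */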
static VkResult
wsi_wl_swapchain_destroy(struct radv_swapchain *radv_chain,
			 const VkAllocationCallbacks *pAllocator)
{
	struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)radv_chain;

	for (uint32_t i = 0; i < chain->image_count; i++) {
		if (chain->images[i].buffer)
			wsi_wl_image_finish(chain, &chain->images[i], pAllocator);
	}

	radv_free2(&chain->base.device->alloc, pAllocator, chain);

	return VK_SUCCESS;
}

static VkResult
wsi_wl_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
				struct radv_device *device,
				const VkSwapchainCreateInfoKHR* pCreateInfo,
				const VkAllocationCallbacks* pAllocator,
				struct radv_swapchain **swapchain_out)
{
	VkIcdSurfaceWayland *surface = (VkIcdSurfaceWayland *)icd_surface;
	struct wsi_wl_swapchain *chain;
	VkResult result;

	assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR);

	int num_images = pCreateInfo->minImageCount;

	assert(num_images >= MIN_NUM_IMAGES);

	/* For true mailbox mode, we need at least 4 images:
	 * 1) One to scan out from
	 * 2) One to have queued for scan-out
	 * 3) One to be currently held by the Wayland compositor
	 * 4) One to render to
	 */
	if (pCreateInfo->presentMode == VK_PRESENT_MODE_MAILBOX_KHR)
		num_images = MAX2(num_images, 4);

	size_t size = sizeof(*chain) + num_images * sizeof(chain->images[0]);
	chain = radv_alloc2(&device->alloc, pAllocator, size, 8,
			    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (chain == NULL)
		return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

	chain->base.device = device;
	chain->base.destroy = wsi_wl_swapchain_destroy;
	chain->base.get_images = wsi_wl_swapchain_get_images;
	chain->base.acquire_next_image = wsi_wl_swapchain_acquire_next_image;
	chain->base.queue_present = wsi_wl_swapchain_queue_present;

	chain->surface = surface->surface;
	chain->extent = pCreateInfo->imageExtent;
	chain->vk_format = pCreateInfo->imageFormat;
	chain->drm_format = wl_drm_format_for_vk_format(chain->vk_format, false);

	chain->present_mode = pCreateInfo->presentMode;
	chain->fifo_ready = true;

	chain->image_count = num_images;

	/* Mark a bunch of stuff as NULL. This way we can just call
	 * destroy_swapchain for cleanup.
	 */
	for (uint32_t i = 0; i < chain->image_count; i++)
		chain->images[i].buffer = NULL;
	chain->queue = NULL;

	chain->display = wsi_wl_get_display(&device->instance->physicalDevice,
					    surface->display);
	if (!chain->display) {
		result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
		goto fail;
	}

	chain->queue = wl_display_create_queue(chain->display->display);
	if (!chain->queue) {
		result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
		goto fail;
	}

	for (uint32_t i = 0; i < chain->image_count; i++) {
		result = wsi_wl_image_init(chain, &chain->images[i],
					   pCreateInfo, pAllocator);
		if (result != VK_SUCCESS)
			goto fail;
		chain->images[i].busy = false;
	}

	*swapchain_out = &chain->base;

	return VK_SUCCESS;

fail:
	wsi_wl_swapchain_destroy(&chain->base, pAllocator);

	return result;
}

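/* The entry point above is reached through vkCreateSwapchainKHR on a surface
 * created with vkCreateWaylandSurfaceKHR. A minimal application-side sketch
 * (illustrative only, assuming an existing instance, device, wl_display,
 * wl_surface and an extent already validated against the surface queries
 * above):
 */
#if 0
	VkWaylandSurfaceCreateInfoKHR surface_info = {
		.sType = VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR,
		.display = wl_display,
		.surface = wl_surface,
	};
	VkSurfaceKHR vk_surface;
	vkCreateWaylandSurfaceKHR(instance, &surface_info, NULL, &vk_surface);

	VkSwapchainCreateInfoKHR swapchain_info = {
		.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR,
		.surface = vk_surface,
		.minImageCount = 2, /* clamped to >= 4 for mailbox above */
		.imageFormat = VK_FORMAT_B8G8R8A8_SRGB,
		.imageColorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR,
		.imageExtent = extent,
		.imageArrayLayers = 1,
		.imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
		.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE,
		.preTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR,
		.compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR,
		.presentMode = VK_PRESENT_MODE_MAILBOX_KHR,
		.clipped = VK_TRUE,
	};
	VkSwapchainKHR vk_swapchain;
	vkCreateSwapchainKHR(device, &swapchain_info, NULL, &vk_swapchain);
#endif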
VkResult
radv_wl_init_wsi(struct radv_physical_device *device)
{
	struct wsi_wayland *wsi;
	VkResult result;

	wsi = radv_alloc(&device->instance->alloc, sizeof(*wsi), 8,
			 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
	if (!wsi) {
		result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
		goto fail;
	}

	wsi->physical_device = device;

	int ret = pthread_mutex_init(&wsi->mutex, NULL);
	if (ret != 0) {
		if (ret == ENOMEM) {
			result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
		} else {
			/* FINISHME: Choose a better error. */
			result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
		}

		goto fail_alloc;
	}

	wsi->displays = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
						_mesa_key_pointer_equal);
	if (!wsi->displays) {
		result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
		goto fail_mutex;
	}

	wsi->base.get_support = wsi_wl_surface_get_support;
	wsi->base.get_capabilities = wsi_wl_surface_get_capabilities;
	wsi->base.get_formats = wsi_wl_surface_get_formats;
	wsi->base.get_present_modes = wsi_wl_surface_get_present_modes;
	wsi->base.create_swapchain = wsi_wl_surface_create_swapchain;

	device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND] = &wsi->base;

	return VK_SUCCESS;

fail_mutex:
	pthread_mutex_destroy(&wsi->mutex);

fail_alloc:
	radv_free(&device->instance->alloc, wsi);
fail:
	device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND] = NULL;

	return result;
}

void
radv_wl_finish_wsi(struct radv_physical_device *device)
{
	struct wsi_wayland *wsi =
		(struct wsi_wayland *)device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND];

	if (wsi) {
		_mesa_hash_table_destroy(wsi->displays, NULL);

		pthread_mutex_destroy(&wsi->mutex);

		radv_free(&device->instance->alloc, wsi);
	}
}