vulkan/wsi/wayland: Return better error messages
[mesa.git] src/vulkan/wsi/wsi_common_wayland.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <wayland-client.h>

#include <assert.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <pthread.h>

#include "vk_util.h"
#include "wsi_common_wayland.h"
#include "wayland-drm-client-protocol.h"

#include <util/hash_table.h>
#include <util/u_vector.h>

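/* Copies 'count' elements from 'src' to 'dest' while statically asserting
 * that the two element types have the same size, e.g. (sketch):
 *
 *    typed_memcpy(pPresentModes, present_modes, *pPresentModeCount);
 *
 * The STATIC_ASSERT turns an accidental type mismatch into a compile error
 * instead of a silent memory smash.
 */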
#define typed_memcpy(dest, src, count) ({ \
   STATIC_ASSERT(sizeof(*src) == sizeof(*dest)); \
   memcpy((dest), (src), (count) * sizeof(*(src))); \
})

struct wsi_wayland;

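/* Per-display WSI state.
 *
 * All Wayland requests made by the WSI code go through wl_display_wrapper,
 * a proxy wrapper whose events are delivered to the private 'queue' rather
 * than to the application's default queue. This lets the driver dispatch
 * its own events (wl_drm, frame callbacks, buffer releases) without ever
 * stealing or re-ordering events that belong to the application.
 */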
struct wsi_wl_display {
   /* The real wl_display */
   struct wl_display *wl_display;
   /* Actually a proxy wrapper around the event queue */
   struct wl_display *wl_display_wrapper;
   struct wl_event_queue *queue;
   struct wl_drm *drm;

   struct wsi_wayland *wsi_wl;
   /* Vector of VkFormats supported */
   struct u_vector formats;

   uint32_t capabilities;

   /* Only used for displays created by wsi_wl_display_create */
   uint32_t refcount;
};

struct wsi_wayland {
   struct wsi_interface base;

   const VkAllocationCallbacks *alloc;
   VkPhysicalDevice physical_device;

   const struct wsi_callbacks *cbs;
};

static void
wsi_wl_display_add_vk_format(struct wsi_wl_display *display, VkFormat format)
{
   /* Don't add a format that's already in the list */
   VkFormat *f;
   u_vector_foreach(f, &display->formats)
      if (*f == format)
         return;

   /* Don't add formats that aren't renderable. */
   VkFormatProperties props;

   display->wsi_wl->cbs->get_phys_device_format_properties(display->wsi_wl->physical_device,
                                                           format, &props);
   if (!(props.optimalTilingFeatures & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT))
      return;

   f = u_vector_add(&display->formats);
   if (f)
      *f = format;
}

static void
drm_handle_device(void *data, struct wl_drm *drm, const char *name)
{
}

static uint32_t
wl_drm_format_for_vk_format(VkFormat vk_format, bool alpha)
{
   switch (vk_format) {
   /* TODO: Figure out what all the formats mean and make this table
    * correct.
    */
#if 0
   case VK_FORMAT_R4G4B4A4_UNORM:
      return alpha ? WL_DRM_FORMAT_ABGR4444 : WL_DRM_FORMAT_XBGR4444;
   case VK_FORMAT_R5G6B5_UNORM:
      return WL_DRM_FORMAT_BGR565;
   case VK_FORMAT_R5G5B5A1_UNORM:
      return alpha ? WL_DRM_FORMAT_ABGR1555 : WL_DRM_FORMAT_XBGR1555;
   case VK_FORMAT_R8G8B8_UNORM:
      return WL_DRM_FORMAT_XBGR8888;
   case VK_FORMAT_R8G8B8A8_UNORM:
      return alpha ? WL_DRM_FORMAT_ABGR8888 : WL_DRM_FORMAT_XBGR8888;
   case VK_FORMAT_R10G10B10A2_UNORM:
      return alpha ? WL_DRM_FORMAT_ABGR2101010 : WL_DRM_FORMAT_XBGR2101010;
   case VK_FORMAT_B4G4R4A4_UNORM:
      return alpha ? WL_DRM_FORMAT_ARGB4444 : WL_DRM_FORMAT_XRGB4444;
   case VK_FORMAT_B5G6R5_UNORM:
      return WL_DRM_FORMAT_RGB565;
   case VK_FORMAT_B5G5R5A1_UNORM:
      return alpha ? WL_DRM_FORMAT_ARGB1555 : WL_DRM_FORMAT_XRGB1555;
#endif
   case VK_FORMAT_B8G8R8_UNORM:
   case VK_FORMAT_B8G8R8_SRGB:
      return WL_DRM_FORMAT_BGRX8888;
   case VK_FORMAT_B8G8R8A8_UNORM:
   case VK_FORMAT_B8G8R8A8_SRGB:
      return alpha ? WL_DRM_FORMAT_ARGB8888 : WL_DRM_FORMAT_XRGB8888;
#if 0
   case VK_FORMAT_B10G10R10A2_UNORM:
      return alpha ? WL_DRM_FORMAT_ARGB2101010 : WL_DRM_FORMAT_XRGB2101010;
#endif

   default:
      assert(!"Unsupported Vulkan format");
      return 0;
   }
}

static void
drm_handle_format(void *data, struct wl_drm *drm, uint32_t wl_format)
{
   struct wsi_wl_display *display = data;
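   /* If the display was initialized with get_format_list == false, the
    * format vector was never set up (element_size stays 0 from the memset),
    * so format events are simply ignored.
    */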
   if (display->formats.element_size == 0)
      return;

   switch (wl_format) {
#if 0
   case WL_DRM_FORMAT_ABGR4444:
   case WL_DRM_FORMAT_XBGR4444:
      wsi_wl_display_add_vk_format(display, VK_FORMAT_R4G4B4A4_UNORM);
      break;
   case WL_DRM_FORMAT_BGR565:
      wsi_wl_display_add_vk_format(display, VK_FORMAT_R5G6B5_UNORM);
      break;
   case WL_DRM_FORMAT_ABGR1555:
   case WL_DRM_FORMAT_XBGR1555:
      wsi_wl_display_add_vk_format(display, VK_FORMAT_R5G5B5A1_UNORM);
      break;
   case WL_DRM_FORMAT_XBGR8888:
      wsi_wl_display_add_vk_format(display, VK_FORMAT_R8G8B8_UNORM);
      /* fallthrough */
   case WL_DRM_FORMAT_ABGR8888:
      wsi_wl_display_add_vk_format(display, VK_FORMAT_R8G8B8A8_UNORM);
      break;
   case WL_DRM_FORMAT_ABGR2101010:
   case WL_DRM_FORMAT_XBGR2101010:
      wsi_wl_display_add_vk_format(display, VK_FORMAT_R10G10B10A2_UNORM);
      break;
   case WL_DRM_FORMAT_ARGB4444:
   case WL_DRM_FORMAT_XRGB4444:
      wsi_wl_display_add_vk_format(display, VK_FORMAT_B4G4R4A4_UNORM);
      break;
   case WL_DRM_FORMAT_RGB565:
      wsi_wl_display_add_vk_format(display, VK_FORMAT_B5G6R5_UNORM);
      break;
   case WL_DRM_FORMAT_ARGB1555:
   case WL_DRM_FORMAT_XRGB1555:
      wsi_wl_display_add_vk_format(display, VK_FORMAT_B5G5R5A1_UNORM);
      break;
#endif
   case WL_DRM_FORMAT_XRGB8888:
      wsi_wl_display_add_vk_format(display, VK_FORMAT_B8G8R8_SRGB);
      wsi_wl_display_add_vk_format(display, VK_FORMAT_B8G8R8_UNORM);
      /* fallthrough */
   case WL_DRM_FORMAT_ARGB8888:
      wsi_wl_display_add_vk_format(display, VK_FORMAT_B8G8R8A8_SRGB);
      wsi_wl_display_add_vk_format(display, VK_FORMAT_B8G8R8A8_UNORM);
      break;
#if 0
   case WL_DRM_FORMAT_ARGB2101010:
   case WL_DRM_FORMAT_XRGB2101010:
      wsi_wl_display_add_vk_format(display, VK_FORMAT_B10G10R10A2_UNORM);
      break;
#endif
   }
}

static void
drm_handle_authenticated(void *data, struct wl_drm *drm)
{
}

static void
drm_handle_capabilities(void *data, struct wl_drm *drm, uint32_t capabilities)
{
   struct wsi_wl_display *display = data;

   display->capabilities = capabilities;
}

static const struct wl_drm_listener drm_listener = {
   drm_handle_device,
   drm_handle_format,
   drm_handle_authenticated,
   drm_handle_capabilities,
};

static void
registry_handle_global(void *data, struct wl_registry *registry,
                       uint32_t name, const char *interface, uint32_t version)
{
   struct wsi_wl_display *display = data;

   if (strcmp(interface, "wl_drm") == 0) {
      assert(display->drm == NULL);

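      /* Version 2 of wl_drm is the first one that advertises capabilities
       * and supports create_prime_buffer, both of which are relied on below.
       */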
      assert(version >= 2);
      display->drm = wl_registry_bind(registry, name, &wl_drm_interface, 2);

      if (display->drm)
         wl_drm_add_listener(display->drm, &drm_listener, display);
   }
}

static void
registry_handle_global_remove(void *data, struct wl_registry *registry,
                              uint32_t name)
{ /* No-op */ }

static const struct wl_registry_listener registry_listener = {
   registry_handle_global,
   registry_handle_global_remove
};

static void
wsi_wl_display_finish(struct wsi_wl_display *display)
{
   assert(display->refcount == 0);

   u_vector_finish(&display->formats);
   if (display->drm)
      wl_drm_destroy(display->drm);
   if (display->wl_display_wrapper)
      wl_proxy_wrapper_destroy(display->wl_display_wrapper);
   if (display->queue)
      wl_event_queue_destroy(display->queue);
}

static VkResult
wsi_wl_display_init(struct wsi_wayland *wsi_wl,
                    struct wsi_wl_display *display,
                    struct wl_display *wl_display,
                    bool get_format_list)
{
   VkResult result = VK_SUCCESS;
   memset(display, 0, sizeof(*display));

   display->wsi_wl = wsi_wl;
   display->wl_display = wl_display;

   if (get_format_list) {
      if (!u_vector_init(&display->formats, sizeof(VkFormat), 8)) {
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
         goto fail;
      }
   }

   display->queue = wl_display_create_queue(wl_display);
   if (!display->queue) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail;
   }

   display->wl_display_wrapper = wl_proxy_create_wrapper(wl_display);
   if (!display->wl_display_wrapper) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail;
   }

   wl_proxy_set_queue((struct wl_proxy *) display->wl_display_wrapper,
                      display->queue);

   struct wl_registry *registry =
      wl_display_get_registry(display->wl_display_wrapper);
   if (!registry) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail;
   }

   wl_registry_add_listener(registry, &registry_listener, display);

   /* Round-trip to get the wl_drm global */
   wl_display_roundtrip_queue(display->wl_display, display->queue);

   if (!display->drm) {
      result = VK_ERROR_SURFACE_LOST_KHR;
      goto fail_registry;
   }

   /* Round-trip to get wl_drm formats and capabilities */
   wl_display_roundtrip_queue(display->wl_display, display->queue);

   /* We need prime support */
   if (!(display->capabilities & WL_DRM_CAPABILITY_PRIME)) {
      result = VK_ERROR_SURFACE_LOST_KHR;
      goto fail_registry;
   }

   /* We don't need this anymore */
   wl_registry_destroy(registry);

   display->refcount = 0;

   return VK_SUCCESS;

fail_registry:
   if (registry)
      wl_registry_destroy(registry);

fail:
   wsi_wl_display_finish(display);
   return result;
}

static VkResult
wsi_wl_display_create(struct wsi_wayland *wsi, struct wl_display *wl_display,
                      struct wsi_wl_display **display_out)
{
   struct wsi_wl_display *display =
      vk_alloc(wsi->alloc, sizeof(*display), 8,
               VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!display)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   VkResult result = wsi_wl_display_init(wsi, display, wl_display, true);
   if (result != VK_SUCCESS) {
      vk_free(wsi->alloc, display);
      return result;
   }

   display->refcount++;
   *display_out = display;

   return result;
}

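/* Heap-allocated displays returned by wsi_wl_display_create() start with a
 * refcount of 1 and are torn down by wsi_wl_display_unref() when the count
 * drops back to 0. Stack-allocated displays set up directly with
 * wsi_wl_display_init() keep refcount == 0 and must be cleaned up with
 * wsi_wl_display_finish() instead.
 */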
static struct wsi_wl_display *
wsi_wl_display_ref(struct wsi_wl_display *display)
{
   display->refcount++;
   return display;
}

static void
wsi_wl_display_unref(struct wsi_wl_display *display)
{
   if (display->refcount-- > 1)
      return;

   struct wsi_wayland *wsi = display->wsi_wl;
   wsi_wl_display_finish(display);
   vk_free(wsi->alloc, display);
}

VkBool32
wsi_wl_get_presentation_support(struct wsi_device *wsi_device,
                                struct wl_display *wl_display)
{
   struct wsi_wayland *wsi =
      (struct wsi_wayland *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND];

   struct wsi_wl_display display;
   VkResult ret = wsi_wl_display_init(wsi, &display, wl_display, false);
   if (ret == VK_SUCCESS)
      wsi_wl_display_finish(&display);

   return ret == VK_SUCCESS;
}

static VkResult
wsi_wl_surface_get_support(VkIcdSurfaceBase *surface,
                           struct wsi_device *wsi_device,
                           const VkAllocationCallbacks *alloc,
                           uint32_t queueFamilyIndex,
                           int local_fd,
                           bool can_handle_different_gpu,
                           VkBool32* pSupported)
{
   *pSupported = true;

   return VK_SUCCESS;
}

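/* Only MAILBOX and FIFO are advertised. FIFO is the mode every Vulkan
 * implementation is required to support, and it maps naturally onto frame
 * callbacks (see wsi_wl_swapchain_queue_present). IMMEDIATE is not offered
 * here since presentation always goes through the compositor, which holds
 * the currently displayed buffer.
 */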
static const VkPresentModeKHR present_modes[] = {
   VK_PRESENT_MODE_MAILBOX_KHR,
   VK_PRESENT_MODE_FIFO_KHR,
};

static VkResult
wsi_wl_surface_get_capabilities(VkIcdSurfaceBase *surface,
                                VkSurfaceCapabilitiesKHR* caps)
{
   /* For true mailbox mode, we need at least 4 images:
    *  1) One to scan out from
    *  2) One to have queued for scan-out
    *  3) One to be currently held by the Wayland compositor
    *  4) One to render to
    */
   caps->minImageCount = 4;
   /* There is no real maximum */
   caps->maxImageCount = 0;

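   /* A currentExtent of (0xFFFFFFFF, 0xFFFFFFFF), written here as -1, tells
    * the application that the surface size is determined by the swapchain
    * extent rather than by the surface itself.
    */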
   caps->currentExtent = (VkExtent2D) { -1, -1 };
   caps->minImageExtent = (VkExtent2D) { 1, 1 };
   /* This is the maximum supported size on Intel */
   caps->maxImageExtent = (VkExtent2D) { 1 << 14, 1 << 14 };
   caps->supportedTransforms = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
   caps->currentTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
   caps->maxImageArrayLayers = 1;

   caps->supportedCompositeAlpha =
      VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR |
      VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR;

   caps->supportedUsageFlags =
      VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
      VK_IMAGE_USAGE_SAMPLED_BIT |
      VK_IMAGE_USAGE_TRANSFER_DST_BIT |
      VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;

   return VK_SUCCESS;
}

static VkResult
wsi_wl_surface_get_capabilities2(VkIcdSurfaceBase *surface,
                                 const void *info_next,
                                 VkSurfaceCapabilities2KHR* caps)
{
   assert(caps->sType == VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR);

   return wsi_wl_surface_get_capabilities(surface, &caps->surfaceCapabilities);
}

static VkResult
wsi_wl_surface_get_formats(VkIcdSurfaceBase *icd_surface,
                           struct wsi_device *wsi_device,
                           uint32_t* pSurfaceFormatCount,
                           VkSurfaceFormatKHR* pSurfaceFormats)
{
   VkIcdSurfaceWayland *surface = (VkIcdSurfaceWayland *)icd_surface;
   struct wsi_wayland *wsi =
      (struct wsi_wayland *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND];

   struct wsi_wl_display display;
   if (wsi_wl_display_init(wsi, &display, surface->display, true))
      return VK_ERROR_SURFACE_LOST_KHR;

   VK_OUTARRAY_MAKE(out, pSurfaceFormats, pSurfaceFormatCount);

   VkFormat *disp_fmt;
   u_vector_foreach(disp_fmt, &display.formats) {
      vk_outarray_append(&out, out_fmt) {
         out_fmt->format = *disp_fmt;
         out_fmt->colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
      }
   }

   wsi_wl_display_finish(&display);

   return vk_outarray_status(&out);
}

static VkResult
wsi_wl_surface_get_formats2(VkIcdSurfaceBase *icd_surface,
                            struct wsi_device *wsi_device,
                            const void *info_next,
                            uint32_t* pSurfaceFormatCount,
                            VkSurfaceFormat2KHR* pSurfaceFormats)
{
   VkIcdSurfaceWayland *surface = (VkIcdSurfaceWayland *)icd_surface;
   struct wsi_wayland *wsi =
      (struct wsi_wayland *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND];

   struct wsi_wl_display display;
   if (wsi_wl_display_init(wsi, &display, surface->display, true))
      return VK_ERROR_SURFACE_LOST_KHR;

   VK_OUTARRAY_MAKE(out, pSurfaceFormats, pSurfaceFormatCount);

   VkFormat *disp_fmt;
   u_vector_foreach(disp_fmt, &display.formats) {
      vk_outarray_append(&out, out_fmt) {
         out_fmt->surfaceFormat.format = *disp_fmt;
         out_fmt->surfaceFormat.colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
      }
   }

   wsi_wl_display_finish(&display);

   return vk_outarray_status(&out);
}

static VkResult
wsi_wl_surface_get_present_modes(VkIcdSurfaceBase *surface,
                                 uint32_t* pPresentModeCount,
                                 VkPresentModeKHR* pPresentModes)
{
   if (pPresentModes == NULL) {
      *pPresentModeCount = ARRAY_SIZE(present_modes);
      return VK_SUCCESS;
   }

   *pPresentModeCount = MIN2(*pPresentModeCount, ARRAY_SIZE(present_modes));
   typed_memcpy(pPresentModes, present_modes, *pPresentModeCount);

   if (*pPresentModeCount < ARRAY_SIZE(present_modes))
      return VK_INCOMPLETE;
   else
      return VK_SUCCESS;
}

VkResult wsi_create_wl_surface(const VkAllocationCallbacks *pAllocator,
                               const VkWaylandSurfaceCreateInfoKHR *pCreateInfo,
                               VkSurfaceKHR *pSurface)
{
   VkIcdSurfaceWayland *surface;

   surface = vk_alloc(pAllocator, sizeof *surface, 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (surface == NULL)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   surface->base.platform = VK_ICD_WSI_PLATFORM_WAYLAND;
   surface->display = pCreateInfo->display;
   surface->surface = pCreateInfo->surface;

   *pSurface = VkIcdSurfaceBase_to_handle(&surface->base);

   return VK_SUCCESS;
}

struct wsi_wl_image {
   VkImage image;
   VkDeviceMemory memory;
   struct wl_buffer *buffer;
   bool busy;
};

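/* The swapchain is allocated with storage for image_count wsi_wl_image
 * structs immediately after it (see the size computation in
 * wsi_wl_surface_create_swapchain), which is what the trailing images[]
 * array refers to.
 */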
struct wsi_wl_swapchain {
   struct wsi_swapchain base;

   struct wsi_wl_display *display;

   struct wl_surface *surface;
   uint32_t surface_version;
   struct wl_drm *drm_wrapper;
   struct wl_callback *frame;

   VkExtent2D extent;
   VkFormat vk_format;
   uint32_t drm_format;

   VkPresentModeKHR present_mode;
   bool fifo_ready;

   struct wsi_wl_image images[0];
};

static VkResult
wsi_wl_swapchain_get_images(struct wsi_swapchain *wsi_chain,
                            uint32_t *pCount, VkImage *pSwapchainImages)
{
   struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
   uint32_t ret_count;
   VkResult result;

   if (pSwapchainImages == NULL) {
      *pCount = chain->base.image_count;
      return VK_SUCCESS;
   }

   result = VK_SUCCESS;
   ret_count = chain->base.image_count;
   if (chain->base.image_count > *pCount) {
      ret_count = *pCount;
      result = VK_INCOMPLETE;
   }

   for (uint32_t i = 0; i < ret_count; i++)
      pSwapchainImages[i] = chain->images[i].image;

   return result;
}

static VkResult
wsi_wl_swapchain_acquire_next_image(struct wsi_swapchain *wsi_chain,
                                    uint64_t timeout,
                                    VkSemaphore semaphore,
                                    uint32_t *image_index)
{
   struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;

   int ret = wl_display_dispatch_queue_pending(chain->display->wl_display,
                                               chain->display->queue);
   /* XXX: I'm not sure if out-of-date is the right error here. If
    * wl_display_dispatch_queue_pending fails it most likely means we got
    * kicked by the server so this seems more-or-less correct.
    */
   if (ret < 0)
      return VK_ERROR_OUT_OF_DATE_KHR;

   while (1) {
      for (uint32_t i = 0; i < chain->base.image_count; i++) {
         if (!chain->images[i].busy) {
            /* We found a non-busy image */
            *image_index = i;
            chain->images[i].busy = true;
            return VK_SUCCESS;
         }
      }

      /* This time we do a blocking dispatch because we can't go
       * anywhere until we get an event.
       */
      int ret = wl_display_roundtrip_queue(chain->display->wl_display,
                                           chain->display->queue);
      if (ret < 0)
         return VK_ERROR_OUT_OF_DATE_KHR;
   }
}

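/* FIFO throttling: queue_present installs a wl_surface.frame callback and
 * clears fifo_ready; the compositor fires the callback roughly once per
 * repaint, at which point the next present is allowed to proceed. In
 * MAILBOX mode no frame callback is used and presents are only limited by
 * buffer-release events.
 */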
static void
frame_handle_done(void *data, struct wl_callback *callback, uint32_t serial)
{
   struct wsi_wl_swapchain *chain = data;

   chain->frame = NULL;
   chain->fifo_ready = true;

   wl_callback_destroy(callback);
}

static const struct wl_callback_listener frame_listener = {
   frame_handle_done,
};

static VkResult
wsi_wl_swapchain_queue_present(struct wsi_swapchain *wsi_chain,
                               uint32_t image_index,
                               const VkPresentRegionKHR *damage)
{
   struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;

   if (chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR) {
      while (!chain->fifo_ready) {
         int ret = wl_display_dispatch_queue(chain->display->wl_display,
                                             chain->display->queue);
         if (ret < 0)
            return VK_ERROR_OUT_OF_DATE_KHR;
      }
   }

   assert(image_index < chain->base.image_count);
   wl_surface_attach(chain->surface, chain->images[image_index].buffer, 0, 0);

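   /* wl_surface.damage_buffer (buffer-coordinate damage) is only available
    * from wl_surface version 4 onwards; on older compositors we fall back to
    * damaging the entire surface.
    */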
   if (chain->surface_version >= 4 && damage &&
       damage->pRectangles && damage->rectangleCount > 0) {
      for (unsigned i = 0; i < damage->rectangleCount; i++) {
         const VkRectLayerKHR *rect = &damage->pRectangles[i];
         assert(rect->layer == 0);
         wl_surface_damage_buffer(chain->surface,
                                  rect->offset.x, rect->offset.y,
                                  rect->extent.width, rect->extent.height);
      }
   } else {
      wl_surface_damage(chain->surface, 0, 0, INT32_MAX, INT32_MAX);
   }

   if (chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR) {
      chain->frame = wl_surface_frame(chain->surface);
      wl_callback_add_listener(chain->frame, &frame_listener, chain);
      chain->fifo_ready = false;
   }

   chain->images[image_index].busy = true;
   wl_surface_commit(chain->surface);
   wl_display_flush(chain->display->wl_display);

   return VK_SUCCESS;
}

static void
buffer_handle_release(void *data, struct wl_buffer *buffer)
{
   struct wsi_wl_image *image = data;

   assert(image->buffer == buffer);

   image->busy = false;
}

static const struct wl_buffer_listener buffer_listener = {
   buffer_handle_release,
};

static VkResult
wsi_wl_image_init(struct wsi_wl_swapchain *chain,
                  struct wsi_wl_image *image,
                  const VkSwapchainCreateInfoKHR *pCreateInfo,
                  const VkAllocationCallbacks* pAllocator)
{
   VkDevice vk_device = chain->base.device;
   VkResult result;
   int fd;
   uint32_t size;
   uint32_t row_pitch;
   uint32_t offset;
   result = chain->base.image_fns->create_wsi_image(vk_device,
                                                    pCreateInfo,
                                                    pAllocator,
                                                    false,
                                                    false,
                                                    &image->image,
                                                    &image->memory,
                                                    &size,
                                                    &offset,
                                                    &row_pitch,
                                                    &fd);
   if (result != VK_SUCCESS)
      return result;

   image->buffer = wl_drm_create_prime_buffer(chain->drm_wrapper,
                                              fd, /* name */
                                              chain->extent.width,
                                              chain->extent.height,
                                              chain->drm_format,
                                              offset,
                                              row_pitch,
                                              0, 0, 0, 0 /* unused */);
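   /* The dma-buf fd is duplicated by libwayland when the request is
    * marshalled, so our copy can (and should) be closed right away to avoid
    * leaking one fd per swapchain image.
    */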
   close(fd);

   if (!image->buffer) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail_image;
   }

   wl_buffer_add_listener(image->buffer, &buffer_listener, image);

   return VK_SUCCESS;

fail_image:
   chain->base.image_fns->free_wsi_image(vk_device, pAllocator,
                                         image->image, image->memory);

   return result;
}

static VkResult
wsi_wl_swapchain_destroy(struct wsi_swapchain *wsi_chain,
                         const VkAllocationCallbacks *pAllocator)
{
   struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;

   for (uint32_t i = 0; i < chain->base.image_count; i++) {
      if (chain->images[i].buffer) {
         wl_buffer_destroy(chain->images[i].buffer);
         chain->base.image_fns->free_wsi_image(chain->base.device, pAllocator,
                                               chain->images[i].image,
                                               chain->images[i].memory);
      }
   }

   if (chain->frame)
      wl_callback_destroy(chain->frame);
   if (chain->surface)
      wl_proxy_wrapper_destroy(chain->surface);
   if (chain->drm_wrapper)
      wl_proxy_wrapper_destroy(chain->drm_wrapper);

   if (chain->display)
      wsi_wl_display_unref(chain->display);

   vk_free(pAllocator, chain);

   return VK_SUCCESS;
}

static VkResult
wsi_wl_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
                                VkDevice device,
                                struct wsi_device *wsi_device,
                                int local_fd,
                                const VkSwapchainCreateInfoKHR* pCreateInfo,
                                const VkAllocationCallbacks* pAllocator,
                                const struct wsi_image_fns *image_fns,
                                struct wsi_swapchain **swapchain_out)
{
   VkIcdSurfaceWayland *surface = (VkIcdSurfaceWayland *)icd_surface;
   struct wsi_wayland *wsi =
      (struct wsi_wayland *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND];
   struct wsi_wl_swapchain *chain;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR);

   int num_images = pCreateInfo->minImageCount;

   size_t size = sizeof(*chain) + num_images * sizeof(chain->images[0]);
   chain = vk_alloc(pAllocator, size, 8,
                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (chain == NULL)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   /* Mark a bunch of stuff as NULL. This way we can just call
    * destroy_swapchain for cleanup.
    */
   for (uint32_t i = 0; i < num_images; i++)
      chain->images[i].buffer = NULL;
   chain->surface = NULL;
   chain->drm_wrapper = NULL;
   chain->frame = NULL;

   bool alpha = pCreateInfo->compositeAlpha ==
                VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR;

   chain->base.device = device;
   chain->base.destroy = wsi_wl_swapchain_destroy;
   chain->base.get_images = wsi_wl_swapchain_get_images;
   chain->base.acquire_next_image = wsi_wl_swapchain_acquire_next_image;
   chain->base.queue_present = wsi_wl_swapchain_queue_present;
   chain->base.image_fns = image_fns;
   chain->base.present_mode = pCreateInfo->presentMode;
   chain->base.image_count = num_images;
   chain->base.needs_linear_copy = false;
   chain->extent = pCreateInfo->imageExtent;
   chain->vk_format = pCreateInfo->imageFormat;
   chain->drm_format = wl_drm_format_for_vk_format(chain->vk_format, alpha);

   if (pCreateInfo->oldSwapchain) {
      /* If we have an oldSwapchain parameter, copy the display struct over
       * from the old one so we don't have to fully re-initialize it.
       */
      struct wsi_wl_swapchain *old_chain = (void *)pCreateInfo->oldSwapchain;
      chain->display = wsi_wl_display_ref(old_chain->display);
   } else {
      chain->display = NULL;
      result = wsi_wl_display_create(wsi, surface->display, &chain->display);
      if (result != VK_SUCCESS)
         goto fail;
   }

   chain->surface = wl_proxy_create_wrapper(surface->surface);
   if (!chain->surface) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail;
   }
   wl_proxy_set_queue((struct wl_proxy *) chain->surface,
                      chain->display->queue);
   chain->surface_version = wl_proxy_get_version((void *)surface->surface);

   chain->drm_wrapper = wl_proxy_create_wrapper(chain->display->drm);
   if (!chain->drm_wrapper) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail;
   }
   wl_proxy_set_queue((struct wl_proxy *) chain->drm_wrapper,
                      chain->display->queue);

   chain->fifo_ready = true;

   for (uint32_t i = 0; i < chain->base.image_count; i++) {
      result = wsi_wl_image_init(chain, &chain->images[i],
                                 pCreateInfo, pAllocator);
      if (result != VK_SUCCESS)
         goto fail;
      chain->images[i].busy = false;
   }

   *swapchain_out = &chain->base;

   return VK_SUCCESS;

fail:
   wsi_wl_swapchain_destroy(&chain->base, pAllocator);

   return result;
}

VkResult
wsi_wl_init_wsi(struct wsi_device *wsi_device,
                const VkAllocationCallbacks *alloc,
                VkPhysicalDevice physical_device,
                const struct wsi_callbacks *cbs)
{
   struct wsi_wayland *wsi;
   VkResult result;

   wsi = vk_alloc(alloc, sizeof(*wsi), 8,
                  VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!wsi) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail;
   }

   wsi->physical_device = physical_device;
   wsi->alloc = alloc;
   wsi->cbs = cbs;

   wsi->base.get_support = wsi_wl_surface_get_support;
   wsi->base.get_capabilities = wsi_wl_surface_get_capabilities;
   wsi->base.get_capabilities2 = wsi_wl_surface_get_capabilities2;
   wsi->base.get_formats = wsi_wl_surface_get_formats;
   wsi->base.get_formats2 = wsi_wl_surface_get_formats2;
   wsi->base.get_present_modes = wsi_wl_surface_get_present_modes;
   wsi->base.create_swapchain = wsi_wl_surface_create_swapchain;

   wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND] = &wsi->base;

   return VK_SUCCESS;

fail:
   wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND] = NULL;

   return result;
}

void
wsi_wl_finish_wsi(struct wsi_device *wsi_device,
                  const VkAllocationCallbacks *alloc)
{
   struct wsi_wayland *wsi =
      (struct wsi_wayland *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND];
   if (!wsi)
      return;

   vk_free(alloc, wsi);
}