/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <wayland-client.h>

#include <assert.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <pthread.h>

#include "util/vk_util.h"
#include "wsi_common_wayland.h"
#include "wayland-drm-client-protocol.h"

#include <util/hash_table.h>
#include <util/u_vector.h>

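/* memcpy() wrapper for copying a counted array: the STATIC_ASSERT turns a
 * source/destination element-size mismatch into a compile-time error instead
 * of a silent partial copy.
 */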
#define typed_memcpy(dest, src, count) ({ \
   STATIC_ASSERT(sizeof(*src) == sizeof(*dest)); \
   memcpy((dest), (src), (count) * sizeof(*(src))); \
})

struct wsi_wayland;

struct wsi_wl_display {
   struct wl_display *display;
   struct wl_drm *drm;

   struct wsi_wayland *wsi_wl;
   /* Vector of VkFormats supported */
   struct u_vector formats;

   uint32_t capabilities;
};

struct wsi_wayland {
   struct wsi_interface base;

   const VkAllocationCallbacks *alloc;
   VkPhysicalDevice physical_device;

   pthread_mutex_t mutex;
   /* Hash table of wl_display -> wsi_wl_display mappings */
   struct hash_table *displays;

   const struct wsi_callbacks *cbs;
};

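/* Record a VkFormat as presentable on this display.  Duplicates are skipped,
 * as are formats the physical device cannot render to with optimal tiling,
 * so the vector only ever contains formats usable as a swapchain color
 * attachment.
 */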
static void
wsi_wl_display_add_vk_format(struct wsi_wl_display *display, VkFormat format)
{
   /* Don't add a format that's already in the list */
   VkFormat *f;
   u_vector_foreach(f, &display->formats)
      if (*f == format)
         return;

   /* Don't add formats that aren't renderable. */
   VkFormatProperties props;

   display->wsi_wl->cbs->get_phys_device_format_properties(display->wsi_wl->physical_device,
                                                           format, &props);
   if (!(props.optimalTilingFeatures & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT))
      return;

   f = u_vector_add(&display->formats);
   if (f)
      *f = format;
}

static void
drm_handle_device(void *data, struct wl_drm *drm, const char *name)
{
   fprintf(stderr, "wl_drm.device(%s)\n", name);
}

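/* Map a VkFormat to the wl_drm (fourcc) format used to describe buffers to
 * the compositor.  The names look reversed because Vulkan formats are named
 * by component order within a texel while wl_drm formats are named by the
 * layout of a little-endian packed word: VK_FORMAT_B8G8R8A8_UNORM stores the
 * bytes B, G, R, A, which read as a little-endian 32-bit word is ARGB8888.
 * For opaque surfaces the X (ignore-alpha) variant is returned instead.
 */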
static uint32_t
wl_drm_format_for_vk_format(VkFormat vk_format, bool alpha)
{
   switch (vk_format) {
   /* TODO: Figure out what all the formats mean and make this table
    * correct.
    */
#if 0
   case VK_FORMAT_R4G4B4A4_UNORM:
      return alpha ? WL_DRM_FORMAT_ABGR4444 : WL_DRM_FORMAT_XBGR4444;
   case VK_FORMAT_R5G6B5_UNORM:
      return WL_DRM_FORMAT_BGR565;
   case VK_FORMAT_R5G5B5A1_UNORM:
      return alpha ? WL_DRM_FORMAT_ABGR1555 : WL_DRM_FORMAT_XBGR1555;
   case VK_FORMAT_R8G8B8_UNORM:
      return WL_DRM_FORMAT_XBGR8888;
   case VK_FORMAT_R8G8B8A8_UNORM:
      return alpha ? WL_DRM_FORMAT_ABGR8888 : WL_DRM_FORMAT_XBGR8888;
   case VK_FORMAT_R10G10B10A2_UNORM:
      return alpha ? WL_DRM_FORMAT_ABGR2101010 : WL_DRM_FORMAT_XBGR2101010;
   case VK_FORMAT_B4G4R4A4_UNORM:
      return alpha ? WL_DRM_FORMAT_ARGB4444 : WL_DRM_FORMAT_XRGB4444;
   case VK_FORMAT_B5G6R5_UNORM:
      return WL_DRM_FORMAT_RGB565;
   case VK_FORMAT_B5G5R5A1_UNORM:
      return alpha ? WL_DRM_FORMAT_ARGB1555 : WL_DRM_FORMAT_XRGB1555;
#endif
   case VK_FORMAT_B8G8R8_UNORM:
   case VK_FORMAT_B8G8R8_SRGB:
      return WL_DRM_FORMAT_BGRX8888;
   case VK_FORMAT_B8G8R8A8_UNORM:
   case VK_FORMAT_B8G8R8A8_SRGB:
      return alpha ? WL_DRM_FORMAT_ARGB8888 : WL_DRM_FORMAT_XRGB8888;
#if 0
   case VK_FORMAT_B10G10R10A2_UNORM:
      return alpha ? WL_DRM_FORMAT_ARGB2101010 : WL_DRM_FORMAT_XRGB2101010;
#endif

   default:
      assert(!"Unsupported Vulkan format");
      return 0;
   }
}

static void
drm_handle_format(void *data, struct wl_drm *drm, uint32_t wl_format)
{
   struct wsi_wl_display *display = data;

   switch (wl_format) {
#if 0
   case WL_DRM_FORMAT_ABGR4444:
   case WL_DRM_FORMAT_XBGR4444:
      wsi_wl_display_add_vk_format(display, VK_FORMAT_R4G4B4A4_UNORM);
      break;
   case WL_DRM_FORMAT_BGR565:
      wsi_wl_display_add_vk_format(display, VK_FORMAT_R5G6B5_UNORM);
      break;
   case WL_DRM_FORMAT_ABGR1555:
   case WL_DRM_FORMAT_XBGR1555:
      wsi_wl_display_add_vk_format(display, VK_FORMAT_R5G5B5A1_UNORM);
      break;
   case WL_DRM_FORMAT_XBGR8888:
      wsi_wl_display_add_vk_format(display, VK_FORMAT_R8G8B8_UNORM);
      /* fallthrough */
   case WL_DRM_FORMAT_ABGR8888:
      wsi_wl_display_add_vk_format(display, VK_FORMAT_R8G8B8A8_UNORM);
      break;
   case WL_DRM_FORMAT_ABGR2101010:
   case WL_DRM_FORMAT_XBGR2101010:
      wsi_wl_display_add_vk_format(display, VK_FORMAT_R10G10B10A2_UNORM);
      break;
   case WL_DRM_FORMAT_ARGB4444:
   case WL_DRM_FORMAT_XRGB4444:
      wsi_wl_display_add_vk_format(display, VK_FORMAT_B4G4R4A4_UNORM);
      break;
   case WL_DRM_FORMAT_RGB565:
      wsi_wl_display_add_vk_format(display, VK_FORMAT_B5G6R5_UNORM);
      break;
   case WL_DRM_FORMAT_ARGB1555:
   case WL_DRM_FORMAT_XRGB1555:
      wsi_wl_display_add_vk_format(display, VK_FORMAT_B5G5R5A1_UNORM);
      break;
#endif
   case WL_DRM_FORMAT_XRGB8888:
      wsi_wl_display_add_vk_format(display, VK_FORMAT_B8G8R8_SRGB);
      wsi_wl_display_add_vk_format(display, VK_FORMAT_B8G8R8_UNORM);
      /* fallthrough */
   case WL_DRM_FORMAT_ARGB8888:
      wsi_wl_display_add_vk_format(display, VK_FORMAT_B8G8R8A8_SRGB);
      wsi_wl_display_add_vk_format(display, VK_FORMAT_B8G8R8A8_UNORM);
      break;
#if 0
   case WL_DRM_FORMAT_ARGB2101010:
   case WL_DRM_FORMAT_XRGB2101010:
      wsi_wl_display_add_vk_format(display, VK_FORMAT_B10G10R10A2_UNORM);
      break;
#endif
   }
}

static void
drm_handle_authenticated(void *data, struct wl_drm *drm)
{
}

static void
drm_handle_capabilities(void *data, struct wl_drm *drm, uint32_t capabilities)
{
   struct wsi_wl_display *display = data;

   display->capabilities = capabilities;
}

static const struct wl_drm_listener drm_listener = {
   drm_handle_device,
   drm_handle_format,
   drm_handle_authenticated,
   drm_handle_capabilities,
};

static void
registry_handle_global(void *data, struct wl_registry *registry,
                       uint32_t name, const char *interface, uint32_t version)
{
   struct wsi_wl_display *display = data;

   if (strcmp(interface, "wl_drm") == 0) {
      assert(display->drm == NULL);

      assert(version >= 2);
      display->drm = wl_registry_bind(registry, name, &wl_drm_interface, 2);

      if (display->drm)
         wl_drm_add_listener(display->drm, &drm_listener, display);
   }
}

static void
registry_handle_global_remove(void *data, struct wl_registry *registry,
                              uint32_t name)
{ /* No-op */ }

static const struct wl_registry_listener registry_listener = {
   registry_handle_global,
   registry_handle_global_remove
};

static void
wsi_wl_display_destroy(struct wsi_wayland *wsi, struct wsi_wl_display *display)
{
   u_vector_finish(&display->formats);
   if (display->drm)
      wl_drm_destroy(display->drm);
   vk_free(wsi->alloc, display);
}

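/* Create the per-wl_display state.  This performs two blocking roundtrips on
 * the connection: the first delivers the registry globals so wl_drm can be
 * bound, the second delivers the wl_drm format and capability events.  A
 * display that does not advertise PRIME (dma-buf fd passing) is rejected,
 * since wl_drm_create_prime_buffer() is the only buffer path used here.
 */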
static struct wsi_wl_display *
wsi_wl_display_create(struct wsi_wayland *wsi, struct wl_display *wl_display)
{
   struct wsi_wl_display *display =
      vk_alloc(wsi->alloc, sizeof(*display), 8,
               VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!display)
      return NULL;

   memset(display, 0, sizeof(*display));

   display->display = wl_display;
   display->wsi_wl = wsi;

   if (!u_vector_init(&display->formats, sizeof(VkFormat), 8))
      goto fail;

   struct wl_registry *registry = wl_display_get_registry(wl_display);
   if (!registry)
      goto fail;

   wl_registry_add_listener(registry, &registry_listener, display);

   /* Round-trip to get the wl_drm global */
   wl_display_roundtrip(wl_display);

   if (!display->drm)
      goto fail_registry;

   /* Round-trip to get wl_drm formats and capabilities */
   wl_display_roundtrip(wl_display);

   /* We need prime support */
   if (!(display->capabilities & WL_DRM_CAPABILITY_PRIME))
      goto fail_registry;

   /* We don't need this anymore */
   wl_registry_destroy(registry);

   return display;

fail_registry:
   if (registry)
      wl_registry_destroy(registry);

fail:
   wsi_wl_display_destroy(wsi, display);
   return NULL;
}

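/* Look up (or lazily create) the wsi_wl_display for a wl_display.  Creation
 * involves blocking roundtrips, so the mutex is dropped while it runs and the
 * hash table is re-checked afterwards; if another thread won the race, the
 * freshly created copy is destroyed and the cached one is returned.
 */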
static struct wsi_wl_display *
wsi_wl_get_display(struct wsi_device *wsi_device,
                   struct wl_display *wl_display)
{
   struct wsi_wayland *wsi =
      (struct wsi_wayland *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND];

   pthread_mutex_lock(&wsi->mutex);

   struct hash_entry *entry = _mesa_hash_table_search(wsi->displays,
                                                      wl_display);
   if (!entry) {
      /* We're about to make a bunch of blocking calls.  Let's drop the
       * mutex for now so we don't block up too badly.
       */
      pthread_mutex_unlock(&wsi->mutex);

      struct wsi_wl_display *display = wsi_wl_display_create(wsi, wl_display);
      if (!display)
         return NULL;

      pthread_mutex_lock(&wsi->mutex);

      entry = _mesa_hash_table_search(wsi->displays, wl_display);
      if (entry) {
         /* Oops, someone raced us to it */
         wsi_wl_display_destroy(wsi, display);
      } else {
         entry = _mesa_hash_table_insert(wsi->displays, wl_display, display);
      }
   }

   pthread_mutex_unlock(&wsi->mutex);

   return entry->data;
}

VkBool32
wsi_wl_get_presentation_support(struct wsi_device *wsi_device,
                                struct wl_display *wl_display)
{
   return wsi_wl_get_display(wsi_device, wl_display) != NULL;
}

static VkResult
wsi_wl_surface_get_support(VkIcdSurfaceBase *surface,
                           struct wsi_device *wsi_device,
                           const VkAllocationCallbacks *alloc,
                           uint32_t queueFamilyIndex,
                           int local_fd,
                           bool can_handle_different_gpu,
                           VkBool32* pSupported)
{
   *pSupported = true;

   return VK_SUCCESS;
}

static const VkPresentModeKHR present_modes[] = {
   VK_PRESENT_MODE_MAILBOX_KHR,
   VK_PRESENT_MODE_FIFO_KHR,
};

static VkResult
wsi_wl_surface_get_capabilities(VkIcdSurfaceBase *surface,
                                VkSurfaceCapabilitiesKHR* caps)
{
   /* For true mailbox mode, we need at least 4 images:
    *  1) One to scan out from
    *  2) One to have queued for scan-out
    *  3) One to be currently held by the Wayland compositor
    *  4) One to render to
    */
   caps->minImageCount = 4;
   /* There is no real maximum */
   caps->maxImageCount = 0;

   caps->currentExtent = (VkExtent2D) { -1, -1 };
   caps->minImageExtent = (VkExtent2D) { 1, 1 };
   /* This is the maximum supported size on Intel */
   caps->maxImageExtent = (VkExtent2D) { 1 << 14, 1 << 14 };
   caps->supportedTransforms = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
   caps->currentTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
   caps->maxImageArrayLayers = 1;

   caps->supportedCompositeAlpha =
      VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR |
      VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR;

   caps->supportedUsageFlags =
      VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
      VK_IMAGE_USAGE_SAMPLED_BIT |
      VK_IMAGE_USAGE_TRANSFER_DST_BIT |
      VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;

   return VK_SUCCESS;
}

static VkResult
wsi_wl_surface_get_capabilities2(VkIcdSurfaceBase *surface,
                                 const void *info_next,
                                 VkSurfaceCapabilities2KHR* caps)
{
   assert(caps->sType == VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR);

   return wsi_wl_surface_get_capabilities(surface, &caps->surfaceCapabilities);
}

static VkResult
wsi_wl_surface_get_formats(VkIcdSurfaceBase *icd_surface,
                           struct wsi_device *wsi_device,
                           uint32_t* pSurfaceFormatCount,
                           VkSurfaceFormatKHR* pSurfaceFormats)
{
   VkIcdSurfaceWayland *surface = (VkIcdSurfaceWayland *)icd_surface;
   struct wsi_wl_display *display =
      wsi_wl_get_display(wsi_device, surface->display);
   if (!display)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   VK_OUTARRAY_MAKE(out, pSurfaceFormats, pSurfaceFormatCount);

   VkFormat *disp_fmt;
   u_vector_foreach(disp_fmt, &display->formats) {
      vk_outarray_append(&out, out_fmt) {
         out_fmt->format = *disp_fmt;
         out_fmt->colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
      }
   }

   return vk_outarray_status(&out);
}

static VkResult
wsi_wl_surface_get_formats2(VkIcdSurfaceBase *icd_surface,
                            struct wsi_device *wsi_device,
                            const void *info_next,
                            uint32_t* pSurfaceFormatCount,
                            VkSurfaceFormat2KHR* pSurfaceFormats)
{
   VkIcdSurfaceWayland *surface = (VkIcdSurfaceWayland *)icd_surface;
   struct wsi_wl_display *display =
      wsi_wl_get_display(wsi_device, surface->display);
   if (!display)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   VK_OUTARRAY_MAKE(out, pSurfaceFormats, pSurfaceFormatCount);

   VkFormat *disp_fmt;
   u_vector_foreach(disp_fmt, &display->formats) {
      vk_outarray_append(&out, out_fmt) {
         out_fmt->surfaceFormat.format = *disp_fmt;
         out_fmt->surfaceFormat.colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
      }
   }

   return vk_outarray_status(&out);
}

static VkResult
wsi_wl_surface_get_present_modes(VkIcdSurfaceBase *surface,
                                 uint32_t* pPresentModeCount,
                                 VkPresentModeKHR* pPresentModes)
{
   if (pPresentModes == NULL) {
      *pPresentModeCount = ARRAY_SIZE(present_modes);
      return VK_SUCCESS;
   }

   *pPresentModeCount = MIN2(*pPresentModeCount, ARRAY_SIZE(present_modes));
   typed_memcpy(pPresentModes, present_modes, *pPresentModeCount);

   if (*pPresentModeCount < ARRAY_SIZE(present_modes))
      return VK_INCOMPLETE;
   else
      return VK_SUCCESS;
}

VkResult wsi_create_wl_surface(const VkAllocationCallbacks *pAllocator,
                               const VkWaylandSurfaceCreateInfoKHR *pCreateInfo,
                               VkSurfaceKHR *pSurface)
{
   VkIcdSurfaceWayland *surface;

   surface = vk_alloc(pAllocator, sizeof *surface, 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (surface == NULL)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   surface->base.platform = VK_ICD_WSI_PLATFORM_WAYLAND;
   surface->display = pCreateInfo->display;
   surface->surface = pCreateInfo->surface;

   *pSurface = VkIcdSurfaceBase_to_handle(&surface->base);

   return VK_SUCCESS;
}

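/* For reference, applications reach the function above through the standard
 * WSI entry points.  A minimal, purely illustrative sketch of the application
 * side (not driver-specific code from this file) looks like this:
 *
 *    VkWaylandSurfaceCreateInfoKHR info = {
 *       .sType = VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR,
 *       .display = wl_display,   // the app's wl_display connection
 *       .surface = wl_surface,   // the wl_surface to present to
 *    };
 *    VkSurfaceKHR vk_surface;
 *    vkCreateWaylandSurfaceKHR(instance, &info, NULL, &vk_surface);
 *
 * The driver's vkCreateWaylandSurfaceKHR implementation is expected to forward
 * the create-info and allocator to wsi_create_wl_surface().
 */
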
struct wsi_wl_image {
   VkImage image;
   VkDeviceMemory memory;
   struct wl_buffer *buffer;
   bool busy;
};

struct wsi_wl_swapchain {
   struct wsi_swapchain base;

   struct wsi_wl_display *display;
   struct wl_event_queue *queue;
   struct wl_surface *surface;
   uint32_t surface_version;

   VkExtent2D extent;
   VkFormat vk_format;
   uint32_t drm_format;

   VkPresentModeKHR present_mode;
   bool fifo_ready;

   struct wsi_wl_image images[0];
};

static VkResult
wsi_wl_swapchain_get_images(struct wsi_swapchain *wsi_chain,
                            uint32_t *pCount, VkImage *pSwapchainImages)
{
   struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
   uint32_t ret_count;
   VkResult result;

   if (pSwapchainImages == NULL) {
      *pCount = chain->base.image_count;
      return VK_SUCCESS;
   }

   result = VK_SUCCESS;
   ret_count = chain->base.image_count;
   if (chain->base.image_count > *pCount) {
      ret_count = *pCount;
      result = VK_INCOMPLETE;
   }

   for (uint32_t i = 0; i < ret_count; i++)
      pSwapchainImages[i] = chain->images[i].image;

   return result;
}

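/* Acquire a swapchain image.  Pending events on the swapchain's private queue
 * are dispatched first so any wl_buffer.release notifications clear the busy
 * flags, then the image array is scanned for a free slot; if none is found,
 * the loop blocks on a roundtrip and tries again.  Note that the timeout and
 * semaphore parameters are not used by this implementation.
 */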
static VkResult
wsi_wl_swapchain_acquire_next_image(struct wsi_swapchain *wsi_chain,
                                    uint64_t timeout,
                                    VkSemaphore semaphore,
                                    uint32_t *image_index)
{
   struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;

   int ret = wl_display_dispatch_queue_pending(chain->display->display,
                                               chain->queue);
   /* XXX: I'm not sure if out-of-date is the right error here.  If
    * wl_display_dispatch_queue_pending fails it most likely means we got
    * kicked by the server so this seems more-or-less correct.
    */
   if (ret < 0)
      return VK_ERROR_OUT_OF_DATE_KHR;

   while (1) {
      for (uint32_t i = 0; i < chain->base.image_count; i++) {
         if (!chain->images[i].busy) {
            /* We found a non-busy image */
            *image_index = i;
            chain->images[i].busy = true;
            return VK_SUCCESS;
         }
      }

      /* This time we do a blocking dispatch because we can't go
       * anywhere until we get an event.
       */
      int ret = wl_display_roundtrip_queue(chain->display->display,
                                           chain->queue);
      if (ret < 0)
         return VK_ERROR_OUT_OF_DATE_KHR;
   }
}

static void
frame_handle_done(void *data, struct wl_callback *callback, uint32_t serial)
{
   struct wsi_wl_swapchain *chain = data;

   chain->fifo_ready = true;

   wl_callback_destroy(callback);
}

static const struct wl_callback_listener frame_listener = {
   frame_handle_done,
};

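/* Present an image.  In FIFO mode, presentation first blocks until the frame
 * callback from the previous commit has fired, which throttles the swapchain
 * to the compositor's repaint cycle.  The buffer is then attached and damage
 * is reported: per-rectangle via wl_surface.damage_buffer when the compositor
 * supports wl_surface version 4 and the application supplied
 * VK_KHR_incremental_present regions, otherwise as full-surface damage.
 */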
static VkResult
wsi_wl_swapchain_queue_present(struct wsi_swapchain *wsi_chain,
                               uint32_t image_index,
                               const VkPresentRegionKHR *damage)
{
   struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;

   if (chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR) {
      while (!chain->fifo_ready) {
         int ret = wl_display_dispatch_queue(chain->display->display,
                                             chain->queue);
         if (ret < 0)
            return VK_ERROR_OUT_OF_DATE_KHR;
      }
   }

   assert(image_index < chain->base.image_count);
   wl_surface_attach(chain->surface, chain->images[image_index].buffer, 0, 0);

   if (chain->surface_version >= 4 && damage &&
       damage->pRectangles && damage->rectangleCount > 0) {
      for (unsigned i = 0; i < damage->rectangleCount; i++) {
         const VkRectLayerKHR *rect = &damage->pRectangles[i];
         assert(rect->layer == 0);
         wl_surface_damage_buffer(chain->surface,
                                  rect->offset.x, rect->offset.y,
                                  rect->extent.width, rect->extent.height);
      }
   } else {
      wl_surface_damage(chain->surface, 0, 0, INT32_MAX, INT32_MAX);
   }

   if (chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR) {
      struct wl_callback *frame = wl_surface_frame(chain->surface);
      wl_proxy_set_queue((struct wl_proxy *)frame, chain->queue);
      wl_callback_add_listener(frame, &frame_listener, chain);
      chain->fifo_ready = false;
   }

   chain->images[image_index].busy = true;
   wl_surface_commit(chain->surface);
   wl_display_flush(chain->display->display);

   return VK_SUCCESS;
}

static void
buffer_handle_release(void *data, struct wl_buffer *buffer)
{
   struct wsi_wl_image *image = data;

   assert(image->buffer == buffer);

   image->busy = false;
}

static const struct wl_buffer_listener buffer_listener = {
   buffer_handle_release,
};

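/* Create one swapchain image: the driver callback allocates the VkImage and
 * its memory and exports it as a prime (dma-buf) file descriptor, which is
 * then wrapped in a wl_buffer via wl_drm.create_prime_buffer.  The fd can be
 * closed immediately because the compositor receives its own duplicate over
 * the socket.  The buffer's release events are routed to the swapchain's
 * private event queue so they can be collected during acquire.
 */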
static VkResult
wsi_wl_image_init(struct wsi_wl_swapchain *chain,
                  struct wsi_wl_image *image,
                  const VkSwapchainCreateInfoKHR *pCreateInfo,
                  const VkAllocationCallbacks* pAllocator)
{
   VkDevice vk_device = chain->base.device;
   VkResult result;
   int fd;
   uint32_t size;
   uint32_t row_pitch;
   uint32_t offset;
   result = chain->base.image_fns->create_wsi_image(vk_device,
                                                    pCreateInfo,
                                                    pAllocator,
                                                    false,
                                                    false,
                                                    &image->image,
                                                    &image->memory,
                                                    &size,
                                                    &offset,
                                                    &row_pitch,
                                                    &fd);
   if (result != VK_SUCCESS)
      return result;

   image->buffer = wl_drm_create_prime_buffer(chain->display->drm,
                                              fd, /* name */
                                              chain->extent.width,
                                              chain->extent.height,
                                              chain->drm_format,
                                              offset,
                                              row_pitch,
                                              0, 0, 0, 0 /* unused */);
   close(fd);

   if (!image->buffer) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail_image;
   }

   wl_proxy_set_queue((struct wl_proxy *)image->buffer, chain->queue);
   wl_buffer_add_listener(image->buffer, &buffer_listener, image);

   return VK_SUCCESS;

fail_image:
   chain->base.image_fns->free_wsi_image(vk_device, pAllocator,
                                         image->image, image->memory);

   return result;
}

static VkResult
wsi_wl_swapchain_destroy(struct wsi_swapchain *wsi_chain,
                         const VkAllocationCallbacks *pAllocator)
{
   struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;

   for (uint32_t i = 0; i < chain->base.image_count; i++) {
      if (chain->images[i].buffer)
         chain->base.image_fns->free_wsi_image(chain->base.device, pAllocator,
                                               chain->images[i].image,
                                               chain->images[i].memory);
   }

   vk_free(pAllocator, chain);

   return VK_SUCCESS;
}

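/* Create a Wayland swapchain.  The chain is allocated with a flexible array
 * of wsi_wl_image structs sized by minImageCount; all per-image fields are
 * cleared up front so a failure at any point can simply run the destroy path,
 * and a private wl_event_queue is created so swapchain events (buffer
 * releases and frame callbacks) never interfere with the application's own
 * event dispatching.
 */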
static VkResult
wsi_wl_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
                                VkDevice device,
                                struct wsi_device *wsi_device,
                                int local_fd,
                                const VkSwapchainCreateInfoKHR* pCreateInfo,
                                const VkAllocationCallbacks* pAllocator,
                                const struct wsi_image_fns *image_fns,
                                struct wsi_swapchain **swapchain_out)
{
   VkIcdSurfaceWayland *surface = (VkIcdSurfaceWayland *)icd_surface;
   struct wsi_wl_swapchain *chain;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR);

   int num_images = pCreateInfo->minImageCount;

   size_t size = sizeof(*chain) + num_images * sizeof(chain->images[0]);
   chain = vk_alloc(pAllocator, size, 8,
                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (chain == NULL)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   bool alpha = pCreateInfo->compositeAlpha ==
                VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR;

   chain->base.device = device;
   chain->base.destroy = wsi_wl_swapchain_destroy;
   chain->base.get_images = wsi_wl_swapchain_get_images;
   chain->base.acquire_next_image = wsi_wl_swapchain_acquire_next_image;
   chain->base.queue_present = wsi_wl_swapchain_queue_present;
   chain->base.image_fns = image_fns;
   chain->base.present_mode = pCreateInfo->presentMode;
   chain->base.image_count = num_images;
   chain->base.needs_linear_copy = false;
   chain->surface = surface->surface;
   chain->surface_version = wl_proxy_get_version((void *)surface->surface);
   chain->extent = pCreateInfo->imageExtent;
   chain->vk_format = pCreateInfo->imageFormat;
   chain->drm_format = wl_drm_format_for_vk_format(chain->vk_format, alpha);

   chain->fifo_ready = true;

   /* Mark a bunch of stuff as NULL.  This way we can just call
    * destroy_swapchain for cleanup.
    */
   for (uint32_t i = 0; i < chain->base.image_count; i++)
      chain->images[i].buffer = NULL;
   chain->queue = NULL;

   chain->display = wsi_wl_get_display(wsi_device,
                                       surface->display);
   if (!chain->display) {
      result = VK_ERROR_INITIALIZATION_FAILED;
      goto fail;
   }

   chain->queue = wl_display_create_queue(chain->display->display);
   if (!chain->queue) {
      result = VK_ERROR_INITIALIZATION_FAILED;
      goto fail;
   }

   for (uint32_t i = 0; i < chain->base.image_count; i++) {
      result = wsi_wl_image_init(chain, &chain->images[i],
                                 pCreateInfo, pAllocator);
      if (result != VK_SUCCESS)
         goto fail;
      chain->images[i].busy = false;
   }

   *swapchain_out = &chain->base;

   return VK_SUCCESS;

fail:
   wsi_wl_swapchain_destroy(&chain->base, pAllocator);

   return result;
}

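/* Instance-level initialization: allocate the wsi_wayland state, set up the
 * display cache (hash table plus mutex) and fill in the wsi_interface vtable
 * that the common WSI code dispatches through for Wayland surfaces.
 */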
VkResult
wsi_wl_init_wsi(struct wsi_device *wsi_device,
                const VkAllocationCallbacks *alloc,
                VkPhysicalDevice physical_device,
                const struct wsi_callbacks *cbs)
{
   struct wsi_wayland *wsi;
   VkResult result;

   wsi = vk_alloc(alloc, sizeof(*wsi), 8,
                  VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!wsi) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail;
   }

   wsi->physical_device = physical_device;
   wsi->alloc = alloc;
   wsi->cbs = cbs;
   int ret = pthread_mutex_init(&wsi->mutex, NULL);
   if (ret != 0) {
      if (ret == ENOMEM) {
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
      } else {
         /* FINISHME: Choose a better error. */
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
      }

      goto fail_alloc;
   }

   wsi->displays = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                           _mesa_key_pointer_equal);
   if (!wsi->displays) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail_mutex;
   }

   wsi->base.get_support = wsi_wl_surface_get_support;
   wsi->base.get_capabilities = wsi_wl_surface_get_capabilities;
   wsi->base.get_capabilities2 = wsi_wl_surface_get_capabilities2;
   wsi->base.get_formats = wsi_wl_surface_get_formats;
   wsi->base.get_formats2 = wsi_wl_surface_get_formats2;
   wsi->base.get_present_modes = wsi_wl_surface_get_present_modes;
   wsi->base.create_swapchain = wsi_wl_surface_create_swapchain;

   wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND] = &wsi->base;

   return VK_SUCCESS;

fail_mutex:
   pthread_mutex_destroy(&wsi->mutex);

fail_alloc:
   vk_free(alloc, wsi);
fail:
   wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND] = NULL;

   return result;
}

void
wsi_wl_finish_wsi(struct wsi_device *wsi_device,
                  const VkAllocationCallbacks *alloc)
{
   struct wsi_wayland *wsi =
      (struct wsi_wayland *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND];

   if (wsi) {
      struct hash_entry *entry;
      hash_table_foreach(wsi->displays, entry)
         wsi_wl_display_destroy(wsi, entry->data);

      _mesa_hash_table_destroy(wsi->displays, NULL);

      pthread_mutex_destroy(&wsi->mutex);

      vk_free(alloc, wsi);
   }
}