vulkan/wsi/wayland: Pass damage through to the compositor
[mesa.git] src/vulkan/wsi/wsi_common_wayland.c
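This commit teaches the Wayland WSI to forward VK_KHR_incremental_present damage rectangles to the compositor via wl_surface.damage_buffer (see wsi_wl_swapchain_queue_present below). For context, the application side of that path looks roughly like the sketch that follows; the helper name, parameter names, and the fixed 256x256 rectangle are illustrative only, while VkPresentRegionsKHR, VkPresentRegionKHR, and VkRectLayerKHR are the standard VK_KHR_incremental_present structures.

#include <vulkan/vulkan.h>

/* Sketch: present one swapchain image while telling the driver that only a
 * 256x256 region in the top-left corner changed this frame.  image_index is
 * assumed to come from vkAcquireNextImageKHR and render_done is assumed to be
 * signaled when rendering to that image finishes. */
static VkResult
present_with_damage(VkQueue queue, VkSwapchainKHR swapchain,
                    VkSemaphore render_done, uint32_t image_index)
{
   const VkRectLayerKHR rect = {
      .offset = { 0, 0 },
      .extent = { 256, 256 },
      .layer = 0,
   };
   const VkPresentRegionKHR region = {
      .rectangleCount = 1,
      .pRectangles = &rect,
   };
   const VkPresentRegionsKHR regions = {
      .sType = VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR,
      .swapchainCount = 1,
      .pRegions = &region,
   };
   const VkPresentInfoKHR present_info = {
      .sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,
      .pNext = &regions,
      .waitSemaphoreCount = 1,
      .pWaitSemaphores = &render_done,
      .swapchainCount = 1,
      .pSwapchains = &swapchain,
      .pImageIndices = &image_index,
   };
   return vkQueuePresentKHR(queue, &present_info);
}

If the application presents without chaining a VkPresentRegionsKHR, or the wl_surface is older than version 4, the implementation below falls back to damaging the whole surface.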
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <wayland-client.h>
25
26 #include <assert.h>
27 #include <stdlib.h>
28 #include <stdio.h>
29 #include <unistd.h>
30 #include <errno.h>
31 #include <string.h>
32 #include <pthread.h>
33
34 #include "wsi_common_wayland.h"
35 #include "wayland-drm-client-protocol.h"
36
37 #include <util/hash_table.h>
38 #include <util/u_vector.h>
39
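/* Copies count elements from src to dest, with a compile-time check that the
 * source and destination element types have the same size. */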
40 #define typed_memcpy(dest, src, count) ({ \
41 STATIC_ASSERT(sizeof(*src) == sizeof(*dest)); \
42 memcpy((dest), (src), (count) * sizeof(*(src))); \
43 })
44
45 struct wsi_wayland;
46
47 struct wsi_wl_display {
48 struct wl_display * display;
49 struct wl_drm * drm;
50
51 struct wsi_wayland *wsi_wl;
52 /* Vector of VkFormats supported */
53 struct u_vector formats;
54
55 uint32_t capabilities;
56 };
57
58 struct wsi_wayland {
59 struct wsi_interface base;
60
61 const VkAllocationCallbacks *alloc;
62 VkPhysicalDevice physical_device;
63
64 pthread_mutex_t mutex;
65 /* Hash table of wl_display -> wsi_wl_display mappings */
66 struct hash_table * displays;
67
68 const struct wsi_callbacks *cbs;
69 };
70
71 static void
72 wsi_wl_display_add_vk_format(struct wsi_wl_display *display, VkFormat format)
73 {
74 /* Don't add a format that's already in the list */
75 VkFormat *f;
76 u_vector_foreach(f, &display->formats)
77 if (*f == format)
78 return;
79
80 /* Don't add formats that aren't renderable. */
81 VkFormatProperties props;
82
83 display->wsi_wl->cbs->get_phys_device_format_properties(display->wsi_wl->physical_device,
84 format, &props);
85 if (!(props.optimalTilingFeatures & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT))
86 return;
87
88 f = u_vector_add(&display->formats);
89 if (f)
90 *f = format;
91 }
92
93 static void
94 drm_handle_device(void *data, struct wl_drm *drm, const char *name)
95 {
96 fprintf(stderr, "wl_drm.device(%s)\n", name);
97 }
98
99 static uint32_t
100 wl_drm_format_for_vk_format(VkFormat vk_format, bool alpha)
101 {
102 switch (vk_format) {
103 /* TODO: Figure out what all the formats mean and make this table
104 * correct.
105 */
106 #if 0
107 case VK_FORMAT_R4G4B4A4_UNORM:
108 return alpha ? WL_DRM_FORMAT_ABGR4444 : WL_DRM_FORMAT_XBGR4444;
109 case VK_FORMAT_R5G6B5_UNORM:
110 return WL_DRM_FORMAT_BGR565;
111 case VK_FORMAT_R5G5B5A1_UNORM:
112 return alpha ? WL_DRM_FORMAT_ABGR1555 : WL_DRM_FORMAT_XBGR1555;
113 case VK_FORMAT_R8G8B8_UNORM:
114 return WL_DRM_FORMAT_XBGR8888;
115 case VK_FORMAT_R8G8B8A8_UNORM:
116 return alpha ? WL_DRM_FORMAT_ABGR8888 : WL_DRM_FORMAT_XBGR8888;
117 case VK_FORMAT_R10G10B10A2_UNORM:
118 return alpha ? WL_DRM_FORMAT_ABGR2101010 : WL_DRM_FORMAT_XBGR2101010;
119 case VK_FORMAT_B4G4R4A4_UNORM:
120 return alpha ? WL_DRM_FORMAT_ARGB4444 : WL_DRM_FORMAT_XRGB4444;
121 case VK_FORMAT_B5G6R5_UNORM:
122 return WL_DRM_FORMAT_RGB565;
123 case VK_FORMAT_B5G5R5A1_UNORM:
124 return alpha ? WL_DRM_FORMAT_ARGB1555 : WL_DRM_FORMAT_XRGB1555;
125 #endif
126 case VK_FORMAT_B8G8R8_UNORM:
127 case VK_FORMAT_B8G8R8_SRGB:
128 return WL_DRM_FORMAT_BGRX8888;
129 case VK_FORMAT_B8G8R8A8_UNORM:
130 case VK_FORMAT_B8G8R8A8_SRGB:
131 return alpha ? WL_DRM_FORMAT_ARGB8888 : WL_DRM_FORMAT_XRGB8888;
132 #if 0
133 case VK_FORMAT_B10G10R10A2_UNORM:
134 return alpha ? WL_DRM_FORMAT_ARGB2101010 : WL_DRM_FORMAT_XRGB2101010;
135 #endif
136
137 default:
138 assert(!"Unsupported Vulkan format");
139 return 0;
140 }
141 }
142
143 static void
144 drm_handle_format(void *data, struct wl_drm *drm, uint32_t wl_format)
145 {
146 struct wsi_wl_display *display = data;
147
148 switch (wl_format) {
149 #if 0
150 case WL_DRM_FORMAT_ABGR4444:
151 case WL_DRM_FORMAT_XBGR4444:
152 wsi_wl_display_add_vk_format(display, VK_FORMAT_R4G4B4A4_UNORM);
153 break;
154 case WL_DRM_FORMAT_BGR565:
155 wsi_wl_display_add_vk_format(display, VK_FORMAT_R5G6B5_UNORM);
156 break;
157 case WL_DRM_FORMAT_ABGR1555:
158 case WL_DRM_FORMAT_XBGR1555:
159 wsi_wl_display_add_vk_format(display, VK_FORMAT_R5G5B5A1_UNORM);
160 break;
161 case WL_DRM_FORMAT_XBGR8888:
162 wsi_wl_display_add_vk_format(display, VK_FORMAT_R8G8B8_UNORM);
163 /* fallthrough */
164 case WL_DRM_FORMAT_ABGR8888:
165 wsi_wl_display_add_vk_format(display, VK_FORMAT_R8G8B8A8_UNORM);
166 break;
167 case WL_DRM_FORMAT_ABGR2101010:
168 case WL_DRM_FORMAT_XBGR2101010:
169 wsi_wl_display_add_vk_format(display, VK_FORMAT_R10G10B10A2_UNORM);
170 break;
171 case WL_DRM_FORMAT_ARGB4444:
172 case WL_DRM_FORMAT_XRGB4444:
173 wsi_wl_display_add_vk_format(display, VK_FORMAT_B4G4R4A4_UNORM);
174 break;
175 case WL_DRM_FORMAT_RGB565:
176 wsi_wl_display_add_vk_format(display, VK_FORMAT_B5G6R5_UNORM);
177 break;
178 case WL_DRM_FORMAT_ARGB1555:
179 case WL_DRM_FORMAT_XRGB1555:
180 wsi_wl_display_add_vk_format(display, VK_FORMAT_B5G5R5A1_UNORM);
181 break;
182 #endif
183 case WL_DRM_FORMAT_XRGB8888:
184 wsi_wl_display_add_vk_format(display, VK_FORMAT_B8G8R8_SRGB);
185 wsi_wl_display_add_vk_format(display, VK_FORMAT_B8G8R8_UNORM);
186 /* fallthrough */
187 case WL_DRM_FORMAT_ARGB8888:
188 wsi_wl_display_add_vk_format(display, VK_FORMAT_B8G8R8A8_SRGB);
189 wsi_wl_display_add_vk_format(display, VK_FORMAT_B8G8R8A8_UNORM);
190 break;
191 #if 0
192 case WL_DRM_FORMAT_ARGB2101010:
193 case WL_DRM_FORMAT_XRGB2101010:
194 wsi_wl_display_add_vk_format(display, VK_FORMAT_B10G10R10A2_UNORM);
195 break;
196 #endif
197 }
198 }
199
200 static void
201 drm_handle_authenticated(void *data, struct wl_drm *drm)
202 {
203 }
204
205 static void
206 drm_handle_capabilities(void *data, struct wl_drm *drm, uint32_t capabilities)
207 {
208 struct wsi_wl_display *display = data;
209
210 display->capabilities = capabilities;
211 }
212
213 static const struct wl_drm_listener drm_listener = {
214 drm_handle_device,
215 drm_handle_format,
216 drm_handle_authenticated,
217 drm_handle_capabilities,
218 };
219
220 static void
221 registry_handle_global(void *data, struct wl_registry *registry,
222 uint32_t name, const char *interface, uint32_t version)
223 {
224 struct wsi_wl_display *display = data;
225
226 if (strcmp(interface, "wl_drm") == 0) {
227 assert(display->drm == NULL);
228
229 assert(version >= 2);
230 display->drm = wl_registry_bind(registry, name, &wl_drm_interface, 2);
231
232 if (display->drm)
233 wl_drm_add_listener(display->drm, &drm_listener, display);
234 }
235 }
236
237 static void
238 registry_handle_global_remove(void *data, struct wl_registry *registry,
239 uint32_t name)
240 { /* No-op */ }
241
242 static const struct wl_registry_listener registry_listener = {
243 registry_handle_global,
244 registry_handle_global_remove
245 };
246
247 static void
248 wsi_wl_display_destroy(struct wsi_wayland *wsi, struct wsi_wl_display *display)
249 {
250 u_vector_finish(&display->formats);
251 if (display->drm)
252 wl_drm_destroy(display->drm);
253 vk_free(wsi->alloc, display);
254 }
255
256 static struct wsi_wl_display *
257 wsi_wl_display_create(struct wsi_wayland *wsi, struct wl_display *wl_display)
258 {
259 struct wsi_wl_display *display =
260 vk_alloc(wsi->alloc, sizeof(*display), 8,
261 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
262 if (!display)
263 return NULL;
264
265 memset(display, 0, sizeof(*display));
266
267 display->display = wl_display;
268 display->wsi_wl = wsi;
269
270 struct wl_registry *registry = wl_display_get_registry(wl_display);
271 if (!registry)
272 goto fail;
273
274 if (!u_vector_init(&display->formats, sizeof(VkFormat), 8))
275 goto fail;
276
277 wl_registry_add_listener(registry, &registry_listener, display);
278
279 /* Round-trip to get the wl_drm global */
280 wl_display_roundtrip(wl_display);
281
282 if (!display->drm)
283 goto fail;
284
285 /* Round-trip to get wl_drm formats and capabilities */
286 wl_display_roundtrip(wl_display);
287
288 /* We need prime support */
289 if (!(display->capabilities & WL_DRM_CAPABILITY_PRIME))
290 goto fail;
291
292 /* We don't need this anymore */
293 wl_registry_destroy(registry);
294
295 return display;
296
297 fail:
298 if (registry)
299 wl_registry_destroy(registry);
300
301 wsi_wl_display_destroy(wsi, display);
302 return NULL;
303 }
304
305 static struct wsi_wl_display *
306 wsi_wl_get_display(struct wsi_device *wsi_device,
307 struct wl_display *wl_display)
308 {
309 struct wsi_wayland *wsi =
310 (struct wsi_wayland *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND];
311
312 pthread_mutex_lock(&wsi->mutex);
313
314 struct hash_entry *entry = _mesa_hash_table_search(wsi->displays,
315 wl_display);
316 if (!entry) {
317 /* We're about to make a bunch of blocking calls. Let's drop the
318 * mutex for now so we don't block up too badly.
319 */
320 pthread_mutex_unlock(&wsi->mutex);
321
322 struct wsi_wl_display *display = wsi_wl_display_create(wsi, wl_display);
323 if (!display)
324 return NULL;
325
326 pthread_mutex_lock(&wsi->mutex);
327
328 entry = _mesa_hash_table_search(wsi->displays, wl_display);
329 if (entry) {
330 /* Oops, someone raced us to it */
331 wsi_wl_display_destroy(wsi, display);
332 } else {
333 entry = _mesa_hash_table_insert(wsi->displays, wl_display, display);
334 }
335 }
336
337 pthread_mutex_unlock(&wsi->mutex);
338
339 return entry->data;
340 }
341
342 VkBool32
343 wsi_wl_get_presentation_support(struct wsi_device *wsi_device,
344 struct wl_display *wl_display)
345 {
346 return wsi_wl_get_display(wsi_device, wl_display) != NULL;
347 }
348
349 static VkResult
350 wsi_wl_surface_get_support(VkIcdSurfaceBase *surface,
351 struct wsi_device *wsi_device,
352 const VkAllocationCallbacks *alloc,
353 uint32_t queueFamilyIndex,
354 int local_fd,
355 bool can_handle_different_gpu,
356 VkBool32* pSupported)
357 {
358 *pSupported = true;
359
360 return VK_SUCCESS;
361 }
362
363 static const VkPresentModeKHR present_modes[] = {
364 VK_PRESENT_MODE_MAILBOX_KHR,
365 VK_PRESENT_MODE_FIFO_KHR,
366 };
367
368 static VkResult
369 wsi_wl_surface_get_capabilities(VkIcdSurfaceBase *surface,
370 VkSurfaceCapabilitiesKHR* caps)
371 {
372 /* For true mailbox mode, we need at least 4 images:
373 * 1) One to scan out from
374 * 2) One to have queued for scan-out
375 * 3) One to be currently held by the Wayland compositor
376 * 4) One to render to
377 */
378 caps->minImageCount = 4;
379 /* There is no real maximum */
380 caps->maxImageCount = 0;
381
382 caps->currentExtent = (VkExtent2D) { -1, -1 };
383 caps->minImageExtent = (VkExtent2D) { 1, 1 };
384 /* This is the maximum supported size on Intel */
385 caps->maxImageExtent = (VkExtent2D) { 1 << 14, 1 << 14 };
386 caps->supportedTransforms = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
387 caps->currentTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
388 caps->maxImageArrayLayers = 1;
389
390 caps->supportedCompositeAlpha =
391 VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR |
392 VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR;
393
394 caps->supportedUsageFlags =
395 VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
396 VK_IMAGE_USAGE_SAMPLED_BIT |
397 VK_IMAGE_USAGE_TRANSFER_DST_BIT |
398 VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
399
400 return VK_SUCCESS;
401 }
402
403 static VkResult
404 wsi_wl_surface_get_formats(VkIcdSurfaceBase *icd_surface,
405 struct wsi_device *wsi_device,
406 uint32_t* pSurfaceFormatCount,
407 VkSurfaceFormatKHR* pSurfaceFormats)
408 {
409 VkIcdSurfaceWayland *surface = (VkIcdSurfaceWayland *)icd_surface;
410 struct wsi_wl_display *display =
411 wsi_wl_get_display(wsi_device, surface->display);
412 if (!display)
413 return VK_ERROR_OUT_OF_HOST_MEMORY;
414
415 if (pSurfaceFormats == NULL) {
416 *pSurfaceFormatCount = u_vector_length(&display->formats);
417 return VK_SUCCESS;
418 }
419
420 uint32_t count = 0;
421 VkFormat *f;
422 u_vector_foreach(f, &display->formats) {
423 if (count == *pSurfaceFormatCount)
424 return VK_INCOMPLETE;
425
426 pSurfaceFormats[count++] = (VkSurfaceFormatKHR) {
427 .format = *f,
428 /* TODO: We should get this from the compositor somehow */
429 .colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR,
430 };
431 }
432
433 assert(count <= *pSurfaceFormatCount);
434 *pSurfaceFormatCount = count;
435
436 return VK_SUCCESS;
437 }
438
439 static VkResult
440 wsi_wl_surface_get_present_modes(VkIcdSurfaceBase *surface,
441 uint32_t* pPresentModeCount,
442 VkPresentModeKHR* pPresentModes)
443 {
444 if (pPresentModes == NULL) {
445 *pPresentModeCount = ARRAY_SIZE(present_modes);
446 return VK_SUCCESS;
447 }
448
449 *pPresentModeCount = MIN2(*pPresentModeCount, ARRAY_SIZE(present_modes));
450 typed_memcpy(pPresentModes, present_modes, *pPresentModeCount);
451
452 if (*pPresentModeCount < ARRAY_SIZE(present_modes))
453 return VK_INCOMPLETE;
454 else
455 return VK_SUCCESS;
456 }
457
458 VkResult wsi_create_wl_surface(const VkAllocationCallbacks *pAllocator,
459 const VkWaylandSurfaceCreateInfoKHR *pCreateInfo,
460 VkSurfaceKHR *pSurface)
461 {
462 VkIcdSurfaceWayland *surface;
463
464 surface = vk_alloc(pAllocator, sizeof *surface, 8,
465 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
466 if (surface == NULL)
467 return VK_ERROR_OUT_OF_HOST_MEMORY;
468
469 surface->base.platform = VK_ICD_WSI_PLATFORM_WAYLAND;
470 surface->display = pCreateInfo->display;
471 surface->surface = pCreateInfo->surface;
472
473 *pSurface = VkIcdSurfaceBase_to_handle(&surface->base);
474
475 return VK_SUCCESS;
476 }
477
478 struct wsi_wl_image {
479 VkImage image;
480 VkDeviceMemory memory;
481 struct wl_buffer * buffer;
482 bool busy;
483 };
484
485 struct wsi_wl_swapchain {
486 struct wsi_swapchain base;
487
488 struct wsi_wl_display * display;
489 struct wl_event_queue * queue;
490 struct wl_surface * surface;
491 uint32_t surface_version;
492
493 VkExtent2D extent;
494 VkFormat vk_format;
495 uint32_t drm_format;
496
497 VkPresentModeKHR present_mode;
498 bool fifo_ready;
499
500 struct wsi_wl_image images[0];
501 };
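/* images[0] above is a zero-length trailing array: the swapchain and its
 * per-image state are allocated as a single block of
 * sizeof(struct wsi_wl_swapchain) + image_count * sizeof(struct wsi_wl_image)
 * in wsi_wl_surface_create_swapchain(). */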
502
503 static VkResult
504 wsi_wl_swapchain_get_images(struct wsi_swapchain *wsi_chain,
505 uint32_t *pCount, VkImage *pSwapchainImages)
506 {
507 struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
508 uint32_t ret_count;
509 VkResult result;
510
511 if (pSwapchainImages == NULL) {
512 *pCount = chain->base.image_count;
513 return VK_SUCCESS;
514 }
515
516 result = VK_SUCCESS;
517 ret_count = chain->base.image_count;
518 if (chain->base.image_count > *pCount) {
519 ret_count = *pCount;
520 result = VK_INCOMPLETE;
521 }
522
523 for (uint32_t i = 0; i < ret_count; i++)
524 pSwapchainImages[i] = chain->images[i].image;
525
526 return result;
527 }
528
529 static VkResult
530 wsi_wl_swapchain_acquire_next_image(struct wsi_swapchain *wsi_chain,
531 uint64_t timeout,
532 VkSemaphore semaphore,
533 uint32_t *image_index)
534 {
535 struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
536
537 int ret = wl_display_dispatch_queue_pending(chain->display->display,
538 chain->queue);
539 /* XXX: I'm not sure if out-of-date is the right error here. If
540 * wl_display_dispatch_queue_pending fails it most likely means we got
541 * kicked by the server so this seems more-or-less correct.
542 */
543 if (ret < 0)
544 return VK_ERROR_OUT_OF_DATE_KHR;
545
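/* An image becomes reusable once the compositor releases its wl_buffer
 * (buffer_handle_release() clears the busy flag), so block on the swapchain's
 * event queue until a free image shows up.  Note that the timeout parameter
 * is not honored here. */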
546 while (1) {
547 for (uint32_t i = 0; i < chain->base.image_count; i++) {
548 if (!chain->images[i].busy) {
549 /* We found a non-busy image */
550 *image_index = i;
551 chain->images[i].busy = true;
552 return VK_SUCCESS;
553 }
554 }
555
556 /* This time we do a blocking dispatch because we can't go
557 * anywhere until we get an event.
558 */
559 int ret = wl_display_roundtrip_queue(chain->display->display,
560 chain->queue);
561 if (ret < 0)
562 return VK_ERROR_OUT_OF_DATE_KHR;
563 }
564 }
565
566 static void
567 frame_handle_done(void *data, struct wl_callback *callback, uint32_t serial)
568 {
569 struct wsi_wl_swapchain *chain = data;
570
571 chain->fifo_ready = true;
572
573 wl_callback_destroy(callback);
574 }
575
576 static const struct wl_callback_listener frame_listener = {
577 frame_handle_done,
578 };
579
580 static VkResult
581 wsi_wl_swapchain_queue_present(struct wsi_swapchain *wsi_chain,
582 uint32_t image_index,
583 const VkPresentRegionKHR *damage)
584 {
585 struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
586
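/* FIFO presentation is throttled with wl_surface.frame: wait for the frame
 * callback from the previous commit before attaching a new buffer, so at most
 * one buffer is committed per compositor repaint. */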
587 if (chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR) {
588 while (!chain->fifo_ready) {
589 int ret = wl_display_dispatch_queue(chain->display->display,
590 chain->queue);
591 if (ret < 0)
592 return VK_ERROR_OUT_OF_DATE_KHR;
593 }
594 }
595
596 assert(image_index < chain->base.image_count);
597 wl_surface_attach(chain->surface, chain->images[image_index].buffer, 0, 0);
598
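/* VK_KHR_incremental_present rectangles are in buffer (pixel) coordinates,
 * the same space wl_surface.damage_buffer expects, so they can be passed
 * straight through.  damage_buffer requires wl_surface version 4; on older
 * surfaces, fall back to damaging everything. */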
599 if (chain->surface_version >= 4 && damage &&
600 damage->pRectangles && damage->rectangleCount > 0) {
601 for (unsigned i = 0; i < damage->rectangleCount; i++) {
602 const VkRectLayerKHR *rect = &damage->pRectangles[i];
603 assert(rect->layer == 0);
604 wl_surface_damage_buffer(chain->surface,
605 rect->offset.x, rect->offset.y,
606 rect->extent.width, rect->extent.height);
607 }
608 } else {
609 wl_surface_damage(chain->surface, 0, 0, INT32_MAX, INT32_MAX);
610 }
611
612 if (chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR) {
613 struct wl_callback *frame = wl_surface_frame(chain->surface);
614 wl_proxy_set_queue((struct wl_proxy *)frame, chain->queue);
615 wl_callback_add_listener(frame, &frame_listener, chain);
616 chain->fifo_ready = false;
617 }
618
619 chain->images[image_index].busy = true;
620 wl_surface_commit(chain->surface);
621 wl_display_flush(chain->display->display);
622
623 return VK_SUCCESS;
624 }
625
626 static void
627 buffer_handle_release(void *data, struct wl_buffer *buffer)
628 {
629 struct wsi_wl_image *image = data;
630
631 assert(image->buffer == buffer);
632
633 image->busy = false;
634 }
635
636 static const struct wl_buffer_listener buffer_listener = {
637 buffer_handle_release,
638 };
639
640 static VkResult
641 wsi_wl_image_init(struct wsi_wl_swapchain *chain,
642 struct wsi_wl_image *image,
643 const VkSwapchainCreateInfoKHR *pCreateInfo,
644 const VkAllocationCallbacks* pAllocator)
645 {
646 VkDevice vk_device = chain->base.device;
647 VkResult result;
648 int fd;
649 uint32_t size;
650 uint32_t row_pitch;
651 uint32_t offset;
652 result = chain->base.image_fns->create_wsi_image(vk_device,
653 pCreateInfo,
654 pAllocator,
655 false,
656 false,
657 &image->image,
658 &image->memory,
659 &size,
660 &offset,
661 &row_pitch,
662 &fd);
663 if (result != VK_SUCCESS)
664 return result;
665
666 image->buffer = wl_drm_create_prime_buffer(chain->display->drm,
667 fd, /* prime (dma-buf) fd */
668 chain->extent.width,
669 chain->extent.height,
670 chain->drm_format,
671 offset,
672 row_pitch,
673 0, 0, 0, 0 /* unused */);
674 wl_display_roundtrip(chain->display->display);
675 close(fd);
676
677 if (!image->buffer)
678 goto fail_image;
679
680 wl_proxy_set_queue((struct wl_proxy *)image->buffer, chain->queue);
681 wl_buffer_add_listener(image->buffer, &buffer_listener, image);
682
683 return VK_SUCCESS;
684
685 fail_image:
686 chain->base.image_fns->free_wsi_image(vk_device, pAllocator,
687 image->image, image->memory);
688
689 return VK_ERROR_OUT_OF_HOST_MEMORY; /* prime buffer creation failed */
690 }
691
692 static VkResult
693 wsi_wl_swapchain_destroy(struct wsi_swapchain *wsi_chain,
694 const VkAllocationCallbacks *pAllocator)
695 {
696 struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)wsi_chain;
697
698 for (uint32_t i = 0; i < chain->base.image_count; i++) {
699 if (chain->images[i].buffer)
700 chain->base.image_fns->free_wsi_image(chain->base.device, pAllocator,
701 chain->images[i].image,
702 chain->images[i].memory);
703 }
704
705 vk_free(pAllocator, chain);
706
707 return VK_SUCCESS;
708 }
709
710 static VkResult
711 wsi_wl_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
712 VkDevice device,
713 struct wsi_device *wsi_device,
714 int local_fd,
715 const VkSwapchainCreateInfoKHR* pCreateInfo,
716 const VkAllocationCallbacks* pAllocator,
717 const struct wsi_image_fns *image_fns,
718 struct wsi_swapchain **swapchain_out)
719 {
720 VkIcdSurfaceWayland *surface = (VkIcdSurfaceWayland *)icd_surface;
721 struct wsi_wl_swapchain *chain;
722 VkResult result;
723
724 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR);
725
726 int num_images = pCreateInfo->minImageCount;
727
728 size_t size = sizeof(*chain) + num_images * sizeof(chain->images[0]);
729 chain = vk_alloc(pAllocator, size, 8,
730 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
731 if (chain == NULL)
732 return VK_ERROR_OUT_OF_HOST_MEMORY;
733
734 bool alpha = pCreateInfo->compositeAlpha ==
735 VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR;
736
737 chain->base.device = device;
738 chain->base.destroy = wsi_wl_swapchain_destroy;
739 chain->base.get_images = wsi_wl_swapchain_get_images;
740 chain->base.acquire_next_image = wsi_wl_swapchain_acquire_next_image;
741 chain->base.queue_present = wsi_wl_swapchain_queue_present;
742 chain->base.image_fns = image_fns;
743 chain->base.present_mode = pCreateInfo->presentMode;
744 chain->base.image_count = num_images;
745 chain->base.needs_linear_copy = false;
746 chain->surface = surface->surface;
747 chain->surface_version = wl_proxy_get_version((void *)surface->surface);
748 chain->extent = pCreateInfo->imageExtent;
749 chain->vk_format = pCreateInfo->imageFormat;
750 chain->drm_format = wl_drm_format_for_vk_format(chain->vk_format, alpha);
751
752 chain->fifo_ready = true;
753
754 /* Mark a bunch of stuff as NULL. This way we can just call
755 * destroy_swapchain for cleanup.
756 */
757 for (uint32_t i = 0; i < chain->base.image_count; i++)
758 chain->images[i].buffer = NULL;
759 chain->queue = NULL;
760
761 chain->display = wsi_wl_get_display(wsi_device,
762 surface->display);
763 if (!chain->display) {
764 result = VK_ERROR_INITIALIZATION_FAILED;
765 goto fail;
766 }
767
768 chain->queue = wl_display_create_queue(chain->display->display);
769 if (!chain->queue) {
770 result = VK_ERROR_INITIALIZATION_FAILED;
771 goto fail;
772 }
773
774 for (uint32_t i = 0; i < chain->base.image_count; i++) {
775 result = wsi_wl_image_init(chain, &chain->images[i],
776 pCreateInfo, pAllocator);
777 if (result != VK_SUCCESS)
778 goto fail;
779 chain->images[i].busy = false;
780 }
781
782 *swapchain_out = &chain->base;
783
784 return VK_SUCCESS;
785
786 fail:
787 wsi_wl_swapchain_destroy(&chain->base, pAllocator);
788
789 return result;
790 }
791
792 VkResult
793 wsi_wl_init_wsi(struct wsi_device *wsi_device,
794 const VkAllocationCallbacks *alloc,
795 VkPhysicalDevice physical_device,
796 const struct wsi_callbacks *cbs)
797 {
798 struct wsi_wayland *wsi;
799 VkResult result;
800
801 wsi = vk_alloc(alloc, sizeof(*wsi), 8,
802 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
803 if (!wsi) {
804 result = VK_ERROR_OUT_OF_HOST_MEMORY;
805 goto fail;
806 }
807
808 wsi->physical_device = physical_device;
809 wsi->alloc = alloc;
810 wsi->cbs = cbs;
811 int ret = pthread_mutex_init(&wsi->mutex, NULL);
812 if (ret != 0) {
813 if (ret == ENOMEM) {
814 result = VK_ERROR_OUT_OF_HOST_MEMORY;
815 } else {
816 /* FINISHME: Choose a better error. */
817 result = VK_ERROR_OUT_OF_HOST_MEMORY;
818 }
819
820 goto fail_alloc;
821 }
822
823 wsi->displays = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
824 _mesa_key_pointer_equal);
825 if (!wsi->displays) {
826 result = VK_ERROR_OUT_OF_HOST_MEMORY;
827 goto fail_mutex;
828 }
829
830 wsi->base.get_support = wsi_wl_surface_get_support;
831 wsi->base.get_capabilities = wsi_wl_surface_get_capabilities;
832 wsi->base.get_formats = wsi_wl_surface_get_formats;
833 wsi->base.get_present_modes = wsi_wl_surface_get_present_modes;
834 wsi->base.create_swapchain = wsi_wl_surface_create_swapchain;
835
836 wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND] = &wsi->base;
837
838 return VK_SUCCESS;
839
840 fail_mutex:
841 pthread_mutex_destroy(&wsi->mutex);
842
843 fail_alloc:
844 vk_free(alloc, wsi);
845 fail:
846 wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND] = NULL;
847
848 return result;
849 }
850
851 void
852 wsi_wl_finish_wsi(struct wsi_device *wsi_device,
853 const VkAllocationCallbacks *alloc)
854 {
855 struct wsi_wayland *wsi =
856 (struct wsi_wayland *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND];
857
858 if (wsi) {
859 struct hash_entry *entry;
860 hash_table_foreach(wsi->displays, entry)
861 wsi_wl_display_destroy(wsi, entry->data);
862
863 _mesa_hash_table_destroy(wsi->displays, NULL);
864
865 pthread_mutex_destroy(&wsi->mutex);
866
867 vk_free(alloc, wsi);
868 }
869 }