/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <wayland-client.h>
#include <wayland-drm-client-protocol.h>

#include "anv_wsi.h"

#include <util/hash_table.h>

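/* The smallest swapchain we will report or accept: at least one image for the
 * compositor to display and one for the application to render into.
 */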
#define MIN_NUM_IMAGES 2

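/* Per-wl_display state cached by the WSI layer: the bound wl_drm global, the
 * VkFormats the compositor advertises, and the wl_drm capability flags.
 * Instances are created lazily and shared through the hash table in struct
 * wsi_wayland below.
 */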
struct wsi_wl_display {
   struct anv_physical_device *physical_device;
   struct wl_display *display;
   struct wl_drm *drm;

   /* Vector of VkFormats supported */
   struct anv_vector formats;

   uint32_t capabilities;
};

struct wsi_wayland {
   struct anv_wsi_interface base;

   struct anv_physical_device *physical_device;

   pthread_mutex_t mutex;
   /* Hash table of wl_display -> wsi_wl_display mappings */
   struct hash_table *displays;
};

static void
wsi_wl_display_add_vk_format(struct wsi_wl_display *display, VkFormat format)
{
   /* Don't add a format that's already in the list */
   VkFormat *f;
   anv_vector_foreach(f, &display->formats)
      if (*f == format)
         return;

   /* Don't add formats that aren't renderable. */
   VkFormatProperties props;
   anv_GetPhysicalDeviceFormatProperties(
      anv_physical_device_to_handle(display->physical_device), format, &props);
   if (!(props.optimalTilingFeatures & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT))
      return;

   f = anv_vector_add(&display->formats);
   if (f)
      *f = format;
}

static void
drm_handle_device(void *data, struct wl_drm *drm, const char *name)
{
   fprintf(stderr, "wl_drm.device(%s)\n", name);
}

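/* Translates a Vulkan format to the corresponding wl_drm format code.  When
 * `alpha` is false, the X variant (alpha channel ignored by the compositor)
 * is returned where one exists; the swapchain code below always requests the
 * opaque variant.
 */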
static uint32_t
wl_drm_format_for_vk_format(VkFormat vk_format, bool alpha)
{
   switch (vk_format) {
   /* TODO: Figure out what all the formats mean and make this table
    * correct.
    */
#if 0
   case VK_FORMAT_R4G4B4A4_UNORM:
      return alpha ? WL_DRM_FORMAT_ABGR4444 : WL_DRM_FORMAT_XBGR4444;
   case VK_FORMAT_R5G6B5_UNORM:
      return WL_DRM_FORMAT_BGR565;
   case VK_FORMAT_R5G5B5A1_UNORM:
      return alpha ? WL_DRM_FORMAT_ABGR1555 : WL_DRM_FORMAT_XBGR1555;
   case VK_FORMAT_R8G8B8_UNORM:
      return WL_DRM_FORMAT_XBGR8888;
   case VK_FORMAT_R8G8B8A8_UNORM:
      return alpha ? WL_DRM_FORMAT_ABGR8888 : WL_DRM_FORMAT_XBGR8888;
   case VK_FORMAT_R10G10B10A2_UNORM:
      return alpha ? WL_DRM_FORMAT_ABGR2101010 : WL_DRM_FORMAT_XBGR2101010;
   case VK_FORMAT_B4G4R4A4_UNORM:
      return alpha ? WL_DRM_FORMAT_ARGB4444 : WL_DRM_FORMAT_XRGB4444;
   case VK_FORMAT_B5G6R5_UNORM:
      return WL_DRM_FORMAT_RGB565;
   case VK_FORMAT_B5G5R5A1_UNORM:
      return alpha ? WL_DRM_FORMAT_ARGB1555 : WL_DRM_FORMAT_XRGB1555;
#endif
   case VK_FORMAT_B8G8R8_SRGB:
      return WL_DRM_FORMAT_BGRX8888;
   case VK_FORMAT_B8G8R8A8_SRGB:
      return alpha ? WL_DRM_FORMAT_ARGB8888 : WL_DRM_FORMAT_XRGB8888;
#if 0
   case VK_FORMAT_B10G10R10A2_UNORM:
      return alpha ? WL_DRM_FORMAT_ARGB2101010 : WL_DRM_FORMAT_XRGB2101010;
#endif

   default:
      assert(!"Unsupported Vulkan format");
      return 0;
   }
}

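/* Inverse of the mapping above: each format event from the compositor is
 * translated back to a VkFormat and collected in display->formats.  Note the
 * deliberate fallthrough from XRGB8888 to ARGB8888, presumably because an
 * image with alpha can always be presented on an XRGB-capable compositor
 * with the alpha channel ignored.
 */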
static void
drm_handle_format(void *data, struct wl_drm *drm, uint32_t wl_format)
{
   struct wsi_wl_display *display = data;

   switch (wl_format) {
#if 0
   case WL_DRM_FORMAT_ABGR4444:
   case WL_DRM_FORMAT_XBGR4444:
      wsi_wl_display_add_vk_format(display, VK_FORMAT_R4G4B4A4_UNORM);
      break;
   case WL_DRM_FORMAT_BGR565:
      wsi_wl_display_add_vk_format(display, VK_FORMAT_R5G6B5_UNORM);
      break;
   case WL_DRM_FORMAT_ABGR1555:
   case WL_DRM_FORMAT_XBGR1555:
      wsi_wl_display_add_vk_format(display, VK_FORMAT_R5G5B5A1_UNORM);
      break;
   case WL_DRM_FORMAT_XBGR8888:
      wsi_wl_display_add_vk_format(display, VK_FORMAT_R8G8B8_UNORM);
      /* fallthrough */
   case WL_DRM_FORMAT_ABGR8888:
      wsi_wl_display_add_vk_format(display, VK_FORMAT_R8G8B8A8_UNORM);
      break;
   case WL_DRM_FORMAT_ABGR2101010:
   case WL_DRM_FORMAT_XBGR2101010:
      wsi_wl_display_add_vk_format(display, VK_FORMAT_R10G10B10A2_UNORM);
      break;
   case WL_DRM_FORMAT_ARGB4444:
   case WL_DRM_FORMAT_XRGB4444:
      wsi_wl_display_add_vk_format(display, VK_FORMAT_B4G4R4A4_UNORM);
      break;
   case WL_DRM_FORMAT_RGB565:
      wsi_wl_display_add_vk_format(display, VK_FORMAT_B5G6R5_UNORM);
      break;
   case WL_DRM_FORMAT_ARGB1555:
   case WL_DRM_FORMAT_XRGB1555:
      wsi_wl_display_add_vk_format(display, VK_FORMAT_B5G5R5A1_UNORM);
      break;
#endif
   case WL_DRM_FORMAT_XRGB8888:
      wsi_wl_display_add_vk_format(display, VK_FORMAT_B8G8R8_SRGB);
      /* fallthrough */
   case WL_DRM_FORMAT_ARGB8888:
      wsi_wl_display_add_vk_format(display, VK_FORMAT_B8G8R8A8_SRGB);
      break;
#if 0
   case WL_DRM_FORMAT_ARGB2101010:
   case WL_DRM_FORMAT_XRGB2101010:
      wsi_wl_display_add_vk_format(display, VK_FORMAT_B10G10R10A2_UNORM);
      break;
#endif
   }
}

static void
drm_handle_authenticated(void *data, struct wl_drm *drm)
{
}

static void
drm_handle_capabilities(void *data, struct wl_drm *drm, uint32_t capabilities)
{
   struct wsi_wl_display *display = data;

   display->capabilities = capabilities;
}

static const struct wl_drm_listener drm_listener = {
   drm_handle_device,
   drm_handle_format,
   drm_handle_authenticated,
   drm_handle_capabilities,
};

static void
registry_handle_global(void *data, struct wl_registry *registry,
                       uint32_t name, const char *interface, uint32_t version)
{
   struct wsi_wl_display *display = data;

   if (strcmp(interface, "wl_drm") == 0) {
      assert(display->drm == NULL);

      assert(version >= 2);
      display->drm = wl_registry_bind(registry, name, &wl_drm_interface, 2);

      if (display->drm)
         wl_drm_add_listener(display->drm, &drm_listener, display);
   }
}

static void
registry_handle_global_remove(void *data, struct wl_registry *registry,
                              uint32_t name)
{ /* No-op */ }

static const struct wl_registry_listener registry_listener = {
   registry_handle_global,
   registry_handle_global_remove
};

static void
wsi_wl_display_destroy(struct wsi_wayland *wsi, struct wsi_wl_display *display)
{
   anv_vector_finish(&display->formats);
   if (display->drm)
      wl_drm_destroy(display->drm);
   anv_free(&wsi->physical_device->instance->alloc, display);
}

static struct wsi_wl_display *
wsi_wl_display_create(struct wsi_wayland *wsi, struct wl_display *wl_display)
{
   struct wsi_wl_display *display =
      anv_alloc(&wsi->physical_device->instance->alloc, sizeof(*display), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!display)
      return NULL;

   memset(display, 0, sizeof(*display));

   display->display = wl_display;
   display->physical_device = wsi->physical_device;

   struct wl_registry *registry = NULL;

   if (!anv_vector_init(&display->formats, sizeof(VkFormat), 8))
      goto fail;

   registry = wl_display_get_registry(wl_display);
   if (!registry)
      goto fail;

   wl_registry_add_listener(registry, &registry_listener, display);

   /* Round-trip to get the wl_drm global */
   wl_display_roundtrip(wl_display);

   if (!display->drm)
      goto fail;

   /* Round-trip to get wl_drm formats and capabilities */
   wl_display_roundtrip(wl_display);

   /* We need prime support */
   if (!(display->capabilities & WL_DRM_CAPABILITY_PRIME))
      goto fail;

   /* We don't need this anymore */
   wl_registry_destroy(registry);

   return display;

fail:
   if (registry)
      wl_registry_destroy(registry);

   wsi_wl_display_destroy(wsi, display);
   return NULL;
}

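/* Looks up (or lazily creates) the wsi_wl_display for a wl_display.  The
 * blocking wsi_wl_display_create step is done with wsi->mutex dropped, so the
 * hash table is re-checked afterwards in case another thread created the same
 * entry in the meantime.
 */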
static struct wsi_wl_display *
wsi_wl_get_display(struct anv_physical_device *device,
                   struct wl_display *wl_display)
{
   struct wsi_wayland *wsi =
      (struct wsi_wayland *)device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND];

   pthread_mutex_lock(&wsi->mutex);

   struct hash_entry *entry = _mesa_hash_table_search(wsi->displays,
                                                      wl_display);
   if (!entry) {
      /* We're about to make a bunch of blocking calls. Let's drop the
       * mutex for now so we don't block up too badly.
       */
      pthread_mutex_unlock(&wsi->mutex);

      struct wsi_wl_display *display = wsi_wl_display_create(wsi, wl_display);

      pthread_mutex_lock(&wsi->mutex);

      entry = _mesa_hash_table_search(wsi->displays, wl_display);
      if (entry) {
         /* Oops, someone raced us to it */
         if (display)
            wsi_wl_display_destroy(wsi, display);
      } else {
         entry = _mesa_hash_table_insert(wsi->displays, wl_display, display);
      }
   }

   pthread_mutex_unlock(&wsi->mutex);

   return entry->data;
}

VkBool32 anv_GetPhysicalDeviceWaylandPresentationSupportKHR(
    VkPhysicalDevice                            physicalDevice,
    uint32_t                                    queueFamilyIndex,
    struct wl_display*                          display)
{
   ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);

   return wsi_wl_get_display(physical_device, display) != NULL;
}

static VkResult
wsi_wl_surface_get_support(VkIcdSurfaceBase *surface,
                           struct anv_physical_device *device,
                           uint32_t queueFamilyIndex,
                           VkBool32* pSupported)
{
   *pSupported = true;

   return VK_SUCCESS;
}

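/* Present modes supported by this backend.  MAILBOX simply attaches and
 * commits new buffers as they are presented, while FIFO is throttled with a
 * wl_surface.frame callback (see wsi_wl_swapchain_queue_present below).
 */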
static const VkPresentModeKHR present_modes[] = {
   VK_PRESENT_MODE_MAILBOX_KHR,
   VK_PRESENT_MODE_FIFO_KHR,
};

static VkResult
wsi_wl_surface_get_capabilities(VkIcdSurfaceBase *surface,
                                struct anv_physical_device *device,
                                VkSurfaceCapabilitiesKHR* caps)
{
   caps->minImageCount = MIN_NUM_IMAGES;
   caps->maxImageCount = 4;
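   /* A currentExtent of (0xFFFFFFFF, 0xFFFFFFFF) tells the application that
    * the surface has no fixed size and will adapt to the swapchain extent.
    */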
   caps->currentExtent = (VkExtent2D) { -1, -1 };
   caps->minImageExtent = (VkExtent2D) { 1, 1 };
   caps->maxImageExtent = (VkExtent2D) { INT16_MAX, INT16_MAX };
   caps->supportedTransforms = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
   caps->currentTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
   caps->maxImageArrayLayers = 1;

   caps->supportedCompositeAlpha =
      VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR |
      VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR;

   caps->supportedUsageFlags =
      VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
      VK_IMAGE_USAGE_SAMPLED_BIT |
      VK_IMAGE_USAGE_TRANSFER_DST_BIT |
      VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;

   return VK_SUCCESS;
}

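/* Standard Vulkan two-call idiom: when pSurfaceFormats is NULL only the count
 * is returned, otherwise the formats are written into the caller's array.
 * For example, an application would typically do something like:
 *
 *    uint32_t count;
 *    vkGetPhysicalDeviceSurfaceFormatsKHR(phys_dev, surface, &count, NULL);
 *    VkSurfaceFormatKHR *formats = malloc(count * sizeof(*formats));
 *    vkGetPhysicalDeviceSurfaceFormatsKHR(phys_dev, surface, &count, formats);
 *
 * (Sketch only; phys_dev and surface stand in for the caller's own handles.)
 */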
static VkResult
wsi_wl_surface_get_formats(VkIcdSurfaceBase *icd_surface,
                           struct anv_physical_device *device,
                           uint32_t* pSurfaceFormatCount,
                           VkSurfaceFormatKHR* pSurfaceFormats)
{
   VkIcdSurfaceWayland *surface = (VkIcdSurfaceWayland *)icd_surface;
   struct wsi_wl_display *display =
      wsi_wl_get_display(device, surface->display);

   uint32_t count = anv_vector_length(&display->formats);

   if (pSurfaceFormats == NULL) {
      *pSurfaceFormatCount = count;
      return VK_SUCCESS;
   }

   assert(*pSurfaceFormatCount >= count);
   *pSurfaceFormatCount = count;

   VkFormat *f;
   anv_vector_foreach(f, &display->formats) {
      *(pSurfaceFormats++) = (VkSurfaceFormatKHR) {
         .format = *f,
         /* TODO: We should get this from the compositor somehow */
         .colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR,
      };
   }

   return VK_SUCCESS;
}

static VkResult
wsi_wl_surface_get_present_modes(VkIcdSurfaceBase *surface,
                                 struct anv_physical_device *device,
                                 uint32_t* pPresentModeCount,
                                 VkPresentModeKHR* pPresentModes)
{
   if (pPresentModes == NULL) {
      *pPresentModeCount = ARRAY_SIZE(present_modes);
      return VK_SUCCESS;
   }

   assert(*pPresentModeCount >= ARRAY_SIZE(present_modes));
   typed_memcpy(pPresentModes, present_modes, ARRAY_SIZE(present_modes));
   *pPresentModeCount = ARRAY_SIZE(present_modes);

   return VK_SUCCESS;
}

static VkResult
wsi_wl_surface_create_swapchain(VkIcdSurfaceBase *surface,
                                struct anv_device *device,
                                const VkSwapchainCreateInfoKHR* pCreateInfo,
                                const VkAllocationCallbacks* pAllocator,
                                struct anv_swapchain **swapchain);

VkResult anv_CreateWaylandSurfaceKHR(
    VkInstance                                  _instance,
    const VkWaylandSurfaceCreateInfoKHR*        pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkSurfaceKHR*                               pSurface)
{
   ANV_FROM_HANDLE(anv_instance, instance, _instance);

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR);

   VkIcdSurfaceWayland *surface;

   surface = anv_alloc2(&instance->alloc, pAllocator, sizeof *surface, 8,
                        VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (surface == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   surface->base.platform = VK_ICD_WSI_PLATFORM_WAYLAND;
   surface->display = pCreateInfo->display;
   surface->surface = pCreateInfo->surface;

   *pSurface = _VkIcdSurfaceBase_to_handle(&surface->base);

   return VK_SUCCESS;
}

struct wsi_wl_image {
   struct anv_image *image;
   struct anv_device_memory *memory;
   struct wl_buffer *buffer;
   bool busy;
};

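/* Swapchain state.  The images[] array is a trailing flexible array; the
 * struct is allocated with room for image_count entries in
 * wsi_wl_surface_create_swapchain.  An image's busy flag is set at present
 * time and cleared again from the wl_buffer.release handler.
 */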
struct wsi_wl_swapchain {
   struct anv_swapchain base;

   struct wsi_wl_display *display;
   struct wl_event_queue *queue;
   struct wl_surface *surface;

   VkExtent2D extent;
   VkFormat vk_format;
   uint32_t drm_format;

   VkPresentModeKHR present_mode;
   bool fifo_ready;

   uint32_t image_count;
   struct wsi_wl_image images[0];
};

static VkResult
wsi_wl_swapchain_get_images(struct anv_swapchain *anv_chain,
                            uint32_t *pCount, VkImage *pSwapchainImages)
{
   struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)anv_chain;

   if (pSwapchainImages == NULL) {
      *pCount = chain->image_count;
      return VK_SUCCESS;
   }

   assert(chain->image_count <= *pCount);
   for (uint32_t i = 0; i < chain->image_count; i++)
      pSwapchainImages[i] = anv_image_to_handle(chain->images[i].image);

   *pCount = chain->image_count;

   return VK_SUCCESS;
}

static VkResult
wsi_wl_swapchain_acquire_next_image(struct anv_swapchain *anv_chain,
                                    uint64_t timeout,
                                    VkSemaphore semaphore,
                                    uint32_t *image_index)
{
   struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)anv_chain;

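   /* Note: the timeout and semaphore parameters are currently unused; this
    * implementation simply blocks on Wayland events until a buffer has been
    * released.
    */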
   int ret = wl_display_dispatch_queue_pending(chain->display->display,
                                               chain->queue);
   /* XXX: I'm not sure if out-of-date is the right error here. If
    * wl_display_dispatch_queue_pending fails it most likely means we got
    * kicked by the server so this seems more-or-less correct.
    */
   if (ret < 0)
      return vk_error(VK_ERROR_OUT_OF_DATE_KHR);

   while (1) {
      for (uint32_t i = 0; i < chain->image_count; i++) {
         if (!chain->images[i].busy) {
            /* We found a non-busy image */
            *image_index = i;
            return VK_SUCCESS;
         }
      }

      /* This time we do a blocking dispatch because we can't go
       * anywhere until we get an event.
       */
      int ret = wl_display_roundtrip_queue(chain->display->display,
                                           chain->queue);
      if (ret < 0)
         return vk_error(VK_ERROR_OUT_OF_DATE_KHR);
   }
}

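/* Frame callback used to throttle FIFO presentation: queue_present requests a
 * wl_surface.frame callback and refuses to present again until it fires.
 */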
static void
frame_handle_done(void *data, struct wl_callback *callback, uint32_t serial)
{
   struct wsi_wl_swapchain *chain = data;

   chain->fifo_ready = true;

   wl_callback_destroy(callback);
}

static const struct wl_callback_listener frame_listener = {
   frame_handle_done,
};

static VkResult
wsi_wl_swapchain_queue_present(struct anv_swapchain *anv_chain,
                               struct anv_queue *queue,
                               uint32_t image_index)
{
   struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)anv_chain;

   if (chain->present_mode == VK_PRESENT_MODE_FIFO_KHR) {
      while (!chain->fifo_ready) {
         int ret = wl_display_dispatch_queue(chain->display->display,
                                             chain->queue);
         if (ret < 0)
            return vk_error(VK_ERROR_OUT_OF_DATE_KHR);
      }
   }

   assert(image_index < chain->image_count);
   wl_surface_attach(chain->surface, chain->images[image_index].buffer, 0, 0);
   wl_surface_damage(chain->surface, 0, 0, INT32_MAX, INT32_MAX);

   if (chain->present_mode == VK_PRESENT_MODE_FIFO_KHR) {
      struct wl_callback *frame = wl_surface_frame(chain->surface);
      wl_proxy_set_queue((struct wl_proxy *)frame, chain->queue);
      wl_callback_add_listener(frame, &frame_listener, chain);
      chain->fifo_ready = false;
   }

   chain->images[image_index].busy = true;
   wl_surface_commit(chain->surface);
   wl_display_flush(chain->display->display);

   return VK_SUCCESS;
}

static void
wsi_wl_image_finish(struct wsi_wl_swapchain *chain, struct wsi_wl_image *image,
                    const VkAllocationCallbacks* pAllocator)
{
   VkDevice vk_device = anv_device_to_handle(chain->base.device);
   anv_FreeMemory(vk_device, anv_device_memory_to_handle(image->memory),
                  pAllocator);
   anv_DestroyImage(vk_device, anv_image_to_handle(image->image),
                    pAllocator);
}

static void
buffer_handle_release(void *data, struct wl_buffer *buffer)
{
   struct wsi_wl_image *image = data;

   assert(image->buffer == buffer);

   image->busy = false;
}

static const struct wl_buffer_listener buffer_listener = {
   buffer_handle_release,
};

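/* Creates one swapchain image: an X-tiled color image with backing memory,
 * whose GEM handle is given X tiling and exported as a prime fd, which is
 * then wrapped in a wl_buffer via wl_drm.create_prime_buffer.
 */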
static VkResult
wsi_wl_image_init(struct wsi_wl_swapchain *chain, struct wsi_wl_image *image,
                  const VkAllocationCallbacks* pAllocator)
{
   VkDevice vk_device = anv_device_to_handle(chain->base.device);
   VkResult result;

   VkImage vk_image;
   result = anv_image_create(vk_device,
      &(struct anv_image_create_info) {
         .isl_tiling_flags = ISL_TILING_X_BIT,
         .stride = 0,
         .vk_info = &(VkImageCreateInfo) {
            .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
            .imageType = VK_IMAGE_TYPE_2D,
            .format = chain->vk_format,
            .extent = {
               .width = chain->extent.width,
               .height = chain->extent.height,
               .depth = 1
            },
            .mipLevels = 1,
            .arrayLayers = 1,
            .samples = 1,
            /* FIXME: Need a way to use X tiling to allow scanout */
            .tiling = VK_IMAGE_TILING_OPTIMAL,
            .usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
            .flags = 0,
         }},
      pAllocator,
      &vk_image);

   if (result != VK_SUCCESS)
      return result;

   image->image = anv_image_from_handle(vk_image);
   assert(anv_format_is_color(image->image->format));

   struct anv_surface *surface = &image->image->color_surface;

   VkDeviceMemory vk_memory;
   result = anv_AllocateMemory(vk_device,
      &(VkMemoryAllocateInfo) {
         .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
         .allocationSize = image->image->size,
         .memoryTypeIndex = 0,
      },
      pAllocator,
      &vk_memory);

   if (result != VK_SUCCESS)
      goto fail_image;

   image->memory = anv_device_memory_from_handle(vk_memory);
   image->memory->bo.is_winsys_bo = true;

   result = anv_BindImageMemory(vk_device, vk_image, vk_memory, 0);

   if (result != VK_SUCCESS)
      goto fail_mem;

   int ret = anv_gem_set_tiling(chain->base.device,
                                image->memory->bo.gem_handle,
                                surface->isl.row_pitch, I915_TILING_X);
   if (ret) {
      /* FINISHME: Choose a better error. */
      result = vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
      goto fail_mem;
   }

   int fd = anv_gem_handle_to_fd(chain->base.device,
                                 image->memory->bo.gem_handle);
   if (fd == -1) {
      /* FINISHME: Choose a better error. */
      result = vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
      goto fail_mem;
   }

   image->buffer = wl_drm_create_prime_buffer(chain->display->drm,
                                              fd, /* name */
                                              chain->extent.width,
                                              chain->extent.height,
                                              chain->drm_format,
                                              surface->offset,
                                              surface->isl.row_pitch,
                                              0, 0, 0, 0 /* unused */);
   wl_display_roundtrip(chain->display->display);
   close(fd);

   wl_proxy_set_queue((struct wl_proxy *)image->buffer, chain->queue);
   wl_buffer_add_listener(image->buffer, &buffer_listener, image);

   return VK_SUCCESS;

fail_mem:
   anv_FreeMemory(vk_device, vk_memory, pAllocator);
fail_image:
   anv_DestroyImage(vk_device, vk_image, pAllocator);

   return result;
}

static VkResult
wsi_wl_swapchain_destroy(struct anv_swapchain *anv_chain,
                         const VkAllocationCallbacks *pAllocator)
{
   struct wsi_wl_swapchain *chain = (struct wsi_wl_swapchain *)anv_chain;

   for (uint32_t i = 0; i < chain->image_count; i++) {
      if (chain->images[i].buffer)
         wsi_wl_image_finish(chain, &chain->images[i], pAllocator);
   }

   anv_free2(&chain->base.device->alloc, pAllocator, chain);

   return VK_SUCCESS;
}

static VkResult
wsi_wl_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
                                struct anv_device *device,
                                const VkSwapchainCreateInfoKHR* pCreateInfo,
                                const VkAllocationCallbacks* pAllocator,
                                struct anv_swapchain **swapchain_out)
{
   VkIcdSurfaceWayland *surface = (VkIcdSurfaceWayland *)icd_surface;
   struct wsi_wl_swapchain *chain;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR);

   int num_images = pCreateInfo->minImageCount;

   assert(num_images >= MIN_NUM_IMAGES);

   /* For true mailbox mode, we need at least 4 images:
    * 1) One to scan out from
    * 2) One to have queued for scan-out
    * 3) One to be currently held by the Wayland compositor
    * 4) One to render to
    */
   if (pCreateInfo->presentMode == VK_PRESENT_MODE_MAILBOX_KHR)
      num_images = MAX2(num_images, 4);

   size_t size = sizeof(*chain) + num_images * sizeof(chain->images[0]);
   chain = anv_alloc2(&device->alloc, pAllocator, size, 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (chain == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   chain->base.device = device;
   chain->base.destroy = wsi_wl_swapchain_destroy;
   chain->base.get_images = wsi_wl_swapchain_get_images;
   chain->base.acquire_next_image = wsi_wl_swapchain_acquire_next_image;
   chain->base.queue_present = wsi_wl_swapchain_queue_present;

   chain->surface = surface->surface;
   chain->extent = pCreateInfo->imageExtent;
   chain->vk_format = pCreateInfo->imageFormat;
   chain->drm_format = wl_drm_format_for_vk_format(chain->vk_format, false);

   chain->present_mode = pCreateInfo->presentMode;
   chain->fifo_ready = true;

   chain->image_count = num_images;

   /* Mark a bunch of stuff as NULL. This way we can just call
    * destroy_swapchain for cleanup.
    */
   for (uint32_t i = 0; i < chain->image_count; i++)
      chain->images[i].buffer = NULL;
   chain->queue = NULL;

   chain->display = wsi_wl_get_display(&device->instance->physicalDevice,
                                       surface->display);
   if (!chain->display) {
      /* FINISHME: Is surface-lost the best error here? */
      result = vk_error(VK_ERROR_SURFACE_LOST_KHR);
      goto fail;
   }

   chain->queue = wl_display_create_queue(chain->display->display);
   if (!chain->queue) {
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail;
   }

   for (uint32_t i = 0; i < chain->image_count; i++) {
      result = wsi_wl_image_init(chain, &chain->images[i], pAllocator);
      if (result != VK_SUCCESS)
         goto fail;
      chain->images[i].busy = false;
   }

   *swapchain_out = &chain->base;

   return VK_SUCCESS;

fail:
   wsi_wl_swapchain_destroy(&chain->base, pAllocator);

   return result;
}

VkResult
anv_wl_init_wsi(struct anv_physical_device *device)
{
   struct wsi_wayland *wsi;
   VkResult result;

   wsi = anv_alloc(&device->instance->alloc, sizeof(*wsi), 8,
                   VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!wsi) {
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail;
   }

   wsi->physical_device = device;

   int ret = pthread_mutex_init(&wsi->mutex, NULL);
   if (ret != 0) {
      if (ret == ENOMEM) {
         result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      } else {
         /* FINISHME: Choose a better error. */
         result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      goto fail_alloc;
   }

   wsi->displays = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                           _mesa_key_pointer_equal);
   if (!wsi->displays) {
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail_mutex;
   }

   wsi->base.get_support = wsi_wl_surface_get_support;
   wsi->base.get_capabilities = wsi_wl_surface_get_capabilities;
   wsi->base.get_formats = wsi_wl_surface_get_formats;
   wsi->base.get_present_modes = wsi_wl_surface_get_present_modes;
   wsi->base.create_swapchain = wsi_wl_surface_create_swapchain;

   device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND] = &wsi->base;

   return VK_SUCCESS;

fail_mutex:
   pthread_mutex_destroy(&wsi->mutex);

fail_alloc:
   anv_free(&device->instance->alloc, wsi);
fail:
   device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND] = NULL;

   return result;
}

void
anv_wl_finish_wsi(struct anv_physical_device *device)
{
   struct wsi_wayland *wsi =
      (struct wsi_wayland *)device->wsi[VK_ICD_WSI_PLATFORM_WAYLAND];

   if (wsi) {
      _mesa_hash_table_destroy(wsi->displays, NULL);

      pthread_mutex_destroy(&wsi->mutex);

      anv_free(&device->instance->alloc, wsi);
   }
}