02b6eb6b7c771cfd8d124e0daf4d47e9c2805603
[mesa.git] / src / intel / vulkan / wsi_common_x11.c
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <X11/Xlib-xcb.h>
25 #include <X11/xshmfence.h>
26 #include <xcb/xcb.h>
27 #include <xcb/dri3.h>
28 #include <xcb/present.h>
29
30 #include "util/hash_table.h"
31 #include <stdlib.h>
32 #include <stdio.h>
33 #include <unistd.h>
34 #include <errno.h>
35 #include <string.h>
36 #include "wsi_common.h"
37 #include "wsi_common_x11.h"
38
/* memcpy() wrapper that statically checks the source and destination
 * element sizes match.  `count` is a number of ELEMENTS, not bytes.
 * Implemented as a GNU statement expression so it can appear where a
 * value is expected; callers in this file use it as a statement.
 */
#define typed_memcpy(dest, src, count) ({ \
   static_assert(sizeof(*src) == sizeof(*dest), ""); \
   memcpy((dest), (src), (count) * sizeof(*(src))); \
})
43
/* Cached per-xcb_connection_t facts: whether the X server advertises the
 * DRI3 and Present extensions (queried once in wsi_x11_connection_create).
 */
struct wsi_x11_connection {
   bool has_dri3;
   bool has_present;
};
48
/* Top-level X11 WSI state; registered for both the XCB and Xlib platform
 * slots in wsi_x11_init_wsi().
 */
struct wsi_x11 {
   struct wsi_interface base;

   /* Guards the connections table below. */
   pthread_mutex_t mutex;
   /* Hash table of xcb_connection -> wsi_x11_connection mappings */
   struct hash_table *connections;
};
56
57 static struct wsi_x11_connection *
58 wsi_x11_connection_create(const VkAllocationCallbacks *alloc,
59 xcb_connection_t *conn)
60 {
61 xcb_query_extension_cookie_t dri3_cookie, pres_cookie;
62 xcb_query_extension_reply_t *dri3_reply, *pres_reply;
63
64 struct wsi_x11_connection *wsi_conn =
65 vk_alloc(alloc, sizeof(*wsi_conn), 8,
66 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
67 if (!wsi_conn)
68 return NULL;
69
70 dri3_cookie = xcb_query_extension(conn, 4, "DRI3");
71 pres_cookie = xcb_query_extension(conn, 7, "PRESENT");
72
73 dri3_reply = xcb_query_extension_reply(conn, dri3_cookie, NULL);
74 pres_reply = xcb_query_extension_reply(conn, pres_cookie, NULL);
75 if (dri3_reply == NULL || pres_reply == NULL) {
76 free(dri3_reply);
77 free(pres_reply);
78 vk_free(alloc, wsi_conn);
79 return NULL;
80 }
81
82 wsi_conn->has_dri3 = dri3_reply->present != 0;
83 wsi_conn->has_present = pres_reply->present != 0;
84
85 free(dri3_reply);
86 free(pres_reply);
87
88 return wsi_conn;
89 }
90
91 static void
92 wsi_x11_connection_destroy(const VkAllocationCallbacks *alloc,
93 struct wsi_x11_connection *conn)
94 {
95 vk_free(alloc, conn);
96 }
97
98 static struct wsi_x11_connection *
99 wsi_x11_get_connection(struct wsi_device *wsi_dev,
100 const VkAllocationCallbacks *alloc,
101 xcb_connection_t *conn)
102 {
103 struct wsi_x11 *wsi =
104 (struct wsi_x11 *)wsi_dev->wsi[VK_ICD_WSI_PLATFORM_XCB];
105
106 pthread_mutex_lock(&wsi->mutex);
107
108 struct hash_entry *entry = _mesa_hash_table_search(wsi->connections, conn);
109 if (!entry) {
110 /* We're about to make a bunch of blocking calls. Let's drop the
111 * mutex for now so we don't block up too badly.
112 */
113 pthread_mutex_unlock(&wsi->mutex);
114
115 struct wsi_x11_connection *wsi_conn =
116 wsi_x11_connection_create(alloc, conn);
117
118 pthread_mutex_lock(&wsi->mutex);
119
120 entry = _mesa_hash_table_search(wsi->connections, conn);
121 if (entry) {
122 /* Oops, someone raced us to it */
123 wsi_x11_connection_destroy(alloc, wsi_conn);
124 } else {
125 entry = _mesa_hash_table_insert(wsi->connections, conn, wsi_conn);
126 }
127 }
128
129 pthread_mutex_unlock(&wsi->mutex);
130
131 return entry->data;
132 }
133
/* The only surface formats advertised for X11 surfaces; see
 * x11_surface_get_formats().
 */
static const VkSurfaceFormatKHR formats[] = {
   { .format = VK_FORMAT_B8G8R8A8_SRGB, },
   { .format = VK_FORMAT_B8G8R8A8_UNORM, },
};
138
/* Present modes advertised for X11 surfaces.  Only mailbox is listed;
 * x11_queue_present() always presents with XCB_PRESENT_OPTION_ASYNC.
 */
static const VkPresentModeKHR present_modes[] = {
   VK_PRESENT_MODE_MAILBOX_KHR,
};
142
143 static xcb_screen_t *
144 get_screen_for_root(xcb_connection_t *conn, xcb_window_t root)
145 {
146 xcb_screen_iterator_t screen_iter =
147 xcb_setup_roots_iterator(xcb_get_setup(conn));
148
149 for (; screen_iter.rem; xcb_screen_next (&screen_iter)) {
150 if (screen_iter.data->root == root)
151 return screen_iter.data;
152 }
153
154 return NULL;
155 }
156
157 static xcb_visualtype_t *
158 screen_get_visualtype(xcb_screen_t *screen, xcb_visualid_t visual_id,
159 unsigned *depth)
160 {
161 xcb_depth_iterator_t depth_iter =
162 xcb_screen_allowed_depths_iterator(screen);
163
164 for (; depth_iter.rem; xcb_depth_next (&depth_iter)) {
165 xcb_visualtype_iterator_t visual_iter =
166 xcb_depth_visuals_iterator (depth_iter.data);
167
168 for (; visual_iter.rem; xcb_visualtype_next (&visual_iter)) {
169 if (visual_iter.data->visual_id == visual_id) {
170 if (depth)
171 *depth = depth_iter.data->depth;
172 return visual_iter.data;
173 }
174 }
175 }
176
177 return NULL;
178 }
179
180 static xcb_visualtype_t *
181 connection_get_visualtype(xcb_connection_t *conn, xcb_visualid_t visual_id,
182 unsigned *depth)
183 {
184 xcb_screen_iterator_t screen_iter =
185 xcb_setup_roots_iterator(xcb_get_setup(conn));
186
187 /* For this we have to iterate over all of the screens which is rather
188 * annoying. Fortunately, there is probably only 1.
189 */
190 for (; screen_iter.rem; xcb_screen_next (&screen_iter)) {
191 xcb_visualtype_t *visual = screen_get_visualtype(screen_iter.data,
192 visual_id, depth);
193 if (visual)
194 return visual;
195 }
196
197 return NULL;
198 }
199
200 static xcb_visualtype_t *
201 get_visualtype_for_window(xcb_connection_t *conn, xcb_window_t window,
202 unsigned *depth)
203 {
204 xcb_query_tree_cookie_t tree_cookie;
205 xcb_get_window_attributes_cookie_t attrib_cookie;
206 xcb_query_tree_reply_t *tree;
207 xcb_get_window_attributes_reply_t *attrib;
208
209 tree_cookie = xcb_query_tree(conn, window);
210 attrib_cookie = xcb_get_window_attributes(conn, window);
211
212 tree = xcb_query_tree_reply(conn, tree_cookie, NULL);
213 attrib = xcb_get_window_attributes_reply(conn, attrib_cookie, NULL);
214 if (attrib == NULL || tree == NULL) {
215 free(attrib);
216 free(tree);
217 return NULL;
218 }
219
220 xcb_window_t root = tree->root;
221 xcb_visualid_t visual_id = attrib->visual;
222 free(attrib);
223 free(tree);
224
225 xcb_screen_t *screen = get_screen_for_root(conn, root);
226 if (screen == NULL)
227 return NULL;
228
229 return screen_get_visualtype(screen, visual_id, depth);
230 }
231
232 static bool
233 visual_has_alpha(xcb_visualtype_t *visual, unsigned depth)
234 {
235 uint32_t rgb_mask = visual->red_mask |
236 visual->green_mask |
237 visual->blue_mask;
238
239 uint32_t all_mask = 0xffffffff >> (32 - depth);
240
241 /* Do we have bits left over after RGB? */
242 return (all_mask & ~rgb_mask) != 0;
243 }
244
245 VkBool32 wsi_get_physical_device_xcb_presentation_support(
246 struct wsi_device *wsi_device,
247 VkAllocationCallbacks *alloc,
248 uint32_t queueFamilyIndex,
249 xcb_connection_t* connection,
250 xcb_visualid_t visual_id)
251 {
252 struct wsi_x11_connection *wsi_conn =
253 wsi_x11_get_connection(wsi_device, alloc, connection);
254
255 if (!wsi_conn->has_dri3) {
256 fprintf(stderr, "vulkan: No DRI3 support\n");
257 return false;
258 }
259
260 unsigned visual_depth;
261 if (!connection_get_visualtype(connection, visual_id, &visual_depth))
262 return false;
263
264 if (visual_depth != 24 && visual_depth != 32)
265 return false;
266
267 return true;
268 }
269
270 static xcb_connection_t*
271 x11_surface_get_connection(VkIcdSurfaceBase *icd_surface)
272 {
273 if (icd_surface->platform == VK_ICD_WSI_PLATFORM_XLIB)
274 return XGetXCBConnection(((VkIcdSurfaceXlib *)icd_surface)->dpy);
275 else
276 return ((VkIcdSurfaceXcb *)icd_surface)->connection;
277 }
278
279 static xcb_window_t
280 x11_surface_get_window(VkIcdSurfaceBase *icd_surface)
281 {
282 if (icd_surface->platform == VK_ICD_WSI_PLATFORM_XLIB)
283 return ((VkIcdSurfaceXlib *)icd_surface)->window;
284 else
285 return ((VkIcdSurfaceXcb *)icd_surface)->window;
286 }
287
288 static VkResult
289 x11_surface_get_support(VkIcdSurfaceBase *icd_surface,
290 struct wsi_device *wsi_device,
291 const VkAllocationCallbacks *alloc,
292 uint32_t queueFamilyIndex,
293 VkBool32* pSupported)
294 {
295 xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
296 xcb_window_t window = x11_surface_get_window(icd_surface);
297
298 struct wsi_x11_connection *wsi_conn =
299 wsi_x11_get_connection(wsi_device, alloc, conn);
300 if (!wsi_conn)
301 return VK_ERROR_OUT_OF_HOST_MEMORY;
302
303 if (!wsi_conn->has_dri3) {
304 fprintf(stderr, "vulkan: No DRI3 support\n");
305 *pSupported = false;
306 return VK_SUCCESS;
307 }
308
309 unsigned visual_depth;
310 if (!get_visualtype_for_window(conn, window, &visual_depth)) {
311 *pSupported = false;
312 return VK_SUCCESS;
313 }
314
315 if (visual_depth != 24 && visual_depth != 32) {
316 *pSupported = false;
317 return VK_SUCCESS;
318 }
319
320 *pSupported = true;
321 return VK_SUCCESS;
322 }
323
324 static VkResult
325 x11_surface_get_capabilities(VkIcdSurfaceBase *icd_surface,
326 VkSurfaceCapabilitiesKHR *caps)
327 {
328 xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
329 xcb_window_t window = x11_surface_get_window(icd_surface);
330 xcb_get_geometry_cookie_t geom_cookie;
331 xcb_generic_error_t *err;
332 xcb_get_geometry_reply_t *geom;
333 unsigned visual_depth;
334
335 geom_cookie = xcb_get_geometry(conn, window);
336
337 /* This does a round-trip. This is why we do get_geometry first and
338 * wait to read the reply until after we have a visual.
339 */
340 xcb_visualtype_t *visual =
341 get_visualtype_for_window(conn, window, &visual_depth);
342
343 geom = xcb_get_geometry_reply(conn, geom_cookie, &err);
344 if (geom) {
345 VkExtent2D extent = { geom->width, geom->height };
346 caps->currentExtent = extent;
347 caps->minImageExtent = extent;
348 caps->maxImageExtent = extent;
349 } else {
350 /* This can happen if the client didn't wait for the configure event
351 * to come back from the compositor. In that case, we don't know the
352 * size of the window so we just return valid "I don't know" stuff.
353 */
354 caps->currentExtent = (VkExtent2D) { -1, -1 };
355 caps->minImageExtent = (VkExtent2D) { 1, 1 };
356 caps->maxImageExtent = (VkExtent2D) { INT16_MAX, INT16_MAX };
357 }
358 free(err);
359 free(geom);
360
361 if (visual_has_alpha(visual, visual_depth)) {
362 caps->supportedCompositeAlpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR |
363 VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR;
364 } else {
365 caps->supportedCompositeAlpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR |
366 VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
367 }
368
369 caps->minImageCount = 2;
370 caps->maxImageCount = 4;
371 caps->supportedTransforms = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
372 caps->currentTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
373 caps->maxImageArrayLayers = 1;
374 caps->supportedUsageFlags =
375 VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
376 VK_IMAGE_USAGE_SAMPLED_BIT |
377 VK_IMAGE_USAGE_TRANSFER_DST_BIT |
378 VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
379
380 return VK_SUCCESS;
381 }
382
383 static VkResult
384 x11_surface_get_formats(VkIcdSurfaceBase *surface,
385 struct wsi_device *wsi_device,
386 uint32_t *pSurfaceFormatCount,
387 VkSurfaceFormatKHR *pSurfaceFormats)
388 {
389 if (pSurfaceFormats == NULL) {
390 *pSurfaceFormatCount = ARRAY_SIZE(formats);
391 return VK_SUCCESS;
392 }
393
394 assert(*pSurfaceFormatCount >= ARRAY_SIZE(formats));
395 typed_memcpy(pSurfaceFormats, formats, *pSurfaceFormatCount);
396 *pSurfaceFormatCount = ARRAY_SIZE(formats);
397
398 return VK_SUCCESS;
399 }
400
401 static VkResult
402 x11_surface_get_present_modes(VkIcdSurfaceBase *surface,
403 uint32_t *pPresentModeCount,
404 VkPresentModeKHR *pPresentModes)
405 {
406 if (pPresentModes == NULL) {
407 *pPresentModeCount = ARRAY_SIZE(present_modes);
408 return VK_SUCCESS;
409 }
410
411 assert(*pPresentModeCount >= ARRAY_SIZE(present_modes));
412 typed_memcpy(pPresentModes, present_modes, *pPresentModeCount);
413 *pPresentModeCount = ARRAY_SIZE(present_modes);
414
415 return VK_SUCCESS;
416 }
417
418 VkResult wsi_create_xcb_surface(const VkAllocationCallbacks *pAllocator,
419 const VkXcbSurfaceCreateInfoKHR *pCreateInfo,
420 VkSurfaceKHR *pSurface)
421 {
422 VkIcdSurfaceXcb *surface;
423
424 surface = vk_alloc(pAllocator, sizeof *surface, 8,
425 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
426 if (surface == NULL)
427 return VK_ERROR_OUT_OF_HOST_MEMORY;
428
429 surface->base.platform = VK_ICD_WSI_PLATFORM_XCB;
430 surface->connection = pCreateInfo->connection;
431 surface->window = pCreateInfo->window;
432
433 *pSurface = _VkIcdSurfaceBase_to_handle(&surface->base);
434 return VK_SUCCESS;
435 }
436
437 VkResult wsi_create_xlib_surface(const VkAllocationCallbacks *pAllocator,
438 const VkXlibSurfaceCreateInfoKHR *pCreateInfo,
439 VkSurfaceKHR *pSurface)
440 {
441 VkIcdSurfaceXlib *surface;
442
443 surface = vk_alloc(pAllocator, sizeof *surface, 8,
444 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
445 if (surface == NULL)
446 return VK_ERROR_OUT_OF_HOST_MEMORY;
447
448 surface->base.platform = VK_ICD_WSI_PLATFORM_XLIB;
449 surface->dpy = pCreateInfo->dpy;
450 surface->window = pCreateInfo->window;
451
452 *pSurface = _VkIcdSurfaceBase_to_handle(&surface->base);
453 return VK_SUCCESS;
454 }
455
/* Per-swapchain-image state. */
struct x11_image {
   VkImage image;                 /* the driver-side image */
   VkDeviceMemory memory;         /* its backing memory (exported to the server) */
   xcb_pixmap_t pixmap;           /* DRI3 pixmap aliasing the image */
   bool busy;                     /* set at present, cleared on IDLE_NOTIFY */
   struct xshmfence * shm_fence;  /* triggered by the server when done reading */
   uint32_t sync_fence;           /* XSync fence id paired with shm_fence */
};
464
/* X11/DRI3 swapchain implementation of wsi_swapchain. */
struct x11_swapchain {
   struct wsi_swapchain base;

   xcb_connection_t *                           conn;
   xcb_window_t                                 window;
   xcb_gc_t                                     gc;
   VkExtent2D                                   extent;
   uint32_t                                     image_count;

   /* Present event tracking: a dedicated special-event queue keeps
    * Present events out of the application's event queue.
    */
   xcb_present_event_t                          event_id;
   xcb_special_event_t *                        special_event;
   uint64_t                                     send_sbc;   /* serial of last present */
   uint32_t                                     stamp;

   /* Flexible-style trailing array, sized at allocation time. */
   struct x11_image                             images[0];
};
481
482 static VkResult
483 x11_get_images(struct wsi_swapchain *anv_chain,
484 uint32_t* pCount, VkImage *pSwapchainImages)
485 {
486 struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
487
488 if (pSwapchainImages == NULL) {
489 *pCount = chain->image_count;
490 return VK_SUCCESS;
491 }
492
493 assert(chain->image_count <= *pCount);
494 for (uint32_t i = 0; i < chain->image_count; i++)
495 pSwapchainImages[i] = chain->images[i].image;
496
497 *pCount = chain->image_count;
498
499 return VK_SUCCESS;
500 }
501
/* Process one Present-extension event from the swapchain's special event
 * queue.  Returns VK_ERROR_OUT_OF_DATE_KHR if the window was resized away
 * from the swapchain's extent; VK_SUCCESS otherwise.
 */
static VkResult
x11_handle_dri3_present_event(struct x11_swapchain *chain,
                              xcb_present_generic_event_t *event)
{
   switch (event->evtype) {
   case XCB_PRESENT_CONFIGURE_NOTIFY: {
      xcb_present_configure_notify_event_t *config = (void *) event;

      /* Window no longer matches the swapchain extent: out of date. */
      if (config->width != chain->extent.width ||
          config->height != chain->extent.height)
         return VK_ERROR_OUT_OF_DATE_KHR;

      break;
   }

   case XCB_PRESENT_EVENT_IDLE_NOTIFY: {
      xcb_present_idle_notify_event_t *idle = (void *) event;

      /* The server is done with this pixmap; mark the matching image as
       * reusable so x11_acquire_next_image() can hand it out.
       */
      for (unsigned i = 0; i < chain->image_count; i++) {
         if (chain->images[i].pixmap == idle->pixmap) {
            chain->images[i].busy = false;
            break;
         }
      }

      break;
   }

   case XCB_PRESENT_COMPLETE_NOTIFY:
   default:
      /* No per-event state is tracked for completion events here. */
      break;
   }

   return VK_SUCCESS;
}
537
/* Find a swapchain image the X server is no longer using and return its
 * index.  If none is free, blocks on the special event queue and
 * processes Present events until one becomes idle.
 *
 * NOTE(review): the `timeout` and `semaphore` parameters are currently
 * ignored — this always blocks indefinitely; confirm against callers.
 */
static VkResult
x11_acquire_next_image(struct wsi_swapchain *anv_chain,
                       uint64_t timeout,
                       VkSemaphore semaphore,
                       uint32_t *image_index)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;

   while (1) {
      for (uint32_t i = 0; i < chain->image_count; i++) {
         if (!chain->images[i].busy) {
            /* We found a non-busy image */
            xshmfence_await(chain->images[i].shm_fence);
            *image_index = i;
            chain->images[i].busy = true;
            return VK_SUCCESS;
         }
      }

      /* No image free: flush pending requests, then wait for a Present
       * event (e.g. IDLE_NOTIFY) that may release one.
       */
      xcb_flush(chain->conn);
      xcb_generic_event_t *event =
         xcb_wait_for_special_event(chain->conn, chain->special_event);
      if (!event)
         return VK_ERROR_OUT_OF_DATE_KHR;

      VkResult result = x11_handle_dri3_present_event(chain, (void *)event);
      free(event);
      if (result != VK_SUCCESS)
         return result;
   }
}
569
/* Queue images[image_index] for presentation via the Present extension.
 *
 * Resets the image's shm fence (to be re-triggered when the server is
 * done with the pixmap) and issues an asynchronous PresentPixmap.  The
 * image stays busy until an IDLE_NOTIFY event arrives for its pixmap.
 */
static VkResult
x11_queue_present(struct wsi_swapchain *anv_chain,
                  uint32_t image_index)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
   struct x11_image *image = &chain->images[image_index];

   assert(image_index < chain->image_count);

   uint32_t options = XCB_PRESENT_OPTION_NONE;

   /* No target MSC / divisor: present as soon as possible. */
   int64_t target_msc = 0;
   int64_t divisor = 0;
   int64_t remainder = 0;

   options |= XCB_PRESENT_OPTION_ASYNC;

   /* Re-arm the fence before handing the pixmap to the server. */
   xshmfence_reset(image->shm_fence);

   ++chain->send_sbc;
   xcb_void_cookie_t cookie =
      xcb_present_pixmap(chain->conn,
                         chain->window,
                         image->pixmap,
                         (uint32_t) chain->send_sbc,
                         0,                                    /* valid */
                         0,                                    /* update */
                         0,                                    /* x_off */
                         0,                                    /* y_off */
                         XCB_NONE,                             /* target_crtc */
                         XCB_NONE,
                         image->sync_fence,
                         options,
                         target_msc,
                         divisor,
                         remainder, 0, NULL);
   xcb_discard_reply(chain->conn, cookie.sequence);
   image->busy = true;

   xcb_flush(chain->conn);

   return VK_SUCCESS;
}
613
614 static VkResult
615 x11_image_init(VkDevice device_h, struct x11_swapchain *chain,
616 const VkSwapchainCreateInfoKHR *pCreateInfo,
617 const VkAllocationCallbacks* pAllocator,
618 struct x11_image *image)
619 {
620 xcb_void_cookie_t cookie;
621 VkResult result;
622 uint32_t row_pitch;
623 uint32_t offset;
624 uint32_t bpp = 32;
625 uint32_t depth = 24;
626 int fd;
627 uint32_t size;
628
629 result = chain->base.image_fns->create_wsi_image(device_h,
630 pCreateInfo,
631 pAllocator,
632 &image->image,
633 &image->memory,
634 &size,
635 &offset,
636 &row_pitch,
637 &fd);
638 if (result != VK_SUCCESS)
639 return result;
640
641 image->pixmap = xcb_generate_id(chain->conn);
642
643 cookie =
644 xcb_dri3_pixmap_from_buffer_checked(chain->conn,
645 image->pixmap,
646 chain->window,
647 size,
648 pCreateInfo->imageExtent.width,
649 pCreateInfo->imageExtent.height,
650 row_pitch,
651 depth, bpp, fd);
652 xcb_discard_reply(chain->conn, cookie.sequence);
653
654 int fence_fd = xshmfence_alloc_shm();
655 if (fence_fd < 0)
656 goto fail_pixmap;
657
658 image->shm_fence = xshmfence_map_shm(fence_fd);
659 if (image->shm_fence == NULL)
660 goto fail_shmfence_alloc;
661
662 image->sync_fence = xcb_generate_id(chain->conn);
663 xcb_dri3_fence_from_fd(chain->conn,
664 image->pixmap,
665 image->sync_fence,
666 false,
667 fence_fd);
668
669 image->busy = false;
670 xshmfence_trigger(image->shm_fence);
671
672 return VK_SUCCESS;
673
674 fail_shmfence_alloc:
675 close(fence_fd);
676
677 fail_pixmap:
678 cookie = xcb_free_pixmap(chain->conn, image->pixmap);
679 xcb_discard_reply(chain->conn, cookie.sequence);
680
681 chain->base.image_fns->free_wsi_image(device_h, pAllocator,
682 image->image, image->memory);
683
684 return result;
685 }
686
687 static void
688 x11_image_finish(struct x11_swapchain *chain,
689 const VkAllocationCallbacks* pAllocator,
690 struct x11_image *image)
691 {
692 xcb_void_cookie_t cookie;
693
694 cookie = xcb_sync_destroy_fence(chain->conn, image->sync_fence);
695 xcb_discard_reply(chain->conn, cookie.sequence);
696 xshmfence_unmap_shm(image->shm_fence);
697
698 cookie = xcb_free_pixmap(chain->conn, image->pixmap);
699 xcb_discard_reply(chain->conn, cookie.sequence);
700
701 chain->base.image_fns->free_wsi_image(chain->base.device, pAllocator,
702 image->image, image->memory);
703 }
704
705 static VkResult
706 x11_swapchain_destroy(struct wsi_swapchain *anv_chain,
707 const VkAllocationCallbacks *pAllocator)
708 {
709 struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
710 for (uint32_t i = 0; i < chain->image_count; i++)
711 x11_image_finish(chain, pAllocator, &chain->images[i]);
712
713 xcb_unregister_for_special_event(chain->conn, chain->special_event);
714
715 vk_free(pAllocator, chain);
716
717 return VK_SUCCESS;
718 }
719
/* Create an X11 swapchain: allocate the chain (with trailing image
 * array), subscribe to Present events on the window, create a GC, and
 * initialize image_count DRI3-backed images.  On failure, everything
 * created so far is torn down.
 */
static VkResult
x11_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
                             VkDevice device,
                             struct wsi_device *wsi_device,
                             const VkSwapchainCreateInfoKHR *pCreateInfo,
                             const VkAllocationCallbacks* pAllocator,
                             const struct wsi_image_fns *image_fns,
                             struct wsi_swapchain **swapchain_out)
{
   struct x11_swapchain *chain;
   xcb_void_cookie_t cookie;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR);

   int num_images = pCreateInfo->minImageCount;

   /* For true mailbox mode, we need at least 4 images:
    * 1) One to scan out from
    * 2) One to have queued for scan-out
    * 3) One to be currently held by the X server
    * 4) One to render to
    */
   if (pCreateInfo->presentMode == VK_PRESENT_MODE_MAILBOX_KHR)
      num_images = MAX2(num_images, 4);

   size_t size = sizeof(*chain) + num_images * sizeof(chain->images[0]);
   chain = vk_alloc(pAllocator, size, 8,
                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (chain == NULL)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   chain->base.device = device;
   chain->base.destroy = x11_swapchain_destroy;
   chain->base.get_images = x11_get_images;
   chain->base.acquire_next_image = x11_acquire_next_image;
   chain->base.queue_present = x11_queue_present;
   chain->base.image_fns = image_fns;
   chain->conn = x11_surface_get_connection(icd_surface);
   chain->window = x11_surface_get_window(icd_surface);
   chain->extent = pCreateInfo->imageExtent;
   chain->image_count = num_images;
   chain->send_sbc = 0;

   /* Subscribe to the Present events the acquire/present paths rely on. */
   chain->event_id = xcb_generate_id(chain->conn);
   xcb_present_select_input(chain->conn, chain->event_id, chain->window,
                            XCB_PRESENT_EVENT_MASK_CONFIGURE_NOTIFY |
                            XCB_PRESENT_EVENT_MASK_COMPLETE_NOTIFY |
                            XCB_PRESENT_EVENT_MASK_IDLE_NOTIFY);

   /* Create an XCB event queue to hold present events outside of the usual
    * application event queue
    */
   chain->special_event =
      xcb_register_for_special_xge(chain->conn, &xcb_present_id,
                                   chain->event_id, NULL);

   chain->gc = xcb_generate_id(chain->conn);
   if (!chain->gc) {
      /* FINISHME: Choose a better error. */
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail_register;
   }

   cookie = xcb_create_gc(chain->conn,
                          chain->gc,
                          chain->window,
                          XCB_GC_GRAPHICS_EXPOSURES,
                          (uint32_t []) { 0 });
   xcb_discard_reply(chain->conn, cookie.sequence);

   /* `image` doubles as the count of successfully-initialized images so
    * the failure path below knows how many to unwind.
    */
   uint32_t image = 0;
   for (; image < chain->image_count; image++) {
      result = x11_image_init(device, chain, pCreateInfo, pAllocator,
                              &chain->images[image]);
      if (result != VK_SUCCESS)
         goto fail_init_images;
   }

   *swapchain_out = &chain->base;

   return VK_SUCCESS;

fail_init_images:
   for (uint32_t j = 0; j < image; j++)
      x11_image_finish(chain, pAllocator, &chain->images[j]);

fail_register:
   xcb_unregister_for_special_event(chain->conn, chain->special_event);

   vk_free(pAllocator, chain);

   return result;
}
814
/* Register the X11 WSI backend (both XCB and Xlib platform slots) on
 * wsi_device.  Allocates the wsi_x11 state, its mutex, and the
 * per-connection cache, then installs the surface entrypoints.  On any
 * failure both platform slots are cleared and an error is returned.
 */
VkResult
wsi_x11_init_wsi(struct wsi_device *wsi_device,
                 const VkAllocationCallbacks *alloc)
{
   struct wsi_x11 *wsi;
   VkResult result;

   wsi = vk_alloc(alloc, sizeof(*wsi), 8,
                  VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!wsi) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail;
   }

   int ret = pthread_mutex_init(&wsi->mutex, NULL);
   if (ret != 0) {
      if (ret == ENOMEM) {
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
      } else {
         /* FINISHME: Choose a better error. */
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
      }

      goto fail_alloc;
   }

   /* Pointer-keyed cache of xcb_connection_t -> wsi_x11_connection. */
   wsi->connections = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                              _mesa_key_pointer_equal);
   if (!wsi->connections) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail_mutex;
   }

   wsi->base.get_support = x11_surface_get_support;
   wsi->base.get_capabilities = x11_surface_get_capabilities;
   wsi->base.get_formats = x11_surface_get_formats;
   wsi->base.get_present_modes = x11_surface_get_present_modes;
   wsi->base.create_swapchain = x11_surface_create_swapchain;

   /* One wsi_x11 instance serves both platforms. */
   wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB] = &wsi->base;
   wsi_device->wsi[VK_ICD_WSI_PLATFORM_XLIB] = &wsi->base;

   return VK_SUCCESS;

fail_mutex:
   pthread_mutex_destroy(&wsi->mutex);
fail_alloc:
   vk_free(alloc, wsi);
fail:
   wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB] = NULL;
   wsi_device->wsi[VK_ICD_WSI_PLATFORM_XLIB] = NULL;

   return result;
}
869
870 void
871 wsi_x11_finish_wsi(struct wsi_device *wsi_device,
872 const VkAllocationCallbacks *alloc)
873 {
874 struct wsi_x11 *wsi =
875 (struct wsi_x11 *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB];
876
877 if (wsi) {
878 _mesa_hash_table_destroy(wsi->connections, NULL);
879
880 pthread_mutex_destroy(&wsi->mutex);
881
882 vk_free(alloc, wsi);
883 }
884 }