vulkan/wsi: fix incorrect assignment in assert()
[mesa.git] / src/vulkan/wsi/wsi_common_x11.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <X11/Xlib-xcb.h>
#include <X11/xshmfence.h>
#include <xcb/xcb.h>
#include <xcb/dri3.h>
#include <xcb/present.h>

#include "util/macros.h"
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <fcntl.h>
#include <poll.h>
#include <time.h> /* clock_gettime(), used by wsi_get_current_time() below */
#include <xf86drm.h>
#include <drm_fourcc.h>
#include "util/hash_table.h"

#include "vk_util.h"
#include "wsi_common_private.h"
#include "wsi_common_x11.h"
#include "wsi_common_queue.h"

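/* memcpy() wrapper that statically checks that the source and destination
 * element types have the same size before copying `count` elements.
 */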
#define typed_memcpy(dest, src, count) ({ \
   STATIC_ASSERT(sizeof(*src) == sizeof(*dest)); \
   memcpy((dest), (src), (count) * sizeof(*(src))); \
})

struct wsi_x11_connection {
   bool has_dri3;
   bool has_dri3_modifiers;
   bool has_present;
   bool is_proprietary_x11;
};

struct wsi_x11 {
   struct wsi_interface base;

   pthread_mutex_t mutex;
   /* Hash table of xcb_connection -> wsi_x11_connection mappings */
   struct hash_table *connections;
};


/** wsi_dri3_open
 *
 * Wrapper around xcb_dri3_open
 */
static int
wsi_dri3_open(xcb_connection_t *conn,
              xcb_window_t root,
              uint32_t provider)
{
   xcb_dri3_open_cookie_t cookie;
   xcb_dri3_open_reply_t *reply;
   int fd;

   cookie = xcb_dri3_open(conn,
                          root,
                          provider);

   reply = xcb_dri3_open_reply(conn, cookie, NULL);
   if (!reply)
      return -1;

   if (reply->nfd != 1) {
      free(reply);
      return -1;
   }

   fd = xcb_dri3_open_reply_fds(conn, reply)[0];
   free(reply);
   fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);

   return fd;
}

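/* Returns true when the DRI3 device behind the X server and the device we
 * render on (local_fd) resolve to the same DRM render node, i.e. when we can
 * present directly without a PRIME copy.  If the DRI3 open itself fails we
 * optimistically report compatibility and let later calls surface the error.
 */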
static bool
wsi_x11_check_dri3_compatible(xcb_connection_t *conn, int local_fd)
{
   xcb_screen_iterator_t screen_iter =
      xcb_setup_roots_iterator(xcb_get_setup(conn));
   xcb_screen_t *screen = screen_iter.data;

   int dri3_fd = wsi_dri3_open(conn, screen->root, None);
   if (dri3_fd != -1) {
      char *local_dev = drmGetRenderDeviceNameFromFd(local_fd);
      char *dri3_dev = drmGetRenderDeviceNameFromFd(dri3_fd);
      int ret;

      close(dri3_fd);

      ret = strcmp(local_dev, dri3_dev);

      free(local_dev);
      free(dri3_dev);

      if (ret != 0)
         return false;
   }
   return true;
}

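/* Probe a fresh xcb_connection_t for the extensions WSI cares about (DRI3,
 * Present, and their v1.2 revisions for modifier support) and cache the
 * results in a newly allocated wsi_x11_connection.
 */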
static struct wsi_x11_connection *
wsi_x11_connection_create(const VkAllocationCallbacks *alloc,
                          xcb_connection_t *conn)
{
   xcb_query_extension_cookie_t dri3_cookie, pres_cookie, amd_cookie, nv_cookie;
   xcb_query_extension_reply_t *dri3_reply, *pres_reply, *amd_reply, *nv_reply;
   bool has_dri3_v1_2 = false;
   bool has_present_v1_2 = false;

   struct wsi_x11_connection *wsi_conn =
      vk_alloc(alloc, sizeof(*wsi_conn), 8,
               VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!wsi_conn)
      return NULL;

   dri3_cookie = xcb_query_extension(conn, 4, "DRI3");
   pres_cookie = xcb_query_extension(conn, 7, "Present");

   /* We try to be nice to users and emit a warning if they try to use a
    * Vulkan application on a system without DRI3 enabled.  However, this ends
    * up spewing the warning when a user has, for example, both Intel
    * integrated graphics and a discrete card with proprietary drivers and is
    * running on the discrete card with the proprietary DDX.  In this case, we
    * really don't want to print the warning because it just confuses users.
    * As a heuristic to detect this case, we check for a couple of proprietary
    * X11 extensions.
    */
   amd_cookie = xcb_query_extension(conn, 11, "ATIFGLRXDRI");
   nv_cookie = xcb_query_extension(conn, 10, "NV-CONTROL");

   dri3_reply = xcb_query_extension_reply(conn, dri3_cookie, NULL);
   pres_reply = xcb_query_extension_reply(conn, pres_cookie, NULL);
   amd_reply = xcb_query_extension_reply(conn, amd_cookie, NULL);
   nv_reply = xcb_query_extension_reply(conn, nv_cookie, NULL);
   if (!dri3_reply || !pres_reply) {
      free(dri3_reply);
      free(pres_reply);
      free(amd_reply);
      free(nv_reply);
      vk_free(alloc, wsi_conn);
      return NULL;
   }

   wsi_conn->has_dri3 = dri3_reply->present != 0;
#ifdef HAVE_DRI3_MODIFIERS
   if (wsi_conn->has_dri3) {
      xcb_dri3_query_version_cookie_t ver_cookie;
      xcb_dri3_query_version_reply_t *ver_reply;

      ver_cookie = xcb_dri3_query_version(conn, 1, 2);
      ver_reply = xcb_dri3_query_version_reply(conn, ver_cookie, NULL);
      /* ver_reply can be NULL if the connection dies mid-query; treat that
       * as "no DRI3 1.2" rather than dereferencing a NULL reply.
       */
      has_dri3_v1_2 = ver_reply &&
         (ver_reply->major_version > 1 || ver_reply->minor_version >= 2);
      free(ver_reply);
   }
#endif

   wsi_conn->has_present = pres_reply->present != 0;
#ifdef HAVE_DRI3_MODIFIERS
   if (wsi_conn->has_present) {
      xcb_present_query_version_cookie_t ver_cookie;
      xcb_present_query_version_reply_t *ver_reply;

      ver_cookie = xcb_present_query_version(conn, 1, 2);
      ver_reply = xcb_present_query_version_reply(conn, ver_cookie, NULL);
      has_present_v1_2 = ver_reply &&
         (ver_reply->major_version > 1 || ver_reply->minor_version >= 2);
      free(ver_reply);
   }
#endif

   wsi_conn->has_dri3_modifiers = has_dri3_v1_2 && has_present_v1_2;
   wsi_conn->is_proprietary_x11 = false;
   if (amd_reply && amd_reply->present)
      wsi_conn->is_proprietary_x11 = true;
   if (nv_reply && nv_reply->present)
      wsi_conn->is_proprietary_x11 = true;

   free(dri3_reply);
   free(pres_reply);
   free(amd_reply);
   free(nv_reply);

   return wsi_conn;
}

static void
wsi_x11_connection_destroy(const VkAllocationCallbacks *alloc,
                           struct wsi_x11_connection *conn)
{
   vk_free(alloc, conn);
}

static bool
wsi_x11_check_for_dri3(struct wsi_x11_connection *wsi_conn)
{
   if (wsi_conn->has_dri3)
      return true;
   if (!wsi_conn->is_proprietary_x11) {
      fprintf(stderr, "vulkan: No DRI3 support detected - required for presentation\n"
                      "Note: you can probably enable DRI3 in your Xorg config\n");
   }
   return false;
}

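/* Look up (or lazily create) the cached wsi_x11_connection for an
 * xcb_connection_t.  This uses the classic double-checked pattern: the mutex
 * is dropped around the blocking X round-trips in wsi_x11_connection_create(),
 * then the table is re-checked in case another thread won the race.
 */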
static struct wsi_x11_connection *
wsi_x11_get_connection(struct wsi_device *wsi_dev,
                       const VkAllocationCallbacks *alloc,
                       xcb_connection_t *conn)
{
   struct wsi_x11 *wsi =
      (struct wsi_x11 *)wsi_dev->wsi[VK_ICD_WSI_PLATFORM_XCB];

   pthread_mutex_lock(&wsi->mutex);

   struct hash_entry *entry = _mesa_hash_table_search(wsi->connections, conn);
   if (!entry) {
      /* We're about to make a bunch of blocking calls.  Let's drop the
       * mutex for now so we don't block up too badly.
       */
      pthread_mutex_unlock(&wsi->mutex);

      struct wsi_x11_connection *wsi_conn =
         wsi_x11_connection_create(alloc, conn);
      if (!wsi_conn)
         return NULL;

      pthread_mutex_lock(&wsi->mutex);

      entry = _mesa_hash_table_search(wsi->connections, conn);
      if (entry) {
         /* Oops, someone raced us to it */
         wsi_x11_connection_destroy(alloc, wsi_conn);
      } else {
         entry = _mesa_hash_table_insert(wsi->connections, conn, wsi_conn);
      }
   }

   pthread_mutex_unlock(&wsi->mutex);

   return entry->data;
}

static const VkFormat formats[] = {
   VK_FORMAT_B8G8R8A8_SRGB,
   VK_FORMAT_B8G8R8A8_UNORM,
};

static const VkPresentModeKHR present_modes[] = {
   VK_PRESENT_MODE_IMMEDIATE_KHR,
   VK_PRESENT_MODE_MAILBOX_KHR,
   VK_PRESENT_MODE_FIFO_KHR,
};

static xcb_screen_t *
get_screen_for_root(xcb_connection_t *conn, xcb_window_t root)
{
   xcb_screen_iterator_t screen_iter =
      xcb_setup_roots_iterator(xcb_get_setup(conn));

   for (; screen_iter.rem; xcb_screen_next (&screen_iter)) {
      if (screen_iter.data->root == root)
         return screen_iter.data;
   }

   return NULL;
}

static xcb_visualtype_t *
screen_get_visualtype(xcb_screen_t *screen, xcb_visualid_t visual_id,
                      unsigned *depth)
{
   xcb_depth_iterator_t depth_iter =
      xcb_screen_allowed_depths_iterator(screen);

   for (; depth_iter.rem; xcb_depth_next (&depth_iter)) {
      xcb_visualtype_iterator_t visual_iter =
         xcb_depth_visuals_iterator (depth_iter.data);

      for (; visual_iter.rem; xcb_visualtype_next (&visual_iter)) {
         if (visual_iter.data->visual_id == visual_id) {
            if (depth)
               *depth = depth_iter.data->depth;
            return visual_iter.data;
         }
      }
   }

   return NULL;
}

static xcb_visualtype_t *
connection_get_visualtype(xcb_connection_t *conn, xcb_visualid_t visual_id,
                          unsigned *depth)
{
   xcb_screen_iterator_t screen_iter =
      xcb_setup_roots_iterator(xcb_get_setup(conn));

   /* For this we have to iterate over all of the screens which is rather
    * annoying.  Fortunately, there is probably only 1.
    */
   for (; screen_iter.rem; xcb_screen_next (&screen_iter)) {
      xcb_visualtype_t *visual = screen_get_visualtype(screen_iter.data,
                                                       visual_id, depth);
      if (visual)
         return visual;
   }

   return NULL;
}

static xcb_visualtype_t *
get_visualtype_for_window(xcb_connection_t *conn, xcb_window_t window,
                          unsigned *depth)
{
   xcb_query_tree_cookie_t tree_cookie;
   xcb_get_window_attributes_cookie_t attrib_cookie;
   xcb_query_tree_reply_t *tree;
   xcb_get_window_attributes_reply_t *attrib;

   tree_cookie = xcb_query_tree(conn, window);
   attrib_cookie = xcb_get_window_attributes(conn, window);

   tree = xcb_query_tree_reply(conn, tree_cookie, NULL);
   attrib = xcb_get_window_attributes_reply(conn, attrib_cookie, NULL);
   if (attrib == NULL || tree == NULL) {
      free(attrib);
      free(tree);
      return NULL;
   }

   xcb_window_t root = tree->root;
   xcb_visualid_t visual_id = attrib->visual;
   free(attrib);
   free(tree);

   xcb_screen_t *screen = get_screen_for_root(conn, root);
   if (screen == NULL)
      return NULL;

   return screen_get_visualtype(screen, visual_id, depth);
}

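/* Example (illustrative values): a depth-32 visual whose red/green/blue masks
 * are 0x00ff0000 / 0x0000ff00 / 0x000000ff covers only 24 of the 32 bits, so
 * the remaining 8 bits are treated as alpha and this returns true.  A
 * depth-24 visual with the same masks has no bits left over and returns
 * false.
 */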
static bool
visual_has_alpha(xcb_visualtype_t *visual, unsigned depth)
{
   uint32_t rgb_mask = visual->red_mask |
                       visual->green_mask |
                       visual->blue_mask;

   uint32_t all_mask = 0xffffffff >> (32 - depth);

   /* Do we have bits left over after RGB? */
   return (all_mask & ~rgb_mask) != 0;
}

VkBool32 wsi_get_physical_device_xcb_presentation_support(
    struct wsi_device *wsi_device,
    VkAllocationCallbacks *alloc,
    uint32_t queueFamilyIndex,
    int fd,
    bool can_handle_different_gpu,
    xcb_connection_t* connection,
    xcb_visualid_t visual_id)
{
   struct wsi_x11_connection *wsi_conn =
      wsi_x11_get_connection(wsi_device, alloc, connection);

   if (!wsi_conn)
      return false;

   if (!wsi_x11_check_for_dri3(wsi_conn))
      return false;

   if (!can_handle_different_gpu)
      if (!wsi_x11_check_dri3_compatible(connection, fd))
         return false;

   unsigned visual_depth;
   if (!connection_get_visualtype(connection, visual_id, &visual_depth))
      return false;

   if (visual_depth != 24 && visual_depth != 32)
      return false;

   return true;
}

static xcb_connection_t*
x11_surface_get_connection(VkIcdSurfaceBase *icd_surface)
{
   if (icd_surface->platform == VK_ICD_WSI_PLATFORM_XLIB)
      return XGetXCBConnection(((VkIcdSurfaceXlib *)icd_surface)->dpy);
   else
      return ((VkIcdSurfaceXcb *)icd_surface)->connection;
}

static xcb_window_t
x11_surface_get_window(VkIcdSurfaceBase *icd_surface)
{
   if (icd_surface->platform == VK_ICD_WSI_PLATFORM_XLIB)
      return ((VkIcdSurfaceXlib *)icd_surface)->window;
   else
      return ((VkIcdSurfaceXcb *)icd_surface)->window;
}

static VkResult
x11_surface_get_support(VkIcdSurfaceBase *icd_surface,
                        struct wsi_device *wsi_device,
                        const VkAllocationCallbacks *alloc,
                        uint32_t queueFamilyIndex,
                        int local_fd,
                        VkBool32* pSupported)
{
   xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
   xcb_window_t window = x11_surface_get_window(icd_surface);

   struct wsi_x11_connection *wsi_conn =
      wsi_x11_get_connection(wsi_device, alloc, conn);
   if (!wsi_conn)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   if (!wsi_x11_check_for_dri3(wsi_conn)) {
      *pSupported = false;
      return VK_SUCCESS;
   }

   unsigned visual_depth;
   if (!get_visualtype_for_window(conn, window, &visual_depth)) {
      *pSupported = false;
      return VK_SUCCESS;
   }

   if (visual_depth != 24 && visual_depth != 32) {
      *pSupported = false;
      return VK_SUCCESS;
   }

   *pSupported = true;
   return VK_SUCCESS;
}

static VkResult
x11_surface_get_capabilities(VkIcdSurfaceBase *icd_surface,
                             VkSurfaceCapabilitiesKHR *caps)
{
   xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
   xcb_window_t window = x11_surface_get_window(icd_surface);
   xcb_get_geometry_cookie_t geom_cookie;
   xcb_generic_error_t *err;
   xcb_get_geometry_reply_t *geom;
   unsigned visual_depth;

   geom_cookie = xcb_get_geometry(conn, window);

   /* This does a round-trip.  This is why we do get_geometry first and
    * wait to read the reply until after we have a visual.
    */
   xcb_visualtype_t *visual =
      get_visualtype_for_window(conn, window, &visual_depth);

   if (!visual)
      return VK_ERROR_SURFACE_LOST_KHR;

   geom = xcb_get_geometry_reply(conn, geom_cookie, &err);
   if (geom) {
      VkExtent2D extent = { geom->width, geom->height };
      caps->currentExtent = extent;
      caps->minImageExtent = extent;
      caps->maxImageExtent = extent;
   } else {
      /* This can happen if the client didn't wait for the configure event
       * to come back from the compositor.  In that case, we don't know the
       * size of the window so we just return valid "I don't know" stuff.
       */
      caps->currentExtent = (VkExtent2D) { -1, -1 };
      caps->minImageExtent = (VkExtent2D) { 1, 1 };
      /* This is the maximum supported size on Intel */
      caps->maxImageExtent = (VkExtent2D) { 1 << 14, 1 << 14 };
   }
   free(err);
   free(geom);

   if (visual_has_alpha(visual, visual_depth)) {
      caps->supportedCompositeAlpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR |
                                      VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR;
   } else {
      caps->supportedCompositeAlpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR |
                                      VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
   }

   /* For true mailbox mode, we need at least 4 images:
    *  1) One to scan out from
    *  2) One to have queued for scan-out
    *  3) One to be currently held by the X server
    *  4) One to render to
    */
   caps->minImageCount = 2;
   /* There is no real maximum */
   caps->maxImageCount = 0;

   caps->supportedTransforms = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
   caps->currentTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
   caps->maxImageArrayLayers = 1;
   caps->supportedUsageFlags =
      VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
      VK_IMAGE_USAGE_SAMPLED_BIT |
      VK_IMAGE_USAGE_TRANSFER_DST_BIT |
      VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;

   return VK_SUCCESS;
}

static VkResult
x11_surface_get_capabilities2(VkIcdSurfaceBase *icd_surface,
                              const void *info_next,
                              VkSurfaceCapabilities2KHR *caps)
{
   assert(caps->sType == VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR);

   return x11_surface_get_capabilities(icd_surface, &caps->surfaceCapabilities);
}

static VkResult
x11_surface_get_formats(VkIcdSurfaceBase *surface,
                        struct wsi_device *wsi_device,
                        uint32_t *pSurfaceFormatCount,
                        VkSurfaceFormatKHR *pSurfaceFormats)
{
   VK_OUTARRAY_MAKE(out, pSurfaceFormats, pSurfaceFormatCount);

   for (unsigned i = 0; i < ARRAY_SIZE(formats); i++) {
      vk_outarray_append(&out, f) {
         f->format = formats[i];
         f->colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
      }
   }

   return vk_outarray_status(&out);
}

static VkResult
x11_surface_get_formats2(VkIcdSurfaceBase *surface,
                         struct wsi_device *wsi_device,
                         const void *info_next,
                         uint32_t *pSurfaceFormatCount,
                         VkSurfaceFormat2KHR *pSurfaceFormats)
{
   VK_OUTARRAY_MAKE(out, pSurfaceFormats, pSurfaceFormatCount);

   for (unsigned i = 0; i < ARRAY_SIZE(formats); i++) {
      vk_outarray_append(&out, f) {
         assert(f->sType == VK_STRUCTURE_TYPE_SURFACE_FORMAT_2_KHR);
         f->surfaceFormat.format = formats[i];
         f->surfaceFormat.colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
      }
   }

   return vk_outarray_status(&out);
}

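/* Implements the standard Vulkan two-call enumeration idiom: when
 * pPresentModes is NULL, only the count is written; otherwise up to
 * *pPresentModeCount entries are copied and VK_INCOMPLETE is returned if the
 * caller's array was too small to hold them all.
 */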
static VkResult
x11_surface_get_present_modes(VkIcdSurfaceBase *surface,
                              uint32_t *pPresentModeCount,
                              VkPresentModeKHR *pPresentModes)
{
   if (pPresentModes == NULL) {
      *pPresentModeCount = ARRAY_SIZE(present_modes);
      return VK_SUCCESS;
   }

   *pPresentModeCount = MIN2(*pPresentModeCount, ARRAY_SIZE(present_modes));
   typed_memcpy(pPresentModes, present_modes, *pPresentModeCount);

   return *pPresentModeCount < ARRAY_SIZE(present_modes) ?
      VK_INCOMPLETE : VK_SUCCESS;
}

VkResult wsi_create_xcb_surface(const VkAllocationCallbacks *pAllocator,
                                const VkXcbSurfaceCreateInfoKHR *pCreateInfo,
                                VkSurfaceKHR *pSurface)
{
   VkIcdSurfaceXcb *surface;

   surface = vk_alloc(pAllocator, sizeof *surface, 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (surface == NULL)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   surface->base.platform = VK_ICD_WSI_PLATFORM_XCB;
   surface->connection = pCreateInfo->connection;
   surface->window = pCreateInfo->window;

   *pSurface = VkIcdSurfaceBase_to_handle(&surface->base);
   return VK_SUCCESS;
}

VkResult wsi_create_xlib_surface(const VkAllocationCallbacks *pAllocator,
                                 const VkXlibSurfaceCreateInfoKHR *pCreateInfo,
                                 VkSurfaceKHR *pSurface)
{
   VkIcdSurfaceXlib *surface;

   surface = vk_alloc(pAllocator, sizeof *surface, 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (surface == NULL)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   surface->base.platform = VK_ICD_WSI_PLATFORM_XLIB;
   surface->dpy = pCreateInfo->dpy;
   surface->window = pCreateInfo->window;

   *pSurface = VkIcdSurfaceBase_to_handle(&surface->base);
   return VK_SUCCESS;
}

struct x11_image {
   struct wsi_image                          base;
   xcb_pixmap_t                              pixmap;
   bool                                      busy;
   struct xshmfence *                        shm_fence;
   uint32_t                                  sync_fence;
};

struct x11_swapchain {
   struct wsi_swapchain                      base;

   bool                                      has_dri3_modifiers;

   xcb_connection_t *                        conn;
   xcb_window_t                              window;
   xcb_gc_t                                  gc;
   uint32_t                                  depth;
   VkExtent2D                                extent;

   xcb_present_event_t                       event_id;
   xcb_special_event_t *                     special_event;
   uint64_t                                  send_sbc;
   uint64_t                                  last_present_msc;
   uint32_t                                  stamp;

   bool                                      threaded;
   VkResult                                  status;
   xcb_present_complete_mode_t               last_present_mode;
   struct wsi_queue                          present_queue;
   struct wsi_queue                          acquire_queue;
   pthread_t                                 queue_manager;

   struct x11_image                          images[0];
};

/**
 * Update the swapchain status with the result of an operation, and return
 * the combined status. The chain status will eventually be returned from
 * AcquireNextImage and QueuePresent.
 *
 * We make sure to 'stick' more pessimistic statuses: an out-of-date error
 * is permanent once seen, and every subsequent call will return this. If
 * this has not been seen, success will be returned.
 */
static VkResult
x11_swapchain_result(struct x11_swapchain *chain, VkResult result)
{
   /* Prioritise returning existing errors for consistency. */
   if (chain->status < 0)
      return chain->status;

   /* If we have a new error, mark it as permanent on the chain and return. */
   if (result < 0) {
      chain->status = result;
      return result;
   }

   /* Return temporary errors, but don't persist them. */
   if (result == VK_TIMEOUT || result == VK_NOT_READY)
      return result;

   /* Suboptimal isn't an error, but is a status which sticks to the swapchain
    * and is always returned rather than success.
    */
   if (result == VK_SUBOPTIMAL_KHR) {
      chain->status = result;
      return result;
   }

   /* No changes, so return the last status. */
   return chain->status;
}

static struct wsi_image *
x11_get_wsi_image(struct wsi_swapchain *wsi_chain, uint32_t image_index)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)wsi_chain;
   return &chain->images[image_index].base;
}

/**
 * Process an X11 Present event. Does not update chain->status.
 */
static VkResult
x11_handle_dri3_present_event(struct x11_swapchain *chain,
                              xcb_present_generic_event_t *event)
{
   switch (event->evtype) {
   case XCB_PRESENT_CONFIGURE_NOTIFY: {
      xcb_present_configure_notify_event_t *config = (void *) event;

      if (config->width != chain->extent.width ||
          config->height != chain->extent.height)
         return VK_ERROR_OUT_OF_DATE_KHR;

      break;
   }

   case XCB_PRESENT_EVENT_IDLE_NOTIFY: {
      xcb_present_idle_notify_event_t *idle = (void *) event;

      for (unsigned i = 0; i < chain->base.image_count; i++) {
         if (chain->images[i].pixmap == idle->pixmap) {
            chain->images[i].busy = false;
            if (chain->threaded)
               wsi_queue_push(&chain->acquire_queue, i);
            break;
         }
      }

      break;
   }

   case XCB_PRESENT_EVENT_COMPLETE_NOTIFY: {
      xcb_present_complete_notify_event_t *complete = (void *) event;
      if (complete->kind == XCB_PRESENT_COMPLETE_KIND_PIXMAP)
         chain->last_present_msc = complete->msc;

      VkResult result = VK_SUCCESS;

      /* The winsys is now trying to flip directly and cannot due to our
       * configuration. Request the user reallocate.
       */
#ifdef HAVE_DRI3_MODIFIERS
      if (complete->mode == XCB_PRESENT_COMPLETE_MODE_SUBOPTIMAL_COPY &&
          chain->last_present_mode != XCB_PRESENT_COMPLETE_MODE_SUBOPTIMAL_COPY)
         result = VK_SUBOPTIMAL_KHR;
#endif

      /* When we go from flipping to copying, the odds are very likely that
       * we could reallocate in a more optimal way if we didn't have to care
       * about scanout, so we always do this.
       */
      if (complete->mode == XCB_PRESENT_COMPLETE_MODE_COPY &&
          chain->last_present_mode == XCB_PRESENT_COMPLETE_MODE_FLIP)
         result = VK_SUBOPTIMAL_KHR;

      chain->last_present_mode = complete->mode;
      return result;
   }

   default:
      break;
   }

   return VK_SUCCESS;
}


static uint64_t wsi_get_current_time(void)
{
   uint64_t current_time;
   struct timespec tv;

   clock_gettime(CLOCK_MONOTONIC, &tv);
   current_time = tv.tv_nsec + tv.tv_sec*1000000000ull;
   return current_time;
}

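/* Convert a relative timeout (in nanoseconds) into an absolute deadline on
 * the CLOCK_MONOTONIC timeline.  The MIN2 clamp keeps `current_time + timeout`
 * from wrapping around when callers pass UINT64_MAX to mean "wait forever".
 */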
static uint64_t wsi_get_absolute_timeout(uint64_t timeout)
{
   uint64_t current_time = wsi_get_current_time();

   timeout = MIN2(UINT64_MAX - current_time, timeout);

   return current_time + timeout;
}

static VkResult
x11_acquire_next_image_poll_x11(struct x11_swapchain *chain,
                                uint32_t *image_index, uint64_t timeout)
{
   xcb_generic_event_t *event;
   struct pollfd pfds;
   uint64_t atimeout;
   while (1) {
      for (uint32_t i = 0; i < chain->base.image_count; i++) {
         if (!chain->images[i].busy) {
            /* We found a non-busy image */
            xshmfence_await(chain->images[i].shm_fence);
            *image_index = i;
            chain->images[i].busy = true;
            return x11_swapchain_result(chain, VK_SUCCESS);
         }
      }

      xcb_flush(chain->conn);

      if (timeout == UINT64_MAX) {
         event = xcb_wait_for_special_event(chain->conn, chain->special_event);
         if (!event)
            return x11_swapchain_result(chain, VK_ERROR_OUT_OF_DATE_KHR);
      } else {
         event = xcb_poll_for_special_event(chain->conn, chain->special_event);
         if (!event) {
            int ret;
            if (timeout == 0)
               return x11_swapchain_result(chain, VK_NOT_READY);

            atimeout = wsi_get_absolute_timeout(timeout);

            pfds.fd = xcb_get_file_descriptor(chain->conn);
            pfds.events = POLLIN;
            ret = poll(&pfds, 1, timeout / 1000 / 1000);
            if (ret == 0)
               return x11_swapchain_result(chain, VK_TIMEOUT);
            if (ret == -1)
               return x11_swapchain_result(chain, VK_ERROR_OUT_OF_DATE_KHR);

            /* If a non-special event arrives, poll() still wakes up on the
             * connection's fd, so recalculate the remaining timeout before
             * going around the loop again.
             */
            uint64_t current_time = wsi_get_current_time();
            if (atimeout > current_time)
               timeout = atimeout - current_time;
            else
               timeout = 0;
            continue;
         }
      }

      /* Update the swapchain status here.  We may catch non-fatal errors,
       * in which case we need to update the status and continue.
       */
      VkResult result = x11_handle_dri3_present_event(chain, (void *)event);
      free(event);
      if (result < 0)
         return x11_swapchain_result(chain, result);
   }
}

static VkResult
x11_acquire_next_image_from_queue(struct x11_swapchain *chain,
                                  uint32_t *image_index_out, uint64_t timeout)
{
   assert(chain->threaded);

   uint32_t image_index;
   VkResult result = wsi_queue_pull(&chain->acquire_queue,
                                    &image_index, timeout);
   if (result < 0 || result == VK_TIMEOUT) {
      /* On error, the thread has shut down, so it is safe to update
       * chain->status.  Calling x11_swapchain_result with VK_TIMEOUT won't
       * modify chain->status, so that is also safe.
       */
      return x11_swapchain_result(chain, result);
   } else if (chain->status < 0) {
      return chain->status;
   }

   assert(image_index < chain->base.image_count);
   xshmfence_await(chain->images[image_index].shm_fence);

   *image_index_out = image_index;

   return chain->status;
}

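/* Submit one image to the X server via Present.  The 64-bit send_sbc counter
 * is ours; the Present protocol only carries a 32-bit serial, which is why
 * the value is truncated with the (uint32_t) cast below.
 */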
static VkResult
x11_present_to_x11(struct x11_swapchain *chain, uint32_t image_index,
                   uint32_t target_msc)
{
   struct x11_image *image = &chain->images[image_index];

   assert(image_index < chain->base.image_count);

   uint32_t options = XCB_PRESENT_OPTION_NONE;

   int64_t divisor = 0;
   int64_t remainder = 0;

   if (chain->base.present_mode == VK_PRESENT_MODE_IMMEDIATE_KHR)
      options |= XCB_PRESENT_OPTION_ASYNC;

#ifdef HAVE_DRI3_MODIFIERS
   if (chain->has_dri3_modifiers)
      options |= XCB_PRESENT_OPTION_SUBOPTIMAL;
#endif

   xshmfence_reset(image->shm_fence);

   ++chain->send_sbc;
   xcb_void_cookie_t cookie =
      xcb_present_pixmap(chain->conn,
                         chain->window,
                         image->pixmap,
                         (uint32_t) chain->send_sbc,
                         0,          /* valid */
                         0,          /* update */
                         0,          /* x_off */
                         0,          /* y_off */
                         XCB_NONE,   /* target_crtc */
                         XCB_NONE,
                         image->sync_fence,
                         options,
                         target_msc,
                         divisor,
                         remainder, 0, NULL);
   xcb_discard_reply(chain->conn, cookie.sequence);
   image->busy = true;

   xcb_flush(chain->conn);

   return x11_swapchain_result(chain, VK_SUCCESS);
}

static VkResult
x11_acquire_next_image(struct wsi_swapchain *anv_chain,
                       uint64_t timeout,
                       VkSemaphore semaphore,
                       uint32_t *image_index)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;

   if (chain->threaded) {
      return x11_acquire_next_image_from_queue(chain, image_index, timeout);
   } else {
      return x11_acquire_next_image_poll_x11(chain, image_index, timeout);
   }
}

static VkResult
x11_queue_present(struct wsi_swapchain *anv_chain,
                  uint32_t image_index,
                  const VkPresentRegionKHR *damage)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;

   if (chain->threaded) {
      wsi_queue_push(&chain->present_queue, image_index);
      return chain->status;
   } else {
      return x11_present_to_x11(chain, image_index, 0);
   }
}

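/* FIFO worker thread: pulls image indices from present_queue, presents them
 * at the next MSC, then waits for the COMPLETE_NOTIFY event before taking
 * the next one.  A pushed index of UINT32_MAX is the shutdown/error sentinel
 * that x11_swapchain_destroy() and the fail path use to unblock the other
 * side.
 */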
static void *
x11_manage_fifo_queues(void *state)
{
   struct x11_swapchain *chain = state;
   VkResult result;

   assert(chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR);

   while (chain->status >= 0) {
      /* It should be safe to unconditionally block here.  Later in the loop
       * we block until the previous present has landed on-screen.  At that
       * point, we should have received IDLE_NOTIFY on all images presented
       * before that point so the client should be able to acquire any image
       * other than the currently presented one.
       */
      uint32_t image_index;
      result = wsi_queue_pull(&chain->present_queue, &image_index, INT64_MAX);
      assert(result != VK_TIMEOUT);
      if (result < 0) {
         goto fail;
      } else if (chain->status < 0) {
         /* The status can change underneath us if the swapchain is destroyed
          * from another thread.
          */
         return NULL;
      }

      uint64_t target_msc = chain->last_present_msc + 1;
      result = x11_present_to_x11(chain, image_index, target_msc);
      if (result < 0)
         goto fail;

      while (chain->last_present_msc < target_msc) {
         xcb_generic_event_t *event =
            xcb_wait_for_special_event(chain->conn, chain->special_event);
         if (!event) {
            result = VK_ERROR_OUT_OF_DATE_KHR;
            goto fail;
         }

         result = x11_handle_dri3_present_event(chain, (void *)event);
         free(event);
         if (result < 0)
            goto fail;
      }
   }

fail:
   result = x11_swapchain_result(chain, result);
   wsi_queue_push(&chain->acquire_queue, UINT32_MAX);

   return NULL;
}

static VkResult
x11_image_init(VkDevice device_h, struct x11_swapchain *chain,
               const VkSwapchainCreateInfoKHR *pCreateInfo,
               const VkAllocationCallbacks* pAllocator,
               const uint64_t *const *modifiers,
               const uint32_t *num_modifiers,
               int num_tranches, struct x11_image *image)
{
   xcb_void_cookie_t cookie;
   VkResult result;
   uint32_t bpp = 32;

   if (chain->base.use_prime_blit) {
      bool use_modifier = num_tranches > 0;
      result = wsi_create_prime_image(&chain->base, pCreateInfo, use_modifier, &image->base);
   } else {
      result = wsi_create_native_image(&chain->base, pCreateInfo,
                                       num_tranches, num_modifiers, modifiers,
                                       &image->base);
   }
   if (result < 0)
      return result;

   image->pixmap = xcb_generate_id(chain->conn);

#ifdef HAVE_DRI3_MODIFIERS
   if (image->base.drm_modifier != DRM_FORMAT_MOD_INVALID) {
      /* If the image has a modifier, we must have DRI3 v1.2. */
      assert(chain->has_dri3_modifiers);

      cookie =
         xcb_dri3_pixmap_from_buffers_checked(chain->conn,
                                              image->pixmap,
                                              chain->window,
                                              image->base.num_planes,
                                              pCreateInfo->imageExtent.width,
                                              pCreateInfo->imageExtent.height,
                                              image->base.row_pitches[0],
                                              image->base.offsets[0],
                                              image->base.row_pitches[1],
                                              image->base.offsets[1],
                                              image->base.row_pitches[2],
                                              image->base.offsets[2],
                                              image->base.row_pitches[3],
                                              image->base.offsets[3],
                                              chain->depth, bpp,
                                              image->base.drm_modifier,
                                              image->base.fds);
   } else
#endif
   {
      /* Without passing modifiers, we can't have multi-plane RGB images. */
      assert(image->base.num_planes == 1);

      cookie =
         xcb_dri3_pixmap_from_buffer_checked(chain->conn,
                                             image->pixmap,
                                             chain->window,
                                             image->base.sizes[0],
                                             pCreateInfo->imageExtent.width,
                                             pCreateInfo->imageExtent.height,
                                             image->base.row_pitches[0],
                                             chain->depth, bpp,
                                             image->base.fds[0]);
   }

   xcb_discard_reply(chain->conn, cookie.sequence);

   /* XCB has now taken ownership of the FDs. */
   for (int i = 0; i < image->base.num_planes; i++)
      image->base.fds[i] = -1;

   int fence_fd = xshmfence_alloc_shm();
   if (fence_fd < 0) {
      /* Make sure the fail path reports an error rather than the stale
       * VK_SUCCESS left in `result` by the image creation above.
       */
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail_pixmap;
   }

   image->shm_fence = xshmfence_map_shm(fence_fd);
   if (image->shm_fence == NULL) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail_shmfence_alloc;
   }

   image->sync_fence = xcb_generate_id(chain->conn);
   xcb_dri3_fence_from_fd(chain->conn,
                          image->pixmap,
                          image->sync_fence,
                          false,
                          fence_fd);

   image->busy = false;
   xshmfence_trigger(image->shm_fence);

   return VK_SUCCESS;

fail_shmfence_alloc:
   close(fence_fd);

fail_pixmap:
   cookie = xcb_free_pixmap(chain->conn, image->pixmap);
   xcb_discard_reply(chain->conn, cookie.sequence);

   wsi_destroy_image(&chain->base, &image->base);

   return result;
}

static void
x11_image_finish(struct x11_swapchain *chain,
                 const VkAllocationCallbacks* pAllocator,
                 struct x11_image *image)
{
   xcb_void_cookie_t cookie;

   cookie = xcb_sync_destroy_fence(chain->conn, image->sync_fence);
   xcb_discard_reply(chain->conn, cookie.sequence);
   xshmfence_unmap_shm(image->shm_fence);

   cookie = xcb_free_pixmap(chain->conn, image->pixmap);
   xcb_discard_reply(chain->conn, cookie.sequence);

   wsi_destroy_image(&chain->base, &image->base);
}

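/* Ask the X server which DRM format modifiers the window and screen support,
 * returning up to two tranches (window-specific modifiers first, then
 * screen-wide ones) through modifiers_in / num_modifiers_in / num_tranches_in.
 */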
static void
wsi_x11_get_dri3_modifiers(struct wsi_x11_connection *wsi_conn,
                           xcb_connection_t *conn, xcb_window_t window,
                           uint8_t depth, uint8_t bpp,
                           VkCompositeAlphaFlagsKHR vk_alpha,
                           uint64_t **modifiers_in, uint32_t *num_modifiers_in,
                           uint32_t *num_tranches_in,
                           const VkAllocationCallbacks *pAllocator)
{
   if (!wsi_conn->has_dri3_modifiers)
      goto out;

#ifdef HAVE_DRI3_MODIFIERS
   xcb_generic_error_t *error = NULL;
   xcb_dri3_get_supported_modifiers_cookie_t mod_cookie =
      xcb_dri3_get_supported_modifiers(conn, window, depth, bpp);
   xcb_dri3_get_supported_modifiers_reply_t *mod_reply =
      xcb_dri3_get_supported_modifiers_reply(conn, mod_cookie, &error);
   free(error);

   if (!mod_reply || (mod_reply->num_window_modifiers == 0 &&
                      mod_reply->num_screen_modifiers == 0)) {
      free(mod_reply);
      goto out;
   }

   uint32_t n = 0;
   uint32_t counts[2];
   uint64_t *modifiers[2];

   if (mod_reply->num_window_modifiers) {
      counts[n] = mod_reply->num_window_modifiers;
      modifiers[n] = vk_alloc(pAllocator,
                              counts[n] * sizeof(uint64_t),
                              8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (!modifiers[n]) {
         free(mod_reply);
         goto out;
      }

      memcpy(modifiers[n],
             xcb_dri3_get_supported_modifiers_window_modifiers(mod_reply),
             counts[n] * sizeof(uint64_t));
      n++;
   }

   if (mod_reply->num_screen_modifiers) {
      counts[n] = mod_reply->num_screen_modifiers;
      modifiers[n] = vk_alloc(pAllocator,
                              counts[n] * sizeof(uint64_t),
                              8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (!modifiers[n]) {
         if (n > 0)
            vk_free(pAllocator, modifiers[0]);
         free(mod_reply);
         goto out;
      }

      memcpy(modifiers[n],
             xcb_dri3_get_supported_modifiers_screen_modifiers(mod_reply),
             counts[n] * sizeof(uint64_t));
      n++;
   }

   for (int i = 0; i < n; i++) {
      modifiers_in[i] = modifiers[i];
      num_modifiers_in[i] = counts[i];
   }
   *num_tranches_in = n;

   free(mod_reply);
   return;
#endif
out:
   *num_tranches_in = 0;
}

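/* Tear down a swapchain.  If the FIFO thread is running it is woken with the
 * UINT32_MAX sentinel and joined before the queues and images are destroyed,
 * and the Present event mask on the window is cleared last.
 */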
static VkResult
x11_swapchain_destroy(struct wsi_swapchain *anv_chain,
                      const VkAllocationCallbacks *pAllocator)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
   xcb_void_cookie_t cookie;

   if (chain->threaded) {
      chain->status = VK_ERROR_OUT_OF_DATE_KHR;
      /* Push a UINT32_MAX to wake up the manager */
      wsi_queue_push(&chain->present_queue, UINT32_MAX);
      pthread_join(chain->queue_manager, NULL);
      wsi_queue_destroy(&chain->acquire_queue);
      wsi_queue_destroy(&chain->present_queue);
   }

   for (uint32_t i = 0; i < chain->base.image_count; i++)
      x11_image_finish(chain, pAllocator, &chain->images[i]);

   xcb_unregister_for_special_event(chain->conn, chain->special_event);
   cookie = xcb_present_select_input_checked(chain->conn, chain->event_id,
                                             chain->window,
                                             XCB_PRESENT_EVENT_MASK_NO_EVENT);
   xcb_discard_reply(chain->conn, cookie.sequence);

   wsi_swapchain_finish(&chain->base);

   vk_free(pAllocator, chain);

   return VK_SUCCESS;
}

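/* Create an X11 swapchain: allocate the chain and its images, register for
 * Present events on the window, and, for FIFO mode, spin up the queue-manager
 * thread that serializes presents against the display's vblank counter (MSC).
 */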
static VkResult
x11_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
                             VkDevice device,
                             struct wsi_device *wsi_device,
                             int local_fd,
                             const VkSwapchainCreateInfoKHR *pCreateInfo,
                             const VkAllocationCallbacks* pAllocator,
                             struct wsi_swapchain **swapchain_out)
{
   struct x11_swapchain *chain;
   xcb_void_cookie_t cookie;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR);

   const unsigned num_images = pCreateInfo->minImageCount;

   xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
   struct wsi_x11_connection *wsi_conn =
      wsi_x11_get_connection(wsi_device, pAllocator, conn);
   if (!wsi_conn)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   /* Check for whether or not we have a window up-front */
   xcb_window_t window = x11_surface_get_window(icd_surface);
   xcb_get_geometry_reply_t *geometry =
      xcb_get_geometry_reply(conn, xcb_get_geometry(conn, window), NULL);
   if (geometry == NULL)
      return VK_ERROR_SURFACE_LOST_KHR;
   const uint32_t bit_depth = geometry->depth;
   free(geometry);

   size_t size = sizeof(*chain) + num_images * sizeof(chain->images[0]);
   chain = vk_alloc(pAllocator, size, 8,
                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (chain == NULL)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   result = wsi_swapchain_init(wsi_device, &chain->base, device,
                               pCreateInfo, pAllocator);
   if (result != VK_SUCCESS)
      goto fail_alloc;

   chain->base.destroy = x11_swapchain_destroy;
   chain->base.get_wsi_image = x11_get_wsi_image;
   chain->base.acquire_next_image = x11_acquire_next_image;
   chain->base.queue_present = x11_queue_present;
   chain->base.present_mode = pCreateInfo->presentMode;
   chain->base.image_count = num_images;
   chain->conn = conn;
   chain->window = window;
   chain->depth = bit_depth;
   chain->extent = pCreateInfo->imageExtent;
   chain->send_sbc = 0;
   chain->last_present_msc = 0;
   chain->threaded = false;
   chain->status = VK_SUCCESS;
   chain->has_dri3_modifiers = wsi_conn->has_dri3_modifiers;

   /* If we are reallocating from an old swapchain, then we inherit its
    * last completion mode, to ensure we don't get into reallocation
    * cycles. If we are starting anew, we set 'COPY', as that is the only
    * mode which provokes reallocation when anything changes, to make
    * sure we have the most optimal allocation.
    */
   struct x11_swapchain *old_chain = (void *) pCreateInfo->oldSwapchain;
   if (old_chain)
      chain->last_present_mode = old_chain->last_present_mode;
   else
      chain->last_present_mode = XCB_PRESENT_COMPLETE_MODE_COPY;

   if (!wsi_x11_check_dri3_compatible(conn, local_fd))
      chain->base.use_prime_blit = true;

   chain->event_id = xcb_generate_id(chain->conn);
   xcb_present_select_input(chain->conn, chain->event_id, chain->window,
                            XCB_PRESENT_EVENT_MASK_CONFIGURE_NOTIFY |
                            XCB_PRESENT_EVENT_MASK_COMPLETE_NOTIFY |
                            XCB_PRESENT_EVENT_MASK_IDLE_NOTIFY);

   /* Create an XCB event queue to hold present events outside of the usual
    * application event queue
    */
   chain->special_event =
      xcb_register_for_special_xge(chain->conn, &xcb_present_id,
                                   chain->event_id, NULL);

   chain->gc = xcb_generate_id(chain->conn);
   if (!chain->gc) {
      /* FINISHME: Choose a better error. */
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail_register;
   }

   cookie = xcb_create_gc(chain->conn,
                          chain->gc,
                          chain->window,
                          XCB_GC_GRAPHICS_EXPOSURES,
                          (uint32_t []) { 0 });
   xcb_discard_reply(chain->conn, cookie.sequence);

   uint64_t *modifiers[2] = {NULL, NULL};
   uint32_t num_modifiers[2] = {0, 0};
   uint32_t num_tranches = 0;
   if (wsi_device->supports_modifiers)
      wsi_x11_get_dri3_modifiers(wsi_conn, conn, window, chain->depth, 32,
                                 pCreateInfo->compositeAlpha,
                                 modifiers, num_modifiers, &num_tranches,
                                 pAllocator);

   uint32_t image = 0;
   for (; image < chain->base.image_count; image++) {
      result = x11_image_init(device, chain, pCreateInfo, pAllocator,
                              (const uint64_t *const *)modifiers,
                              num_modifiers, num_tranches,
                              &chain->images[image]);
      if (result != VK_SUCCESS)
         goto fail_init_images;
   }

   if (chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR) {
      chain->threaded = true;

      /* Initialize our queues.  We make them base.image_count + 1 because we
       * will occasionally use UINT32_MAX to signal the other thread that an
       * error has occurred and we don't want an overflow.
       */
      int ret;
      ret = wsi_queue_init(&chain->acquire_queue, chain->base.image_count + 1);
      if (ret) {
         goto fail_init_images;
      }

      ret = wsi_queue_init(&chain->present_queue, chain->base.image_count + 1);
      if (ret) {
         wsi_queue_destroy(&chain->acquire_queue);
         goto fail_init_images;
      }

      for (unsigned i = 0; i < chain->base.image_count; i++)
         wsi_queue_push(&chain->acquire_queue, i);

      ret = pthread_create(&chain->queue_manager, NULL,
                           x11_manage_fifo_queues, chain);
      if (ret) {
         wsi_queue_destroy(&chain->present_queue);
         wsi_queue_destroy(&chain->acquire_queue);
         goto fail_init_images;
      }
   }

   for (int i = 0; i < ARRAY_SIZE(modifiers); i++)
      vk_free(pAllocator, modifiers[i]);
   *swapchain_out = &chain->base;

   return VK_SUCCESS;

fail_init_images:
   for (uint32_t j = 0; j < image; j++)
      x11_image_finish(chain, pAllocator, &chain->images[j]);

   for (int i = 0; i < ARRAY_SIZE(modifiers); i++)
      vk_free(pAllocator, modifiers[i]);

fail_register:
   xcb_unregister_for_special_event(chain->conn, chain->special_event);

   wsi_swapchain_finish(&chain->base);

fail_alloc:
   vk_free(pAllocator, chain);

   return result;
}

VkResult
wsi_x11_init_wsi(struct wsi_device *wsi_device,
                 const VkAllocationCallbacks *alloc)
{
   struct wsi_x11 *wsi;
   VkResult result;

   wsi = vk_alloc(alloc, sizeof(*wsi), 8,
                  VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!wsi) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail;
   }

   int ret = pthread_mutex_init(&wsi->mutex, NULL);
   if (ret != 0) {
      if (ret == ENOMEM) {
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
      } else {
         /* FINISHME: Choose a better error. */
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
      }

      goto fail_alloc;
   }

   wsi->connections = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                              _mesa_key_pointer_equal);
   if (!wsi->connections) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail_mutex;
   }

   wsi->base.get_support = x11_surface_get_support;
   wsi->base.get_capabilities2 = x11_surface_get_capabilities2;
   wsi->base.get_formats = x11_surface_get_formats;
   wsi->base.get_formats2 = x11_surface_get_formats2;
   wsi->base.get_present_modes = x11_surface_get_present_modes;
   wsi->base.create_swapchain = x11_surface_create_swapchain;

   wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB] = &wsi->base;
   wsi_device->wsi[VK_ICD_WSI_PLATFORM_XLIB] = &wsi->base;

   return VK_SUCCESS;

fail_mutex:
   pthread_mutex_destroy(&wsi->mutex);
fail_alloc:
   vk_free(alloc, wsi);
fail:
   wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB] = NULL;
   wsi_device->wsi[VK_ICD_WSI_PLATFORM_XLIB] = NULL;

   return result;
}

void
wsi_x11_finish_wsi(struct wsi_device *wsi_device,
                   const VkAllocationCallbacks *alloc)
{
   struct wsi_x11 *wsi =
      (struct wsi_x11 *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB];

   if (wsi) {
      struct hash_entry *entry;
      hash_table_foreach(wsi->connections, entry)
         wsi_x11_connection_destroy(alloc, entry->data);

      _mesa_hash_table_destroy(wsi->connections, NULL);

      pthread_mutex_destroy(&wsi->mutex);

      vk_free(alloc, wsi);
   }
}