vulkan/wsi: Plumb present regions through the common code
mesa.git: src/vulkan/wsi/wsi_common_x11.c
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <X11/Xlib-xcb.h>
25 #include <X11/xshmfence.h>
26 #include <xcb/xcb.h>
27 #include <xcb/dri3.h>
28 #include <xcb/present.h>
29
30 #include "util/macros.h"
31 #include <stdlib.h>
32 #include <stdio.h>
33 #include <unistd.h>
34 #include <errno.h>
35 #include <string.h>
36 #include <fcntl.h>
37 #include <poll.h>
38 #include <xf86drm.h>
39 #include "util/hash_table.h"
40
41 #include "wsi_common.h"
42 #include "wsi_common_x11.h"
43 #include "wsi_common_queue.h"
44
45 #define typed_memcpy(dest, src, count) ({ \
46    STATIC_ASSERT(sizeof(*(src)) == sizeof(*(dest))); \
47 memcpy((dest), (src), (count) * sizeof(*(src))); \
48 })
49
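/*
 * typed_memcpy() wraps memcpy() with a compile-time check that source and
 * destination have the same element size, so a count given in elements can
 * never silently be treated as a byte count. A minimal sketch of the kind of
 * call site it is meant for (hypothetical locals, mirroring the surface-format
 * copy further down in this file):
 *
 *    VkSurfaceFormatKHR out[2];
 *    uint32_t count = MIN2((uint32_t)ARRAY_SIZE(out), ARRAY_SIZE(formats));
 *    typed_memcpy(out, formats, count);        // copies `count` elements
 *    // typed_memcpy(out, present_modes, count) would trip the STATIC_ASSERT,
 *    // since VkPresentModeKHR and VkSurfaceFormatKHR differ in size.
 */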
50 struct wsi_x11_connection {
51 bool has_dri3;
52 bool has_present;
53 bool is_proprietary_x11;
54 };
55
56 struct wsi_x11 {
57 struct wsi_interface base;
58
59 pthread_mutex_t mutex;
60 /* Hash table of xcb_connection -> wsi_x11_connection mappings */
61 struct hash_table *connections;
62 };
63
64
65 /** wsi_dri3_open
66 *
67 * Wrapper around xcb_dri3_open
68 */
69 static int
70 wsi_dri3_open(xcb_connection_t *conn,
71 xcb_window_t root,
72 uint32_t provider)
73 {
74 xcb_dri3_open_cookie_t cookie;
75 xcb_dri3_open_reply_t *reply;
76 int fd;
77
78 cookie = xcb_dri3_open(conn,
79 root,
80 provider);
81
82 reply = xcb_dri3_open_reply(conn, cookie, NULL);
83 if (!reply)
84 return -1;
85
86 if (reply->nfd != 1) {
87 free(reply);
88 return -1;
89 }
90
91 fd = xcb_dri3_open_reply_fds(conn, reply)[0];
92 free(reply);
93 fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
94
95 return fd;
96 }
97
98 static bool
99 wsi_x11_check_dri3_compatible(xcb_connection_t *conn, int local_fd)
100 {
101 xcb_screen_iterator_t screen_iter =
102 xcb_setup_roots_iterator(xcb_get_setup(conn));
103 xcb_screen_t *screen = screen_iter.data;
104
105 int dri3_fd = wsi_dri3_open(conn, screen->root, None);
106 if (dri3_fd != -1) {
107 char *local_dev = drmGetRenderDeviceNameFromFd(local_fd);
108 char *dri3_dev = drmGetRenderDeviceNameFromFd(dri3_fd);
109 int ret;
110
111 close(dri3_fd);
112
113 ret = strcmp(local_dev, dri3_dev);
114
115 free(local_dev);
116 free(dri3_dev);
117
118 if (ret != 0)
119 return false;
120 }
121 return true;
122 }
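/*
 * For illustration: drmGetRenderDeviceNameFromFd() resolves a DRM fd to its
 * render-node path, so the compatibility check above boils down to comparing
 * strings such as "/dev/dri/renderD128". A standalone sketch using the headers
 * this file already includes (the paths are examples, not guaranteed values):
 *
 *    static bool same_render_node(int fd_a, int fd_b)
 *    {
 *       char *a = drmGetRenderDeviceNameFromFd(fd_a);
 *       char *b = drmGetRenderDeviceNameFromFd(fd_b);
 *       bool same = a && b && strcmp(a, b) == 0;
 *       free(a);
 *       free(b);
 *       return same;
 *    }
 */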
123
124 static struct wsi_x11_connection *
125 wsi_x11_connection_create(const VkAllocationCallbacks *alloc,
126 xcb_connection_t *conn)
127 {
128 xcb_query_extension_cookie_t dri3_cookie, pres_cookie, amd_cookie, nv_cookie;
129 xcb_query_extension_reply_t *dri3_reply, *pres_reply, *amd_reply, *nv_reply;
130
131 struct wsi_x11_connection *wsi_conn =
132 vk_alloc(alloc, sizeof(*wsi_conn), 8,
133 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
134 if (!wsi_conn)
135 return NULL;
136
137 dri3_cookie = xcb_query_extension(conn, 4, "DRI3");
138 pres_cookie = xcb_query_extension(conn, 7, "PRESENT");
139
140 /* We try to be nice to users and emit a warning if they try to use a
141 * Vulkan application on a system without DRI3 enabled. However, this ends
142 * up spewing the warning when a user has, for example, both Intel
143    * integrated graphics and a discrete card with proprietary drivers and is
144 * running on the discrete card with the proprietary DDX. In this case, we
145 * really don't want to print the warning because it just confuses users.
146 * As a heuristic to detect this case, we check for a couple of proprietary
147 * X11 extensions.
148 */
149 amd_cookie = xcb_query_extension(conn, 11, "ATIFGLRXDRI");
150 nv_cookie = xcb_query_extension(conn, 10, "NV-CONTROL");
151
152 dri3_reply = xcb_query_extension_reply(conn, dri3_cookie, NULL);
153 pres_reply = xcb_query_extension_reply(conn, pres_cookie, NULL);
154 amd_reply = xcb_query_extension_reply(conn, amd_cookie, NULL);
155 nv_reply = xcb_query_extension_reply(conn, nv_cookie, NULL);
156 if (!dri3_reply || !pres_reply) {
157 free(dri3_reply);
158 free(pres_reply);
159 free(amd_reply);
160 free(nv_reply);
161 vk_free(alloc, wsi_conn);
162 return NULL;
163 }
164
165 wsi_conn->has_dri3 = dri3_reply->present != 0;
166 wsi_conn->has_present = pres_reply->present != 0;
167 wsi_conn->is_proprietary_x11 = false;
168 if (amd_reply && amd_reply->present)
169 wsi_conn->is_proprietary_x11 = true;
170 if (nv_reply && nv_reply->present)
171 wsi_conn->is_proprietary_x11 = true;
172
173 free(dri3_reply);
174 free(pres_reply);
175 free(amd_reply);
176 free(nv_reply);
177
178 return wsi_conn;
179 }
180
181 static void
182 wsi_x11_connection_destroy(const VkAllocationCallbacks *alloc,
183 struct wsi_x11_connection *conn)
184 {
185 vk_free(alloc, conn);
186 }
187
188 static bool
189 wsi_x11_check_for_dri3(struct wsi_x11_connection *wsi_conn)
190 {
191 if (wsi_conn->has_dri3)
192 return true;
193 if (!wsi_conn->is_proprietary_x11) {
194 fprintf(stderr, "vulkan: No DRI3 support detected - required for presentation\n"
195 "Note: you can probably enable DRI3 in your Xorg config\n");
196 }
197 return false;
198 }
199
200 static struct wsi_x11_connection *
201 wsi_x11_get_connection(struct wsi_device *wsi_dev,
202 const VkAllocationCallbacks *alloc,
203 xcb_connection_t *conn)
204 {
205 struct wsi_x11 *wsi =
206 (struct wsi_x11 *)wsi_dev->wsi[VK_ICD_WSI_PLATFORM_XCB];
207
208 pthread_mutex_lock(&wsi->mutex);
209
210 struct hash_entry *entry = _mesa_hash_table_search(wsi->connections, conn);
211 if (!entry) {
212 /* We're about to make a bunch of blocking calls. Let's drop the
213 * mutex for now so we don't block up too badly.
214 */
215 pthread_mutex_unlock(&wsi->mutex);
216
217 struct wsi_x11_connection *wsi_conn =
218 wsi_x11_connection_create(alloc, conn);
219 if (!wsi_conn)
220 return NULL;
221
222 pthread_mutex_lock(&wsi->mutex);
223
224 entry = _mesa_hash_table_search(wsi->connections, conn);
225 if (entry) {
226 /* Oops, someone raced us to it */
227 wsi_x11_connection_destroy(alloc, wsi_conn);
228 } else {
229 entry = _mesa_hash_table_insert(wsi->connections, conn, wsi_conn);
230 }
231 }
232
233 pthread_mutex_unlock(&wsi->mutex);
234
235 return entry->data;
236 }
237
238 static const VkSurfaceFormatKHR formats[] = {
239 { .format = VK_FORMAT_B8G8R8A8_SRGB, },
240 { .format = VK_FORMAT_B8G8R8A8_UNORM, },
241 };
242
243 static const VkPresentModeKHR present_modes[] = {
244 VK_PRESENT_MODE_IMMEDIATE_KHR,
245 VK_PRESENT_MODE_MAILBOX_KHR,
246 VK_PRESENT_MODE_FIFO_KHR,
247 };
248
249 static xcb_screen_t *
250 get_screen_for_root(xcb_connection_t *conn, xcb_window_t root)
251 {
252 xcb_screen_iterator_t screen_iter =
253 xcb_setup_roots_iterator(xcb_get_setup(conn));
254
255 for (; screen_iter.rem; xcb_screen_next (&screen_iter)) {
256 if (screen_iter.data->root == root)
257 return screen_iter.data;
258 }
259
260 return NULL;
261 }
262
263 static xcb_visualtype_t *
264 screen_get_visualtype(xcb_screen_t *screen, xcb_visualid_t visual_id,
265 unsigned *depth)
266 {
267 xcb_depth_iterator_t depth_iter =
268 xcb_screen_allowed_depths_iterator(screen);
269
270 for (; depth_iter.rem; xcb_depth_next (&depth_iter)) {
271 xcb_visualtype_iterator_t visual_iter =
272 xcb_depth_visuals_iterator (depth_iter.data);
273
274 for (; visual_iter.rem; xcb_visualtype_next (&visual_iter)) {
275 if (visual_iter.data->visual_id == visual_id) {
276 if (depth)
277 *depth = depth_iter.data->depth;
278 return visual_iter.data;
279 }
280 }
281 }
282
283 return NULL;
284 }
285
286 static xcb_visualtype_t *
287 connection_get_visualtype(xcb_connection_t *conn, xcb_visualid_t visual_id,
288 unsigned *depth)
289 {
290 xcb_screen_iterator_t screen_iter =
291 xcb_setup_roots_iterator(xcb_get_setup(conn));
292
293 /* For this we have to iterate over all of the screens which is rather
294 * annoying. Fortunately, there is probably only 1.
295 */
296 for (; screen_iter.rem; xcb_screen_next (&screen_iter)) {
297 xcb_visualtype_t *visual = screen_get_visualtype(screen_iter.data,
298 visual_id, depth);
299 if (visual)
300 return visual;
301 }
302
303 return NULL;
304 }
305
306 static xcb_visualtype_t *
307 get_visualtype_for_window(xcb_connection_t *conn, xcb_window_t window,
308 unsigned *depth)
309 {
310 xcb_query_tree_cookie_t tree_cookie;
311 xcb_get_window_attributes_cookie_t attrib_cookie;
312 xcb_query_tree_reply_t *tree;
313 xcb_get_window_attributes_reply_t *attrib;
314
315 tree_cookie = xcb_query_tree(conn, window);
316 attrib_cookie = xcb_get_window_attributes(conn, window);
317
318 tree = xcb_query_tree_reply(conn, tree_cookie, NULL);
319 attrib = xcb_get_window_attributes_reply(conn, attrib_cookie, NULL);
320 if (attrib == NULL || tree == NULL) {
321 free(attrib);
322 free(tree);
323 return NULL;
324 }
325
326 xcb_window_t root = tree->root;
327 xcb_visualid_t visual_id = attrib->visual;
328 free(attrib);
329 free(tree);
330
331 xcb_screen_t *screen = get_screen_for_root(conn, root);
332 if (screen == NULL)
333 return NULL;
334
335 return screen_get_visualtype(screen, visual_id, depth);
336 }
337
338 static bool
339 visual_has_alpha(xcb_visualtype_t *visual, unsigned depth)
340 {
341 uint32_t rgb_mask = visual->red_mask |
342 visual->green_mask |
343 visual->blue_mask;
344
345 uint32_t all_mask = 0xffffffff >> (32 - depth);
346
347 /* Do we have bits left over after RGB? */
348 return (all_mask & ~rgb_mask) != 0;
349 }
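/*
 * A worked example of the mask arithmetic above, for a typical 32-bit
 * TrueColor visual with red_mask 0x00ff0000, green_mask 0x0000ff00 and
 * blue_mask 0x000000ff:
 *
 *    rgb_mask             = 0x00ffffff
 *    all_mask             = 0xffffffff >> (32 - 32) = 0xffffffff
 *    all_mask & ~rgb_mask = 0xff000000   (non-zero, so the visual has alpha)
 *
 * With the same RGB masks at depth 24, all_mask is 0x00ffffff, the leftover
 * bits are zero, and the visual is treated as opaque.
 */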
350
351 VkBool32 wsi_get_physical_device_xcb_presentation_support(
352 struct wsi_device *wsi_device,
353 VkAllocationCallbacks *alloc,
354 uint32_t queueFamilyIndex,
355 int fd,
356 bool can_handle_different_gpu,
357 xcb_connection_t* connection,
358 xcb_visualid_t visual_id)
359 {
360 struct wsi_x11_connection *wsi_conn =
361 wsi_x11_get_connection(wsi_device, alloc, connection);
362
363 if (!wsi_conn)
364 return false;
365
366 if (!wsi_x11_check_for_dri3(wsi_conn))
367 return false;
368
369 if (!can_handle_different_gpu)
370 if (!wsi_x11_check_dri3_compatible(connection, fd))
371 return false;
372
373 unsigned visual_depth;
374 if (!connection_get_visualtype(connection, visual_id, &visual_depth))
375 return false;
376
377 if (visual_depth != 24 && visual_depth != 32)
378 return false;
379
380 return true;
381 }
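/*
 * A driver typically forwards its vkGetPhysicalDeviceXcbPresentationSupportKHR
 * entry point to the helper above. A minimal sketch, assuming a hypothetical
 * driver physical-device type that carries a wsi_device, an allocator and its
 * DRM fd (the drv_* names are illustrative, not part of this file):
 *
 *    VkBool32 drv_GetPhysicalDeviceXcbPresentationSupportKHR(
 *       VkPhysicalDevice physicalDevice,
 *       uint32_t queueFamilyIndex,
 *       xcb_connection_t *connection,
 *       xcb_visualid_t visual_id)
 *    {
 *       struct drv_physical_device *pdev =
 *          drv_physical_device_from_handle(physicalDevice);
 *
 *       return wsi_get_physical_device_xcb_presentation_support(
 *          &pdev->wsi_device, &pdev->instance->alloc, queueFamilyIndex,
 *          pdev->local_fd, false,       // cannot present across GPUs
 *          connection, visual_id);
 *    }
 */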
382
383 static xcb_connection_t*
384 x11_surface_get_connection(VkIcdSurfaceBase *icd_surface)
385 {
386 if (icd_surface->platform == VK_ICD_WSI_PLATFORM_XLIB)
387 return XGetXCBConnection(((VkIcdSurfaceXlib *)icd_surface)->dpy);
388 else
389 return ((VkIcdSurfaceXcb *)icd_surface)->connection;
390 }
391
392 static xcb_window_t
393 x11_surface_get_window(VkIcdSurfaceBase *icd_surface)
394 {
395 if (icd_surface->platform == VK_ICD_WSI_PLATFORM_XLIB)
396 return ((VkIcdSurfaceXlib *)icd_surface)->window;
397 else
398 return ((VkIcdSurfaceXcb *)icd_surface)->window;
399 }
400
401 static VkResult
402 x11_surface_get_support(VkIcdSurfaceBase *icd_surface,
403 struct wsi_device *wsi_device,
404 const VkAllocationCallbacks *alloc,
405 uint32_t queueFamilyIndex,
406 int local_fd,
407 bool can_handle_different_gpu,
408 VkBool32* pSupported)
409 {
410 xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
411 xcb_window_t window = x11_surface_get_window(icd_surface);
412
413 struct wsi_x11_connection *wsi_conn =
414 wsi_x11_get_connection(wsi_device, alloc, conn);
415 if (!wsi_conn)
416 return VK_ERROR_OUT_OF_HOST_MEMORY;
417
418 if (!wsi_x11_check_for_dri3(wsi_conn)) {
419 *pSupported = false;
420 return VK_SUCCESS;
421 }
422
423    if (!can_handle_different_gpu &&
424        !wsi_x11_check_dri3_compatible(conn, local_fd)) {
425       *pSupported = false;
426       return VK_SUCCESS;
427    }
428    unsigned visual_depth;
429    if (!get_visualtype_for_window(conn, window, &visual_depth)) {
430       *pSupported = false;
431       return VK_SUCCESS;
432    }
433    if (visual_depth != 24 && visual_depth != 32) {
434       *pSupported = false;
435       return VK_SUCCESS;
436    }
437
438 *pSupported = true;
439 return VK_SUCCESS;
440 }
441
442 static VkResult
443 x11_surface_get_capabilities(VkIcdSurfaceBase *icd_surface,
444 VkSurfaceCapabilitiesKHR *caps)
445 {
446 xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
447 xcb_window_t window = x11_surface_get_window(icd_surface);
448 xcb_get_geometry_cookie_t geom_cookie;
449 xcb_generic_error_t *err;
450 xcb_get_geometry_reply_t *geom;
451 unsigned visual_depth;
452
453 geom_cookie = xcb_get_geometry(conn, window);
454
455 /* This does a round-trip. This is why we do get_geometry first and
456 * wait to read the reply until after we have a visual.
457 */
458 xcb_visualtype_t *visual =
459 get_visualtype_for_window(conn, window, &visual_depth);
460
461 if (!visual)
462 return VK_ERROR_SURFACE_LOST_KHR;
463
464 geom = xcb_get_geometry_reply(conn, geom_cookie, &err);
465 if (geom) {
466 VkExtent2D extent = { geom->width, geom->height };
467 caps->currentExtent = extent;
468 caps->minImageExtent = extent;
469 caps->maxImageExtent = extent;
470 } else {
471 /* This can happen if the client didn't wait for the configure event
472 * to come back from the compositor. In that case, we don't know the
473 * size of the window so we just return valid "I don't know" stuff.
474 */
475 caps->currentExtent = (VkExtent2D) { -1, -1 };
476 caps->minImageExtent = (VkExtent2D) { 1, 1 };
477 /* This is the maximum supported size on Intel */
478 caps->maxImageExtent = (VkExtent2D) { 1 << 14, 1 << 14 };
479 }
480 free(err);
481 free(geom);
482
483 if (visual_has_alpha(visual, visual_depth)) {
484 caps->supportedCompositeAlpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR |
485 VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR;
486 } else {
487 caps->supportedCompositeAlpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR |
488 VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
489 }
490
491 /* For true mailbox mode, we need at least 4 images:
492 * 1) One to scan out from
493 * 2) One to have queued for scan-out
494 * 3) One to be currently held by the X server
495 * 4) One to render to
496 */
497 caps->minImageCount = 2;
498 /* There is no real maximum */
499 caps->maxImageCount = 0;
500
501 caps->supportedTransforms = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
502 caps->currentTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
503 caps->maxImageArrayLayers = 1;
504 caps->supportedUsageFlags =
505 VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
506 VK_IMAGE_USAGE_SAMPLED_BIT |
507 VK_IMAGE_USAGE_TRANSFER_DST_BIT |
508 VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
509
510 return VK_SUCCESS;
511 }
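/*
 * The { -1, -1 } currentExtent above is the special value
 * { 0xFFFFFFFF, 0xFFFFFFFF } the Vulkan spec uses to mean "the surface size
 * will be determined by the swapchain extent". A sketch of the usual
 * application-side handling (the 800x600 fallback is a placeholder):
 *
 *    VkExtent2D extent;
 *    if (caps.currentExtent.width != 0xFFFFFFFF) {
 *       extent = caps.currentExtent;
 *    } else {
 *       extent.width  = CLAMP(800, caps.minImageExtent.width,
 *                             caps.maxImageExtent.width);
 *       extent.height = CLAMP(600, caps.minImageExtent.height,
 *                             caps.maxImageExtent.height);
 *    }
 */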
512
513 static VkResult
514 x11_surface_get_formats(VkIcdSurfaceBase *surface,
515 struct wsi_device *wsi_device,
516 uint32_t *pSurfaceFormatCount,
517 VkSurfaceFormatKHR *pSurfaceFormats)
518 {
519 if (pSurfaceFormats == NULL) {
520 *pSurfaceFormatCount = ARRAY_SIZE(formats);
521 return VK_SUCCESS;
522 }
523
524 *pSurfaceFormatCount = MIN2(*pSurfaceFormatCount, ARRAY_SIZE(formats));
525 typed_memcpy(pSurfaceFormats, formats, *pSurfaceFormatCount);
526
527 return *pSurfaceFormatCount < ARRAY_SIZE(formats) ?
528 VK_INCOMPLETE : VK_SUCCESS;
529 }
530
531 static VkResult
532 x11_surface_get_present_modes(VkIcdSurfaceBase *surface,
533 uint32_t *pPresentModeCount,
534 VkPresentModeKHR *pPresentModes)
535 {
536 if (pPresentModes == NULL) {
537 *pPresentModeCount = ARRAY_SIZE(present_modes);
538 return VK_SUCCESS;
539 }
540
541 *pPresentModeCount = MIN2(*pPresentModeCount, ARRAY_SIZE(present_modes));
542 typed_memcpy(pPresentModes, present_modes, *pPresentModeCount);
543
544 return *pPresentModeCount < ARRAY_SIZE(present_modes) ?
545 VK_INCOMPLETE : VK_SUCCESS;
546 }
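/*
 * Both query functions above follow the standard Vulkan two-call enumeration
 * idiom: a first call with a NULL output array returns only the count, and a
 * second call fills at most *pCount entries, returning VK_INCOMPLETE if the
 * caller's array was too small. Application-side sketch against the public
 * entry point (phys_dev and surface come from the application):
 *
 *    uint32_t count = 0;
 *    vkGetPhysicalDeviceSurfacePresentModesKHR(phys_dev, surface, &count, NULL);
 *    VkPresentModeKHR *modes = malloc(count * sizeof(*modes));
 *    vkGetPhysicalDeviceSurfacePresentModesKHR(phys_dev, surface, &count, modes);
 */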
547
548 VkResult wsi_create_xcb_surface(const VkAllocationCallbacks *pAllocator,
549 const VkXcbSurfaceCreateInfoKHR *pCreateInfo,
550 VkSurfaceKHR *pSurface)
551 {
552 VkIcdSurfaceXcb *surface;
553
554 surface = vk_alloc(pAllocator, sizeof *surface, 8,
555 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
556 if (surface == NULL)
557 return VK_ERROR_OUT_OF_HOST_MEMORY;
558
559 surface->base.platform = VK_ICD_WSI_PLATFORM_XCB;
560 surface->connection = pCreateInfo->connection;
561 surface->window = pCreateInfo->window;
562
563 *pSurface = VkIcdSurfaceBase_to_handle(&surface->base);
564 return VK_SUCCESS;
565 }
566
567 VkResult wsi_create_xlib_surface(const VkAllocationCallbacks *pAllocator,
568 const VkXlibSurfaceCreateInfoKHR *pCreateInfo,
569 VkSurfaceKHR *pSurface)
570 {
571 VkIcdSurfaceXlib *surface;
572
573 surface = vk_alloc(pAllocator, sizeof *surface, 8,
574 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
575 if (surface == NULL)
576 return VK_ERROR_OUT_OF_HOST_MEMORY;
577
578 surface->base.platform = VK_ICD_WSI_PLATFORM_XLIB;
579 surface->dpy = pCreateInfo->dpy;
580 surface->window = pCreateInfo->window;
581
582 *pSurface = VkIcdSurfaceBase_to_handle(&surface->base);
583 return VK_SUCCESS;
584 }
585
586 struct x11_image {
587 VkImage image;
588 VkImage linear_image; // for prime
589 VkDeviceMemory memory;
590 VkDeviceMemory linear_memory; // for prime
591 xcb_pixmap_t pixmap;
592 bool busy;
593 struct xshmfence * shm_fence;
594 uint32_t sync_fence;
595 };
596
597 struct x11_swapchain {
598 struct wsi_swapchain base;
599
600 xcb_connection_t * conn;
601 xcb_window_t window;
602 xcb_gc_t gc;
603 uint32_t depth;
604 VkExtent2D extent;
605
606 xcb_present_event_t event_id;
607 xcb_special_event_t * special_event;
608 uint64_t send_sbc;
609 uint64_t last_present_msc;
610 uint32_t stamp;
611
612 bool threaded;
613 VkResult status;
614 struct wsi_queue present_queue;
615 struct wsi_queue acquire_queue;
616 pthread_t queue_manager;
617
618 struct x11_image images[0];
619 };
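/*
 * images[0] is a trailing array: the swapchain header and its per-image
 * bookkeeping live in one allocation. The size computation used later in
 * x11_surface_create_swapchain is simply:
 *
 *    size_t size = sizeof(struct x11_swapchain) +
 *                  num_images * sizeof(struct x11_image);
 *    struct x11_swapchain *chain =
 *       vk_alloc(pAllocator, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
 */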
620
621 static VkResult
622 x11_get_images(struct wsi_swapchain *anv_chain,
623 uint32_t* pCount, VkImage *pSwapchainImages)
624 {
625 struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
626 uint32_t ret_count;
627 VkResult result;
628
629 if (pSwapchainImages == NULL) {
630 *pCount = chain->base.image_count;
631 return VK_SUCCESS;
632 }
633
634 result = VK_SUCCESS;
635 ret_count = chain->base.image_count;
636 if (chain->base.image_count > *pCount) {
637 ret_count = *pCount;
638 result = VK_INCOMPLETE;
639 }
640
641 for (uint32_t i = 0; i < ret_count; i++)
642 pSwapchainImages[i] = chain->images[i].image;
643
644 return result;
645 }
646
647 static void
648 x11_get_image_and_linear(struct wsi_swapchain *drv_chain,
649 int imageIndex, VkImage *image, VkImage *linear_image)
650 {
651 struct x11_swapchain *chain = (struct x11_swapchain *)drv_chain;
652 *image = chain->images[imageIndex].image;
653 *linear_image = chain->images[imageIndex].linear_image;
654 }
655
656 static VkResult
657 x11_handle_dri3_present_event(struct x11_swapchain *chain,
658 xcb_present_generic_event_t *event)
659 {
660 switch (event->evtype) {
661 case XCB_PRESENT_CONFIGURE_NOTIFY: {
662 xcb_present_configure_notify_event_t *config = (void *) event;
663
664 if (config->width != chain->extent.width ||
665 config->height != chain->extent.height)
666 return VK_ERROR_OUT_OF_DATE_KHR;
667
668 break;
669 }
670
671 case XCB_PRESENT_EVENT_IDLE_NOTIFY: {
672 xcb_present_idle_notify_event_t *idle = (void *) event;
673
674 for (unsigned i = 0; i < chain->base.image_count; i++) {
675 if (chain->images[i].pixmap == idle->pixmap) {
676 chain->images[i].busy = false;
677 if (chain->threaded)
678 wsi_queue_push(&chain->acquire_queue, i);
679 break;
680 }
681 }
682
683 break;
684 }
685
686 case XCB_PRESENT_EVENT_COMPLETE_NOTIFY: {
687 xcb_present_complete_notify_event_t *complete = (void *) event;
688 if (complete->kind == XCB_PRESENT_COMPLETE_KIND_PIXMAP)
689 chain->last_present_msc = complete->msc;
690 break;
691 }
692
693 default:
694 break;
695 }
696
697 return VK_SUCCESS;
698 }
699
700
701 static uint64_t wsi_get_current_time(void)
702 {
703 uint64_t current_time;
704 struct timespec tv;
705
706 clock_gettime(CLOCK_MONOTONIC, &tv);
707 current_time = tv.tv_nsec + tv.tv_sec*1000000000ull;
708 return current_time;
709 }
710
711 static uint64_t wsi_get_absolute_timeout(uint64_t timeout)
712 {
713 uint64_t current_time = wsi_get_current_time();
714
715 timeout = MIN2(UINT64_MAX - current_time, timeout);
716
717 return current_time + timeout;
718 }
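/*
 * Timeouts here are in nanoseconds, as passed to vkAcquireNextImageKHR. The
 * helper above turns the caller's relative timeout into an absolute
 * CLOCK_MONOTONIC deadline, saturating at UINT64_MAX rather than wrapping.
 * Worked example for a 16 ms timeout:
 *
 *    timeout  = 16000000 ns
 *    poll() below waits timeout / 1000 / 1000 = 16 ms
 *    deadline = now + 16000000 ns; after a wakeup that yields no special
 *               event, the remaining time is recomputed as deadline - now.
 */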
719
720 static VkResult
721 x11_acquire_next_image_poll_x11(struct x11_swapchain *chain,
722 uint32_t *image_index, uint64_t timeout)
723 {
724 xcb_generic_event_t *event;
725 struct pollfd pfds;
726 uint64_t atimeout;
727 while (1) {
728 for (uint32_t i = 0; i < chain->base.image_count; i++) {
729 if (!chain->images[i].busy) {
730 /* We found a non-busy image */
731 xshmfence_await(chain->images[i].shm_fence);
732 *image_index = i;
733 chain->images[i].busy = true;
734 return VK_SUCCESS;
735 }
736 }
737
738 xcb_flush(chain->conn);
739
740 if (timeout == UINT64_MAX) {
741 event = xcb_wait_for_special_event(chain->conn, chain->special_event);
742 if (!event)
743 return VK_ERROR_OUT_OF_DATE_KHR;
744 } else {
745 event = xcb_poll_for_special_event(chain->conn, chain->special_event);
746 if (!event) {
747 int ret;
748 if (timeout == 0)
749 return VK_NOT_READY;
750
751 atimeout = wsi_get_absolute_timeout(timeout);
752
753 pfds.fd = xcb_get_file_descriptor(chain->conn);
754 pfds.events = POLLIN;
755 ret = poll(&pfds, 1, timeout / 1000 / 1000);
756 if (ret == 0)
757 return VK_TIMEOUT;
758 if (ret == -1)
759 return VK_ERROR_OUT_OF_DATE_KHR;
760
761             /* If a non-special event arrives, poll() can still report the
762              * fd as readable. So recalculate the timeout now just in case.
763 */
764 uint64_t current_time = wsi_get_current_time();
765 if (atimeout > current_time)
766 timeout = atimeout - current_time;
767 else
768 timeout = 0;
769 continue;
770 }
771 }
772
773 VkResult result = x11_handle_dri3_present_event(chain, (void *)event);
774 free(event);
775 if (result != VK_SUCCESS)
776 return result;
777 }
778 }
779
780 static VkResult
781 x11_acquire_next_image_from_queue(struct x11_swapchain *chain,
782 uint32_t *image_index_out, uint64_t timeout)
783 {
784 assert(chain->threaded);
785
786 uint32_t image_index;
787 VkResult result = wsi_queue_pull(&chain->acquire_queue,
788 &image_index, timeout);
789 if (result != VK_SUCCESS) {
790 return result;
791 } else if (chain->status != VK_SUCCESS) {
792 return chain->status;
793 }
794
795 assert(image_index < chain->base.image_count);
796 xshmfence_await(chain->images[image_index].shm_fence);
797
798 *image_index_out = image_index;
799
800 return VK_SUCCESS;
801 }
802
803 static VkResult
804 x11_present_to_x11(struct x11_swapchain *chain, uint32_t image_index,
805 uint32_t target_msc)
806 {
807 struct x11_image *image = &chain->images[image_index];
808
809 assert(image_index < chain->base.image_count);
810
811 uint32_t options = XCB_PRESENT_OPTION_NONE;
812
813 int64_t divisor = 0;
814 int64_t remainder = 0;
815
816 if (chain->base.present_mode == VK_PRESENT_MODE_IMMEDIATE_KHR)
817 options |= XCB_PRESENT_OPTION_ASYNC;
818
819 xshmfence_reset(image->shm_fence);
820
821 ++chain->send_sbc;
822 xcb_void_cookie_t cookie =
823 xcb_present_pixmap(chain->conn,
824 chain->window,
825 image->pixmap,
826 (uint32_t) chain->send_sbc,
827 0, /* valid */
828 0, /* update */
829 0, /* x_off */
830 0, /* y_off */
831 XCB_NONE, /* target_crtc */
832 XCB_NONE,
833 image->sync_fence,
834 options,
835 target_msc,
836 divisor,
837 remainder, 0, NULL);
838 xcb_discard_reply(chain->conn, cookie.sequence);
839 image->busy = true;
840
841 xcb_flush(chain->conn);
842
843 return VK_SUCCESS;
844 }
845
846 static VkResult
847 x11_acquire_next_image(struct wsi_swapchain *anv_chain,
848 uint64_t timeout,
849 VkSemaphore semaphore,
850 uint32_t *image_index)
851 {
852 struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
853
854 if (chain->threaded) {
855 return x11_acquire_next_image_from_queue(chain, image_index, timeout);
856 } else {
857 return x11_acquire_next_image_poll_x11(chain, image_index, timeout);
858 }
859 }
860
861 static VkResult
862 x11_queue_present(struct wsi_swapchain *anv_chain,
863 uint32_t image_index,
864 const VkPresentRegionKHR *damage)
865 {
866 struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
867
868 if (chain->threaded) {
869 wsi_queue_push(&chain->present_queue, image_index);
870 return chain->status;
871 } else {
872 return x11_present_to_x11(chain, image_index, 0);
873 }
874 }
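/*
 * The damage parameter carries the per-image VkPresentRegionKHR that the
 * common code now plumbs through (see the commit subject). This X11 backend
 * does not forward it yet: x11_present_to_x11() passes 0 for the `valid`
 * region of xcb_present_pixmap(). A sketch of what forwarding could look
 * like, assuming the XFixes extension is available on chain->conn (this is
 * not what the file does today):
 *
 *    #include <xcb/xfixes.h>
 *
 *    xcb_xfixes_region_t region = 0;
 *    if (damage && damage->pRectangles && damage->rectangleCount > 0) {
 *       xcb_rectangle_t *rects =
 *          calloc(damage->rectangleCount, sizeof(*rects));
 *       for (uint32_t i = 0; i < damage->rectangleCount; i++) {
 *          const VkRectLayerKHR *r = &damage->pRectangles[i];
 *          rects[i] = (xcb_rectangle_t) {
 *             .x = r->offset.x,          .y = r->offset.y,
 *             .width = r->extent.width,  .height = r->extent.height,
 *          };
 *       }
 *       region = xcb_generate_id(chain->conn);
 *       xcb_xfixes_create_region(chain->conn, region,
 *                                damage->rectangleCount, rects);
 *       free(rects);
 *    }
 *    // ...then pass `region` as the `valid` argument of xcb_present_pixmap()
 *    // and destroy it with xcb_xfixes_destroy_region() after the present.
 */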
875
876 static void *
877 x11_manage_fifo_queues(void *state)
878 {
879 struct x11_swapchain *chain = state;
880 VkResult result;
881
882 assert(chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR);
883
884 while (chain->status == VK_SUCCESS) {
885 /* It should be safe to unconditionally block here. Later in the loop
886       * we block until the previous present has landed on-screen. At that
887 * point, we should have received IDLE_NOTIFY on all images presented
888 * before that point so the client should be able to acquire any image
889 * other than the currently presented one.
890 */
891 uint32_t image_index;
892 result = wsi_queue_pull(&chain->present_queue, &image_index, INT64_MAX);
893 if (result != VK_SUCCESS) {
894 goto fail;
895 } else if (chain->status != VK_SUCCESS) {
896 return NULL;
897 }
898
899 uint64_t target_msc = chain->last_present_msc + 1;
900 result = x11_present_to_x11(chain, image_index, target_msc);
901 if (result != VK_SUCCESS)
902 goto fail;
903
904 while (chain->last_present_msc < target_msc) {
905 xcb_generic_event_t *event =
906 xcb_wait_for_special_event(chain->conn, chain->special_event);
907 if (!event)
908 goto fail;
909
910 result = x11_handle_dri3_present_event(chain, (void *)event);
911 if (result != VK_SUCCESS)
912 goto fail;
913 }
914 }
915
916 fail:
917 chain->status = result;
918 wsi_queue_push(&chain->acquire_queue, UINT32_MAX);
919
920 return NULL;
921 }
922
923 static VkResult
924 x11_image_init(VkDevice device_h, struct x11_swapchain *chain,
925 const VkSwapchainCreateInfoKHR *pCreateInfo,
926 const VkAllocationCallbacks* pAllocator,
927 struct x11_image *image)
928 {
929 xcb_void_cookie_t cookie;
930 VkResult result;
931 uint32_t row_pitch;
932 uint32_t offset;
933 uint32_t bpp = 32;
934 int fd;
935 uint32_t size;
936
937 result = chain->base.image_fns->create_wsi_image(device_h,
938 pCreateInfo,
939 pAllocator,
940 chain->base.needs_linear_copy,
941 false,
942 &image->image,
943 &image->memory,
944 &size,
945 &offset,
946 &row_pitch,
947 &fd);
948 if (result != VK_SUCCESS)
949 return result;
950
951 if (chain->base.needs_linear_copy) {
952 result = chain->base.image_fns->create_wsi_image(device_h,
953 pCreateInfo,
954 pAllocator,
955 chain->base.needs_linear_copy,
956 true,
957 &image->linear_image,
958 &image->linear_memory,
959 &size,
960 &offset,
961 &row_pitch,
962 &fd);
963 if (result != VK_SUCCESS) {
964 chain->base.image_fns->free_wsi_image(device_h, pAllocator,
965 image->image, image->memory);
966 return result;
967 }
968 }
969
970 image->pixmap = xcb_generate_id(chain->conn);
971
972 cookie =
973 xcb_dri3_pixmap_from_buffer_checked(chain->conn,
974 image->pixmap,
975 chain->window,
976 size,
977 pCreateInfo->imageExtent.width,
978 pCreateInfo->imageExtent.height,
979 row_pitch,
980 chain->depth, bpp, fd);
981 xcb_discard_reply(chain->conn, cookie.sequence);
982
983 int fence_fd = xshmfence_alloc_shm();
984 if (fence_fd < 0)
985 goto fail_pixmap;
986
987 image->shm_fence = xshmfence_map_shm(fence_fd);
988 if (image->shm_fence == NULL)
989 goto fail_shmfence_alloc;
990
991 image->sync_fence = xcb_generate_id(chain->conn);
992 xcb_dri3_fence_from_fd(chain->conn,
993 image->pixmap,
994 image->sync_fence,
995 false,
996 fence_fd);
997
998 image->busy = false;
999 xshmfence_trigger(image->shm_fence);
1000
1001 return VK_SUCCESS;
1002
1003 fail_shmfence_alloc:
1004 close(fence_fd);
1005
1006 fail_pixmap:
1007 cookie = xcb_free_pixmap(chain->conn, image->pixmap);
1008 xcb_discard_reply(chain->conn, cookie.sequence);
1009
1010 if (chain->base.needs_linear_copy) {
1011 chain->base.image_fns->free_wsi_image(device_h, pAllocator,
1012 image->linear_image, image->linear_memory);
1013 }
1014 chain->base.image_fns->free_wsi_image(device_h, pAllocator,
1015 image->image, image->memory);
1016
1017 return result;
1018 }
1019
1020 static void
1021 x11_image_finish(struct x11_swapchain *chain,
1022 const VkAllocationCallbacks* pAllocator,
1023 struct x11_image *image)
1024 {
1025 xcb_void_cookie_t cookie;
1026
1027 cookie = xcb_sync_destroy_fence(chain->conn, image->sync_fence);
1028 xcb_discard_reply(chain->conn, cookie.sequence);
1029 xshmfence_unmap_shm(image->shm_fence);
1030
1031 cookie = xcb_free_pixmap(chain->conn, image->pixmap);
1032 xcb_discard_reply(chain->conn, cookie.sequence);
1033
1034 if (chain->base.needs_linear_copy) {
1035 chain->base.image_fns->free_wsi_image(chain->base.device, pAllocator,
1036 image->linear_image, image->linear_memory);
1037 }
1038 chain->base.image_fns->free_wsi_image(chain->base.device, pAllocator,
1039 image->image, image->memory);
1040 }
1041
1042 static VkResult
1043 x11_swapchain_destroy(struct wsi_swapchain *anv_chain,
1044 const VkAllocationCallbacks *pAllocator)
1045 {
1046 struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
1047 xcb_void_cookie_t cookie;
1048
1049 for (uint32_t i = 0; i < chain->base.image_count; i++)
1050 x11_image_finish(chain, pAllocator, &chain->images[i]);
1051
1052 if (chain->threaded) {
1053 chain->status = VK_ERROR_OUT_OF_DATE_KHR;
1054 /* Push a UINT32_MAX to wake up the manager */
1055 wsi_queue_push(&chain->present_queue, UINT32_MAX);
1056 pthread_join(chain->queue_manager, NULL);
1057 wsi_queue_destroy(&chain->acquire_queue);
1058 wsi_queue_destroy(&chain->present_queue);
1059 }
1060
1061 xcb_unregister_for_special_event(chain->conn, chain->special_event);
1062 cookie = xcb_present_select_input_checked(chain->conn, chain->event_id,
1063 chain->window,
1064 XCB_PRESENT_EVENT_MASK_NO_EVENT);
1065 xcb_discard_reply(chain->conn, cookie.sequence);
1066
1067 vk_free(pAllocator, chain);
1068
1069 return VK_SUCCESS;
1070 }
1071
1072 static VkResult
1073 x11_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
1074 VkDevice device,
1075 struct wsi_device *wsi_device,
1076 int local_fd,
1077 const VkSwapchainCreateInfoKHR *pCreateInfo,
1078 const VkAllocationCallbacks* pAllocator,
1079 const struct wsi_image_fns *image_fns,
1080 struct wsi_swapchain **swapchain_out)
1081 {
1082 struct x11_swapchain *chain;
1083 xcb_void_cookie_t cookie;
1084 VkResult result;
1085
1086 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR);
1087
1088 const unsigned num_images = pCreateInfo->minImageCount;
1089
1090 size_t size = sizeof(*chain) + num_images * sizeof(chain->images[0]);
1091 chain = vk_alloc(pAllocator, size, 8,
1092 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1093 if (chain == NULL)
1094 return VK_ERROR_OUT_OF_HOST_MEMORY;
1095
1096 xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
1097 xcb_window_t window = x11_surface_get_window(icd_surface);
1098 xcb_get_geometry_reply_t *geometry =
1099 xcb_get_geometry_reply(conn, xcb_get_geometry(conn, window), NULL);
1100    if (geometry == NULL) {
1101       vk_free(pAllocator, chain);
1102       return VK_ERROR_SURFACE_LOST_KHR;
1103    }
1104 chain->base.device = device;
1105 chain->base.destroy = x11_swapchain_destroy;
1106 chain->base.get_images = x11_get_images;
1107 chain->base.get_image_and_linear = x11_get_image_and_linear;
1108 chain->base.acquire_next_image = x11_acquire_next_image;
1109 chain->base.queue_present = x11_queue_present;
1110 chain->base.image_fns = image_fns;
1111 chain->base.present_mode = pCreateInfo->presentMode;
1112 chain->base.image_count = num_images;
1113 chain->conn = conn;
1114 chain->window = window;
1115 chain->depth = geometry->depth;
1116 chain->extent = pCreateInfo->imageExtent;
1117 chain->send_sbc = 0;
1118 chain->last_present_msc = 0;
1119 chain->threaded = false;
1120 chain->status = VK_SUCCESS;
1121
1122 free(geometry);
1123
1124 chain->base.needs_linear_copy = false;
1125 if (!wsi_x11_check_dri3_compatible(conn, local_fd))
1126 chain->base.needs_linear_copy = true;
1127
1128 chain->event_id = xcb_generate_id(chain->conn);
1129 xcb_present_select_input(chain->conn, chain->event_id, chain->window,
1130 XCB_PRESENT_EVENT_MASK_CONFIGURE_NOTIFY |
1131 XCB_PRESENT_EVENT_MASK_COMPLETE_NOTIFY |
1132 XCB_PRESENT_EVENT_MASK_IDLE_NOTIFY);
1133
1134 /* Create an XCB event queue to hold present events outside of the usual
1135 * application event queue
1136 */
1137 chain->special_event =
1138 xcb_register_for_special_xge(chain->conn, &xcb_present_id,
1139 chain->event_id, NULL);
1140
1141 chain->gc = xcb_generate_id(chain->conn);
1142 if (!chain->gc) {
1143 /* FINISHME: Choose a better error. */
1144 result = VK_ERROR_OUT_OF_HOST_MEMORY;
1145 goto fail_register;
1146 }
1147
1148 cookie = xcb_create_gc(chain->conn,
1149 chain->gc,
1150 chain->window,
1151 XCB_GC_GRAPHICS_EXPOSURES,
1152 (uint32_t []) { 0 });
1153 xcb_discard_reply(chain->conn, cookie.sequence);
1154
1155 uint32_t image = 0;
1156 for (; image < chain->base.image_count; image++) {
1157 result = x11_image_init(device, chain, pCreateInfo, pAllocator,
1158 &chain->images[image]);
1159 if (result != VK_SUCCESS)
1160 goto fail_init_images;
1161 }
1162
1163 if (chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR) {
1164 chain->threaded = true;
1165
1166 /* Initialize our queues. We make them base.image_count + 1 because we will
1167 * occasionally use UINT32_MAX to signal the other thread that an error
1168 * has occurred and we don't want an overflow.
1169 */
1170 int ret;
1171 ret = wsi_queue_init(&chain->acquire_queue, chain->base.image_count + 1);
1172 if (ret) {
1173 goto fail_init_images;
1174 }
1175
1176 ret = wsi_queue_init(&chain->present_queue, chain->base.image_count + 1);
1177 if (ret) {
1178 wsi_queue_destroy(&chain->acquire_queue);
1179 goto fail_init_images;
1180 }
1181
1182 for (unsigned i = 0; i < chain->base.image_count; i++)
1183 wsi_queue_push(&chain->acquire_queue, i);
1184
1185 ret = pthread_create(&chain->queue_manager, NULL,
1186 x11_manage_fifo_queues, chain);
1187 if (ret) {
1188 wsi_queue_destroy(&chain->present_queue);
1189 wsi_queue_destroy(&chain->acquire_queue);
1190 goto fail_init_images;
1191 }
1192 }
1193
1194 *swapchain_out = &chain->base;
1195
1196 return VK_SUCCESS;
1197
1198 fail_init_images:
1199 for (uint32_t j = 0; j < image; j++)
1200 x11_image_finish(chain, pAllocator, &chain->images[j]);
1201
1202 fail_register:
1203 xcb_unregister_for_special_event(chain->conn, chain->special_event);
1204
1205 vk_free(pAllocator, chain);
1206
1207 return result;
1208 }
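/*
 * For context, a minimal application-side swapchain request that ends up in
 * this function (field values are examples; surface, device and caps come
 * from the application):
 *
 *    VkSwapchainCreateInfoKHR info = {
 *       .sType            = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR,
 *       .surface          = surface,
 *       .minImageCount    = 3,
 *       .imageFormat      = VK_FORMAT_B8G8R8A8_UNORM,
 *       .imageColorSpace  = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR,
 *       .imageExtent      = caps.currentExtent,
 *       .imageArrayLayers = 1,
 *       .imageUsage       = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
 *       .imageSharingMode = VK_SHARING_MODE_EXCLUSIVE,
 *       .preTransform     = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR,
 *       .compositeAlpha   = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR,
 *       .presentMode      = VK_PRESENT_MODE_FIFO_KHR,
 *       .clipped          = VK_TRUE,
 *    };
 *    VkSwapchainKHR swapchain;
 *    vkCreateSwapchainKHR(device, &info, NULL, &swapchain);
 *
 * With FIFO present mode the branch above spins up the queue-manager thread;
 * with IMMEDIATE or MAILBOX the chain stays single-threaded and acquisition
 * polls the X connection directly.
 */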
1209
1210 VkResult
1211 wsi_x11_init_wsi(struct wsi_device *wsi_device,
1212 const VkAllocationCallbacks *alloc)
1213 {
1214 struct wsi_x11 *wsi;
1215 VkResult result;
1216
1217 wsi = vk_alloc(alloc, sizeof(*wsi), 8,
1218 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
1219 if (!wsi) {
1220 result = VK_ERROR_OUT_OF_HOST_MEMORY;
1221 goto fail;
1222 }
1223
1224 int ret = pthread_mutex_init(&wsi->mutex, NULL);
1225 if (ret != 0) {
1226 if (ret == ENOMEM) {
1227 result = VK_ERROR_OUT_OF_HOST_MEMORY;
1228 } else {
1229 /* FINISHME: Choose a better error. */
1230 result = VK_ERROR_OUT_OF_HOST_MEMORY;
1231 }
1232
1233 goto fail_alloc;
1234 }
1235
1236 wsi->connections = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
1237 _mesa_key_pointer_equal);
1238 if (!wsi->connections) {
1239 result = VK_ERROR_OUT_OF_HOST_MEMORY;
1240 goto fail_mutex;
1241 }
1242
1243 wsi->base.get_support = x11_surface_get_support;
1244 wsi->base.get_capabilities = x11_surface_get_capabilities;
1245 wsi->base.get_formats = x11_surface_get_formats;
1246 wsi->base.get_present_modes = x11_surface_get_present_modes;
1247 wsi->base.create_swapchain = x11_surface_create_swapchain;
1248
1249 wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB] = &wsi->base;
1250 wsi_device->wsi[VK_ICD_WSI_PLATFORM_XLIB] = &wsi->base;
1251
1252 return VK_SUCCESS;
1253
1254 fail_mutex:
1255 pthread_mutex_destroy(&wsi->mutex);
1256 fail_alloc:
1257 vk_free(alloc, wsi);
1258 fail:
1259 wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB] = NULL;
1260 wsi_device->wsi[VK_ICD_WSI_PLATFORM_XLIB] = NULL;
1261
1262 return result;
1263 }
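/*
 * A driver is expected to call wsi_x11_init_wsi() while setting up its
 * wsi_device for a physical device, and wsi_x11_finish_wsi() on teardown. A
 * minimal sketch, assuming a hypothetical drv_physical_device that owns a
 * wsi_device and an instance allocator (names are illustrative):
 *
 *    VkResult drv_init_wsi(struct drv_physical_device *pdev)
 *    {
 *       memset(pdev->wsi_device.wsi, 0, sizeof(pdev->wsi_device.wsi));
 *       return wsi_x11_init_wsi(&pdev->wsi_device, &pdev->instance->alloc);
 *    }
 *
 *    void drv_finish_wsi(struct drv_physical_device *pdev)
 *    {
 *       wsi_x11_finish_wsi(&pdev->wsi_device, &pdev->instance->alloc);
 *    }
 */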
1264
1265 void
1266 wsi_x11_finish_wsi(struct wsi_device *wsi_device,
1267 const VkAllocationCallbacks *alloc)
1268 {
1269 struct wsi_x11 *wsi =
1270 (struct wsi_x11 *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB];
1271
1272 if (wsi) {
1273 struct hash_entry *entry;
1274 hash_table_foreach(wsi->connections, entry)
1275 wsi_x11_connection_destroy(alloc, entry->data);
1276
1277 _mesa_hash_table_destroy(wsi->connections, NULL);
1278
1279 pthread_mutex_destroy(&wsi->mutex);
1280
1281 vk_free(alloc, wsi);
1282 }
1283 }