/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <X11/Xlib-xcb.h>
#include <X11/xshmfence.h>
#include <xcb/xcb.h>
#include <xcb/dri3.h>
#include <xcb/present.h>

#include "util/macros.h"
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <fcntl.h>
#include <poll.h>
#include <time.h>
#include <xf86drm.h>
#include <drm_fourcc.h>
#include "util/hash_table.h"

#include "vk_util.h"
#include "wsi_common_private.h"
#include "wsi_common_x11.h"
#include "wsi_common_queue.h"
#define typed_memcpy(dest, src, count) ({ \
   STATIC_ASSERT(sizeof(*src) == sizeof(*dest)); \
   memcpy((dest), (src), (count) * sizeof(*(src))); \
})
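/* The macro above is a GCC statement expression; the STATIC_ASSERT turns a
 * mismatched element size between the source and destination arrays into a
 * compile-time error instead of a silent partial copy.  For example, the
 * typed_memcpy(pPresentModes, present_modes, n) call below only compiles
 * because both sides are VkPresentModeKHR arrays.
 */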
struct wsi_x11_connection {
   bool has_dri3;
   bool has_dri3_modifiers;
   bool has_present;
   bool is_proprietary_x11;
};
struct wsi_x11 {
   struct wsi_interface base;

   pthread_mutex_t mutex;
   /* Hash table of xcb_connection -> wsi_x11_connection mappings */
   struct hash_table *connections;
};
/**
 * Wrapper around xcb_dri3_open.  Returns a render-node fd, or -1 on failure.
 */
static int
wsi_dri3_open(xcb_connection_t *conn,
              xcb_window_t root,
              uint32_t provider)
{
   xcb_dri3_open_cookie_t cookie;
   xcb_dri3_open_reply_t *reply;
   int fd;

   cookie = xcb_dri3_open(conn,
                          root,
                          provider);

   reply = xcb_dri3_open_reply(conn, cookie, NULL);
   if (!reply)
      return -1;

   if (reply->nfd != 1) {
      free(reply);
      return -1;
   }

   fd = xcb_dri3_open_reply_fds(conn, reply)[0];
   free(reply);
   fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);

   return fd;
}
static bool
wsi_x11_check_dri3_compatible(xcb_connection_t *conn, int local_fd)
{
   xcb_screen_iterator_t screen_iter =
      xcb_setup_roots_iterator(xcb_get_setup(conn));
   xcb_screen_t *screen = screen_iter.data;

   int dri3_fd = wsi_dri3_open(conn, screen->root, None);
   if (dri3_fd == -1)
      return true;

   char *local_dev = drmGetRenderDeviceNameFromFd(local_fd);
   char *dri3_dev = drmGetRenderDeviceNameFromFd(dri3_fd);

   int ret = strcmp(local_dev, dri3_dev);

   free(local_dev);
   free(dri3_dev);
   close(dri3_fd);

   return ret == 0;
}
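/* Note: the compatibility check above compares render-node paths (for
 * example "/dev/dri/renderD128") rather than fds directly.  If the X
 * server's DRI3 device and the local device resolve to different render
 * nodes, they are different GPUs and presentation falls back to a prime
 * blit (see use_prime_blit below).
 */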
static struct wsi_x11_connection *
wsi_x11_connection_create(struct wsi_device *wsi_dev,
                          xcb_connection_t *conn)
{
   xcb_query_extension_cookie_t dri3_cookie, pres_cookie, amd_cookie, nv_cookie;
   xcb_query_extension_reply_t *dri3_reply, *pres_reply, *amd_reply, *nv_reply;
   bool has_dri3_v1_2 = false;
   bool has_present_v1_2 = false;

   struct wsi_x11_connection *wsi_conn =
      vk_alloc(&wsi_dev->instance_alloc, sizeof(*wsi_conn), 8,
               VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!wsi_conn)
      return NULL;

   dri3_cookie = xcb_query_extension(conn, 4, "DRI3");
   pres_cookie = xcb_query_extension(conn, 7, "Present");

   /* We try to be nice to users and emit a warning if they try to use a
    * Vulkan application on a system without DRI3 enabled.  However, this
    * ends up spewing the warning when a user has, for example, both Intel
    * integrated graphics and a discrete card with proprietary drivers and is
    * running on the discrete card with the proprietary DDX.  In this case,
    * we really don't want to print the warning because it just confuses
    * users.  As a heuristic to detect this case, we check for a couple of
    * proprietary X11 extensions.
    */
   amd_cookie = xcb_query_extension(conn, 11, "ATIFGLRXDRI");
   nv_cookie = xcb_query_extension(conn, 10, "NV-CONTROL");

   dri3_reply = xcb_query_extension_reply(conn, dri3_cookie, NULL);
   pres_reply = xcb_query_extension_reply(conn, pres_cookie, NULL);
   amd_reply = xcb_query_extension_reply(conn, amd_cookie, NULL);
   nv_reply = xcb_query_extension_reply(conn, nv_cookie, NULL);
   if (!dri3_reply || !pres_reply) {
      free(dri3_reply);
      free(pres_reply);
      free(amd_reply);
      free(nv_reply);
      vk_free(&wsi_dev->instance_alloc, wsi_conn);
      return NULL;
   }

   wsi_conn->has_dri3 = dri3_reply->present != 0;
#ifdef HAVE_DRI3_MODIFIERS
   if (wsi_conn->has_dri3) {
      xcb_dri3_query_version_cookie_t ver_cookie;
      xcb_dri3_query_version_reply_t *ver_reply;

      ver_cookie = xcb_dri3_query_version(conn, 1, 2);
      ver_reply = xcb_dri3_query_version_reply(conn, ver_cookie, NULL);
      has_dri3_v1_2 =
         (ver_reply->major_version > 1 || ver_reply->minor_version >= 2);
      free(ver_reply);
   }
#endif

   wsi_conn->has_present = pres_reply->present != 0;
#ifdef HAVE_DRI3_MODIFIERS
   if (wsi_conn->has_present) {
      xcb_present_query_version_cookie_t ver_cookie;
      xcb_present_query_version_reply_t *ver_reply;

      ver_cookie = xcb_present_query_version(conn, 1, 2);
      ver_reply = xcb_present_query_version_reply(conn, ver_cookie, NULL);
      has_present_v1_2 =
         (ver_reply->major_version > 1 || ver_reply->minor_version >= 2);
      free(ver_reply);
   }
#endif

   wsi_conn->has_dri3_modifiers = has_dri3_v1_2 && has_present_v1_2;
   wsi_conn->is_proprietary_x11 = false;
   if (amd_reply && amd_reply->present)
      wsi_conn->is_proprietary_x11 = true;
   if (nv_reply && nv_reply->present)
      wsi_conn->is_proprietary_x11 = true;

   free(dri3_reply);
   free(pres_reply);
   free(amd_reply);
   free(nv_reply);

   return wsi_conn;
}
static void
wsi_x11_connection_destroy(struct wsi_device *wsi_dev,
                           struct wsi_x11_connection *conn)
{
   vk_free(&wsi_dev->instance_alloc, conn);
}
static bool
wsi_x11_check_for_dri3(struct wsi_x11_connection *wsi_conn)
{
   if (wsi_conn->has_dri3)
      return true;

   if (!wsi_conn->is_proprietary_x11) {
      fprintf(stderr, "vulkan: No DRI3 support detected - required for presentation\n"
                      "Note: you can probably enable DRI3 in your Xorg config\n");
   }
   return false;
}
static struct wsi_x11_connection *
wsi_x11_get_connection(struct wsi_device *wsi_dev,
                       xcb_connection_t *conn)
{
   struct wsi_x11 *wsi =
      (struct wsi_x11 *)wsi_dev->wsi[VK_ICD_WSI_PLATFORM_XCB];

   pthread_mutex_lock(&wsi->mutex);

   struct hash_entry *entry = _mesa_hash_table_search(wsi->connections, conn);
   if (!entry) {
      /* We're about to make a bunch of blocking calls.  Let's drop the
       * mutex for now so we don't block up too badly.
       */
      pthread_mutex_unlock(&wsi->mutex);

      struct wsi_x11_connection *wsi_conn =
         wsi_x11_connection_create(wsi_dev, conn);
      if (!wsi_conn)
         return NULL;

      pthread_mutex_lock(&wsi->mutex);

      entry = _mesa_hash_table_search(wsi->connections, conn);
      if (entry) {
         /* Oops, someone raced us to it */
         wsi_x11_connection_destroy(wsi_dev, wsi_conn);
      } else {
         entry = _mesa_hash_table_insert(wsi->connections, conn, wsi_conn);
      }
   }

   pthread_mutex_unlock(&wsi->mutex);

   return entry->data;
}
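/* The lookup above uses a drop-and-recheck locking pattern: the mutex is
 * released around wsi_x11_connection_create() because the extension queries
 * block on X server round-trips.  Two threads may therefore race to create
 * an entry for the same connection; the loser's copy is destroyed after the
 * table is re-checked under the lock, so the table always holds exactly one
 * wsi_x11_connection per xcb_connection_t.
 */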
static const VkFormat formats[] = {
   VK_FORMAT_B8G8R8A8_SRGB,
   VK_FORMAT_B8G8R8A8_UNORM,
};

static const VkPresentModeKHR present_modes[] = {
   VK_PRESENT_MODE_IMMEDIATE_KHR,
   VK_PRESENT_MODE_MAILBOX_KHR,
   VK_PRESENT_MODE_FIFO_KHR,
};
static xcb_screen_t *
get_screen_for_root(xcb_connection_t *conn, xcb_window_t root)
{
   xcb_screen_iterator_t screen_iter =
      xcb_setup_roots_iterator(xcb_get_setup(conn));

   for (; screen_iter.rem; xcb_screen_next (&screen_iter)) {
      if (screen_iter.data->root == root)
         return screen_iter.data;
   }

   return NULL;
}
static xcb_visualtype_t *
screen_get_visualtype(xcb_screen_t *screen, xcb_visualid_t visual_id,
                      unsigned *depth)
{
   xcb_depth_iterator_t depth_iter =
      xcb_screen_allowed_depths_iterator(screen);

   for (; depth_iter.rem; xcb_depth_next (&depth_iter)) {
      xcb_visualtype_iterator_t visual_iter =
         xcb_depth_visuals_iterator (depth_iter.data);

      for (; visual_iter.rem; xcb_visualtype_next (&visual_iter)) {
         if (visual_iter.data->visual_id == visual_id) {
            if (depth)
               *depth = depth_iter.data->depth;
            return visual_iter.data;
         }
      }
   }

   return NULL;
}
static xcb_visualtype_t *
connection_get_visualtype(xcb_connection_t *conn, xcb_visualid_t visual_id,
                          unsigned *depth)
{
   xcb_screen_iterator_t screen_iter =
      xcb_setup_roots_iterator(xcb_get_setup(conn));

   /* For this we have to iterate over all of the screens which is rather
    * annoying.  Fortunately, there is probably only 1.
    */
   for (; screen_iter.rem; xcb_screen_next (&screen_iter)) {
      xcb_visualtype_t *visual = screen_get_visualtype(screen_iter.data,
                                                       visual_id, depth);
      if (visual)
         return visual;
   }

   return NULL;
}
static xcb_visualtype_t *
get_visualtype_for_window(xcb_connection_t *conn, xcb_window_t window,
                          unsigned *depth)
{
   xcb_query_tree_cookie_t tree_cookie;
   xcb_get_window_attributes_cookie_t attrib_cookie;
   xcb_query_tree_reply_t *tree;
   xcb_get_window_attributes_reply_t *attrib;

   tree_cookie = xcb_query_tree(conn, window);
   attrib_cookie = xcb_get_window_attributes(conn, window);

   tree = xcb_query_tree_reply(conn, tree_cookie, NULL);
   attrib = xcb_get_window_attributes_reply(conn, attrib_cookie, NULL);
   if (attrib == NULL || tree == NULL) {
      free(attrib);
      free(tree);
      return NULL;
   }

   xcb_window_t root = tree->root;
   xcb_visualid_t visual_id = attrib->visual;
   free(attrib);
   free(tree);

   xcb_screen_t *screen = get_screen_for_root(conn, root);
   if (screen == NULL)
      return NULL;

   return screen_get_visualtype(screen, visual_id, depth);
}
static bool
visual_has_alpha(xcb_visualtype_t *visual, unsigned depth)
{
   uint32_t rgb_mask = visual->red_mask |
                       visual->green_mask |
                       visual->blue_mask;

   uint32_t all_mask = 0xffffffff >> (32 - depth);

   /* Do we have bits left over after RGB? */
   return (all_mask & ~rgb_mask) != 0;
}
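/* Example: a depth-32 visual with 8-bit red/green/blue masks covers bits
 * 0-23, so all_mask (0xffffffff) minus rgb_mask leaves bits 24-31 -- an
 * 8-bit alpha channel.  A depth-24 visual with the same masks leaves no
 * bits over, so it has no alpha.
 */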
VkBool32 wsi_get_physical_device_xcb_presentation_support(
    struct wsi_device *wsi_device,
    uint32_t queueFamilyIndex,
    int fd,
    bool can_handle_different_gpu,
    xcb_connection_t* connection,
    xcb_visualid_t visual_id)
{
   struct wsi_x11_connection *wsi_conn =
      wsi_x11_get_connection(wsi_device, connection);

   if (!wsi_conn)
      return false;

   if (!wsi_x11_check_for_dri3(wsi_conn))
      return false;

   if (!can_handle_different_gpu)
      if (!wsi_x11_check_dri3_compatible(connection, fd))
         return false;

   unsigned visual_depth;
   if (!connection_get_visualtype(connection, visual_id, &visual_depth))
      return false;

   /* We can only support 24 and 32-bit RGB visuals */
   if (visual_depth != 24 && visual_depth != 32)
      return false;

   return true;
}
static xcb_connection_t *
x11_surface_get_connection(VkIcdSurfaceBase *icd_surface)
{
   if (icd_surface->platform == VK_ICD_WSI_PLATFORM_XLIB)
      return XGetXCBConnection(((VkIcdSurfaceXlib *)icd_surface)->dpy);
   else
      return ((VkIcdSurfaceXcb *)icd_surface)->connection;
}
static xcb_window_t
x11_surface_get_window(VkIcdSurfaceBase *icd_surface)
{
   if (icd_surface->platform == VK_ICD_WSI_PLATFORM_XLIB)
      return ((VkIcdSurfaceXlib *)icd_surface)->window;
   else
      return ((VkIcdSurfaceXcb *)icd_surface)->window;
}
static VkResult
x11_surface_get_support(VkIcdSurfaceBase *icd_surface,
                        struct wsi_device *wsi_device,
                        uint32_t queueFamilyIndex,
                        int local_fd,
                        VkBool32* pSupported)
{
   xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
   xcb_window_t window = x11_surface_get_window(icd_surface);

   struct wsi_x11_connection *wsi_conn =
      wsi_x11_get_connection(wsi_device, conn);
   if (!wsi_conn)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   if (!wsi_x11_check_for_dri3(wsi_conn)) {
      *pSupported = false;
      return VK_SUCCESS;
   }

   unsigned visual_depth;
   if (!get_visualtype_for_window(conn, window, &visual_depth)) {
      *pSupported = false;
      return VK_SUCCESS;
   }

   if (visual_depth != 24 && visual_depth != 32) {
      *pSupported = false;
      return VK_SUCCESS;
   }

   *pSupported = true;
   return VK_SUCCESS;
}
static VkResult
x11_surface_get_capabilities(VkIcdSurfaceBase *icd_surface,
                             VkSurfaceCapabilitiesKHR *caps)
{
   xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
   xcb_window_t window = x11_surface_get_window(icd_surface);
   xcb_get_geometry_cookie_t geom_cookie;
   xcb_generic_error_t *err;
   xcb_get_geometry_reply_t *geom;
   unsigned visual_depth;

   geom_cookie = xcb_get_geometry(conn, window);

   /* This does a round-trip.  This is why we do get_geometry first and
    * wait to read the reply until after we have a visual.
    */
   xcb_visualtype_t *visual =
      get_visualtype_for_window(conn, window, &visual_depth);

   if (!visual)
      return VK_ERROR_SURFACE_LOST_KHR;

   geom = xcb_get_geometry_reply(conn, geom_cookie, &err);
   if (geom) {
      VkExtent2D extent = { geom->width, geom->height };
      caps->currentExtent = extent;
      caps->minImageExtent = extent;
      caps->maxImageExtent = extent;
   } else {
      /* This can happen if the client didn't wait for the configure event
       * to come back from the compositor.  In that case, we don't know the
       * size of the window so we just return valid "I don't know" stuff.
       */
      caps->currentExtent = (VkExtent2D) { -1, -1 };
      caps->minImageExtent = (VkExtent2D) { 1, 1 };
      /* This is the maximum supported size on Intel */
      caps->maxImageExtent = (VkExtent2D) { 1 << 14, 1 << 14 };
   }
   free(err);
   free(geom);

   if (visual_has_alpha(visual, visual_depth)) {
      caps->supportedCompositeAlpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR |
                                      VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR;
   } else {
      caps->supportedCompositeAlpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR |
                                      VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
   }

   /* For true mailbox mode, we need at least 4 images:
    *  1) One to scan out from
    *  2) One to have queued for scan-out
    *  3) One to be currently held by the X server
    *  4) One to render to
    */
   caps->minImageCount = 2;
   /* There is no real maximum */
   caps->maxImageCount = 0;

   caps->supportedTransforms = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
   caps->currentTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
   caps->maxImageArrayLayers = 1;
   caps->supportedUsageFlags =
      VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
      VK_IMAGE_USAGE_SAMPLED_BIT |
      VK_IMAGE_USAGE_TRANSFER_DST_BIT |
      VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;

   return VK_SUCCESS;
}
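/* A currentExtent of (-1, -1) above is, per component, the Vulkan special
 * value 0xffffffff, which the spec defines to mean "the surface size will
 * be determined by the extent of the swapchain" -- the honest answer when
 * the window geometry is not yet known.
 */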
static VkResult
x11_surface_get_capabilities2(VkIcdSurfaceBase *icd_surface,
                              const void *info_next,
                              VkSurfaceCapabilities2KHR *caps)
{
   assert(caps->sType == VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR);

   return x11_surface_get_capabilities(icd_surface, &caps->surfaceCapabilities);
}
static VkResult
x11_surface_get_formats(VkIcdSurfaceBase *surface,
                        struct wsi_device *wsi_device,
                        uint32_t *pSurfaceFormatCount,
                        VkSurfaceFormatKHR *pSurfaceFormats)
{
   VK_OUTARRAY_MAKE(out, pSurfaceFormats, pSurfaceFormatCount);

   for (unsigned i = 0; i < ARRAY_SIZE(formats); i++) {
      vk_outarray_append(&out, f) {
         f->format = formats[i];
         f->colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
      }
   }

   return vk_outarray_status(&out);
}
static VkResult
x11_surface_get_formats2(VkIcdSurfaceBase *surface,
                         struct wsi_device *wsi_device,
                         const void *info_next,
                         uint32_t *pSurfaceFormatCount,
                         VkSurfaceFormat2KHR *pSurfaceFormats)
{
   VK_OUTARRAY_MAKE(out, pSurfaceFormats, pSurfaceFormatCount);

   for (unsigned i = 0; i < ARRAY_SIZE(formats); i++) {
      vk_outarray_append(&out, f) {
         assert(f->sType == VK_STRUCTURE_TYPE_SURFACE_FORMAT_2_KHR);
         f->surfaceFormat.format = formats[i];
         f->surfaceFormat.colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
      }
   }

   return vk_outarray_status(&out);
}
static VkResult
x11_surface_get_present_modes(VkIcdSurfaceBase *surface,
                              uint32_t *pPresentModeCount,
                              VkPresentModeKHR *pPresentModes)
{
   if (pPresentModes == NULL) {
      *pPresentModeCount = ARRAY_SIZE(present_modes);
      return VK_SUCCESS;
   }

   *pPresentModeCount = MIN2(*pPresentModeCount, ARRAY_SIZE(present_modes));
   typed_memcpy(pPresentModes, present_modes, *pPresentModeCount);

   return *pPresentModeCount < ARRAY_SIZE(present_modes) ?
      VK_INCOMPLETE : VK_SUCCESS;
}
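/* This follows the standard Vulkan two-call idiom: callers pass NULL to
 * query the count, then call again with an array of that size.  A sketch
 * of the application side (hypothetical variable names, error checking
 * omitted):
 *
 *    uint32_t count;
 *    vkGetPhysicalDeviceSurfacePresentModesKHR(pdev, surf, &count, NULL);
 *    VkPresentModeKHR *modes = malloc(count * sizeof(*modes));
 *    vkGetPhysicalDeviceSurfacePresentModesKHR(pdev, surf, &count, modes);
 *
 * VK_INCOMPLETE is returned when the caller-provided array was too small.
 */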
static VkBool32
x11_surface_is_local_to_gpu(struct wsi_device *wsi_dev,
                            int local_fd,
                            xcb_connection_t *conn)
{
   struct wsi_x11_connection *wsi_conn =
      wsi_x11_get_connection(wsi_dev, conn);

   if (!wsi_conn)
      return false;

   if (!wsi_x11_check_for_dri3(wsi_conn))
      return false;

   if (!wsi_x11_check_dri3_compatible(conn, local_fd))
      return false;

   return true;
}
static VkResult
x11_surface_get_present_rectangles(VkIcdSurfaceBase *icd_surface,
                                   struct wsi_device *wsi_device,
                                   int local_fd,
                                   uint32_t* pRectCount,
                                   VkRect2D* pRects)
{
   xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
   xcb_window_t window = x11_surface_get_window(icd_surface);
   VK_OUTARRAY_MAKE(out, pRects, pRectCount);

   if (x11_surface_is_local_to_gpu(wsi_device, local_fd, conn)) {
      vk_outarray_append(&out, rect) {
         xcb_generic_error_t *err = NULL;
         xcb_get_geometry_cookie_t geom_cookie = xcb_get_geometry(conn, window);
         xcb_get_geometry_reply_t *geom =
            xcb_get_geometry_reply(conn, geom_cookie, &err);
         free(err);
         if (geom) {
            *rect = (VkRect2D) {
               .offset = { 0, 0 },
               .extent = { geom->width, geom->height },
            };
         } else {
            /* This can happen if the client didn't wait for the configure
             * event to come back from the compositor.  In that case, we
             * don't know the size of the window so we just return valid
             * "I don't know" stuff.
             */
            *rect = (VkRect2D) {
               .offset = { 0, 0 },
               .extent = { -1, -1 },
            };
         }
         free(geom);
      }
   }

   return vk_outarray_status(&out);
}
VkResult wsi_create_xcb_surface(const VkAllocationCallbacks *pAllocator,
                                const VkXcbSurfaceCreateInfoKHR *pCreateInfo,
                                VkSurfaceKHR *pSurface)
{
   VkIcdSurfaceXcb *surface;

   surface = vk_alloc(pAllocator, sizeof *surface, 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (surface == NULL)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   surface->base.platform = VK_ICD_WSI_PLATFORM_XCB;
   surface->connection = pCreateInfo->connection;
   surface->window = pCreateInfo->window;

   *pSurface = VkIcdSurfaceBase_to_handle(&surface->base);
   return VK_SUCCESS;
}
VkResult wsi_create_xlib_surface(const VkAllocationCallbacks *pAllocator,
                                 const VkXlibSurfaceCreateInfoKHR *pCreateInfo,
                                 VkSurfaceKHR *pSurface)
{
   VkIcdSurfaceXlib *surface;

   surface = vk_alloc(pAllocator, sizeof *surface, 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (surface == NULL)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   surface->base.platform = VK_ICD_WSI_PLATFORM_XLIB;
   surface->dpy = pCreateInfo->dpy;
   surface->window = pCreateInfo->window;

   *pSurface = VkIcdSurfaceBase_to_handle(&surface->base);
   return VK_SUCCESS;
}
struct x11_image {
   struct wsi_image                          base;
   xcb_pixmap_t                              pixmap;
   bool                                      busy;
   struct xshmfence *                        shm_fence;
   uint32_t                                  sync_fence;
};

struct x11_swapchain {
   struct wsi_swapchain                      base;

   bool                                      has_dri3_modifiers;

   xcb_connection_t *                        conn;
   xcb_window_t                              window;
   xcb_gc_t                                  gc;
   uint32_t                                  depth;
   VkExtent2D                                extent;

   xcb_present_event_t                       event_id;
   xcb_special_event_t *                     special_event;
   uint64_t                                  send_sbc;
   uint64_t                                  last_present_msc;

   bool                                      threaded;
   VkResult                                  status;
   xcb_present_complete_mode_t               last_present_mode;
   struct wsi_queue                          present_queue;
   struct wsi_queue                          acquire_queue;
   pthread_t                                 queue_manager;

   struct x11_image                          images[0];
};
/**
 * Update the swapchain status with the result of an operation, and return
 * the combined status. The chain status will eventually be returned from
 * AcquireNextImage and QueuePresent.
 *
 * We make sure to 'stick' more pessimistic statuses: an out-of-date error
 * is permanent once seen, and every subsequent call will return this. If
 * this has not been seen, success will be returned.
 */
static VkResult
x11_swapchain_result(struct x11_swapchain *chain, VkResult result)
{
   /* Prioritise returning existing errors for consistency. */
   if (chain->status < 0)
      return chain->status;

   /* If we have a new error, mark it as permanent on the chain and return. */
   if (result < 0) {
      chain->status = result;
      return result;
   }

   /* Return temporary errors, but don't persist them. */
   if (result == VK_TIMEOUT || result == VK_NOT_READY)
      return result;

   /* Suboptimal isn't an error, but is a status which sticks to the swapchain
    * and is always returned rather than success.
    */
   if (result == VK_SUBOPTIMAL_KHR) {
      chain->status = result;
      return result;
   }

   /* No changes, so return the last status. */
   return chain->status;
}
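/* Worked example of the stickiness rules above: once any operation returns
 * VK_ERROR_OUT_OF_DATE_KHR, every later acquire/present on this chain
 * reports VK_ERROR_OUT_OF_DATE_KHR even if the individual operation
 * succeeded; a VK_SUBOPTIMAL_KHR likewise persists, but a VK_TIMEOUT or
 * VK_NOT_READY is reported once and then forgotten.
 */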
static struct wsi_image *
x11_get_wsi_image(struct wsi_swapchain *wsi_chain, uint32_t image_index)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)wsi_chain;
   return &chain->images[image_index].base;
}
/**
 * Process an X11 Present event. Does not update chain->status.
 */
static VkResult
x11_handle_dri3_present_event(struct x11_swapchain *chain,
                              xcb_present_generic_event_t *event)
{
   switch (event->evtype) {
   case XCB_PRESENT_CONFIGURE_NOTIFY: {
      xcb_present_configure_notify_event_t *config = (void *) event;

      if (config->width != chain->extent.width ||
          config->height != chain->extent.height)
         return VK_ERROR_OUT_OF_DATE_KHR;

      break;
   }

   case XCB_PRESENT_EVENT_IDLE_NOTIFY: {
      xcb_present_idle_notify_event_t *idle = (void *) event;

      for (unsigned i = 0; i < chain->base.image_count; i++) {
         if (chain->images[i].pixmap == idle->pixmap) {
            chain->images[i].busy = false;
            if (chain->threaded)
               wsi_queue_push(&chain->acquire_queue, i);
            break;
         }
      }

      break;
   }

   case XCB_PRESENT_EVENT_COMPLETE_NOTIFY: {
      xcb_present_complete_notify_event_t *complete = (void *) event;
      if (complete->kind == XCB_PRESENT_COMPLETE_KIND_PIXMAP)
         chain->last_present_msc = complete->msc;

      VkResult result = VK_SUCCESS;

      /* The winsys is now trying to flip directly and cannot due to our
       * configuration. Request the user reallocate.
       */
#ifdef HAVE_DRI3_MODIFIERS
      if (complete->mode == XCB_PRESENT_COMPLETE_MODE_SUBOPTIMAL_COPY &&
          chain->last_present_mode != XCB_PRESENT_COMPLETE_MODE_SUBOPTIMAL_COPY)
         result = VK_SUBOPTIMAL_KHR;
#endif

      /* When we go from flipping to copying, the odds are very likely that
       * we could reallocate in a more optimal way if we didn't have to care
       * about scanout, so we always do this.
       */
      if (complete->mode == XCB_PRESENT_COMPLETE_MODE_COPY &&
          chain->last_present_mode == XCB_PRESENT_COMPLETE_MODE_FLIP)
         result = VK_SUBOPTIMAL_KHR;

      chain->last_present_mode = complete->mode;
      return result;
   }

   default:
      break;
   }

   return VK_SUCCESS;
}
static uint64_t wsi_get_current_time(void)
{
   uint64_t current_time;
   struct timespec tv;

   clock_gettime(CLOCK_MONOTONIC, &tv);
   current_time = tv.tv_nsec + tv.tv_sec*1000000000ull;

   return current_time;
}
static uint64_t wsi_get_absolute_timeout(uint64_t timeout)
{
   uint64_t current_time = wsi_get_current_time();

   timeout = MIN2(UINT64_MAX - current_time, timeout);

   return current_time + timeout;
}
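/* The MIN2 clamp above guards against overflow: with an effectively
 * infinite user timeout (close to UINT64_MAX nanoseconds), current_time +
 * timeout would wrap around to a tiny deadline; clamping to
 * UINT64_MAX - current_time makes the deadline saturate instead.
 */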
static VkResult
x11_acquire_next_image_poll_x11(struct x11_swapchain *chain,
                                uint32_t *image_index, uint64_t timeout)
{
   xcb_generic_event_t *event;
   struct pollfd pfds;
   uint64_t atimeout;

   while (1) {
      for (uint32_t i = 0; i < chain->base.image_count; i++) {
         if (!chain->images[i].busy) {
            /* We found a non-busy image */
            xshmfence_await(chain->images[i].shm_fence);
            *image_index = i;
            chain->images[i].busy = true;
            return x11_swapchain_result(chain, VK_SUCCESS);
         }
      }

      xcb_flush(chain->conn);

      if (timeout == UINT64_MAX) {
         event = xcb_wait_for_special_event(chain->conn, chain->special_event);
         if (!event)
            return x11_swapchain_result(chain, VK_ERROR_OUT_OF_DATE_KHR);
      } else {
         event = xcb_poll_for_special_event(chain->conn, chain->special_event);
         if (!event) {
            int ret;
            if (timeout == 0)
               return x11_swapchain_result(chain, VK_NOT_READY);

            atimeout = wsi_get_absolute_timeout(timeout);

            pfds.fd = xcb_get_file_descriptor(chain->conn);
            pfds.events = POLLIN;
            ret = poll(&pfds, 1, timeout / 1000 / 1000);
            if (ret == 0)
               return x11_swapchain_result(chain, VK_TIMEOUT);
            if (ret == -1)
               return x11_swapchain_result(chain, VK_ERROR_OUT_OF_DATE_KHR);

            /* If a non-special event happens, the fd will still
             * poll. So recalculate the timeout now just in case.
             */
            uint64_t current_time = wsi_get_current_time();
            if (atimeout > current_time)
               timeout = atimeout - current_time;
            else
               timeout = 0;
            continue;
         }
      }

      /* Update the swapchain status here. We may catch non-fatal errors here,
       * in which case we need to update the status and continue.
       */
      VkResult result = x11_handle_dri3_present_event(chain, (void *)event);
      free(event);
      if (result < 0)
         return x11_swapchain_result(chain, result);
      else
         x11_swapchain_result(chain, result);
   }
}
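/* Note on units in the poll() call above: Vulkan timeouts are in
 * nanoseconds while poll() takes milliseconds, hence the
 * timeout / 1000 / 1000 conversion.  Because any X11 traffic wakes the
 * connection fd, not just Present special events, the absolute deadline
 * is recomputed after each wakeup before polling again.
 */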
static VkResult
x11_acquire_next_image_from_queue(struct x11_swapchain *chain,
                                  uint32_t *image_index_out, uint64_t timeout)
{
   assert(chain->threaded);

   uint32_t image_index;
   VkResult result = wsi_queue_pull(&chain->acquire_queue,
                                    &image_index, timeout);
   if (result < 0 || result == VK_TIMEOUT) {
      /* On error, the thread has shut down, so safe to update chain->status.
       * Calling x11_swapchain_result with VK_TIMEOUT won't modify
       * chain->status so that is also safe.
       */
      return x11_swapchain_result(chain, result);
   } else if (chain->status < 0) {
      return chain->status;
   }

   assert(image_index < chain->base.image_count);
   xshmfence_await(chain->images[image_index].shm_fence);

   *image_index_out = image_index;

   return chain->status;
}
static VkResult
x11_present_to_x11(struct x11_swapchain *chain, uint32_t image_index,
                   uint64_t target_msc)
{
   struct x11_image *image = &chain->images[image_index];

   assert(image_index < chain->base.image_count);

   uint32_t options = XCB_PRESENT_OPTION_NONE;

   int64_t divisor = 0;
   int64_t remainder = 0;

   if (chain->base.present_mode == VK_PRESENT_MODE_IMMEDIATE_KHR)
      options |= XCB_PRESENT_OPTION_ASYNC;

#ifdef HAVE_DRI3_MODIFIERS
   if (chain->has_dri3_modifiers)
      options |= XCB_PRESENT_OPTION_SUBOPTIMAL;
#endif

   xshmfence_reset(image->shm_fence);

   ++chain->send_sbc;
   xcb_void_cookie_t cookie =
      xcb_present_pixmap(chain->conn,
                         chain->window,
                         image->pixmap,
                         (uint32_t) chain->send_sbc,
                         0,                                    /* valid */
                         0,                                    /* update */
                         0,                                    /* x_off */
                         0,                                    /* y_off */
                         XCB_NONE,                             /* target_crtc */
                         XCB_NONE,                             /* wait_fence */
                         image->sync_fence,                    /* idle_fence */
                         options,
                         target_msc,
                         divisor,
                         remainder, 0, NULL);
   xcb_discard_reply(chain->conn, cookie.sequence);
   image->busy = true;

   xcb_flush(chain->conn);

   return x11_swapchain_result(chain, VK_SUCCESS);
}
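/* Presentation timing sketch (per Present extension semantics): a
 * target_msc of 0 asks the server to present at the next opportunity,
 * while a non-zero target_msc (as used by the FIFO thread below) delays
 * the present until the window's media stream counter reaches that value.
 * divisor/remainder would further constrain which MSC values are
 * acceptable; both are left at 0 here.
 */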
static VkResult
x11_acquire_next_image(struct wsi_swapchain *anv_chain,
                       const VkAcquireNextImageInfoKHR *info,
                       uint32_t *image_index)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
   uint64_t timeout = info->timeout;

   if (chain->threaded) {
      return x11_acquire_next_image_from_queue(chain, image_index, timeout);
   } else {
      return x11_acquire_next_image_poll_x11(chain, image_index, timeout);
   }
}
static VkResult
x11_queue_present(struct wsi_swapchain *anv_chain,
                  uint32_t image_index,
                  const VkPresentRegionKHR *damage)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;

   if (chain->threaded) {
      wsi_queue_push(&chain->present_queue, image_index);
      return chain->status;
   } else {
      return x11_present_to_x11(chain, image_index, 0);
   }
}
static void *
x11_manage_fifo_queues(void *state)
{
   struct x11_swapchain *chain = state;
   VkResult result = VK_SUCCESS;

   assert(chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR);

   while (chain->status >= 0) {
      /* It should be safe to unconditionally block here.  Later in the loop
       * we block until the previous present has landed on-screen.  At that
       * point, we should have received IDLE_NOTIFY on all images presented
       * before that point so the client should be able to acquire any image
       * other than the currently presented one.
       */
      uint32_t image_index = 0;
      result = wsi_queue_pull(&chain->present_queue, &image_index, INT64_MAX);
      assert(result != VK_TIMEOUT);
      if (result < 0) {
         goto fail;
      } else if (chain->status < 0) {
         /* The status can change underneath us if the swapchain is destroyed
          * from another thread.
          */
         return NULL;
      }

      uint64_t target_msc = chain->last_present_msc + 1;
      result = x11_present_to_x11(chain, image_index, target_msc);
      if (result < 0)
         goto fail;

      while (chain->last_present_msc < target_msc) {
         xcb_generic_event_t *event =
            xcb_wait_for_special_event(chain->conn, chain->special_event);
         if (!event) {
            result = VK_ERROR_OUT_OF_DATE_KHR;
            goto fail;
         }

         result = x11_handle_dri3_present_event(chain, (void *)event);
         free(event);
         if (result < 0)
            goto fail;
      }
   }

fail:
   x11_swapchain_result(chain, result);
   wsi_queue_push(&chain->acquire_queue, UINT32_MAX);

   return NULL;
}
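/* Thread protocol summary (as implemented above): the application thread
 * pushes presented image indices onto present_queue; this manager thread
 * performs the actual xcb_present_pixmap targeting last_present_msc + 1
 * and indices flow back onto acquire_queue as IDLE_NOTIFY events arrive.
 * UINT32_MAX on either queue is the shutdown/error sentinel, which is why
 * the queues are sized image_count + 1.
 */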
static VkResult
x11_image_init(VkDevice device_h, struct x11_swapchain *chain,
               const VkSwapchainCreateInfoKHR *pCreateInfo,
               const VkAllocationCallbacks* pAllocator,
               const uint64_t *const *modifiers,
               const uint32_t *num_modifiers,
               int num_tranches, struct x11_image *image)
{
   xcb_void_cookie_t cookie;
   VkResult result;
   uint32_t bpp = 32;

   if (chain->base.use_prime_blit) {
      bool use_modifier = num_tranches > 0;
      result = wsi_create_prime_image(&chain->base, pCreateInfo, use_modifier, &image->base);
   } else {
      result = wsi_create_native_image(&chain->base, pCreateInfo,
                                       num_tranches, num_modifiers, modifiers,
                                       &image->base);
   }
   if (result < 0)
      return result;

   image->pixmap = xcb_generate_id(chain->conn);

#ifdef HAVE_DRI3_MODIFIERS
   if (image->base.drm_modifier != DRM_FORMAT_MOD_INVALID) {
      /* If the image has a modifier, we must have DRI3 v1.2. */
      assert(chain->has_dri3_modifiers);

      cookie =
         xcb_dri3_pixmap_from_buffers_checked(chain->conn,
                                              image->pixmap,
                                              chain->window,
                                              image->base.num_planes,
                                              pCreateInfo->imageExtent.width,
                                              pCreateInfo->imageExtent.height,
                                              image->base.row_pitches[0],
                                              image->base.offsets[0],
                                              image->base.row_pitches[1],
                                              image->base.offsets[1],
                                              image->base.row_pitches[2],
                                              image->base.offsets[2],
                                              image->base.row_pitches[3],
                                              image->base.offsets[3],
                                              chain->depth, bpp,
                                              image->base.drm_modifier,
                                              image->base.fds);
   } else
#endif
   {
      /* Without passing modifiers, we can't have multi-plane RGB images. */
      assert(image->base.num_planes == 1);

      cookie =
         xcb_dri3_pixmap_from_buffer_checked(chain->conn,
                                             image->pixmap,
                                             chain->window,
                                             image->base.sizes[0],
                                             pCreateInfo->imageExtent.width,
                                             pCreateInfo->imageExtent.height,
                                             image->base.row_pitches[0],
                                             chain->depth, bpp,
                                             image->base.fds[0]);
   }

   xcb_discard_reply(chain->conn, cookie.sequence);

   /* XCB has now taken ownership of the FDs. */
   for (int i = 0; i < image->base.num_planes; i++)
      image->base.fds[i] = -1;

   int fence_fd = xshmfence_alloc_shm();
   if (fence_fd < 0)
      goto fail_pixmap;

   image->shm_fence = xshmfence_map_shm(fence_fd);
   if (image->shm_fence == NULL)
      goto fail_shmfence_alloc;

   image->sync_fence = xcb_generate_id(chain->conn);
   xcb_dri3_fence_from_fd(chain->conn,
                          image->pixmap,
                          image->sync_fence,
                          false,
                          fence_fd);

   image->busy = false;
   xshmfence_trigger(image->shm_fence);

   return VK_SUCCESS;

fail_shmfence_alloc:
   close(fence_fd);

fail_pixmap:
   cookie = xcb_free_pixmap(chain->conn, image->pixmap);
   xcb_discard_reply(chain->conn, cookie.sequence);

   wsi_destroy_image(&chain->base, &image->base);

   return result;
}
static void
x11_image_finish(struct x11_swapchain *chain,
                 const VkAllocationCallbacks* pAllocator,
                 struct x11_image *image)
{
   xcb_void_cookie_t cookie;

   cookie = xcb_sync_destroy_fence(chain->conn, image->sync_fence);
   xcb_discard_reply(chain->conn, cookie.sequence);
   xshmfence_unmap_shm(image->shm_fence);

   cookie = xcb_free_pixmap(chain->conn, image->pixmap);
   xcb_discard_reply(chain->conn, cookie.sequence);

   wsi_destroy_image(&chain->base, &image->base);
}
static void
wsi_x11_get_dri3_modifiers(struct wsi_x11_connection *wsi_conn,
                           xcb_connection_t *conn, xcb_window_t window,
                           uint8_t depth, uint8_t bpp,
                           VkCompositeAlphaFlagsKHR vk_alpha,
                           uint64_t **modifiers_in, uint32_t *num_modifiers_in,
                           uint32_t *num_tranches_in,
                           const VkAllocationCallbacks *pAllocator)
{
   if (!wsi_conn->has_dri3_modifiers)
      goto out;

#ifdef HAVE_DRI3_MODIFIERS
   xcb_generic_error_t *error = NULL;
   xcb_dri3_get_supported_modifiers_cookie_t mod_cookie =
      xcb_dri3_get_supported_modifiers(conn, window, depth, bpp);
   xcb_dri3_get_supported_modifiers_reply_t *mod_reply =
      xcb_dri3_get_supported_modifiers_reply(conn, mod_cookie, &error);
   free(error);

   if (!mod_reply || (mod_reply->num_window_modifiers == 0 &&
                      mod_reply->num_screen_modifiers == 0)) {
      free(mod_reply);
      goto out;
   }

   uint32_t n = 0;
   uint32_t counts[2];
   uint64_t *modifiers[2] = { NULL, NULL };

   if (mod_reply->num_window_modifiers) {
      counts[n] = mod_reply->num_window_modifiers;
      modifiers[n] = vk_alloc(pAllocator,
                              counts[n] * sizeof(uint64_t),
                              8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (!modifiers[n]) {
         free(mod_reply);
         goto out;
      }

      memcpy(modifiers[n],
             xcb_dri3_get_supported_modifiers_window_modifiers(mod_reply),
             counts[n] * sizeof(uint64_t));
      n++;
   }

   if (mod_reply->num_screen_modifiers) {
      counts[n] = mod_reply->num_screen_modifiers;
      modifiers[n] = vk_alloc(pAllocator,
                              counts[n] * sizeof(uint64_t),
                              8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (!modifiers[n]) {
         free(mod_reply);
         vk_free(pAllocator, modifiers[0]);
         goto out;
      }

      memcpy(modifiers[n],
             xcb_dri3_get_supported_modifiers_screen_modifiers(mod_reply),
             counts[n] * sizeof(uint64_t));
      n++;
   }

   for (int i = 0; i < n; i++) {
      modifiers_in[i] = modifiers[i];
      num_modifiers_in[i] = counts[i];
   }
   *num_tranches_in = n;

   free(mod_reply);
   return;
#endif
out:
   *num_tranches_in = 0;
}
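/* The tranches filled in above are ordered by preference: modifiers
 * advertised for the specific window come first (these allow the most
 * optimal scanout for where the window currently is), with the screen-wide
 * modifier list as the more general fallback tranche.
 */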
static VkResult
x11_swapchain_destroy(struct wsi_swapchain *anv_chain,
                      const VkAllocationCallbacks *pAllocator)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
   xcb_void_cookie_t cookie;

   if (chain->threaded) {
      chain->status = VK_ERROR_OUT_OF_DATE_KHR;
      /* Push a UINT32_MAX to wake up the manager */
      wsi_queue_push(&chain->present_queue, UINT32_MAX);
      pthread_join(chain->queue_manager, NULL);
      wsi_queue_destroy(&chain->acquire_queue);
      wsi_queue_destroy(&chain->present_queue);
   }

   for (uint32_t i = 0; i < chain->base.image_count; i++)
      x11_image_finish(chain, pAllocator, &chain->images[i]);

   xcb_unregister_for_special_event(chain->conn, chain->special_event);
   cookie = xcb_present_select_input_checked(chain->conn, chain->event_id,
                                             chain->window,
                                             XCB_PRESENT_EVENT_MASK_NO_EVENT);
   xcb_discard_reply(chain->conn, cookie.sequence);

   wsi_swapchain_finish(&chain->base);

   vk_free(pAllocator, chain);

   return VK_SUCCESS;
}
static VkResult
x11_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
                             VkDevice device,
                             struct wsi_device *wsi_device,
                             int local_fd,
                             const VkSwapchainCreateInfoKHR *pCreateInfo,
                             const VkAllocationCallbacks* pAllocator,
                             struct wsi_swapchain **swapchain_out)
{
   struct x11_swapchain *chain;
   xcb_void_cookie_t cookie;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR);

   const unsigned num_images = pCreateInfo->minImageCount;

   xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
   struct wsi_x11_connection *wsi_conn =
      wsi_x11_get_connection(wsi_device, conn);
   if (!wsi_conn)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   /* Check for whether or not we have a window up-front */
   xcb_window_t window = x11_surface_get_window(icd_surface);
   xcb_get_geometry_reply_t *geometry =
      xcb_get_geometry_reply(conn, xcb_get_geometry(conn, window), NULL);
   if (geometry == NULL)
      return VK_ERROR_SURFACE_LOST_KHR;
   const uint32_t bit_depth = geometry->depth;
   free(geometry);

   size_t size = sizeof(*chain) + num_images * sizeof(chain->images[0]);
   chain = vk_alloc(pAllocator, size, 8,
                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (chain == NULL)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   result = wsi_swapchain_init(wsi_device, &chain->base, device,
                               pCreateInfo, pAllocator);
   if (result != VK_SUCCESS)
      goto fail_alloc;

   chain->base.destroy = x11_swapchain_destroy;
   chain->base.get_wsi_image = x11_get_wsi_image;
   chain->base.acquire_next_image = x11_acquire_next_image;
   chain->base.queue_present = x11_queue_present;
   chain->base.present_mode = pCreateInfo->presentMode;
   chain->base.image_count = num_images;
   chain->conn = conn;
   chain->window = window;
   chain->depth = bit_depth;
   chain->extent = pCreateInfo->imageExtent;
   chain->send_sbc = 0;
   chain->last_present_msc = 0;
   chain->threaded = false;
   chain->status = VK_SUCCESS;
   chain->has_dri3_modifiers = wsi_conn->has_dri3_modifiers;

   /* If we are reallocating from an old swapchain, then we inherit its
    * last completion mode, to ensure we don't get into reallocation
    * cycles. If we are starting anew, we set 'COPY', as that is the only
    * mode which provokes reallocation when anything changes, to make
    * sure we have the most optimal allocation.
    */
   struct x11_swapchain *old_chain = (void *)(intptr_t) pCreateInfo->oldSwapchain;
   if (old_chain)
      chain->last_present_mode = old_chain->last_present_mode;
   else
      chain->last_present_mode = XCB_PRESENT_COMPLETE_MODE_COPY;

   if (!wsi_x11_check_dri3_compatible(conn, local_fd))
       chain->base.use_prime_blit = true;

   chain->event_id = xcb_generate_id(chain->conn);
   xcb_present_select_input(chain->conn, chain->event_id, chain->window,
                            XCB_PRESENT_EVENT_MASK_CONFIGURE_NOTIFY |
                            XCB_PRESENT_EVENT_MASK_COMPLETE_NOTIFY |
                            XCB_PRESENT_EVENT_MASK_IDLE_NOTIFY);

   /* Create an XCB event queue to hold present events outside of the usual
    * application event queue
    */
   chain->special_event =
      xcb_register_for_special_xge(chain->conn, &xcb_present_id,
                                   chain->event_id, NULL);

   chain->gc = xcb_generate_id(chain->conn);
   if (!chain->gc) {
      /* FINISHME: Choose a better error. */
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail_register;
   }

   cookie = xcb_create_gc(chain->conn,
                          chain->gc,
                          chain->window,
                          XCB_GC_GRAPHICS_EXPOSURES,
                          (uint32_t []) { 0 });
   xcb_discard_reply(chain->conn, cookie.sequence);

   uint64_t *modifiers[2] = {NULL, NULL};
   uint32_t num_modifiers[2] = {0, 0};
   uint32_t num_tranches = 0;
   if (wsi_device->supports_modifiers)
      wsi_x11_get_dri3_modifiers(wsi_conn, conn, window, chain->depth, 32,
                                 pCreateInfo->compositeAlpha,
                                 modifiers, num_modifiers, &num_tranches,
                                 pAllocator);

   uint32_t image = 0;
   for (; image < chain->base.image_count; image++) {
      result = x11_image_init(device, chain, pCreateInfo, pAllocator,
                              (const uint64_t *const *)modifiers,
                              num_modifiers, num_tranches,
                              &chain->images[image]);
      if (result != VK_SUCCESS)
         goto fail_init_images;
   }

   if (chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR) {
      chain->threaded = true;

      /* Initialize our queues.  We make them base.image_count + 1 because we
       * will occasionally use UINT32_MAX to signal the other thread that an
       * error has occurred and we don't want an overflow.
       */
      int ret;
      ret = wsi_queue_init(&chain->acquire_queue, chain->base.image_count + 1);
      if (ret) {
         goto fail_init_images;
      }

      ret = wsi_queue_init(&chain->present_queue, chain->base.image_count + 1);
      if (ret) {
         wsi_queue_destroy(&chain->acquire_queue);
         goto fail_init_images;
      }

      for (unsigned i = 0; i < chain->base.image_count; i++)
         wsi_queue_push(&chain->acquire_queue, i);

      ret = pthread_create(&chain->queue_manager, NULL,
                           x11_manage_fifo_queues, chain);
      if (ret) {
         wsi_queue_destroy(&chain->present_queue);
         wsi_queue_destroy(&chain->acquire_queue);
         goto fail_init_images;
      }
   }

   for (int i = 0; i < ARRAY_SIZE(modifiers); i++)
      vk_free(pAllocator, modifiers[i]);
   *swapchain_out = &chain->base;

   return VK_SUCCESS;

fail_init_images:
   for (uint32_t j = 0; j < image; j++)
      x11_image_finish(chain, pAllocator, &chain->images[j]);

   for (int i = 0; i < ARRAY_SIZE(modifiers); i++)
      vk_free(pAllocator, modifiers[i]);

fail_register:
   xcb_unregister_for_special_event(chain->conn, chain->special_event);

   wsi_swapchain_finish(&chain->base);

fail_alloc:
   vk_free(pAllocator, chain);

   return result;
}
VkResult
wsi_x11_init_wsi(struct wsi_device *wsi_device,
                 const VkAllocationCallbacks *alloc)
{
   struct wsi_x11 *wsi;
   VkResult result;

   wsi = vk_alloc(alloc, sizeof(*wsi), 8,
                  VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!wsi) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail;
   }

   int ret = pthread_mutex_init(&wsi->mutex, NULL);
   if (ret != 0) {
      if (ret == ENOMEM) {
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
      } else {
         /* FINISHME: Choose a better error. */
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
      }

      goto fail_alloc;
   }

   wsi->connections = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                              _mesa_key_pointer_equal);
   if (!wsi->connections) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail_mutex;
   }

   wsi->base.get_support = x11_surface_get_support;
   wsi->base.get_capabilities2 = x11_surface_get_capabilities2;
   wsi->base.get_formats = x11_surface_get_formats;
   wsi->base.get_formats2 = x11_surface_get_formats2;
   wsi->base.get_present_modes = x11_surface_get_present_modes;
   wsi->base.get_present_rectangles = x11_surface_get_present_rectangles;
   wsi->base.create_swapchain = x11_surface_create_swapchain;

   wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB] = &wsi->base;
   wsi_device->wsi[VK_ICD_WSI_PLATFORM_XLIB] = &wsi->base;

   return VK_SUCCESS;

fail_mutex:
   pthread_mutex_destroy(&wsi->mutex);
fail_alloc:
   vk_free(alloc, wsi);
fail:
   wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB] = NULL;
   wsi_device->wsi[VK_ICD_WSI_PLATFORM_XLIB] = NULL;

   return result;
}
void
wsi_x11_finish_wsi(struct wsi_device *wsi_device,
                   const VkAllocationCallbacks *alloc)
{
   struct wsi_x11 *wsi =
      (struct wsi_x11 *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB];

   if (wsi) {
      struct hash_entry *entry;
      hash_table_foreach(wsi->connections, entry)
         wsi_x11_connection_destroy(wsi_device, entry->data);

      _mesa_hash_table_destroy(wsi->connections, NULL);

      pthread_mutex_destroy(&wsi->mutex);

      vk_free(alloc, wsi);
   }
}