/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <X11/Xlib-xcb.h>
#include <X11/xshmfence.h>
#include <xcb/xcb.h>
#include <xcb/dri3.h>
#include <xcb/present.h>

#include "util/macros.h"
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <fcntl.h>
#include <poll.h>
#include "drm-uapi/drm_fourcc.h"
#include "util/hash_table.h"
#include "util/xmlconfig.h"

#include "vk_util.h"
#include "vk_enum_to_str.h"
#include "wsi_common_private.h"
#include "wsi_common_x11.h"
#include "wsi_common_queue.h"

#define typed_memcpy(dest, src, count) ({ \
   STATIC_ASSERT(sizeof(*src) == sizeof(*dest)); \
   memcpy((dest), (src), (count) * sizeof(*(src))); \
})

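/* Editor's note (illustrative, not part of the original source): typed_memcpy
 * is a size-checked memcpy.  A typical use appears later in this file, e.g.
 *
 *    typed_memcpy(pPresentModes, present_modes, *pPresentModeCount);
 *
 * which fails to compile if the source and destination element sizes differ.
 */
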
struct wsi_x11_connection {
   bool has_dri3;
   bool has_dri3_modifiers;
   bool has_present;
   bool is_proprietary_x11;
};

struct wsi_x11 {
   struct wsi_interface base;

   pthread_mutex_t mutex;
   /* Hash table of xcb_connection -> wsi_x11_connection mappings */
   struct hash_table *connections;
};

/**
 * Wrapper around xcb_dri3_open
 */
static int
wsi_dri3_open(xcb_connection_t *conn,
              xcb_window_t root,
              uint32_t provider)
{
   xcb_dri3_open_cookie_t cookie;
   xcb_dri3_open_reply_t *reply;
   int fd;

   cookie = xcb_dri3_open(conn, root, provider);

   reply = xcb_dri3_open_reply(conn, cookie, NULL);
   if (!reply)
      return -1;

   if (reply->nfd != 1) {
      free(reply);
      return -1;
   }

   fd = xcb_dri3_open_reply_fds(conn, reply)[0];
   free(reply);
   fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);

   return fd;
}

static bool
wsi_x11_check_dri3_compatible(const struct wsi_device *wsi_dev,
                              xcb_connection_t *conn)
{
   xcb_screen_iterator_t screen_iter =
      xcb_setup_roots_iterator(xcb_get_setup(conn));
   xcb_screen_t *screen = screen_iter.data;

   int dri3_fd = wsi_dri3_open(conn, screen->root, None);
   if (dri3_fd == -1)
      return true;

   bool match = wsi_device_matches_drm_fd(wsi_dev, dri3_fd);

   close(dri3_fd);

   return match;
}

static struct wsi_x11_connection *
wsi_x11_connection_create(struct wsi_device *wsi_dev,
                          xcb_connection_t *conn)
{
   xcb_query_extension_cookie_t dri3_cookie, pres_cookie, amd_cookie, nv_cookie;
   xcb_query_extension_reply_t *dri3_reply, *pres_reply, *amd_reply, *nv_reply;
   bool has_dri3_v1_2 = false;
   bool has_present_v1_2 = false;

   struct wsi_x11_connection *wsi_conn =
      vk_alloc(&wsi_dev->instance_alloc, sizeof(*wsi_conn), 8,
               VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!wsi_conn)
      return NULL;

   dri3_cookie = xcb_query_extension(conn, 4, "DRI3");
   pres_cookie = xcb_query_extension(conn, 7, "Present");

   /* We try to be nice to users and emit a warning if they try to use a
    * Vulkan application on a system without DRI3 enabled.  However, this ends
    * up spewing the warning when a user has, for example, both Intel
    * integrated graphics and a discrete card with proprietary drivers and are
    * running on the discrete card with the proprietary DDX.  In this case, we
    * really don't want to print the warning because it just confuses users.
    * As a heuristic to detect this case, we check for a couple of proprietary
    * X11 extensions.
    */
   amd_cookie = xcb_query_extension(conn, 11, "ATIFGLRXDRI");
   nv_cookie = xcb_query_extension(conn, 10, "NV-CONTROL");

   dri3_reply = xcb_query_extension_reply(conn, dri3_cookie, NULL);
   pres_reply = xcb_query_extension_reply(conn, pres_cookie, NULL);
   amd_reply = xcb_query_extension_reply(conn, amd_cookie, NULL);
   nv_reply = xcb_query_extension_reply(conn, nv_cookie, NULL);
   if (!dri3_reply || !pres_reply) {
      free(dri3_reply);
      free(pres_reply);
      free(amd_reply);
      free(nv_reply);
      vk_free(&wsi_dev->instance_alloc, wsi_conn);
      return NULL;
   }

   wsi_conn->has_dri3 = dri3_reply->present != 0;
#ifdef HAVE_DRI3_MODIFIERS
   if (wsi_conn->has_dri3) {
      xcb_dri3_query_version_cookie_t ver_cookie;
      xcb_dri3_query_version_reply_t *ver_reply;

      ver_cookie = xcb_dri3_query_version(conn, 1, 2);
      ver_reply = xcb_dri3_query_version_reply(conn, ver_cookie, NULL);
      has_dri3_v1_2 = ver_reply != NULL &&
         (ver_reply->major_version > 1 || ver_reply->minor_version >= 2);
      free(ver_reply);
   }
#endif

   wsi_conn->has_present = pres_reply->present != 0;
#ifdef HAVE_DRI3_MODIFIERS
   if (wsi_conn->has_present) {
      xcb_present_query_version_cookie_t ver_cookie;
      xcb_present_query_version_reply_t *ver_reply;

      ver_cookie = xcb_present_query_version(conn, 1, 2);
      ver_reply = xcb_present_query_version_reply(conn, ver_cookie, NULL);
      has_present_v1_2 =
         (ver_reply->major_version > 1 || ver_reply->minor_version >= 2);
      free(ver_reply);
   }
#endif

   wsi_conn->has_dri3_modifiers = has_dri3_v1_2 && has_present_v1_2;
   wsi_conn->is_proprietary_x11 = false;
   if (amd_reply && amd_reply->present)
      wsi_conn->is_proprietary_x11 = true;
   if (nv_reply && nv_reply->present)
      wsi_conn->is_proprietary_x11 = true;

   free(dri3_reply);
   free(pres_reply);
   free(amd_reply);
   free(nv_reply);

   return wsi_conn;
}

static void
wsi_x11_connection_destroy(struct wsi_device *wsi_dev,
                           struct wsi_x11_connection *conn)
{
   vk_free(&wsi_dev->instance_alloc, conn);
}

static bool
wsi_x11_check_for_dri3(struct wsi_x11_connection *wsi_conn)
{
   if (wsi_conn->has_dri3)
      return true;

   if (!wsi_conn->is_proprietary_x11) {
      fprintf(stderr, "vulkan: No DRI3 support detected - required for presentation\n"
                      "Note: you can probably enable DRI3 in your Xorg config\n");
   }

   return false;
}

static struct wsi_x11_connection *
wsi_x11_get_connection(struct wsi_device *wsi_dev,
                       xcb_connection_t *conn)
{
   struct wsi_x11 *wsi =
      (struct wsi_x11 *)wsi_dev->wsi[VK_ICD_WSI_PLATFORM_XCB];

   pthread_mutex_lock(&wsi->mutex);

   struct hash_entry *entry = _mesa_hash_table_search(wsi->connections, conn);
   if (!entry) {
      /* We're about to make a bunch of blocking calls.  Let's drop the
       * mutex for now so we don't block up too badly.
       */
      pthread_mutex_unlock(&wsi->mutex);

      struct wsi_x11_connection *wsi_conn =
         wsi_x11_connection_create(wsi_dev, conn);
      if (!wsi_conn)
         return NULL;

      pthread_mutex_lock(&wsi->mutex);

      entry = _mesa_hash_table_search(wsi->connections, conn);
      if (entry) {
         /* Oops, someone raced us to it */
         wsi_x11_connection_destroy(wsi_dev, wsi_conn);
      } else {
         entry = _mesa_hash_table_insert(wsi->connections, conn, wsi_conn);
      }
   }

   pthread_mutex_unlock(&wsi->mutex);

   return entry->data;
}

static const VkFormat formats[] = {
   VK_FORMAT_B8G8R8A8_SRGB,
   VK_FORMAT_B8G8R8A8_UNORM,
};

static const VkPresentModeKHR present_modes[] = {
   VK_PRESENT_MODE_IMMEDIATE_KHR,
   VK_PRESENT_MODE_MAILBOX_KHR,
   VK_PRESENT_MODE_FIFO_KHR,
};

static xcb_screen_t *
get_screen_for_root(xcb_connection_t *conn, xcb_window_t root)
{
   xcb_screen_iterator_t screen_iter =
      xcb_setup_roots_iterator(xcb_get_setup(conn));

   for (; screen_iter.rem; xcb_screen_next (&screen_iter)) {
      if (screen_iter.data->root == root)
         return screen_iter.data;
   }

   return NULL;
}

static xcb_visualtype_t *
screen_get_visualtype(xcb_screen_t *screen, xcb_visualid_t visual_id,
                      unsigned *depth)
{
   xcb_depth_iterator_t depth_iter =
      xcb_screen_allowed_depths_iterator(screen);

   for (; depth_iter.rem; xcb_depth_next (&depth_iter)) {
      xcb_visualtype_iterator_t visual_iter =
         xcb_depth_visuals_iterator (depth_iter.data);

      for (; visual_iter.rem; xcb_visualtype_next (&visual_iter)) {
         if (visual_iter.data->visual_id == visual_id) {
            if (depth)
               *depth = depth_iter.data->depth;
            return visual_iter.data;
         }
      }
   }

   return NULL;
}

static xcb_visualtype_t *
connection_get_visualtype(xcb_connection_t *conn, xcb_visualid_t visual_id,
                          unsigned *depth)
{
   xcb_screen_iterator_t screen_iter =
      xcb_setup_roots_iterator(xcb_get_setup(conn));

   /* For this we have to iterate over all of the screens which is rather
    * annoying.  Fortunately, there is probably only 1.
    */
   for (; screen_iter.rem; xcb_screen_next (&screen_iter)) {
      xcb_visualtype_t *visual = screen_get_visualtype(screen_iter.data,
                                                       visual_id, depth);
      if (visual)
         return visual;
   }

   return NULL;
}

static xcb_visualtype_t *
get_visualtype_for_window(xcb_connection_t *conn, xcb_window_t window,
                          unsigned *depth)
{
   xcb_query_tree_cookie_t tree_cookie;
   xcb_get_window_attributes_cookie_t attrib_cookie;
   xcb_query_tree_reply_t *tree;
   xcb_get_window_attributes_reply_t *attrib;

   tree_cookie = xcb_query_tree(conn, window);
   attrib_cookie = xcb_get_window_attributes(conn, window);

   tree = xcb_query_tree_reply(conn, tree_cookie, NULL);
   attrib = xcb_get_window_attributes_reply(conn, attrib_cookie, NULL);
   if (attrib == NULL || tree == NULL) {
      free(attrib);
      free(tree);
      return NULL;
   }

   xcb_window_t root = tree->root;
   xcb_visualid_t visual_id = attrib->visual;
   free(attrib);
   free(tree);

   xcb_screen_t *screen = get_screen_for_root(conn, root);
   if (screen == NULL)
      return NULL;

   return screen_get_visualtype(screen, visual_id, depth);
}

static bool
visual_has_alpha(xcb_visualtype_t *visual, unsigned depth)
{
   uint32_t rgb_mask = visual->red_mask |
                       visual->green_mask |
                       visual->blue_mask;

   uint32_t all_mask = 0xffffffff >> (32 - depth);

   /* Do we have bits left over after RGB? */
   return (all_mask & ~rgb_mask) != 0;
}

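/* Illustrative arithmetic (editor's example, not from the original source):
 * for a depth-32 visual whose red/green/blue masks together cover bits 0-23,
 * all_mask is 0xffffffff, so ~rgb_mask leaves the top 8 bits set and the
 * function reports an alpha channel.  A depth-24 visual with the same RGB
 * masks yields all_mask == 0x00ffffff and no bits left over, so no alpha.
 */
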
VkBool32 wsi_get_physical_device_xcb_presentation_support(
    struct wsi_device *wsi_device,
    uint32_t queueFamilyIndex,
    xcb_connection_t *connection,
    xcb_visualid_t visual_id)
{
   struct wsi_x11_connection *wsi_conn =
      wsi_x11_get_connection(wsi_device, connection);

   if (!wsi_conn)
      return false;

   if (!wsi_x11_check_for_dri3(wsi_conn))
      return false;

   unsigned visual_depth;
   if (!connection_get_visualtype(connection, visual_id, &visual_depth))
      return false;

   if (visual_depth != 24 && visual_depth != 32)
      return false;

   return true;
}

static xcb_connection_t *
x11_surface_get_connection(VkIcdSurfaceBase *icd_surface)
{
   if (icd_surface->platform == VK_ICD_WSI_PLATFORM_XLIB)
      return XGetXCBConnection(((VkIcdSurfaceXlib *)icd_surface)->dpy);

   return ((VkIcdSurfaceXcb *)icd_surface)->connection;
}

static xcb_window_t
x11_surface_get_window(VkIcdSurfaceBase *icd_surface)
{
   if (icd_surface->platform == VK_ICD_WSI_PLATFORM_XLIB)
      return ((VkIcdSurfaceXlib *)icd_surface)->window;

   return ((VkIcdSurfaceXcb *)icd_surface)->window;
}

static VkResult
x11_surface_get_support(VkIcdSurfaceBase *icd_surface,
                        struct wsi_device *wsi_device,
                        uint32_t queueFamilyIndex,
                        VkBool32 *pSupported)
{
   xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
   xcb_window_t window = x11_surface_get_window(icd_surface);

   struct wsi_x11_connection *wsi_conn =
      wsi_x11_get_connection(wsi_device, conn);
   if (!wsi_conn)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   if (!wsi_x11_check_for_dri3(wsi_conn)) {
      *pSupported = false;
      return VK_SUCCESS;
   }

   unsigned visual_depth;
   if (!get_visualtype_for_window(conn, window, &visual_depth)) {
      *pSupported = false;
      return VK_SUCCESS;
   }

   if (visual_depth != 24 && visual_depth != 32) {
      *pSupported = false;
      return VK_SUCCESS;
   }

   *pSupported = true;
   return VK_SUCCESS;
}

static unsigned
x11_get_min_image_count(struct wsi_device *wsi_device)
{
   if (wsi_device->x11.override_minImageCount)
      return wsi_device->x11.override_minImageCount;

   /* For IMMEDIATE and FIFO, most games work in a pipelined manner where they
    * can produce frames at a rate of 1/MAX(CPU duration, GPU duration), but
    * the render latency is CPU duration + GPU duration.
    *
    * This means that with scanout from pageflipping we need 3 frames to run
    * at full speed:
    * 1) CPU rendering work
    * 2) GPU rendering work
    * 3) scanout
    *
    * Once we have a nonblocking acquire that returns a semaphore we can merge
    * 1 and 3.  Hence the ideal implementation needs only 2 images, but games
    * cannot tell that we currently do not have an ideal implementation, and
    * hence they need to allocate 3 images.  So let us do it for them.
    *
    * This is a tradeoff as it uses more memory than needed for non-fullscreen
    * and non-performance intensive applications.
    */
   return 3;
}

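/* Worked example for the comment above (editor's illustration, assumed
 * numbers): with 5 ms of CPU work and 10 ms of GPU work per frame, the
 * pipeline sustains one frame every MAX(5, 10) = 10 ms while the latency is
 * 5 + 10 = 15 ms; CPU work, GPU work and scanout each hold one of the three
 * images at any given time.
 */
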
static VkResult
x11_surface_get_capabilities(VkIcdSurfaceBase *icd_surface,
                             struct wsi_device *wsi_device,
                             VkSurfaceCapabilitiesKHR *caps)
{
   xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
   xcb_window_t window = x11_surface_get_window(icd_surface);
   xcb_get_geometry_cookie_t geom_cookie;
   xcb_generic_error_t *err;
   xcb_get_geometry_reply_t *geom;
   unsigned visual_depth;

   geom_cookie = xcb_get_geometry(conn, window);

   /* This does a round-trip.  This is why we do get_geometry first and
    * wait to read the reply until after we have a visual.
    */
   xcb_visualtype_t *visual =
      get_visualtype_for_window(conn, window, &visual_depth);

   if (!visual)
      return VK_ERROR_SURFACE_LOST_KHR;

   geom = xcb_get_geometry_reply(conn, geom_cookie, &err);
   if (geom) {
      VkExtent2D extent = { geom->width, geom->height };
      caps->currentExtent = extent;
      caps->minImageExtent = extent;
      caps->maxImageExtent = extent;
   } else {
      /* This can happen if the client didn't wait for the configure event
       * to come back from the compositor.  In that case, we don't know the
       * size of the window so we just return valid "I don't know" stuff.
       */
      caps->currentExtent = (VkExtent2D) { UINT32_MAX, UINT32_MAX };
      caps->minImageExtent = (VkExtent2D) { 1, 1 };
      caps->maxImageExtent = (VkExtent2D) {
         wsi_device->maxImageDimension2D,
         wsi_device->maxImageDimension2D,
      };
   }
   free(err);
   free(geom);

   if (visual_has_alpha(visual, visual_depth)) {
      caps->supportedCompositeAlpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR |
                                      VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR;
   } else {
      caps->supportedCompositeAlpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR |
                                      VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
   }

   caps->minImageCount = x11_get_min_image_count(wsi_device);
   /* There is no real maximum */
   caps->maxImageCount = 0;

   caps->supportedTransforms = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
   caps->currentTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
   caps->maxImageArrayLayers = 1;
   caps->supportedUsageFlags =
      VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
      VK_IMAGE_USAGE_SAMPLED_BIT |
      VK_IMAGE_USAGE_TRANSFER_DST_BIT |
      VK_IMAGE_USAGE_STORAGE_BIT |
      VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;

   return VK_SUCCESS;
}

static VkResult
x11_surface_get_capabilities2(VkIcdSurfaceBase *icd_surface,
                              struct wsi_device *wsi_device,
                              const void *info_next,
                              VkSurfaceCapabilities2KHR *caps)
{
   assert(caps->sType == VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR);

   VkResult result =
      x11_surface_get_capabilities(icd_surface, wsi_device,
                                   &caps->surfaceCapabilities);

   vk_foreach_struct(ext, caps->pNext) {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_SURFACE_PROTECTED_CAPABILITIES_KHR: {
         VkSurfaceProtectedCapabilitiesKHR *protected = (void *)ext;
         protected->supportsProtected = VK_FALSE;
         break;
      }

      default:
         /* Ignored */
         break;
      }
   }

   return result;
}

static void
get_sorted_vk_formats(struct wsi_device *wsi_device, VkFormat *sorted_formats)
{
   memcpy(sorted_formats, formats, sizeof(formats));

   if (wsi_device->force_bgra8_unorm_first) {
      for (unsigned i = 0; i < ARRAY_SIZE(formats); i++) {
         if (sorted_formats[i] == VK_FORMAT_B8G8R8A8_UNORM) {
            sorted_formats[i] = sorted_formats[0];
            sorted_formats[0] = VK_FORMAT_B8G8R8A8_UNORM;
            break;
         }
      }
   }
}

static VkResult
x11_surface_get_formats(VkIcdSurfaceBase *surface,
                        struct wsi_device *wsi_device,
                        uint32_t *pSurfaceFormatCount,
                        VkSurfaceFormatKHR *pSurfaceFormats)
{
   VK_OUTARRAY_MAKE(out, pSurfaceFormats, pSurfaceFormatCount);

   VkFormat sorted_formats[ARRAY_SIZE(formats)];
   get_sorted_vk_formats(wsi_device, sorted_formats);

   for (unsigned i = 0; i < ARRAY_SIZE(sorted_formats); i++) {
      vk_outarray_append(&out, f) {
         f->format = sorted_formats[i];
         f->colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
      }
   }

   return vk_outarray_status(&out);
}

static VkResult
x11_surface_get_formats2(VkIcdSurfaceBase *surface,
                         struct wsi_device *wsi_device,
                         const void *info_next,
                         uint32_t *pSurfaceFormatCount,
                         VkSurfaceFormat2KHR *pSurfaceFormats)
{
   VK_OUTARRAY_MAKE(out, pSurfaceFormats, pSurfaceFormatCount);

   VkFormat sorted_formats[ARRAY_SIZE(formats)];
   get_sorted_vk_formats(wsi_device, sorted_formats);

   for (unsigned i = 0; i < ARRAY_SIZE(sorted_formats); i++) {
      vk_outarray_append(&out, f) {
         assert(f->sType == VK_STRUCTURE_TYPE_SURFACE_FORMAT_2_KHR);
         f->surfaceFormat.format = sorted_formats[i];
         f->surfaceFormat.colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
      }
   }

   return vk_outarray_status(&out);
}

static VkResult
x11_surface_get_present_modes(VkIcdSurfaceBase *surface,
                              uint32_t *pPresentModeCount,
                              VkPresentModeKHR *pPresentModes)
{
   if (pPresentModes == NULL) {
      *pPresentModeCount = ARRAY_SIZE(present_modes);
      return VK_SUCCESS;
   }

   *pPresentModeCount = MIN2(*pPresentModeCount, ARRAY_SIZE(present_modes));
   typed_memcpy(pPresentModes, present_modes, *pPresentModeCount);

   return *pPresentModeCount < ARRAY_SIZE(present_modes) ?
      VK_INCOMPLETE : VK_SUCCESS;
}

static VkResult
x11_surface_get_present_rectangles(VkIcdSurfaceBase *icd_surface,
                                   struct wsi_device *wsi_device,
                                   uint32_t *pRectCount,
                                   VkRect2D *pRects)
{
   xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
   xcb_window_t window = x11_surface_get_window(icd_surface);
   VK_OUTARRAY_MAKE(out, pRects, pRectCount);

   vk_outarray_append(&out, rect) {
      xcb_generic_error_t *err = NULL;
      xcb_get_geometry_cookie_t geom_cookie = xcb_get_geometry(conn, window);
      xcb_get_geometry_reply_t *geom =
         xcb_get_geometry_reply(conn, geom_cookie, &err);
      free(err);
      if (geom) {
         *rect = (VkRect2D) {
            .offset = { 0, 0 },
            .extent = { geom->width, geom->height },
         };
      } else {
         /* This can happen if the client didn't wait for the configure event
          * to come back from the compositor.  In that case, we don't know the
          * size of the window so we just return valid "I don't know" stuff.
          */
         *rect = (VkRect2D) {
            .offset = { 0, 0 },
            .extent = { UINT32_MAX, UINT32_MAX },
         };
      }
      free(geom);
   }

   return vk_outarray_status(&out);
}

VkResult wsi_create_xcb_surface(const VkAllocationCallbacks *pAllocator,
                                const VkXcbSurfaceCreateInfoKHR *pCreateInfo,
                                VkSurfaceKHR *pSurface)
{
   VkIcdSurfaceXcb *surface;

   surface = vk_alloc(pAllocator, sizeof *surface, 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (surface == NULL)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   surface->base.platform = VK_ICD_WSI_PLATFORM_XCB;
   surface->connection = pCreateInfo->connection;
   surface->window = pCreateInfo->window;

   *pSurface = VkIcdSurfaceBase_to_handle(&surface->base);
   return VK_SUCCESS;
}

VkResult wsi_create_xlib_surface(const VkAllocationCallbacks *pAllocator,
                                 const VkXlibSurfaceCreateInfoKHR *pCreateInfo,
                                 VkSurfaceKHR *pSurface)
{
   VkIcdSurfaceXlib *surface;

   surface = vk_alloc(pAllocator, sizeof *surface, 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (surface == NULL)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   surface->base.platform = VK_ICD_WSI_PLATFORM_XLIB;
   surface->dpy = pCreateInfo->dpy;
   surface->window = pCreateInfo->window;

   *pSurface = VkIcdSurfaceBase_to_handle(&surface->base);
   return VK_SUCCESS;
}

struct x11_image {
   struct wsi_image                          base;
   xcb_pixmap_t                              pixmap;
   bool                                      busy;
   struct xshmfence *                        shm_fence;
   uint32_t                                  sync_fence;
};

struct x11_swapchain {
   struct wsi_swapchain                        base;

   bool                                        has_dri3_modifiers;

   xcb_connection_t *                          conn;
   xcb_window_t                                window;
   xcb_gc_t                                    gc;
   uint32_t                                    depth;
   VkExtent2D                                  extent;

   xcb_present_event_t                         event_id;
   xcb_special_event_t *                       special_event;
   uint64_t                                    send_sbc;
   uint64_t                                    last_present_msc;
   int                                         sent_image_count;

   bool                                        has_present_queue;
   bool                                        has_acquire_queue;
   VkResult                                    status;
   xcb_present_complete_mode_t                 last_present_mode;
   struct wsi_queue                            present_queue;
   struct wsi_queue                            acquire_queue;
   pthread_t                                   queue_manager;

   struct x11_image                            images[0];
};
VK_DEFINE_NONDISP_HANDLE_CASTS(x11_swapchain, base.base, VkSwapchainKHR,
                               VK_OBJECT_TYPE_SWAPCHAIN_KHR)

/**
 * Update the swapchain status with the result of an operation, and return
 * the combined status. The chain status will eventually be returned from
 * AcquireNextImage and QueuePresent.
 *
 * We make sure to 'stick' more pessimistic statuses: an out-of-date error
 * is permanent once seen, and every subsequent call will return this. If
 * this has not been seen, success will be returned.
 */
static VkResult
_x11_swapchain_result(struct x11_swapchain *chain, VkResult result,
                      const char *file, int line)
{
   /* Prioritise returning existing errors for consistency. */
   if (chain->status < 0)
      return chain->status;

   /* If we have a new error, mark it as permanent on the chain and return. */
   if (result < 0) {
      fprintf(stderr, "%s:%d: Swapchain status changed to %s\n",
              file, line, vk_Result_to_str(result));
      chain->status = result;
      return result;
   }

   /* Return temporary errors, but don't persist them. */
   if (result == VK_TIMEOUT || result == VK_NOT_READY)
      return result;

   /* Suboptimal isn't an error, but is a status which sticks to the swapchain
    * and is always returned rather than success.
    */
   if (result == VK_SUBOPTIMAL_KHR) {
      if (chain->status != VK_SUBOPTIMAL_KHR) {
         fprintf(stderr, "%s:%d: Swapchain status changed to %s\n",
                 file, line, vk_Result_to_str(result));
      }
      chain->status = result;
      return result;
   }

   /* No changes, so return the last status. */
   return chain->status;
}
#define x11_swapchain_result(chain, result) \
   _x11_swapchain_result(chain, result, __FILE__, __LINE__)

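/* Editor's sketch of the intended call pattern (mirrors the helpers below,
 * not additional API): every path that talks to the X server funnels its
 * result through the macro, e.g.
 *
 *    return x11_swapchain_result(chain, VK_SUCCESS);
 *
 * so that a sticky VK_ERROR_OUT_OF_DATE_KHR or VK_SUBOPTIMAL_KHR recorded
 * earlier is what the caller ultimately sees instead of the newer, more
 * optimistic result.
 */
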
static struct wsi_image *
x11_get_wsi_image(struct wsi_swapchain *wsi_chain, uint32_t image_index)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)wsi_chain;
   return &chain->images[image_index].base;
}

/**
 * Process an X11 Present event. Does not update chain->status.
 */
static VkResult
x11_handle_dri3_present_event(struct x11_swapchain *chain,
                              xcb_present_generic_event_t *event)
{
   switch (event->evtype) {
   case XCB_PRESENT_CONFIGURE_NOTIFY: {
      xcb_present_configure_notify_event_t *config = (void *) event;

      if (config->width != chain->extent.width ||
          config->height != chain->extent.height)
         return VK_ERROR_OUT_OF_DATE_KHR;

      break;
   }

   case XCB_PRESENT_EVENT_IDLE_NOTIFY: {
      xcb_present_idle_notify_event_t *idle = (void *) event;

      for (unsigned i = 0; i < chain->base.image_count; i++) {
         if (chain->images[i].pixmap == idle->pixmap) {
            chain->images[i].busy = false;
            chain->sent_image_count--;
            assert(chain->sent_image_count >= 0);
            if (chain->has_acquire_queue)
               wsi_queue_push(&chain->acquire_queue, i);
            break;
         }
      }

      break;
   }

   case XCB_PRESENT_EVENT_COMPLETE_NOTIFY: {
      xcb_present_complete_notify_event_t *complete = (void *) event;
      if (complete->kind == XCB_PRESENT_COMPLETE_KIND_PIXMAP)
         chain->last_present_msc = complete->msc;

      VkResult result = VK_SUCCESS;

      /* The winsys is now trying to flip directly and cannot due to our
       * configuration. Request the user reallocate.
       */
#ifdef HAVE_DRI3_MODIFIERS
      if (complete->mode == XCB_PRESENT_COMPLETE_MODE_SUBOPTIMAL_COPY &&
          chain->last_present_mode != XCB_PRESENT_COMPLETE_MODE_SUBOPTIMAL_COPY)
         result = VK_SUBOPTIMAL_KHR;
#endif

      /* When we go from flipping to copying, the odds are very likely that
       * we could reallocate in a more optimal way if we didn't have to care
       * about scanout, so we always do this.
       */
      if (complete->mode == XCB_PRESENT_COMPLETE_MODE_COPY &&
          chain->last_present_mode == XCB_PRESENT_COMPLETE_MODE_FLIP)
         result = VK_SUBOPTIMAL_KHR;

      chain->last_present_mode = complete->mode;
      return result;
   }

   default:
      break;
   }

   return VK_SUCCESS;
}

static uint64_t wsi_get_absolute_timeout(uint64_t timeout)
{
   uint64_t current_time = wsi_common_get_current_time();

   timeout = MIN2(UINT64_MAX - current_time, timeout);

   return current_time + timeout;
}

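/* Editor's note (illustrative): the MIN2 clamp above guards the addition
 * against overflow, so wsi_get_absolute_timeout(UINT64_MAX) returns
 * UINT64_MAX ("wait forever") rather than wrapping around to a point in the
 * past.
 */
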
static VkResult
x11_acquire_next_image_poll_x11(struct x11_swapchain *chain,
                                uint32_t *image_index, uint64_t timeout)
{
   xcb_generic_event_t *event;
   struct pollfd pfds;
   uint64_t atimeout;
   while (1) {
      for (uint32_t i = 0; i < chain->base.image_count; i++) {
         if (!chain->images[i].busy) {
            /* We found a non-busy image */
            xshmfence_await(chain->images[i].shm_fence);
            *image_index = i;
            chain->images[i].busy = true;
            return x11_swapchain_result(chain, VK_SUCCESS);
         }
      }

      xcb_flush(chain->conn);

      if (timeout == UINT64_MAX) {
         event = xcb_wait_for_special_event(chain->conn, chain->special_event);
         if (!event)
            return x11_swapchain_result(chain, VK_ERROR_OUT_OF_DATE_KHR);
      } else {
         event = xcb_poll_for_special_event(chain->conn, chain->special_event);
         if (!event) {
            int ret;
            if (timeout == 0)
               return x11_swapchain_result(chain, VK_NOT_READY);

            atimeout = wsi_get_absolute_timeout(timeout);

            pfds.fd = xcb_get_file_descriptor(chain->conn);
            pfds.events = POLLIN;
            ret = poll(&pfds, 1, timeout / 1000 / 1000);
            if (ret == 0)
               return x11_swapchain_result(chain, VK_TIMEOUT);
            if (ret == -1)
               return x11_swapchain_result(chain, VK_ERROR_OUT_OF_DATE_KHR);

            /* If a non-special event happens, the fd will still
             * poll. So recalculate the timeout now just in case.
             */
            uint64_t current_time = wsi_common_get_current_time();
            if (atimeout > current_time)
               timeout = atimeout - current_time;
            else
               timeout = 0;
            continue;
         }
      }

      /* Update the swapchain status here. We may catch non-fatal errors here,
       * in which case we need to update the status and continue.
       */
      VkResult result = x11_handle_dri3_present_event(chain, (void *)event);
      free(event);
      if (result < 0)
         return x11_swapchain_result(chain, result);
      else
         x11_swapchain_result(chain, result);
   }
}

static VkResult
x11_acquire_next_image_from_queue(struct x11_swapchain *chain,
                                  uint32_t *image_index_out, uint64_t timeout)
{
   assert(chain->has_acquire_queue);

   uint32_t image_index;
   VkResult result = wsi_queue_pull(&chain->acquire_queue,
                                    &image_index, timeout);
   if (result < 0 || result == VK_TIMEOUT) {
      /* On error, the thread has shut down, so safe to update chain->status.
       * Calling x11_swapchain_result with VK_TIMEOUT won't modify
       * chain->status so that is also safe.
       */
      return x11_swapchain_result(chain, result);
   } else if (chain->status < 0) {
      return chain->status;
   }

   assert(image_index < chain->base.image_count);
   xshmfence_await(chain->images[image_index].shm_fence);

   *image_index_out = image_index;

   return chain->status;
}

static VkResult
x11_present_to_x11_dri3(struct x11_swapchain *chain, uint32_t image_index,
                        uint32_t target_msc)
{
   struct x11_image *image = &chain->images[image_index];

   assert(image_index < chain->base.image_count);

   uint32_t options = XCB_PRESENT_OPTION_NONE;

   int64_t divisor = 0;
   int64_t remainder = 0;

   if (chain->base.present_mode == VK_PRESENT_MODE_IMMEDIATE_KHR)
      options |= XCB_PRESENT_OPTION_ASYNC;

#ifdef HAVE_DRI3_MODIFIERS
   if (chain->has_dri3_modifiers)
      options |= XCB_PRESENT_OPTION_SUBOPTIMAL;
#endif

   /* Poll for any available event and update the swapchain status. This could
    * update the status of the swapchain to SUBOPTIMAL or OUT_OF_DATE if the
    * associated X11 surface has been resized.
    */
   xcb_generic_event_t *event;
   while ((event = xcb_poll_for_special_event(chain->conn, chain->special_event))) {
      VkResult result = x11_handle_dri3_present_event(chain, (void *)event);
      free(event);
      if (result < 0)
         return x11_swapchain_result(chain, result);
      x11_swapchain_result(chain, result);
   }

   xshmfence_reset(image->shm_fence);

   ++chain->send_sbc;
   ++chain->sent_image_count;
   assert(chain->sent_image_count <= chain->base.image_count);

   xcb_void_cookie_t cookie =
      xcb_present_pixmap(chain->conn,
                         chain->window,
                         image->pixmap,
                         (uint32_t) chain->send_sbc,
                         0,                                    /* valid */
                         0,                                    /* update */
                         0,                                    /* x_off */
                         0,                                    /* y_off */
                         XCB_NONE,                             /* target_crtc */
                         XCB_NONE,
                         image->sync_fence,
                         options,
                         target_msc,
                         divisor,
                         remainder, 0, NULL);
   xcb_discard_reply(chain->conn, cookie.sequence);

   xcb_flush(chain->conn);

   return x11_swapchain_result(chain, VK_SUCCESS);
}

static VkResult
x11_present_to_x11_sw(struct x11_swapchain *chain, uint32_t image_index,
                      uint32_t target_msc)
{
   struct x11_image *image = &chain->images[image_index];

   xcb_void_cookie_t cookie;
   void *myptr;
   chain->base.wsi->MapMemory(chain->base.device,
                              image->base.memory,
                              0, 0, 0, &myptr);

   cookie = xcb_put_image(chain->conn, XCB_IMAGE_FORMAT_Z_PIXMAP,
                          chain->window,
                          chain->gc,
                          image->base.row_pitches[0] / 4,
                          chain->extent.height,
                          0, 0, 0, 24,
                          image->base.row_pitches[0] * chain->extent.height,
                          myptr);

   chain->base.wsi->UnmapMemory(chain->base.device, image->base.memory);
   xcb_discard_reply(chain->conn, cookie.sequence);
   xcb_flush(chain->conn);
   return x11_swapchain_result(chain, VK_SUCCESS);
}

static VkResult
x11_present_to_x11(struct x11_swapchain *chain, uint32_t image_index,
                   uint32_t target_msc)
{
   if (chain->base.wsi->sw)
      return x11_present_to_x11_sw(chain, image_index, target_msc);
   return x11_present_to_x11_dri3(chain, image_index, target_msc);
}

static VkResult
x11_acquire_next_image(struct wsi_swapchain *anv_chain,
                       const VkAcquireNextImageInfoKHR *info,
                       uint32_t *image_index)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
   uint64_t timeout = info->timeout;

   /* If the swapchain is in an error state, don't go any further. */
   if (chain->status < 0)
      return chain->status;

   if (chain->base.wsi->sw) {
      *image_index = 0;
      return VK_SUCCESS;
   }
   if (chain->has_acquire_queue) {
      return x11_acquire_next_image_from_queue(chain, image_index, timeout);
   } else {
      return x11_acquire_next_image_poll_x11(chain, image_index, timeout);
   }
}

static VkResult
x11_queue_present(struct wsi_swapchain *anv_chain,
                  uint32_t image_index,
                  const VkPresentRegionKHR *damage)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;

   /* If the swapchain is in an error state, don't go any further. */
   if (chain->status < 0)
      return chain->status;

   chain->images[image_index].busy = true;
   if (chain->has_present_queue) {
      wsi_queue_push(&chain->present_queue, image_index);
      return chain->status;
   } else {
      return x11_present_to_x11(chain, image_index, 0);
   }
}

static void *
x11_manage_fifo_queues(void *state)
{
   struct x11_swapchain *chain = state;
   VkResult result = VK_SUCCESS;

   assert(chain->has_present_queue);
   while (chain->status >= 0) {
      /* We can block here unconditionally because after an image was sent to
       * the server (later on in this loop) we ensure at least one image is
       * acquirable by the consumer or wait there on such an event.
       */
      uint32_t image_index = 0;
      result = wsi_queue_pull(&chain->present_queue, &image_index, INT64_MAX);
      assert(result != VK_TIMEOUT);
      if (result < 0) {
         goto fail;
      } else if (chain->status < 0) {
         /* The status can change underneath us if the swapchain is destroyed
          * from another thread.
          */
         return NULL;
      }

      if (chain->base.present_mode == VK_PRESENT_MODE_MAILBOX_KHR) {
         result = chain->base.wsi->WaitForFences(chain->base.device, 1,
                                                 &chain->base.fences[image_index],
                                                 true, UINT64_MAX);
         if (result != VK_SUCCESS) {
            result = VK_ERROR_OUT_OF_DATE_KHR;
            goto fail;
         }
      }

      uint64_t target_msc = 0;
      if (chain->has_acquire_queue)
         target_msc = chain->last_present_msc + 1;

      result = x11_present_to_x11(chain, image_index, target_msc);
      if (result < 0)
         goto fail;

      if (chain->has_acquire_queue) {
         /* Wait for our presentation to occur and ensure we have at least one
          * image that can be acquired by the client afterwards. This ensures we
          * can pull on the present-queue on the next loop.
          */
         while (chain->last_present_msc < target_msc ||
                chain->sent_image_count == chain->base.image_count) {
            xcb_generic_event_t *event =
               xcb_wait_for_special_event(chain->conn, chain->special_event);
            if (!event) {
               result = VK_ERROR_OUT_OF_DATE_KHR;
               goto fail;
            }

            result = x11_handle_dri3_present_event(chain, (void *)event);
            result = x11_swapchain_result(chain, result);
            free(event);
            if (result < 0)
               goto fail;
         }
      }
   }

fail:
   x11_swapchain_result(chain, result);
   if (chain->has_acquire_queue)
      wsi_queue_push(&chain->acquire_queue, UINT32_MAX);

   return NULL;
}

static VkResult
x11_image_init(VkDevice device_h, struct x11_swapchain *chain,
               const VkSwapchainCreateInfoKHR *pCreateInfo,
               const VkAllocationCallbacks *pAllocator,
               const uint64_t *const *modifiers,
               const uint32_t *num_modifiers,
               int num_tranches, struct x11_image *image)
{
   xcb_void_cookie_t cookie;
   VkResult result;
   uint32_t bpp = 32;

   if (chain->base.use_prime_blit) {
      bool use_modifier = num_tranches > 0;
      result = wsi_create_prime_image(&chain->base, pCreateInfo, use_modifier, &image->base);
   } else {
      result = wsi_create_native_image(&chain->base, pCreateInfo,
                                       num_tranches, num_modifiers, modifiers,
                                       &image->base);
   }
   if (result < 0)
      return result;

   if (chain->base.wsi->sw) {
      image->busy = false;
      return VK_SUCCESS;
   }
   image->pixmap = xcb_generate_id(chain->conn);

#ifdef HAVE_DRI3_MODIFIERS
   if (image->base.drm_modifier != DRM_FORMAT_MOD_INVALID) {
      /* If the image has a modifier, we must have DRI3 v1.2. */
      assert(chain->has_dri3_modifiers);

      cookie =
         xcb_dri3_pixmap_from_buffers_checked(chain->conn,
                                              image->pixmap,
                                              chain->window,
                                              image->base.num_planes,
                                              pCreateInfo->imageExtent.width,
                                              pCreateInfo->imageExtent.height,
                                              image->base.row_pitches[0],
                                              image->base.offsets[0],
                                              image->base.row_pitches[1],
                                              image->base.offsets[1],
                                              image->base.row_pitches[2],
                                              image->base.offsets[2],
                                              image->base.row_pitches[3],
                                              image->base.offsets[3],
                                              chain->depth, bpp,
                                              image->base.drm_modifier,
                                              image->base.fds);
   } else
#endif
   {
      /* Without passing modifiers, we can't have multi-plane RGB images. */
      assert(image->base.num_planes == 1);

      cookie =
         xcb_dri3_pixmap_from_buffer_checked(chain->conn,
                                             image->pixmap,
                                             chain->window,
                                             image->base.sizes[0],
                                             pCreateInfo->imageExtent.width,
                                             pCreateInfo->imageExtent.height,
                                             image->base.row_pitches[0],
                                             chain->depth, bpp,
                                             image->base.fds[0]);
   }

   xcb_discard_reply(chain->conn, cookie.sequence);

   /* XCB has now taken ownership of the FDs. */
   for (int i = 0; i < image->base.num_planes; i++)
      image->base.fds[i] = -1;

   int fence_fd = xshmfence_alloc_shm();
   if (fence_fd < 0)
      goto fail_pixmap;

   image->shm_fence = xshmfence_map_shm(fence_fd);
   if (image->shm_fence == NULL)
      goto fail_shmfence_alloc;

   image->sync_fence = xcb_generate_id(chain->conn);
   xcb_dri3_fence_from_fd(chain->conn,
                          image->pixmap,
                          image->sync_fence,
                          false,
                          fence_fd);

   image->busy = false;
   xshmfence_trigger(image->shm_fence);

   return VK_SUCCESS;

fail_shmfence_alloc:
   close(fence_fd);

fail_pixmap:
   cookie = xcb_free_pixmap(chain->conn, image->pixmap);
   xcb_discard_reply(chain->conn, cookie.sequence);

   wsi_destroy_image(&chain->base, &image->base);

   return result;
}

static void
x11_image_finish(struct x11_swapchain *chain,
                 const VkAllocationCallbacks *pAllocator,
                 struct x11_image *image)
{
   xcb_void_cookie_t cookie;

   if (!chain->base.wsi->sw) {
      cookie = xcb_sync_destroy_fence(chain->conn, image->sync_fence);
      xcb_discard_reply(chain->conn, cookie.sequence);
      xshmfence_unmap_shm(image->shm_fence);

      cookie = xcb_free_pixmap(chain->conn, image->pixmap);
      xcb_discard_reply(chain->conn, cookie.sequence);
   }

   wsi_destroy_image(&chain->base, &image->base);
}

static void
wsi_x11_get_dri3_modifiers(struct wsi_x11_connection *wsi_conn,
                           xcb_connection_t *conn, xcb_window_t window,
                           uint8_t depth, uint8_t bpp,
                           VkCompositeAlphaFlagsKHR vk_alpha,
                           uint64_t **modifiers_in, uint32_t *num_modifiers_in,
                           uint32_t *num_tranches_in,
                           const VkAllocationCallbacks *pAllocator)
{
   if (!wsi_conn->has_dri3_modifiers)
      goto out;

#ifdef HAVE_DRI3_MODIFIERS
   xcb_generic_error_t *error = NULL;
   xcb_dri3_get_supported_modifiers_cookie_t mod_cookie =
      xcb_dri3_get_supported_modifiers(conn, window, depth, bpp);
   xcb_dri3_get_supported_modifiers_reply_t *mod_reply =
      xcb_dri3_get_supported_modifiers_reply(conn, mod_cookie, &error);
   free(error);

   if (!mod_reply || (mod_reply->num_window_modifiers == 0 &&
                      mod_reply->num_screen_modifiers == 0)) {
      free(mod_reply);
      goto out;
   }

   uint32_t n = 0;
   uint32_t counts[2];
   uint64_t *modifiers[2];

   if (mod_reply->num_window_modifiers) {
      counts[n] = mod_reply->num_window_modifiers;
      modifiers[n] = vk_alloc(pAllocator,
                              counts[n] * sizeof(uint64_t),
                              8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (!modifiers[n]) {
         free(mod_reply);
         goto out;
      }

      memcpy(modifiers[n],
             xcb_dri3_get_supported_modifiers_window_modifiers(mod_reply),
             counts[n] * sizeof(uint64_t));
      n++;
   }

   if (mod_reply->num_screen_modifiers) {
      counts[n] = mod_reply->num_screen_modifiers;
      modifiers[n] = vk_alloc(pAllocator,
                              counts[n] * sizeof(uint64_t),
                              8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (!modifiers[n]) {
         vk_free(pAllocator, modifiers[0]);
         free(mod_reply);
         goto out;
      }

      memcpy(modifiers[n],
             xcb_dri3_get_supported_modifiers_screen_modifiers(mod_reply),
             counts[n] * sizeof(uint64_t));
      n++;
   }

   for (int i = 0; i < n; i++) {
      modifiers_in[i] = modifiers[i];
      num_modifiers_in[i] = counts[i];
   }
   *num_tranches_in = n;

   free(mod_reply);
   return;
#endif
out:
   *num_tranches_in = 0;
}

static VkResult
x11_swapchain_destroy(struct wsi_swapchain *anv_chain,
                      const VkAllocationCallbacks *pAllocator)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
   xcb_void_cookie_t cookie;

   if (chain->has_present_queue) {
      chain->status = VK_ERROR_OUT_OF_DATE_KHR;
      /* Push a UINT32_MAX to wake up the manager */
      wsi_queue_push(&chain->present_queue, UINT32_MAX);
      pthread_join(chain->queue_manager, NULL);

      if (chain->has_acquire_queue)
         wsi_queue_destroy(&chain->acquire_queue);
      wsi_queue_destroy(&chain->present_queue);
   }

   for (uint32_t i = 0; i < chain->base.image_count; i++)
      x11_image_finish(chain, pAllocator, &chain->images[i]);

   xcb_unregister_for_special_event(chain->conn, chain->special_event);
   cookie = xcb_present_select_input_checked(chain->conn, chain->event_id,
                                             chain->window,
                                             XCB_PRESENT_EVENT_MASK_NO_EVENT);
   xcb_discard_reply(chain->conn, cookie.sequence);

   wsi_swapchain_finish(&chain->base);

   vk_free(pAllocator, chain);

   return VK_SUCCESS;
}

static void
wsi_x11_set_adaptive_sync_property(xcb_connection_t *conn,
                                   xcb_drawable_t drawable,
                                   uint32_t state)
{
   static char const name[] = "_VARIABLE_REFRESH";
   xcb_intern_atom_cookie_t cookie;
   xcb_intern_atom_reply_t *reply;
   xcb_void_cookie_t check;

   cookie = xcb_intern_atom(conn, 0, strlen(name), name);
   reply = xcb_intern_atom_reply(conn, cookie, NULL);
   if (reply == NULL)
      return;

   if (state)
      check = xcb_change_property_checked(conn, XCB_PROP_MODE_REPLACE,
                                          drawable, reply->atom,
                                          XCB_ATOM_CARDINAL, 32, 1, &state);
   else
      check = xcb_delete_property_checked(conn, drawable, reply->atom);

   xcb_discard_reply(conn, check.sequence);
   free(reply);
}

static VkResult
x11_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
                             VkDevice device,
                             struct wsi_device *wsi_device,
                             const VkSwapchainCreateInfoKHR *pCreateInfo,
                             const VkAllocationCallbacks *pAllocator,
                             struct wsi_swapchain **swapchain_out)
{
   struct x11_swapchain *chain;
   xcb_void_cookie_t cookie;
   VkResult result;
   VkPresentModeKHR present_mode = wsi_swapchain_get_present_mode(wsi_device, pCreateInfo);

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR);

   unsigned num_images = pCreateInfo->minImageCount;
   if (wsi_device->x11.strict_imageCount)
      num_images = pCreateInfo->minImageCount;
   else if (present_mode == VK_PRESENT_MODE_MAILBOX_KHR)
      num_images = MAX2(num_images, 5);
   else if (wsi_device->x11.ensure_minImageCount)
      num_images = MAX2(num_images, x11_get_min_image_count(wsi_device));

   xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
   struct wsi_x11_connection *wsi_conn =
      wsi_x11_get_connection(wsi_device, conn);
   if (!wsi_conn)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   /* Check for whether or not we have a window up-front */
   xcb_window_t window = x11_surface_get_window(icd_surface);
   xcb_get_geometry_reply_t *geometry =
      xcb_get_geometry_reply(conn, xcb_get_geometry(conn, window), NULL);
   if (geometry == NULL)
      return VK_ERROR_SURFACE_LOST_KHR;
   const uint32_t bit_depth = geometry->depth;
   free(geometry);

   size_t size = sizeof(*chain) + num_images * sizeof(chain->images[0]);
   chain = vk_alloc(pAllocator, size, 8,
                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (chain == NULL)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   result = wsi_swapchain_init(wsi_device, &chain->base, device,
                               pCreateInfo, pAllocator);
   if (result != VK_SUCCESS)
      goto fail_alloc;

   chain->base.destroy = x11_swapchain_destroy;
   chain->base.get_wsi_image = x11_get_wsi_image;
   chain->base.acquire_next_image = x11_acquire_next_image;
   chain->base.queue_present = x11_queue_present;
   chain->base.present_mode = present_mode;
   chain->base.image_count = num_images;
   chain->conn = conn;
   chain->window = window;
   chain->depth = bit_depth;
   chain->extent = pCreateInfo->imageExtent;
   chain->send_sbc = 0;
   chain->sent_image_count = 0;
   chain->last_present_msc = 0;
   chain->has_acquire_queue = false;
   chain->has_present_queue = false;
   chain->status = VK_SUCCESS;
   chain->has_dri3_modifiers = wsi_conn->has_dri3_modifiers;

   /* If we are reallocating from an old swapchain, then we inherit its
    * last completion mode, to ensure we don't get into reallocation
    * cycles. If we are starting anew, we set 'COPY', as that is the only
    * mode which provokes reallocation when anything changes, to make
    * sure we have the most optimal allocation.
    */
   VK_FROM_HANDLE(x11_swapchain, old_chain, pCreateInfo->oldSwapchain);
   if (old_chain)
      chain->last_present_mode = old_chain->last_present_mode;
   else
      chain->last_present_mode = XCB_PRESENT_COMPLETE_MODE_COPY;

   if (!wsi_device->sw)
      if (!wsi_x11_check_dri3_compatible(wsi_device, conn))
         chain->base.use_prime_blit = true;

   chain->event_id = xcb_generate_id(chain->conn);
   xcb_present_select_input(chain->conn, chain->event_id, chain->window,
                            XCB_PRESENT_EVENT_MASK_CONFIGURE_NOTIFY |
                            XCB_PRESENT_EVENT_MASK_COMPLETE_NOTIFY |
                            XCB_PRESENT_EVENT_MASK_IDLE_NOTIFY);

   /* Create an XCB event queue to hold present events outside of the usual
    * application event queue
    */
   chain->special_event =
      xcb_register_for_special_xge(chain->conn, &xcb_present_id,
                                   chain->event_id, NULL);

   chain->gc = xcb_generate_id(chain->conn);
   if (!chain->gc) {
      /* FINISHME: Choose a better error. */
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail_register;
   }

   cookie = xcb_create_gc(chain->conn,
                          chain->gc,
                          chain->window,
                          XCB_GC_GRAPHICS_EXPOSURES,
                          (uint32_t []) { 0 });
   xcb_discard_reply(chain->conn, cookie.sequence);

   uint64_t *modifiers[2] = {NULL, NULL};
   uint32_t num_modifiers[2] = {0, 0};
   uint32_t num_tranches = 0;
   if (wsi_device->supports_modifiers)
      wsi_x11_get_dri3_modifiers(wsi_conn, conn, window, chain->depth, 32,
                                 pCreateInfo->compositeAlpha,
                                 modifiers, num_modifiers, &num_tranches,
                                 pAllocator);

   uint32_t image = 0;
   for (; image < chain->base.image_count; image++) {
      result = x11_image_init(device, chain, pCreateInfo, pAllocator,
                              (const uint64_t *const *)modifiers,
                              num_modifiers, num_tranches,
                              &chain->images[image]);
      if (result != VK_SUCCESS)
         goto fail_init_images;
   }

   if ((chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR ||
        chain->base.present_mode == VK_PRESENT_MODE_MAILBOX_KHR) && !chain->base.wsi->sw) {
      chain->has_present_queue = true;

      /* Initialize our queues.  We make them base.image_count + 1 because we will
       * occasionally use UINT32_MAX to signal the other thread that an error
       * has occurred and we don't want an overflow.
       */
      int ret;
      ret = wsi_queue_init(&chain->present_queue, chain->base.image_count + 1);
      if (ret) {
         goto fail_init_images;
      }

      if (chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR) {
         chain->has_acquire_queue = true;

         ret = wsi_queue_init(&chain->acquire_queue, chain->base.image_count + 1);
         if (ret) {
            wsi_queue_destroy(&chain->present_queue);
            goto fail_init_images;
         }

         for (unsigned i = 0; i < chain->base.image_count; i++)
            wsi_queue_push(&chain->acquire_queue, i);
      }

      ret = pthread_create(&chain->queue_manager, NULL,
                           x11_manage_fifo_queues, chain);
      if (ret) {
         wsi_queue_destroy(&chain->present_queue);
         if (chain->has_acquire_queue)
            wsi_queue_destroy(&chain->acquire_queue);

         goto fail_init_images;
      }
   }

   assert(chain->has_present_queue || !chain->has_acquire_queue);

   for (int i = 0; i < ARRAY_SIZE(modifiers); i++)
      vk_free(pAllocator, modifiers[i]);

   /* It is safe to set it here as only one swapchain can be associated with
    * the window, and swapchain creation does the association. At this point
    * we know the creation is going to succeed. */
   wsi_x11_set_adaptive_sync_property(conn, window,
                                      wsi_device->enable_adaptive_sync);

   *swapchain_out = &chain->base;

   return VK_SUCCESS;

fail_init_images:
   for (uint32_t j = 0; j < image; j++)
      x11_image_finish(chain, pAllocator, &chain->images[j]);

   for (int i = 0; i < ARRAY_SIZE(modifiers); i++)
      vk_free(pAllocator, modifiers[i]);

fail_register:
   xcb_unregister_for_special_event(chain->conn, chain->special_event);

   wsi_swapchain_finish(&chain->base);

fail_alloc:
   vk_free(pAllocator, chain);

   return result;
}

*wsi_device
,
1679 const VkAllocationCallbacks
*alloc
,
1680 const struct driOptionCache
*dri_options
)
1682 struct wsi_x11
*wsi
;
1685 wsi
= vk_alloc(alloc
, sizeof(*wsi
), 8,
1686 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE
);
1688 result
= VK_ERROR_OUT_OF_HOST_MEMORY
;
1692 int ret
= pthread_mutex_init(&wsi
->mutex
, NULL
);
1694 if (ret
== ENOMEM
) {
1695 result
= VK_ERROR_OUT_OF_HOST_MEMORY
;
1697 /* FINISHME: Choose a better error. */
1698 result
= VK_ERROR_OUT_OF_HOST_MEMORY
;
1704 wsi
->connections
= _mesa_hash_table_create(NULL
, _mesa_hash_pointer
,
1705 _mesa_key_pointer_equal
);
1706 if (!wsi
->connections
) {
1707 result
= VK_ERROR_OUT_OF_HOST_MEMORY
;
1712 if (driCheckOption(dri_options
, "vk_x11_override_min_image_count", DRI_INT
)) {
1713 wsi_device
->x11
.override_minImageCount
=
1714 driQueryOptioni(dri_options
, "vk_x11_override_min_image_count");
1716 if (driCheckOption(dri_options
, "vk_x11_strict_image_count", DRI_BOOL
)) {
1717 wsi_device
->x11
.strict_imageCount
=
1718 driQueryOptionb(dri_options
, "vk_x11_strict_image_count");
1720 if (driCheckOption(dri_options
, "vk_x11_ensure_min_image_count", DRI_BOOL
)) {
1721 wsi_device
->x11
.ensure_minImageCount
=
1722 driQueryOptionb(dri_options
, "vk_x11_ensure_min_image_count");
1727 wsi
->base
.get_support
= x11_surface_get_support
;
1728 wsi
->base
.get_capabilities2
= x11_surface_get_capabilities2
;
1729 wsi
->base
.get_formats
= x11_surface_get_formats
;
1730 wsi
->base
.get_formats2
= x11_surface_get_formats2
;
1731 wsi
->base
.get_present_modes
= x11_surface_get_present_modes
;
1732 wsi
->base
.get_present_rectangles
= x11_surface_get_present_rectangles
;
1733 wsi
->base
.create_swapchain
= x11_surface_create_swapchain
;
1735 wsi_device
->wsi
[VK_ICD_WSI_PLATFORM_XCB
] = &wsi
->base
;
1736 wsi_device
->wsi
[VK_ICD_WSI_PLATFORM_XLIB
] = &wsi
->base
;
1741 pthread_mutex_destroy(&wsi
->mutex
);
1743 vk_free(alloc
, wsi
);
1745 wsi_device
->wsi
[VK_ICD_WSI_PLATFORM_XCB
] = NULL
;
1746 wsi_device
->wsi
[VK_ICD_WSI_PLATFORM_XLIB
] = NULL
;
void
wsi_x11_finish_wsi(struct wsi_device *wsi_device,
                   const VkAllocationCallbacks *alloc)
{
   struct wsi_x11 *wsi =
      (struct wsi_x11 *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB];

   if (wsi) {
      hash_table_foreach(wsi->connections, entry)
         wsi_x11_connection_destroy(wsi_device, entry->data);

      _mesa_hash_table_destroy(wsi->connections, NULL);

      pthread_mutex_destroy(&wsi->mutex);

      vk_free(alloc, wsi);
   }
}