2 * Copyright © 2015 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
24 #include <X11/Xlib-xcb.h>
25 #include <X11/xshmfence.h>
28 #include <xcb/present.h>
30 #include "util/macros.h"
39 #include "util/hash_table.h"
42 #include "wsi_common_private.h"
43 #include "wsi_common_x11.h"
44 #include "wsi_common_queue.h"
/* memcpy that statically checks that source and destination element types
 * have the same size.  (Statement-expression form; GCC/Clang extension.)
 */
#define typed_memcpy(dest, src, count) ({ \
   STATIC_ASSERT(sizeof(*src) == sizeof(*dest)); \
   memcpy((dest), (src), (count) * sizeof(*(src))); \
})
/* Per-xcb_connection_t capability cache.  The has_dri3/has_present fields
 * are written by wsi_x11_connection_create() below; only is_proprietary_x11
 * is visible in this fragment — NOTE(review): field order/extra fields should
 * be confirmed against upstream.
 */
struct wsi_x11_connection {
   bool has_dri3;
   bool has_present;
   bool is_proprietary_x11;
};
58 struct wsi_interface base
;
60 pthread_mutex_t mutex
;
61 /* Hash table of xcb_connection -> wsi_x11_connection mappings */
62 struct hash_table
*connections
;
68 * Wrapper around xcb_dri3_open
71 wsi_dri3_open(xcb_connection_t
*conn
,
75 xcb_dri3_open_cookie_t cookie
;
76 xcb_dri3_open_reply_t
*reply
;
79 cookie
= xcb_dri3_open(conn
,
83 reply
= xcb_dri3_open_reply(conn
, cookie
, NULL
);
87 if (reply
->nfd
!= 1) {
92 fd
= xcb_dri3_open_reply_fds(conn
, reply
)[0];
94 fcntl(fd
, F_SETFD
, fcntl(fd
, F_GETFD
) | FD_CLOEXEC
);
100 wsi_x11_check_dri3_compatible(xcb_connection_t
*conn
, int local_fd
)
102 xcb_screen_iterator_t screen_iter
=
103 xcb_setup_roots_iterator(xcb_get_setup(conn
));
104 xcb_screen_t
*screen
= screen_iter
.data
;
106 int dri3_fd
= wsi_dri3_open(conn
, screen
->root
, None
);
108 char *local_dev
= drmGetRenderDeviceNameFromFd(local_fd
);
109 char *dri3_dev
= drmGetRenderDeviceNameFromFd(dri3_fd
);
114 ret
= strcmp(local_dev
, dri3_dev
);
125 static struct wsi_x11_connection
*
126 wsi_x11_connection_create(const VkAllocationCallbacks
*alloc
,
127 xcb_connection_t
*conn
)
129 xcb_query_extension_cookie_t dri3_cookie
, pres_cookie
, amd_cookie
, nv_cookie
;
130 xcb_query_extension_reply_t
*dri3_reply
, *pres_reply
, *amd_reply
, *nv_reply
;
132 struct wsi_x11_connection
*wsi_conn
=
133 vk_alloc(alloc
, sizeof(*wsi_conn
), 8,
134 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE
);
138 dri3_cookie
= xcb_query_extension(conn
, 4, "DRI3");
139 pres_cookie
= xcb_query_extension(conn
, 7, "PRESENT");
141 /* We try to be nice to users and emit a warning if they try to use a
142 * Vulkan application on a system without DRI3 enabled. However, this ends
143 * up spewing the warning when a user has, for example, both Intel
144 * integrated graphics and a discrete card with proprietary drivers and are
145 * running on the discrete card with the proprietary DDX. In this case, we
146 * really don't want to print the warning because it just confuses users.
147 * As a heuristic to detect this case, we check for a couple of proprietary
150 amd_cookie
= xcb_query_extension(conn
, 11, "ATIFGLRXDRI");
151 nv_cookie
= xcb_query_extension(conn
, 10, "NV-CONTROL");
153 dri3_reply
= xcb_query_extension_reply(conn
, dri3_cookie
, NULL
);
154 pres_reply
= xcb_query_extension_reply(conn
, pres_cookie
, NULL
);
155 amd_reply
= xcb_query_extension_reply(conn
, amd_cookie
, NULL
);
156 nv_reply
= xcb_query_extension_reply(conn
, nv_cookie
, NULL
);
157 if (!dri3_reply
|| !pres_reply
) {
162 vk_free(alloc
, wsi_conn
);
166 wsi_conn
->has_dri3
= dri3_reply
->present
!= 0;
167 wsi_conn
->has_present
= pres_reply
->present
!= 0;
168 wsi_conn
->is_proprietary_x11
= false;
169 if (amd_reply
&& amd_reply
->present
)
170 wsi_conn
->is_proprietary_x11
= true;
171 if (nv_reply
&& nv_reply
->present
)
172 wsi_conn
->is_proprietary_x11
= true;
183 wsi_x11_connection_destroy(const VkAllocationCallbacks
*alloc
,
184 struct wsi_x11_connection
*conn
)
186 vk_free(alloc
, conn
);
190 wsi_x11_check_for_dri3(struct wsi_x11_connection
*wsi_conn
)
192 if (wsi_conn
->has_dri3
)
194 if (!wsi_conn
->is_proprietary_x11
) {
195 fprintf(stderr
, "vulkan: No DRI3 support detected - required for presentation\n"
196 "Note: you can probably enable DRI3 in your Xorg config\n");
201 static struct wsi_x11_connection
*
202 wsi_x11_get_connection(struct wsi_device
*wsi_dev
,
203 const VkAllocationCallbacks
*alloc
,
204 xcb_connection_t
*conn
)
206 struct wsi_x11
*wsi
=
207 (struct wsi_x11
*)wsi_dev
->wsi
[VK_ICD_WSI_PLATFORM_XCB
];
209 pthread_mutex_lock(&wsi
->mutex
);
211 struct hash_entry
*entry
= _mesa_hash_table_search(wsi
->connections
, conn
);
213 /* We're about to make a bunch of blocking calls. Let's drop the
214 * mutex for now so we don't block up too badly.
216 pthread_mutex_unlock(&wsi
->mutex
);
218 struct wsi_x11_connection
*wsi_conn
=
219 wsi_x11_connection_create(alloc
, conn
);
223 pthread_mutex_lock(&wsi
->mutex
);
225 entry
= _mesa_hash_table_search(wsi
->connections
, conn
);
227 /* Oops, someone raced us to it */
228 wsi_x11_connection_destroy(alloc
, wsi_conn
);
230 entry
= _mesa_hash_table_insert(wsi
->connections
, conn
, wsi_conn
);
234 pthread_mutex_unlock(&wsi
->mutex
);
239 static const VkFormat formats
[] = {
240 VK_FORMAT_B8G8R8A8_SRGB
,
241 VK_FORMAT_B8G8R8A8_UNORM
,
244 static const VkPresentModeKHR present_modes
[] = {
245 VK_PRESENT_MODE_IMMEDIATE_KHR
,
246 VK_PRESENT_MODE_MAILBOX_KHR
,
247 VK_PRESENT_MODE_FIFO_KHR
,
250 static xcb_screen_t
*
251 get_screen_for_root(xcb_connection_t
*conn
, xcb_window_t root
)
253 xcb_screen_iterator_t screen_iter
=
254 xcb_setup_roots_iterator(xcb_get_setup(conn
));
256 for (; screen_iter
.rem
; xcb_screen_next (&screen_iter
)) {
257 if (screen_iter
.data
->root
== root
)
258 return screen_iter
.data
;
264 static xcb_visualtype_t
*
265 screen_get_visualtype(xcb_screen_t
*screen
, xcb_visualid_t visual_id
,
268 xcb_depth_iterator_t depth_iter
=
269 xcb_screen_allowed_depths_iterator(screen
);
271 for (; depth_iter
.rem
; xcb_depth_next (&depth_iter
)) {
272 xcb_visualtype_iterator_t visual_iter
=
273 xcb_depth_visuals_iterator (depth_iter
.data
);
275 for (; visual_iter
.rem
; xcb_visualtype_next (&visual_iter
)) {
276 if (visual_iter
.data
->visual_id
== visual_id
) {
278 *depth
= depth_iter
.data
->depth
;
279 return visual_iter
.data
;
287 static xcb_visualtype_t
*
288 connection_get_visualtype(xcb_connection_t
*conn
, xcb_visualid_t visual_id
,
291 xcb_screen_iterator_t screen_iter
=
292 xcb_setup_roots_iterator(xcb_get_setup(conn
));
294 /* For this we have to iterate over all of the screens which is rather
295 * annoying. Fortunately, there is probably only 1.
297 for (; screen_iter
.rem
; xcb_screen_next (&screen_iter
)) {
298 xcb_visualtype_t
*visual
= screen_get_visualtype(screen_iter
.data
,
307 static xcb_visualtype_t
*
308 get_visualtype_for_window(xcb_connection_t
*conn
, xcb_window_t window
,
311 xcb_query_tree_cookie_t tree_cookie
;
312 xcb_get_window_attributes_cookie_t attrib_cookie
;
313 xcb_query_tree_reply_t
*tree
;
314 xcb_get_window_attributes_reply_t
*attrib
;
316 tree_cookie
= xcb_query_tree(conn
, window
);
317 attrib_cookie
= xcb_get_window_attributes(conn
, window
);
319 tree
= xcb_query_tree_reply(conn
, tree_cookie
, NULL
);
320 attrib
= xcb_get_window_attributes_reply(conn
, attrib_cookie
, NULL
);
321 if (attrib
== NULL
|| tree
== NULL
) {
327 xcb_window_t root
= tree
->root
;
328 xcb_visualid_t visual_id
= attrib
->visual
;
332 xcb_screen_t
*screen
= get_screen_for_root(conn
, root
);
336 return screen_get_visualtype(screen
, visual_id
, depth
);
340 visual_has_alpha(xcb_visualtype_t
*visual
, unsigned depth
)
342 uint32_t rgb_mask
= visual
->red_mask
|
346 uint32_t all_mask
= 0xffffffff >> (32 - depth
);
348 /* Do we have bits left over after RGB? */
349 return (all_mask
& ~rgb_mask
) != 0;
352 VkBool32
wsi_get_physical_device_xcb_presentation_support(
353 struct wsi_device
*wsi_device
,
354 VkAllocationCallbacks
*alloc
,
355 uint32_t queueFamilyIndex
,
357 bool can_handle_different_gpu
,
358 xcb_connection_t
* connection
,
359 xcb_visualid_t visual_id
)
361 struct wsi_x11_connection
*wsi_conn
=
362 wsi_x11_get_connection(wsi_device
, alloc
, connection
);
367 if (!wsi_x11_check_for_dri3(wsi_conn
))
370 if (!can_handle_different_gpu
)
371 if (!wsi_x11_check_dri3_compatible(connection
, fd
))
374 unsigned visual_depth
;
375 if (!connection_get_visualtype(connection
, visual_id
, &visual_depth
))
378 if (visual_depth
!= 24 && visual_depth
!= 32)
384 static xcb_connection_t
*
385 x11_surface_get_connection(VkIcdSurfaceBase
*icd_surface
)
387 if (icd_surface
->platform
== VK_ICD_WSI_PLATFORM_XLIB
)
388 return XGetXCBConnection(((VkIcdSurfaceXlib
*)icd_surface
)->dpy
);
390 return ((VkIcdSurfaceXcb
*)icd_surface
)->connection
;
394 x11_surface_get_window(VkIcdSurfaceBase
*icd_surface
)
396 if (icd_surface
->platform
== VK_ICD_WSI_PLATFORM_XLIB
)
397 return ((VkIcdSurfaceXlib
*)icd_surface
)->window
;
399 return ((VkIcdSurfaceXcb
*)icd_surface
)->window
;
403 x11_surface_get_support(VkIcdSurfaceBase
*icd_surface
,
404 struct wsi_device
*wsi_device
,
405 const VkAllocationCallbacks
*alloc
,
406 uint32_t queueFamilyIndex
,
408 bool can_handle_different_gpu
,
409 VkBool32
* pSupported
)
411 xcb_connection_t
*conn
= x11_surface_get_connection(icd_surface
);
412 xcb_window_t window
= x11_surface_get_window(icd_surface
);
414 struct wsi_x11_connection
*wsi_conn
=
415 wsi_x11_get_connection(wsi_device
, alloc
, conn
);
417 return VK_ERROR_OUT_OF_HOST_MEMORY
;
419 if (!wsi_x11_check_for_dri3(wsi_conn
)) {
424 if (!can_handle_different_gpu
)
425 if (!wsi_x11_check_dri3_compatible(conn
, local_fd
))
428 unsigned visual_depth
;
429 if (!get_visualtype_for_window(conn
, window
, &visual_depth
)) {
434 if (visual_depth
!= 24 && visual_depth
!= 32) {
444 x11_surface_get_capabilities(VkIcdSurfaceBase
*icd_surface
,
445 VkSurfaceCapabilitiesKHR
*caps
)
447 xcb_connection_t
*conn
= x11_surface_get_connection(icd_surface
);
448 xcb_window_t window
= x11_surface_get_window(icd_surface
);
449 xcb_get_geometry_cookie_t geom_cookie
;
450 xcb_generic_error_t
*err
;
451 xcb_get_geometry_reply_t
*geom
;
452 unsigned visual_depth
;
454 geom_cookie
= xcb_get_geometry(conn
, window
);
456 /* This does a round-trip. This is why we do get_geometry first and
457 * wait to read the reply until after we have a visual.
459 xcb_visualtype_t
*visual
=
460 get_visualtype_for_window(conn
, window
, &visual_depth
);
463 return VK_ERROR_SURFACE_LOST_KHR
;
465 geom
= xcb_get_geometry_reply(conn
, geom_cookie
, &err
);
467 VkExtent2D extent
= { geom
->width
, geom
->height
};
468 caps
->currentExtent
= extent
;
469 caps
->minImageExtent
= extent
;
470 caps
->maxImageExtent
= extent
;
472 /* This can happen if the client didn't wait for the configure event
473 * to come back from the compositor. In that case, we don't know the
474 * size of the window so we just return valid "I don't know" stuff.
476 caps
->currentExtent
= (VkExtent2D
) { -1, -1 };
477 caps
->minImageExtent
= (VkExtent2D
) { 1, 1 };
478 /* This is the maximum supported size on Intel */
479 caps
->maxImageExtent
= (VkExtent2D
) { 1 << 14, 1 << 14 };
484 if (visual_has_alpha(visual
, visual_depth
)) {
485 caps
->supportedCompositeAlpha
= VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR
|
486 VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR
;
488 caps
->supportedCompositeAlpha
= VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR
|
489 VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR
;
492 /* For true mailbox mode, we need at least 4 images:
493 * 1) One to scan out from
494 * 2) One to have queued for scan-out
495 * 3) One to be currently held by the X server
496 * 4) One to render to
498 caps
->minImageCount
= 2;
499 /* There is no real maximum */
500 caps
->maxImageCount
= 0;
502 caps
->supportedTransforms
= VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR
;
503 caps
->currentTransform
= VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR
;
504 caps
->maxImageArrayLayers
= 1;
505 caps
->supportedUsageFlags
=
506 VK_IMAGE_USAGE_TRANSFER_SRC_BIT
|
507 VK_IMAGE_USAGE_SAMPLED_BIT
|
508 VK_IMAGE_USAGE_TRANSFER_DST_BIT
|
509 VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT
;
515 x11_surface_get_capabilities2(VkIcdSurfaceBase
*icd_surface
,
516 const void *info_next
,
517 VkSurfaceCapabilities2KHR
*caps
)
519 assert(caps
->sType
== VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR
);
521 return x11_surface_get_capabilities(icd_surface
, &caps
->surfaceCapabilities
);
525 x11_surface_get_formats(VkIcdSurfaceBase
*surface
,
526 struct wsi_device
*wsi_device
,
527 uint32_t *pSurfaceFormatCount
,
528 VkSurfaceFormatKHR
*pSurfaceFormats
)
530 VK_OUTARRAY_MAKE(out
, pSurfaceFormats
, pSurfaceFormatCount
);
532 for (unsigned i
= 0; i
< ARRAY_SIZE(formats
); i
++) {
533 vk_outarray_append(&out
, f
) {
534 f
->format
= formats
[i
];
535 f
->colorSpace
= VK_COLORSPACE_SRGB_NONLINEAR_KHR
;
539 return vk_outarray_status(&out
);
543 x11_surface_get_formats2(VkIcdSurfaceBase
*surface
,
544 struct wsi_device
*wsi_device
,
545 const void *info_next
,
546 uint32_t *pSurfaceFormatCount
,
547 VkSurfaceFormat2KHR
*pSurfaceFormats
)
549 VK_OUTARRAY_MAKE(out
, pSurfaceFormats
, pSurfaceFormatCount
);
551 for (unsigned i
= 0; i
< ARRAY_SIZE(formats
); i
++) {
552 vk_outarray_append(&out
, f
) {
553 assert(f
->sType
== VK_STRUCTURE_TYPE_SURFACE_FORMAT_2_KHR
);
554 f
->surfaceFormat
.format
= formats
[i
];
555 f
->surfaceFormat
.colorSpace
= VK_COLORSPACE_SRGB_NONLINEAR_KHR
;
559 return vk_outarray_status(&out
);
563 x11_surface_get_present_modes(VkIcdSurfaceBase
*surface
,
564 uint32_t *pPresentModeCount
,
565 VkPresentModeKHR
*pPresentModes
)
567 if (pPresentModes
== NULL
) {
568 *pPresentModeCount
= ARRAY_SIZE(present_modes
);
572 *pPresentModeCount
= MIN2(*pPresentModeCount
, ARRAY_SIZE(present_modes
));
573 typed_memcpy(pPresentModes
, present_modes
, *pPresentModeCount
);
575 return *pPresentModeCount
< ARRAY_SIZE(present_modes
) ?
576 VK_INCOMPLETE
: VK_SUCCESS
;
579 VkResult
wsi_create_xcb_surface(const VkAllocationCallbacks
*pAllocator
,
580 const VkXcbSurfaceCreateInfoKHR
*pCreateInfo
,
581 VkSurfaceKHR
*pSurface
)
583 VkIcdSurfaceXcb
*surface
;
585 surface
= vk_alloc(pAllocator
, sizeof *surface
, 8,
586 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
588 return VK_ERROR_OUT_OF_HOST_MEMORY
;
590 surface
->base
.platform
= VK_ICD_WSI_PLATFORM_XCB
;
591 surface
->connection
= pCreateInfo
->connection
;
592 surface
->window
= pCreateInfo
->window
;
594 *pSurface
= VkIcdSurfaceBase_to_handle(&surface
->base
);
598 VkResult
wsi_create_xlib_surface(const VkAllocationCallbacks
*pAllocator
,
599 const VkXlibSurfaceCreateInfoKHR
*pCreateInfo
,
600 VkSurfaceKHR
*pSurface
)
602 VkIcdSurfaceXlib
*surface
;
604 surface
= vk_alloc(pAllocator
, sizeof *surface
, 8,
605 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
607 return VK_ERROR_OUT_OF_HOST_MEMORY
;
609 surface
->base
.platform
= VK_ICD_WSI_PLATFORM_XLIB
;
610 surface
->dpy
= pCreateInfo
->dpy
;
611 surface
->window
= pCreateInfo
->window
;
613 *pSurface
= VkIcdSurfaceBase_to_handle(&surface
->base
);
618 struct wsi_image base
;
621 struct xshmfence
* shm_fence
;
625 struct x11_swapchain
{
626 struct wsi_swapchain base
;
630 xcb_connection_t
* conn
;
636 xcb_present_event_t event_id
;
637 xcb_special_event_t
* special_event
;
639 uint64_t last_present_msc
;
644 struct wsi_queue present_queue
;
645 struct wsi_queue acquire_queue
;
646 pthread_t queue_manager
;
648 struct x11_image images
[0];
652 x11_get_images(struct wsi_swapchain
*anv_chain
,
653 uint32_t* pCount
, VkImage
*pSwapchainImages
)
655 struct x11_swapchain
*chain
= (struct x11_swapchain
*)anv_chain
;
659 if (pSwapchainImages
== NULL
) {
660 *pCount
= chain
->base
.image_count
;
665 ret_count
= chain
->base
.image_count
;
666 if (chain
->base
.image_count
> *pCount
) {
668 result
= VK_INCOMPLETE
;
671 for (uint32_t i
= 0; i
< ret_count
; i
++)
672 pSwapchainImages
[i
] = chain
->images
[i
].base
.image
;
678 x11_handle_dri3_present_event(struct x11_swapchain
*chain
,
679 xcb_present_generic_event_t
*event
)
681 switch (event
->evtype
) {
682 case XCB_PRESENT_CONFIGURE_NOTIFY
: {
683 xcb_present_configure_notify_event_t
*config
= (void *) event
;
685 if (config
->width
!= chain
->extent
.width
||
686 config
->height
!= chain
->extent
.height
)
687 return VK_ERROR_OUT_OF_DATE_KHR
;
692 case XCB_PRESENT_EVENT_IDLE_NOTIFY
: {
693 xcb_present_idle_notify_event_t
*idle
= (void *) event
;
695 for (unsigned i
= 0; i
< chain
->base
.image_count
; i
++) {
696 if (chain
->images
[i
].pixmap
== idle
->pixmap
) {
697 chain
->images
[i
].busy
= false;
699 wsi_queue_push(&chain
->acquire_queue
, i
);
707 case XCB_PRESENT_EVENT_COMPLETE_NOTIFY
: {
708 xcb_present_complete_notify_event_t
*complete
= (void *) event
;
709 if (complete
->kind
== XCB_PRESENT_COMPLETE_KIND_PIXMAP
)
710 chain
->last_present_msc
= complete
->msc
;
722 static uint64_t wsi_get_current_time(void)
724 uint64_t current_time
;
727 clock_gettime(CLOCK_MONOTONIC
, &tv
);
728 current_time
= tv
.tv_nsec
+ tv
.tv_sec
*1000000000ull;
/**
 * Convert a relative timeout (ns) into an absolute monotonic deadline,
 * clamping so the addition cannot overflow UINT64_MAX.
 */
static uint64_t wsi_get_absolute_timeout(uint64_t timeout)
{
   uint64_t current_time = wsi_get_current_time();

   timeout = MIN2(UINT64_MAX - current_time, timeout);

   return current_time + timeout;
}
742 x11_acquire_next_image_poll_x11(struct x11_swapchain
*chain
,
743 uint32_t *image_index
, uint64_t timeout
)
745 xcb_generic_event_t
*event
;
749 for (uint32_t i
= 0; i
< chain
->base
.image_count
; i
++) {
750 if (!chain
->images
[i
].busy
) {
751 /* We found a non-busy image */
752 xshmfence_await(chain
->images
[i
].shm_fence
);
754 chain
->images
[i
].busy
= true;
759 xcb_flush(chain
->conn
);
761 if (timeout
== UINT64_MAX
) {
762 event
= xcb_wait_for_special_event(chain
->conn
, chain
->special_event
);
764 return VK_ERROR_OUT_OF_DATE_KHR
;
766 event
= xcb_poll_for_special_event(chain
->conn
, chain
->special_event
);
772 atimeout
= wsi_get_absolute_timeout(timeout
);
774 pfds
.fd
= xcb_get_file_descriptor(chain
->conn
);
775 pfds
.events
= POLLIN
;
776 ret
= poll(&pfds
, 1, timeout
/ 1000 / 1000);
780 return VK_ERROR_OUT_OF_DATE_KHR
;
782 /* If a non-special event happens, the fd will still
783 * poll. So recalculate the timeout now just in case.
785 uint64_t current_time
= wsi_get_current_time();
786 if (atimeout
> current_time
)
787 timeout
= atimeout
- current_time
;
794 VkResult result
= x11_handle_dri3_present_event(chain
, (void *)event
);
796 if (result
!= VK_SUCCESS
)
802 x11_acquire_next_image_from_queue(struct x11_swapchain
*chain
,
803 uint32_t *image_index_out
, uint64_t timeout
)
805 assert(chain
->threaded
);
807 uint32_t image_index
;
808 VkResult result
= wsi_queue_pull(&chain
->acquire_queue
,
809 &image_index
, timeout
);
810 if (result
!= VK_SUCCESS
) {
812 } else if (chain
->status
!= VK_SUCCESS
) {
813 return chain
->status
;
816 assert(image_index
< chain
->base
.image_count
);
817 xshmfence_await(chain
->images
[image_index
].shm_fence
);
819 *image_index_out
= image_index
;
825 x11_present_to_x11(struct x11_swapchain
*chain
, uint32_t image_index
,
828 struct x11_image
*image
= &chain
->images
[image_index
];
830 assert(image_index
< chain
->base
.image_count
);
832 uint32_t options
= XCB_PRESENT_OPTION_NONE
;
835 int64_t remainder
= 0;
837 if (chain
->base
.present_mode
== VK_PRESENT_MODE_IMMEDIATE_KHR
)
838 options
|= XCB_PRESENT_OPTION_ASYNC
;
840 xshmfence_reset(image
->shm_fence
);
843 xcb_void_cookie_t cookie
=
844 xcb_present_pixmap(chain
->conn
,
847 (uint32_t) chain
->send_sbc
,
852 XCB_NONE
, /* target_crtc */
859 xcb_discard_reply(chain
->conn
, cookie
.sequence
);
862 xcb_flush(chain
->conn
);
868 x11_acquire_next_image(struct wsi_swapchain
*anv_chain
,
870 VkSemaphore semaphore
,
871 uint32_t *image_index
)
873 struct x11_swapchain
*chain
= (struct x11_swapchain
*)anv_chain
;
875 if (chain
->threaded
) {
876 return x11_acquire_next_image_from_queue(chain
, image_index
, timeout
);
878 return x11_acquire_next_image_poll_x11(chain
, image_index
, timeout
);
883 x11_queue_present(struct wsi_swapchain
*anv_chain
,
885 uint32_t waitSemaphoreCount
,
886 const VkSemaphore
*pWaitSemaphores
,
887 uint32_t image_index
,
888 const VkPresentRegionKHR
*damage
)
890 struct x11_swapchain
*chain
= (struct x11_swapchain
*)anv_chain
;
893 if (chain
->use_prime_blit
) {
894 result
= wsi_prime_image_blit_to_linear(&chain
->base
,
895 &chain
->images
[image_index
].base
,
899 if (result
!= VK_SUCCESS
)
903 if (chain
->threaded
) {
904 wsi_queue_push(&chain
->present_queue
, image_index
);
905 return chain
->status
;
907 return x11_present_to_x11(chain
, image_index
, 0);
912 x11_manage_fifo_queues(void *state
)
914 struct x11_swapchain
*chain
= state
;
917 assert(chain
->base
.present_mode
== VK_PRESENT_MODE_FIFO_KHR
);
919 while (chain
->status
== VK_SUCCESS
) {
920 /* It should be safe to unconditionally block here. Later in the loop
921 * we blocks until the previous present has landed on-screen. At that
922 * point, we should have received IDLE_NOTIFY on all images presented
923 * before that point so the client should be able to acquire any image
924 * other than the currently presented one.
926 uint32_t image_index
;
927 result
= wsi_queue_pull(&chain
->present_queue
, &image_index
, INT64_MAX
);
928 if (result
!= VK_SUCCESS
) {
930 } else if (chain
->status
!= VK_SUCCESS
) {
934 uint64_t target_msc
= chain
->last_present_msc
+ 1;
935 result
= x11_present_to_x11(chain
, image_index
, target_msc
);
936 if (result
!= VK_SUCCESS
)
939 while (chain
->last_present_msc
< target_msc
) {
940 xcb_generic_event_t
*event
=
941 xcb_wait_for_special_event(chain
->conn
, chain
->special_event
);
945 result
= x11_handle_dri3_present_event(chain
, (void *)event
);
947 if (result
!= VK_SUCCESS
)
953 chain
->status
= result
;
954 wsi_queue_push(&chain
->acquire_queue
, UINT32_MAX
);
960 x11_image_init(VkDevice device_h
, struct x11_swapchain
*chain
,
961 const VkSwapchainCreateInfoKHR
*pCreateInfo
,
962 const VkAllocationCallbacks
* pAllocator
,
963 struct x11_image
*image
)
965 xcb_void_cookie_t cookie
;
969 if (chain
->use_prime_blit
) {
970 result
= wsi_create_prime_image(&chain
->base
, pCreateInfo
, &image
->base
);
972 result
= wsi_create_native_image(&chain
->base
, pCreateInfo
, &image
->base
);
974 if (result
!= VK_SUCCESS
)
977 image
->pixmap
= xcb_generate_id(chain
->conn
);
980 xcb_dri3_pixmap_from_buffer_checked(chain
->conn
,
984 pCreateInfo
->imageExtent
.width
,
985 pCreateInfo
->imageExtent
.height
,
986 image
->base
.row_pitch
,
989 xcb_discard_reply(chain
->conn
, cookie
.sequence
);
990 image
->base
.fd
= -1; /* XCB has now taken ownership of the FD */
992 int fence_fd
= xshmfence_alloc_shm();
996 image
->shm_fence
= xshmfence_map_shm(fence_fd
);
997 if (image
->shm_fence
== NULL
)
998 goto fail_shmfence_alloc
;
1000 image
->sync_fence
= xcb_generate_id(chain
->conn
);
1001 xcb_dri3_fence_from_fd(chain
->conn
,
1007 image
->busy
= false;
1008 xshmfence_trigger(image
->shm_fence
);
1012 fail_shmfence_alloc
:
1016 cookie
= xcb_free_pixmap(chain
->conn
, image
->pixmap
);
1017 xcb_discard_reply(chain
->conn
, cookie
.sequence
);
1019 wsi_destroy_image(&chain
->base
, &image
->base
);
1025 x11_image_finish(struct x11_swapchain
*chain
,
1026 const VkAllocationCallbacks
* pAllocator
,
1027 struct x11_image
*image
)
1029 xcb_void_cookie_t cookie
;
1031 cookie
= xcb_sync_destroy_fence(chain
->conn
, image
->sync_fence
);
1032 xcb_discard_reply(chain
->conn
, cookie
.sequence
);
1033 xshmfence_unmap_shm(image
->shm_fence
);
1035 cookie
= xcb_free_pixmap(chain
->conn
, image
->pixmap
);
1036 xcb_discard_reply(chain
->conn
, cookie
.sequence
);
1038 wsi_destroy_image(&chain
->base
, &image
->base
);
1042 x11_swapchain_destroy(struct wsi_swapchain
*anv_chain
,
1043 const VkAllocationCallbacks
*pAllocator
)
1045 struct x11_swapchain
*chain
= (struct x11_swapchain
*)anv_chain
;
1046 xcb_void_cookie_t cookie
;
1048 for (uint32_t i
= 0; i
< chain
->base
.image_count
; i
++)
1049 x11_image_finish(chain
, pAllocator
, &chain
->images
[i
]);
1051 if (chain
->threaded
) {
1052 chain
->status
= VK_ERROR_OUT_OF_DATE_KHR
;
1053 /* Push a UINT32_MAX to wake up the manager */
1054 wsi_queue_push(&chain
->present_queue
, UINT32_MAX
);
1055 pthread_join(chain
->queue_manager
, NULL
);
1056 wsi_queue_destroy(&chain
->acquire_queue
);
1057 wsi_queue_destroy(&chain
->present_queue
);
1060 xcb_unregister_for_special_event(chain
->conn
, chain
->special_event
);
1061 cookie
= xcb_present_select_input_checked(chain
->conn
, chain
->event_id
,
1063 XCB_PRESENT_EVENT_MASK_NO_EVENT
);
1064 xcb_discard_reply(chain
->conn
, cookie
.sequence
);
1066 wsi_swapchain_finish(&chain
->base
);
1068 vk_free(pAllocator
, chain
);
1074 x11_surface_create_swapchain(VkIcdSurfaceBase
*icd_surface
,
1076 struct wsi_device
*wsi_device
,
1078 const VkSwapchainCreateInfoKHR
*pCreateInfo
,
1079 const VkAllocationCallbacks
* pAllocator
,
1080 struct wsi_swapchain
**swapchain_out
)
1082 struct x11_swapchain
*chain
;
1083 xcb_void_cookie_t cookie
;
1086 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR
);
1088 const unsigned num_images
= pCreateInfo
->minImageCount
;
1090 /* Check for whether or not we have a window up-front */
1091 xcb_connection_t
*conn
= x11_surface_get_connection(icd_surface
);
1092 xcb_window_t window
= x11_surface_get_window(icd_surface
);
1093 xcb_get_geometry_reply_t
*geometry
=
1094 xcb_get_geometry_reply(conn
, xcb_get_geometry(conn
, window
), NULL
);
1095 if (geometry
== NULL
)
1096 return VK_ERROR_SURFACE_LOST_KHR
;
1097 const uint32_t bit_depth
= geometry
->depth
;
1100 size_t size
= sizeof(*chain
) + num_images
* sizeof(chain
->images
[0]);
1101 chain
= vk_alloc(pAllocator
, size
, 8,
1102 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
1104 return VK_ERROR_OUT_OF_HOST_MEMORY
;
1106 result
= wsi_swapchain_init(wsi_device
, &chain
->base
, device
,
1107 pCreateInfo
, pAllocator
);
1108 if (result
!= VK_SUCCESS
)
1111 chain
->base
.destroy
= x11_swapchain_destroy
;
1112 chain
->base
.get_images
= x11_get_images
;
1113 chain
->base
.acquire_next_image
= x11_acquire_next_image
;
1114 chain
->base
.queue_present
= x11_queue_present
;
1115 chain
->base
.present_mode
= pCreateInfo
->presentMode
;
1116 chain
->base
.image_count
= num_images
;
1118 chain
->window
= window
;
1119 chain
->depth
= bit_depth
;
1120 chain
->extent
= pCreateInfo
->imageExtent
;
1121 chain
->send_sbc
= 0;
1122 chain
->last_present_msc
= 0;
1123 chain
->threaded
= false;
1124 chain
->status
= VK_SUCCESS
;
1127 chain
->use_prime_blit
= false;
1128 if (!wsi_x11_check_dri3_compatible(conn
, local_fd
)) {
1129 chain
->use_prime_blit
= true;
1132 chain
->event_id
= xcb_generate_id(chain
->conn
);
1133 xcb_present_select_input(chain
->conn
, chain
->event_id
, chain
->window
,
1134 XCB_PRESENT_EVENT_MASK_CONFIGURE_NOTIFY
|
1135 XCB_PRESENT_EVENT_MASK_COMPLETE_NOTIFY
|
1136 XCB_PRESENT_EVENT_MASK_IDLE_NOTIFY
);
1138 /* Create an XCB event queue to hold present events outside of the usual
1139 * application event queue
1141 chain
->special_event
=
1142 xcb_register_for_special_xge(chain
->conn
, &xcb_present_id
,
1143 chain
->event_id
, NULL
);
1145 chain
->gc
= xcb_generate_id(chain
->conn
);
1147 /* FINISHME: Choose a better error. */
1148 result
= VK_ERROR_OUT_OF_HOST_MEMORY
;
1152 cookie
= xcb_create_gc(chain
->conn
,
1155 XCB_GC_GRAPHICS_EXPOSURES
,
1156 (uint32_t []) { 0 });
1157 xcb_discard_reply(chain
->conn
, cookie
.sequence
);
1160 for (; image
< chain
->base
.image_count
; image
++) {
1161 result
= x11_image_init(device
, chain
, pCreateInfo
, pAllocator
,
1162 &chain
->images
[image
]);
1163 if (result
!= VK_SUCCESS
)
1164 goto fail_init_images
;
1167 if (chain
->base
.present_mode
== VK_PRESENT_MODE_FIFO_KHR
) {
1168 chain
->threaded
= true;
1170 /* Initialize our queues. We make them base.image_count + 1 because we will
1171 * occasionally use UINT32_MAX to signal the other thread that an error
1172 * has occurred and we don't want an overflow.
1175 ret
= wsi_queue_init(&chain
->acquire_queue
, chain
->base
.image_count
+ 1);
1177 goto fail_init_images
;
1180 ret
= wsi_queue_init(&chain
->present_queue
, chain
->base
.image_count
+ 1);
1182 wsi_queue_destroy(&chain
->acquire_queue
);
1183 goto fail_init_images
;
1186 for (unsigned i
= 0; i
< chain
->base
.image_count
; i
++)
1187 wsi_queue_push(&chain
->acquire_queue
, i
);
1189 ret
= pthread_create(&chain
->queue_manager
, NULL
,
1190 x11_manage_fifo_queues
, chain
);
1192 wsi_queue_destroy(&chain
->present_queue
);
1193 wsi_queue_destroy(&chain
->acquire_queue
);
1194 goto fail_init_images
;
1198 *swapchain_out
= &chain
->base
;
1203 for (uint32_t j
= 0; j
< image
; j
++)
1204 x11_image_finish(chain
, pAllocator
, &chain
->images
[j
]);
1207 xcb_unregister_for_special_event(chain
->conn
, chain
->special_event
);
1209 wsi_swapchain_finish(&chain
->base
);
1212 vk_free(pAllocator
, chain
);
1218 wsi_x11_init_wsi(struct wsi_device
*wsi_device
,
1219 const VkAllocationCallbacks
*alloc
)
1221 struct wsi_x11
*wsi
;
1224 wsi
= vk_alloc(alloc
, sizeof(*wsi
), 8,
1225 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE
);
1227 result
= VK_ERROR_OUT_OF_HOST_MEMORY
;
1231 int ret
= pthread_mutex_init(&wsi
->mutex
, NULL
);
1233 if (ret
== ENOMEM
) {
1234 result
= VK_ERROR_OUT_OF_HOST_MEMORY
;
1236 /* FINISHME: Choose a better error. */
1237 result
= VK_ERROR_OUT_OF_HOST_MEMORY
;
1243 wsi
->connections
= _mesa_hash_table_create(NULL
, _mesa_hash_pointer
,
1244 _mesa_key_pointer_equal
);
1245 if (!wsi
->connections
) {
1246 result
= VK_ERROR_OUT_OF_HOST_MEMORY
;
1250 wsi
->base
.get_support
= x11_surface_get_support
;
1251 wsi
->base
.get_capabilities
= x11_surface_get_capabilities
;
1252 wsi
->base
.get_capabilities2
= x11_surface_get_capabilities2
;
1253 wsi
->base
.get_formats
= x11_surface_get_formats
;
1254 wsi
->base
.get_formats2
= x11_surface_get_formats2
;
1255 wsi
->base
.get_present_modes
= x11_surface_get_present_modes
;
1256 wsi
->base
.create_swapchain
= x11_surface_create_swapchain
;
1258 wsi_device
->wsi
[VK_ICD_WSI_PLATFORM_XCB
] = &wsi
->base
;
1259 wsi_device
->wsi
[VK_ICD_WSI_PLATFORM_XLIB
] = &wsi
->base
;
1264 pthread_mutex_destroy(&wsi
->mutex
);
1266 vk_free(alloc
, wsi
);
1268 wsi_device
->wsi
[VK_ICD_WSI_PLATFORM_XCB
] = NULL
;
1269 wsi_device
->wsi
[VK_ICD_WSI_PLATFORM_XLIB
] = NULL
;
1275 wsi_x11_finish_wsi(struct wsi_device
*wsi_device
,
1276 const VkAllocationCallbacks
*alloc
)
1278 struct wsi_x11
*wsi
=
1279 (struct wsi_x11
*)wsi_device
->wsi
[VK_ICD_WSI_PLATFORM_XCB
];
1282 struct hash_entry
*entry
;
1283 hash_table_foreach(wsi
->connections
, entry
)
1284 wsi_x11_connection_destroy(alloc
, entry
->data
);
1286 _mesa_hash_table_destroy(wsi
->connections
, NULL
);
1288 pthread_mutex_destroy(&wsi
->mutex
);
1290 vk_free(alloc
, wsi
);