2 * Copyright © 2015 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 #include <X11/Xlib-xcb.h>
25 #include <X11/xshmfence.h>
28 #include <xcb/present.h>
30 #include "util/macros.h"
39 #include "util/hash_table.h"
41 #include "wsi_common.h"
42 #include "wsi_common_x11.h"
43 #include "wsi_common_queue.h"
45 #define typed_memcpy(dest, src, count) ({ \
46 STATIC_ASSERT(sizeof(*src) == sizeof(*dest)); \
47 memcpy((dest), (src), (count) * sizeof(*(src))); \
50 struct wsi_x11_connection
{
56 struct wsi_interface base
;
58 pthread_mutex_t mutex
;
59 /* Hash table of xcb_connection -> wsi_x11_connection mappings */
60 struct hash_table
*connections
;
/* NOTE(review): this chunk is missing many original lines (return type,
 * parameter list tail, braces, error paths); code below is left
 * byte-identical to what is visible. */
66 * Wrapper around xcb_dri3_open
69 wsi_dri3_open(xcb_connection_t
*conn
,
73 xcb_dri3_open_cookie_t cookie
;
74 xcb_dri3_open_reply_t
*reply
;
/* Round-trip to the X server: request a DRI3 device fd, then block on
 * the reply (NULL error pointer — errors surface as a NULL reply). */
77 cookie
= xcb_dri3_open(conn
,
81 reply
= xcb_dri3_open_reply(conn
, cookie
, NULL
);
/* Exactly one fd is expected in the reply; anything else is an error
 * (the handling branch is not visible in this chunk). */
85 if (reply
->nfd
!= 1) {
90 fd
= xcb_dri3_open_reply_fds(conn
, reply
)[0];
/* Mark the received fd close-on-exec so it does not leak into children. */
92 fcntl(fd
, F_SETFD
, fcntl(fd
, F_GETFD
) | FD_CLOEXEC
);
/* Checks whether the DRI3 device behind this X connection is the same
 * render device as local_fd, by comparing render-node names.
 * NOTE(review): free()s of the drm strings, the return statement and
 * braces are missing from this chunk; code left byte-identical. */
98 wsi_x11_check_dri3_compatible(xcb_connection_t
*conn
, int local_fd
)
100 xcb_screen_iterator_t screen_iter
=
101 xcb_setup_roots_iterator(xcb_get_setup(conn
));
/* Uses the first screen only; multi-screen setups are not considered here. */
102 xcb_screen_t
*screen
= screen_iter
.data
;
104 int dri3_fd
= wsi_dri3_open(conn
, screen
->root
, None
);
/* Resolve both fds to /dev/dri/renderD* names for comparison.
 * NOTE(review): NULL-checks on these strings are not visible here. */
106 char *local_dev
= drmGetRenderDeviceNameFromFd(local_fd
);
107 char *dri3_dev
= drmGetRenderDeviceNameFromFd(dri3_fd
);
/* ret == 0 means same render device (strcmp equality). */
112 ret
= strcmp(local_dev
, dri3_dev
);
123 static struct wsi_x11_connection
*
124 wsi_x11_connection_create(const VkAllocationCallbacks
*alloc
,
125 xcb_connection_t
*conn
)
127 xcb_query_extension_cookie_t dri3_cookie
, pres_cookie
;
128 xcb_query_extension_reply_t
*dri3_reply
, *pres_reply
;
130 struct wsi_x11_connection
*wsi_conn
=
131 vk_alloc(alloc
, sizeof(*wsi_conn
), 8,
132 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE
);
136 dri3_cookie
= xcb_query_extension(conn
, 4, "DRI3");
137 pres_cookie
= xcb_query_extension(conn
, 7, "PRESENT");
139 dri3_reply
= xcb_query_extension_reply(conn
, dri3_cookie
, NULL
);
140 pres_reply
= xcb_query_extension_reply(conn
, pres_cookie
, NULL
);
141 if (dri3_reply
== NULL
|| pres_reply
== NULL
) {
144 vk_free(alloc
, wsi_conn
);
148 wsi_conn
->has_dri3
= dri3_reply
->present
!= 0;
149 wsi_conn
->has_present
= pres_reply
->present
!= 0;
/* Frees a wsi_x11_connection created by wsi_x11_connection_create(),
 * using the same allocator it was created with.
 * NOTE(review): return-type line and braces are missing from this chunk. */
158 wsi_x11_connection_destroy(const VkAllocationCallbacks
*alloc
,
159 struct wsi_x11_connection
*conn
)
161 vk_free(alloc
, conn
);
164 static struct wsi_x11_connection
*
165 wsi_x11_get_connection(struct wsi_device
*wsi_dev
,
166 const VkAllocationCallbacks
*alloc
,
167 xcb_connection_t
*conn
)
169 struct wsi_x11
*wsi
=
170 (struct wsi_x11
*)wsi_dev
->wsi
[VK_ICD_WSI_PLATFORM_XCB
];
172 pthread_mutex_lock(&wsi
->mutex
);
174 struct hash_entry
*entry
= _mesa_hash_table_search(wsi
->connections
, conn
);
176 /* We're about to make a bunch of blocking calls. Let's drop the
177 * mutex for now so we don't block up too badly.
179 pthread_mutex_unlock(&wsi
->mutex
);
181 struct wsi_x11_connection
*wsi_conn
=
182 wsi_x11_connection_create(alloc
, conn
);
186 pthread_mutex_lock(&wsi
->mutex
);
188 entry
= _mesa_hash_table_search(wsi
->connections
, conn
);
190 /* Oops, someone raced us to it */
191 wsi_x11_connection_destroy(alloc
, wsi_conn
);
193 entry
= _mesa_hash_table_insert(wsi
->connections
, conn
, wsi_conn
);
197 pthread_mutex_unlock(&wsi
->mutex
);
202 static const VkSurfaceFormatKHR formats
[] = {
203 { .format
= VK_FORMAT_B8G8R8A8_SRGB
, },
204 { .format
= VK_FORMAT_B8G8R8A8_UNORM
, },
207 static const VkPresentModeKHR present_modes
[] = {
208 VK_PRESENT_MODE_IMMEDIATE_KHR
,
209 VK_PRESENT_MODE_MAILBOX_KHR
,
210 VK_PRESENT_MODE_FIFO_KHR
,
/* Walks all screens of the connection and returns the one whose root
 * window matches `root`.
 * NOTE(review): the not-found fallthrough (presumably `return NULL`)
 * and closing braces are missing from this chunk — confirm against
 * the full file. */
213 static xcb_screen_t
*
214 get_screen_for_root(xcb_connection_t
*conn
, xcb_window_t root
)
216 xcb_screen_iterator_t screen_iter
=
217 xcb_setup_roots_iterator(xcb_get_setup(conn
));
219 for (; screen_iter
.rem
; xcb_screen_next (&screen_iter
)) {
220 if (screen_iter
.data
->root
== root
)
221 return screen_iter
.data
;
227 static xcb_visualtype_t
*
228 screen_get_visualtype(xcb_screen_t
*screen
, xcb_visualid_t visual_id
,
231 xcb_depth_iterator_t depth_iter
=
232 xcb_screen_allowed_depths_iterator(screen
);
234 for (; depth_iter
.rem
; xcb_depth_next (&depth_iter
)) {
235 xcb_visualtype_iterator_t visual_iter
=
236 xcb_depth_visuals_iterator (depth_iter
.data
);
238 for (; visual_iter
.rem
; xcb_visualtype_next (&visual_iter
)) {
239 if (visual_iter
.data
->visual_id
== visual_id
) {
241 *depth
= depth_iter
.data
->depth
;
242 return visual_iter
.data
;
250 static xcb_visualtype_t
*
251 connection_get_visualtype(xcb_connection_t
*conn
, xcb_visualid_t visual_id
,
254 xcb_screen_iterator_t screen_iter
=
255 xcb_setup_roots_iterator(xcb_get_setup(conn
));
257 /* For this we have to iterate over all of the screens which is rather
258 * annoying. Fortunately, there is probably only 1.
260 for (; screen_iter
.rem
; xcb_screen_next (&screen_iter
)) {
261 xcb_visualtype_t
*visual
= screen_get_visualtype(screen_iter
.data
,
270 static xcb_visualtype_t
*
271 get_visualtype_for_window(xcb_connection_t
*conn
, xcb_window_t window
,
274 xcb_query_tree_cookie_t tree_cookie
;
275 xcb_get_window_attributes_cookie_t attrib_cookie
;
276 xcb_query_tree_reply_t
*tree
;
277 xcb_get_window_attributes_reply_t
*attrib
;
279 tree_cookie
= xcb_query_tree(conn
, window
);
280 attrib_cookie
= xcb_get_window_attributes(conn
, window
);
282 tree
= xcb_query_tree_reply(conn
, tree_cookie
, NULL
);
283 attrib
= xcb_get_window_attributes_reply(conn
, attrib_cookie
, NULL
);
284 if (attrib
== NULL
|| tree
== NULL
) {
290 xcb_window_t root
= tree
->root
;
291 xcb_visualid_t visual_id
= attrib
->visual
;
295 xcb_screen_t
*screen
= get_screen_for_root(conn
, root
);
299 return screen_get_visualtype(screen
, visual_id
, depth
);
/* Returns true when the visual's depth carries bits that are not part
 * of the R/G/B channel masks, i.e. there is room for an alpha channel.
 * NOTE(review): the rgb_mask initializer is truncated in this chunk —
 * the green/blue mask terms (original lines ~306-307) are missing. */
303 visual_has_alpha(xcb_visualtype_t
*visual
, unsigned depth
)
305 uint32_t rgb_mask
= visual
->red_mask
|
/* Mask covering every bit a pixel of this depth can hold
 * (depth is assumed to be in 1..32 here — TODO confirm callers). */
309 uint32_t all_mask
= 0xffffffff >> (32 - depth
);
311 /* Do we have bits left over after RGB? */
312 return (all_mask
& ~rgb_mask
) != 0;
315 VkBool32
wsi_get_physical_device_xcb_presentation_support(
316 struct wsi_device
*wsi_device
,
317 VkAllocationCallbacks
*alloc
,
318 uint32_t queueFamilyIndex
,
320 bool can_handle_different_gpu
,
321 xcb_connection_t
* connection
,
322 xcb_visualid_t visual_id
)
324 struct wsi_x11_connection
*wsi_conn
=
325 wsi_x11_get_connection(wsi_device
, alloc
, connection
);
330 if (!wsi_conn
->has_dri3
) {
331 fprintf(stderr
, "vulkan: No DRI3 support detected - required for presentation\n");
332 fprintf(stderr
, "Note: Buggy applications may crash, if they do please report to vendor\n");
336 if (!can_handle_different_gpu
)
337 if (!wsi_x11_check_dri3_compatible(connection
, fd
))
340 unsigned visual_depth
;
341 if (!connection_get_visualtype(connection
, visual_id
, &visual_depth
))
344 if (visual_depth
!= 24 && visual_depth
!= 32)
/* Returns the xcb_connection_t behind an ICD surface: for an Xlib
 * surface the connection is derived from its Display via
 * XGetXCBConnection(); an XCB surface stores it directly.
 * NOTE(review): braces are missing from this chunk. */
350 static xcb_connection_t
*
351 x11_surface_get_connection(VkIcdSurfaceBase
*icd_surface
)
353 if (icd_surface
->platform
== VK_ICD_WSI_PLATFORM_XLIB
)
354 return XGetXCBConnection(((VkIcdSurfaceXlib
*)icd_surface
)->dpy
);
356 return ((VkIcdSurfaceXcb
*)icd_surface
)->connection
;
/* Returns the X window handle of an ICD surface, downcasting to the
 * Xlib or XCB surface struct based on the platform tag.
 * NOTE(review): return-type line and braces are missing from this chunk. */
360 x11_surface_get_window(VkIcdSurfaceBase
*icd_surface
)
362 if (icd_surface
->platform
== VK_ICD_WSI_PLATFORM_XLIB
)
363 return ((VkIcdSurfaceXlib
*)icd_surface
)->window
;
365 return ((VkIcdSurfaceXcb
*)icd_surface
)->window
;
369 x11_surface_get_support(VkIcdSurfaceBase
*icd_surface
,
370 struct wsi_device
*wsi_device
,
371 const VkAllocationCallbacks
*alloc
,
372 uint32_t queueFamilyIndex
,
374 bool can_handle_different_gpu
,
375 VkBool32
* pSupported
)
377 xcb_connection_t
*conn
= x11_surface_get_connection(icd_surface
);
378 xcb_window_t window
= x11_surface_get_window(icd_surface
);
380 struct wsi_x11_connection
*wsi_conn
=
381 wsi_x11_get_connection(wsi_device
, alloc
, conn
);
383 return VK_ERROR_OUT_OF_HOST_MEMORY
;
385 if (!wsi_conn
->has_dri3
) {
386 fprintf(stderr
, "vulkan: No DRI3 support detected - required for presentation\n");
387 fprintf(stderr
, "Note: Buggy applications may crash, if they do please report to vendor\n");
392 if (!can_handle_different_gpu
)
393 if (!wsi_x11_check_dri3_compatible(conn
, local_fd
))
396 unsigned visual_depth
;
397 if (!get_visualtype_for_window(conn
, window
, &visual_depth
)) {
402 if (visual_depth
!= 24 && visual_depth
!= 32) {
412 x11_surface_get_capabilities(VkIcdSurfaceBase
*icd_surface
,
413 VkSurfaceCapabilitiesKHR
*caps
)
415 xcb_connection_t
*conn
= x11_surface_get_connection(icd_surface
);
416 xcb_window_t window
= x11_surface_get_window(icd_surface
);
417 xcb_get_geometry_cookie_t geom_cookie
;
418 xcb_generic_error_t
*err
;
419 xcb_get_geometry_reply_t
*geom
;
420 unsigned visual_depth
;
422 geom_cookie
= xcb_get_geometry(conn
, window
);
424 /* This does a round-trip. This is why we do get_geometry first and
425 * wait to read the reply until after we have a visual.
427 xcb_visualtype_t
*visual
=
428 get_visualtype_for_window(conn
, window
, &visual_depth
);
431 return VK_ERROR_SURFACE_LOST_KHR
;
433 geom
= xcb_get_geometry_reply(conn
, geom_cookie
, &err
);
435 VkExtent2D extent
= { geom
->width
, geom
->height
};
436 caps
->currentExtent
= extent
;
437 caps
->minImageExtent
= extent
;
438 caps
->maxImageExtent
= extent
;
440 /* This can happen if the client didn't wait for the configure event
441 * to come back from the compositor. In that case, we don't know the
442 * size of the window so we just return valid "I don't know" stuff.
444 caps
->currentExtent
= (VkExtent2D
) { -1, -1 };
445 caps
->minImageExtent
= (VkExtent2D
) { 1, 1 };
446 /* This is the maximum supported size on Intel */
447 caps
->maxImageExtent
= (VkExtent2D
) { 1 << 14, 1 << 14 };
452 if (visual_has_alpha(visual
, visual_depth
)) {
453 caps
->supportedCompositeAlpha
= VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR
|
454 VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR
;
456 caps
->supportedCompositeAlpha
= VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR
|
457 VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR
;
460 /* For true mailbox mode, we need at least 4 images:
461 * 1) One to scan out from
462 * 2) One to have queued for scan-out
463 * 3) One to be currently held by the X server
464 * 4) One to render to
466 caps
->minImageCount
= 2;
467 /* There is no real maximum */
468 caps
->maxImageCount
= 0;
470 caps
->supportedTransforms
= VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR
;
471 caps
->currentTransform
= VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR
;
472 caps
->maxImageArrayLayers
= 1;
473 caps
->supportedUsageFlags
=
474 VK_IMAGE_USAGE_TRANSFER_SRC_BIT
|
475 VK_IMAGE_USAGE_SAMPLED_BIT
|
476 VK_IMAGE_USAGE_TRANSFER_DST_BIT
|
477 VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT
;
483 x11_surface_get_formats(VkIcdSurfaceBase
*surface
,
484 struct wsi_device
*wsi_device
,
485 uint32_t *pSurfaceFormatCount
,
486 VkSurfaceFormatKHR
*pSurfaceFormats
)
488 if (pSurfaceFormats
== NULL
) {
489 *pSurfaceFormatCount
= ARRAY_SIZE(formats
);
493 *pSurfaceFormatCount
= MIN2(*pSurfaceFormatCount
, ARRAY_SIZE(formats
));
494 typed_memcpy(pSurfaceFormats
, formats
, *pSurfaceFormatCount
);
496 return *pSurfaceFormatCount
< ARRAY_SIZE(formats
) ?
497 VK_INCOMPLETE
: VK_SUCCESS
;
501 x11_surface_get_present_modes(VkIcdSurfaceBase
*surface
,
502 uint32_t *pPresentModeCount
,
503 VkPresentModeKHR
*pPresentModes
)
505 if (pPresentModes
== NULL
) {
506 *pPresentModeCount
= ARRAY_SIZE(present_modes
);
510 *pPresentModeCount
= MIN2(*pPresentModeCount
, ARRAY_SIZE(present_modes
));
511 typed_memcpy(pPresentModes
, present_modes
, *pPresentModeCount
);
513 return *pPresentModeCount
< ARRAY_SIZE(present_modes
) ?
514 VK_INCOMPLETE
: VK_SUCCESS
;
517 VkResult
wsi_create_xcb_surface(const VkAllocationCallbacks
*pAllocator
,
518 const VkXcbSurfaceCreateInfoKHR
*pCreateInfo
,
519 VkSurfaceKHR
*pSurface
)
521 VkIcdSurfaceXcb
*surface
;
523 surface
= vk_alloc(pAllocator
, sizeof *surface
, 8,
524 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
526 return VK_ERROR_OUT_OF_HOST_MEMORY
;
528 surface
->base
.platform
= VK_ICD_WSI_PLATFORM_XCB
;
529 surface
->connection
= pCreateInfo
->connection
;
530 surface
->window
= pCreateInfo
->window
;
532 *pSurface
= VkIcdSurfaceBase_to_handle(&surface
->base
);
536 VkResult
wsi_create_xlib_surface(const VkAllocationCallbacks
*pAllocator
,
537 const VkXlibSurfaceCreateInfoKHR
*pCreateInfo
,
538 VkSurfaceKHR
*pSurface
)
540 VkIcdSurfaceXlib
*surface
;
542 surface
= vk_alloc(pAllocator
, sizeof *surface
, 8,
543 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
545 return VK_ERROR_OUT_OF_HOST_MEMORY
;
547 surface
->base
.platform
= VK_ICD_WSI_PLATFORM_XLIB
;
548 surface
->dpy
= pCreateInfo
->dpy
;
549 surface
->window
= pCreateInfo
->window
;
551 *pSurface
= VkIcdSurfaceBase_to_handle(&surface
->base
);
557 VkImage linear_image
; // for prime
558 VkDeviceMemory memory
;
559 VkDeviceMemory linear_memory
; // for prime
562 struct xshmfence
* shm_fence
;
566 struct x11_swapchain
{
567 struct wsi_swapchain base
;
569 xcb_connection_t
* conn
;
575 xcb_present_event_t event_id
;
576 xcb_special_event_t
* special_event
;
578 uint64_t last_present_msc
;
583 struct wsi_queue present_queue
;
584 struct wsi_queue acquire_queue
;
585 pthread_t queue_manager
;
587 struct x11_image images
[0];
591 x11_get_images(struct wsi_swapchain
*anv_chain
,
592 uint32_t* pCount
, VkImage
*pSwapchainImages
)
594 struct x11_swapchain
*chain
= (struct x11_swapchain
*)anv_chain
;
598 if (pSwapchainImages
== NULL
) {
599 *pCount
= chain
->base
.image_count
;
604 ret_count
= chain
->base
.image_count
;
605 if (chain
->base
.image_count
> *pCount
) {
607 result
= VK_INCOMPLETE
;
610 for (uint32_t i
= 0; i
< ret_count
; i
++)
611 pSwapchainImages
[i
] = chain
->images
[i
].image
;
/* Looks up the VkImage pair for a swapchain slot: the presentable image
 * and its linear copy (used for the prime/cross-GPU path; may be unused
 * when no linear copy is needed — TODO confirm against image init).
 * NOTE(review): return-type line, braces and any trailing return are
 * missing from this chunk; imageIndex is not bounds-checked in the
 * visible code. */
617 x11_get_image_and_linear(struct wsi_swapchain
*drv_chain
,
618 int imageIndex
, VkImage
*image
, VkImage
*linear_image
)
620 struct x11_swapchain
*chain
= (struct x11_swapchain
*)drv_chain
;
621 *image
= chain
->images
[imageIndex
].image
;
622 *linear_image
= chain
->images
[imageIndex
].linear_image
;
626 x11_handle_dri3_present_event(struct x11_swapchain
*chain
,
627 xcb_present_generic_event_t
*event
)
629 switch (event
->evtype
) {
630 case XCB_PRESENT_CONFIGURE_NOTIFY
: {
631 xcb_present_configure_notify_event_t
*config
= (void *) event
;
633 if (config
->width
!= chain
->extent
.width
||
634 config
->height
!= chain
->extent
.height
)
635 return VK_ERROR_OUT_OF_DATE_KHR
;
640 case XCB_PRESENT_EVENT_IDLE_NOTIFY
: {
641 xcb_present_idle_notify_event_t
*idle
= (void *) event
;
643 for (unsigned i
= 0; i
< chain
->base
.image_count
; i
++) {
644 if (chain
->images
[i
].pixmap
== idle
->pixmap
) {
645 chain
->images
[i
].busy
= false;
647 wsi_queue_push(&chain
->acquire_queue
, i
);
655 case XCB_PRESENT_EVENT_COMPLETE_NOTIFY
: {
656 xcb_present_complete_notify_event_t
*complete
= (void *) event
;
657 if (complete
->kind
== XCB_PRESENT_COMPLETE_KIND_PIXMAP
)
658 chain
->last_present_msc
= complete
->msc
;
/* Returns the current CLOCK_MONOTONIC time in nanoseconds.
 * NOTE(review): the declaration of `tv`, the return statement and
 * braces are missing from this chunk; code left byte-identical. */
670 static uint64_t wsi_get_current_time(void)
672 uint64_t current_time
;
675 clock_gettime(CLOCK_MONOTONIC
, &tv
);
/* Fold seconds into nanoseconds; 1000000000ull keeps the arithmetic
 * in 64-bit unsigned to avoid overflow. */
676 current_time
= tv
.tv_nsec
+ tv
.tv_sec
*1000000000ull;
/* Converts a relative timeout (ns) into an absolute CLOCK_MONOTONIC
 * deadline, clamping so current_time + timeout cannot overflow
 * UINT64_MAX (UINT64_MAX is used elsewhere to mean "wait forever").
 * NOTE(review): braces are missing from this chunk. */
680 static uint64_t wsi_get_absolute_timeout(uint64_t timeout
)
682 uint64_t current_time
= wsi_get_current_time();
684 timeout
= MIN2(UINT64_MAX
- current_time
, timeout
);
686 return current_time
+ timeout
;
690 x11_acquire_next_image_poll_x11(struct x11_swapchain
*chain
,
691 uint32_t *image_index
, uint64_t timeout
)
693 xcb_generic_event_t
*event
;
697 for (uint32_t i
= 0; i
< chain
->base
.image_count
; i
++) {
698 if (!chain
->images
[i
].busy
) {
699 /* We found a non-busy image */
700 xshmfence_await(chain
->images
[i
].shm_fence
);
702 chain
->images
[i
].busy
= true;
707 xcb_flush(chain
->conn
);
709 if (timeout
== UINT64_MAX
) {
710 event
= xcb_wait_for_special_event(chain
->conn
, chain
->special_event
);
712 return VK_ERROR_OUT_OF_DATE_KHR
;
714 event
= xcb_poll_for_special_event(chain
->conn
, chain
->special_event
);
720 atimeout
= wsi_get_absolute_timeout(timeout
);
722 pfds
.fd
= xcb_get_file_descriptor(chain
->conn
);
723 pfds
.events
= POLLIN
;
724 ret
= poll(&pfds
, 1, timeout
/ 1000 / 1000);
728 return VK_ERROR_OUT_OF_DATE_KHR
;
730 /* If a non-special event happens, the fd will still
731 * poll. So recalculate the timeout now just in case.
733 uint64_t current_time
= wsi_get_current_time();
734 if (atimeout
> current_time
)
735 timeout
= atimeout
- current_time
;
742 VkResult result
= x11_handle_dri3_present_event(chain
, (void *)event
);
744 if (result
!= VK_SUCCESS
)
750 x11_acquire_next_image_from_queue(struct x11_swapchain
*chain
,
751 uint32_t *image_index_out
, uint64_t timeout
)
753 assert(chain
->threaded
);
755 uint32_t image_index
;
756 VkResult result
= wsi_queue_pull(&chain
->acquire_queue
,
757 &image_index
, timeout
);
758 if (result
!= VK_SUCCESS
) {
760 } else if (chain
->status
!= VK_SUCCESS
) {
761 return chain
->status
;
764 assert(image_index
< chain
->base
.image_count
);
765 xshmfence_await(chain
->images
[image_index
].shm_fence
);
767 *image_index_out
= image_index
;
773 x11_present_to_x11(struct x11_swapchain
*chain
, uint32_t image_index
,
776 struct x11_image
*image
= &chain
->images
[image_index
];
778 assert(image_index
< chain
->base
.image_count
);
780 uint32_t options
= XCB_PRESENT_OPTION_NONE
;
783 int64_t remainder
= 0;
785 if (chain
->base
.present_mode
== VK_PRESENT_MODE_IMMEDIATE_KHR
)
786 options
|= XCB_PRESENT_OPTION_ASYNC
;
788 xshmfence_reset(image
->shm_fence
);
791 xcb_void_cookie_t cookie
=
792 xcb_present_pixmap(chain
->conn
,
795 (uint32_t) chain
->send_sbc
,
800 XCB_NONE
, /* target_crtc */
807 xcb_discard_reply(chain
->conn
, cookie
.sequence
);
810 xcb_flush(chain
->conn
);
816 x11_acquire_next_image(struct wsi_swapchain
*anv_chain
,
818 VkSemaphore semaphore
,
819 uint32_t *image_index
)
821 struct x11_swapchain
*chain
= (struct x11_swapchain
*)anv_chain
;
823 if (chain
->threaded
) {
824 return x11_acquire_next_image_from_queue(chain
, image_index
, timeout
);
826 return x11_acquire_next_image_poll_x11(chain
, image_index
, timeout
);
831 x11_queue_present(struct wsi_swapchain
*anv_chain
,
832 uint32_t image_index
)
834 struct x11_swapchain
*chain
= (struct x11_swapchain
*)anv_chain
;
836 if (chain
->threaded
) {
837 wsi_queue_push(&chain
->present_queue
, image_index
);
838 return chain
->status
;
840 return x11_present_to_x11(chain
, image_index
, 0);
845 x11_manage_fifo_queues(void *state
)
847 struct x11_swapchain
*chain
= state
;
850 assert(chain
->base
.present_mode
== VK_PRESENT_MODE_FIFO_KHR
);
852 while (chain
->status
== VK_SUCCESS
) {
853 /* It should be safe to unconditionally block here. Later in the loop
854 * we block until the previous present has landed on-screen. At that
855 * point, we should have received IDLE_NOTIFY on all images presented
856 * before that point so the client should be able to acquire any image
857 * other than the currently presented one.
859 uint32_t image_index
;
860 result
= wsi_queue_pull(&chain
->present_queue
, &image_index
, INT64_MAX
);
861 if (result
!= VK_SUCCESS
) {
863 } else if (chain
->status
!= VK_SUCCESS
) {
867 uint64_t target_msc
= chain
->last_present_msc
+ 1;
868 result
= x11_present_to_x11(chain
, image_index
, target_msc
);
869 if (result
!= VK_SUCCESS
)
872 while (chain
->last_present_msc
< target_msc
) {
873 xcb_generic_event_t
*event
=
874 xcb_wait_for_special_event(chain
->conn
, chain
->special_event
);
878 result
= x11_handle_dri3_present_event(chain
, (void *)event
);
879 if (result
!= VK_SUCCESS
)
885 chain
->status
= result
;
886 wsi_queue_push(&chain
->acquire_queue
, UINT32_MAX
);
892 x11_image_init(VkDevice device_h
, struct x11_swapchain
*chain
,
893 const VkSwapchainCreateInfoKHR
*pCreateInfo
,
894 const VkAllocationCallbacks
* pAllocator
,
895 struct x11_image
*image
)
897 xcb_void_cookie_t cookie
;
905 result
= chain
->base
.image_fns
->create_wsi_image(device_h
,
908 chain
->base
.needs_linear_copy
,
916 if (result
!= VK_SUCCESS
)
919 if (chain
->base
.needs_linear_copy
) {
920 result
= chain
->base
.image_fns
->create_wsi_image(device_h
,
923 chain
->base
.needs_linear_copy
,
925 &image
->linear_image
,
926 &image
->linear_memory
,
931 if (result
!= VK_SUCCESS
) {
932 chain
->base
.image_fns
->free_wsi_image(device_h
, pAllocator
,
933 image
->image
, image
->memory
);
938 image
->pixmap
= xcb_generate_id(chain
->conn
);
941 xcb_dri3_pixmap_from_buffer_checked(chain
->conn
,
945 pCreateInfo
->imageExtent
.width
,
946 pCreateInfo
->imageExtent
.height
,
948 chain
->depth
, bpp
, fd
);
949 xcb_discard_reply(chain
->conn
, cookie
.sequence
);
951 int fence_fd
= xshmfence_alloc_shm();
955 image
->shm_fence
= xshmfence_map_shm(fence_fd
);
956 if (image
->shm_fence
== NULL
)
957 goto fail_shmfence_alloc
;
959 image
->sync_fence
= xcb_generate_id(chain
->conn
);
960 xcb_dri3_fence_from_fd(chain
->conn
,
967 xshmfence_trigger(image
->shm_fence
);
975 cookie
= xcb_free_pixmap(chain
->conn
, image
->pixmap
);
976 xcb_discard_reply(chain
->conn
, cookie
.sequence
);
978 if (chain
->base
.needs_linear_copy
) {
979 chain
->base
.image_fns
->free_wsi_image(device_h
, pAllocator
,
980 image
->linear_image
, image
->linear_memory
);
982 chain
->base
.image_fns
->free_wsi_image(device_h
, pAllocator
,
983 image
->image
, image
->memory
);
989 x11_image_finish(struct x11_swapchain
*chain
,
990 const VkAllocationCallbacks
* pAllocator
,
991 struct x11_image
*image
)
993 xcb_void_cookie_t cookie
;
995 cookie
= xcb_sync_destroy_fence(chain
->conn
, image
->sync_fence
);
996 xcb_discard_reply(chain
->conn
, cookie
.sequence
);
997 xshmfence_unmap_shm(image
->shm_fence
);
999 cookie
= xcb_free_pixmap(chain
->conn
, image
->pixmap
);
1000 xcb_discard_reply(chain
->conn
, cookie
.sequence
);
1002 if (chain
->base
.needs_linear_copy
) {
1003 chain
->base
.image_fns
->free_wsi_image(chain
->base
.device
, pAllocator
,
1004 image
->linear_image
, image
->linear_memory
);
1006 chain
->base
.image_fns
->free_wsi_image(chain
->base
.device
, pAllocator
,
1007 image
->image
, image
->memory
);
1011 x11_swapchain_destroy(struct wsi_swapchain
*anv_chain
,
1012 const VkAllocationCallbacks
*pAllocator
)
1014 struct x11_swapchain
*chain
= (struct x11_swapchain
*)anv_chain
;
1015 xcb_void_cookie_t cookie
;
1017 for (uint32_t i
= 0; i
< chain
->base
.image_count
; i
++)
1018 x11_image_finish(chain
, pAllocator
, &chain
->images
[i
]);
1020 if (chain
->threaded
) {
1021 chain
->status
= VK_ERROR_OUT_OF_DATE_KHR
;
1022 /* Push a UINT32_MAX to wake up the manager */
1023 wsi_queue_push(&chain
->present_queue
, UINT32_MAX
);
1024 pthread_join(chain
->queue_manager
, NULL
);
1025 wsi_queue_destroy(&chain
->acquire_queue
);
1026 wsi_queue_destroy(&chain
->present_queue
);
1029 xcb_unregister_for_special_event(chain
->conn
, chain
->special_event
);
1030 cookie
= xcb_present_select_input_checked(chain
->conn
, chain
->event_id
,
1032 XCB_PRESENT_EVENT_MASK_NO_EVENT
);
1033 xcb_discard_reply(chain
->conn
, cookie
.sequence
);
1035 vk_free(pAllocator
, chain
);
1041 x11_surface_create_swapchain(VkIcdSurfaceBase
*icd_surface
,
1043 struct wsi_device
*wsi_device
,
1045 const VkSwapchainCreateInfoKHR
*pCreateInfo
,
1046 const VkAllocationCallbacks
* pAllocator
,
1047 const struct wsi_image_fns
*image_fns
,
1048 struct wsi_swapchain
**swapchain_out
)
1050 struct x11_swapchain
*chain
;
1051 xcb_void_cookie_t cookie
;
1054 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR
);
1056 const unsigned num_images
= pCreateInfo
->minImageCount
;
1058 size_t size
= sizeof(*chain
) + num_images
* sizeof(chain
->images
[0]);
1059 chain
= vk_alloc(pAllocator
, size
, 8,
1060 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
1062 return VK_ERROR_OUT_OF_HOST_MEMORY
;
1064 xcb_connection_t
*conn
= x11_surface_get_connection(icd_surface
);
1065 xcb_window_t window
= x11_surface_get_window(icd_surface
);
1066 xcb_get_geometry_reply_t
*geometry
=
1067 xcb_get_geometry_reply(conn
, xcb_get_geometry(conn
, window
), NULL
);
1069 if (geometry
== NULL
)
1070 return VK_ERROR_SURFACE_LOST_KHR
;
1072 chain
->base
.device
= device
;
1073 chain
->base
.destroy
= x11_swapchain_destroy
;
1074 chain
->base
.get_images
= x11_get_images
;
1075 chain
->base
.get_image_and_linear
= x11_get_image_and_linear
;
1076 chain
->base
.acquire_next_image
= x11_acquire_next_image
;
1077 chain
->base
.queue_present
= x11_queue_present
;
1078 chain
->base
.image_fns
= image_fns
;
1079 chain
->base
.present_mode
= pCreateInfo
->presentMode
;
1080 chain
->base
.image_count
= num_images
;
1082 chain
->window
= window
;
1083 chain
->depth
= geometry
->depth
;
1084 chain
->extent
= pCreateInfo
->imageExtent
;
1085 chain
->send_sbc
= 0;
1086 chain
->last_present_msc
= 0;
1087 chain
->threaded
= false;
1088 chain
->status
= VK_SUCCESS
;
1092 chain
->base
.needs_linear_copy
= false;
1093 if (!wsi_x11_check_dri3_compatible(conn
, local_fd
))
1094 chain
->base
.needs_linear_copy
= true;
1096 chain
->event_id
= xcb_generate_id(chain
->conn
);
1097 xcb_present_select_input(chain
->conn
, chain
->event_id
, chain
->window
,
1098 XCB_PRESENT_EVENT_MASK_CONFIGURE_NOTIFY
|
1099 XCB_PRESENT_EVENT_MASK_COMPLETE_NOTIFY
|
1100 XCB_PRESENT_EVENT_MASK_IDLE_NOTIFY
);
1102 /* Create an XCB event queue to hold present events outside of the usual
1103 * application event queue
1105 chain
->special_event
=
1106 xcb_register_for_special_xge(chain
->conn
, &xcb_present_id
,
1107 chain
->event_id
, NULL
);
1109 chain
->gc
= xcb_generate_id(chain
->conn
);
1111 /* FINISHME: Choose a better error. */
1112 result
= VK_ERROR_OUT_OF_HOST_MEMORY
;
1116 cookie
= xcb_create_gc(chain
->conn
,
1119 XCB_GC_GRAPHICS_EXPOSURES
,
1120 (uint32_t []) { 0 });
1121 xcb_discard_reply(chain
->conn
, cookie
.sequence
);
1124 for (; image
< chain
->base
.image_count
; image
++) {
1125 result
= x11_image_init(device
, chain
, pCreateInfo
, pAllocator
,
1126 &chain
->images
[image
]);
1127 if (result
!= VK_SUCCESS
)
1128 goto fail_init_images
;
1131 if (chain
->base
.present_mode
== VK_PRESENT_MODE_FIFO_KHR
) {
1132 chain
->threaded
= true;
1134 /* Initialize our queues. We make them base.image_count + 1 because we will
1135 * occasionally use UINT32_MAX to signal the other thread that an error
1136 * has occurred and we don't want an overflow.
1139 ret
= wsi_queue_init(&chain
->acquire_queue
, chain
->base
.image_count
+ 1);
1141 goto fail_init_images
;
1144 ret
= wsi_queue_init(&chain
->present_queue
, chain
->base
.image_count
+ 1);
1146 wsi_queue_destroy(&chain
->acquire_queue
);
1147 goto fail_init_images
;
1150 for (unsigned i
= 0; i
< chain
->base
.image_count
; i
++)
1151 wsi_queue_push(&chain
->acquire_queue
, i
);
1153 ret
= pthread_create(&chain
->queue_manager
, NULL
,
1154 x11_manage_fifo_queues
, chain
);
1156 wsi_queue_destroy(&chain
->present_queue
);
1157 wsi_queue_destroy(&chain
->acquire_queue
);
1158 goto fail_init_images
;
1162 *swapchain_out
= &chain
->base
;
1167 for (uint32_t j
= 0; j
< image
; j
++)
1168 x11_image_finish(chain
, pAllocator
, &chain
->images
[j
]);
1171 xcb_unregister_for_special_event(chain
->conn
, chain
->special_event
);
1173 vk_free(pAllocator
, chain
);
1179 wsi_x11_init_wsi(struct wsi_device
*wsi_device
,
1180 const VkAllocationCallbacks
*alloc
)
1182 struct wsi_x11
*wsi
;
1185 wsi
= vk_alloc(alloc
, sizeof(*wsi
), 8,
1186 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE
);
1188 result
= VK_ERROR_OUT_OF_HOST_MEMORY
;
1192 int ret
= pthread_mutex_init(&wsi
->mutex
, NULL
);
1194 if (ret
== ENOMEM
) {
1195 result
= VK_ERROR_OUT_OF_HOST_MEMORY
;
1197 /* FINISHME: Choose a better error. */
1198 result
= VK_ERROR_OUT_OF_HOST_MEMORY
;
1204 wsi
->connections
= _mesa_hash_table_create(NULL
, _mesa_hash_pointer
,
1205 _mesa_key_pointer_equal
);
1206 if (!wsi
->connections
) {
1207 result
= VK_ERROR_OUT_OF_HOST_MEMORY
;
1211 wsi
->base
.get_support
= x11_surface_get_support
;
1212 wsi
->base
.get_capabilities
= x11_surface_get_capabilities
;
1213 wsi
->base
.get_formats
= x11_surface_get_formats
;
1214 wsi
->base
.get_present_modes
= x11_surface_get_present_modes
;
1215 wsi
->base
.create_swapchain
= x11_surface_create_swapchain
;
1217 wsi_device
->wsi
[VK_ICD_WSI_PLATFORM_XCB
] = &wsi
->base
;
1218 wsi_device
->wsi
[VK_ICD_WSI_PLATFORM_XLIB
] = &wsi
->base
;
1223 pthread_mutex_destroy(&wsi
->mutex
);
1225 vk_free(alloc
, wsi
);
1227 wsi_device
->wsi
[VK_ICD_WSI_PLATFORM_XCB
] = NULL
;
1228 wsi_device
->wsi
[VK_ICD_WSI_PLATFORM_XLIB
] = NULL
;
1234 wsi_x11_finish_wsi(struct wsi_device
*wsi_device
,
1235 const VkAllocationCallbacks
*alloc
)
1237 struct wsi_x11
*wsi
=
1238 (struct wsi_x11
*)wsi_device
->wsi
[VK_ICD_WSI_PLATFORM_XCB
];
1241 struct hash_entry
*entry
;
1242 hash_table_foreach(wsi
->connections
, entry
)
1243 wsi_x11_connection_destroy(alloc
, entry
->data
);
1245 _mesa_hash_table_destroy(wsi
->connections
, NULL
);
1247 pthread_mutex_destroy(&wsi
->mutex
);
1249 vk_free(alloc
, wsi
);