/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <X11/Xlib-xcb.h>
#include <X11/xshmfence.h>
#include <xcb/xcb.h>
#include <xcb/dri3.h>
#include <xcb/present.h>

#include <assert.h>
#include <errno.h>
#include <poll.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

#include "util/macros.h"
#include "util/hash_table.h"

#include "wsi_common.h"
#include "wsi_common_x11.h"
#include "wsi_common_queue.h"
#define typed_memcpy(dest, src, count) ({ \
   STATIC_ASSERT(sizeof(*src) == sizeof(*dest)); \
   memcpy((dest), (src), (count) * sizeof(*(src))); \
})
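/* Illustrative usage (not part of the original file): typed_memcpy refuses
 * to copy between arrays of mismatched element types.
 *
 *    VkPresentModeKHR dst[3];
 *    typed_memcpy(dst, present_modes, 3);             // OK: element sizes match
 *    typed_memcpy((uint8_t *)dst, present_modes, 3);  // fails the STATIC_ASSERT
 */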
struct wsi_x11_connection {
   bool has_dri3;
   bool has_present;
};

struct wsi_x11 {
   struct wsi_interface base;

   pthread_mutex_t mutex;
   /* Hash table of xcb_connection -> wsi_x11_connection mappings */
   struct hash_table *connections;
};
static struct wsi_x11_connection *
wsi_x11_connection_create(const VkAllocationCallbacks *alloc,
                          xcb_connection_t *conn)
{
   xcb_query_extension_cookie_t dri3_cookie, pres_cookie;
   xcb_query_extension_reply_t *dri3_reply, *pres_reply;

   struct wsi_x11_connection *wsi_conn =
      vk_alloc(alloc, sizeof(*wsi_conn), 8,
               VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!wsi_conn)
      return NULL;

   /* Extension name matching is case-sensitive; the Present extension
    * registers itself with the mixed-case name "Present".
    */
   dri3_cookie = xcb_query_extension(conn, 4, "DRI3");
   pres_cookie = xcb_query_extension(conn, 7, "Present");

   dri3_reply = xcb_query_extension_reply(conn, dri3_cookie, NULL);
   pres_reply = xcb_query_extension_reply(conn, pres_cookie, NULL);
   if (dri3_reply == NULL || pres_reply == NULL) {
      free(dri3_reply);
      free(pres_reply);
      vk_free(alloc, wsi_conn);
      return NULL;
   }

   wsi_conn->has_dri3 = dri3_reply->present != 0;
   wsi_conn->has_present = pres_reply->present != 0;

   free(dri3_reply);
   free(pres_reply);

   return wsi_conn;
}
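/* A note on the XCB pattern used above and throughout this file:
 * xcb_query_extension() only queues the request and returns a cookie; the
 * blocking round-trip happens in xcb_query_extension_reply().  Issuing both
 * requests before fetching either reply is what lets the two queries cost a
 * single round-trip to the X server.
 */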
static void
wsi_x11_connection_destroy(const VkAllocationCallbacks *alloc,
                           struct wsi_x11_connection *conn)
{
   vk_free(alloc, conn);
}
static struct wsi_x11_connection *
wsi_x11_get_connection(struct wsi_device *wsi_dev,
                       const VkAllocationCallbacks *alloc,
                       xcb_connection_t *conn)
{
   struct wsi_x11 *wsi =
      (struct wsi_x11 *)wsi_dev->wsi[VK_ICD_WSI_PLATFORM_XCB];

   pthread_mutex_lock(&wsi->mutex);

   struct hash_entry *entry = _mesa_hash_table_search(wsi->connections, conn);
   if (!entry) {
      /* We're about to make a bunch of blocking calls.  Let's drop the
       * mutex for now so we don't block up too badly.
       */
      pthread_mutex_unlock(&wsi->mutex);

      struct wsi_x11_connection *wsi_conn =
         wsi_x11_connection_create(alloc, conn);
      if (!wsi_conn)
         return NULL;

      pthread_mutex_lock(&wsi->mutex);

      entry = _mesa_hash_table_search(wsi->connections, conn);
      if (entry) {
         /* Oops, someone raced us to it */
         wsi_x11_connection_destroy(alloc, wsi_conn);
      } else {
         entry = _mesa_hash_table_insert(wsi->connections, conn, wsi_conn);
      }
   }

   pthread_mutex_unlock(&wsi->mutex);

   return entry->data;
}
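/* The lock/unlock dance above is the usual optimistic-create pattern: the
 * table is searched again after the mutex is re-acquired because another
 * thread may have inserted the same connection while we were blocked in
 * wsi_x11_connection_create().  Losing the race only costs one extra
 * create/destroy pair; correctness never depends on winning it.
 */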
static const VkSurfaceFormatKHR formats[] = {
   { .format = VK_FORMAT_B8G8R8A8_SRGB, },
   { .format = VK_FORMAT_B8G8R8A8_UNORM, },
};

static const VkPresentModeKHR present_modes[] = {
   VK_PRESENT_MODE_IMMEDIATE_KHR,
   VK_PRESENT_MODE_MAILBOX_KHR,
   VK_PRESENT_MODE_FIFO_KHR,
};
static xcb_screen_t *
get_screen_for_root(xcb_connection_t *conn, xcb_window_t root)
{
   xcb_screen_iterator_t screen_iter =
      xcb_setup_roots_iterator(xcb_get_setup(conn));

   for (; screen_iter.rem; xcb_screen_next(&screen_iter)) {
      if (screen_iter.data->root == root)
         return screen_iter.data;
   }

   return NULL;
}
static xcb_visualtype_t *
screen_get_visualtype(xcb_screen_t *screen, xcb_visualid_t visual_id,
                      unsigned *depth)
{
   xcb_depth_iterator_t depth_iter =
      xcb_screen_allowed_depths_iterator(screen);

   for (; depth_iter.rem; xcb_depth_next(&depth_iter)) {
      xcb_visualtype_iterator_t visual_iter =
         xcb_depth_visuals_iterator(depth_iter.data);

      for (; visual_iter.rem; xcb_visualtype_next(&visual_iter)) {
         if (visual_iter.data->visual_id == visual_id) {
            if (depth)
               *depth = depth_iter.data->depth;
            return visual_iter.data;
         }
      }
   }

   return NULL;
}
static xcb_visualtype_t *
connection_get_visualtype(xcb_connection_t *conn, xcb_visualid_t visual_id,
                          unsigned *depth)
{
   xcb_screen_iterator_t screen_iter =
      xcb_setup_roots_iterator(xcb_get_setup(conn));

   /* For this we have to iterate over all of the screens which is rather
    * annoying.  Fortunately, there is probably only 1.
    */
   for (; screen_iter.rem; xcb_screen_next(&screen_iter)) {
      xcb_visualtype_t *visual = screen_get_visualtype(screen_iter.data,
                                                       visual_id, depth);
      if (visual)
         return visual;
   }

   return NULL;
}
static xcb_visualtype_t *
get_visualtype_for_window(xcb_connection_t *conn, xcb_window_t window,
                          unsigned *depth)
{
   xcb_query_tree_cookie_t tree_cookie;
   xcb_get_window_attributes_cookie_t attrib_cookie;
   xcb_query_tree_reply_t *tree;
   xcb_get_window_attributes_reply_t *attrib;

   tree_cookie = xcb_query_tree(conn, window);
   attrib_cookie = xcb_get_window_attributes(conn, window);

   tree = xcb_query_tree_reply(conn, tree_cookie, NULL);
   attrib = xcb_get_window_attributes_reply(conn, attrib_cookie, NULL);
   if (attrib == NULL || tree == NULL) {
      free(attrib);
      free(tree);
      return NULL;
   }

   xcb_window_t root = tree->root;
   xcb_visualid_t visual_id = attrib->visual;
   free(attrib);
   free(tree);

   xcb_screen_t *screen = get_screen_for_root(conn, root);
   if (screen == NULL)
      return NULL;

   return screen_get_visualtype(screen, visual_id, depth);
}
static bool
visual_has_alpha(xcb_visualtype_t *visual, unsigned depth)
{
   uint32_t rgb_mask = visual->red_mask |
                       visual->green_mask |
                       visual->blue_mask;

   uint32_t all_mask = 0xffffffff >> (32 - depth);

   /* Do we have bits left over after RGB? */
   return (all_mask & ~rgb_mask) != 0;
}
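/* Worked example: a typical depth-32 visual has red/green/blue masks of
 * 0x00ff0000, 0x0000ff00 and 0x000000ff, so rgb_mask = 0x00ffffff and
 * all_mask = 0xffffffff.  The leftover 0xff000000 is the alpha channel and
 * the function returns true.  With the same masks at depth 24, all_mask =
 * 0x00ffffff, nothing is left over, and it returns false.
 */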
VkBool32 wsi_get_physical_device_xcb_presentation_support(
    struct wsi_device *wsi_device,
    VkAllocationCallbacks *alloc,
    uint32_t queueFamilyIndex,
    xcb_connection_t *connection,
    xcb_visualid_t visual_id)
{
   struct wsi_x11_connection *wsi_conn =
      wsi_x11_get_connection(wsi_device, alloc, connection);

   if (!wsi_conn)
      return false;

   if (!wsi_conn->has_dri3) {
      fprintf(stderr, "vulkan: No DRI3 support\n");
      return false;
   }

   unsigned visual_depth;
   if (!connection_get_visualtype(connection, visual_id, &visual_depth))
      return false;

   if (visual_depth != 24 && visual_depth != 32)
      return false;

   return true;
}
static xcb_connection_t *
x11_surface_get_connection(VkIcdSurfaceBase *icd_surface)
{
   if (icd_surface->platform == VK_ICD_WSI_PLATFORM_XLIB)
      return XGetXCBConnection(((VkIcdSurfaceXlib *)icd_surface)->dpy);
   else
      return ((VkIcdSurfaceXcb *)icd_surface)->connection;
}

static xcb_window_t
x11_surface_get_window(VkIcdSurfaceBase *icd_surface)
{
   if (icd_surface->platform == VK_ICD_WSI_PLATFORM_XLIB)
      return ((VkIcdSurfaceXlib *)icd_surface)->window;
   else
      return ((VkIcdSurfaceXcb *)icd_surface)->window;
}
static VkResult
x11_surface_get_support(VkIcdSurfaceBase *icd_surface,
                        struct wsi_device *wsi_device,
                        const VkAllocationCallbacks *alloc,
                        uint32_t queueFamilyIndex,
                        VkBool32 *pSupported)
{
   xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
   xcb_window_t window = x11_surface_get_window(icd_surface);

   struct wsi_x11_connection *wsi_conn =
      wsi_x11_get_connection(wsi_device, alloc, conn);
   if (!wsi_conn)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   if (!wsi_conn->has_dri3) {
      fprintf(stderr, "vulkan: No DRI3 support\n");
      *pSupported = false;
      return VK_SUCCESS;
   }

   unsigned visual_depth;
   if (!get_visualtype_for_window(conn, window, &visual_depth)) {
      *pSupported = false;
      return VK_SUCCESS;
   }

   if (visual_depth != 24 && visual_depth != 32) {
      *pSupported = false;
      return VK_SUCCESS;
   }

   *pSupported = true;
   return VK_SUCCESS;
}
static VkResult
x11_surface_get_capabilities(VkIcdSurfaceBase *icd_surface,
                             VkSurfaceCapabilitiesKHR *caps)
{
   xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
   xcb_window_t window = x11_surface_get_window(icd_surface);
   xcb_get_geometry_cookie_t geom_cookie;
   xcb_generic_error_t *err;
   xcb_get_geometry_reply_t *geom;
   unsigned visual_depth;

   geom_cookie = xcb_get_geometry(conn, window);

   /* This does a round-trip.  This is why we do get_geometry first and
    * wait to read the reply until after we have a visual.
    */
   xcb_visualtype_t *visual =
      get_visualtype_for_window(conn, window, &visual_depth);

   geom = xcb_get_geometry_reply(conn, geom_cookie, &err);
   if (geom) {
      VkExtent2D extent = { geom->width, geom->height };
      caps->currentExtent = extent;
      caps->minImageExtent = extent;
      caps->maxImageExtent = extent;
   } else {
      /* This can happen if the client didn't wait for the configure event
       * to come back from the compositor.  In that case, we don't know the
       * size of the window so we just return valid "I don't know" stuff.
       * The (uint32_t)-1 extent is the spec's "currently undefined" value.
       */
      caps->currentExtent = (VkExtent2D) { -1, -1 };
      caps->minImageExtent = (VkExtent2D) { 1, 1 };
      caps->maxImageExtent = (VkExtent2D) { INT16_MAX, INT16_MAX };
   }
   free(err);
   free(geom);

   if (visual_has_alpha(visual, visual_depth)) {
      caps->supportedCompositeAlpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR |
                                      VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR;
   } else {
      caps->supportedCompositeAlpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR |
                                      VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
   }

   /* For true mailbox mode, we need at least 4 images:
    *  1) One to scan out from
    *  2) One to have queued for scan-out
    *  3) One to be currently held by the X server
    *  4) One to render to
    */
   caps->minImageCount = 2;
   /* There is no real maximum */
   caps->maxImageCount = 0;

   caps->supportedTransforms = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
   caps->currentTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
   caps->maxImageArrayLayers = 1;
   caps->supportedUsageFlags =
      VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
      VK_IMAGE_USAGE_SAMPLED_BIT |
      VK_IMAGE_USAGE_TRANSFER_DST_BIT |
      VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;

   return VK_SUCCESS;
}
static VkResult
x11_surface_get_formats(VkIcdSurfaceBase *surface,
                        struct wsi_device *wsi_device,
                        uint32_t *pSurfaceFormatCount,
                        VkSurfaceFormatKHR *pSurfaceFormats)
{
   if (pSurfaceFormats == NULL) {
      *pSurfaceFormatCount = ARRAY_SIZE(formats);
      return VK_SUCCESS;
   }

   *pSurfaceFormatCount = MIN2(*pSurfaceFormatCount, ARRAY_SIZE(formats));
   typed_memcpy(pSurfaceFormats, formats, *pSurfaceFormatCount);

   return *pSurfaceFormatCount < ARRAY_SIZE(formats) ?
      VK_INCOMPLETE : VK_SUCCESS;
}
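/* x11_surface_get_formats() (and x11_surface_get_present_modes() below)
 * implements the standard Vulkan two-call idiom.  A hypothetical caller,
 * sketched here for illustration only, queries the count first and then
 * fills an array it allocates:
 *
 *    uint32_t count;
 *    x11_surface_get_formats(surface, wsi_device, &count, NULL);
 *    VkSurfaceFormatKHR *fmts = malloc(count * sizeof(*fmts));
 *    x11_surface_get_formats(surface, wsi_device, &count, fmts);
 *
 * VK_INCOMPLETE is returned only when the caller's array was too small to
 * hold every format.
 */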
static VkResult
x11_surface_get_present_modes(VkIcdSurfaceBase *surface,
                              uint32_t *pPresentModeCount,
                              VkPresentModeKHR *pPresentModes)
{
   if (pPresentModes == NULL) {
      *pPresentModeCount = ARRAY_SIZE(present_modes);
      return VK_SUCCESS;
   }

   *pPresentModeCount = MIN2(*pPresentModeCount, ARRAY_SIZE(present_modes));
   typed_memcpy(pPresentModes, present_modes, *pPresentModeCount);

   return *pPresentModeCount < ARRAY_SIZE(present_modes) ?
      VK_INCOMPLETE : VK_SUCCESS;
}
VkResult wsi_create_xcb_surface(const VkAllocationCallbacks *pAllocator,
                                const VkXcbSurfaceCreateInfoKHR *pCreateInfo,
                                VkSurfaceKHR *pSurface)
{
   VkIcdSurfaceXcb *surface;

   surface = vk_alloc(pAllocator, sizeof *surface, 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (surface == NULL)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   surface->base.platform = VK_ICD_WSI_PLATFORM_XCB;
   surface->connection = pCreateInfo->connection;
   surface->window = pCreateInfo->window;

   *pSurface = _VkIcdSurfaceBase_to_handle(&surface->base);
   return VK_SUCCESS;
}
VkResult wsi_create_xlib_surface(const VkAllocationCallbacks *pAllocator,
                                 const VkXlibSurfaceCreateInfoKHR *pCreateInfo,
                                 VkSurfaceKHR *pSurface)
{
   VkIcdSurfaceXlib *surface;

   surface = vk_alloc(pAllocator, sizeof *surface, 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (surface == NULL)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   surface->base.platform = VK_ICD_WSI_PLATFORM_XLIB;
   surface->dpy = pCreateInfo->dpy;
   surface->window = pCreateInfo->window;

   *pSurface = _VkIcdSurfaceBase_to_handle(&surface->base);
   return VK_SUCCESS;
}
struct x11_image {
   VkImage image;
   VkDeviceMemory memory;
   xcb_pixmap_t pixmap;
   bool busy;
   struct xshmfence *shm_fence;
   uint32_t sync_fence;
};

struct x11_swapchain {
   struct wsi_swapchain base;

   xcb_connection_t *conn;
   xcb_window_t window;
   xcb_gc_t gc;
   uint32_t depth;
   VkExtent2D extent;
   uint32_t image_count;

   xcb_present_event_t event_id;
   xcb_special_event_t *special_event;
   uint64_t send_sbc;
   uint64_t last_present_msc;

   bool threaded;
   VkResult status;
   struct wsi_queue present_queue;
   struct wsi_queue acquire_queue;
   pthread_t queue_manager;

   struct x11_image images[0];
};
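/* images[0] is a trailing (flexible) array member: the swapchain header and
 * its per-image state are allocated as a single block.  The allocation in
 * x11_surface_create_swapchain() is sized as
 *
 *    sizeof(*chain) + num_images * sizeof(chain->images[0])
 *
 * so chain->images[i] indexes storage immediately following the struct.
 */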
static VkResult
x11_get_images(struct wsi_swapchain *anv_chain,
               uint32_t *pCount, VkImage *pSwapchainImages)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
   uint32_t ret_count;
   VkResult result;

   if (pSwapchainImages == NULL) {
      *pCount = chain->image_count;
      return VK_SUCCESS;
   }

   result = VK_SUCCESS;
   ret_count = chain->image_count;
   if (chain->image_count > *pCount) {
      ret_count = *pCount;
      result = VK_INCOMPLETE;
   }

   for (uint32_t i = 0; i < ret_count; i++)
      pSwapchainImages[i] = chain->images[i].image;

   *pCount = ret_count;

   return result;
}
static VkResult
x11_handle_dri3_present_event(struct x11_swapchain *chain,
                              xcb_present_generic_event_t *event)
{
   switch (event->evtype) {
   case XCB_PRESENT_CONFIGURE_NOTIFY: {
      xcb_present_configure_notify_event_t *config = (void *) event;

      if (config->width != chain->extent.width ||
          config->height != chain->extent.height)
         return VK_ERROR_OUT_OF_DATE_KHR;

      break;
   }

   case XCB_PRESENT_EVENT_IDLE_NOTIFY: {
      xcb_present_idle_notify_event_t *idle = (void *) event;

      for (unsigned i = 0; i < chain->image_count; i++) {
         if (chain->images[i].pixmap == idle->pixmap) {
            chain->images[i].busy = false;
            if (chain->threaded)
               wsi_queue_push(&chain->acquire_queue, i);
            break;
         }
      }

      break;
   }

   case XCB_PRESENT_EVENT_COMPLETE_NOTIFY: {
      xcb_present_complete_notify_event_t *complete = (void *) event;
      if (complete->kind == XCB_PRESENT_COMPLETE_KIND_PIXMAP)
         chain->last_present_msc = complete->msc;
      break;
   }

   default:
      break;
   }

   return VK_SUCCESS;
}
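/* Summary of the Present events handled above:
 *  - CONFIGURE_NOTIFY: the window was resized; a size mismatch renders the
 *    swapchain out-of-date and the client must recreate it.
 *  - IDLE_NOTIFY: the X server is done reading a pixmap, so the matching
 *    image may be handed back to the application for rendering.
 *  - COMPLETE_NOTIFY: a present actually reached the screen; the MSC (media
 *    stream counter, a monotonically increasing frame counter) is recorded
 *    so the FIFO thread can throttle to one present per frame.
 */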
static uint64_t wsi_get_current_time(void)
{
   uint64_t current_time;
   struct timespec tv;

   clock_gettime(CLOCK_MONOTONIC, &tv);
   current_time = tv.tv_nsec + tv.tv_sec * 1000000000ull;

   return current_time;
}

static uint64_t wsi_get_absolute_timeout(uint64_t timeout)
{
   uint64_t current_time = wsi_get_current_time();

   timeout = MIN2(UINT64_MAX - current_time, timeout);

   return current_time + timeout;
}
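/* The MIN2 clamp above guards against unsigned wrap-around.  For example,
 * with timeout = UINT64_MAX ("wait forever") and current_time around 10^15
 * ns, current_time + timeout would overflow and produce a deadline in the
 * past; clamping makes the sum saturate at UINT64_MAX instead.
 */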
static VkResult
x11_acquire_next_image_poll_x11(struct x11_swapchain *chain,
                                uint32_t *image_index, uint64_t timeout)
{
   xcb_generic_event_t *event;
   struct pollfd pfds;
   uint64_t atimeout;

   while (1) {
      for (uint32_t i = 0; i < chain->image_count; i++) {
         if (!chain->images[i].busy) {
            /* We found a non-busy image */
            xshmfence_await(chain->images[i].shm_fence);
            *image_index = i;
            chain->images[i].busy = true;
            return VK_SUCCESS;
         }
      }

      xcb_flush(chain->conn);

      if (timeout == UINT64_MAX) {
         event = xcb_wait_for_special_event(chain->conn, chain->special_event);
         if (!event)
            return VK_ERROR_OUT_OF_DATE_KHR;
      } else {
         event = xcb_poll_for_special_event(chain->conn, chain->special_event);
         if (!event) {
            int ret;
            if (timeout == 0)
               return VK_NOT_READY;

            atimeout = wsi_get_absolute_timeout(timeout);

            pfds.fd = xcb_get_file_descriptor(chain->conn);
            pfds.events = POLLIN;
            /* poll() takes milliseconds; timeout is in nanoseconds */
            ret = poll(&pfds, 1, timeout / 1000 / 1000);
            if (ret == 0)
               return VK_TIMEOUT;
            if (ret == -1)
               return VK_ERROR_OUT_OF_DATE_KHR;

            /* If a non-special event happens, the fd will still
             * poll. So recalculate the timeout now just in case.
             */
            uint64_t current_time = wsi_get_current_time();
            if (atimeout > current_time)
               timeout = atimeout - current_time;
            else
               timeout = 0;
            continue;
         }
      }

      VkResult result = x11_handle_dri3_present_event(chain, (void *)event);
      free(event);
      if (result != VK_SUCCESS)
         return result;
   }
}
static VkResult
x11_acquire_next_image_from_queue(struct x11_swapchain *chain,
                                  uint32_t *image_index_out, uint64_t timeout)
{
   assert(chain->threaded);

   uint32_t image_index;
   VkResult result = wsi_queue_pull(&chain->acquire_queue,
                                    &image_index, timeout);
   if (result != VK_SUCCESS) {
      return result;
   } else if (chain->status != VK_SUCCESS) {
      return chain->status;
   }

   assert(image_index < chain->image_count);
   xshmfence_await(chain->images[image_index].shm_fence);

   *image_index_out = image_index;

   return VK_SUCCESS;
}
static VkResult
x11_present_to_x11(struct x11_swapchain *chain, uint32_t image_index,
                   uint64_t target_msc)
{
   struct x11_image *image = &chain->images[image_index];

   assert(image_index < chain->image_count);

   uint32_t options = XCB_PRESENT_OPTION_NONE;

   int64_t divisor = 0;
   int64_t remainder = 0;

   if (chain->base.present_mode == VK_PRESENT_MODE_IMMEDIATE_KHR)
      options |= XCB_PRESENT_OPTION_ASYNC;

   xshmfence_reset(image->shm_fence);

   ++chain->send_sbc;
   xcb_void_cookie_t cookie =
      xcb_present_pixmap(chain->conn,
                         chain->window,
                         image->pixmap,
                         (uint32_t) chain->send_sbc,
                         0,                          /* valid */
                         0,                          /* update */
                         0,                          /* x_off */
                         0,                          /* y_off */
                         XCB_NONE,                   /* target_crtc */
                         XCB_NONE,                   /* wait_fence */
                         image->sync_fence,          /* idle_fence */
                         options,
                         target_msc,
                         divisor,
                         remainder, 0, NULL);
   xcb_discard_reply(chain->conn, cookie.sequence);
   image->busy = true;

   xcb_flush(chain->conn);

   return VK_SUCCESS;
}
static VkResult
x11_acquire_next_image(struct wsi_swapchain *anv_chain,
                       uint64_t timeout,
                       VkSemaphore semaphore,
                       uint32_t *image_index)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;

   if (chain->threaded) {
      return x11_acquire_next_image_from_queue(chain, image_index, timeout);
   } else {
      return x11_acquire_next_image_poll_x11(chain, image_index, timeout);
   }
}
static VkResult
x11_queue_present(struct wsi_swapchain *anv_chain,
                  uint32_t image_index)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;

   if (chain->threaded) {
      wsi_queue_push(&chain->present_queue, image_index);
      return chain->status;
   } else {
      return x11_present_to_x11(chain, image_index, 0);
   }
}
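/* Presents therefore take one of two paths: in FIFO mode the image index is
 * only queued here and the worker thread below performs the actual
 * xcb_present_pixmap() call with per-frame throttling, while in the other
 * modes the present is issued immediately on the caller's thread with
 * target_msc = 0, i.e. "as soon as possible".
 */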
static void *
x11_manage_fifo_queues(void *state)
{
   struct x11_swapchain *chain = state;
   VkResult result;

   assert(chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR);

   while (chain->status == VK_SUCCESS) {
      /* It should be safe to unconditionally block here.  Later in the loop
       * we block until the previous present has landed on-screen.  At that
       * point, we should have received IDLE_NOTIFY on all images presented
       * before that point so the client should be able to acquire any image
       * other than the currently presented one.
       */
      uint32_t image_index;
      result = wsi_queue_pull(&chain->present_queue, &image_index, INT64_MAX);
      if (result != VK_SUCCESS) {
         goto fail;
      } else if (chain->status != VK_SUCCESS) {
         return NULL;
      }

      uint64_t target_msc = chain->last_present_msc + 1;
      result = x11_present_to_x11(chain, image_index, target_msc);
      if (result != VK_SUCCESS)
         goto fail;

      while (chain->last_present_msc < target_msc) {
         xcb_generic_event_t *event =
            xcb_wait_for_special_event(chain->conn, chain->special_event);
         if (!event) {
            result = VK_ERROR_OUT_OF_DATE_KHR;
            goto fail;
         }

         result = x11_handle_dri3_present_event(chain, (void *)event);
         free(event);
         if (result != VK_SUCCESS)
            goto fail;
      }
   }

fail:
   chain->status = result;
   wsi_queue_push(&chain->acquire_queue, UINT32_MAX);

   return NULL;
}
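/* The two wsi_queues form a bounded producer/consumer pair: queue_present
 * pushes image indices into present_queue, the thread above presents them
 * in order, and once IDLE_NOTIFY hands an image back it is pushed onto
 * acquire_queue for acquire_next_image to consume.  UINT32_MAX acts as a
 * poison value that unblocks a waiting acquirer after an error.
 */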
static VkResult
x11_image_init(VkDevice device_h, struct x11_swapchain *chain,
               const VkSwapchainCreateInfoKHR *pCreateInfo,
               const VkAllocationCallbacks *pAllocator,
               struct x11_image *image)
{
   xcb_void_cookie_t cookie;
   VkResult result;
   uint32_t row_pitch;
   uint32_t offset;
   uint32_t bpp = 32;
   int fd;
   uint32_t size;

   result = chain->base.image_fns->create_wsi_image(device_h,
                                                    pCreateInfo,
                                                    pAllocator,
                                                    &image->image,
                                                    &image->memory,
                                                    &size,
                                                    &offset,
                                                    &row_pitch,
                                                    &fd);
   if (result != VK_SUCCESS)
      return result;

   image->pixmap = xcb_generate_id(chain->conn);

   cookie =
      xcb_dri3_pixmap_from_buffer_checked(chain->conn,
                                          image->pixmap,
                                          chain->window,
                                          size,
                                          pCreateInfo->imageExtent.width,
                                          pCreateInfo->imageExtent.height,
                                          row_pitch,
                                          chain->depth, bpp, fd);
   xcb_discard_reply(chain->conn, cookie.sequence);

   int fence_fd = xshmfence_alloc_shm();
   if (fence_fd < 0) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail_pixmap;
   }

   image->shm_fence = xshmfence_map_shm(fence_fd);
   if (image->shm_fence == NULL) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail_shmfence_alloc;
   }

   image->sync_fence = xcb_generate_id(chain->conn);
   xcb_dri3_fence_from_fd(chain->conn,
                          image->pixmap,
                          image->sync_fence,
                          false,
                          fence_fd);

   image->busy = false;
   xshmfence_trigger(image->shm_fence);

   return VK_SUCCESS;

fail_shmfence_alloc:
   close(fence_fd);

fail_pixmap:
   cookie = xcb_free_pixmap(chain->conn, image->pixmap);
   xcb_discard_reply(chain->conn, cookie.sequence);

   chain->base.image_fns->free_wsi_image(device_h, pAllocator,
                                         image->image, image->memory);

   return result;
}
static void
x11_image_finish(struct x11_swapchain *chain,
                 const VkAllocationCallbacks *pAllocator,
                 struct x11_image *image)
{
   xcb_void_cookie_t cookie;

   cookie = xcb_sync_destroy_fence(chain->conn, image->sync_fence);
   xcb_discard_reply(chain->conn, cookie.sequence);
   xshmfence_unmap_shm(image->shm_fence);

   cookie = xcb_free_pixmap(chain->conn, image->pixmap);
   xcb_discard_reply(chain->conn, cookie.sequence);

   chain->base.image_fns->free_wsi_image(chain->base.device, pAllocator,
                                         image->image, image->memory);
}
static VkResult
x11_swapchain_destroy(struct wsi_swapchain *anv_chain,
                      const VkAllocationCallbacks *pAllocator)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
   xcb_void_cookie_t cookie;

   for (uint32_t i = 0; i < chain->image_count; i++)
      x11_image_finish(chain, pAllocator, &chain->images[i]);

   if (chain->threaded) {
      chain->status = VK_ERROR_OUT_OF_DATE_KHR;
      /* Push a UINT32_MAX to wake up the manager */
      wsi_queue_push(&chain->present_queue, UINT32_MAX);
      pthread_join(chain->queue_manager, NULL);
      wsi_queue_destroy(&chain->acquire_queue);
      wsi_queue_destroy(&chain->present_queue);
   }

   xcb_unregister_for_special_event(chain->conn, chain->special_event);
   cookie = xcb_present_select_input_checked(chain->conn, chain->event_id,
                                             chain->window,
                                             XCB_PRESENT_EVENT_MASK_NO_EVENT);
   xcb_discard_reply(chain->conn, cookie.sequence);

   vk_free(pAllocator, chain);

   return VK_SUCCESS;
}
static VkResult
x11_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
                             VkDevice device,
                             struct wsi_device *wsi_device,
                             const VkSwapchainCreateInfoKHR *pCreateInfo,
                             const VkAllocationCallbacks *pAllocator,
                             const struct wsi_image_fns *image_fns,
                             struct wsi_swapchain **swapchain_out)
{
   struct x11_swapchain *chain;
   xcb_void_cookie_t cookie;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR);

   const unsigned num_images = pCreateInfo->minImageCount;

   size_t size = sizeof(*chain) + num_images * sizeof(chain->images[0]);
   chain = vk_alloc(pAllocator, size, 8,
                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (chain == NULL)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
   xcb_window_t window = x11_surface_get_window(icd_surface);
   xcb_get_geometry_reply_t *geometry =
      xcb_get_geometry_reply(conn, xcb_get_geometry(conn, window), NULL);

   if (geometry == NULL) {
      vk_free(pAllocator, chain);
      return VK_ERROR_SURFACE_LOST_KHR;
   }

   chain->base.device = device;
   chain->base.destroy = x11_swapchain_destroy;
   chain->base.get_images = x11_get_images;
   chain->base.acquire_next_image = x11_acquire_next_image;
   chain->base.queue_present = x11_queue_present;
   chain->base.image_fns = image_fns;
   chain->base.present_mode = pCreateInfo->presentMode;
   chain->conn = conn;
   chain->window = window;
   chain->depth = geometry->depth;
   chain->extent = pCreateInfo->imageExtent;
   chain->image_count = num_images;
   chain->send_sbc = 0;
   chain->last_present_msc = 0;
   chain->threaded = false;
   chain->status = VK_SUCCESS;

   free(geometry);

   chain->event_id = xcb_generate_id(chain->conn);
   xcb_present_select_input(chain->conn, chain->event_id, chain->window,
                            XCB_PRESENT_EVENT_MASK_CONFIGURE_NOTIFY |
                            XCB_PRESENT_EVENT_MASK_COMPLETE_NOTIFY |
                            XCB_PRESENT_EVENT_MASK_IDLE_NOTIFY);

   /* Create an XCB event queue to hold present events outside of the usual
    * application event queue
    */
   chain->special_event =
      xcb_register_for_special_xge(chain->conn, &xcb_present_id,
                                   chain->event_id, NULL);

   chain->gc = xcb_generate_id(chain->conn);
   if (!chain->gc) {
      /* FINISHME: Choose a better error. */
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail_register;
   }

   cookie = xcb_create_gc(chain->conn,
                          chain->gc,
                          chain->window,
                          XCB_GC_GRAPHICS_EXPOSURES,
                          (uint32_t []) { 0 });
   xcb_discard_reply(chain->conn, cookie.sequence);

   uint32_t image = 0;
   for (; image < chain->image_count; image++) {
      result = x11_image_init(device, chain, pCreateInfo, pAllocator,
                              &chain->images[image]);
      if (result != VK_SUCCESS)
         goto fail_init_images;
   }

   if (chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR) {
      chain->threaded = true;

      /* Initialize our queues.  We make them image_count + 1 because we will
       * occasionally use UINT32_MAX to signal the other thread that an error
       * has occurred and we don't want an overflow.
       */
      int ret;
      ret = wsi_queue_init(&chain->acquire_queue, chain->image_count + 1);
      if (ret) {
         goto fail_init_images;
      }

      ret = wsi_queue_init(&chain->present_queue, chain->image_count + 1);
      if (ret) {
         wsi_queue_destroy(&chain->acquire_queue);
         goto fail_init_images;
      }

      for (unsigned i = 0; i < chain->image_count; i++)
         wsi_queue_push(&chain->acquire_queue, i);

      ret = pthread_create(&chain->queue_manager, NULL,
                           x11_manage_fifo_queues, chain);
      if (ret) {
         wsi_queue_destroy(&chain->present_queue);
         wsi_queue_destroy(&chain->acquire_queue);
         goto fail_init_images;
      }
   }

   *swapchain_out = &chain->base;

   return VK_SUCCESS;

fail_init_images:
   for (uint32_t j = 0; j < image; j++)
      x11_image_finish(chain, pAllocator, &chain->images[j]);

fail_register:
   xcb_unregister_for_special_event(chain->conn, chain->special_event);

   vk_free(pAllocator, chain);

   return result;
}
VkResult
wsi_x11_init_wsi(struct wsi_device *wsi_device,
                 const VkAllocationCallbacks *alloc)
{
   struct wsi_x11 *wsi;
   VkResult result;

   wsi = vk_alloc(alloc, sizeof(*wsi), 8,
                  VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!wsi) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail;
   }

   int ret = pthread_mutex_init(&wsi->mutex, NULL);
   if (ret != 0) {
      if (ret == ENOMEM) {
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
      } else {
         /* FINISHME: Choose a better error. */
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
      }

      goto fail_alloc;
   }

   wsi->connections = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                              _mesa_key_pointer_equal);
   if (!wsi->connections) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail_mutex;
   }

   wsi->base.get_support = x11_surface_get_support;
   wsi->base.get_capabilities = x11_surface_get_capabilities;
   wsi->base.get_formats = x11_surface_get_formats;
   wsi->base.get_present_modes = x11_surface_get_present_modes;
   wsi->base.create_swapchain = x11_surface_create_swapchain;

   wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB] = &wsi->base;
   wsi_device->wsi[VK_ICD_WSI_PLATFORM_XLIB] = &wsi->base;

   return VK_SUCCESS;

fail_mutex:
   pthread_mutex_destroy(&wsi->mutex);
fail_alloc:
   vk_free(alloc, wsi);
fail:
   wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB] = NULL;
   wsi_device->wsi[VK_ICD_WSI_PLATFORM_XLIB] = NULL;

   return result;
}
void
wsi_x11_finish_wsi(struct wsi_device *wsi_device,
                   const VkAllocationCallbacks *alloc)
{
   struct wsi_x11 *wsi =
      (struct wsi_x11 *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB];

   if (wsi) {
      struct hash_entry *entry;
      hash_table_foreach(wsi->connections, entry)
         wsi_x11_connection_destroy(alloc, entry->data);

      _mesa_hash_table_destroy(wsi->connections, NULL);

      pthread_mutex_destroy(&wsi->mutex);

      vk_free(alloc, wsi);
   }
}