2 * Copyright © 2015 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
24 #include <X11/Xlib-xcb.h>
25 #include <X11/xshmfence.h>
28 #include <xcb/present.h>
30 #include "util/macros.h"
38 #include "util/hash_table.h"
40 #include "wsi_common.h"
41 #include "wsi_common_x11.h"
/* memcpy that statically checks that source and destination element sizes
 * match, catching copies between mismatched array types at compile time.
 */
#define typed_memcpy(dest, src, count) ({ \
   static_assert(sizeof(*src) == sizeof(*dest), ""); \
   memcpy((dest), (src), (count) * sizeof(*(src))); \
})
48 struct wsi_x11_connection
{
54 struct wsi_interface base
;
56 pthread_mutex_t mutex
;
57 /* Hash table of xcb_connection -> wsi_x11_connection mappings */
58 struct hash_table
*connections
;
61 static struct wsi_x11_connection
*
62 wsi_x11_connection_create(const VkAllocationCallbacks
*alloc
,
63 xcb_connection_t
*conn
)
65 xcb_query_extension_cookie_t dri3_cookie
, pres_cookie
;
66 xcb_query_extension_reply_t
*dri3_reply
, *pres_reply
;
68 struct wsi_x11_connection
*wsi_conn
=
69 vk_alloc(alloc
, sizeof(*wsi_conn
), 8,
70 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE
);
74 dri3_cookie
= xcb_query_extension(conn
, 4, "DRI3");
75 pres_cookie
= xcb_query_extension(conn
, 7, "PRESENT");
77 dri3_reply
= xcb_query_extension_reply(conn
, dri3_cookie
, NULL
);
78 pres_reply
= xcb_query_extension_reply(conn
, pres_cookie
, NULL
);
79 if (dri3_reply
== NULL
|| pres_reply
== NULL
) {
82 vk_free(alloc
, wsi_conn
);
86 wsi_conn
->has_dri3
= dri3_reply
->present
!= 0;
87 wsi_conn
->has_present
= pres_reply
->present
!= 0;
96 wsi_x11_connection_destroy(const VkAllocationCallbacks
*alloc
,
97 struct wsi_x11_connection
*conn
)
102 static struct wsi_x11_connection
*
103 wsi_x11_get_connection(struct wsi_device
*wsi_dev
,
104 const VkAllocationCallbacks
*alloc
,
105 xcb_connection_t
*conn
)
107 struct wsi_x11
*wsi
=
108 (struct wsi_x11
*)wsi_dev
->wsi
[VK_ICD_WSI_PLATFORM_XCB
];
110 pthread_mutex_lock(&wsi
->mutex
);
112 struct hash_entry
*entry
= _mesa_hash_table_search(wsi
->connections
, conn
);
114 /* We're about to make a bunch of blocking calls. Let's drop the
115 * mutex for now so we don't block up too badly.
117 pthread_mutex_unlock(&wsi
->mutex
);
119 struct wsi_x11_connection
*wsi_conn
=
120 wsi_x11_connection_create(alloc
, conn
);
122 pthread_mutex_lock(&wsi
->mutex
);
124 entry
= _mesa_hash_table_search(wsi
->connections
, conn
);
126 /* Oops, someone raced us to it */
127 wsi_x11_connection_destroy(alloc
, wsi_conn
);
129 entry
= _mesa_hash_table_insert(wsi
->connections
, conn
, wsi_conn
);
133 pthread_mutex_unlock(&wsi
->mutex
);
138 static const VkSurfaceFormatKHR formats
[] = {
139 { .format
= VK_FORMAT_B8G8R8A8_SRGB
, },
140 { .format
= VK_FORMAT_B8G8R8A8_UNORM
, },
143 static const VkPresentModeKHR present_modes
[] = {
144 VK_PRESENT_MODE_MAILBOX_KHR
,
147 static xcb_screen_t
*
148 get_screen_for_root(xcb_connection_t
*conn
, xcb_window_t root
)
150 xcb_screen_iterator_t screen_iter
=
151 xcb_setup_roots_iterator(xcb_get_setup(conn
));
153 for (; screen_iter
.rem
; xcb_screen_next (&screen_iter
)) {
154 if (screen_iter
.data
->root
== root
)
155 return screen_iter
.data
;
161 static xcb_visualtype_t
*
162 screen_get_visualtype(xcb_screen_t
*screen
, xcb_visualid_t visual_id
,
165 xcb_depth_iterator_t depth_iter
=
166 xcb_screen_allowed_depths_iterator(screen
);
168 for (; depth_iter
.rem
; xcb_depth_next (&depth_iter
)) {
169 xcb_visualtype_iterator_t visual_iter
=
170 xcb_depth_visuals_iterator (depth_iter
.data
);
172 for (; visual_iter
.rem
; xcb_visualtype_next (&visual_iter
)) {
173 if (visual_iter
.data
->visual_id
== visual_id
) {
175 *depth
= depth_iter
.data
->depth
;
176 return visual_iter
.data
;
184 static xcb_visualtype_t
*
185 connection_get_visualtype(xcb_connection_t
*conn
, xcb_visualid_t visual_id
,
188 xcb_screen_iterator_t screen_iter
=
189 xcb_setup_roots_iterator(xcb_get_setup(conn
));
191 /* For this we have to iterate over all of the screens which is rather
192 * annoying. Fortunately, there is probably only 1.
194 for (; screen_iter
.rem
; xcb_screen_next (&screen_iter
)) {
195 xcb_visualtype_t
*visual
= screen_get_visualtype(screen_iter
.data
,
204 static xcb_visualtype_t
*
205 get_visualtype_for_window(xcb_connection_t
*conn
, xcb_window_t window
,
208 xcb_query_tree_cookie_t tree_cookie
;
209 xcb_get_window_attributes_cookie_t attrib_cookie
;
210 xcb_query_tree_reply_t
*tree
;
211 xcb_get_window_attributes_reply_t
*attrib
;
213 tree_cookie
= xcb_query_tree(conn
, window
);
214 attrib_cookie
= xcb_get_window_attributes(conn
, window
);
216 tree
= xcb_query_tree_reply(conn
, tree_cookie
, NULL
);
217 attrib
= xcb_get_window_attributes_reply(conn
, attrib_cookie
, NULL
);
218 if (attrib
== NULL
|| tree
== NULL
) {
224 xcb_window_t root
= tree
->root
;
225 xcb_visualid_t visual_id
= attrib
->visual
;
229 xcb_screen_t
*screen
= get_screen_for_root(conn
, root
);
233 return screen_get_visualtype(screen
, visual_id
, depth
);
237 visual_has_alpha(xcb_visualtype_t
*visual
, unsigned depth
)
239 uint32_t rgb_mask
= visual
->red_mask
|
243 uint32_t all_mask
= 0xffffffff >> (32 - depth
);
245 /* Do we have bits left over after RGB? */
246 return (all_mask
& ~rgb_mask
) != 0;
249 VkBool32
wsi_get_physical_device_xcb_presentation_support(
250 struct wsi_device
*wsi_device
,
251 VkAllocationCallbacks
*alloc
,
252 uint32_t queueFamilyIndex
,
253 xcb_connection_t
* connection
,
254 xcb_visualid_t visual_id
)
256 struct wsi_x11_connection
*wsi_conn
=
257 wsi_x11_get_connection(wsi_device
, alloc
, connection
);
259 if (!wsi_conn
->has_dri3
) {
260 fprintf(stderr
, "vulkan: No DRI3 support\n");
264 unsigned visual_depth
;
265 if (!connection_get_visualtype(connection
, visual_id
, &visual_depth
))
268 if (visual_depth
!= 24 && visual_depth
!= 32)
274 static xcb_connection_t
*
275 x11_surface_get_connection(VkIcdSurfaceBase
*icd_surface
)
277 if (icd_surface
->platform
== VK_ICD_WSI_PLATFORM_XLIB
)
278 return XGetXCBConnection(((VkIcdSurfaceXlib
*)icd_surface
)->dpy
);
280 return ((VkIcdSurfaceXcb
*)icd_surface
)->connection
;
284 x11_surface_get_window(VkIcdSurfaceBase
*icd_surface
)
286 if (icd_surface
->platform
== VK_ICD_WSI_PLATFORM_XLIB
)
287 return ((VkIcdSurfaceXlib
*)icd_surface
)->window
;
289 return ((VkIcdSurfaceXcb
*)icd_surface
)->window
;
293 x11_surface_get_support(VkIcdSurfaceBase
*icd_surface
,
294 struct wsi_device
*wsi_device
,
295 const VkAllocationCallbacks
*alloc
,
296 uint32_t queueFamilyIndex
,
297 VkBool32
* pSupported
)
299 xcb_connection_t
*conn
= x11_surface_get_connection(icd_surface
);
300 xcb_window_t window
= x11_surface_get_window(icd_surface
);
302 struct wsi_x11_connection
*wsi_conn
=
303 wsi_x11_get_connection(wsi_device
, alloc
, conn
);
305 return VK_ERROR_OUT_OF_HOST_MEMORY
;
307 if (!wsi_conn
->has_dri3
) {
308 fprintf(stderr
, "vulkan: No DRI3 support\n");
313 unsigned visual_depth
;
314 if (!get_visualtype_for_window(conn
, window
, &visual_depth
)) {
319 if (visual_depth
!= 24 && visual_depth
!= 32) {
329 x11_surface_get_capabilities(VkIcdSurfaceBase
*icd_surface
,
330 VkSurfaceCapabilitiesKHR
*caps
)
332 xcb_connection_t
*conn
= x11_surface_get_connection(icd_surface
);
333 xcb_window_t window
= x11_surface_get_window(icd_surface
);
334 xcb_get_geometry_cookie_t geom_cookie
;
335 xcb_generic_error_t
*err
;
336 xcb_get_geometry_reply_t
*geom
;
337 unsigned visual_depth
;
339 geom_cookie
= xcb_get_geometry(conn
, window
);
341 /* This does a round-trip. This is why we do get_geometry first and
342 * wait to read the reply until after we have a visual.
344 xcb_visualtype_t
*visual
=
345 get_visualtype_for_window(conn
, window
, &visual_depth
);
347 geom
= xcb_get_geometry_reply(conn
, geom_cookie
, &err
);
349 VkExtent2D extent
= { geom
->width
, geom
->height
};
350 caps
->currentExtent
= extent
;
351 caps
->minImageExtent
= extent
;
352 caps
->maxImageExtent
= extent
;
354 /* This can happen if the client didn't wait for the configure event
355 * to come back from the compositor. In that case, we don't know the
356 * size of the window so we just return valid "I don't know" stuff.
358 caps
->currentExtent
= (VkExtent2D
) { -1, -1 };
359 caps
->minImageExtent
= (VkExtent2D
) { 1, 1 };
360 caps
->maxImageExtent
= (VkExtent2D
) { INT16_MAX
, INT16_MAX
};
365 if (visual_has_alpha(visual
, visual_depth
)) {
366 caps
->supportedCompositeAlpha
= VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR
|
367 VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR
;
369 caps
->supportedCompositeAlpha
= VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR
|
370 VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR
;
373 caps
->minImageCount
= 2;
374 caps
->maxImageCount
= 4;
375 caps
->supportedTransforms
= VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR
;
376 caps
->currentTransform
= VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR
;
377 caps
->maxImageArrayLayers
= 1;
378 caps
->supportedUsageFlags
=
379 VK_IMAGE_USAGE_TRANSFER_SRC_BIT
|
380 VK_IMAGE_USAGE_SAMPLED_BIT
|
381 VK_IMAGE_USAGE_TRANSFER_DST_BIT
|
382 VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT
;
388 x11_surface_get_formats(VkIcdSurfaceBase
*surface
,
389 struct wsi_device
*wsi_device
,
390 uint32_t *pSurfaceFormatCount
,
391 VkSurfaceFormatKHR
*pSurfaceFormats
)
393 if (pSurfaceFormats
== NULL
) {
394 *pSurfaceFormatCount
= ARRAY_SIZE(formats
);
398 VkResult result
= VK_SUCCESS
;
400 if (*pSurfaceFormatCount
> ARRAY_SIZE(formats
))
401 *pSurfaceFormatCount
= ARRAY_SIZE(formats
);
402 else if (*pSurfaceFormatCount
< ARRAY_SIZE(formats
))
403 result
= VK_INCOMPLETE
;
405 typed_memcpy(pSurfaceFormats
, formats
, *pSurfaceFormatCount
);
411 x11_surface_get_present_modes(VkIcdSurfaceBase
*surface
,
412 uint32_t *pPresentModeCount
,
413 VkPresentModeKHR
*pPresentModes
)
415 if (pPresentModes
== NULL
) {
416 *pPresentModeCount
= ARRAY_SIZE(present_modes
);
420 assert(*pPresentModeCount
>= ARRAY_SIZE(present_modes
));
421 typed_memcpy(pPresentModes
, present_modes
, *pPresentModeCount
);
422 *pPresentModeCount
= ARRAY_SIZE(present_modes
);
427 VkResult
wsi_create_xcb_surface(const VkAllocationCallbacks
*pAllocator
,
428 const VkXcbSurfaceCreateInfoKHR
*pCreateInfo
,
429 VkSurfaceKHR
*pSurface
)
431 VkIcdSurfaceXcb
*surface
;
433 surface
= vk_alloc(pAllocator
, sizeof *surface
, 8,
434 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
436 return VK_ERROR_OUT_OF_HOST_MEMORY
;
438 surface
->base
.platform
= VK_ICD_WSI_PLATFORM_XCB
;
439 surface
->connection
= pCreateInfo
->connection
;
440 surface
->window
= pCreateInfo
->window
;
442 *pSurface
= _VkIcdSurfaceBase_to_handle(&surface
->base
);
446 VkResult
wsi_create_xlib_surface(const VkAllocationCallbacks
*pAllocator
,
447 const VkXlibSurfaceCreateInfoKHR
*pCreateInfo
,
448 VkSurfaceKHR
*pSurface
)
450 VkIcdSurfaceXlib
*surface
;
452 surface
= vk_alloc(pAllocator
, sizeof *surface
, 8,
453 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
455 return VK_ERROR_OUT_OF_HOST_MEMORY
;
457 surface
->base
.platform
= VK_ICD_WSI_PLATFORM_XLIB
;
458 surface
->dpy
= pCreateInfo
->dpy
;
459 surface
->window
= pCreateInfo
->window
;
461 *pSurface
= _VkIcdSurfaceBase_to_handle(&surface
->base
);
467 VkDeviceMemory memory
;
470 struct xshmfence
* shm_fence
;
474 struct x11_swapchain
{
475 struct wsi_swapchain base
;
477 xcb_connection_t
* conn
;
482 uint32_t image_count
;
484 xcb_present_event_t event_id
;
485 xcb_special_event_t
* special_event
;
489 struct x11_image images
[0];
493 x11_get_images(struct wsi_swapchain
*anv_chain
,
494 uint32_t* pCount
, VkImage
*pSwapchainImages
)
496 struct x11_swapchain
*chain
= (struct x11_swapchain
*)anv_chain
;
498 if (pSwapchainImages
== NULL
) {
499 *pCount
= chain
->image_count
;
503 assert(chain
->image_count
<= *pCount
);
504 for (uint32_t i
= 0; i
< chain
->image_count
; i
++)
505 pSwapchainImages
[i
] = chain
->images
[i
].image
;
507 *pCount
= chain
->image_count
;
513 x11_handle_dri3_present_event(struct x11_swapchain
*chain
,
514 xcb_present_generic_event_t
*event
)
516 switch (event
->evtype
) {
517 case XCB_PRESENT_CONFIGURE_NOTIFY
: {
518 xcb_present_configure_notify_event_t
*config
= (void *) event
;
520 if (config
->width
!= chain
->extent
.width
||
521 config
->height
!= chain
->extent
.height
)
522 return VK_ERROR_OUT_OF_DATE_KHR
;
527 case XCB_PRESENT_EVENT_IDLE_NOTIFY
: {
528 xcb_present_idle_notify_event_t
*idle
= (void *) event
;
530 for (unsigned i
= 0; i
< chain
->image_count
; i
++) {
531 if (chain
->images
[i
].pixmap
== idle
->pixmap
) {
532 chain
->images
[i
].busy
= false;
540 case XCB_PRESENT_COMPLETE_NOTIFY
:
549 static uint64_t wsi_get_current_time(void)
551 uint64_t current_time
;
554 clock_gettime(CLOCK_MONOTONIC
, &tv
);
555 current_time
= tv
.tv_nsec
+ tv
.tv_sec
*1000000000ull;
/* Converts a relative timeout (ns) into an absolute CLOCK_MONOTONIC
 * deadline, clamping so the addition cannot wrap past UINT64_MAX.
 */
static uint64_t wsi_get_absolute_timeout(uint64_t timeout)
{
   uint64_t current_time = wsi_get_current_time();

   timeout = MIN2(UINT64_MAX - current_time, timeout);

   return current_time + timeout;
}
569 x11_acquire_next_image(struct wsi_swapchain
*anv_chain
,
571 VkSemaphore semaphore
,
572 uint32_t *image_index
)
574 struct x11_swapchain
*chain
= (struct x11_swapchain
*)anv_chain
;
575 xcb_generic_event_t
*event
;
579 for (uint32_t i
= 0; i
< chain
->image_count
; i
++) {
580 if (!chain
->images
[i
].busy
) {
581 /* We found a non-busy image */
582 xshmfence_await(chain
->images
[i
].shm_fence
);
584 chain
->images
[i
].busy
= true;
589 xcb_flush(chain
->conn
);
591 if (timeout
== UINT64_MAX
) {
592 event
= xcb_wait_for_special_event(chain
->conn
, chain
->special_event
);
594 return VK_ERROR_OUT_OF_DATE_KHR
;
596 event
= xcb_poll_for_special_event(chain
->conn
, chain
->special_event
);
602 atimeout
= wsi_get_absolute_timeout(timeout
);
604 pfds
.fd
= xcb_get_file_descriptor(chain
->conn
);
605 pfds
.events
= POLLIN
;
606 ret
= poll(&pfds
, 1, timeout
/ 1000 / 1000);
610 return VK_ERROR_OUT_OF_DATE_KHR
;
612 /* If a non-special event happens, the fd will still
613 * poll. So recalculate the timeout now just in case.
615 uint64_t current_time
= wsi_get_current_time();
616 if (atimeout
> current_time
)
617 timeout
= atimeout
- current_time
;
624 VkResult result
= x11_handle_dri3_present_event(chain
, (void *)event
);
626 if (result
!= VK_SUCCESS
)
632 x11_queue_present(struct wsi_swapchain
*anv_chain
,
633 uint32_t image_index
)
635 struct x11_swapchain
*chain
= (struct x11_swapchain
*)anv_chain
;
636 struct x11_image
*image
= &chain
->images
[image_index
];
638 assert(image_index
< chain
->image_count
);
640 uint32_t options
= XCB_PRESENT_OPTION_NONE
;
642 int64_t target_msc
= 0;
644 int64_t remainder
= 0;
646 options
|= XCB_PRESENT_OPTION_ASYNC
;
648 xshmfence_reset(image
->shm_fence
);
651 xcb_void_cookie_t cookie
=
652 xcb_present_pixmap(chain
->conn
,
655 (uint32_t) chain
->send_sbc
,
660 XCB_NONE
, /* target_crtc */
667 xcb_discard_reply(chain
->conn
, cookie
.sequence
);
670 xcb_flush(chain
->conn
);
676 x11_image_init(VkDevice device_h
, struct x11_swapchain
*chain
,
677 const VkSwapchainCreateInfoKHR
*pCreateInfo
,
678 const VkAllocationCallbacks
* pAllocator
,
679 struct x11_image
*image
)
681 xcb_void_cookie_t cookie
;
689 result
= chain
->base
.image_fns
->create_wsi_image(device_h
,
698 if (result
!= VK_SUCCESS
)
701 image
->pixmap
= xcb_generate_id(chain
->conn
);
704 xcb_dri3_pixmap_from_buffer_checked(chain
->conn
,
708 pCreateInfo
->imageExtent
.width
,
709 pCreateInfo
->imageExtent
.height
,
711 chain
->depth
, bpp
, fd
);
712 xcb_discard_reply(chain
->conn
, cookie
.sequence
);
714 int fence_fd
= xshmfence_alloc_shm();
718 image
->shm_fence
= xshmfence_map_shm(fence_fd
);
719 if (image
->shm_fence
== NULL
)
720 goto fail_shmfence_alloc
;
722 image
->sync_fence
= xcb_generate_id(chain
->conn
);
723 xcb_dri3_fence_from_fd(chain
->conn
,
730 xshmfence_trigger(image
->shm_fence
);
738 cookie
= xcb_free_pixmap(chain
->conn
, image
->pixmap
);
739 xcb_discard_reply(chain
->conn
, cookie
.sequence
);
741 chain
->base
.image_fns
->free_wsi_image(device_h
, pAllocator
,
742 image
->image
, image
->memory
);
748 x11_image_finish(struct x11_swapchain
*chain
,
749 const VkAllocationCallbacks
* pAllocator
,
750 struct x11_image
*image
)
752 xcb_void_cookie_t cookie
;
754 cookie
= xcb_sync_destroy_fence(chain
->conn
, image
->sync_fence
);
755 xcb_discard_reply(chain
->conn
, cookie
.sequence
);
756 xshmfence_unmap_shm(image
->shm_fence
);
758 cookie
= xcb_free_pixmap(chain
->conn
, image
->pixmap
);
759 xcb_discard_reply(chain
->conn
, cookie
.sequence
);
761 chain
->base
.image_fns
->free_wsi_image(chain
->base
.device
, pAllocator
,
762 image
->image
, image
->memory
);
766 x11_swapchain_destroy(struct wsi_swapchain
*anv_chain
,
767 const VkAllocationCallbacks
*pAllocator
)
769 struct x11_swapchain
*chain
= (struct x11_swapchain
*)anv_chain
;
770 for (uint32_t i
= 0; i
< chain
->image_count
; i
++)
771 x11_image_finish(chain
, pAllocator
, &chain
->images
[i
]);
773 xcb_unregister_for_special_event(chain
->conn
, chain
->special_event
);
775 vk_free(pAllocator
, chain
);
781 x11_surface_create_swapchain(VkIcdSurfaceBase
*icd_surface
,
783 struct wsi_device
*wsi_device
,
784 const VkSwapchainCreateInfoKHR
*pCreateInfo
,
785 const VkAllocationCallbacks
* pAllocator
,
786 const struct wsi_image_fns
*image_fns
,
787 struct wsi_swapchain
**swapchain_out
)
789 struct x11_swapchain
*chain
;
790 xcb_void_cookie_t cookie
;
793 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR
);
795 int num_images
= pCreateInfo
->minImageCount
;
797 /* For true mailbox mode, we need at least 4 images:
798 * 1) One to scan out from
799 * 2) One to have queued for scan-out
800 * 3) One to be currently held by the Wayland compositor
801 * 4) One to render to
803 if (pCreateInfo
->presentMode
== VK_PRESENT_MODE_MAILBOX_KHR
)
804 num_images
= MAX2(num_images
, 4);
806 size_t size
= sizeof(*chain
) + num_images
* sizeof(chain
->images
[0]);
807 chain
= vk_alloc(pAllocator
, size
, 8,
808 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
810 return VK_ERROR_OUT_OF_HOST_MEMORY
;
812 xcb_connection_t
*conn
= x11_surface_get_connection(icd_surface
);
813 xcb_window_t window
= x11_surface_get_window(icd_surface
);
814 xcb_get_geometry_reply_t
*geometry
=
815 xcb_get_geometry_reply(conn
, xcb_get_geometry(conn
, window
), NULL
);
817 if (geometry
== NULL
)
818 return VK_ERROR_SURFACE_LOST_KHR
;
820 chain
->base
.device
= device
;
821 chain
->base
.destroy
= x11_swapchain_destroy
;
822 chain
->base
.get_images
= x11_get_images
;
823 chain
->base
.acquire_next_image
= x11_acquire_next_image
;
824 chain
->base
.queue_present
= x11_queue_present
;
825 chain
->base
.image_fns
= image_fns
;
826 chain
->base
.present_mode
= pCreateInfo
->presentMode
;
828 chain
->window
= window
;
829 chain
->depth
= geometry
->depth
;
830 chain
->extent
= pCreateInfo
->imageExtent
;
831 chain
->image_count
= num_images
;
836 chain
->event_id
= xcb_generate_id(chain
->conn
);
837 xcb_present_select_input(chain
->conn
, chain
->event_id
, chain
->window
,
838 XCB_PRESENT_EVENT_MASK_CONFIGURE_NOTIFY
|
839 XCB_PRESENT_EVENT_MASK_COMPLETE_NOTIFY
|
840 XCB_PRESENT_EVENT_MASK_IDLE_NOTIFY
);
842 /* Create an XCB event queue to hold present events outside of the usual
843 * application event queue
845 chain
->special_event
=
846 xcb_register_for_special_xge(chain
->conn
, &xcb_present_id
,
847 chain
->event_id
, NULL
);
849 chain
->gc
= xcb_generate_id(chain
->conn
);
851 /* FINISHME: Choose a better error. */
852 result
= VK_ERROR_OUT_OF_HOST_MEMORY
;
856 cookie
= xcb_create_gc(chain
->conn
,
859 XCB_GC_GRAPHICS_EXPOSURES
,
860 (uint32_t []) { 0 });
861 xcb_discard_reply(chain
->conn
, cookie
.sequence
);
864 for (; image
< chain
->image_count
; image
++) {
865 result
= x11_image_init(device
, chain
, pCreateInfo
, pAllocator
,
866 &chain
->images
[image
]);
867 if (result
!= VK_SUCCESS
)
868 goto fail_init_images
;
871 *swapchain_out
= &chain
->base
;
876 for (uint32_t j
= 0; j
< image
; j
++)
877 x11_image_finish(chain
, pAllocator
, &chain
->images
[j
]);
880 xcb_unregister_for_special_event(chain
->conn
, chain
->special_event
);
882 vk_free(pAllocator
, chain
);
888 wsi_x11_init_wsi(struct wsi_device
*wsi_device
,
889 const VkAllocationCallbacks
*alloc
)
894 wsi
= vk_alloc(alloc
, sizeof(*wsi
), 8,
895 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE
);
897 result
= VK_ERROR_OUT_OF_HOST_MEMORY
;
901 int ret
= pthread_mutex_init(&wsi
->mutex
, NULL
);
904 result
= VK_ERROR_OUT_OF_HOST_MEMORY
;
906 /* FINISHME: Choose a better error. */
907 result
= VK_ERROR_OUT_OF_HOST_MEMORY
;
913 wsi
->connections
= _mesa_hash_table_create(NULL
, _mesa_hash_pointer
,
914 _mesa_key_pointer_equal
);
915 if (!wsi
->connections
) {
916 result
= VK_ERROR_OUT_OF_HOST_MEMORY
;
920 wsi
->base
.get_support
= x11_surface_get_support
;
921 wsi
->base
.get_capabilities
= x11_surface_get_capabilities
;
922 wsi
->base
.get_formats
= x11_surface_get_formats
;
923 wsi
->base
.get_present_modes
= x11_surface_get_present_modes
;
924 wsi
->base
.create_swapchain
= x11_surface_create_swapchain
;
926 wsi_device
->wsi
[VK_ICD_WSI_PLATFORM_XCB
] = &wsi
->base
;
927 wsi_device
->wsi
[VK_ICD_WSI_PLATFORM_XLIB
] = &wsi
->base
;
932 pthread_mutex_destroy(&wsi
->mutex
);
936 wsi_device
->wsi
[VK_ICD_WSI_PLATFORM_XCB
] = NULL
;
937 wsi_device
->wsi
[VK_ICD_WSI_PLATFORM_XLIB
] = NULL
;
943 wsi_x11_finish_wsi(struct wsi_device
*wsi_device
,
944 const VkAllocationCallbacks
*alloc
)
946 struct wsi_x11
*wsi
=
947 (struct wsi_x11
*)wsi_device
->wsi
[VK_ICD_WSI_PLATFORM_XCB
];
950 _mesa_hash_table_destroy(wsi
->connections
, NULL
);
952 pthread_mutex_destroy(&wsi
->mutex
);