2 * Copyright © 2015 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
24 #include <X11/Xlib-xcb.h>
25 #include <X11/xshmfence.h>
28 #include <xcb/present.h>
32 #include "vk_format_info.h"
33 #include "util/hash_table.h"
35 struct wsi_x11_connection
{
41 struct anv_wsi_interface base
;
43 pthread_mutex_t mutex
;
44 /* Hash table of xcb_connection -> wsi_x11_connection mappings */
45 struct hash_table
*connections
;
48 static struct wsi_x11_connection
*
49 wsi_x11_connection_create(const VkAllocationCallbacks
*alloc
,
50 xcb_connection_t
*conn
)
52 xcb_query_extension_cookie_t dri3_cookie
, pres_cookie
;
53 xcb_query_extension_reply_t
*dri3_reply
, *pres_reply
;
55 struct wsi_x11_connection
*wsi_conn
=
56 vk_alloc(alloc
, sizeof(*wsi_conn
), 8,
57 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE
);
61 dri3_cookie
= xcb_query_extension(conn
, 4, "DRI3");
62 pres_cookie
= xcb_query_extension(conn
, 7, "PRESENT");
64 dri3_reply
= xcb_query_extension_reply(conn
, dri3_cookie
, NULL
);
65 pres_reply
= xcb_query_extension_reply(conn
, pres_cookie
, NULL
);
66 if (dri3_reply
== NULL
|| pres_reply
== NULL
) {
69 vk_free(alloc
, wsi_conn
);
73 wsi_conn
->has_dri3
= dri3_reply
->present
!= 0;
74 wsi_conn
->has_present
= pres_reply
->present
!= 0;
83 wsi_x11_connection_destroy(const VkAllocationCallbacks
*alloc
,
84 struct wsi_x11_connection
*conn
)
89 static struct wsi_x11_connection
*
90 wsi_x11_get_connection(struct anv_wsi_device
*wsi_dev
,
91 const VkAllocationCallbacks
*alloc
,
92 xcb_connection_t
*conn
)
95 (struct wsi_x11
*)wsi_dev
->wsi
[VK_ICD_WSI_PLATFORM_XCB
];
97 pthread_mutex_lock(&wsi
->mutex
);
99 struct hash_entry
*entry
= _mesa_hash_table_search(wsi
->connections
, conn
);
101 /* We're about to make a bunch of blocking calls. Let's drop the
102 * mutex for now so we don't block up too badly.
104 pthread_mutex_unlock(&wsi
->mutex
);
106 struct wsi_x11_connection
*wsi_conn
=
107 wsi_x11_connection_create(alloc
, conn
);
109 pthread_mutex_lock(&wsi
->mutex
);
111 entry
= _mesa_hash_table_search(wsi
->connections
, conn
);
113 /* Oops, someone raced us to it */
114 wsi_x11_connection_destroy(alloc
, wsi_conn
);
116 entry
= _mesa_hash_table_insert(wsi
->connections
, conn
, wsi_conn
);
120 pthread_mutex_unlock(&wsi
->mutex
);
125 static const VkSurfaceFormatKHR formats
[] = {
126 { .format
= VK_FORMAT_B8G8R8A8_SRGB
, },
127 { .format
= VK_FORMAT_B8G8R8A8_UNORM
, },
130 static const VkPresentModeKHR present_modes
[] = {
131 VK_PRESENT_MODE_MAILBOX_KHR
,
134 static xcb_screen_t
*
135 get_screen_for_root(xcb_connection_t
*conn
, xcb_window_t root
)
137 xcb_screen_iterator_t screen_iter
=
138 xcb_setup_roots_iterator(xcb_get_setup(conn
));
140 for (; screen_iter
.rem
; xcb_screen_next (&screen_iter
)) {
141 if (screen_iter
.data
->root
== root
)
142 return screen_iter
.data
;
148 static xcb_visualtype_t
*
149 screen_get_visualtype(xcb_screen_t
*screen
, xcb_visualid_t visual_id
,
152 xcb_depth_iterator_t depth_iter
=
153 xcb_screen_allowed_depths_iterator(screen
);
155 for (; depth_iter
.rem
; xcb_depth_next (&depth_iter
)) {
156 xcb_visualtype_iterator_t visual_iter
=
157 xcb_depth_visuals_iterator (depth_iter
.data
);
159 for (; visual_iter
.rem
; xcb_visualtype_next (&visual_iter
)) {
160 if (visual_iter
.data
->visual_id
== visual_id
) {
162 *depth
= depth_iter
.data
->depth
;
163 return visual_iter
.data
;
171 static xcb_visualtype_t
*
172 connection_get_visualtype(xcb_connection_t
*conn
, xcb_visualid_t visual_id
,
175 xcb_screen_iterator_t screen_iter
=
176 xcb_setup_roots_iterator(xcb_get_setup(conn
));
178 /* For this we have to iterate over all of the screens which is rather
179 * annoying. Fortunately, there is probably only 1.
181 for (; screen_iter
.rem
; xcb_screen_next (&screen_iter
)) {
182 xcb_visualtype_t
*visual
= screen_get_visualtype(screen_iter
.data
,
191 static xcb_visualtype_t
*
192 get_visualtype_for_window(xcb_connection_t
*conn
, xcb_window_t window
,
195 xcb_query_tree_cookie_t tree_cookie
;
196 xcb_get_window_attributes_cookie_t attrib_cookie
;
197 xcb_query_tree_reply_t
*tree
;
198 xcb_get_window_attributes_reply_t
*attrib
;
200 tree_cookie
= xcb_query_tree(conn
, window
);
201 attrib_cookie
= xcb_get_window_attributes(conn
, window
);
203 tree
= xcb_query_tree_reply(conn
, tree_cookie
, NULL
);
204 attrib
= xcb_get_window_attributes_reply(conn
, attrib_cookie
, NULL
);
205 if (attrib
== NULL
|| tree
== NULL
) {
211 xcb_window_t root
= tree
->root
;
212 xcb_visualid_t visual_id
= attrib
->visual
;
216 xcb_screen_t
*screen
= get_screen_for_root(conn
, root
);
220 return screen_get_visualtype(screen
, visual_id
, depth
);
224 visual_has_alpha(xcb_visualtype_t
*visual
, unsigned depth
)
226 uint32_t rgb_mask
= visual
->red_mask
|
230 uint32_t all_mask
= 0xffffffff >> (32 - depth
);
232 /* Do we have bits left over after RGB? */
233 return (all_mask
& ~rgb_mask
) != 0;
236 static VkBool32
anv_get_physical_device_xcb_presentation_support(
237 struct anv_wsi_device
*wsi_device
,
238 VkAllocationCallbacks
*alloc
,
239 uint32_t queueFamilyIndex
,
240 xcb_connection_t
* connection
,
241 xcb_visualid_t visual_id
)
243 struct wsi_x11_connection
*wsi_conn
=
244 wsi_x11_get_connection(wsi_device
, alloc
, connection
);
246 if (!wsi_conn
->has_dri3
) {
247 fprintf(stderr
, "vulkan: No DRI3 support\n");
251 unsigned visual_depth
;
252 if (!connection_get_visualtype(connection
, visual_id
, &visual_depth
))
255 if (visual_depth
!= 24 && visual_depth
!= 32)
261 VkBool32
anv_GetPhysicalDeviceXcbPresentationSupportKHR(
262 VkPhysicalDevice physicalDevice
,
263 uint32_t queueFamilyIndex
,
264 xcb_connection_t
* connection
,
265 xcb_visualid_t visual_id
)
267 ANV_FROM_HANDLE(anv_physical_device
, device
, physicalDevice
);
269 return anv_get_physical_device_xcb_presentation_support(
271 &device
->instance
->alloc
,
272 queueFamilyIndex
, connection
, visual_id
);
275 VkBool32
anv_GetPhysicalDeviceXlibPresentationSupportKHR(
276 VkPhysicalDevice physicalDevice
,
277 uint32_t queueFamilyIndex
,
281 ANV_FROM_HANDLE(anv_physical_device
, device
, physicalDevice
);
283 return anv_get_physical_device_xcb_presentation_support(
285 &device
->instance
->alloc
,
286 queueFamilyIndex
, XGetXCBConnection(dpy
), visualID
);
289 static xcb_connection_t
*
290 x11_surface_get_connection(VkIcdSurfaceBase
*icd_surface
)
292 if (icd_surface
->platform
== VK_ICD_WSI_PLATFORM_XLIB
)
293 return XGetXCBConnection(((VkIcdSurfaceXlib
*)icd_surface
)->dpy
);
295 return ((VkIcdSurfaceXcb
*)icd_surface
)->connection
;
299 x11_surface_get_window(VkIcdSurfaceBase
*icd_surface
)
301 if (icd_surface
->platform
== VK_ICD_WSI_PLATFORM_XLIB
)
302 return ((VkIcdSurfaceXlib
*)icd_surface
)->window
;
304 return ((VkIcdSurfaceXcb
*)icd_surface
)->window
;
308 x11_surface_get_support(VkIcdSurfaceBase
*icd_surface
,
309 struct anv_wsi_device
*wsi_device
,
310 const VkAllocationCallbacks
*alloc
,
311 uint32_t queueFamilyIndex
,
312 VkBool32
* pSupported
)
314 xcb_connection_t
*conn
= x11_surface_get_connection(icd_surface
);
315 xcb_window_t window
= x11_surface_get_window(icd_surface
);
317 struct wsi_x11_connection
*wsi_conn
=
318 wsi_x11_get_connection(wsi_device
, alloc
, conn
);
320 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
322 if (!wsi_conn
->has_dri3
) {
323 fprintf(stderr
, "vulkan: No DRI3 support\n");
328 unsigned visual_depth
;
329 if (!get_visualtype_for_window(conn
, window
, &visual_depth
)) {
334 if (visual_depth
!= 24 && visual_depth
!= 32) {
344 x11_surface_get_capabilities(VkIcdSurfaceBase
*icd_surface
,
345 VkSurfaceCapabilitiesKHR
*caps
)
347 xcb_connection_t
*conn
= x11_surface_get_connection(icd_surface
);
348 xcb_window_t window
= x11_surface_get_window(icd_surface
);
349 xcb_get_geometry_cookie_t geom_cookie
;
350 xcb_generic_error_t
*err
;
351 xcb_get_geometry_reply_t
*geom
;
352 unsigned visual_depth
;
354 geom_cookie
= xcb_get_geometry(conn
, window
);
356 /* This does a round-trip. This is why we do get_geometry first and
357 * wait to read the reply until after we have a visual.
359 xcb_visualtype_t
*visual
=
360 get_visualtype_for_window(conn
, window
, &visual_depth
);
362 geom
= xcb_get_geometry_reply(conn
, geom_cookie
, &err
);
364 VkExtent2D extent
= { geom
->width
, geom
->height
};
365 caps
->currentExtent
= extent
;
366 caps
->minImageExtent
= extent
;
367 caps
->maxImageExtent
= extent
;
369 /* This can happen if the client didn't wait for the configure event
370 * to come back from the compositor. In that case, we don't know the
371 * size of the window so we just return valid "I don't know" stuff.
373 caps
->currentExtent
= (VkExtent2D
) { -1, -1 };
374 caps
->minImageExtent
= (VkExtent2D
) { 1, 1 };
375 caps
->maxImageExtent
= (VkExtent2D
) { INT16_MAX
, INT16_MAX
};
380 if (visual_has_alpha(visual
, visual_depth
)) {
381 caps
->supportedCompositeAlpha
= VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR
|
382 VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR
;
384 caps
->supportedCompositeAlpha
= VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR
|
385 VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR
;
388 caps
->minImageCount
= 2;
389 caps
->maxImageCount
= 4;
390 caps
->supportedTransforms
= VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR
;
391 caps
->currentTransform
= VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR
;
392 caps
->maxImageArrayLayers
= 1;
393 caps
->supportedUsageFlags
=
394 VK_IMAGE_USAGE_TRANSFER_SRC_BIT
|
395 VK_IMAGE_USAGE_SAMPLED_BIT
|
396 VK_IMAGE_USAGE_TRANSFER_DST_BIT
|
397 VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT
;
403 x11_surface_get_formats(VkIcdSurfaceBase
*surface
,
404 struct anv_wsi_device
*wsi_device
,
405 uint32_t *pSurfaceFormatCount
,
406 VkSurfaceFormatKHR
*pSurfaceFormats
)
408 if (pSurfaceFormats
== NULL
) {
409 *pSurfaceFormatCount
= ARRAY_SIZE(formats
);
413 assert(*pSurfaceFormatCount
>= ARRAY_SIZE(formats
));
414 typed_memcpy(pSurfaceFormats
, formats
, *pSurfaceFormatCount
);
415 *pSurfaceFormatCount
= ARRAY_SIZE(formats
);
421 x11_surface_get_present_modes(VkIcdSurfaceBase
*surface
,
422 uint32_t *pPresentModeCount
,
423 VkPresentModeKHR
*pPresentModes
)
425 if (pPresentModes
== NULL
) {
426 *pPresentModeCount
= ARRAY_SIZE(present_modes
);
430 assert(*pPresentModeCount
>= ARRAY_SIZE(present_modes
));
431 typed_memcpy(pPresentModes
, present_modes
, *pPresentModeCount
);
432 *pPresentModeCount
= ARRAY_SIZE(present_modes
);
437 static VkResult
anv_create_xcb_surface(const VkAllocationCallbacks
*pAllocator
,
438 const VkXcbSurfaceCreateInfoKHR
*pCreateInfo
,
439 VkSurfaceKHR
*pSurface
)
441 VkIcdSurfaceXcb
*surface
;
443 surface
= vk_alloc(pAllocator
, sizeof *surface
, 8,
444 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
446 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
448 surface
->base
.platform
= VK_ICD_WSI_PLATFORM_XCB
;
449 surface
->connection
= pCreateInfo
->connection
;
450 surface
->window
= pCreateInfo
->window
;
452 *pSurface
= _VkIcdSurfaceBase_to_handle(&surface
->base
);
456 VkResult
anv_CreateXcbSurfaceKHR(
457 VkInstance _instance
,
458 const VkXcbSurfaceCreateInfoKHR
* pCreateInfo
,
459 const VkAllocationCallbacks
* pAllocator
,
460 VkSurfaceKHR
* pSurface
)
462 ANV_FROM_HANDLE(anv_instance
, instance
, _instance
);
463 const VkAllocationCallbacks
*alloc
;
464 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR
);
469 alloc
= &instance
->alloc
;
471 return anv_create_xcb_surface(alloc
, pCreateInfo
, pSurface
);
474 static VkResult
anv_create_xlib_surface(const VkAllocationCallbacks
*pAllocator
,
475 const VkXlibSurfaceCreateInfoKHR
*pCreateInfo
,
476 VkSurfaceKHR
*pSurface
)
478 VkIcdSurfaceXlib
*surface
;
480 surface
= vk_alloc(pAllocator
, sizeof *surface
, 8,
481 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
483 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
485 surface
->base
.platform
= VK_ICD_WSI_PLATFORM_XLIB
;
486 surface
->dpy
= pCreateInfo
->dpy
;
487 surface
->window
= pCreateInfo
->window
;
489 *pSurface
= _VkIcdSurfaceBase_to_handle(&surface
->base
);
493 VkResult
anv_CreateXlibSurfaceKHR(
494 VkInstance _instance
,
495 const VkXlibSurfaceCreateInfoKHR
* pCreateInfo
,
496 const VkAllocationCallbacks
* pAllocator
,
497 VkSurfaceKHR
* pSurface
)
499 ANV_FROM_HANDLE(anv_instance
, instance
, _instance
);
500 const VkAllocationCallbacks
*alloc
;
502 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR
);
507 alloc
= &instance
->alloc
;
509 return anv_create_xlib_surface(alloc
, pCreateInfo
, pSurface
);
514 VkDeviceMemory memory
;
517 struct xshmfence
* shm_fence
;
521 struct x11_swapchain
{
522 struct anv_swapchain base
;
524 xcb_connection_t
* conn
;
528 uint32_t image_count
;
530 xcb_present_event_t event_id
;
531 xcb_special_event_t
* special_event
;
535 struct x11_image images
[0];
539 x11_get_images(struct anv_swapchain
*anv_chain
,
540 uint32_t* pCount
, VkImage
*pSwapchainImages
)
542 struct x11_swapchain
*chain
= (struct x11_swapchain
*)anv_chain
;
544 if (pSwapchainImages
== NULL
) {
545 *pCount
= chain
->image_count
;
549 assert(chain
->image_count
<= *pCount
);
550 for (uint32_t i
= 0; i
< chain
->image_count
; i
++)
551 pSwapchainImages
[i
] = chain
->images
[i
].image
;
553 *pCount
= chain
->image_count
;
559 x11_handle_dri3_present_event(struct x11_swapchain
*chain
,
560 xcb_present_generic_event_t
*event
)
562 switch (event
->evtype
) {
563 case XCB_PRESENT_CONFIGURE_NOTIFY
: {
564 xcb_present_configure_notify_event_t
*config
= (void *) event
;
566 if (config
->width
!= chain
->extent
.width
||
567 config
->height
!= chain
->extent
.height
)
568 return vk_error(VK_ERROR_OUT_OF_DATE_KHR
);
573 case XCB_PRESENT_EVENT_IDLE_NOTIFY
: {
574 xcb_present_idle_notify_event_t
*idle
= (void *) event
;
576 for (unsigned i
= 0; i
< chain
->image_count
; i
++) {
577 if (chain
->images
[i
].pixmap
== idle
->pixmap
) {
578 chain
->images
[i
].busy
= false;
586 case XCB_PRESENT_COMPLETE_NOTIFY
:
595 x11_acquire_next_image(struct anv_swapchain
*anv_chain
,
597 VkSemaphore semaphore
,
598 uint32_t *image_index
)
600 struct x11_swapchain
*chain
= (struct x11_swapchain
*)anv_chain
;
603 for (uint32_t i
= 0; i
< chain
->image_count
; i
++) {
604 if (!chain
->images
[i
].busy
) {
605 /* We found a non-busy image */
606 xshmfence_await(chain
->images
[i
].shm_fence
);
608 chain
->images
[i
].busy
= true;
613 xcb_flush(chain
->conn
);
614 xcb_generic_event_t
*event
=
615 xcb_wait_for_special_event(chain
->conn
, chain
->special_event
);
617 return vk_error(VK_ERROR_OUT_OF_DATE_KHR
);
619 VkResult result
= x11_handle_dri3_present_event(chain
, (void *)event
);
621 if (result
!= VK_SUCCESS
)
627 x11_queue_present(struct anv_swapchain
*anv_chain
,
628 uint32_t image_index
)
630 struct x11_swapchain
*chain
= (struct x11_swapchain
*)anv_chain
;
631 struct x11_image
*image
= &chain
->images
[image_index
];
633 assert(image_index
< chain
->image_count
);
635 uint32_t options
= XCB_PRESENT_OPTION_NONE
;
637 int64_t target_msc
= 0;
639 int64_t remainder
= 0;
641 options
|= XCB_PRESENT_OPTION_ASYNC
;
643 xshmfence_reset(image
->shm_fence
);
646 xcb_void_cookie_t cookie
=
647 xcb_present_pixmap(chain
->conn
,
650 (uint32_t) chain
->send_sbc
,
655 XCB_NONE
, /* target_crtc */
662 xcb_discard_reply(chain
->conn
, cookie
.sequence
);
665 xcb_flush(chain
->conn
);
671 x11_image_init(VkDevice device_h
, struct x11_swapchain
*chain
,
672 const VkSwapchainCreateInfoKHR
*pCreateInfo
,
673 const VkAllocationCallbacks
* pAllocator
,
674 struct x11_image
*image
)
676 xcb_void_cookie_t cookie
;
685 result
= chain
->base
.image_fns
->create_wsi_image(device_h
,
694 if (result
!= VK_SUCCESS
)
697 image
->pixmap
= xcb_generate_id(chain
->conn
);
700 xcb_dri3_pixmap_from_buffer_checked(chain
->conn
,
704 pCreateInfo
->imageExtent
.width
,
705 pCreateInfo
->imageExtent
.height
,
708 xcb_discard_reply(chain
->conn
, cookie
.sequence
);
710 int fence_fd
= xshmfence_alloc_shm();
714 image
->shm_fence
= xshmfence_map_shm(fence_fd
);
715 if (image
->shm_fence
== NULL
)
716 goto fail_shmfence_alloc
;
718 image
->sync_fence
= xcb_generate_id(chain
->conn
);
719 xcb_dri3_fence_from_fd(chain
->conn
,
726 xshmfence_trigger(image
->shm_fence
);
734 cookie
= xcb_free_pixmap(chain
->conn
, image
->pixmap
);
735 xcb_discard_reply(chain
->conn
, cookie
.sequence
);
737 chain
->base
.image_fns
->free_wsi_image(device_h
, pAllocator
,
738 image
->image
, image
->memory
);
743 x11_image_finish(struct x11_swapchain
*chain
,
744 const VkAllocationCallbacks
* pAllocator
,
745 struct x11_image
*image
)
747 xcb_void_cookie_t cookie
;
749 cookie
= xcb_sync_destroy_fence(chain
->conn
, image
->sync_fence
);
750 xcb_discard_reply(chain
->conn
, cookie
.sequence
);
751 xshmfence_unmap_shm(image
->shm_fence
);
753 cookie
= xcb_free_pixmap(chain
->conn
, image
->pixmap
);
754 xcb_discard_reply(chain
->conn
, cookie
.sequence
);
756 chain
->base
.image_fns
->free_wsi_image(chain
->base
.device
, pAllocator
,
757 image
->image
, image
->memory
);
761 x11_swapchain_destroy(struct anv_swapchain
*anv_chain
,
762 const VkAllocationCallbacks
*pAllocator
)
764 struct x11_swapchain
*chain
= (struct x11_swapchain
*)anv_chain
;
765 for (uint32_t i
= 0; i
< chain
->image_count
; i
++)
766 x11_image_finish(chain
, pAllocator
, &chain
->images
[i
]);
768 xcb_unregister_for_special_event(chain
->conn
, chain
->special_event
);
770 vk_free(pAllocator
, chain
);
776 x11_surface_create_swapchain(VkIcdSurfaceBase
*icd_surface
,
778 struct anv_wsi_device
*wsi_device
,
779 const VkSwapchainCreateInfoKHR
*pCreateInfo
,
780 const VkAllocationCallbacks
* pAllocator
,
781 const struct anv_wsi_image_fns
*image_fns
,
782 struct anv_swapchain
**swapchain_out
)
784 struct x11_swapchain
*chain
;
785 xcb_void_cookie_t cookie
;
788 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR
);
790 int num_images
= pCreateInfo
->minImageCount
;
792 /* For true mailbox mode, we need at least 4 images:
793 * 1) One to scan out from
794 * 2) One to have queued for scan-out
795 * 3) One to be currently held by the Wayland compositor
796 * 4) One to render to
798 if (pCreateInfo
->presentMode
== VK_PRESENT_MODE_MAILBOX_KHR
)
799 num_images
= MAX2(num_images
, 4);
801 size_t size
= sizeof(*chain
) + num_images
* sizeof(chain
->images
[0]);
802 chain
= vk_alloc(pAllocator
, size
, 8,
803 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
805 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
807 chain
->base
.device
= device
;
808 chain
->base
.destroy
= x11_swapchain_destroy
;
809 chain
->base
.get_images
= x11_get_images
;
810 chain
->base
.acquire_next_image
= x11_acquire_next_image
;
811 chain
->base
.queue_present
= x11_queue_present
;
812 chain
->base
.image_fns
= image_fns
;
813 chain
->conn
= x11_surface_get_connection(icd_surface
);
814 chain
->window
= x11_surface_get_window(icd_surface
);
815 chain
->extent
= pCreateInfo
->imageExtent
;
816 chain
->image_count
= num_images
;
819 chain
->event_id
= xcb_generate_id(chain
->conn
);
820 xcb_present_select_input(chain
->conn
, chain
->event_id
, chain
->window
,
821 XCB_PRESENT_EVENT_MASK_CONFIGURE_NOTIFY
|
822 XCB_PRESENT_EVENT_MASK_COMPLETE_NOTIFY
|
823 XCB_PRESENT_EVENT_MASK_IDLE_NOTIFY
);
825 /* Create an XCB event queue to hold present events outside of the usual
826 * application event queue
828 chain
->special_event
=
829 xcb_register_for_special_xge(chain
->conn
, &xcb_present_id
,
830 chain
->event_id
, NULL
);
832 chain
->gc
= xcb_generate_id(chain
->conn
);
834 /* FINISHME: Choose a better error. */
835 result
= vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
839 cookie
= xcb_create_gc(chain
->conn
,
842 XCB_GC_GRAPHICS_EXPOSURES
,
843 (uint32_t []) { 0 });
844 xcb_discard_reply(chain
->conn
, cookie
.sequence
);
847 for (; image
< chain
->image_count
; image
++) {
848 result
= x11_image_init(device
, chain
, pCreateInfo
, pAllocator
,
849 &chain
->images
[image
]);
850 if (result
!= VK_SUCCESS
)
851 goto fail_init_images
;
854 *swapchain_out
= &chain
->base
;
859 for (uint32_t j
= 0; j
< image
; j
++)
860 x11_image_finish(chain
, pAllocator
, &chain
->images
[j
]);
863 xcb_unregister_for_special_event(chain
->conn
, chain
->special_event
);
865 vk_free(pAllocator
, chain
);
871 anv_x11_init_wsi(struct anv_wsi_device
*wsi_device
,
872 const VkAllocationCallbacks
*alloc
)
877 wsi
= vk_alloc(alloc
, sizeof(*wsi
), 8,
878 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE
);
880 result
= vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
884 int ret
= pthread_mutex_init(&wsi
->mutex
, NULL
);
887 result
= vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
889 /* FINISHME: Choose a better error. */
890 result
= vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
896 wsi
->connections
= _mesa_hash_table_create(NULL
, _mesa_hash_pointer
,
897 _mesa_key_pointer_equal
);
898 if (!wsi
->connections
) {
899 result
= vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
903 wsi
->base
.get_support
= x11_surface_get_support
;
904 wsi
->base
.get_capabilities
= x11_surface_get_capabilities
;
905 wsi
->base
.get_formats
= x11_surface_get_formats
;
906 wsi
->base
.get_present_modes
= x11_surface_get_present_modes
;
907 wsi
->base
.create_swapchain
= x11_surface_create_swapchain
;
909 wsi_device
->wsi
[VK_ICD_WSI_PLATFORM_XCB
] = &wsi
->base
;
910 wsi_device
->wsi
[VK_ICD_WSI_PLATFORM_XLIB
] = &wsi
->base
;
915 pthread_mutex_destroy(&wsi
->mutex
);
919 wsi_device
->wsi
[VK_ICD_WSI_PLATFORM_XCB
] = NULL
;
920 wsi_device
->wsi
[VK_ICD_WSI_PLATFORM_XLIB
] = NULL
;
926 anv_x11_finish_wsi(struct anv_wsi_device
*wsi_device
,
927 const VkAllocationCallbacks
*alloc
)
929 struct wsi_x11
*wsi
=
930 (struct wsi_x11
*)wsi_device
->wsi
[VK_ICD_WSI_PLATFORM_XCB
];
933 _mesa_hash_table_destroy(wsi
->connections
, NULL
);
935 pthread_mutex_destroy(&wsi
->mutex
);