2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
5 * based mostly on anv driver which is:
6 * Copyright © 2015 Intel Corporation
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
28 #include <X11/Xlib-xcb.h>
29 #include <X11/xshmfence.h>
32 #include <xcb/present.h>
37 #include "vk_format.h"
38 #include "util/hash_table.h"
/* Per-xcb_connection WSI state.
 * NOTE(review): this span appears to merge fragments of two structs — in the
 * upstream layout `wsi_x11_connection` holds the DRI3/Present flags, while
 * `base`/`mutex`/`connections` belong to a separate `struct wsi_x11`; lines
 * are missing here (embedded numbering jumps 40->46). Verify against the
 * complete file. */
40 struct wsi_x11_connection
{
46 struct radv_wsi_interface base;      /* vtable-style WSI interface */
48 pthread_mutex_t mutex;               /* guards the `connections` table */
49 /* Hash table of xcb_connection -> wsi_x11_connection mappings */
50 struct hash_table *connections;
/* Allocates a wsi_x11_connection and probes the X server for the DRI3 and
 * PRESENT extensions, caching whether each is available.
 * NOTE(review): extraction gaps (numbering jumps 62->66, 71->74, 74->78) —
 * the allocation NULL-check, the free() of the extension replies, and the
 * return statements are missing from this view. */
53 static struct wsi_x11_connection *
54 wsi_x11_connection_create(struct radv_physical_device *device,
55 xcb_connection_t *conn)
57 xcb_query_extension_cookie_t dri3_cookie, pres_cookie;
58 xcb_query_extension_reply_t *dri3_reply, *pres_reply;
60 struct wsi_x11_connection *wsi_conn =
61 radv_alloc(&device->instance->alloc, sizeof(*wsi_conn), 8,
62 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
/* Fire both extension queries before blocking on either reply (one round
 * trip instead of two). */
66 dri3_cookie = xcb_query_extension(conn, 4, "DRI3");
67 pres_cookie = xcb_query_extension(conn, 7, "PRESENT");
69 dri3_reply = xcb_query_extension_reply(conn, dri3_cookie, NULL);
70 pres_reply = xcb_query_extension_reply(conn, pres_cookie, NULL);
71 if (dri3_reply == NULL || pres_reply == NULL) {
/* Query failed: release the partially-initialized object. */
74 radv_free(&device->instance->alloc, wsi_conn);
/* `present` is the extension's presence flag in the reply. */
78 wsi_conn->has_dri3 = dri3_reply->present != 0;
79 wsi_conn->has_present = pres_reply->present != 0;
/* Frees a wsi_x11_connection previously created by
 * wsi_x11_connection_create(). The xcb_connection_t itself is owned by the
 * application and is not touched here.
 * NOTE(review): the return type / `static` qualifier line is missing from
 * this extraction (numbering jumps to 88). */
88 wsi_x11_connection_destroy(struct radv_physical_device *device,
89 struct wsi_x11_connection *conn)
91 radv_free(&device->instance->alloc, conn);
/* Looks up (or lazily creates) the cached wsi_x11_connection for `conn` in
 * wsi->connections, under wsi->mutex. The blocking X round-trips in
 * wsi_x11_connection_create() are deliberately performed with the mutex
 * dropped, so a re-lookup after re-locking handles the create/create race.
 * NOTE(review): several lines are missing in this extraction (the `if`
 * around the slow path and the return statements, per the numbering gaps). */
94 static struct wsi_x11_connection *
95 wsi_x11_get_connection(struct radv_physical_device *device,
96 xcb_connection_t *conn)
99 (struct wsi_x11 *)device->wsi[VK_ICD_WSI_PLATFORM_XCB];
101 pthread_mutex_lock(&wsi->mutex);
103 struct hash_entry *entry = _mesa_hash_table_search(wsi->connections, conn);
105 /* We're about to make a bunch of blocking calls. Let's drop the
106 * mutex for now so we don't block up too badly.
108 pthread_mutex_unlock(&wsi->mutex);
110 struct wsi_x11_connection *wsi_conn =
111 wsi_x11_connection_create(device, conn);
113 pthread_mutex_lock(&wsi->mutex);
/* Re-check: another thread may have inserted while we were unlocked. */
115 entry = _mesa_hash_table_search(wsi->connections, conn);
117 /* Oops, someone raced us to it */
118 wsi_x11_connection_destroy(device, wsi_conn);
120 entry = _mesa_hash_table_insert(wsi->connections, conn, wsi_conn);
124 pthread_mutex_unlock(&wsi->mutex);
/* Surface formats advertised to the application: 8-bit BGRA, UNORM and sRGB.
 * NOTE(review): colorSpace is left default-initialized here; the closing
 * braces of both tables are outside this extraction. */
129 static const VkSurfaceFormatKHR formats[] = {
130 { .format = VK_FORMAT_B8G8R8A8_UNORM, },
131 { .format = VK_FORMAT_B8G8R8A8_SRGB, },
/* Present modes advertised; at least MAILBOX is listed (more entries may be
 * missing from this extraction). */
134 static const VkPresentModeKHR present_modes[] = {
135 VK_PRESENT_MODE_MAILBOX_KHR,
/* Walks the connection's screen list and returns the xcb_screen_t whose root
 * window matches `root`.
 * NOTE(review): the not-found return path is missing from this extraction
 * (presumably `return NULL;`) — verify against the full file. */
138 static xcb_screen_t *
139 get_screen_for_root(xcb_connection_t *conn, xcb_window_t root)
141 xcb_screen_iterator_t screen_iter =
142 xcb_setup_roots_iterator(xcb_get_setup(conn));
144 for (; screen_iter.rem; xcb_screen_next (&screen_iter)) {
145 if (screen_iter.data->root == root)
146 return screen_iter.data;
/* Searches every depth of `screen` for the visual with id `visual_id`.
 * On success returns the visualtype and stores the matching bit depth
 * through the out-parameter (the `unsigned *depth` parameter line is
 * missing from this extraction, as is the NULL fall-through return). */
152 static xcb_visualtype_t *
153 screen_get_visualtype(xcb_screen_t *screen, xcb_visualid_t visual_id,
156 xcb_depth_iterator_t depth_iter =
157 xcb_screen_allowed_depths_iterator(screen);
/* Outer loop: allowed depths; inner loop: visuals at that depth. */
159 for (; depth_iter.rem; xcb_depth_next (&depth_iter)) {
160 xcb_visualtype_iterator_t visual_iter =
161 xcb_depth_visuals_iterator (depth_iter.data);
163 for (; visual_iter.rem; xcb_visualtype_next (&visual_iter)) {
164 if (visual_iter.data->visual_id == visual_id) {
166 *depth = depth_iter.data->depth;
167 return visual_iter.data;
/* Finds a visual by id across all screens of the connection by delegating to
 * screen_get_visualtype() per screen.
 * NOTE(review): the depth out-parameter, the `if (visual)` early return and
 * the final NULL return are missing from this extraction. */
175 static xcb_visualtype_t *
176 connection_get_visualtype(xcb_connection_t *conn, xcb_visualid_t visual_id,
179 xcb_screen_iterator_t screen_iter =
180 xcb_setup_roots_iterator(xcb_get_setup(conn));
182 /* For this we have to iterate over all of the screens which is rather
183 * annoying. Fortunately, there is probably only 1.
185 for (; screen_iter.rem; xcb_screen_next (&screen_iter)) {
186 xcb_visualtype_t *visual = screen_get_visualtype(screen_iter.data,
/* Resolves the visualtype (and depth, via the missing out-parameter) used by
 * `window`: queries the window tree for the root and the window attributes
 * for the visual id, then resolves the visual on the root's screen.
 * Both requests are issued before either reply is read to save a round trip.
 * NOTE(review): the free() of `tree`/`attrib` and the error-path bodies are
 * missing from this extraction — confirm the replies are not leaked. */
195 static xcb_visualtype_t *
196 get_visualtype_for_window(xcb_connection_t *conn, xcb_window_t window,
199 xcb_query_tree_cookie_t tree_cookie;
200 xcb_get_window_attributes_cookie_t attrib_cookie;
201 xcb_query_tree_reply_t *tree;
202 xcb_get_window_attributes_reply_t *attrib;
204 tree_cookie = xcb_query_tree(conn, window);
205 attrib_cookie = xcb_get_window_attributes(conn, window);
207 tree = xcb_query_tree_reply(conn, tree_cookie, NULL);
208 attrib = xcb_get_window_attributes_reply(conn, attrib_cookie, NULL);
209 if (attrib == NULL || tree == NULL) {
215 xcb_window_t root = tree->root;
216 xcb_visualid_t visual_id = attrib->visual;
220 xcb_screen_t *screen = get_screen_for_root(conn, root);
224 return screen_get_visualtype(screen, visual_id, depth);
/* Returns true when the visual's depth has bits not covered by the RGB
 * channel masks, i.e. an alpha channel is present.
 * NOTE(review): the lines OR-ing green_mask/blue_mask into rgb_mask are
 * missing from this extraction (numbering jumps 230->234), as is the
 * return-type line. */
228 visual_has_alpha(xcb_visualtype_t *visual, unsigned depth)
230 uint32_t rgb_mask = visual->red_mask |
/* Mask of all bits the depth can hold, e.g. 0x00ffffff for depth 24. */
234 uint32_t all_mask = 0xffffffff >> (32 - depth);
236 /* Do we have bits left over after RGB? */
237 return (all_mask & ~rgb_mask) != 0;
/* Vulkan entry point: reports whether this queue family can present to the
 * given XCB connection/visual. Requires DRI3 and a 24- or 32-bit visual.
 * NOTE(review): the `return false;`/`return true;` statements and the
 * wsi_conn NULL check are missing from this extraction; queueFamilyIndex is
 * unused in the visible code. */
240 VkBool32 radv_GetPhysicalDeviceXcbPresentationSupportKHR(
241 VkPhysicalDevice physicalDevice,
242 uint32_t queueFamilyIndex,
243 xcb_connection_t* connection,
244 xcb_visualid_t visual_id)
246 RADV_FROM_HANDLE(radv_physical_device, device, physicalDevice);
248 struct wsi_x11_connection *wsi_conn =
249 wsi_x11_get_connection(device, connection);
251 if (!wsi_conn->has_dri3) {
252 fprintf(stderr, "vulkan: No DRI3 support\n");
/* The visual must exist on this connection... */
256 unsigned visual_depth;
257 if (!connection_get_visualtype(connection, visual_id, &visual_depth))
/* ...and be a depth we can actually render to. */
260 if (visual_depth != 24 && visual_depth != 32)
/* Xlib variant: forwards to the XCB entry point via XGetXCBConnection().
 * NOTE(review): the `Display *dpy` / `VisualID visualID` parameter lines and
 * the trailing arguments of the forwarded call are missing from this
 * extraction. */
266 VkBool32 radv_GetPhysicalDeviceXlibPresentationSupportKHR(
267 VkPhysicalDevice physicalDevice,
268 uint32_t queueFamilyIndex,
272 return radv_GetPhysicalDeviceXcbPresentationSupportKHR(physicalDevice,
274 XGetXCBConnection(dpy),
/* Returns the xcb_connection_t behind an ICD surface, converting from the
 * Xlib Display when the surface was created through the Xlib path. */
278 static xcb_connection_t *
279 x11_surface_get_connection(VkIcdSurfaceBase *icd_surface)
281 if (icd_surface->platform == VK_ICD_WSI_PLATFORM_XLIB)
282 return XGetXCBConnection(((VkIcdSurfaceXlib *)icd_surface)->dpy);
284 return ((VkIcdSurfaceXcb *)icd_surface)->connection;
/* Returns the X window id behind an ICD surface (Xlib or XCB flavor).
 * NOTE(review): the return-type line is missing from this extraction. */
288 x11_surface_get_window(VkIcdSurfaceBase *icd_surface)
290 if (icd_surface->platform == VK_ICD_WSI_PLATFORM_XLIB)
291 return ((VkIcdSurfaceXlib *)icd_surface)->window;
293 return ((VkIcdSurfaceXcb *)icd_surface)->window;
/* vkGetPhysicalDeviceSurfaceSupportKHR backend: writes VK_TRUE/VK_FALSE to
 * *pSupported depending on DRI3 availability and the window's visual depth
 * (24- or 32-bit required).
 * NOTE(review): the return-type line, the wsi_conn NULL check guarding the
 * OUT_OF_HOST_MEMORY return, the *pSupported assignments and the VK_SUCCESS
 * returns are all missing from this extraction. */
297 x11_surface_get_support(VkIcdSurfaceBase *icd_surface,
298 struct radv_physical_device *device,
299 uint32_t queueFamilyIndex,
300 VkBool32* pSupported)
302 xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
303 xcb_window_t window = x11_surface_get_window(icd_surface);
305 struct wsi_x11_connection *wsi_conn =
306 wsi_x11_get_connection(device, conn);
/* Allocation/lookup failure path (guard condition missing from view). */
308 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
310 if (!wsi_conn->has_dri3) {
311 fprintf(stderr, "vulkan: No DRI3 support\n");
316 unsigned visual_depth;
317 if (!get_visualtype_for_window(conn, window, &visual_depth)) {
322 if (visual_depth != 24 && visual_depth != 32) {
/* vkGetPhysicalDeviceSurfaceCapabilitiesKHR backend: queries the window's
 * geometry and visual, then fills *caps (extents, image counts, transforms,
 * composite-alpha based on whether the visual has alpha bits, usage flags).
 * NOTE(review): the return-type line, the `if (geom)` / `else` structure
 * around the extent assignments, free(geom)/free(err), and the VK_SUCCESS
 * return are missing from this extraction. */
332 x11_surface_get_capabilities(VkIcdSurfaceBase *icd_surface,
333 struct radv_physical_device *device,
334 VkSurfaceCapabilitiesKHR *caps)
336 xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
337 xcb_window_t window = x11_surface_get_window(icd_surface);
338 xcb_get_geometry_cookie_t geom_cookie;
339 xcb_generic_error_t *err;
340 xcb_get_geometry_reply_t *geom;
341 unsigned visual_depth;
343 geom_cookie = xcb_get_geometry(conn, window);
345 /* This does a round-trip. This is why we do get_geometry first and
346 * wait to read the reply until after we have a visual.
348 xcb_visualtype_t *visual =
349 get_visualtype_for_window(conn, window, &visual_depth);
351 geom = xcb_get_geometry_reply(conn, geom_cookie, &err);
/* Geometry known: the surface is locked to the current window size. */
353 VkExtent2D extent = { geom->width, geom->height };
354 caps->currentExtent = extent;
355 caps->minImageExtent = extent;
356 caps->maxImageExtent = extent;
358 /* This can happen if the client didn't wait for the configure event
359 * to come back from the compositor. In that case, we don't know the
360 * size of the window so we just return valid "I don't know" stuff.
362 caps->currentExtent = (VkExtent2D) { -1, -1 };
363 caps->minImageExtent = (VkExtent2D) { 1, 1 };
364 caps->maxImageExtent = (VkExtent2D) { INT16_MAX, INT16_MAX };
369 if (visual_has_alpha(visual, visual_depth)) {
370 caps->supportedCompositeAlpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR |
371 VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR;
373 caps->supportedCompositeAlpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR |
374 VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
/* Static capabilities: double-buffer minimum, quad-buffer max, no
 * transforms beyond identity, single-layer images. */
377 caps->minImageCount = 2;
378 caps->maxImageCount = 4;
379 caps->supportedTransforms = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
380 caps->currentTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
381 caps->maxImageArrayLayers = 1;
382 caps->supportedUsageFlags =
383 VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
384 VK_IMAGE_USAGE_SAMPLED_BIT |
385 VK_IMAGE_USAGE_TRANSFER_DST_BIT |
386 VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
/* vkGetPhysicalDeviceSurfaceFormatsKHR backend: standard two-call pattern —
 * NULL pSurfaceFormats returns the count, otherwise copies the table.
 * NOTE(review): `typed_memcpy(..., *pSurfaceFormatCount)` copies the
 * caller-supplied count of elements from the 2-entry `formats` table; with
 * the assert compiled out and a larger count this reads past the table —
 * upstream later copies ARRAY_SIZE(formats) instead. Confirm and fix in the
 * complete file. Return statements are missing from this extraction. */
392 x11_surface_get_formats(VkIcdSurfaceBase *surface,
393 struct radv_physical_device *device,
394 uint32_t *pSurfaceFormatCount,
395 VkSurfaceFormatKHR *pSurfaceFormats)
397 if (pSurfaceFormats == NULL) {
398 *pSurfaceFormatCount = ARRAY_SIZE(formats);
402 assert(*pSurfaceFormatCount >= ARRAY_SIZE(formats));
403 typed_memcpy(pSurfaceFormats, formats, *pSurfaceFormatCount);
404 *pSurfaceFormatCount = ARRAY_SIZE(formats);
/* vkGetPhysicalDeviceSurfacePresentModesKHR backend: same two-call pattern
 * as x11_surface_get_formats, over the `present_modes` table.
 * NOTE(review): same over-copy concern — typed_memcpy uses the caller's
 * count rather than ARRAY_SIZE(present_modes); verify against the full
 * file. Return statements are missing from this extraction. */
410 x11_surface_get_present_modes(VkIcdSurfaceBase *surface,
411 struct radv_physical_device *device,
412 uint32_t *pPresentModeCount,
413 VkPresentModeKHR *pPresentModes)
415 if (pPresentModes == NULL) {
416 *pPresentModeCount = ARRAY_SIZE(present_modes);
420 assert(*pPresentModeCount >= ARRAY_SIZE(present_modes));
421 typed_memcpy(pPresentModes, present_modes, *pPresentModeCount);
422 *pPresentModeCount = ARRAY_SIZE(present_modes);
/* Forward declaration — the definition appears near the end of the file.
 * NOTE(review): the return-type line (presumably VkResult) is missing from
 * this extraction. */
428 x11_surface_create_swapchain(VkIcdSurfaceBase *surface,
429 struct radv_device *device,
430 const VkSwapchainCreateInfoKHR* pCreateInfo,
431 const VkAllocationCallbacks* pAllocator,
432 struct radv_swapchain **swapchain);
/* vkCreateXcbSurfaceKHR: allocates a VkIcdSurfaceXcb, records the
 * connection and window from pCreateInfo, and hands back the surface handle.
 * NOTE(review): the `if (surface == NULL)` guard for the allocation-failure
 * return and the final `return VK_SUCCESS;` are missing from this
 * extraction. */
434 VkResult radv_CreateXcbSurfaceKHR(
435 VkInstance _instance,
436 const VkXcbSurfaceCreateInfoKHR* pCreateInfo,
437 const VkAllocationCallbacks* pAllocator,
438 VkSurfaceKHR* pSurface)
440 RADV_FROM_HANDLE(radv_instance, instance, _instance);
442 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR);
444 VkIcdSurfaceXcb *surface;
446 surface = radv_alloc2(&instance->alloc, pAllocator, sizeof *surface, 8,
447 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
449 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
451 surface->base.platform = VK_ICD_WSI_PLATFORM_XCB;
452 surface->connection = pCreateInfo->connection;
453 surface->window = pCreateInfo->window;
455 *pSurface = _VkIcdSurfaceBase_to_handle(&surface->base);
/* vkCreateXlibSurfaceKHR: Xlib twin of radv_CreateXcbSurfaceKHR — stores
 * the Display pointer instead of an xcb connection.
 * NOTE(review): same missing pieces as the XCB variant (allocation NULL
 * guard, `return VK_SUCCESS;`). */
460 VkResult radv_CreateXlibSurfaceKHR(
461 VkInstance _instance,
462 const VkXlibSurfaceCreateInfoKHR* pCreateInfo,
463 const VkAllocationCallbacks* pAllocator,
464 VkSurfaceKHR* pSurface)
466 RADV_FROM_HANDLE(radv_instance, instance, _instance);
468 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR);
470 VkIcdSurfaceXlib *surface;
472 surface = radv_alloc2(&instance->alloc, pAllocator, sizeof *surface, 8,
473 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
475 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
477 surface->base.platform = VK_ICD_WSI_PLATFORM_XLIB;
478 surface->dpy = pCreateInfo->dpy;
479 surface->window = pCreateInfo->window;
481 *pSurface = _VkIcdSurfaceBase_to_handle(&surface->base);
/* Per-image swapchain state (the `struct x11_image {` opener and the
 * pixmap/busy/sync_fence fields referenced elsewhere in this file are
 * missing from this extraction). */
487 struct radv_image* image;          /* backing radv image */
488 struct radv_device_memory* memory; /* its bound device memory */
491 struct xshmfence* shm_fence;       /* CPU-side fence shared with the server */
/* Swapchain object; images[] is a trailing variable-length array sized by
 * image_count (old-style `[0]` flexible member). */
495 struct x11_swapchain {
496 struct radv_swapchain base;
498 xcb_connection_t* conn;
502 uint32_t image_count;
504 xcb_present_event_t event_id;
505 xcb_special_event_t* special_event;
509 struct x11_image images[0];
/* vkGetSwapchainImagesKHR backend: two-call pattern — NULL pSwapchainImages
 * returns the count, otherwise fills handles for each chain image.
 * NOTE(review): the return-type line and the return statements are missing
 * from this extraction. */
513 x11_get_images(struct radv_swapchain *radv_chain,
514 uint32_t* pCount, VkImage *pSwapchainImages)
516 struct x11_swapchain *chain = (struct x11_swapchain *)radv_chain;
518 if (pSwapchainImages == NULL) {
519 *pCount = chain->image_count;
523 assert(chain->image_count <= *pCount);
524 for (uint32_t i = 0; i < chain->image_count; i++)
525 pSwapchainImages[i] = radv_image_to_handle(chain->images[i].image);
527 *pCount = chain->image_count;
/* Dispatches one Present-extension special event:
 *  - CONFIGURE_NOTIFY: window resized away from the swapchain extent ->
 *    VK_ERROR_OUT_OF_DATE_KHR;
 *  - IDLE_NOTIFY: the server released a pixmap -> mark the matching image
 *    not busy so acquire can reuse it;
 *  - COMPLETE_NOTIFY: handled, but its body is missing from this extraction.
 * NOTE(review): the return-type line, `break`s, default case and final
 * return are missing from this view. */
533 x11_handle_dri3_present_event(struct x11_swapchain *chain,
534 xcb_present_generic_event_t *event)
536 switch (event->evtype) {
537 case XCB_PRESENT_CONFIGURE_NOTIFY: {
538 xcb_present_configure_notify_event_t *config = (void *) event;
540 if (config->width != chain->extent.width ||
541 config->height != chain->extent.height)
542 return vk_error(VK_ERROR_OUT_OF_DATE_KHR);
547 case XCB_PRESENT_EVENT_IDLE_NOTIFY: {
548 xcb_present_idle_notify_event_t *idle = (void *) event;
550 for (unsigned i = 0; i < chain->image_count; i++) {
551 if (chain->images[i].pixmap == idle->pixmap) {
552 chain->images[i].busy = false;
560 case XCB_PRESENT_COMPLETE_NOTIFY:
/* vkAcquireNextImageKHR backend: scans for a non-busy image; when found,
 * waits on its shmfence (so the GPU/server are done with it) and marks it
 * busy. Otherwise blocks on the Present special-event queue, feeding events
 * through x11_handle_dri3_present_event() until an image frees up.
 * NOTE(review): the return-type line, the timeout parameter, the *image_index
 * store, the enclosing while loop, free(event) and the returns are missing
 * from this extraction. The `semaphore` parameter is unused in visible code. */
569 x11_acquire_next_image(struct radv_swapchain *radv_chain,
571 VkSemaphore semaphore,
572 uint32_t *image_index)
574 struct x11_swapchain *chain = (struct x11_swapchain *)radv_chain;
577 for (uint32_t i = 0; i < chain->image_count; i++) {
578 if (!chain->images[i].busy) {
579 /* We found a non-busy image */
580 xshmfence_await(chain->images[i].shm_fence);
582 chain->images[i].busy = true;
/* No free image: flush and block until the server sends a Present event. */
587 xcb_flush(chain->conn);
588 xcb_generic_event_t *event =
589 xcb_wait_for_special_event(chain->conn, chain->special_event);
/* NULL event => connection error (guard condition missing from view). */
591 return vk_error(VK_ERROR_OUT_OF_DATE_KHR);
593 VkResult result = x11_handle_dri3_present_event(chain, (void *)event);
595 if (result != VK_SUCCESS)
/* vkQueuePresentKHR backend: resets the image's shmfence, then issues
 * xcb_present_pixmap() for the image's pixmap and flushes the connection.
 * NOTE(review): the return-type line, most xcb_present_pixmap arguments
 * (window, pixmap, serial, regions, idle fence, msc fields), the send_sbc
 * increment and the VK_SUCCESS return are missing from this extraction.
 * Also: the `image` pointer is computed before the bounds assert on
 * image_index — same order as visible here; harmless while only the address
 * is formed, but worth confirming upstream. */
601 x11_queue_present(struct radv_swapchain *radv_chain,
602 struct radv_queue *queue,
603 uint32_t image_index)
605 struct x11_swapchain *chain = (struct x11_swapchain *)radv_chain;
606 struct x11_image *image = &chain->images[image_index];
608 assert(image_index < chain->image_count);
610 uint32_t options = XCB_PRESENT_OPTION_NONE;
612 int64_t target_msc = 0;
614 int64_t remainder = 0;
/* Condition selecting ASYNC (e.g. immediate mode) is missing from view. */
616 options |= XCB_PRESENT_OPTION_ASYNC;
618 xshmfence_reset(image->shm_fence);
621 xcb_void_cookie_t cookie =
622 xcb_present_pixmap(chain->conn,
625 (uint32_t) chain->send_sbc,
630 XCB_NONE, /* target_crtc */
637 xcb_discard_reply(chain->conn, cookie.sequence);
640 xcb_flush(chain->conn);
/* Creates one swapchain image: a radv image + memory allocation bound to it,
 * exported via a prime fd into a DRI3 pixmap on the server, plus an
 * xshmfence pair (shm segment + server-side sync fence) for idle tracking.
 * Error paths unwind in reverse order (fail labels at the bottom).
 * NOTE(review): heavy extraction gaps — the image_h declaration, remaining
 * VkImageCreateInfo fields, the memory-allocation failure check, the
 * pixmap-from-buffer depth/bpp/fd arguments, the fence_fd<0 check, the
 * success return and the enclosing braces are all missing from this view. */
646 x11_image_init(struct radv_device *device, struct x11_swapchain *chain,
647 const VkSwapchainCreateInfoKHR *pCreateInfo,
648 const VkAllocationCallbacks* pAllocator,
649 struct x11_image *image)
651 xcb_void_cookie_t cookie;
652 VkResult result = VK_SUCCESS;
656 struct radeon_surf *surface;
/* 1) Create the VkImage matching the swapchain's format/extent. */
657 result = radv_image_create(radv_device_to_handle(device),
658 &(struct radv_image_create_info) {
660 &(VkImageCreateInfo) {
661 .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
662 .imageType = VK_IMAGE_TYPE_2D,
663 .format = pCreateInfo->imageFormat,
665 .width = pCreateInfo->imageExtent.width,
666 .height = pCreateInfo->imageExtent.height,
672 /* FIXME: Need a way to use X tiling to allow scanout */
673 .tiling = VK_IMAGE_TILING_OPTIMAL,
674 .usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
680 if (result != VK_SUCCESS)
683 image->image = radv_image_from_handle(image_h);
/* 2) Allocate and bind device memory for it. */
685 VkDeviceMemory memory_h;
686 result = radv_AllocateMemory(radv_device_to_handle(device),
687 &(VkMemoryAllocateInfo) {
688 .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
689 .allocationSize = image->image->size,
690 .memoryTypeIndex = 0,
692 NULL /* XXX: pAllocator */,
694 if (result != VK_SUCCESS)
695 goto fail_create_image;
697 image->memory = radv_device_memory_from_handle(memory_h);
698 // image->memory->bo.is_winsys_bo = true;
700 radv_BindImageMemory(VK_NULL_HANDLE, image_h, memory_h, 0);
/* 3) Export the BO as an fd and stamp tiling metadata for the server. */
702 bret = device->ws->buffer_get_fd(device->ws,
703 image->memory->bo, &fd);
705 goto fail_alloc_memory;
708 struct radeon_bo_metadata metadata;
709 radv_init_metadata(device, image->image, &metadata);
710 device->ws->buffer_set_metadata(image->memory->bo, &metadata);
712 surface = &image->image->surface;
/* 4) Wrap the fd in a server-side pixmap via DRI3. */
715 image->pixmap = xcb_generate_id(chain->conn);
718 xcb_dri3_pixmap_from_buffer_checked(chain->conn,
722 pCreateInfo->imageExtent.width,
723 pCreateInfo->imageExtent.height,
724 surface->level[0].pitch_bytes,
726 xcb_discard_reply(chain->conn, cookie.sequence);
/* 5) Shared-memory fence: mapped locally, mirrored as an X sync fence. */
728 int fence_fd = xshmfence_alloc_shm();
732 image->shm_fence = xshmfence_map_shm(fence_fd);
733 if (image->shm_fence == NULL)
734 goto fail_shmfence_alloc;
736 image->sync_fence = xcb_generate_id(chain->conn);
737 xcb_dri3_fence_from_fd(chain->conn,
/* Start triggered so the first acquire does not block. */
744 xshmfence_trigger(image->shm_fence);
/* --- error unwind (labels missing from this extraction) --- */
752 cookie = xcb_free_pixmap(chain->conn, image->pixmap);
753 xcb_discard_reply(chain->conn, cookie.sequence);
756 radv_FreeMemory(radv_device_to_handle(chain->base.device),
757 radv_device_memory_to_handle(image->memory), pAllocator);
760 radv_DestroyImage(radv_device_to_handle(chain->base.device),
761 radv_image_to_handle(image->image), pAllocator);
/* Tears down everything x11_image_init() created, in reverse order:
 * X sync fence, shmfence mapping, server pixmap, then the VkImage and its
 * memory. NOTE(review): the return-type line and enclosing braces are
 * missing from this extraction. */
767 x11_image_finish(struct x11_swapchain *chain,
768 const VkAllocationCallbacks* pAllocator,
769 struct x11_image *image)
771 xcb_void_cookie_t cookie;
773 cookie = xcb_sync_destroy_fence(chain->conn, image->sync_fence);
774 xcb_discard_reply(chain->conn, cookie.sequence);
775 xshmfence_unmap_shm(image->shm_fence);
777 cookie = xcb_free_pixmap(chain->conn, image->pixmap);
778 xcb_discard_reply(chain->conn, cookie.sequence);
780 radv_DestroyImage(radv_device_to_handle(chain->base.device),
781 radv_image_to_handle(image->image), pAllocator);
783 radv_FreeMemory(radv_device_to_handle(chain->base.device),
784 radv_device_memory_to_handle(image->memory), pAllocator);
/* Swapchain destroy callback: finishes every image, unregisters the Present
 * special-event queue, and frees the chain allocation.
 * NOTE(review): the return-type line and the return statement are missing
 * from this extraction. */
789 x11_swapchain_destroy(struct radv_swapchain *radv_chain,
790 const VkAllocationCallbacks *pAllocator)
792 struct x11_swapchain *chain = (struct x11_swapchain *)radv_chain;
794 for (uint32_t i = 0; i < chain->image_count; i++)
795 x11_image_finish(chain, pAllocator, &chain->images[i]);
797 xcb_unregister_for_special_event(chain->conn, chain->special_event);
799 radv_free2(&chain->base.device->alloc, pAllocator, chain);
/* Builds an x11_swapchain: allocates chain + trailing images[] in one block,
 * wires the radv_swapchain vtable, registers for Present special events on
 * the window, creates a GC for presentation, then initializes each image.
 * On failure the already-initialized images, the special event registration
 * and the chain allocation are unwound.
 * NOTE(review): the return-type line, the chain NULL-check, the image loop
 * variable declaration, several xcb_create_gc arguments, the success return
 * and the fail labels are missing from this extraction. */
805 x11_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
806 struct radv_device *device,
807 const VkSwapchainCreateInfoKHR *pCreateInfo,
808 const VkAllocationCallbacks* pAllocator,
809 struct radv_swapchain **swapchain_out)
811 struct x11_swapchain *chain;
812 xcb_void_cookie_t cookie;
815 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR);
817 int num_images = pCreateInfo->minImageCount;
819 /* For true mailbox mode, we need at least 4 images:
820 * 1) One to scan out from
821 * 2) One to have queued for scan-out
822 * 3) One to be currently held by the Wayland compositor
823 * 4) One to render to
825 if (pCreateInfo->presentMode == VK_PRESENT_MODE_MAILBOX_KHR)
826 num_images = MAX2(num_images, 4);
/* One allocation: chain header + flexible images[] tail. */
828 size_t size = sizeof(*chain) + num_images * sizeof(chain->images[0]);
829 chain = radv_alloc2(&device->alloc, pAllocator, size, 8,
830 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
832 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
/* Fill in the generic swapchain interface. */
834 chain->base.device = device;
835 chain->base.destroy = x11_swapchain_destroy;
836 chain->base.get_images = x11_get_images;
837 chain->base.acquire_next_image = x11_acquire_next_image;
838 chain->base.queue_present = x11_queue_present;
840 chain->conn = x11_surface_get_connection(icd_surface);
841 chain->window = x11_surface_get_window(icd_surface);
842 chain->extent = pCreateInfo->imageExtent;
843 chain->image_count = num_images;
/* Subscribe to Present events (resize, completion, idle) for this window. */
846 chain->event_id = xcb_generate_id(chain->conn);
847 xcb_present_select_input(chain->conn, chain->event_id, chain->window,
848 XCB_PRESENT_EVENT_MASK_CONFIGURE_NOTIFY |
849 XCB_PRESENT_EVENT_MASK_COMPLETE_NOTIFY |
850 XCB_PRESENT_EVENT_MASK_IDLE_NOTIFY);
852 /* Create an XCB event queue to hold present events outside of the usual
853 * application event queue
855 chain->special_event =
856 xcb_register_for_special_xge(chain->conn, &xcb_present_id,
857 chain->event_id, NULL);
859 chain->gc = xcb_generate_id(chain->conn);
/* Guard condition for gc allocation failure is missing from this view. */
861 /* FINISHME: Choose a better error. */
862 result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
866 cookie = xcb_create_gc(chain->conn,
869 XCB_GC_GRAPHICS_EXPOSURES,
870 (uint32_t []) { 0 });
871 xcb_discard_reply(chain->conn, cookie.sequence);
874 for (; image < chain->image_count; image++) {
875 result = x11_image_init(device, chain, pCreateInfo, pAllocator,
876 &chain->images[image]);
877 if (result != VK_SUCCESS)
878 goto fail_init_images;
881 *swapchain_out = &chain->base;
/* --- failure unwind --- */
886 for (uint32_t j = 0; j < image; j++)
887 x11_image_finish(chain, pAllocator, &chain->images[j]);
890 xcb_unregister_for_special_event(chain->conn, chain->special_event);
892 radv_free2(&device->alloc, pAllocator, chain);
/* Initializes the X11 WSI backend for a physical device: allocates the
 * wsi_x11 object, its mutex and connection hash table, fills the interface
 * function pointers, and installs it for both the XCB and XLIB platform
 * slots. On failure the mutex/allocation are unwound and both slots are
 * cleared. NOTE(review): the return-type line, the wsi NULL-check, the
 * mutex error branching (EPERM vs other), the fail labels and the returns
 * are missing from this extraction. */
898 radv_x11_init_wsi(struct radv_physical_device *device)
903 wsi = radv_alloc(&device->instance->alloc, sizeof(*wsi), 8,
904 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
906 result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
910 int ret = pthread_mutex_init(&wsi->mutex, NULL);
913 result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
915 /* FINISHME: Choose a better error. */
916 result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
/* Map each xcb_connection_t* to its cached wsi_x11_connection. */
922 wsi->connections = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
923 _mesa_key_pointer_equal);
924 if (!wsi->connections) {
925 result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
929 wsi->base.get_support = x11_surface_get_support;
930 wsi->base.get_capabilities = x11_surface_get_capabilities;
931 wsi->base.get_formats = x11_surface_get_formats;
932 wsi->base.get_present_modes = x11_surface_get_present_modes;
933 wsi->base.create_swapchain = x11_surface_create_swapchain;
/* One wsi_x11 serves both platform entries. */
935 device->wsi[VK_ICD_WSI_PLATFORM_XCB] = &wsi->base;
936 device->wsi[VK_ICD_WSI_PLATFORM_XLIB] = &wsi->base;
/* --- failure unwind --- */
941 pthread_mutex_destroy(&wsi->mutex);
943 radv_free(&device->instance->alloc, wsi);
945 device->wsi[VK_ICD_WSI_PLATFORM_XCB] = NULL;
946 device->wsi[VK_ICD_WSI_PLATFORM_XLIB] = NULL;
/* Tears down what radv_x11_init_wsi() created: the connection hash table,
 * the mutex, and the wsi_x11 allocation.
 * NOTE(review): the return-type line, the `if (wsi)` guard implied by the
 * numbering gap (955->958) and the closing brace are missing from this
 * extraction. Per-connection wsi_x11_connection objects do not appear to be
 * freed in the visible code — verify against the full file. */
952 radv_x11_finish_wsi(struct radv_physical_device *device)
954 struct wsi_x11 *wsi =
955 (struct wsi_x11 *)device->wsi[VK_ICD_WSI_PLATFORM_XCB];
958 _mesa_hash_table_destroy(wsi->connections, NULL);
960 pthread_mutex_destroy(&wsi->mutex);
962 radv_free(&device->instance->alloc, wsi);