/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#include <X11/xshmfence.h>
#include <xcb/xcb.h>
#include <xcb/dri3.h>
#include <xcb/present.h>

#include "vk_format_info.h"
#include "util/hash_table.h"
34 struct wsi_x11_connection
{
40 struct anv_wsi_interface base
;
42 pthread_mutex_t mutex
;
43 /* Hash table of xcb_connection -> wsi_x11_connection mappings */
44 struct hash_table
*connections
;
47 static struct wsi_x11_connection
*
48 wsi_x11_connection_create(struct anv_physical_device
*device
,
49 xcb_connection_t
*conn
)
51 xcb_query_extension_cookie_t dri3_cookie
, pres_cookie
;
52 xcb_query_extension_reply_t
*dri3_reply
, *pres_reply
;
54 struct wsi_x11_connection
*wsi_conn
=
55 anv_alloc(&device
->instance
->alloc
, sizeof(*wsi_conn
), 8,
56 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE
);
60 dri3_cookie
= xcb_query_extension(conn
, 4, "DRI3");
61 pres_cookie
= xcb_query_extension(conn
, 7, "PRESENT");
63 dri3_reply
= xcb_query_extension_reply(conn
, dri3_cookie
, NULL
);
64 pres_reply
= xcb_query_extension_reply(conn
, pres_cookie
, NULL
);
65 if (dri3_reply
== NULL
|| pres_reply
== NULL
) {
68 anv_free(&device
->instance
->alloc
, wsi_conn
);
72 wsi_conn
->has_dri3
= dri3_reply
->present
!= 0;
73 wsi_conn
->has_present
= pres_reply
->present
!= 0;
82 wsi_x11_connection_destroy(struct anv_physical_device
*device
,
83 struct wsi_x11_connection
*conn
)
85 anv_free(&device
->instance
->alloc
, conn
);
88 static struct wsi_x11_connection
*
89 wsi_x11_get_connection(struct anv_physical_device
*device
,
90 xcb_connection_t
*conn
)
93 (struct wsi_x11
*)device
->wsi
[VK_ICD_WSI_PLATFORM_XCB
];
95 pthread_mutex_lock(&wsi
->mutex
);
97 struct hash_entry
*entry
= _mesa_hash_table_search(wsi
->connections
, conn
);
99 /* We're about to make a bunch of blocking calls. Let's drop the
100 * mutex for now so we don't block up too badly.
102 pthread_mutex_unlock(&wsi
->mutex
);
104 struct wsi_x11_connection
*wsi_conn
=
105 wsi_x11_connection_create(device
, conn
);
107 pthread_mutex_lock(&wsi
->mutex
);
109 entry
= _mesa_hash_table_search(wsi
->connections
, conn
);
111 /* Oops, someone raced us to it */
112 wsi_x11_connection_destroy(device
, wsi_conn
);
114 entry
= _mesa_hash_table_insert(wsi
->connections
, conn
, wsi_conn
);
118 pthread_mutex_unlock(&wsi
->mutex
);
123 static const VkSurfaceFormatKHR formats
[] = {
124 { .format
= VK_FORMAT_B8G8R8A8_SRGB
, },
127 static const VkPresentModeKHR present_modes
[] = {
128 VK_PRESENT_MODE_MAILBOX_KHR
,
131 static xcb_screen_t
*
132 get_screen_for_root(xcb_connection_t
*conn
, xcb_window_t root
)
134 xcb_screen_iterator_t screen_iter
=
135 xcb_setup_roots_iterator(xcb_get_setup(conn
));
137 for (; screen_iter
.rem
; xcb_screen_next (&screen_iter
)) {
138 if (screen_iter
.data
->root
== root
)
139 return screen_iter
.data
;
145 static xcb_visualtype_t
*
146 screen_get_visualtype(xcb_screen_t
*screen
, xcb_visualid_t visual_id
,
149 xcb_depth_iterator_t depth_iter
=
150 xcb_screen_allowed_depths_iterator(screen
);
152 for (; depth_iter
.rem
; xcb_depth_next (&depth_iter
)) {
153 xcb_visualtype_iterator_t visual_iter
=
154 xcb_depth_visuals_iterator (depth_iter
.data
);
156 for (; visual_iter
.rem
; xcb_visualtype_next (&visual_iter
)) {
157 if (visual_iter
.data
->visual_id
== visual_id
) {
159 *depth
= depth_iter
.data
->depth
;
160 return visual_iter
.data
;
168 static xcb_visualtype_t
*
169 connection_get_visualtype(xcb_connection_t
*conn
, xcb_visualid_t visual_id
,
172 xcb_screen_iterator_t screen_iter
=
173 xcb_setup_roots_iterator(xcb_get_setup(conn
));
175 /* For this we have to iterate over all of the screens which is rather
176 * annoying. Fortunately, there is probably only 1.
178 for (; screen_iter
.rem
; xcb_screen_next (&screen_iter
)) {
179 xcb_visualtype_t
*visual
= screen_get_visualtype(screen_iter
.data
,
188 static xcb_visualtype_t
*
189 get_visualtype_for_window(xcb_connection_t
*conn
, xcb_window_t window
,
192 xcb_query_tree_cookie_t tree_cookie
;
193 xcb_get_window_attributes_cookie_t attrib_cookie
;
194 xcb_query_tree_reply_t
*tree
;
195 xcb_get_window_attributes_reply_t
*attrib
;
197 tree_cookie
= xcb_query_tree(conn
, window
);
198 attrib_cookie
= xcb_get_window_attributes(conn
, window
);
200 tree
= xcb_query_tree_reply(conn
, tree_cookie
, NULL
);
201 attrib
= xcb_get_window_attributes_reply(conn
, attrib_cookie
, NULL
);
202 if (attrib
== NULL
|| tree
== NULL
) {
208 xcb_window_t root
= tree
->root
;
209 xcb_visualid_t visual_id
= attrib
->visual
;
213 xcb_screen_t
*screen
= get_screen_for_root(conn
, root
);
217 return screen_get_visualtype(screen
, visual_id
, depth
);
221 visual_has_alpha(xcb_visualtype_t
*visual
, unsigned depth
)
223 uint32_t rgb_mask
= visual
->red_mask
|
227 uint32_t all_mask
= 0xffffffff >> (32 - depth
);
229 /* Do we have bits left over after RGB? */
230 return (all_mask
& ~rgb_mask
) != 0;
233 VkBool32
anv_GetPhysicalDeviceXcbPresentationSupportKHR(
234 VkPhysicalDevice physicalDevice
,
235 uint32_t queueFamilyIndex
,
236 xcb_connection_t
* connection
,
237 xcb_visualid_t visual_id
)
239 ANV_FROM_HANDLE(anv_physical_device
, device
, physicalDevice
);
241 struct wsi_x11_connection
*wsi_conn
=
242 wsi_x11_get_connection(device
, connection
);
244 if (!wsi_conn
->has_dri3
) {
245 fprintf(stderr
, "vulkan: No DRI3 support\n");
249 unsigned visual_depth
;
250 if (!connection_get_visualtype(connection
, visual_id
, &visual_depth
))
253 if (visual_depth
!= 24 && visual_depth
!= 32)
260 x11_surface_get_support(VkIcdSurfaceBase
*icd_surface
,
261 struct anv_physical_device
*device
,
262 uint32_t queueFamilyIndex
,
263 VkBool32
* pSupported
)
265 VkIcdSurfaceXcb
*surface
= (VkIcdSurfaceXcb
*)icd_surface
;
267 struct wsi_x11_connection
*wsi_conn
=
268 wsi_x11_get_connection(device
, surface
->connection
);
270 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
272 if (!wsi_conn
->has_dri3
) {
273 fprintf(stderr
, "vulkan: No DRI3 support\n");
278 unsigned visual_depth
;
279 if (!get_visualtype_for_window(surface
->connection
, surface
->window
,
285 if (visual_depth
!= 24 && visual_depth
!= 32) {
295 x11_surface_get_capabilities(VkIcdSurfaceBase
*icd_surface
,
296 struct anv_physical_device
*device
,
297 VkSurfaceCapabilitiesKHR
*caps
)
299 VkIcdSurfaceXcb
*surface
= (VkIcdSurfaceXcb
*)icd_surface
;
300 xcb_get_geometry_cookie_t geom_cookie
;
301 xcb_generic_error_t
*err
;
302 xcb_get_geometry_reply_t
*geom
;
303 unsigned visual_depth
;
305 geom_cookie
= xcb_get_geometry(surface
->connection
, surface
->window
);
307 /* This does a round-trip. This is why we do get_geometry first and
308 * wait to read the reply until after we have a visual.
310 xcb_visualtype_t
*visual
=
311 get_visualtype_for_window(surface
->connection
, surface
->window
,
314 geom
= xcb_get_geometry_reply(surface
->connection
, geom_cookie
, &err
);
316 VkExtent2D extent
= { geom
->width
, geom
->height
};
317 caps
->currentExtent
= extent
;
318 caps
->minImageExtent
= extent
;
319 caps
->maxImageExtent
= extent
;
321 /* This can happen if the client didn't wait for the configure event
322 * to come back from the compositor. In that case, we don't know the
323 * size of the window so we just return valid "I don't know" stuff.
325 caps
->currentExtent
= (VkExtent2D
) { -1, -1 };
326 caps
->minImageExtent
= (VkExtent2D
) { 1, 1 };
327 caps
->maxImageExtent
= (VkExtent2D
) { INT16_MAX
, INT16_MAX
};
332 if (visual_has_alpha(visual
, visual_depth
)) {
333 caps
->supportedCompositeAlpha
= VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR
|
334 VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR
;
336 caps
->supportedCompositeAlpha
= VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR
|
337 VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR
;
340 caps
->minImageCount
= 2;
341 caps
->maxImageCount
= 4;
342 caps
->supportedTransforms
= VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR
;
343 caps
->currentTransform
= VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR
;
344 caps
->maxImageArrayLayers
= 1;
345 caps
->supportedUsageFlags
=
346 VK_IMAGE_USAGE_TRANSFER_SRC_BIT
|
347 VK_IMAGE_USAGE_SAMPLED_BIT
|
348 VK_IMAGE_USAGE_TRANSFER_DST_BIT
|
349 VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT
;
355 x11_surface_get_formats(VkIcdSurfaceBase
*surface
,
356 struct anv_physical_device
*device
,
357 uint32_t *pSurfaceFormatCount
,
358 VkSurfaceFormatKHR
*pSurfaceFormats
)
360 if (pSurfaceFormats
== NULL
) {
361 *pSurfaceFormatCount
= ARRAY_SIZE(formats
);
365 assert(*pSurfaceFormatCount
>= ARRAY_SIZE(formats
));
366 typed_memcpy(pSurfaceFormats
, formats
, *pSurfaceFormatCount
);
367 *pSurfaceFormatCount
= ARRAY_SIZE(formats
);
373 x11_surface_get_present_modes(VkIcdSurfaceBase
*surface
,
374 struct anv_physical_device
*device
,
375 uint32_t *pPresentModeCount
,
376 VkPresentModeKHR
*pPresentModes
)
378 if (pPresentModes
== NULL
) {
379 *pPresentModeCount
= ARRAY_SIZE(present_modes
);
383 assert(*pPresentModeCount
>= ARRAY_SIZE(present_modes
));
384 typed_memcpy(pPresentModes
, present_modes
, *pPresentModeCount
);
385 *pPresentModeCount
= ARRAY_SIZE(present_modes
);
391 x11_surface_create_swapchain(VkIcdSurfaceBase
*surface
,
392 struct anv_device
*device
,
393 const VkSwapchainCreateInfoKHR
* pCreateInfo
,
394 const VkAllocationCallbacks
* pAllocator
,
395 struct anv_swapchain
**swapchain
);
397 VkResult
anv_CreateXcbSurfaceKHR(
398 VkInstance _instance
,
399 const VkXcbSurfaceCreateInfoKHR
* pCreateInfo
,
400 const VkAllocationCallbacks
* pAllocator
,
401 VkSurfaceKHR
* pSurface
)
403 ANV_FROM_HANDLE(anv_instance
, instance
, _instance
);
405 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR
);
407 VkIcdSurfaceXcb
*surface
;
409 surface
= anv_alloc2(&instance
->alloc
, pAllocator
, sizeof *surface
, 8,
410 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
412 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
414 surface
->base
.platform
= VK_ICD_WSI_PLATFORM_XCB
;
415 surface
->connection
= pCreateInfo
->connection
;
416 surface
->window
= pCreateInfo
->window
;
418 *pSurface
= _VkIcdSurfaceBase_to_handle(&surface
->base
);
424 struct anv_image
* image
;
425 struct anv_device_memory
* memory
;
428 struct xshmfence
* shm_fence
;
432 struct x11_swapchain
{
433 struct anv_swapchain base
;
435 xcb_connection_t
* conn
;
439 uint32_t image_count
;
441 xcb_present_event_t event_id
;
442 xcb_special_event_t
* special_event
;
446 struct x11_image images
[0];
450 x11_get_images(struct anv_swapchain
*anv_chain
,
451 uint32_t* pCount
, VkImage
*pSwapchainImages
)
453 struct x11_swapchain
*chain
= (struct x11_swapchain
*)anv_chain
;
455 if (pSwapchainImages
== NULL
) {
456 *pCount
= chain
->image_count
;
460 assert(chain
->image_count
<= *pCount
);
461 for (uint32_t i
= 0; i
< chain
->image_count
; i
++)
462 pSwapchainImages
[i
] = anv_image_to_handle(chain
->images
[i
].image
);
464 *pCount
= chain
->image_count
;
470 x11_handle_dri3_present_event(struct x11_swapchain
*chain
,
471 xcb_present_generic_event_t
*event
)
473 switch (event
->evtype
) {
474 case XCB_PRESENT_CONFIGURE_NOTIFY
: {
475 xcb_present_configure_notify_event_t
*config
= (void *) event
;
477 if (config
->width
!= chain
->extent
.width
||
478 config
->height
!= chain
->extent
.height
)
479 return vk_error(VK_ERROR_OUT_OF_DATE_KHR
);
484 case XCB_PRESENT_EVENT_IDLE_NOTIFY
: {
485 xcb_present_idle_notify_event_t
*idle
= (void *) event
;
487 for (unsigned i
= 0; i
< chain
->image_count
; i
++) {
488 if (chain
->images
[i
].pixmap
== idle
->pixmap
) {
489 chain
->images
[i
].busy
= false;
497 case XCB_PRESENT_COMPLETE_NOTIFY
:
506 x11_acquire_next_image(struct anv_swapchain
*anv_chain
,
508 VkSemaphore semaphore
,
509 uint32_t *image_index
)
511 struct x11_swapchain
*chain
= (struct x11_swapchain
*)anv_chain
;
514 for (uint32_t i
= 0; i
< chain
->image_count
; i
++) {
515 if (!chain
->images
[i
].busy
) {
516 /* We found a non-busy image */
517 xshmfence_await(chain
->images
[i
].shm_fence
);
523 xcb_flush(chain
->conn
);
524 xcb_generic_event_t
*event
=
525 xcb_wait_for_special_event(chain
->conn
, chain
->special_event
);
527 return vk_error(VK_ERROR_OUT_OF_DATE_KHR
);
529 VkResult result
= x11_handle_dri3_present_event(chain
, (void *)event
);
531 if (result
!= VK_SUCCESS
)
537 x11_queue_present(struct anv_swapchain
*anv_chain
,
538 struct anv_queue
*queue
,
539 uint32_t image_index
)
541 struct x11_swapchain
*chain
= (struct x11_swapchain
*)anv_chain
;
542 struct x11_image
*image
= &chain
->images
[image_index
];
544 assert(image_index
< chain
->image_count
);
546 uint32_t options
= XCB_PRESENT_OPTION_NONE
;
548 int64_t target_msc
= 0;
550 int64_t remainder
= 0;
552 options
|= XCB_PRESENT_OPTION_ASYNC
;
554 xshmfence_reset(image
->shm_fence
);
556 xcb_void_cookie_t cookie
=
557 xcb_present_pixmap(chain
->conn
,
560 (uint32_t) chain
->send_sbc
,
565 XCB_NONE
, /* target_crtc */
572 xcb_discard_reply(chain
->conn
, cookie
.sequence
);
575 xcb_flush(chain
->conn
);
581 x11_image_init(struct anv_device
*device
, struct x11_swapchain
*chain
,
582 const VkSwapchainCreateInfoKHR
*pCreateInfo
,
583 const VkAllocationCallbacks
* pAllocator
,
584 struct x11_image
*image
)
586 xcb_void_cookie_t cookie
;
590 result
= anv_image_create(anv_device_to_handle(device
),
591 &(struct anv_image_create_info
) {
592 .isl_tiling_flags
= ISL_TILING_X_BIT
,
595 &(VkImageCreateInfo
) {
596 .sType
= VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO
,
597 .imageType
= VK_IMAGE_TYPE_2D
,
598 .format
= pCreateInfo
->imageFormat
,
600 .width
= pCreateInfo
->imageExtent
.width
,
601 .height
= pCreateInfo
->imageExtent
.height
,
607 /* FIXME: Need a way to use X tiling to allow scanout */
608 .tiling
= VK_IMAGE_TILING_OPTIMAL
,
609 .usage
= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT
,
614 if (result
!= VK_SUCCESS
)
617 image
->image
= anv_image_from_handle(image_h
);
618 assert(vk_format_is_color(image
->image
->vk_format
));
620 VkDeviceMemory memory_h
;
621 result
= anv_AllocateMemory(anv_device_to_handle(device
),
622 &(VkMemoryAllocateInfo
) {
623 .sType
= VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO
,
624 .allocationSize
= image
->image
->size
,
625 .memoryTypeIndex
= 0,
627 NULL
/* XXX: pAllocator */,
629 if (result
!= VK_SUCCESS
)
630 goto fail_create_image
;
632 image
->memory
= anv_device_memory_from_handle(memory_h
);
633 image
->memory
->bo
.is_winsys_bo
= true;
635 anv_BindImageMemory(VK_NULL_HANDLE
, image_h
, memory_h
, 0);
637 struct anv_surface
*surface
= &image
->image
->color_surface
;
638 assert(surface
->isl
.tiling
== ISL_TILING_X
);
640 int ret
= anv_gem_set_tiling(device
, image
->memory
->bo
.gem_handle
,
641 surface
->isl
.row_pitch
, I915_TILING_X
);
643 /* FINISHME: Choose a better error. */
644 result
= vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY
,
645 "set_tiling failed: %m");
646 goto fail_alloc_memory
;
649 int fd
= anv_gem_handle_to_fd(device
, image
->memory
->bo
.gem_handle
);
651 /* FINISHME: Choose a better error. */
652 result
= vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY
,
653 "handle_to_fd failed: %m");
654 goto fail_alloc_memory
;
659 image
->pixmap
= xcb_generate_id(chain
->conn
);
662 xcb_dri3_pixmap_from_buffer_checked(chain
->conn
,
666 pCreateInfo
->imageExtent
.width
,
667 pCreateInfo
->imageExtent
.height
,
668 surface
->isl
.row_pitch
,
670 xcb_discard_reply(chain
->conn
, cookie
.sequence
);
672 int fence_fd
= xshmfence_alloc_shm();
676 image
->shm_fence
= xshmfence_map_shm(fence_fd
);
677 if (image
->shm_fence
== NULL
)
678 goto fail_shmfence_alloc
;
680 image
->sync_fence
= xcb_generate_id(chain
->conn
);
681 xcb_dri3_fence_from_fd(chain
->conn
,
688 xshmfence_trigger(image
->shm_fence
);
696 cookie
= xcb_free_pixmap(chain
->conn
, image
->pixmap
);
697 xcb_discard_reply(chain
->conn
, cookie
.sequence
);
700 anv_FreeMemory(anv_device_to_handle(chain
->base
.device
),
701 anv_device_memory_to_handle(image
->memory
), pAllocator
);
704 anv_DestroyImage(anv_device_to_handle(chain
->base
.device
),
705 anv_image_to_handle(image
->image
), pAllocator
);
711 x11_image_finish(struct x11_swapchain
*chain
,
712 const VkAllocationCallbacks
* pAllocator
,
713 struct x11_image
*image
)
715 xcb_void_cookie_t cookie
;
717 cookie
= xcb_sync_destroy_fence(chain
->conn
, image
->sync_fence
);
718 xcb_discard_reply(chain
->conn
, cookie
.sequence
);
719 xshmfence_unmap_shm(image
->shm_fence
);
721 cookie
= xcb_free_pixmap(chain
->conn
, image
->pixmap
);
722 xcb_discard_reply(chain
->conn
, cookie
.sequence
);
724 anv_DestroyImage(anv_device_to_handle(chain
->base
.device
),
725 anv_image_to_handle(image
->image
), pAllocator
);
727 anv_FreeMemory(anv_device_to_handle(chain
->base
.device
),
728 anv_device_memory_to_handle(image
->memory
), pAllocator
);
732 x11_swapchain_destroy(struct anv_swapchain
*anv_chain
,
733 const VkAllocationCallbacks
*pAllocator
)
735 struct x11_swapchain
*chain
= (struct x11_swapchain
*)anv_chain
;
737 for (uint32_t i
= 0; i
< chain
->image_count
; i
++)
738 x11_image_finish(chain
, pAllocator
, &chain
->images
[i
]);
740 xcb_unregister_for_special_event(chain
->conn
, chain
->special_event
);
742 anv_free2(&chain
->base
.device
->alloc
, pAllocator
, chain
);
748 x11_surface_create_swapchain(VkIcdSurfaceBase
*icd_surface
,
749 struct anv_device
*device
,
750 const VkSwapchainCreateInfoKHR
*pCreateInfo
,
751 const VkAllocationCallbacks
* pAllocator
,
752 struct anv_swapchain
**swapchain_out
)
754 VkIcdSurfaceXcb
*surface
= (VkIcdSurfaceXcb
*)icd_surface
;
755 struct x11_swapchain
*chain
;
756 xcb_void_cookie_t cookie
;
759 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR
);
761 int num_images
= pCreateInfo
->minImageCount
;
763 /* For true mailbox mode, we need at least 4 images:
764 * 1) One to scan out from
765 * 2) One to have queued for scan-out
766 * 3) One to be currently held by the Wayland compositor
767 * 4) One to render to
769 if (pCreateInfo
->presentMode
== VK_PRESENT_MODE_MAILBOX_KHR
)
770 num_images
= MAX2(num_images
, 4);
772 size_t size
= sizeof(*chain
) + num_images
* sizeof(chain
->images
[0]);
773 chain
= anv_alloc2(&device
->alloc
, pAllocator
, size
, 8,
774 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT
);
776 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
778 chain
->base
.device
= device
;
779 chain
->base
.destroy
= x11_swapchain_destroy
;
780 chain
->base
.get_images
= x11_get_images
;
781 chain
->base
.acquire_next_image
= x11_acquire_next_image
;
782 chain
->base
.queue_present
= x11_queue_present
;
784 chain
->conn
= surface
->connection
;
785 chain
->window
= surface
->window
;
786 chain
->extent
= pCreateInfo
->imageExtent
;
787 chain
->image_count
= num_images
;
789 chain
->event_id
= xcb_generate_id(chain
->conn
);
790 xcb_present_select_input(chain
->conn
, chain
->event_id
, chain
->window
,
791 XCB_PRESENT_EVENT_MASK_CONFIGURE_NOTIFY
|
792 XCB_PRESENT_EVENT_MASK_COMPLETE_NOTIFY
|
793 XCB_PRESENT_EVENT_MASK_IDLE_NOTIFY
);
795 /* Create an XCB event queue to hold present events outside of the usual
796 * application event queue
798 chain
->special_event
=
799 xcb_register_for_special_xge(chain
->conn
, &xcb_present_id
,
800 chain
->event_id
, NULL
);
802 chain
->gc
= xcb_generate_id(chain
->conn
);
804 /* FINISHME: Choose a better error. */
805 result
= vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
809 cookie
= xcb_create_gc(chain
->conn
,
812 XCB_GC_GRAPHICS_EXPOSURES
,
813 (uint32_t []) { 0 });
814 xcb_discard_reply(chain
->conn
, cookie
.sequence
);
817 for (; image
< chain
->image_count
; image
++) {
818 result
= x11_image_init(device
, chain
, pCreateInfo
, pAllocator
,
819 &chain
->images
[image
]);
820 if (result
!= VK_SUCCESS
)
821 goto fail_init_images
;
824 *swapchain_out
= &chain
->base
;
829 for (uint32_t j
= 0; j
< image
; j
++)
830 x11_image_finish(chain
, pAllocator
, &chain
->images
[j
]);
833 xcb_unregister_for_special_event(chain
->conn
, chain
->special_event
);
835 anv_free2(&device
->alloc
, pAllocator
, chain
);
841 anv_x11_init_wsi(struct anv_physical_device
*device
)
846 wsi
= anv_alloc(&device
->instance
->alloc
, sizeof(*wsi
), 8,
847 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE
);
849 result
= vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
853 int ret
= pthread_mutex_init(&wsi
->mutex
, NULL
);
856 result
= vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
858 /* FINISHME: Choose a better error. */
859 result
= vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
865 wsi
->connections
= _mesa_hash_table_create(NULL
, _mesa_hash_pointer
,
866 _mesa_key_pointer_equal
);
867 if (!wsi
->connections
) {
868 result
= vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
872 wsi
->base
.get_support
= x11_surface_get_support
;
873 wsi
->base
.get_capabilities
= x11_surface_get_capabilities
;
874 wsi
->base
.get_formats
= x11_surface_get_formats
;
875 wsi
->base
.get_present_modes
= x11_surface_get_present_modes
;
876 wsi
->base
.create_swapchain
= x11_surface_create_swapchain
;
878 device
->wsi
[VK_ICD_WSI_PLATFORM_XCB
] = &wsi
->base
;
883 pthread_mutex_destroy(&wsi
->mutex
);
885 anv_free(&device
->instance
->alloc
, wsi
);
887 device
->wsi
[VK_ICD_WSI_PLATFORM_XCB
] = NULL
;
893 anv_x11_finish_wsi(struct anv_physical_device
*device
)
895 struct wsi_x11
*wsi
=
896 (struct wsi_x11
*)device
->wsi
[VK_ICD_WSI_PLATFORM_XCB
];
899 _mesa_hash_table_destroy(wsi
->connections
, NULL
);
901 pthread_mutex_destroy(&wsi
->mutex
);
903 anv_free(&device
->instance
->alloc
, wsi
);