anv/wsi: Use vk_format_info for asserts rather than anv_format
src/intel/vulkan/anv_wsi_x11.c (mesa.git)
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <X11/xshmfence.h>
#include <xcb/xcb.h>
#include <xcb/dri3.h>
#include <xcb/present.h>

#include "anv_wsi.h"

#include "vk_format_info.h"
#include "util/hash_table.h"

struct wsi_x11_connection {
   bool has_dri3;
   bool has_present;
};

struct wsi_x11 {
   struct anv_wsi_interface base;

   pthread_mutex_t mutex;
   /* Hash table of xcb_connection -> wsi_x11_connection mappings */
   struct hash_table *connections;
};

static struct wsi_x11_connection *
wsi_x11_connection_create(struct anv_physical_device *device,
                          xcb_connection_t *conn)
{
   xcb_query_extension_cookie_t dri3_cookie, pres_cookie;
   xcb_query_extension_reply_t *dri3_reply, *pres_reply;

   struct wsi_x11_connection *wsi_conn =
      anv_alloc(&device->instance->alloc, sizeof(*wsi_conn), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!wsi_conn)
      return NULL;

   dri3_cookie = xcb_query_extension(conn, 4, "DRI3");
   pres_cookie = xcb_query_extension(conn, 7, "PRESENT");

   dri3_reply = xcb_query_extension_reply(conn, dri3_cookie, NULL);
   pres_reply = xcb_query_extension_reply(conn, pres_cookie, NULL);
   if (dri3_reply == NULL || pres_reply == NULL) {
      free(dri3_reply);
      free(pres_reply);
      anv_free(&device->instance->alloc, wsi_conn);
      return NULL;
   }

   wsi_conn->has_dri3 = dri3_reply->present != 0;
   wsi_conn->has_present = pres_reply->present != 0;

   free(dri3_reply);
   free(pres_reply);

   return wsi_conn;
}

static void
wsi_x11_connection_destroy(struct anv_physical_device *device,
                           struct wsi_x11_connection *conn)
{
   anv_free(&device->instance->alloc, conn);
}

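/* Look up (or lazily create) the wsi_x11_connection entry for an XCB
 * connection.  wsi->mutex guards the hash table; the extension queries in
 * wsi_x11_connection_create() are blocking round-trips to the X server, so
 * the lock is dropped around them and the lookup is redone afterwards
 * (double-checked locking).
 */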
static struct wsi_x11_connection *
wsi_x11_get_connection(struct anv_physical_device *device,
                       xcb_connection_t *conn)
{
   struct wsi_x11 *wsi =
      (struct wsi_x11 *)device->wsi[VK_ICD_WSI_PLATFORM_XCB];

   pthread_mutex_lock(&wsi->mutex);

   struct hash_entry *entry = _mesa_hash_table_search(wsi->connections, conn);
   if (!entry) {
      /* We're about to make a bunch of blocking calls.  Let's drop the
       * mutex for now so we don't block up too badly.
       */
      pthread_mutex_unlock(&wsi->mutex);

      struct wsi_x11_connection *wsi_conn =
         wsi_x11_connection_create(device, conn);

      pthread_mutex_lock(&wsi->mutex);

      entry = _mesa_hash_table_search(wsi->connections, conn);
      if (entry) {
         /* Oops, someone raced us to it */
         wsi_x11_connection_destroy(device, wsi_conn);
      } else {
         entry = _mesa_hash_table_insert(wsi->connections, conn, wsi_conn);
      }
   }

   pthread_mutex_unlock(&wsi->mutex);

   return entry->data;
}

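/* The only format and present mode this backend advertises: 24- and 32-bit
 * X visuals are treated as BGRA8888, and presentation below behaves like
 * mailbox mode. */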
static const VkSurfaceFormatKHR formats[] = {
   { .format = VK_FORMAT_B8G8R8A8_SRGB, },
};

static const VkPresentModeKHR present_modes[] = {
   VK_PRESENT_MODE_MAILBOX_KHR,
};

static xcb_screen_t *
get_screen_for_root(xcb_connection_t *conn, xcb_window_t root)
{
   xcb_screen_iterator_t screen_iter =
      xcb_setup_roots_iterator(xcb_get_setup(conn));

   for (; screen_iter.rem; xcb_screen_next(&screen_iter)) {
      if (screen_iter.data->root == root)
         return screen_iter.data;
   }

   return NULL;
}

static xcb_visualtype_t *
screen_get_visualtype(xcb_screen_t *screen, xcb_visualid_t visual_id,
                      unsigned *depth)
{
   xcb_depth_iterator_t depth_iter =
      xcb_screen_allowed_depths_iterator(screen);

   for (; depth_iter.rem; xcb_depth_next(&depth_iter)) {
      xcb_visualtype_iterator_t visual_iter =
         xcb_depth_visuals_iterator(depth_iter.data);

      for (; visual_iter.rem; xcb_visualtype_next(&visual_iter)) {
         if (visual_iter.data->visual_id == visual_id) {
            if (depth)
               *depth = depth_iter.data->depth;
            return visual_iter.data;
         }
      }
   }

   return NULL;
}

static xcb_visualtype_t *
connection_get_visualtype(xcb_connection_t *conn, xcb_visualid_t visual_id,
                          unsigned *depth)
{
   xcb_screen_iterator_t screen_iter =
      xcb_setup_roots_iterator(xcb_get_setup(conn));

   /* For this we have to iterate over all of the screens which is rather
    * annoying.  Fortunately, there is probably only 1.
    */
   for (; screen_iter.rem; xcb_screen_next(&screen_iter)) {
      xcb_visualtype_t *visual = screen_get_visualtype(screen_iter.data,
                                                       visual_id, depth);
      if (visual)
         return visual;
   }

   return NULL;
}

static xcb_visualtype_t *
get_visualtype_for_window(xcb_connection_t *conn, xcb_window_t window,
                          unsigned *depth)
{
   xcb_query_tree_cookie_t tree_cookie;
   xcb_get_window_attributes_cookie_t attrib_cookie;
   xcb_query_tree_reply_t *tree;
   xcb_get_window_attributes_reply_t *attrib;

   tree_cookie = xcb_query_tree(conn, window);
   attrib_cookie = xcb_get_window_attributes(conn, window);

   tree = xcb_query_tree_reply(conn, tree_cookie, NULL);
   attrib = xcb_get_window_attributes_reply(conn, attrib_cookie, NULL);
   if (attrib == NULL || tree == NULL) {
      free(attrib);
      free(tree);
      return NULL;
   }

   xcb_window_t root = tree->root;
   xcb_visualid_t visual_id = attrib->visual;
   free(attrib);
   free(tree);

   xcb_screen_t *screen = get_screen_for_root(conn, root);
   if (screen == NULL)
      return NULL;

   return screen_get_visualtype(screen, visual_id, depth);
}

static bool
visual_has_alpha(xcb_visualtype_t *visual, unsigned depth)
{
   uint32_t rgb_mask = visual->red_mask |
                       visual->green_mask |
                       visual->blue_mask;

   uint32_t all_mask = 0xffffffff >> (32 - depth);

   /* Do we have bits left over after RGB? */
   return (all_mask & ~rgb_mask) != 0;
}
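
/* Worked example (illustration only, not from the original source): a
 * depth-32 TrueColor visual with masks 0x00ff0000 / 0x0000ff00 / 0x000000ff
 * gives all_mask = 0xffffffff, and ~rgb_mask leaves 0xff000000, so the
 * visual has alpha.  With the same masks at depth 24, all_mask = 0x00ffffff
 * and no bits are left over. */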

VkBool32 anv_GetPhysicalDeviceXcbPresentationSupportKHR(
    VkPhysicalDevice                            physicalDevice,
    uint32_t                                    queueFamilyIndex,
    xcb_connection_t*                           connection,
    xcb_visualid_t                              visual_id)
{
   ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);

   struct wsi_x11_connection *wsi_conn =
      wsi_x11_get_connection(device, connection);
   if (!wsi_conn)
      return false;

   if (!wsi_conn->has_dri3) {
      fprintf(stderr, "vulkan: No DRI3 support\n");
      return false;
   }

   unsigned visual_depth;
   if (!connection_get_visualtype(connection, visual_id, &visual_depth))
      return false;

   if (visual_depth != 24 && visual_depth != 32)
      return false;

   return true;
}

static VkResult
x11_surface_get_support(VkIcdSurfaceBase *icd_surface,
                        struct anv_physical_device *device,
                        uint32_t queueFamilyIndex,
                        VkBool32* pSupported)
{
   VkIcdSurfaceXcb *surface = (VkIcdSurfaceXcb *)icd_surface;

   struct wsi_x11_connection *wsi_conn =
      wsi_x11_get_connection(device, surface->connection);
   if (!wsi_conn)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   if (!wsi_conn->has_dri3) {
      fprintf(stderr, "vulkan: No DRI3 support\n");
      *pSupported = false;
      return VK_SUCCESS;
   }

   unsigned visual_depth;
   if (!get_visualtype_for_window(surface->connection, surface->window,
                                  &visual_depth)) {
      *pSupported = false;
      return VK_SUCCESS;
   }

   if (visual_depth != 24 && visual_depth != 32) {
      *pSupported = false;
      return VK_SUCCESS;
   }

   *pSupported = true;
   return VK_SUCCESS;
}

static VkResult
x11_surface_get_capabilities(VkIcdSurfaceBase *icd_surface,
                             struct anv_physical_device *device,
                             VkSurfaceCapabilitiesKHR *caps)
{
   VkIcdSurfaceXcb *surface = (VkIcdSurfaceXcb *)icd_surface;
   xcb_get_geometry_cookie_t geom_cookie;
   xcb_generic_error_t *err;
   xcb_get_geometry_reply_t *geom;
   unsigned visual_depth;

   geom_cookie = xcb_get_geometry(surface->connection, surface->window);

   /* This does a round-trip.  This is why we do get_geometry first and
    * wait to read the reply until after we have a visual.
    */
   xcb_visualtype_t *visual =
      get_visualtype_for_window(surface->connection, surface->window,
                                &visual_depth);

   geom = xcb_get_geometry_reply(surface->connection, geom_cookie, &err);
   if (geom) {
      VkExtent2D extent = { geom->width, geom->height };
      caps->currentExtent = extent;
      caps->minImageExtent = extent;
      caps->maxImageExtent = extent;
   } else {
      /* This can happen if the client didn't wait for the configure event
       * to come back from the compositor.  In that case, we don't know the
       * size of the window so we just return valid "I don't know" stuff.
       */
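      /* (uint32_t)-1 is 0xFFFFFFFF, the special value the Vulkan spec uses
       * for "the surface size will be determined by the swapchain extent". */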
      caps->currentExtent = (VkExtent2D) { -1, -1 };
      caps->minImageExtent = (VkExtent2D) { 1, 1 };
      caps->maxImageExtent = (VkExtent2D) { INT16_MAX, INT16_MAX };
   }
   free(err);
   free(geom);

   /* If we never found the visual, fall back to opaque-only rather than
    * dereferencing NULL. */
   if (visual && visual_has_alpha(visual, visual_depth)) {
      caps->supportedCompositeAlpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR |
                                      VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR;
   } else {
      caps->supportedCompositeAlpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR |
                                      VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
   }

   caps->minImageCount = 2;
   caps->maxImageCount = 4;
   caps->supportedTransforms = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
   caps->currentTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
   caps->maxImageArrayLayers = 1;
   caps->supportedUsageFlags =
      VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
      VK_IMAGE_USAGE_SAMPLED_BIT |
      VK_IMAGE_USAGE_TRANSFER_DST_BIT |
      VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;

   return VK_SUCCESS;
}

static VkResult
x11_surface_get_formats(VkIcdSurfaceBase *surface,
                        struct anv_physical_device *device,
                        uint32_t *pSurfaceFormatCount,
                        VkSurfaceFormatKHR *pSurfaceFormats)
{
   if (pSurfaceFormats == NULL) {
      *pSurfaceFormatCount = ARRAY_SIZE(formats);
      return VK_SUCCESS;
   }

   assert(*pSurfaceFormatCount >= ARRAY_SIZE(formats));
   /* Copy only as many entries as we actually have; copying
    * *pSurfaceFormatCount entries would read past the end of formats[]. */
   typed_memcpy(pSurfaceFormats, formats, ARRAY_SIZE(formats));
   *pSurfaceFormatCount = ARRAY_SIZE(formats);

   return VK_SUCCESS;
}

static VkResult
x11_surface_get_present_modes(VkIcdSurfaceBase *surface,
                              struct anv_physical_device *device,
                              uint32_t *pPresentModeCount,
                              VkPresentModeKHR *pPresentModes)
{
   if (pPresentModes == NULL) {
      *pPresentModeCount = ARRAY_SIZE(present_modes);
      return VK_SUCCESS;
   }

   assert(*pPresentModeCount >= ARRAY_SIZE(present_modes));
   /* Same as above: copy ARRAY_SIZE(present_modes) entries, not
    * *pPresentModeCount, to avoid reading past the end of the array. */
   typed_memcpy(pPresentModes, present_modes, ARRAY_SIZE(present_modes));
   *pPresentModeCount = ARRAY_SIZE(present_modes);

   return VK_SUCCESS;
}

static VkResult
x11_surface_create_swapchain(VkIcdSurfaceBase *surface,
                             struct anv_device *device,
                             const VkSwapchainCreateInfoKHR* pCreateInfo,
                             const VkAllocationCallbacks* pAllocator,
                             struct anv_swapchain **swapchain);

VkResult anv_CreateXcbSurfaceKHR(
    VkInstance                                  _instance,
    const VkXcbSurfaceCreateInfoKHR*            pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkSurfaceKHR*                               pSurface)
{
   ANV_FROM_HANDLE(anv_instance, instance, _instance);

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR);

   VkIcdSurfaceXcb *surface;

   surface = anv_alloc2(&instance->alloc, pAllocator, sizeof *surface, 8,
                        VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (surface == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   surface->base.platform = VK_ICD_WSI_PLATFORM_XCB;
   surface->connection = pCreateInfo->connection;
   surface->window = pCreateInfo->window;

   *pSurface = _VkIcdSurfaceBase_to_handle(&surface->base);

   return VK_SUCCESS;
}

struct x11_image {
   struct anv_image *                        image;
   struct anv_device_memory *                memory;
   xcb_pixmap_t                              pixmap;
   bool                                      busy;
   struct xshmfence *                        shm_fence;
   uint32_t                                  sync_fence;
};

struct x11_swapchain {
   struct anv_swapchain                      base;

   xcb_connection_t *                        conn;
   xcb_window_t                              window;
   xcb_gc_t                                  gc;
   VkExtent2D                                extent;
   uint32_t                                  image_count;

   xcb_present_event_t                       event_id;
   xcb_special_event_t *                     special_event;
   uint64_t                                  send_sbc;
   uint32_t                                  stamp;

   struct x11_image                          images[0];
};

static VkResult
x11_get_images(struct anv_swapchain *anv_chain,
               uint32_t* pCount, VkImage *pSwapchainImages)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;

   if (pSwapchainImages == NULL) {
      *pCount = chain->image_count;
      return VK_SUCCESS;
   }

   assert(chain->image_count <= *pCount);
   for (uint32_t i = 0; i < chain->image_count; i++)
      pSwapchainImages[i] = anv_image_to_handle(chain->images[i].image);

   *pCount = chain->image_count;

   return VK_SUCCESS;
}
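
/* Callers reach this through vkGetSwapchainImagesKHR() using the standard
 * two-call idiom.  A hedged sketch of application-side usage (not part of
 * this file):
 *
 *    uint32_t count;
 *    vkGetSwapchainImagesKHR(device, swapchain, &count, NULL);
 *    VkImage *images = malloc(count * sizeof(*images));
 *    vkGetSwapchainImagesKHR(device, swapchain, &count, images);
 */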

static VkResult
x11_handle_dri3_present_event(struct x11_swapchain *chain,
                              xcb_present_generic_event_t *event)
{
   switch (event->evtype) {
   case XCB_PRESENT_CONFIGURE_NOTIFY: {
      xcb_present_configure_notify_event_t *config = (void *) event;

      if (config->width != chain->extent.width ||
          config->height != chain->extent.height)
         return vk_error(VK_ERROR_OUT_OF_DATE_KHR);

      break;
   }

   case XCB_PRESENT_EVENT_IDLE_NOTIFY: {
      xcb_present_idle_notify_event_t *idle = (void *) event;

      for (unsigned i = 0; i < chain->image_count; i++) {
         if (chain->images[i].pixmap == idle->pixmap) {
            chain->images[i].busy = false;
            break;
         }
      }

      break;
   }

   case XCB_PRESENT_COMPLETE_NOTIFY:
   default:
      break;
   }

   return VK_SUCCESS;
}

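/* Block until one of the swapchain images is idle.  An image is marked busy
 * in x11_queue_present() and becomes idle again when the server delivers a
 * PresentIdleNotify for its pixmap (handled above); the xshmfence
 * additionally ensures the server is really done with the buffer before we
 * hand it back to the application. */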
static VkResult
x11_acquire_next_image(struct anv_swapchain *anv_chain,
                       uint64_t timeout,
                       VkSemaphore semaphore,
                       uint32_t *image_index)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;

   while (1) {
      for (uint32_t i = 0; i < chain->image_count; i++) {
         if (!chain->images[i].busy) {
            /* We found a non-busy image */
            xshmfence_await(chain->images[i].shm_fence);
            *image_index = i;
            return VK_SUCCESS;
         }
      }

      xcb_flush(chain->conn);
      xcb_generic_event_t *event =
         xcb_wait_for_special_event(chain->conn, chain->special_event);
      if (!event)
         return vk_error(VK_ERROR_OUT_OF_DATE_KHR);

      VkResult result = x11_handle_dri3_present_event(chain, (void *)event);
      free(event);
      if (result != VK_SUCCESS)
         return result;
   }
}

static VkResult
x11_queue_present(struct anv_swapchain *anv_chain,
                  struct anv_queue *queue,
                  uint32_t image_index)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
   struct x11_image *image = &chain->images[image_index];

   assert(image_index < chain->image_count);

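   /* We only advertise MAILBOX, so present without waiting for vblank:
    * XCB_PRESENT_OPTION_ASYNC lets the Present extension flip as soon as
    * possible, and target_msc/divisor/remainder of 0 mean "no MSC
    * constraint". */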
   uint32_t options = XCB_PRESENT_OPTION_NONE;

   int64_t target_msc = 0;
   int64_t divisor = 0;
   int64_t remainder = 0;

   options |= XCB_PRESENT_OPTION_ASYNC;

   xshmfence_reset(image->shm_fence);

   xcb_void_cookie_t cookie =
      xcb_present_pixmap(chain->conn,
                         chain->window,
                         image->pixmap,
                         (uint32_t) chain->send_sbc,
                         0,        /* valid */
                         0,        /* update */
                         0,        /* x_off */
                         0,        /* y_off */
                         XCB_NONE, /* target_crtc */
                         XCB_NONE,
                         image->sync_fence,
                         options,
                         target_msc,
                         divisor,
                         remainder, 0, NULL);
   xcb_discard_reply(chain->conn, cookie.sequence);
   image->busy = true;

   xcb_flush(chain->conn);

   return VK_SUCCESS;
}

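/* Create one swapchain image and its X11 resources: allocate the anv image
 * and its memory, set X tiling on the BO, export it as a prime fd, wrap the
 * fd in a DRI3 pixmap, and create the SyncFence/xshmfence pair used to track
 * when the server is done reading from the buffer. */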
static VkResult
x11_image_init(struct anv_device *device, struct x11_swapchain *chain,
               const VkSwapchainCreateInfoKHR *pCreateInfo,
               const VkAllocationCallbacks* pAllocator,
               struct x11_image *image)
{
   xcb_void_cookie_t cookie;
   VkResult result;

   VkImage image_h;
   result = anv_image_create(anv_device_to_handle(device),
      &(struct anv_image_create_info) {
         .isl_tiling_flags = ISL_TILING_X_BIT,
         .stride = 0,
         .vk_info =
      &(VkImageCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
         .imageType = VK_IMAGE_TYPE_2D,
         .format = pCreateInfo->imageFormat,
         .extent = {
            .width = pCreateInfo->imageExtent.width,
            .height = pCreateInfo->imageExtent.height,
            .depth = 1
         },
         .mipLevels = 1,
         .arrayLayers = 1,
         .samples = 1,
         /* FIXME: Need a way to use X tiling to allow scanout */
         .tiling = VK_IMAGE_TILING_OPTIMAL,
         .usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
         .flags = 0,
      }},
      NULL,
      &image_h);
   if (result != VK_SUCCESS)
      return result;

   image->image = anv_image_from_handle(image_h);
   assert(vk_format_is_color(image->image->vk_format));

   VkDeviceMemory memory_h;
   result = anv_AllocateMemory(anv_device_to_handle(device),
      &(VkMemoryAllocateInfo) {
         .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
         .allocationSize = image->image->size,
         .memoryTypeIndex = 0,
      },
      NULL /* XXX: pAllocator */,
      &memory_h);
   if (result != VK_SUCCESS)
      goto fail_create_image;

   image->memory = anv_device_memory_from_handle(memory_h);
   image->memory->bo.is_winsys_bo = true;

   anv_BindImageMemory(VK_NULL_HANDLE, image_h, memory_h, 0);

   struct anv_surface *surface = &image->image->color_surface;
   assert(surface->isl.tiling == ISL_TILING_X);

   int ret = anv_gem_set_tiling(device, image->memory->bo.gem_handle,
                                surface->isl.row_pitch, I915_TILING_X);
   if (ret) {
      /* FINISHME: Choose a better error. */
      result = vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY,
                         "set_tiling failed: %m");
      goto fail_alloc_memory;
   }

   int fd = anv_gem_handle_to_fd(device, image->memory->bo.gem_handle);
   if (fd == -1) {
      /* FINISHME: Choose a better error. */
      result = vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY,
                         "handle_to_fd failed: %m");
      goto fail_alloc_memory;
   }

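   /* The pixmap is created as depth 24 / bpp 32 regardless of the window's
    * actual visual; this matches the B8G8R8A8 format advertised above,
    * though a depth-32 window would arguably want depth 32 here. */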
   uint32_t bpp = 32;
   uint32_t depth = 24;
   image->pixmap = xcb_generate_id(chain->conn);

   cookie =
      xcb_dri3_pixmap_from_buffer_checked(chain->conn,
                                          image->pixmap,
                                          chain->window,
                                          image->image->size,
                                          pCreateInfo->imageExtent.width,
                                          pCreateInfo->imageExtent.height,
                                          surface->isl.row_pitch,
                                          depth, bpp, fd);
   xcb_discard_reply(chain->conn, cookie.sequence);

   int fence_fd = xshmfence_alloc_shm();
   if (fence_fd < 0) {
      /* Set a result here; otherwise we would return the stale VK_SUCCESS
       * left over from the allocation above. */
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail_pixmap;
   }

   image->shm_fence = xshmfence_map_shm(fence_fd);
   if (image->shm_fence == NULL) {
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail_shmfence_alloc;
   }

   image->sync_fence = xcb_generate_id(chain->conn);
   xcb_dri3_fence_from_fd(chain->conn,
                          image->pixmap,
                          image->sync_fence,
                          false,
                          fence_fd);

   image->busy = false;
   xshmfence_trigger(image->shm_fence);

   return VK_SUCCESS;

fail_shmfence_alloc:
   close(fence_fd);

fail_pixmap:
   cookie = xcb_free_pixmap(chain->conn, image->pixmap);
   xcb_discard_reply(chain->conn, cookie.sequence);

fail_alloc_memory:
   anv_FreeMemory(anv_device_to_handle(chain->base.device),
                  anv_device_memory_to_handle(image->memory), pAllocator);

fail_create_image:
   anv_DestroyImage(anv_device_to_handle(chain->base.device),
                    anv_image_to_handle(image->image), pAllocator);

   return result;
}

static void
x11_image_finish(struct x11_swapchain *chain,
                 const VkAllocationCallbacks* pAllocator,
                 struct x11_image *image)
{
   xcb_void_cookie_t cookie;

   cookie = xcb_sync_destroy_fence(chain->conn, image->sync_fence);
   xcb_discard_reply(chain->conn, cookie.sequence);
   xshmfence_unmap_shm(image->shm_fence);

   cookie = xcb_free_pixmap(chain->conn, image->pixmap);
   xcb_discard_reply(chain->conn, cookie.sequence);

   anv_DestroyImage(anv_device_to_handle(chain->base.device),
                    anv_image_to_handle(image->image), pAllocator);

   anv_FreeMemory(anv_device_to_handle(chain->base.device),
                  anv_device_memory_to_handle(image->memory), pAllocator);
}

static VkResult
x11_swapchain_destroy(struct anv_swapchain *anv_chain,
                      const VkAllocationCallbacks *pAllocator)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;

   for (uint32_t i = 0; i < chain->image_count; i++)
      x11_image_finish(chain, pAllocator, &chain->images[i]);

   xcb_unregister_for_special_event(chain->conn, chain->special_event);

   anv_free2(&chain->base.device->alloc, pAllocator, chain);

   return VK_SUCCESS;
}

static VkResult
x11_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
                             struct anv_device *device,
                             const VkSwapchainCreateInfoKHR *pCreateInfo,
                             const VkAllocationCallbacks* pAllocator,
                             struct anv_swapchain **swapchain_out)
{
   VkIcdSurfaceXcb *surface = (VkIcdSurfaceXcb *)icd_surface;
   struct x11_swapchain *chain;
   xcb_void_cookie_t cookie;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR);

   int num_images = pCreateInfo->minImageCount;

   /* For true mailbox mode, we need at least 4 images:
    *  1) One to scan out from
    *  2) One to have queued for scan-out
    *  3) One to be currently held by the X server
    *  4) One to render to
    */
   if (pCreateInfo->presentMode == VK_PRESENT_MODE_MAILBOX_KHR)
      num_images = MAX2(num_images, 4);

   size_t size = sizeof(*chain) + num_images * sizeof(chain->images[0]);
   chain = anv_alloc2(&device->alloc, pAllocator, size, 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (chain == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   chain->base.device = device;
   chain->base.destroy = x11_swapchain_destroy;
   chain->base.get_images = x11_get_images;
   chain->base.acquire_next_image = x11_acquire_next_image;
   chain->base.queue_present = x11_queue_present;

   chain->conn = surface->connection;
   chain->window = surface->window;
   chain->extent = pCreateInfo->imageExtent;
   chain->image_count = num_images;
   /* The chain is not zero-allocated, so initialize the presentation serial
    * explicitly before x11_queue_present() first reads it. */
   chain->send_sbc = 0;

   chain->event_id = xcb_generate_id(chain->conn);
   xcb_present_select_input(chain->conn, chain->event_id, chain->window,
                            XCB_PRESENT_EVENT_MASK_CONFIGURE_NOTIFY |
                            XCB_PRESENT_EVENT_MASK_COMPLETE_NOTIFY |
                            XCB_PRESENT_EVENT_MASK_IDLE_NOTIFY);

   /* Create an XCB event queue to hold present events outside of the usual
    * application event queue
    */
   chain->special_event =
      xcb_register_for_special_xge(chain->conn, &xcb_present_id,
                                   chain->event_id, NULL);

   chain->gc = xcb_generate_id(chain->conn);
   if (!chain->gc) {
      /* FINISHME: Choose a better error. */
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail_register;
   }

   cookie = xcb_create_gc(chain->conn,
                          chain->gc,
                          chain->window,
                          XCB_GC_GRAPHICS_EXPOSURES,
                          (uint32_t []) { 0 });
   xcb_discard_reply(chain->conn, cookie.sequence);

   uint32_t image = 0;
   for (; image < chain->image_count; image++) {
      result = x11_image_init(device, chain, pCreateInfo, pAllocator,
                              &chain->images[image]);
      if (result != VK_SUCCESS)
         goto fail_init_images;
   }

   *swapchain_out = &chain->base;

   return VK_SUCCESS;

fail_init_images:
   for (uint32_t j = 0; j < image; j++)
      x11_image_finish(chain, pAllocator, &chain->images[j]);

fail_register:
   xcb_unregister_for_special_event(chain->conn, chain->special_event);

   anv_free2(&device->alloc, pAllocator, chain);

   return result;
}

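/* Register the XCB WSI backend on a physical device.  Called from the anv
 * WSI setup path; anv_x11_finish_wsi() below undoes everything on teardown. */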
VkResult
anv_x11_init_wsi(struct anv_physical_device *device)
{
   struct wsi_x11 *wsi;
   VkResult result;

   wsi = anv_alloc(&device->instance->alloc, sizeof(*wsi), 8,
                   VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!wsi) {
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail;
   }

   int ret = pthread_mutex_init(&wsi->mutex, NULL);
   if (ret != 0) {
      if (ret == ENOMEM) {
         result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      } else {
         /* FINISHME: Choose a better error. */
         result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      goto fail_alloc;
   }

   wsi->connections = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                              _mesa_key_pointer_equal);
   if (!wsi->connections) {
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail_mutex;
   }

   wsi->base.get_support = x11_surface_get_support;
   wsi->base.get_capabilities = x11_surface_get_capabilities;
   wsi->base.get_formats = x11_surface_get_formats;
   wsi->base.get_present_modes = x11_surface_get_present_modes;
   wsi->base.create_swapchain = x11_surface_create_swapchain;

   device->wsi[VK_ICD_WSI_PLATFORM_XCB] = &wsi->base;

   return VK_SUCCESS;

fail_mutex:
   pthread_mutex_destroy(&wsi->mutex);
fail_alloc:
   anv_free(&device->instance->alloc, wsi);
fail:
   device->wsi[VK_ICD_WSI_PLATFORM_XCB] = NULL;

   return result;
}

void
anv_x11_finish_wsi(struct anv_physical_device *device)
{
   struct wsi_x11 *wsi =
      (struct wsi_x11 *)device->wsi[VK_ICD_WSI_PLATFORM_XCB];

   if (wsi) {
      _mesa_hash_table_destroy(wsi->connections, NULL);

      pthread_mutex_destroy(&wsi->mutex);

      anv_free(&device->instance->alloc, wsi);
   }
}