/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <X11/xshmfence.h>
#include <xcb/xcb.h>
#include <xcb/dri3.h>
#include <xcb/present.h>

#include "anv_wsi.h"

#include "util/hash_table.h"

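/* What we care about for a given xcb_connection_t: whether the server
 * advertises the DRI3 and Present extensions. Queried once and cached
 * per connection.
 */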
struct wsi_x11_connection {
   bool has_dri3;
   bool has_present;
};

struct wsi_x11 {
   struct anv_wsi_interface base;

   pthread_mutex_t mutex;
   /* Hash table of xcb_connection -> wsi_x11_connection mappings */
   struct hash_table *connections;
};

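/* Asks the server whether DRI3 and Present are supported. Both cookies are
 * issued before either reply is read, so the two queries cost one round
 * trip. Extension names are case-sensitive: Present registers itself as
 * "Present", not "PRESENT".
 */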
static struct wsi_x11_connection *
wsi_x11_connection_create(struct anv_instance *instance, xcb_connection_t *conn)
{
   xcb_query_extension_cookie_t dri3_cookie, pres_cookie;
   xcb_query_extension_reply_t *dri3_reply, *pres_reply;

   struct wsi_x11_connection *wsi_conn =
      anv_alloc(&instance->alloc, sizeof(*wsi_conn), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!wsi_conn)
      return NULL;

   dri3_cookie = xcb_query_extension(conn, 4, "DRI3");
   pres_cookie = xcb_query_extension(conn, 7, "Present");

   dri3_reply = xcb_query_extension_reply(conn, dri3_cookie, NULL);
   pres_reply = xcb_query_extension_reply(conn, pres_cookie, NULL);
   if (dri3_reply == NULL || pres_reply == NULL) {
      free(dri3_reply);
      free(pres_reply);
      anv_free(&instance->alloc, wsi_conn);
      return NULL;
   }

   wsi_conn->has_dri3 = dri3_reply->present != 0;
   wsi_conn->has_present = pres_reply->present != 0;

   free(dri3_reply);
   free(pres_reply);

   return wsi_conn;
}

static void
wsi_x11_connection_destroy(struct anv_instance *instance,
                           struct wsi_x11_connection *conn)
{
   anv_free(&instance->alloc, conn);
}

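/* Returns the cached wsi_x11_connection for conn, creating it on first use.
 * This is double-checked locking: the mutex is dropped around the blocking
 * extension queries, so two threads may race to create the same entry, in
 * which case the loser destroys its copy. Returns NULL if the
 * per-connection data could not be created.
 */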
static struct wsi_x11_connection *
wsi_x11_get_connection(struct anv_instance *instance, xcb_connection_t *conn)
{
   struct wsi_x11 *wsi =
      (struct wsi_x11 *)instance->wsi[VK_ICD_WSI_PLATFORM_XCB];

   pthread_mutex_lock(&wsi->mutex);

   struct hash_entry *entry = _mesa_hash_table_search(wsi->connections, conn);
   if (!entry) {
      /* We're about to make a bunch of blocking calls. Let's drop the
       * mutex for now so we don't block up too badly.
       */
      pthread_mutex_unlock(&wsi->mutex);

      struct wsi_x11_connection *wsi_conn =
         wsi_x11_connection_create(instance, conn);
      if (!wsi_conn)
         return NULL;

      pthread_mutex_lock(&wsi->mutex);

      entry = _mesa_hash_table_search(wsi->connections, conn);
      if (entry) {
         /* Oops, someone raced us to it */
         wsi_x11_connection_destroy(instance, wsi_conn);
      } else {
         entry = _mesa_hash_table_insert(wsi->connections, conn, wsi_conn);
      }
   }

   pthread_mutex_unlock(&wsi->mutex);

   return entry->data;
}

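/* The X11 back end currently advertises a single surface format and a
 * single present mode.
 */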
static const VkSurfaceFormatKHR formats[] = {
   { .format = VK_FORMAT_B8G8R8A8_SRGB, },
};

static const VkPresentModeKHR present_modes[] = {
   VK_PRESENT_MODE_MAILBOX_KHR,
};

static xcb_screen_t *
get_screen_for_root(xcb_connection_t *conn, xcb_window_t root)
{
   xcb_screen_iterator_t screen_iter =
      xcb_setup_roots_iterator(xcb_get_setup(conn));

   for (; screen_iter.rem; xcb_screen_next(&screen_iter)) {
      if (screen_iter.data->root == root)
         return screen_iter.data;
   }

   return NULL;
}

static xcb_visualtype_t *
screen_get_visualtype(xcb_screen_t *screen, xcb_visualid_t visual_id,
                      unsigned *depth)
{
   xcb_depth_iterator_t depth_iter =
      xcb_screen_allowed_depths_iterator(screen);

   for (; depth_iter.rem; xcb_depth_next(&depth_iter)) {
      xcb_visualtype_iterator_t visual_iter =
         xcb_depth_visuals_iterator(depth_iter.data);

      for (; visual_iter.rem; xcb_visualtype_next(&visual_iter)) {
         if (visual_iter.data->visual_id == visual_id) {
            if (depth)
               *depth = depth_iter.data->depth;
            return visual_iter.data;
         }
      }
   }

   return NULL;
}

static xcb_visualtype_t *
connection_get_visualtype(xcb_connection_t *conn, xcb_visualid_t visual_id,
                          unsigned *depth)
{
   xcb_screen_iterator_t screen_iter =
      xcb_setup_roots_iterator(xcb_get_setup(conn));

   /* For this we have to iterate over all of the screens which is rather
    * annoying. Fortunately, there is probably only 1.
    */
   for (; screen_iter.rem; xcb_screen_next(&screen_iter)) {
      xcb_visualtype_t *visual = screen_get_visualtype(screen_iter.data,
                                                       visual_id, depth);
      if (visual)
         return visual;
   }

   return NULL;
}

static xcb_visualtype_t *
get_visualtype_for_window(xcb_connection_t *conn, xcb_window_t window,
                          unsigned *depth)
{
   xcb_query_tree_cookie_t tree_cookie;
   xcb_get_window_attributes_cookie_t attrib_cookie;
   xcb_query_tree_reply_t *tree;
   xcb_get_window_attributes_reply_t *attrib;

   tree_cookie = xcb_query_tree(conn, window);
   attrib_cookie = xcb_get_window_attributes(conn, window);

   tree = xcb_query_tree_reply(conn, tree_cookie, NULL);
   attrib = xcb_get_window_attributes_reply(conn, attrib_cookie, NULL);
   if (attrib == NULL || tree == NULL) {
      free(attrib);
      free(tree);
      return NULL;
   }

   xcb_window_t root = tree->root;
   xcb_visualid_t visual_id = attrib->visual;
   free(attrib);
   free(tree);

   xcb_screen_t *screen = get_screen_for_root(conn, root);
   if (screen == NULL)
      return NULL;

   return screen_get_visualtype(screen, visual_id, depth);
}

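/* A visual has usable alpha when its depth covers more bits than the
 * R/G/B masks combined. For example, a depth-32 visual with 8-bit red,
 * green, and blue masks leaves 8 bits over for alpha, while a depth-24
 * visual with the same masks leaves none.
 */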
static bool
visual_has_alpha(xcb_visualtype_t *visual, unsigned depth)
{
   uint32_t rgb_mask = visual->red_mask |
                       visual->green_mask |
                       visual->blue_mask;

   uint32_t all_mask = 0xffffffff >> (32 - depth);

   /* Do we have bits left over after RGB? */
   return (all_mask & ~rgb_mask) != 0;
}

VkBool32 anv_GetPhysicalDeviceXcbPresentationSupportKHR(
    VkPhysicalDevice physicalDevice,
    uint32_t queueFamilyIndex,
    xcb_connection_t* connection,
    xcb_visualid_t visual_id)
{
   ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);

   struct wsi_x11_connection *wsi_conn =
      wsi_x11_get_connection(device->instance, connection);
   if (!wsi_conn)
      return false;

   if (!wsi_conn->has_dri3) {
      fprintf(stderr, "vulkan: No DRI3 support\n");
      return false;
   }

   unsigned visual_depth;
   if (!connection_get_visualtype(connection, visual_id, &visual_depth))
      return false;

   if (visual_depth != 24 && visual_depth != 32)
      return false;

   return true;
}

static VkResult
x11_surface_get_support(VkIcdSurfaceBase *icd_surface,
                        struct anv_physical_device *device,
                        uint32_t queueFamilyIndex,
                        VkBool32* pSupported)
{
   VkIcdSurfaceXcb *surface = (VkIcdSurfaceXcb *)icd_surface;

   struct wsi_x11_connection *wsi_conn =
      wsi_x11_get_connection(device->instance, surface->connection);
   if (!wsi_conn)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   if (!wsi_conn->has_dri3) {
      fprintf(stderr, "vulkan: No DRI3 support\n");
      *pSupported = false;
      return VK_SUCCESS;
   }

   unsigned visual_depth;
   if (!get_visualtype_for_window(surface->connection, surface->window,
                                  &visual_depth)) {
      *pSupported = false;
      return VK_SUCCESS;
   }

   if (visual_depth != 24 && visual_depth != 32) {
      *pSupported = false;
      return VK_SUCCESS;
   }

   *pSupported = true;
   return VK_SUCCESS;
}

static VkResult
x11_surface_get_capabilities(VkIcdSurfaceBase *icd_surface,
                             struct anv_physical_device *device,
                             VkSurfaceCapabilitiesKHR *caps)
{
   VkIcdSurfaceXcb *surface = (VkIcdSurfaceXcb *)icd_surface;
   xcb_get_geometry_cookie_t geom_cookie;
   xcb_generic_error_t *err;
   xcb_get_geometry_reply_t *geom;
   unsigned visual_depth;

   geom_cookie = xcb_get_geometry(surface->connection, surface->window);

   /* get_visualtype_for_window() does a round-trip of its own. That is why
    * we send the get_geometry request first and wait to read its reply
    * until after we have the visual: the geometry reply can arrive while
    * the visual lookup is in flight.
    */
   xcb_visualtype_t *visual =
      get_visualtype_for_window(surface->connection, surface->window,
                                &visual_depth);

   geom = xcb_get_geometry_reply(surface->connection, geom_cookie, &err);
   if (geom) {
      VkExtent2D extent = { geom->width, geom->height };
      caps->currentExtent = extent;
      caps->minImageExtent = extent;
      caps->maxImageExtent = extent;
   } else {
      /* This can happen if the client didn't wait for the configure event
       * to come back from the compositor. In that case, we don't know the
       * size of the window so we just return valid "I don't know" values:
       * -1 cast to uint32_t is the spec's 0xffffffff "extent undefined"
       * sentinel for currentExtent.
       */
      caps->currentExtent = (VkExtent2D) { -1, -1 };
      caps->minImageExtent = (VkExtent2D) { 1, 1 };
      caps->maxImageExtent = (VkExtent2D) { INT16_MAX, INT16_MAX };
   }
   free(err);
   free(geom);

   if (visual == NULL) {
      /* Without a visual we would crash below; treat it as a lost surface. */
      return vk_error(VK_ERROR_SURFACE_LOST_KHR);
   }

   if (visual_has_alpha(visual, visual_depth)) {
      caps->supportedCompositeAlpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR |
                                      VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR;
   } else {
      caps->supportedCompositeAlpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR |
                                      VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
   }

   caps->minImageCount = 2;
   caps->maxImageCount = 4;
   caps->supportedTransforms = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
   caps->currentTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
   caps->maxImageArrayLayers = 1;
   caps->supportedUsageFlags =
      VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
      VK_IMAGE_USAGE_SAMPLED_BIT |
      VK_IMAGE_USAGE_TRANSFER_DST_BIT |
      VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;

   return VK_SUCCESS;
}

static VkResult
x11_surface_get_formats(VkIcdSurfaceBase *surface,
                        struct anv_physical_device *device,
                        uint32_t *pSurfaceFormatCount,
                        VkSurfaceFormatKHR *pSurfaceFormats)
{
   if (pSurfaceFormats == NULL) {
      *pSurfaceFormatCount = ARRAY_SIZE(formats);
      return VK_SUCCESS;
   }

   assert(*pSurfaceFormatCount >= ARRAY_SIZE(formats));
   /* Copy only as many elements as the source array holds; copying
    * *pSurfaceFormatCount elements would read past the end of formats[]
    * whenever the caller passes a larger count.
    */
   typed_memcpy(pSurfaceFormats, formats, ARRAY_SIZE(formats));
   *pSurfaceFormatCount = ARRAY_SIZE(formats);

   return VK_SUCCESS;
}

static VkResult
x11_surface_get_present_modes(VkIcdSurfaceBase *surface,
                              struct anv_physical_device *device,
                              uint32_t *pPresentModeCount,
                              VkPresentModeKHR *pPresentModes)
{
   if (pPresentModes == NULL) {
      *pPresentModeCount = ARRAY_SIZE(present_modes);
      return VK_SUCCESS;
   }

   assert(*pPresentModeCount >= ARRAY_SIZE(present_modes));
   /* As with the formats, copy only ARRAY_SIZE(present_modes) elements to
    * avoid reading past the end of the static array.
    */
   typed_memcpy(pPresentModes, present_modes, ARRAY_SIZE(present_modes));
   *pPresentModeCount = ARRAY_SIZE(present_modes);

   return VK_SUCCESS;
}

static VkResult
x11_surface_create_swapchain(VkIcdSurfaceBase *surface,
                             struct anv_device *device,
                             const VkSwapchainCreateInfoKHR* pCreateInfo,
                             const VkAllocationCallbacks* pAllocator,
                             struct anv_swapchain **swapchain);

VkResult anv_CreateXcbSurfaceKHR(
    VkInstance _instance,
    const VkXcbSurfaceCreateInfoKHR* pCreateInfo,
    const VkAllocationCallbacks* pAllocator,
    VkSurfaceKHR* pSurface)
{
   ANV_FROM_HANDLE(anv_instance, instance, _instance);

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR);

   VkIcdSurfaceXcb *surface;

   surface = anv_alloc2(&instance->alloc, pAllocator, sizeof *surface, 8,
                        VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (surface == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   surface->base.platform = VK_ICD_WSI_PLATFORM_XCB;
   surface->connection = pCreateInfo->connection;
   surface->window = pCreateInfo->window;

   *pSurface = _VkIcdSurfaceBase_to_handle(&surface->base);

   return VK_SUCCESS;
}

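/* One swapchain slot: the VkImage and its backing memory, the DRI3 pixmap
 * that aliases that memory on the server side, and the fence pair used to
 * track when the server is done with the pixmap. sync_fence is the XSync
 * fence XID handed to the server; shm_fence is our client-side mapping of
 * the same xshmfence.
 */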
struct x11_image {
   struct anv_image *image;
   struct anv_device_memory *memory;
   xcb_pixmap_t pixmap;
   bool busy;
   struct xshmfence *shm_fence;
   uint32_t sync_fence;
};

struct x11_swapchain {
   struct anv_swapchain base;

   xcb_connection_t *conn;
   xcb_window_t window;
   xcb_gc_t gc;
   VkExtent2D extent;
   uint32_t image_count;

   xcb_present_event_t event_id;
   xcb_special_event_t *special_event;
   uint64_t send_sbc;
   uint32_t stamp;

   struct x11_image images[0];
};

static VkResult
x11_get_images(struct anv_swapchain *anv_chain,
               uint32_t* pCount, VkImage *pSwapchainImages)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;

   if (pSwapchainImages == NULL) {
      *pCount = chain->image_count;
      return VK_SUCCESS;
   }

   assert(chain->image_count <= *pCount);
   for (uint32_t i = 0; i < chain->image_count; i++)
      pSwapchainImages[i] = anv_image_to_handle(chain->images[i].image);

   *pCount = chain->image_count;

   return VK_SUCCESS;
}

static VkResult
x11_handle_dri3_present_event(struct x11_swapchain *chain,
                              xcb_present_generic_event_t *event)
{
   switch (event->evtype) {
   case XCB_PRESENT_CONFIGURE_NOTIFY: {
      xcb_present_configure_notify_event_t *config = (void *) event;

      if (config->width != chain->extent.width ||
          config->height != chain->extent.height)
         return vk_error(VK_ERROR_OUT_OF_DATE_KHR);

      break;
   }

   case XCB_PRESENT_EVENT_IDLE_NOTIFY: {
      xcb_present_idle_notify_event_t *idle = (void *) event;

      for (unsigned i = 0; i < chain->image_count; i++) {
         if (chain->images[i].pixmap == idle->pixmap) {
            chain->images[i].busy = false;
            break;
         }
      }

      break;
   }

   case XCB_PRESENT_COMPLETE_NOTIFY:
   default:
      break;
   }

   return VK_SUCCESS;
}

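/* Hands back the first image the server has marked idle, waiting on its
 * shm fence before returning it to the application. If every image is
 * busy, blocks on the swapchain's special event queue and processes
 * Present events until one frees up. Note that the timeout and semaphore
 * parameters are currently ignored by this implementation.
 */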
static VkResult
x11_acquire_next_image(struct anv_swapchain *anv_chain,
                       uint64_t timeout,
                       VkSemaphore semaphore,
                       uint32_t *image_index)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;

   while (1) {
      for (uint32_t i = 0; i < chain->image_count; i++) {
         if (!chain->images[i].busy) {
            /* We found a non-busy image */
            xshmfence_await(chain->images[i].shm_fence);
            *image_index = i;
            return VK_SUCCESS;
         }
      }

      xcb_flush(chain->conn);
      xcb_generic_event_t *event =
         xcb_wait_for_special_event(chain->conn, chain->special_event);
      if (!event)
         return vk_error(VK_ERROR_OUT_OF_DATE_KHR);

      VkResult result = x11_handle_dri3_present_event(chain, (void *)event);
      free(event);
      if (result != VK_SUCCESS)
         return result;
   }
}

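/* Submits the image to the server with PresentPixmap. The idle fence is
 * reset first; the server triggers it (and sends an IDLE_NOTIFY event)
 * once it no longer needs the pixmap. XCB_PRESENT_OPTION_ASYNC asks for
 * the present to happen immediately rather than waiting for a vblank,
 * which is the behavior mailbox mode wants.
 */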
static VkResult
x11_queue_present(struct anv_swapchain *anv_chain,
                  struct anv_queue *queue,
                  uint32_t image_index)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
   struct x11_image *image = &chain->images[image_index];

   assert(image_index < chain->image_count);

   uint32_t options = XCB_PRESENT_OPTION_NONE;

   int64_t target_msc = 0;
   int64_t divisor = 0;
   int64_t remainder = 0;

   options |= XCB_PRESENT_OPTION_ASYNC;

   xshmfence_reset(image->shm_fence);

   xcb_void_cookie_t cookie =
      xcb_present_pixmap(chain->conn,
                         chain->window,
                         image->pixmap,
                         (uint32_t) chain->send_sbc,
                         0, /* valid */
                         0, /* update */
                         0, /* x_off */
                         0, /* y_off */
                         XCB_NONE, /* target_crtc */
                         XCB_NONE,
                         image->sync_fence,
                         options,
                         target_msc,
                         divisor,
                         remainder, 0, NULL);
   xcb_discard_reply(chain->conn, cookie.sequence);
   image->busy = true;

   xcb_flush(chain->conn);

   return VK_SUCCESS;
}

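/* Creates one swapchain image: allocate an X-tiled BO, export it as a
 * dma-buf fd, wrap that fd in a DRI3 pixmap on the server, and create the
 * shared fence pair used to track pixmap idleness. The dma-buf and fence
 * fds are handed off to XCB, which passes them to the server and closes
 * them, so they are not closed here on success.
 */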
static VkResult
x11_image_init(struct anv_device *device, struct x11_swapchain *chain,
               const VkSwapchainCreateInfoKHR *pCreateInfo,
               const VkAllocationCallbacks* pAllocator,
               struct x11_image *image)
{
   xcb_void_cookie_t cookie;
   VkResult result;

   VkImage image_h;
   result = anv_image_create(anv_device_to_handle(device),
      &(struct anv_image_create_info) {
         .isl_tiling_flags = ISL_TILING_X_BIT,
         .stride = 0,
         .vk_info = &(VkImageCreateInfo) {
            .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
            .imageType = VK_IMAGE_TYPE_2D,
            .format = pCreateInfo->imageFormat,
            .extent = {
               .width = pCreateInfo->imageExtent.width,
               .height = pCreateInfo->imageExtent.height,
               .depth = 1
            },
            .mipLevels = 1,
            .arrayLayers = 1,
            .samples = 1,
            /* FIXME: Need a way to use X tiling to allow scanout */
            .tiling = VK_IMAGE_TILING_OPTIMAL,
            .usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
            .flags = 0,
         }},
      NULL,
      &image_h);
   if (result != VK_SUCCESS)
      return result;

   image->image = anv_image_from_handle(image_h);
   assert(anv_format_is_color(image->image->format));

   VkDeviceMemory memory_h;
   result = anv_AllocateMemory(anv_device_to_handle(device),
      &(VkMemoryAllocateInfo) {
         .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
         .allocationSize = image->image->size,
         .memoryTypeIndex = 0,
      },
      NULL /* XXX: pAllocator */,
      &memory_h);
   if (result != VK_SUCCESS)
      goto fail_create_image;

   image->memory = anv_device_memory_from_handle(memory_h);
   image->memory->bo.is_winsys_bo = true;

   anv_BindImageMemory(VK_NULL_HANDLE, image_h, memory_h, 0);

   struct anv_surface *surface = &image->image->color_surface;
   assert(surface->isl.tiling == ISL_TILING_X);

   int ret = anv_gem_set_tiling(device, image->memory->bo.gem_handle,
                                surface->isl.row_pitch, I915_TILING_X);
   if (ret) {
      /* FINISHME: Choose a better error. */
      result = vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY,
                         "set_tiling failed: %m");
      goto fail_alloc_memory;
   }

   int fd = anv_gem_handle_to_fd(device, image->memory->bo.gem_handle);
   if (fd == -1) {
      /* FINISHME: Choose a better error. */
      result = vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY,
                         "handle_to_fd failed: %m");
      goto fail_alloc_memory;
   }

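   /* The pixmap is created with a hard-coded depth of 24 at 32 bits per
    * pixel; in X, depth-24 images are still stored 32 bpp with the top
    * byte unused.
    */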
   uint32_t bpp = 32;
   uint32_t depth = 24;
   image->pixmap = xcb_generate_id(chain->conn);

   cookie =
      xcb_dri3_pixmap_from_buffer_checked(chain->conn,
                                          image->pixmap,
                                          chain->window,
                                          image->image->size,
                                          pCreateInfo->imageExtent.width,
                                          pCreateInfo->imageExtent.height,
                                          surface->isl.row_pitch,
                                          depth, bpp, fd);
   xcb_discard_reply(chain->conn, cookie.sequence);

   int fence_fd = xshmfence_alloc_shm();
   if (fence_fd < 0) {
      /* Set result so the failure actually propagates to the caller. */
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail_pixmap;
   }

   image->shm_fence = xshmfence_map_shm(fence_fd);
   if (image->shm_fence == NULL) {
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail_shmfence_alloc;
   }

   image->sync_fence = xcb_generate_id(chain->conn);
   xcb_dri3_fence_from_fd(chain->conn,
                          image->pixmap,
                          image->sync_fence,
                          false,
                          fence_fd);

   image->busy = false;
   xshmfence_trigger(image->shm_fence);

   return VK_SUCCESS;

fail_shmfence_alloc:
   close(fence_fd);

fail_pixmap:
   cookie = xcb_free_pixmap(chain->conn, image->pixmap);
   xcb_discard_reply(chain->conn, cookie.sequence);

fail_alloc_memory:
   anv_FreeMemory(anv_device_to_handle(chain->base.device),
                  anv_device_memory_to_handle(image->memory), pAllocator);

fail_create_image:
   anv_DestroyImage(anv_device_to_handle(chain->base.device),
                    anv_image_to_handle(image->image), pAllocator);

   return result;
}

static void
x11_image_finish(struct x11_swapchain *chain,
                 const VkAllocationCallbacks* pAllocator,
                 struct x11_image *image)
{
   xcb_void_cookie_t cookie;

   cookie = xcb_sync_destroy_fence(chain->conn, image->sync_fence);
   xcb_discard_reply(chain->conn, cookie.sequence);
   xshmfence_unmap_shm(image->shm_fence);

   cookie = xcb_free_pixmap(chain->conn, image->pixmap);
   xcb_discard_reply(chain->conn, cookie.sequence);

   anv_DestroyImage(anv_device_to_handle(chain->base.device),
                    anv_image_to_handle(image->image), pAllocator);

   anv_FreeMemory(anv_device_to_handle(chain->base.device),
                  anv_device_memory_to_handle(image->memory), pAllocator);
}

static VkResult
x11_swapchain_destroy(struct anv_swapchain *anv_chain,
                      const VkAllocationCallbacks *pAllocator)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;

   for (uint32_t i = 0; i < chain->image_count; i++)
      x11_image_finish(chain, pAllocator, &chain->images[i]);

   xcb_unregister_for_special_event(chain->conn, chain->special_event);

   anv_free2(&chain->base.device->alloc, pAllocator, chain);

   return VK_SUCCESS;
}

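/* Builds the swapchain: registers for Present events on the window (so we
 * hear about resizes and idle pixmaps), creates a graphics context, and
 * then initializes each backing image.
 */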
static VkResult
x11_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
                             struct anv_device *device,
                             const VkSwapchainCreateInfoKHR *pCreateInfo,
                             const VkAllocationCallbacks* pAllocator,
                             struct anv_swapchain **swapchain_out)
{
   VkIcdSurfaceXcb *surface = (VkIcdSurfaceXcb *)icd_surface;
   struct x11_swapchain *chain;
   xcb_void_cookie_t cookie;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR);

   int num_images = pCreateInfo->minImageCount;

   /* For true mailbox mode, we need at least 4 images:
    * 1) One to scan out from
    * 2) One to have queued for scan-out
    * 3) One to be currently held by the X server
    * 4) One to render to
    */
   if (pCreateInfo->presentMode == VK_PRESENT_MODE_MAILBOX_KHR)
      num_images = MAX2(num_images, 4);

   size_t size = sizeof(*chain) + num_images * sizeof(chain->images[0]);
   chain = anv_alloc2(&device->alloc, pAllocator, size, 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (chain == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   chain->base.device = device;
   chain->base.destroy = x11_swapchain_destroy;
   chain->base.get_images = x11_get_images;
   chain->base.acquire_next_image = x11_acquire_next_image;
   chain->base.queue_present = x11_queue_present;

   chain->conn = surface->connection;
   chain->window = surface->window;
   chain->extent = pCreateInfo->imageExtent;
   chain->image_count = num_images;
   /* anv_alloc2 does not zero the allocation, so initialize the present
    * serial explicitly.
    */
   chain->send_sbc = 0;

   chain->event_id = xcb_generate_id(chain->conn);
   xcb_present_select_input(chain->conn, chain->event_id, chain->window,
                            XCB_PRESENT_EVENT_MASK_CONFIGURE_NOTIFY |
                            XCB_PRESENT_EVENT_MASK_COMPLETE_NOTIFY |
                            XCB_PRESENT_EVENT_MASK_IDLE_NOTIFY);

   /* Create an XCB event queue to hold present events outside of the usual
    * application event queue
    */
   chain->special_event =
      xcb_register_for_special_xge(chain->conn, &xcb_present_id,
                                   chain->event_id, NULL);

   chain->gc = xcb_generate_id(chain->conn);
   if (!chain->gc) {
      /* FINISHME: Choose a better error. */
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail_register;
   }

   cookie = xcb_create_gc(chain->conn,
                          chain->gc,
                          chain->window,
                          XCB_GC_GRAPHICS_EXPOSURES,
                          (uint32_t []) { 0 });
   xcb_discard_reply(chain->conn, cookie.sequence);

   uint32_t image = 0;
   for (; image < chain->image_count; image++) {
      result = x11_image_init(device, chain, pCreateInfo, pAllocator,
                              &chain->images[image]);
      if (result != VK_SUCCESS)
         goto fail_init_images;
   }

   *swapchain_out = &chain->base;

   return VK_SUCCESS;

fail_init_images:
   for (uint32_t j = 0; j < image; j++)
      x11_image_finish(chain, pAllocator, &chain->images[j]);

fail_register:
   xcb_unregister_for_special_event(chain->conn, chain->special_event);

   anv_free2(&device->alloc, pAllocator, chain);

   return result;
}

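/* Instance-level setup: plug the X11/XCB implementation into the
 * instance's WSI dispatch table and create the per-connection cache and
 * its mutex.
 */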
VkResult
anv_x11_init_wsi(struct anv_instance *instance)
{
   struct wsi_x11 *wsi;
   VkResult result;

   wsi = anv_alloc(&instance->alloc, sizeof(*wsi), 8,
                   VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!wsi) {
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail;
   }

   int ret = pthread_mutex_init(&wsi->mutex, NULL);
   if (ret != 0) {
      if (ret == ENOMEM) {
         result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      } else {
         /* FINISHME: Choose a better error. */
         result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      goto fail_alloc;
   }

   wsi->connections = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                              _mesa_key_pointer_equal);
   if (!wsi->connections) {
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail_mutex;
   }

   wsi->base.get_support = x11_surface_get_support;
   wsi->base.get_capabilities = x11_surface_get_capabilities;
   wsi->base.get_formats = x11_surface_get_formats;
   wsi->base.get_present_modes = x11_surface_get_present_modes;
   wsi->base.create_swapchain = x11_surface_create_swapchain;

   instance->wsi[VK_ICD_WSI_PLATFORM_XCB] = &wsi->base;

   return VK_SUCCESS;

fail_mutex:
   pthread_mutex_destroy(&wsi->mutex);
fail_alloc:
   anv_free(&instance->alloc, wsi);
fail:
   instance->wsi[VK_ICD_WSI_PLATFORM_XCB] = NULL;

   return result;
}

void
anv_x11_finish_wsi(struct anv_instance *instance)
{
   struct wsi_x11 *wsi =
      (struct wsi_x11 *)instance->wsi[VK_ICD_WSI_PLATFORM_XCB];

   if (wsi) {
      _mesa_hash_table_destroy(wsi->connections, NULL);

      pthread_mutex_destroy(&wsi->mutex);

      anv_free(&instance->alloc, wsi);
   }
}