anv/wsi/x11: push anv_device out of the init/finish routines
src/intel/vulkan/anv_wsi_x11.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <X11/Xlib-xcb.h>
#include <X11/xshmfence.h>
#include <xcb/xcb.h>
#include <xcb/dri3.h>
#include <xcb/present.h>

#include "anv_wsi.h"

#include "vk_format_info.h"
#include "util/hash_table.h"

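/* Per-connection cache of DRI3/Present extension support, keyed by the
 * xcb_connection_t pointer so the extension round-trips only happen once
 * per connection.
 */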
struct wsi_x11_connection {
   bool has_dri3;
   bool has_present;
};

struct wsi_x11 {
   struct anv_wsi_interface base;

   pthread_mutex_t mutex;
   /* Hash table of xcb_connection -> wsi_x11_connection mappings */
   struct hash_table *connections;
};

static struct wsi_x11_connection *
wsi_x11_connection_create(const VkAllocationCallbacks *alloc,
                          xcb_connection_t *conn)
{
   xcb_query_extension_cookie_t dri3_cookie, pres_cookie;
   xcb_query_extension_reply_t *dri3_reply, *pres_reply;

   struct wsi_x11_connection *wsi_conn =
      vk_alloc(alloc, sizeof(*wsi_conn), 8,
               VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!wsi_conn)
      return NULL;

   dri3_cookie = xcb_query_extension(conn, 4, "DRI3");
   pres_cookie = xcb_query_extension(conn, 7, "PRESENT");

   dri3_reply = xcb_query_extension_reply(conn, dri3_cookie, NULL);
   pres_reply = xcb_query_extension_reply(conn, pres_cookie, NULL);
   if (dri3_reply == NULL || pres_reply == NULL) {
      free(dri3_reply);
      free(pres_reply);
      vk_free(alloc, wsi_conn);
      return NULL;
   }

   wsi_conn->has_dri3 = dri3_reply->present != 0;
   wsi_conn->has_present = pres_reply->present != 0;

   free(dri3_reply);
   free(pres_reply);

   return wsi_conn;
}

static void
wsi_x11_connection_destroy(const VkAllocationCallbacks *alloc,
                           struct wsi_x11_connection *conn)
{
   vk_free(alloc, conn);
}

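/* Looks up (or lazily creates) the wsi_x11_connection for an XCB
 * connection.  The extension queries in wsi_x11_connection_create() block,
 * so the mutex is dropped while they run and the table is re-checked
 * afterwards in case another thread raced us to the same connection.
 */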
static struct wsi_x11_connection *
wsi_x11_get_connection(struct anv_wsi_device *wsi_dev,
                       const VkAllocationCallbacks *alloc,
                       xcb_connection_t *conn)
{
   struct wsi_x11 *wsi =
      (struct wsi_x11 *)wsi_dev->wsi[VK_ICD_WSI_PLATFORM_XCB];

   pthread_mutex_lock(&wsi->mutex);

   struct hash_entry *entry = _mesa_hash_table_search(wsi->connections, conn);
   if (!entry) {
      /* We're about to make a bunch of blocking calls.  Let's drop the
       * mutex for now so we don't block up too badly.
       */
      pthread_mutex_unlock(&wsi->mutex);

      struct wsi_x11_connection *wsi_conn =
         wsi_x11_connection_create(alloc, conn);
      if (!wsi_conn)
         return NULL;

      pthread_mutex_lock(&wsi->mutex);

      entry = _mesa_hash_table_search(wsi->connections, conn);
      if (entry) {
         /* Oops, someone raced us to it */
         wsi_x11_connection_destroy(alloc, wsi_conn);
      } else {
         entry = _mesa_hash_table_insert(wsi->connections, conn, wsi_conn);
      }
   }

   pthread_mutex_unlock(&wsi->mutex);

   return entry->data;
}

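/* Both formats map onto the 32-bit BGRA pixel layout of a depth-24/32
 * TrueColor visual; only the sRGB encoding differs.
 */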
static const VkSurfaceFormatKHR formats[] = {
   { .format = VK_FORMAT_B8G8R8A8_SRGB, },
   { .format = VK_FORMAT_B8G8R8A8_UNORM, },
};

static const VkPresentModeKHR present_modes[] = {
   VK_PRESENT_MODE_MAILBOX_KHR,
};

static xcb_screen_t *
get_screen_for_root(xcb_connection_t *conn, xcb_window_t root)
{
   xcb_screen_iterator_t screen_iter =
      xcb_setup_roots_iterator(xcb_get_setup(conn));

   for (; screen_iter.rem; xcb_screen_next(&screen_iter)) {
      if (screen_iter.data->root == root)
         return screen_iter.data;
   }

   return NULL;
}

static xcb_visualtype_t *
screen_get_visualtype(xcb_screen_t *screen, xcb_visualid_t visual_id,
                      unsigned *depth)
{
   xcb_depth_iterator_t depth_iter =
      xcb_screen_allowed_depths_iterator(screen);

   for (; depth_iter.rem; xcb_depth_next(&depth_iter)) {
      xcb_visualtype_iterator_t visual_iter =
         xcb_depth_visuals_iterator(depth_iter.data);

      for (; visual_iter.rem; xcb_visualtype_next(&visual_iter)) {
         if (visual_iter.data->visual_id == visual_id) {
            if (depth)
               *depth = depth_iter.data->depth;
            return visual_iter.data;
         }
      }
   }

   return NULL;
}

static xcb_visualtype_t *
connection_get_visualtype(xcb_connection_t *conn, xcb_visualid_t visual_id,
                          unsigned *depth)
{
   xcb_screen_iterator_t screen_iter =
      xcb_setup_roots_iterator(xcb_get_setup(conn));

   /* For this we have to iterate over all of the screens which is rather
    * annoying.  Fortunately, there is probably only 1.
    */
   for (; screen_iter.rem; xcb_screen_next(&screen_iter)) {
      xcb_visualtype_t *visual = screen_get_visualtype(screen_iter.data,
                                                       visual_id, depth);
      if (visual)
         return visual;
   }

   return NULL;
}

static xcb_visualtype_t *
get_visualtype_for_window(xcb_connection_t *conn, xcb_window_t window,
                          unsigned *depth)
{
   xcb_query_tree_cookie_t tree_cookie;
   xcb_get_window_attributes_cookie_t attrib_cookie;
   xcb_query_tree_reply_t *tree;
   xcb_get_window_attributes_reply_t *attrib;

   tree_cookie = xcb_query_tree(conn, window);
   attrib_cookie = xcb_get_window_attributes(conn, window);

   tree = xcb_query_tree_reply(conn, tree_cookie, NULL);
   attrib = xcb_get_window_attributes_reply(conn, attrib_cookie, NULL);
   if (attrib == NULL || tree == NULL) {
      free(attrib);
      free(tree);
      return NULL;
   }

   xcb_window_t root = tree->root;
   xcb_visualid_t visual_id = attrib->visual;
   free(attrib);
   free(tree);

   xcb_screen_t *screen = get_screen_for_root(conn, root);
   if (screen == NULL)
      return NULL;

   return screen_get_visualtype(screen, visual_id, depth);
}

static bool
visual_has_alpha(xcb_visualtype_t *visual, unsigned depth)
{
   uint32_t rgb_mask = visual->red_mask |
                       visual->green_mask |
                       visual->blue_mask;

   uint32_t all_mask = 0xffffffff >> (32 - depth);

   /* Do we have bits left over after RGB? */
   return (all_mask & ~rgb_mask) != 0;
}

VkBool32 anv_GetPhysicalDeviceXcbPresentationSupportKHR(
    VkPhysicalDevice                            physicalDevice,
    uint32_t                                    queueFamilyIndex,
    xcb_connection_t*                           connection,
    xcb_visualid_t                              visual_id)
{
   ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);

   struct wsi_x11_connection *wsi_conn =
      wsi_x11_get_connection(&device->wsi_device, &device->instance->alloc,
                             connection);
   if (!wsi_conn)
      return false;

   if (!wsi_conn->has_dri3) {
      fprintf(stderr, "vulkan: No DRI3 support\n");
      return false;
   }

   unsigned visual_depth;
   if (!connection_get_visualtype(connection, visual_id, &visual_depth))
      return false;

   if (visual_depth != 24 && visual_depth != 32)
      return false;

   return true;
}

VkBool32 anv_GetPhysicalDeviceXlibPresentationSupportKHR(
    VkPhysicalDevice                            physicalDevice,
    uint32_t                                    queueFamilyIndex,
    Display*                                    dpy,
    VisualID                                    visualID)
{
   return anv_GetPhysicalDeviceXcbPresentationSupportKHR(physicalDevice,
                                                         queueFamilyIndex,
                                                         XGetXCBConnection(dpy),
                                                         visualID);
}

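/* An Xlib surface is just an XCB surface in disguise: XGetXCBConnection()
 * hands us the underlying xcb_connection_t, so both surface types share
 * the rest of the implementation.
 */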
static xcb_connection_t*
x11_surface_get_connection(VkIcdSurfaceBase *icd_surface)
{
   if (icd_surface->platform == VK_ICD_WSI_PLATFORM_XLIB)
      return XGetXCBConnection(((VkIcdSurfaceXlib *)icd_surface)->dpy);
   else
      return ((VkIcdSurfaceXcb *)icd_surface)->connection;
}

static xcb_window_t
x11_surface_get_window(VkIcdSurfaceBase *icd_surface)
{
   if (icd_surface->platform == VK_ICD_WSI_PLATFORM_XLIB)
      return ((VkIcdSurfaceXlib *)icd_surface)->window;
   else
      return ((VkIcdSurfaceXcb *)icd_surface)->window;
}

static VkResult
x11_surface_get_support(VkIcdSurfaceBase *icd_surface,
                        struct anv_physical_device *device,
                        uint32_t queueFamilyIndex,
                        VkBool32* pSupported)
{
   xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
   xcb_window_t window = x11_surface_get_window(icd_surface);

   struct wsi_x11_connection *wsi_conn =
      wsi_x11_get_connection(&device->wsi_device, &device->instance->alloc,
                             conn);
   if (!wsi_conn)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   if (!wsi_conn->has_dri3) {
      fprintf(stderr, "vulkan: No DRI3 support\n");
      *pSupported = false;
      return VK_SUCCESS;
   }

   unsigned visual_depth;
   if (!get_visualtype_for_window(conn, window, &visual_depth)) {
      *pSupported = false;
      return VK_SUCCESS;
   }

   if (visual_depth != 24 && visual_depth != 32) {
      *pSupported = false;
      return VK_SUCCESS;
   }

   *pSupported = true;
   return VK_SUCCESS;
}

static VkResult
x11_surface_get_capabilities(VkIcdSurfaceBase *icd_surface,
                             VkSurfaceCapabilitiesKHR *caps)
{
   xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
   xcb_window_t window = x11_surface_get_window(icd_surface);
   xcb_get_geometry_cookie_t geom_cookie;
   xcb_generic_error_t *err;
   xcb_get_geometry_reply_t *geom;
   unsigned visual_depth;

   geom_cookie = xcb_get_geometry(conn, window);

   /* This does a round-trip.  This is why we do get_geometry first and
    * wait to read the reply until after we have a visual.
    */
   xcb_visualtype_t *visual =
      get_visualtype_for_window(conn, window, &visual_depth);

   /* Without a visual we can't say anything useful about the surface. */
   if (!visual)
      return vk_error(VK_ERROR_SURFACE_LOST_KHR);

   geom = xcb_get_geometry_reply(conn, geom_cookie, &err);
   if (geom) {
      VkExtent2D extent = { geom->width, geom->height };
      caps->currentExtent = extent;
      caps->minImageExtent = extent;
      caps->maxImageExtent = extent;
   } else {
      /* This can happen if the client didn't wait for the configure event
       * to come back from the compositor.  In that case, we don't know the
       * size of the window so we just return valid "I don't know" stuff.
       */
      caps->currentExtent = (VkExtent2D) { -1, -1 };
      caps->minImageExtent = (VkExtent2D) { 1, 1 };
      caps->maxImageExtent = (VkExtent2D) { INT16_MAX, INT16_MAX };
   }
   free(err);
   free(geom);

   if (visual_has_alpha(visual, visual_depth)) {
      caps->supportedCompositeAlpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR |
                                      VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR;
   } else {
      caps->supportedCompositeAlpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR |
                                      VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
   }

   caps->minImageCount = 2;
   caps->maxImageCount = 4;
   caps->supportedTransforms = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
   caps->currentTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
   caps->maxImageArrayLayers = 1;
   caps->supportedUsageFlags =
      VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
      VK_IMAGE_USAGE_SAMPLED_BIT |
      VK_IMAGE_USAGE_TRANSFER_DST_BIT |
      VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;

   return VK_SUCCESS;
}

static VkResult
x11_surface_get_formats(VkIcdSurfaceBase *surface,
                        struct anv_physical_device *device,
                        uint32_t *pSurfaceFormatCount,
                        VkSurfaceFormatKHR *pSurfaceFormats)
{
   if (pSurfaceFormats == NULL) {
      *pSurfaceFormatCount = ARRAY_SIZE(formats);
      return VK_SUCCESS;
   }

   /* Copy only as many entries as we actually have; copying
    * *pSurfaceFormatCount entries would read past the end of formats[].
    */
   assert(*pSurfaceFormatCount >= ARRAY_SIZE(formats));
   typed_memcpy(pSurfaceFormats, formats, ARRAY_SIZE(formats));
   *pSurfaceFormatCount = ARRAY_SIZE(formats);

   return VK_SUCCESS;
}

static VkResult
x11_surface_get_present_modes(VkIcdSurfaceBase *surface,
                              uint32_t *pPresentModeCount,
                              VkPresentModeKHR *pPresentModes)
{
   if (pPresentModes == NULL) {
      *pPresentModeCount = ARRAY_SIZE(present_modes);
      return VK_SUCCESS;
   }

   assert(*pPresentModeCount >= ARRAY_SIZE(present_modes));
   typed_memcpy(pPresentModes, present_modes, ARRAY_SIZE(present_modes));
   *pPresentModeCount = ARRAY_SIZE(present_modes);

   return VK_SUCCESS;
}

static VkResult
x11_surface_create_swapchain(VkIcdSurfaceBase *surface,
                             struct anv_device *device,
                             const VkSwapchainCreateInfoKHR* pCreateInfo,
                             const VkAllocationCallbacks* pAllocator,
                             struct anv_swapchain **swapchain);

VkResult anv_CreateXcbSurfaceKHR(
    VkInstance                                  _instance,
    const VkXcbSurfaceCreateInfoKHR*            pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkSurfaceKHR*                               pSurface)
{
   ANV_FROM_HANDLE(anv_instance, instance, _instance);

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR);

   VkIcdSurfaceXcb *surface;

   surface = vk_alloc2(&instance->alloc, pAllocator, sizeof *surface, 8,
                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (surface == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   surface->base.platform = VK_ICD_WSI_PLATFORM_XCB;
   surface->connection = pCreateInfo->connection;
   surface->window = pCreateInfo->window;

   *pSurface = _VkIcdSurfaceBase_to_handle(&surface->base);

   return VK_SUCCESS;
}

VkResult anv_CreateXlibSurfaceKHR(
    VkInstance                                  _instance,
    const VkXlibSurfaceCreateInfoKHR*           pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkSurfaceKHR*                               pSurface)
{
   ANV_FROM_HANDLE(anv_instance, instance, _instance);

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR);

   VkIcdSurfaceXlib *surface;

   surface = vk_alloc2(&instance->alloc, pAllocator, sizeof *surface, 8,
                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (surface == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   surface->base.platform = VK_ICD_WSI_PLATFORM_XLIB;
   surface->dpy = pCreateInfo->dpy;
   surface->window = pCreateInfo->window;

   *pSurface = _VkIcdSurfaceBase_to_handle(&surface->base);

   return VK_SUCCESS;
}

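/* Per-image presentation state.  shm_fence is the CPU-visible side of a
 * DRI3 fence (waited on with xshmfence_await() before reuse); sync_fence
 * is the XID through which the X server signals the same underlying fence.
 */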
struct x11_image {
   struct anv_image *                        image;
   struct anv_device_memory *                memory;
   xcb_pixmap_t                              pixmap;
   bool                                      busy;
   struct xshmfence *                        shm_fence;
   uint32_t                                  sync_fence;
};

struct x11_swapchain {
   struct anv_swapchain                      base;

   xcb_connection_t *                        conn;
   xcb_window_t                              window;
   xcb_gc_t                                  gc;
   VkExtent2D                                extent;
   uint32_t                                  image_count;

   xcb_present_event_t                       event_id;
   xcb_special_event_t *                     special_event;
   uint64_t                                  send_sbc;
   uint32_t                                  stamp;

   struct x11_image                          images[0];
};

static VkResult
x11_get_images(struct anv_swapchain *anv_chain,
               uint32_t* pCount, VkImage *pSwapchainImages)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;

   if (pSwapchainImages == NULL) {
      *pCount = chain->image_count;
      return VK_SUCCESS;
   }

   assert(chain->image_count <= *pCount);
   for (uint32_t i = 0; i < chain->image_count; i++)
      pSwapchainImages[i] = anv_image_to_handle(chain->images[i].image);

   *pCount = chain->image_count;

   return VK_SUCCESS;
}

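/* Demultiplexes Present events from the swapchain's special event queue:
 * a ConfigureNotify whose size no longer matches the swapchain means the
 * chain is out of date, and an IdleNotify marks the matching image as
 * reusable.
 */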
static VkResult
x11_handle_dri3_present_event(struct x11_swapchain *chain,
                              xcb_present_generic_event_t *event)
{
   switch (event->evtype) {
   case XCB_PRESENT_CONFIGURE_NOTIFY: {
      xcb_present_configure_notify_event_t *config = (void *) event;

      if (config->width != chain->extent.width ||
          config->height != chain->extent.height)
         return vk_error(VK_ERROR_OUT_OF_DATE_KHR);

      break;
   }

   case XCB_PRESENT_EVENT_IDLE_NOTIFY: {
      xcb_present_idle_notify_event_t *idle = (void *) event;

      for (unsigned i = 0; i < chain->image_count; i++) {
         if (chain->images[i].pixmap == idle->pixmap) {
            chain->images[i].busy = false;
            break;
         }
      }

      break;
   }

   case XCB_PRESENT_COMPLETE_NOTIFY:
   default:
      break;
   }

   return VK_SUCCESS;
}

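/* XXX: The timeout and semaphore parameters are currently ignored; we
 * simply block on the special event queue until the server idles one of
 * our images, then wait on its shm fence before handing it back.
 */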
static VkResult
x11_acquire_next_image(struct anv_swapchain *anv_chain,
                       uint64_t timeout,
                       VkSemaphore semaphore,
                       uint32_t *image_index)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;

   while (1) {
      for (uint32_t i = 0; i < chain->image_count; i++) {
         if (!chain->images[i].busy) {
            /* We found a non-busy image */
            xshmfence_await(chain->images[i].shm_fence);
            *image_index = i;
            chain->images[i].busy = true;
            return VK_SUCCESS;
         }
      }

      xcb_flush(chain->conn);
      xcb_generic_event_t *event =
         xcb_wait_for_special_event(chain->conn, chain->special_event);
      if (!event)
         return vk_error(VK_ERROR_OUT_OF_DATE_KHR);

      VkResult result = x11_handle_dri3_present_event(chain, (void *)event);
      free(event);
      if (result != VK_SUCCESS)
         return result;
   }
}

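/* Presents via PresentPixmap.  The shm fence is reset first so that
 * x11_acquire_next_image() blocks until the server is done with the
 * pixmap; send_sbc is a serial that lets Present events be matched back
 * to requests.  XCB_PRESENT_OPTION_ASYNC asks for an immediate,
 * non-vblank-synchronized present, matching our mailbox-only mode.
 */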
static VkResult
x11_queue_present(struct anv_swapchain *anv_chain,
                  uint32_t image_index)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;

   assert(image_index < chain->image_count);
   struct x11_image *image = &chain->images[image_index];

   uint32_t options = XCB_PRESENT_OPTION_NONE;

   int64_t target_msc = 0;
   int64_t divisor = 0;
   int64_t remainder = 0;

   options |= XCB_PRESENT_OPTION_ASYNC;

   xshmfence_reset(image->shm_fence);

   ++chain->send_sbc;
   xcb_void_cookie_t cookie =
      xcb_present_pixmap(chain->conn,
                         chain->window,
                         image->pixmap,
                         (uint32_t) chain->send_sbc,
                         0,                            /* valid */
                         0,                            /* update */
                         0,                            /* x_off */
                         0,                            /* y_off */
                         XCB_NONE,                     /* target_crtc */
                         XCB_NONE,
                         image->sync_fence,
                         options,
                         target_msc,
                         divisor,
                         remainder, 0, NULL);
   xcb_discard_reply(chain->conn, cookie.sequence);
   image->busy = true;

   xcb_flush(chain->conn);

   return VK_SUCCESS;
}

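/* Builds one swapchain image: create an X-tiled anv_image, back it with
 * device memory, export the BO as a prime fd, hand that fd to the server
 * with DRI3PixmapFromBuffer, and wire up a shared fence for idle tracking.
 */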
static VkResult
x11_image_init(struct anv_device *device, struct x11_swapchain *chain,
               const VkSwapchainCreateInfoKHR *pCreateInfo,
               const VkAllocationCallbacks* pAllocator,
               struct x11_image *image)
{
   xcb_void_cookie_t cookie;
   VkResult result;

   VkImage image_h;
   result = anv_image_create(anv_device_to_handle(device),
      &(struct anv_image_create_info) {
         .isl_tiling_flags = ISL_TILING_X_BIT,
         .stride = 0,
         .vk_info =
      &(VkImageCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
         .imageType = VK_IMAGE_TYPE_2D,
         .format = pCreateInfo->imageFormat,
         .extent = {
            .width = pCreateInfo->imageExtent.width,
            .height = pCreateInfo->imageExtent.height,
            .depth = 1
         },
         .mipLevels = 1,
         .arrayLayers = 1,
         .samples = 1,
         /* FIXME: Need a way to use X tiling to allow scanout */
         .tiling = VK_IMAGE_TILING_OPTIMAL,
         .usage = (pCreateInfo->imageUsage |
                   VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT),
         .flags = 0,
      }},
      NULL,
      &image_h);
   if (result != VK_SUCCESS)
      return result;

   image->image = anv_image_from_handle(image_h);
   assert(vk_format_is_color(image->image->vk_format));

   VkDeviceMemory memory_h;
   result = anv_AllocateMemory(anv_device_to_handle(device),
      &(VkMemoryAllocateInfo) {
         .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
         .allocationSize = image->image->size,
         .memoryTypeIndex = 0,
      },
      NULL /* XXX: pAllocator */,
      &memory_h);
   if (result != VK_SUCCESS)
      goto fail_create_image;

   image->memory = anv_device_memory_from_handle(memory_h);
   image->memory->bo.is_winsys_bo = true;

   anv_BindImageMemory(VK_NULL_HANDLE, image_h, memory_h, 0);

   struct anv_surface *surface = &image->image->color_surface;
   assert(surface->isl.tiling == ISL_TILING_X);

   int ret = anv_gem_set_tiling(device, image->memory->bo.gem_handle,
                                surface->isl.row_pitch, I915_TILING_X);
   if (ret) {
      /* FINISHME: Choose a better error. */
      result = vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY,
                         "set_tiling failed: %m");
      goto fail_alloc_memory;
   }

   int fd = anv_gem_handle_to_fd(device, image->memory->bo.gem_handle);
   if (fd == -1) {
      /* FINISHME: Choose a better error. */
      result = vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY,
                         "handle_to_fd failed: %m");
      goto fail_alloc_memory;
   }

   uint32_t bpp = 32;
   uint32_t depth = 24;
   image->pixmap = xcb_generate_id(chain->conn);

   cookie =
      xcb_dri3_pixmap_from_buffer_checked(chain->conn,
                                          image->pixmap,
                                          chain->window,
                                          image->image->size,
                                          pCreateInfo->imageExtent.width,
                                          pCreateInfo->imageExtent.height,
                                          surface->isl.row_pitch,
                                          depth, bpp, fd);
   xcb_discard_reply(chain->conn, cookie.sequence);

   int fence_fd = xshmfence_alloc_shm();
   if (fence_fd < 0) {
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail_pixmap;
   }

   image->shm_fence = xshmfence_map_shm(fence_fd);
   if (image->shm_fence == NULL) {
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail_shmfence_alloc;
   }

   image->sync_fence = xcb_generate_id(chain->conn);
   xcb_dri3_fence_from_fd(chain->conn,
                          image->pixmap,
                          image->sync_fence,
                          false,
                          fence_fd);

   image->busy = false;
   xshmfence_trigger(image->shm_fence);

   return VK_SUCCESS;

fail_shmfence_alloc:
   close(fence_fd);

fail_pixmap:
   cookie = xcb_free_pixmap(chain->conn, image->pixmap);
   xcb_discard_reply(chain->conn, cookie.sequence);

fail_alloc_memory:
   anv_FreeMemory(anv_device_to_handle(chain->base.device),
                  anv_device_memory_to_handle(image->memory), pAllocator);

fail_create_image:
   anv_DestroyImage(anv_device_to_handle(chain->base.device),
                    anv_image_to_handle(image->image), pAllocator);

   return result;
}

static void
x11_image_finish(struct x11_swapchain *chain,
                 const VkAllocationCallbacks* pAllocator,
                 struct x11_image *image)
{
   xcb_void_cookie_t cookie;

   cookie = xcb_sync_destroy_fence(chain->conn, image->sync_fence);
   xcb_discard_reply(chain->conn, cookie.sequence);
   xshmfence_unmap_shm(image->shm_fence);

   cookie = xcb_free_pixmap(chain->conn, image->pixmap);
   xcb_discard_reply(chain->conn, cookie.sequence);

   anv_DestroyImage(anv_device_to_handle(chain->base.device),
                    anv_image_to_handle(image->image), pAllocator);

   anv_FreeMemory(anv_device_to_handle(chain->base.device),
                  anv_device_memory_to_handle(image->memory), pAllocator);
}

static VkResult
x11_swapchain_destroy(struct anv_swapchain *anv_chain,
                      const VkAllocationCallbacks *pAllocator)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;

   for (uint32_t i = 0; i < chain->image_count; i++)
      x11_image_finish(chain, pAllocator, &chain->images[i]);

   xcb_unregister_for_special_event(chain->conn, chain->special_event);

   vk_free2(&chain->base.device->alloc, pAllocator, chain);

   return VK_SUCCESS;
}

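/* Creates the swapchain proper: allocate the chain and its trailing image
 * array in one block, register a special event queue for Present events on
 * the window, create a graphics context on the window, then initialize
 * each image.
 */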
static VkResult
x11_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
                             struct anv_device *device,
                             const VkSwapchainCreateInfoKHR *pCreateInfo,
                             const VkAllocationCallbacks* pAllocator,
                             struct anv_swapchain **swapchain_out)
{
   struct x11_swapchain *chain;
   xcb_void_cookie_t cookie;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR);

   int num_images = pCreateInfo->minImageCount;

   /* For true mailbox mode, we need at least 4 images:
    *  1) One to scan out from
    *  2) One to have queued for scan-out
    *  3) One to be currently held by the X server
    *  4) One to render to
    */
   if (pCreateInfo->presentMode == VK_PRESENT_MODE_MAILBOX_KHR)
      num_images = MAX2(num_images, 4);

   size_t size = sizeof(*chain) + num_images * sizeof(chain->images[0]);
   chain = vk_alloc2(&device->alloc, pAllocator, size, 8,
                     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (chain == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   chain->base.device = device;
   chain->base.destroy = x11_swapchain_destroy;
   chain->base.get_images = x11_get_images;
   chain->base.acquire_next_image = x11_acquire_next_image;
   chain->base.queue_present = x11_queue_present;

   chain->conn = x11_surface_get_connection(icd_surface);
   chain->window = x11_surface_get_window(icd_surface);
   chain->extent = pCreateInfo->imageExtent;
   chain->image_count = num_images;
   chain->send_sbc = 0;

   chain->event_id = xcb_generate_id(chain->conn);
   xcb_present_select_input(chain->conn, chain->event_id, chain->window,
                            XCB_PRESENT_EVENT_MASK_CONFIGURE_NOTIFY |
                            XCB_PRESENT_EVENT_MASK_COMPLETE_NOTIFY |
                            XCB_PRESENT_EVENT_MASK_IDLE_NOTIFY);

   /* Create an XCB event queue to hold present events outside of the usual
    * application event queue
    */
   chain->special_event =
      xcb_register_for_special_xge(chain->conn, &xcb_present_id,
                                   chain->event_id, NULL);

   chain->gc = xcb_generate_id(chain->conn);
   if (!chain->gc) {
      /* FINISHME: Choose a better error. */
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail_register;
   }

   cookie = xcb_create_gc(chain->conn,
                          chain->gc,
                          chain->window,
                          XCB_GC_GRAPHICS_EXPOSURES,
                          (uint32_t []) { 0 });
   xcb_discard_reply(chain->conn, cookie.sequence);

   uint32_t image = 0;
   for (; image < chain->image_count; image++) {
      result = x11_image_init(device, chain, pCreateInfo, pAllocator,
                              &chain->images[image]);
      if (result != VK_SUCCESS)
         goto fail_init_images;
   }

   *swapchain_out = &chain->base;

   return VK_SUCCESS;

fail_init_images:
   for (uint32_t j = 0; j < image; j++)
      x11_image_finish(chain, pAllocator, &chain->images[j]);

fail_register:
   xcb_unregister_for_special_event(chain->conn, chain->special_event);

   vk_free2(&device->alloc, pAllocator, chain);

   return result;
}

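/* Instance-level init, now decoupled from any anv_device: the same wsi_x11
 * interface is registered under both the XCB and the Xlib platform keys,
 * since Xlib surfaces are serviced through their underlying XCB
 * connection.
 */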
VkResult
anv_x11_init_wsi(struct anv_wsi_device *wsi_device,
                 const VkAllocationCallbacks *alloc)
{
   struct wsi_x11 *wsi;
   VkResult result;

   wsi = vk_alloc(alloc, sizeof(*wsi), 8,
                  VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!wsi) {
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail;
   }

   int ret = pthread_mutex_init(&wsi->mutex, NULL);
   if (ret != 0) {
      if (ret == ENOMEM) {
         result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      } else {
         /* FINISHME: Choose a better error. */
         result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      goto fail_alloc;
   }

   wsi->connections = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                              _mesa_key_pointer_equal);
   if (!wsi->connections) {
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail_mutex;
   }

   wsi->base.get_support = x11_surface_get_support;
   wsi->base.get_capabilities = x11_surface_get_capabilities;
   wsi->base.get_formats = x11_surface_get_formats;
   wsi->base.get_present_modes = x11_surface_get_present_modes;
   wsi->base.create_swapchain = x11_surface_create_swapchain;

   wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB] = &wsi->base;
   wsi_device->wsi[VK_ICD_WSI_PLATFORM_XLIB] = &wsi->base;

   return VK_SUCCESS;

fail_mutex:
   pthread_mutex_destroy(&wsi->mutex);
fail_alloc:
   vk_free(alloc, wsi);
fail:
   wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB] = NULL;
   wsi_device->wsi[VK_ICD_WSI_PLATFORM_XLIB] = NULL;

   return result;
}

void
anv_x11_finish_wsi(struct anv_wsi_device *wsi_device,
                   const VkAllocationCallbacks *alloc)
{
   struct wsi_x11 *wsi =
      (struct wsi_x11 *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB];

   if (wsi) {
      _mesa_hash_table_destroy(wsi->connections, NULL);

      pthread_mutex_destroy(&wsi->mutex);

      vk_free(alloc, wsi);
   }
}
968 }