radv/anv/wsi: drop unneeded parameter
mesa.git: src/intel/vulkan/anv_wsi_x11.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <X11/Xlib-xcb.h>
#include <X11/xshmfence.h>
#include <xcb/xcb.h>
#include <xcb/dri3.h>
#include <xcb/present.h>

#include "anv_wsi.h"

#include "vk_format_info.h"
#include "util/hash_table.h"

struct wsi_x11_connection {
   bool has_dri3;
   bool has_present;
};

struct wsi_x11 {
   struct anv_wsi_interface base;

   pthread_mutex_t mutex;
   /* Hash table of xcb_connection -> wsi_x11_connection mappings */
   struct hash_table *connections;
};

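/* Query the server once for DRI3 and Present extension support and cache
 * the answers in a heap-allocated wsi_x11_connection.  Returns NULL on
 * allocation failure or if either extension query fails outright.
 */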
static struct wsi_x11_connection *
wsi_x11_connection_create(struct anv_physical_device *device,
                          xcb_connection_t *conn)
{
   xcb_query_extension_cookie_t dri3_cookie, pres_cookie;
   xcb_query_extension_reply_t *dri3_reply, *pres_reply;

   struct wsi_x11_connection *wsi_conn =
      vk_alloc(&device->instance->alloc, sizeof(*wsi_conn), 8,
               VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!wsi_conn)
      return NULL;

   /* Extension names are case-sensitive; the Present extension registers
    * itself as "Present", not "PRESENT".
    */
   dri3_cookie = xcb_query_extension(conn, 4, "DRI3");
   pres_cookie = xcb_query_extension(conn, 7, "Present");

   dri3_reply = xcb_query_extension_reply(conn, dri3_cookie, NULL);
   pres_reply = xcb_query_extension_reply(conn, pres_cookie, NULL);
   if (dri3_reply == NULL || pres_reply == NULL) {
      free(dri3_reply);
      free(pres_reply);
      vk_free(&device->instance->alloc, wsi_conn);
      return NULL;
   }

   wsi_conn->has_dri3 = dri3_reply->present != 0;
   wsi_conn->has_present = pres_reply->present != 0;

   free(dri3_reply);
   free(pres_reply);

   return wsi_conn;
}

static void
wsi_x11_connection_destroy(struct anv_physical_device *device,
                           struct wsi_x11_connection *conn)
{
   vk_free(&device->instance->alloc, conn);
}

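/* Look up (or lazily create) the wsi_x11_connection for an XCB connection.
 * The per-instance hash table is guarded by wsi->mutex; the mutex is
 * dropped around the blocking extension queries, so two threads may race
 * to create the entry and the loser's copy is simply destroyed.
 */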
static struct wsi_x11_connection *
wsi_x11_get_connection(struct anv_physical_device *device,
                       xcb_connection_t *conn)
{
   struct wsi_x11 *wsi =
      (struct wsi_x11 *)device->wsi[VK_ICD_WSI_PLATFORM_XCB];

   pthread_mutex_lock(&wsi->mutex);

   struct hash_entry *entry = _mesa_hash_table_search(wsi->connections, conn);
   if (!entry) {
      /* We're about to make a bunch of blocking calls.  Let's drop the
       * mutex for now so we don't block up too badly.
       */
      pthread_mutex_unlock(&wsi->mutex);

      struct wsi_x11_connection *wsi_conn =
         wsi_x11_connection_create(device, conn);

      pthread_mutex_lock(&wsi->mutex);

      entry = _mesa_hash_table_search(wsi->connections, conn);
      if (entry) {
         /* Oops, someone raced us to it */
         wsi_x11_connection_destroy(device, wsi_conn);
      } else {
         entry = _mesa_hash_table_insert(wsi->connections, conn, wsi_conn);
      }
   }

   pthread_mutex_unlock(&wsi->mutex);

   return entry->data;
}

static const VkSurfaceFormatKHR formats[] = {
   { .format = VK_FORMAT_B8G8R8A8_SRGB, },
   { .format = VK_FORMAT_B8G8R8A8_UNORM, },
};

static const VkPresentModeKHR present_modes[] = {
   VK_PRESENT_MODE_MAILBOX_KHR,
};

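/* The lookup helpers below walk the connection's setup data: from a root
 * window to its screen, and from a visual ID to its xcb_visualtype_t (plus
 * bit depth), either on one screen, across all screens, or for the visual
 * of a specific window.
 */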
static xcb_screen_t *
get_screen_for_root(xcb_connection_t *conn, xcb_window_t root)
{
   xcb_screen_iterator_t screen_iter =
      xcb_setup_roots_iterator(xcb_get_setup(conn));

   for (; screen_iter.rem; xcb_screen_next(&screen_iter)) {
      if (screen_iter.data->root == root)
         return screen_iter.data;
   }

   return NULL;
}

static xcb_visualtype_t *
screen_get_visualtype(xcb_screen_t *screen, xcb_visualid_t visual_id,
                      unsigned *depth)
{
   xcb_depth_iterator_t depth_iter =
      xcb_screen_allowed_depths_iterator(screen);

   for (; depth_iter.rem; xcb_depth_next(&depth_iter)) {
      xcb_visualtype_iterator_t visual_iter =
         xcb_depth_visuals_iterator(depth_iter.data);

      for (; visual_iter.rem; xcb_visualtype_next(&visual_iter)) {
         if (visual_iter.data->visual_id == visual_id) {
            if (depth)
               *depth = depth_iter.data->depth;
            return visual_iter.data;
         }
      }
   }

   return NULL;
}

static xcb_visualtype_t *
connection_get_visualtype(xcb_connection_t *conn, xcb_visualid_t visual_id,
                          unsigned *depth)
{
   xcb_screen_iterator_t screen_iter =
      xcb_setup_roots_iterator(xcb_get_setup(conn));

   /* For this we have to iterate over all of the screens which is rather
    * annoying.  Fortunately, there is probably only 1.
    */
   for (; screen_iter.rem; xcb_screen_next(&screen_iter)) {
      xcb_visualtype_t *visual = screen_get_visualtype(screen_iter.data,
                                                       visual_id, depth);
      if (visual)
         return visual;
   }

   return NULL;
}

static xcb_visualtype_t *
get_visualtype_for_window(xcb_connection_t *conn, xcb_window_t window,
                          unsigned *depth)
{
   xcb_query_tree_cookie_t tree_cookie;
   xcb_get_window_attributes_cookie_t attrib_cookie;
   xcb_query_tree_reply_t *tree;
   xcb_get_window_attributes_reply_t *attrib;

   tree_cookie = xcb_query_tree(conn, window);
   attrib_cookie = xcb_get_window_attributes(conn, window);

   tree = xcb_query_tree_reply(conn, tree_cookie, NULL);
   attrib = xcb_get_window_attributes_reply(conn, attrib_cookie, NULL);
   if (attrib == NULL || tree == NULL) {
      free(attrib);
      free(tree);
      return NULL;
   }

   xcb_window_t root = tree->root;
   xcb_visualid_t visual_id = attrib->visual;
   free(attrib);
   free(tree);

   xcb_screen_t *screen = get_screen_for_root(conn, root);
   if (screen == NULL)
      return NULL;

   return screen_get_visualtype(screen, visual_id, depth);
}

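/* A visual has alpha if its depth covers more bits than the combined
 * R/G/B masks, e.g. a 32-bit visual with 8 bits left over after RGB888.
 */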
static bool
visual_has_alpha(xcb_visualtype_t *visual, unsigned depth)
{
   uint32_t rgb_mask = visual->red_mask |
                       visual->green_mask |
                       visual->blue_mask;

   uint32_t all_mask = 0xffffffff >> (32 - depth);

   /* Do we have bits left over after RGB? */
   return (all_mask & ~rgb_mask) != 0;
}

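/* Presentation support requires DRI3 and a 24- or 32-bit visual.  The
 * queue family index is not consulted; the answer here does not depend
 * on it.
 */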
VkBool32 anv_GetPhysicalDeviceXcbPresentationSupportKHR(
    VkPhysicalDevice                            physicalDevice,
    uint32_t                                    queueFamilyIndex,
    xcb_connection_t*                           connection,
    xcb_visualid_t                              visual_id)
{
   ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);

   struct wsi_x11_connection *wsi_conn =
      wsi_x11_get_connection(device, connection);

   /* wsi_x11_get_connection returns NULL on allocation failure; guard
    * against it instead of dereferencing NULL below.
    */
   if (!wsi_conn)
      return false;

   if (!wsi_conn->has_dri3) {
      fprintf(stderr, "vulkan: No DRI3 support\n");
      return false;
   }

   unsigned visual_depth;
   if (!connection_get_visualtype(connection, visual_id, &visual_depth))
      return false;

   if (visual_depth != 24 && visual_depth != 32)
      return false;

   return true;
}

VkBool32 anv_GetPhysicalDeviceXlibPresentationSupportKHR(
    VkPhysicalDevice                            physicalDevice,
    uint32_t                                    queueFamilyIndex,
    Display*                                    dpy,
    VisualID                                    visualID)
{
   return anv_GetPhysicalDeviceXcbPresentationSupportKHR(physicalDevice,
                                                         queueFamilyIndex,
                                                         XGetXCBConnection(dpy),
                                                         visualID);
}

static xcb_connection_t*
x11_surface_get_connection(VkIcdSurfaceBase *icd_surface)
{
   if (icd_surface->platform == VK_ICD_WSI_PLATFORM_XLIB)
      return XGetXCBConnection(((VkIcdSurfaceXlib *)icd_surface)->dpy);
   else
      return ((VkIcdSurfaceXcb *)icd_surface)->connection;
}

static xcb_window_t
x11_surface_get_window(VkIcdSurfaceBase *icd_surface)
{
   if (icd_surface->platform == VK_ICD_WSI_PLATFORM_XLIB)
      return ((VkIcdSurfaceXlib *)icd_surface)->window;
   else
      return ((VkIcdSurfaceXcb *)icd_surface)->window;
}

static VkResult
x11_surface_get_support(VkIcdSurfaceBase *icd_surface,
                        struct anv_physical_device *device,
                        uint32_t queueFamilyIndex,
                        VkBool32* pSupported)
{
   xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
   xcb_window_t window = x11_surface_get_window(icd_surface);

   struct wsi_x11_connection *wsi_conn =
      wsi_x11_get_connection(device, conn);
   if (!wsi_conn)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   if (!wsi_conn->has_dri3) {
      fprintf(stderr, "vulkan: No DRI3 support\n");
      *pSupported = false;
      return VK_SUCCESS;
   }

   unsigned visual_depth;
   if (!get_visualtype_for_window(conn, window, &visual_depth)) {
      *pSupported = false;
      return VK_SUCCESS;
   }

   if (visual_depth != 24 && visual_depth != 32) {
      *pSupported = false;
      return VK_SUCCESS;
   }

   *pSupported = true;
   return VK_SUCCESS;
}

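/* Surface capabilities come from the current window geometry.  The
 * get_geometry request is sent first so that its round trip overlaps the
 * round trips needed to look up the window's visual.
 */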
static VkResult
x11_surface_get_capabilities(VkIcdSurfaceBase *icd_surface,
                             struct anv_physical_device *device,
                             VkSurfaceCapabilitiesKHR *caps)
{
   xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
   xcb_window_t window = x11_surface_get_window(icd_surface);
   xcb_get_geometry_cookie_t geom_cookie;
   xcb_generic_error_t *err;
   xcb_get_geometry_reply_t *geom;
   unsigned visual_depth;

   geom_cookie = xcb_get_geometry(conn, window);

   /* This does a round-trip.  This is why we do get_geometry first and
    * wait to read the reply until after we have a visual.
    */
   xcb_visualtype_t *visual =
      get_visualtype_for_window(conn, window, &visual_depth);

   geom = xcb_get_geometry_reply(conn, geom_cookie, &err);
   if (geom) {
      VkExtent2D extent = { geom->width, geom->height };
      caps->currentExtent = extent;
      caps->minImageExtent = extent;
      caps->maxImageExtent = extent;
   } else {
      /* This can happen if the client didn't wait for the configure event
       * to come back from the compositor.  In that case, we don't know the
       * size of the window so we just return valid "I don't know" stuff.
       */
      caps->currentExtent = (VkExtent2D) { -1, -1 };
      caps->minImageExtent = (VkExtent2D) { 1, 1 };
      caps->maxImageExtent = (VkExtent2D) { INT16_MAX, INT16_MAX };
   }
   free(err);
   free(geom);

   /* Guard against a failed visual lookup; without the visual we cannot
    * tell whether the window has alpha, and visual_has_alpha() would
    * dereference NULL.
    */
   if (visual == NULL)
      return VK_ERROR_SURFACE_LOST_KHR;

   if (visual_has_alpha(visual, visual_depth)) {
      caps->supportedCompositeAlpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR |
                                      VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR;
   } else {
      caps->supportedCompositeAlpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR |
                                      VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
   }

   caps->minImageCount = 2;
   caps->maxImageCount = 4;
   caps->supportedTransforms = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
   caps->currentTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
   caps->maxImageArrayLayers = 1;
   caps->supportedUsageFlags =
      VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
      VK_IMAGE_USAGE_SAMPLED_BIT |
      VK_IMAGE_USAGE_TRANSFER_DST_BIT |
      VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;

   return VK_SUCCESS;
}

static VkResult
x11_surface_get_formats(VkIcdSurfaceBase *surface,
                        struct anv_physical_device *device,
                        uint32_t *pSurfaceFormatCount,
                        VkSurfaceFormatKHR *pSurfaceFormats)
{
   if (pSurfaceFormats == NULL) {
      *pSurfaceFormatCount = ARRAY_SIZE(formats);
      return VK_SUCCESS;
   }

   assert(*pSurfaceFormatCount >= ARRAY_SIZE(formats));
   /* Copy only as many entries as we actually have; copying
    * *pSurfaceFormatCount entries would read past the end of formats[].
    */
   typed_memcpy(pSurfaceFormats, formats, ARRAY_SIZE(formats));
   *pSurfaceFormatCount = ARRAY_SIZE(formats);

   return VK_SUCCESS;
}

static VkResult
x11_surface_get_present_modes(VkIcdSurfaceBase *surface,
                              struct anv_physical_device *device,
                              uint32_t *pPresentModeCount,
                              VkPresentModeKHR *pPresentModes)
{
   if (pPresentModes == NULL) {
      *pPresentModeCount = ARRAY_SIZE(present_modes);
      return VK_SUCCESS;
   }

   assert(*pPresentModeCount >= ARRAY_SIZE(present_modes));
   /* As above, copy only the entries we have. */
   typed_memcpy(pPresentModes, present_modes, ARRAY_SIZE(present_modes));
   *pPresentModeCount = ARRAY_SIZE(present_modes);

   return VK_SUCCESS;
}

static VkResult
x11_surface_create_swapchain(VkIcdSurfaceBase *surface,
                             struct anv_device *device,
                             const VkSwapchainCreateInfoKHR* pCreateInfo,
                             const VkAllocationCallbacks* pAllocator,
                             struct anv_swapchain **swapchain);

VkResult anv_CreateXcbSurfaceKHR(
    VkInstance                                  _instance,
    const VkXcbSurfaceCreateInfoKHR*            pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkSurfaceKHR*                               pSurface)
{
   ANV_FROM_HANDLE(anv_instance, instance, _instance);

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR);

   VkIcdSurfaceXcb *surface;

   surface = vk_alloc2(&instance->alloc, pAllocator, sizeof *surface, 8,
                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (surface == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   surface->base.platform = VK_ICD_WSI_PLATFORM_XCB;
   surface->connection = pCreateInfo->connection;
   surface->window = pCreateInfo->window;

   *pSurface = _VkIcdSurfaceBase_to_handle(&surface->base);

   return VK_SUCCESS;
}

VkResult anv_CreateXlibSurfaceKHR(
    VkInstance                                  _instance,
    const VkXlibSurfaceCreateInfoKHR*           pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkSurfaceKHR*                               pSurface)
{
   ANV_FROM_HANDLE(anv_instance, instance, _instance);

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR);

   VkIcdSurfaceXlib *surface;

   surface = vk_alloc2(&instance->alloc, pAllocator, sizeof *surface, 8,
                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (surface == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   surface->base.platform = VK_ICD_WSI_PLATFORM_XLIB;
   surface->dpy = pCreateInfo->dpy;
   surface->window = pCreateInfo->window;

   *pSurface = _VkIcdSurfaceBase_to_handle(&surface->base);

   return VK_SUCCESS;
}

struct x11_image {
   struct anv_image *                        image;
   struct anv_device_memory *                memory;
   xcb_pixmap_t                              pixmap;
   bool                                      busy;
   struct xshmfence *                        shm_fence;
   uint32_t                                  sync_fence;
};

struct x11_swapchain {
   struct anv_swapchain                      base;

   xcb_connection_t *                        conn;
   xcb_window_t                              window;
   xcb_gc_t                                  gc;
   VkExtent2D                                extent;
   uint32_t                                  image_count;

   xcb_present_event_t                       event_id;
   xcb_special_event_t *                     special_event;
   uint64_t                                  send_sbc;
   uint32_t                                  stamp;

   struct x11_image                          images[0];
};

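/* Standard two-call pattern: report the image count when the output array
 * is NULL, otherwise fill in the swapchain image handles.
 */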
static VkResult
x11_get_images(struct anv_swapchain *anv_chain,
               uint32_t* pCount, VkImage *pSwapchainImages)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;

   if (pSwapchainImages == NULL) {
      *pCount = chain->image_count;
      return VK_SUCCESS;
   }

   assert(chain->image_count <= *pCount);
   for (uint32_t i = 0; i < chain->image_count; i++)
      pSwapchainImages[i] = anv_image_to_handle(chain->images[i].image);

   *pCount = chain->image_count;

   return VK_SUCCESS;
}

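/* Process one event from the swapchain's special event queue.  A
 * ConfigureNotify whose size no longer matches the swapchain makes the
 * swapchain out of date; an IdleNotify marks the matching image as
 * reusable.
 */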
static VkResult
x11_handle_dri3_present_event(struct x11_swapchain *chain,
                              xcb_present_generic_event_t *event)
{
   switch (event->evtype) {
   case XCB_PRESENT_CONFIGURE_NOTIFY: {
      xcb_present_configure_notify_event_t *config = (void *) event;

      if (config->width != chain->extent.width ||
          config->height != chain->extent.height)
         return vk_error(VK_ERROR_OUT_OF_DATE_KHR);

      break;
   }

   case XCB_PRESENT_EVENT_IDLE_NOTIFY: {
      xcb_present_idle_notify_event_t *idle = (void *) event;

      for (unsigned i = 0; i < chain->image_count; i++) {
         if (chain->images[i].pixmap == idle->pixmap) {
            chain->images[i].busy = false;
            break;
         }
      }

      break;
   }

   case XCB_PRESENT_COMPLETE_NOTIFY:
   default:
      break;
   }

   return VK_SUCCESS;
}

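/* Block until some image is idle: scan for a non-busy image, otherwise
 * wait on the special event queue and retry.  Note that the timeout
 * parameter is not honored here; the wait is effectively infinite.
 */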
static VkResult
x11_acquire_next_image(struct anv_swapchain *anv_chain,
                       uint64_t timeout,
                       VkSemaphore semaphore,
                       uint32_t *image_index)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;

   while (1) {
      for (uint32_t i = 0; i < chain->image_count; i++) {
         if (!chain->images[i].busy) {
            /* We found a non-busy image */
            xshmfence_await(chain->images[i].shm_fence);
            *image_index = i;
            chain->images[i].busy = true;
            return VK_SUCCESS;
         }
      }

      xcb_flush(chain->conn);
      xcb_generic_event_t *event =
         xcb_wait_for_special_event(chain->conn, chain->special_event);
      if (!event)
         return vk_error(VK_ERROR_OUT_OF_DATE_KHR);

      VkResult result = x11_handle_dri3_present_event(chain, (void *)event);
      free(event);
      if (result != VK_SUCCESS)
         return result;
   }
}

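/* Hand an image to the X server with PresentPixmap.  The shm fence is
 * reset first and the image marked busy; the fence is triggered again and
 * an IdleNotify delivered once the server is done with the pixmap.
 */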
static VkResult
x11_queue_present(struct anv_swapchain *anv_chain,
                  uint32_t image_index)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;

   assert(image_index < chain->image_count);

   struct x11_image *image = &chain->images[image_index];

   uint32_t options = XCB_PRESENT_OPTION_NONE;

   int64_t target_msc = 0;
   int64_t divisor = 0;
   int64_t remainder = 0;

   options |= XCB_PRESENT_OPTION_ASYNC;

   xshmfence_reset(image->shm_fence);

   ++chain->send_sbc;
   xcb_void_cookie_t cookie =
      xcb_present_pixmap(chain->conn,
                         chain->window,
                         image->pixmap,
                         (uint32_t) chain->send_sbc,
                         0,        /* valid */
                         0,        /* update */
                         0,        /* x_off */
                         0,        /* y_off */
                         XCB_NONE, /* target_crtc */
                         XCB_NONE,
                         image->sync_fence,
                         options,
                         target_msc,
                         divisor,
                         remainder, 0, NULL);
   xcb_discard_reply(chain->conn, cookie.sequence);
   image->busy = true;

   xcb_flush(chain->conn);

   return VK_SUCCESS;
}

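/* Back one swapchain image with an X11 pixmap: create the VkImage,
 * allocate and bind its memory, set X tiling on the BO, export it as a
 * prime fd, and wrap that fd in a DRI3 pixmap plus an xshmfence/sync
 * fence pair for idle tracking.
 */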
static VkResult
x11_image_init(struct anv_device *device, struct x11_swapchain *chain,
               const VkSwapchainCreateInfoKHR *pCreateInfo,
               const VkAllocationCallbacks* pAllocator,
               struct x11_image *image)
{
   xcb_void_cookie_t cookie;
   VkResult result;

   VkImage image_h;
   result = anv_image_create(anv_device_to_handle(device),
      &(struct anv_image_create_info) {
         .isl_tiling_flags = ISL_TILING_X_BIT,
         .stride = 0,
         .vk_info =
      &(VkImageCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
         .imageType = VK_IMAGE_TYPE_2D,
         .format = pCreateInfo->imageFormat,
         .extent = {
            .width = pCreateInfo->imageExtent.width,
            .height = pCreateInfo->imageExtent.height,
            .depth = 1
         },
         .mipLevels = 1,
         .arrayLayers = 1,
         .samples = 1,
         /* FIXME: Need a way to use X tiling to allow scanout */
         .tiling = VK_IMAGE_TILING_OPTIMAL,
         .usage = (pCreateInfo->imageUsage |
                   VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT),
         .flags = 0,
      }},
      NULL,
      &image_h);
   if (result != VK_SUCCESS)
      return result;

   image->image = anv_image_from_handle(image_h);
   assert(vk_format_is_color(image->image->vk_format));

   VkDeviceMemory memory_h;
   result = anv_AllocateMemory(anv_device_to_handle(device),
      &(VkMemoryAllocateInfo) {
         .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
         .allocationSize = image->image->size,
         .memoryTypeIndex = 0,
      },
      NULL /* XXX: pAllocator */,
      &memory_h);
   if (result != VK_SUCCESS)
      goto fail_create_image;

   image->memory = anv_device_memory_from_handle(memory_h);
   image->memory->bo.is_winsys_bo = true;

   anv_BindImageMemory(VK_NULL_HANDLE, image_h, memory_h, 0);

   struct anv_surface *surface = &image->image->color_surface;
   assert(surface->isl.tiling == ISL_TILING_X);

   int ret = anv_gem_set_tiling(device, image->memory->bo.gem_handle,
                                surface->isl.row_pitch, I915_TILING_X);
   if (ret) {
      /* FINISHME: Choose a better error. */
      result = vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY,
                         "set_tiling failed: %m");
      goto fail_alloc_memory;
   }

   int fd = anv_gem_handle_to_fd(device, image->memory->bo.gem_handle);
   if (fd == -1) {
      /* FINISHME: Choose a better error. */
      result = vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY,
                         "handle_to_fd failed: %m");
      goto fail_alloc_memory;
   }

   uint32_t bpp = 32;
   uint32_t depth = 24;
   image->pixmap = xcb_generate_id(chain->conn);

   cookie =
      xcb_dri3_pixmap_from_buffer_checked(chain->conn,
                                          image->pixmap,
                                          chain->window,
                                          image->image->size,
                                          pCreateInfo->imageExtent.width,
                                          pCreateInfo->imageExtent.height,
                                          surface->isl.row_pitch,
                                          depth, bpp, fd);
   xcb_discard_reply(chain->conn, cookie.sequence);

   /* Set result on these failure paths; otherwise we would return
    * VK_SUCCESS from a failed init.
    */
   int fence_fd = xshmfence_alloc_shm();
   if (fence_fd < 0) {
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail_pixmap;
   }

   image->shm_fence = xshmfence_map_shm(fence_fd);
   if (image->shm_fence == NULL) {
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail_shmfence_alloc;
   }

   image->sync_fence = xcb_generate_id(chain->conn);
   xcb_dri3_fence_from_fd(chain->conn,
                          image->pixmap,
                          image->sync_fence,
                          false,
                          fence_fd);

   image->busy = false;
   xshmfence_trigger(image->shm_fence);

   return VK_SUCCESS;

fail_shmfence_alloc:
   close(fence_fd);

fail_pixmap:
   cookie = xcb_free_pixmap(chain->conn, image->pixmap);
   xcb_discard_reply(chain->conn, cookie.sequence);

fail_alloc_memory:
   anv_FreeMemory(anv_device_to_handle(chain->base.device),
                  anv_device_memory_to_handle(image->memory), pAllocator);

fail_create_image:
   anv_DestroyImage(anv_device_to_handle(chain->base.device),
                    anv_image_to_handle(image->image), pAllocator);

   return result;
}

static void
x11_image_finish(struct x11_swapchain *chain,
                 const VkAllocationCallbacks* pAllocator,
                 struct x11_image *image)
{
   xcb_void_cookie_t cookie;

   cookie = xcb_sync_destroy_fence(chain->conn, image->sync_fence);
   xcb_discard_reply(chain->conn, cookie.sequence);
   xshmfence_unmap_shm(image->shm_fence);

   cookie = xcb_free_pixmap(chain->conn, image->pixmap);
   xcb_discard_reply(chain->conn, cookie.sequence);

   anv_DestroyImage(anv_device_to_handle(chain->base.device),
                    anv_image_to_handle(image->image), pAllocator);

   anv_FreeMemory(anv_device_to_handle(chain->base.device),
                  anv_device_memory_to_handle(image->memory), pAllocator);
}

static VkResult
x11_swapchain_destroy(struct anv_swapchain *anv_chain,
                      const VkAllocationCallbacks *pAllocator)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;

   for (uint32_t i = 0; i < chain->image_count; i++)
      x11_image_finish(chain, pAllocator, &chain->images[i]);

   xcb_unregister_for_special_event(chain->conn, chain->special_event);

   vk_free2(&chain->base.device->alloc, pAllocator, chain);

   return VK_SUCCESS;
}

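/* Build the swapchain: allocate the chain and its trailing image array,
 * register a special event queue for Present events on the window, create
 * a GC, and initialize each image.
 */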
static VkResult
x11_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
                             struct anv_device *device,
                             const VkSwapchainCreateInfoKHR *pCreateInfo,
                             const VkAllocationCallbacks* pAllocator,
                             struct anv_swapchain **swapchain_out)
{
   struct x11_swapchain *chain;
   xcb_void_cookie_t cookie;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR);

   int num_images = pCreateInfo->minImageCount;

   /* For true mailbox mode, we need at least 4 images:
    *  1) One to scan out from
    *  2) One to have queued for scan-out
    *  3) One to be currently held by the X server
    *  4) One to render to
    */
   if (pCreateInfo->presentMode == VK_PRESENT_MODE_MAILBOX_KHR)
      num_images = MAX2(num_images, 4);

   size_t size = sizeof(*chain) + num_images * sizeof(chain->images[0]);
   chain = vk_alloc2(&device->alloc, pAllocator, size, 8,
                     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (chain == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   chain->base.device = device;
   chain->base.destroy = x11_swapchain_destroy;
   chain->base.get_images = x11_get_images;
   chain->base.acquire_next_image = x11_acquire_next_image;
   chain->base.queue_present = x11_queue_present;

   chain->conn = x11_surface_get_connection(icd_surface);
   chain->window = x11_surface_get_window(icd_surface);
   chain->extent = pCreateInfo->imageExtent;
   chain->image_count = num_images;
   chain->send_sbc = 0;

   chain->event_id = xcb_generate_id(chain->conn);
   xcb_present_select_input(chain->conn, chain->event_id, chain->window,
                            XCB_PRESENT_EVENT_MASK_CONFIGURE_NOTIFY |
                            XCB_PRESENT_EVENT_MASK_COMPLETE_NOTIFY |
                            XCB_PRESENT_EVENT_MASK_IDLE_NOTIFY);

   /* Create an XCB event queue to hold present events outside of the usual
    * application event queue
    */
   chain->special_event =
      xcb_register_for_special_xge(chain->conn, &xcb_present_id,
                                   chain->event_id, NULL);

   chain->gc = xcb_generate_id(chain->conn);
   if (!chain->gc) {
      /* FINISHME: Choose a better error. */
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail_register;
   }

   cookie = xcb_create_gc(chain->conn,
                          chain->gc,
                          chain->window,
                          XCB_GC_GRAPHICS_EXPOSURES,
                          (uint32_t []) { 0 });
   xcb_discard_reply(chain->conn, cookie.sequence);

   uint32_t image = 0;
   for (; image < chain->image_count; image++) {
      result = x11_image_init(device, chain, pCreateInfo, pAllocator,
                              &chain->images[image]);
      if (result != VK_SUCCESS)
         goto fail_init_images;
   }

   *swapchain_out = &chain->base;

   return VK_SUCCESS;

fail_init_images:
   for (uint32_t j = 0; j < image; j++)
      x11_image_finish(chain, pAllocator, &chain->images[j]);

fail_register:
   xcb_unregister_for_special_event(chain->conn, chain->special_event);

   vk_free2(&device->alloc, pAllocator, chain);

   return result;
}

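/* Register the X11 WSI backend for this physical device.  XCB and Xlib
 * surfaces share the same implementation, so both platform slots point at
 * the same wsi_x11 instance.
 */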
VkResult
anv_x11_init_wsi(struct anv_physical_device *device)
{
   struct wsi_x11 *wsi;
   VkResult result;

   wsi = vk_alloc(&device->instance->alloc, sizeof(*wsi), 8,
                  VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!wsi) {
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail;
   }

   int ret = pthread_mutex_init(&wsi->mutex, NULL);
   if (ret != 0) {
      if (ret == ENOMEM) {
         result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      } else {
         /* FINISHME: Choose a better error. */
         result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      goto fail_alloc;
   }

   wsi->connections = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                              _mesa_key_pointer_equal);
   if (!wsi->connections) {
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail_mutex;
   }

   wsi->base.get_support = x11_surface_get_support;
   wsi->base.get_capabilities = x11_surface_get_capabilities;
   wsi->base.get_formats = x11_surface_get_formats;
   wsi->base.get_present_modes = x11_surface_get_present_modes;
   wsi->base.create_swapchain = x11_surface_create_swapchain;

   device->wsi[VK_ICD_WSI_PLATFORM_XCB] = &wsi->base;
   device->wsi[VK_ICD_WSI_PLATFORM_XLIB] = &wsi->base;

   return VK_SUCCESS;

fail_mutex:
   pthread_mutex_destroy(&wsi->mutex);
fail_alloc:
   vk_free(&device->instance->alloc, wsi);
fail:
   device->wsi[VK_ICD_WSI_PLATFORM_XCB] = NULL;
   device->wsi[VK_ICD_WSI_PLATFORM_XLIB] = NULL;

   return result;
}

void
anv_x11_finish_wsi(struct anv_physical_device *device)
{
   struct wsi_x11 *wsi =
      (struct wsi_x11 *)device->wsi[VK_ICD_WSI_PLATFORM_XCB];

   if (wsi) {
      _mesa_hash_table_destroy(wsi->connections, NULL);

      pthread_mutex_destroy(&wsi->mutex);

      vk_free(&device->instance->alloc, wsi);
   }
}