/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <xcb/xcb.h>
#include <xcb/dri3.h>
#include <xcb/present.h>

#include "anv_wsi.h"

#include "util/hash_table.h"

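/* Cached per-connection information: whether the X server behind a given
 * xcb_connection_t advertises the DRI3 and Present extensions.
 */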
struct wsi_x11_connection {
   bool has_dri3;
   bool has_present;
};

struct wsi_x11 {
   struct anv_wsi_interface base;

   pthread_mutex_t mutex;
   /* Hash table of xcb_connection -> wsi_x11_connection mappings */
   struct hash_table *connections;
};

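/* Queries the DRI3 and PRESENT extensions.  Both requests are sent before
 * either reply is read, so the two queries share a single round trip to
 * the X server.
 */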
static struct wsi_x11_connection *
wsi_x11_connection_create(struct anv_instance *instance, xcb_connection_t *conn)
{
   xcb_query_extension_cookie_t dri3_cookie, pres_cookie;
   xcb_query_extension_reply_t *dri3_reply, *pres_reply;

   struct wsi_x11_connection *wsi_conn =
      anv_alloc(&instance->alloc, sizeof(*wsi_conn), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!wsi_conn)
      return NULL;

   dri3_cookie = xcb_query_extension(conn, 4, "DRI3");
   pres_cookie = xcb_query_extension(conn, 7, "PRESENT");

   dri3_reply = xcb_query_extension_reply(conn, dri3_cookie, NULL);
   pres_reply = xcb_query_extension_reply(conn, pres_cookie, NULL);
   if (dri3_reply == NULL || pres_reply == NULL) {
      free(dri3_reply);
      free(pres_reply);
      anv_free(&instance->alloc, wsi_conn);
      return NULL;
   }

   wsi_conn->has_dri3 = dri3_reply->present != 0;
   wsi_conn->has_present = pres_reply->present != 0;

   free(dri3_reply);
   free(pres_reply);

   return wsi_conn;
}

static void
wsi_x11_connection_destroy(struct anv_instance *instance,
                           struct wsi_x11_connection *conn)
{
   anv_free(&instance->alloc, conn);
}

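/* Returns the cached wsi_x11_connection for @conn, creating it on first
 * use.  The mutex is dropped around the blocking extension queries, so two
 * threads may race to create the entry; the loser's copy is destroyed.
 */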
static struct wsi_x11_connection *
wsi_x11_get_connection(struct anv_instance *instance, xcb_connection_t *conn)
{
   struct wsi_x11 *wsi =
      (struct wsi_x11 *)instance->wsi[VK_ICD_WSI_PLATFORM_XCB];

   pthread_mutex_lock(&wsi->mutex);

   struct hash_entry *entry = _mesa_hash_table_search(wsi->connections, conn);
   if (!entry) {
      /* We're about to make a bunch of blocking calls.  Let's drop the
       * mutex for now so we don't block up too badly.
       */
      pthread_mutex_unlock(&wsi->mutex);

      struct wsi_x11_connection *wsi_conn =
         wsi_x11_connection_create(instance, conn);
      if (!wsi_conn)
         return NULL;

      pthread_mutex_lock(&wsi->mutex);

      entry = _mesa_hash_table_search(wsi->connections, conn);
      if (entry) {
         /* Oops, someone raced us to it */
         wsi_x11_connection_destroy(instance, wsi_conn);
      } else {
         entry = _mesa_hash_table_insert(wsi->connections, conn, wsi_conn);
      }
   }

   pthread_mutex_unlock(&wsi->mutex);

   return entry->data;
}

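/* The only surface format and present mode this backend advertises.  Note
 * that the CopyArea present path below blits immediately rather than
 * keeping a mailbox queue, so MAILBOX is a closest-match label more than a
 * strict guarantee.
 */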
static const VkSurfaceFormatKHR formats[] = {
   { .format = VK_FORMAT_B8G8R8A8_SRGB, },
};

static const VkPresentModeKHR present_modes[] = {
   VK_PRESENT_MODE_MAILBOX_KHR,
};

static xcb_screen_t *
get_screen_for_root(xcb_connection_t *conn, xcb_window_t root)
{
   xcb_screen_iterator_t screen_iter =
      xcb_setup_roots_iterator(xcb_get_setup(conn));

   for (; screen_iter.rem; xcb_screen_next(&screen_iter)) {
      if (screen_iter.data->root == root)
         return screen_iter.data;
   }

   return NULL;
}

static xcb_visualtype_t *
screen_get_visualtype(xcb_screen_t *screen, xcb_visualid_t visual_id,
                      unsigned *depth)
{
   xcb_depth_iterator_t depth_iter =
      xcb_screen_allowed_depths_iterator(screen);

   for (; depth_iter.rem; xcb_depth_next(&depth_iter)) {
      xcb_visualtype_iterator_t visual_iter =
         xcb_depth_visuals_iterator(depth_iter.data);

      for (; visual_iter.rem; xcb_visualtype_next(&visual_iter)) {
         if (visual_iter.data->visual_id == visual_id) {
            if (depth)
               *depth = depth_iter.data->depth;
            return visual_iter.data;
         }
      }
   }

   return NULL;
}

static xcb_visualtype_t *
connection_get_visualtype(xcb_connection_t *conn, xcb_visualid_t visual_id,
                          unsigned *depth)
{
   xcb_screen_iterator_t screen_iter =
      xcb_setup_roots_iterator(xcb_get_setup(conn));

   /* For this we have to iterate over all of the screens, which is rather
    * annoying.  Fortunately, there is probably only one.
    */
   for (; screen_iter.rem; xcb_screen_next(&screen_iter)) {
      xcb_visualtype_t *visual = screen_get_visualtype(screen_iter.data,
                                                       visual_id, depth);
      if (visual)
         return visual;
   }

   return NULL;
}

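/* Resolves the visual for @window: QueryTree gives the root window (which
 * identifies the screen) and GetWindowAttributes gives the visual id.
 * Both requests are issued up front so they share one round trip.
 */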
static xcb_visualtype_t *
get_visualtype_for_window(xcb_connection_t *conn, xcb_window_t window,
                          unsigned *depth)
{
   xcb_query_tree_cookie_t tree_cookie;
   xcb_get_window_attributes_cookie_t attrib_cookie;
   xcb_query_tree_reply_t *tree;
   xcb_get_window_attributes_reply_t *attrib;

   tree_cookie = xcb_query_tree(conn, window);
   attrib_cookie = xcb_get_window_attributes(conn, window);

   tree = xcb_query_tree_reply(conn, tree_cookie, NULL);
   attrib = xcb_get_window_attributes_reply(conn, attrib_cookie, NULL);
   if (attrib == NULL || tree == NULL) {
      free(attrib);
      free(tree);
      return NULL;
   }

   xcb_window_t root = tree->root;
   xcb_visualid_t visual_id = attrib->visual;
   free(attrib);
   free(tree);

   xcb_screen_t *screen = get_screen_for_root(conn, root);
   if (screen == NULL)
      return NULL;

   return screen_get_visualtype(screen, visual_id, depth);
}

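/* A visual has alpha iff its depth covers more bits than the RGB channel
 * masks.  E.g. a depth-32 visual whose masks cover bits 0-23 leaves 8 bits
 * for alpha, while a depth-24 visual with the same masks leaves none.
 */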
static bool
visual_has_alpha(xcb_visualtype_t *visual, unsigned depth)
{
   uint32_t rgb_mask = visual->red_mask |
                       visual->green_mask |
                       visual->blue_mask;

   uint32_t all_mask = 0xffffffff >> (32 - depth);

   /* Do we have bits left over after RGB? */
   return (all_mask & ~rgb_mask) != 0;
}

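/* Presentation requires DRI3 for buffer sharing plus a 24- or 32-bit
 * visual, the only depths the pixmap import path below supports.
 */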
VkBool32 anv_GetPhysicalDeviceXcbPresentationSupportKHR(
    VkPhysicalDevice                            physicalDevice,
    uint32_t                                    queueFamilyIndex,
    xcb_connection_t*                           connection,
    xcb_visualid_t                              visual_id)
{
   ANV_FROM_HANDLE(anv_physical_device, device, physicalDevice);

   struct wsi_x11_connection *wsi_conn =
      wsi_x11_get_connection(device->instance, connection);
   if (!wsi_conn)
      return false;

   if (!wsi_conn->has_dri3) {
      fprintf(stderr, "vulkan: No DRI3 support\n");
      return false;
   }

   unsigned visual_depth;
   if (!connection_get_visualtype(connection, visual_id, &visual_depth))
      return false;

   if (visual_depth != 24 && visual_depth != 32)
      return false;

   return true;
}

static VkResult
x11_surface_get_support(VkIcdSurfaceBase *icd_surface,
                        struct anv_physical_device *device,
                        uint32_t queueFamilyIndex,
                        VkBool32* pSupported)
{
   VkIcdSurfaceXcb *surface = (VkIcdSurfaceXcb *)icd_surface;

   struct wsi_x11_connection *wsi_conn =
      wsi_x11_get_connection(device->instance, surface->connection);
   if (!wsi_conn)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   if (!wsi_conn->has_dri3) {
      fprintf(stderr, "vulkan: No DRI3 support\n");
      *pSupported = false;
      return VK_SUCCESS;
   }

   unsigned visual_depth;
   if (!get_visualtype_for_window(surface->connection, surface->window,
                                  &visual_depth)) {
      *pSupported = false;
      return VK_SUCCESS;
   }

   if (visual_depth != 24 && visual_depth != 32) {
      *pSupported = false;
      return VK_SUCCESS;
   }

   *pSupported = true;
   return VK_SUCCESS;
}

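/* Surface capability query.  If the window size is unavailable,
 * currentExtent is set to (0xffffffff, 0xffffffff), the Vulkan sentinel
 * for "extent is determined by the swapchain".
 */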
static VkResult
x11_surface_get_capabilities(VkIcdSurfaceBase *icd_surface,
                             struct anv_physical_device *device,
                             VkSurfaceCapabilitiesKHR *caps)
{
   VkIcdSurfaceXcb *surface = (VkIcdSurfaceXcb *)icd_surface;
   xcb_get_geometry_cookie_t geom_cookie;
   xcb_generic_error_t *err;
   xcb_get_geometry_reply_t *geom;
   unsigned visual_depth;

   geom_cookie = xcb_get_geometry(surface->connection, surface->window);

   /* This does a round-trip.  This is why we do get_geometry first and
    * wait to read the reply until after we have a visual.
    */
   xcb_visualtype_t *visual =
      get_visualtype_for_window(surface->connection, surface->window,
                                &visual_depth);

   geom = xcb_get_geometry_reply(surface->connection, geom_cookie, &err);
   if (geom) {
      VkExtent2D extent = { geom->width, geom->height };
      caps->currentExtent = extent;
      caps->minImageExtent = extent;
      caps->maxImageExtent = extent;
   } else {
      /* This can happen if the client didn't wait for the configure event
       * to come back from the compositor.  In that case, we don't know the
       * size of the window so we just return valid "I don't know" stuff.
       */
      caps->currentExtent = (VkExtent2D) { UINT32_MAX, UINT32_MAX };
      caps->minImageExtent = (VkExtent2D) { 1, 1 };
      caps->maxImageExtent = (VkExtent2D) { INT16_MAX, INT16_MAX };
   }
   free(err);
   free(geom);

   if (visual == NULL) {
      /* Without a visual we can't tell whether the window has alpha. */
      return vk_error(VK_ERROR_SURFACE_LOST_KHR);
   }

   if (visual_has_alpha(visual, visual_depth)) {
      caps->supportedCompositeAlpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR |
                                      VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR;
   } else {
      caps->supportedCompositeAlpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR |
                                      VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
   }

   caps->minImageCount = 2;
   caps->maxImageCount = 4;
   caps->supportedTransforms = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
   caps->currentTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
   caps->maxImageArrayLayers = 1;
   caps->supportedUsageFlags =
      VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
      VK_IMAGE_USAGE_SAMPLED_BIT |
      VK_IMAGE_USAGE_TRANSFER_DST_BIT |
      VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;

   return VK_SUCCESS;
}

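/* Standard Vulkan two-call idiom: call with a NULL array to get the count,
 * then call again with an array at least that large (asserted below).
 */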
static VkResult
x11_surface_get_formats(VkIcdSurfaceBase *surface,
                        struct anv_physical_device *device,
                        uint32_t *pSurfaceFormatCount,
                        VkSurfaceFormatKHR *pSurfaceFormats)
{
   if (pSurfaceFormats == NULL) {
      *pSurfaceFormatCount = ARRAY_SIZE(formats);
      return VK_SUCCESS;
   }

   assert(*pSurfaceFormatCount >= ARRAY_SIZE(formats));
   typed_memcpy(pSurfaceFormats, formats, ARRAY_SIZE(formats));
   *pSurfaceFormatCount = ARRAY_SIZE(formats);

   return VK_SUCCESS;
}

static VkResult
x11_surface_get_present_modes(VkIcdSurfaceBase *surface,
                              struct anv_physical_device *device,
                              uint32_t *pPresentModeCount,
                              VkPresentModeKHR *pPresentModes)
{
   if (pPresentModes == NULL) {
      *pPresentModeCount = ARRAY_SIZE(present_modes);
      return VK_SUCCESS;
   }

   assert(*pPresentModeCount >= ARRAY_SIZE(present_modes));
   typed_memcpy(pPresentModes, present_modes, ARRAY_SIZE(present_modes));
   *pPresentModeCount = ARRAY_SIZE(present_modes);

   return VK_SUCCESS;
}

static VkResult
x11_surface_create_swapchain(VkIcdSurfaceBase *surface,
                             struct anv_device *device,
                             const VkSwapchainCreateInfoKHR* pCreateInfo,
                             const VkAllocationCallbacks* pAllocator,
                             struct anv_swapchain **swapchain);

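/* Creating an XCB surface just wraps the (connection, window) pair in a
 * VkIcdSurfaceXcb; no X resources are created until swapchain time.
 */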
VkResult anv_CreateXcbSurfaceKHR(
    VkInstance                                  _instance,
    const VkXcbSurfaceCreateInfoKHR*            pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkSurfaceKHR*                               pSurface)
{
   ANV_FROM_HANDLE(anv_instance, instance, _instance);

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR);

   VkIcdSurfaceXcb *surface;

   surface = anv_alloc2(&instance->alloc, pAllocator, sizeof *surface, 8,
                        VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (surface == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   surface->base.platform = VK_ICD_WSI_PLATFORM_XCB;
   surface->connection = pCreateInfo->connection;
   surface->window = pCreateInfo->window;

   *pSurface = _VkIcdSurfaceBase_to_handle(&surface->base);

   return VK_SUCCESS;
}

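/* One swapchain image: the anv image and the memory backing it, the DRI3
 * pixmap imported from that memory, and the GetGeometry cookie used as a
 * present-completion fence while the image is busy.
 */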
struct x11_image {
   struct anv_image *                        image;
   struct anv_device_memory *                memory;
   xcb_pixmap_t                              pixmap;
   xcb_get_geometry_cookie_t                 geom_cookie;
   bool                                      busy;
};

struct x11_swapchain {
   struct anv_swapchain                      base;

   xcb_connection_t *                        conn;
   xcb_window_t                              window;
   xcb_gc_t                                  gc;
   VkExtent2D                                extent;
   uint32_t                                  image_count;
   uint32_t                                  next_image;
   struct x11_image                          images[0];
};

static VkResult
x11_get_images(struct anv_swapchain *anv_chain,
               uint32_t* pCount, VkImage *pSwapchainImages)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;

   if (pSwapchainImages == NULL) {
      *pCount = chain->image_count;
      return VK_SUCCESS;
   }

   assert(chain->image_count <= *pCount);
   for (uint32_t i = 0; i < chain->image_count; i++)
      pSwapchainImages[i] = anv_image_to_handle(chain->images[i].image);

   *pCount = chain->image_count;

   return VK_SUCCESS;
}

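/* Round-robin acquire.  A busy image blocks on the GetGeometry reply
 * issued at present time, which throttles us to the server's handling of
 * the copy.  A size mismatch means the window was resized, so the
 * swapchain is out of date.  Note that the timeout and semaphore
 * parameters are currently ignored.
 */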
static VkResult
x11_acquire_next_image(struct anv_swapchain *anv_chain,
                       uint64_t timeout,
                       VkSemaphore semaphore,
                       uint32_t *image_index)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
   struct x11_image *image = &chain->images[chain->next_image];

   if (image->busy) {
      xcb_generic_error_t *err;
      xcb_get_geometry_reply_t *geom =
         xcb_get_geometry_reply(chain->conn, image->geom_cookie, &err);
      if (!geom) {
         free(err);
         return vk_error(VK_ERROR_OUT_OF_DATE_KHR);
      }

      if (geom->width != chain->extent.width ||
          geom->height != chain->extent.height) {
         free(geom);
         return vk_error(VK_ERROR_OUT_OF_DATE_KHR);
      }
      free(geom);

      image->busy = false;
   }

   *image_index = chain->next_image;
   chain->next_image = (chain->next_image + 1) % chain->image_count;
   return VK_SUCCESS;
}

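/* Presents by blitting the image's pixmap to the window with CopyArea.
 * The GetGeometry request issued right afterwards doubles as a completion
 * fence: X processes requests in order, so its reply cannot arrive before
 * the copy has been handled.
 */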
static VkResult
x11_queue_present(struct anv_swapchain *anv_chain,
                  struct anv_queue *queue,
                  uint32_t image_index)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;

   assert(image_index < chain->image_count);

   struct x11_image *image = &chain->images[image_index];

   xcb_void_cookie_t cookie;

   cookie = xcb_copy_area(chain->conn,
                          image->pixmap,
                          chain->window,
                          chain->gc,
                          0, 0,
                          0, 0,
                          chain->extent.width,
                          chain->extent.height);
   xcb_discard_reply(chain->conn, cookie.sequence);

   image->geom_cookie = xcb_get_geometry(chain->conn, chain->window);
   image->busy = true;

   xcb_flush(chain->conn);

   return VK_SUCCESS;
}

static VkResult
x11_swapchain_destroy(struct anv_swapchain *anv_chain,
                      const VkAllocationCallbacks *pAllocator)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
   xcb_void_cookie_t cookie;

   for (uint32_t i = 0; i < chain->image_count; i++) {
      struct x11_image *image = &chain->images[i];

      if (image->busy)
         xcb_discard_reply(chain->conn, image->geom_cookie.sequence);

      cookie = xcb_free_pixmap(chain->conn, image->pixmap);
      xcb_discard_reply(chain->conn, cookie.sequence);

      /* TODO: Delete images and free memory */
   }

   anv_free2(&chain->base.device->alloc, pAllocator, chain);

   return VK_SUCCESS;
}

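/* Builds the swapchain: for each image, allocate an X-tiled BO, export it
 * as a prime fd, and hand that fd to the server with DRI3's
 * PixmapFromBuffer so CopyArea can present from it.
 */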
static VkResult
x11_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
                             struct anv_device *device,
                             const VkSwapchainCreateInfoKHR *pCreateInfo,
                             const VkAllocationCallbacks* pAllocator,
                             struct anv_swapchain **swapchain_out)
{
   VkIcdSurfaceXcb *surface = (VkIcdSurfaceXcb *)icd_surface;
   struct x11_swapchain *chain;
   xcb_void_cookie_t cookie;
   VkResult result;

   int num_images = pCreateInfo->minImageCount;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR);

   size_t size = sizeof(*chain) + num_images * sizeof(chain->images[0]);
   chain = anv_alloc2(&device->alloc, pAllocator, size, 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (chain == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   chain->base.device = device;
   chain->base.destroy = x11_swapchain_destroy;
   chain->base.get_images = x11_get_images;
   chain->base.acquire_next_image = x11_acquire_next_image;
   chain->base.queue_present = x11_queue_present;

   chain->conn = surface->connection;
   chain->window = surface->window;
   chain->extent = pCreateInfo->imageExtent;
   chain->image_count = num_images;
   chain->next_image = 0;

   for (uint32_t i = 0; i < chain->image_count; i++) {
      VkDeviceMemory memory_h;
      VkImage image_h;
      struct anv_image *image;
      struct anv_surface *anv_surface;
      struct anv_device_memory *memory;

      anv_image_create(anv_device_to_handle(device),
         &(struct anv_image_create_info) {
            .isl_tiling_flags = ISL_TILING_X_BIT,
            .stride = 0,
            .vk_info =
         &(VkImageCreateInfo) {
            .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
            .imageType = VK_IMAGE_TYPE_2D,
            .format = pCreateInfo->imageFormat,
            .extent = {
               .width = pCreateInfo->imageExtent.width,
               .height = pCreateInfo->imageExtent.height,
               .depth = 1
            },
            .mipLevels = 1,
            .arrayLayers = 1,
            .samples = 1,
            /* FIXME: Need a way to use X tiling to allow scanout */
            .tiling = VK_IMAGE_TILING_OPTIMAL,
            .usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
            .flags = 0,
         }},
         NULL,
         &image_h);

      image = anv_image_from_handle(image_h);
      assert(anv_format_is_color(image->format));

      anv_surface = &image->color_surface;

      anv_AllocateMemory(anv_device_to_handle(device),
         &(VkMemoryAllocateInfo) {
            .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
            .allocationSize = image->size,
            .memoryTypeIndex = 0,
         },
         NULL /* XXX: pAllocator */,
         &memory_h);

      memory = anv_device_memory_from_handle(memory_h);
      memory->bo.is_winsys_bo = true;

      anv_BindImageMemory(VK_NULL_HANDLE, anv_image_to_handle(image),
                          memory_h, 0);

      int ret = anv_gem_set_tiling(device, memory->bo.gem_handle,
                                   anv_surface->isl.row_pitch, I915_TILING_X);
      if (ret) {
         /* FINISHME: Choose a better error. */
         result = vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY,
                            "set_tiling failed: %m");
         goto fail;
      }

      int fd = anv_gem_handle_to_fd(device, memory->bo.gem_handle);
      if (fd == -1) {
         /* FINISHME: Choose a better error. */
         result = vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY,
                            "handle_to_fd failed: %m");
         goto fail;
      }

      uint32_t bpp = 32;
      uint32_t depth = 24;
      xcb_pixmap_t pixmap = xcb_generate_id(chain->conn);

      cookie =
         xcb_dri3_pixmap_from_buffer_checked(chain->conn,
                                             pixmap,
                                             chain->window,
                                             image->size,
                                             pCreateInfo->imageExtent.width,
                                             pCreateInfo->imageExtent.height,
                                             anv_surface->isl.row_pitch,
                                             depth, bpp, fd);

      chain->images[i].image = image;
      chain->images[i].memory = memory;
      chain->images[i].pixmap = pixmap;
      chain->images[i].busy = false;

      xcb_discard_reply(chain->conn, cookie.sequence);
   }

   chain->gc = xcb_generate_id(chain->conn);
   if (!chain->gc) {
      /* FINISHME: Choose a better error. */
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail;
   }

   cookie = xcb_create_gc(chain->conn,
                          chain->gc,
                          chain->window,
                          XCB_GC_GRAPHICS_EXPOSURES,
                          (uint32_t []) { 0 });
   xcb_discard_reply(chain->conn, cookie.sequence);

   *swapchain_out = &chain->base;

   return VK_SUCCESS;

fail:
   /* TODO: Unwind the images, memory, and pixmaps created above. */
   anv_free2(&device->alloc, pAllocator, chain);

   return result;
}

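/* Registers the XCB WSI backend: sets up the connection cache and its
 * mutex and plugs the function table into
 * instance->wsi[VK_ICD_WSI_PLATFORM_XCB].
 */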
VkResult
anv_x11_init_wsi(struct anv_instance *instance)
{
   struct wsi_x11 *wsi;
   VkResult result;

   wsi = anv_alloc(&instance->alloc, sizeof(*wsi), 8,
                   VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!wsi) {
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail;
   }

   int ret = pthread_mutex_init(&wsi->mutex, NULL);
   if (ret != 0) {
      /* FINISHME: Choose a better error for the non-ENOMEM cases. */
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail_alloc;
   }

   wsi->connections = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                              _mesa_key_pointer_equal);
   if (!wsi->connections) {
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail_mutex;
   }

   wsi->base.get_support = x11_surface_get_support;
   wsi->base.get_capabilities = x11_surface_get_capabilities;
   wsi->base.get_formats = x11_surface_get_formats;
   wsi->base.get_present_modes = x11_surface_get_present_modes;
   wsi->base.create_swapchain = x11_surface_create_swapchain;

   instance->wsi[VK_ICD_WSI_PLATFORM_XCB] = &wsi->base;

   return VK_SUCCESS;

fail_mutex:
   pthread_mutex_destroy(&wsi->mutex);
fail_alloc:
   anv_free(&instance->alloc, wsi);
fail:
   instance->wsi[VK_ICD_WSI_PLATFORM_XCB] = NULL;

   return result;
}

void
anv_x11_finish_wsi(struct anv_instance *instance)
{
   struct wsi_x11 *wsi =
      (struct wsi_x11 *)instance->wsi[VK_ICD_WSI_PLATFORM_XCB];

   if (wsi) {
      /* Free the cached per-connection structs before dropping the table;
       * destroying the table alone would leak them.
       */
      struct hash_entry *entry;
      hash_table_foreach(wsi->connections, entry)
         wsi_x11_connection_destroy(instance, entry->data);

      _mesa_hash_table_destroy(wsi->connections, NULL);

      pthread_mutex_destroy(&wsi->mutex);

      anv_free(&instance->alloc, wsi);
   }
}