#include <xf86drm.h>
#include "drm-uapi/drm_fourcc.h"
#include "util/hash_table.h"
+#include "util/xmlconfig.h"
#include "vk_util.h"
+#include "vk_enum_to_str.h"
#include "wsi_common_private.h"
#include "wsi_common_x11.h"
#include "wsi_common_queue.h"
return VK_SUCCESS;
}
+/* Return the minimum number of swapchain images the X11 WSI advertises.
+ *
+ * A non-zero driconf override (vk_x11_override_min_image_count) takes
+ * precedence over the heuristic value below.
+ */
+static uint32_t
+x11_get_min_image_count(struct wsi_device *wsi_device)
+{
+ if (wsi_device->x11.override_minImageCount)
+ return wsi_device->x11.override_minImageCount;
+
+ /* For IMMEDIATE and FIFO, most games work in a pipelined manner where they
+ * can produce frames at a rate of 1/MAX(CPU duration, GPU duration), but
+ * the render latency is CPU duration + GPU duration.
+ *
+ * This means that with scanout from pageflipping we need 3 frames to run
+ * full speed:
+ * 1) CPU rendering work
+ * 2) GPU rendering work
+ * 3) scanout
+ *
+ * Once we have a nonblocking acquire that returns a semaphore we can merge
+ * 1 and 3. Hence the ideal implementation needs only 2 images, but games
+ * cannot tell that we currently do not have an ideal implementation and
+ * hence that they need to allocate 3 images. So let us do it for them.
+ *
+ * This is a tradeoff as it uses more memory than needed for non-fullscreen
+ * and non-performance intensive applications.
+ */
+ return 3;
+}
+
static VkResult
x11_surface_get_capabilities(VkIcdSurfaceBase *icd_surface,
struct wsi_device *wsi_device,
* to come back from the compositor. In that case, we don't know the
* size of the window so we just return valid "I don't know" stuff.
*/
- caps->currentExtent = (VkExtent2D) { -1, -1 };
+ caps->currentExtent = (VkExtent2D) { UINT32_MAX, UINT32_MAX };
caps->minImageExtent = (VkExtent2D) { 1, 1 };
caps->maxImageExtent = (VkExtent2D) {
wsi_device->maxImageDimension2D,
VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
}
- /* For true mailbox mode, we need at least 4 images:
- * 1) One to scan out from
- * 2) One to have queued for scan-out
- * 3) One to be currently held by the X server
- * 4) One to render to
- */
- caps->minImageCount = 2;
+ caps->minImageCount = x11_get_min_image_count(wsi_device);
/* There is no real maximum */
caps->maxImageCount = 0;
return result;
}
+/* Copy the file-global `formats` table into sorted_formats, optionally
+ * moving VK_FORMAT_B8G8R8A8_UNORM to the front when the driver sets the
+ * force_bgra8_unorm_first workaround (some apps pick the first reported
+ * surface format unconditionally).
+ *
+ * sorted_formats must have room for ARRAY_SIZE(formats) entries.
+ */
+static void
+get_sorted_vk_formats(struct wsi_device *wsi_device, VkFormat *sorted_formats)
+{
+ memcpy(sorted_formats, formats, sizeof(formats));
+
+ if (wsi_device->force_bgra8_unorm_first) {
+ for (unsigned i = 0; i < ARRAY_SIZE(formats); i++) {
+ if (sorted_formats[i] == VK_FORMAT_B8G8R8A8_UNORM) {
+ /* Swap B8G8R8A8_UNORM into slot 0; order of the displaced
+ * entry is otherwise irrelevant. */
+ sorted_formats[i] = sorted_formats[0];
+ sorted_formats[0] = VK_FORMAT_B8G8R8A8_UNORM;
+ break;
+ }
+ }
+ }
+}
+
static VkResult
x11_surface_get_formats(VkIcdSurfaceBase *surface,
struct wsi_device *wsi_device,
{
VK_OUTARRAY_MAKE(out, pSurfaceFormats, pSurfaceFormatCount);
- for (unsigned i = 0; i < ARRAY_SIZE(formats); i++) {
+ VkFormat sorted_formats[ARRAY_SIZE(formats)];
+ get_sorted_vk_formats(wsi_device, sorted_formats);
+
+ for (unsigned i = 0; i < ARRAY_SIZE(sorted_formats); i++) {
vk_outarray_append(&out, f) {
- f->format = formats[i];
+ f->format = sorted_formats[i];
f->colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
}
}
{
VK_OUTARRAY_MAKE(out, pSurfaceFormats, pSurfaceFormatCount);
- for (unsigned i = 0; i < ARRAY_SIZE(formats); i++) {
+ VkFormat sorted_formats[ARRAY_SIZE(formats)];
+ get_sorted_vk_formats(wsi_device, sorted_formats);
+
+ for (unsigned i = 0; i < ARRAY_SIZE(sorted_formats); i++) {
vk_outarray_append(&out, f) {
assert(f->sType == VK_STRUCTURE_TYPE_SURFACE_FORMAT_2_KHR);
- f->surfaceFormat.format = formats[i];
+ f->surfaceFormat.format = sorted_formats[i];
f->surfaceFormat.colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
}
}
VK_INCOMPLETE : VK_SUCCESS;
}
-static bool
-x11_surface_is_local_to_gpu(struct wsi_device *wsi_dev,
- xcb_connection_t *conn)
-{
- struct wsi_x11_connection *wsi_conn =
- wsi_x11_get_connection(wsi_dev, conn);
-
- if (!wsi_conn)
- return false;
-
- if (!wsi_x11_check_for_dri3(wsi_conn))
- return false;
-
- if (!wsi_x11_check_dri3_compatible(wsi_dev, conn))
- return false;
-
- return true;
-}
-
static VkResult
x11_surface_get_present_rectangles(VkIcdSurfaceBase *icd_surface,
struct wsi_device *wsi_device,
xcb_window_t window = x11_surface_get_window(icd_surface);
VK_OUTARRAY_MAKE(out, pRects, pRectCount);
- if (x11_surface_is_local_to_gpu(wsi_device, conn)) {
- vk_outarray_append(&out, rect) {
- xcb_generic_error_t *err = NULL;
- xcb_get_geometry_cookie_t geom_cookie = xcb_get_geometry(conn, window);
- xcb_get_geometry_reply_t *geom =
- xcb_get_geometry_reply(conn, geom_cookie, &err);
- free(err);
- if (geom) {
- *rect = (VkRect2D) {
- .offset = { 0, 0 },
- .extent = { geom->width, geom->height },
- };
- } else {
- /* This can happen if the client didn't wait for the configure event
- * to come back from the compositor. In that case, we don't know the
- * size of the window so we just return valid "I don't know" stuff.
- */
- *rect = (VkRect2D) {
- .offset = { 0, 0 },
- .extent = { -1, -1 },
- };
- }
- free(geom);
+ vk_outarray_append(&out, rect) {
+ xcb_generic_error_t *err = NULL;
+ xcb_get_geometry_cookie_t geom_cookie = xcb_get_geometry(conn, window);
+ xcb_get_geometry_reply_t *geom =
+ xcb_get_geometry_reply(conn, geom_cookie, &err);
+ free(err);
+ if (geom) {
+ *rect = (VkRect2D) {
+ .offset = { 0, 0 },
+ .extent = { geom->width, geom->height },
+ };
+ } else {
+ /* This can happen if the client didn't wait for the configure event
+ * to come back from the compositor. In that case, we don't know the
+ * size of the window so we just return valid "I don't know" stuff.
+ */
+ *rect = (VkRect2D) {
+ .offset = { 0, 0 },
+ .extent = { UINT32_MAX, UINT32_MAX },
+ };
}
+ free(geom);
}
return vk_outarray_status(&out);
uint64_t send_sbc;
uint64_t last_present_msc;
uint32_t stamp;
+ int sent_image_count;
- bool threaded;
+ bool has_present_queue;
+ bool has_acquire_queue;
VkResult status;
xcb_present_complete_mode_t last_present_mode;
struct wsi_queue present_queue;
struct x11_image images[0];
};
-WSI_DEFINE_NONDISP_HANDLE_CASTS(x11_swapchain, VkSwapchainKHR)
+VK_DEFINE_NONDISP_HANDLE_CASTS(x11_swapchain, base.base, VkSwapchainKHR,
+ VK_OBJECT_TYPE_SWAPCHAIN_KHR)
/**
* Update the swapchain status with the result of an operation, and return
* this has not been seen, success will be returned.
*/
static VkResult
-x11_swapchain_result(struct x11_swapchain *chain, VkResult result)
+_x11_swapchain_result(struct x11_swapchain *chain, VkResult result,
+ const char *file, int line)
{
/* Prioritise returning existing errors for consistency. */
if (chain->status < 0)
/* If we have a new error, mark it as permanent on the chain and return. */
if (result < 0) {
+#ifndef NDEBUG
+ fprintf(stderr, "%s:%d: Swapchain status changed to %s\n",
+ file, line, vk_Result_to_str(result));
+#endif
chain->status = result;
return result;
}
* and is always returned rather than success.
*/
if (result == VK_SUBOPTIMAL_KHR) {
+#ifndef NDEBUG
+ if (chain->status != VK_SUBOPTIMAL_KHR) {
+ fprintf(stderr, "%s:%d: Swapchain status changed to %s\n",
+ file, line, vk_Result_to_str(result));
+ }
+#endif
chain->status = result;
return result;
}
/* No changes, so return the last status. */
return chain->status;
}
+/* Wrapper that records the call site so the NDEBUG-only logging in
+ * _x11_swapchain_result can report where a swapchain status change
+ * originated. */
+#define x11_swapchain_result(chain, result) \
+ _x11_swapchain_result(chain, result, __FILE__, __LINE__)
static struct wsi_image *
x11_get_wsi_image(struct wsi_swapchain *wsi_chain, uint32_t image_index)
for (unsigned i = 0; i < chain->base.image_count; i++) {
if (chain->images[i].pixmap == idle->pixmap) {
chain->images[i].busy = false;
- if (chain->threaded)
+ chain->sent_image_count--;
+ assert(chain->sent_image_count >= 0);
+ if (chain->has_acquire_queue)
wsi_queue_push(&chain->acquire_queue, i);
break;
}
x11_acquire_next_image_from_queue(struct x11_swapchain *chain,
uint32_t *image_index_out, uint64_t timeout)
{
- assert(chain->threaded);
+ assert(chain->has_acquire_queue);
uint32_t image_index;
VkResult result = wsi_queue_pull(&chain->acquire_queue,
}
static VkResult
-x11_present_to_x11(struct x11_swapchain *chain, uint32_t image_index,
- uint32_t target_msc)
+x11_present_to_x11_dri3(struct x11_swapchain *chain, uint32_t image_index,
+ uint32_t target_msc)
{
struct x11_image *image = &chain->images[image_index];
options |= XCB_PRESENT_OPTION_SUBOPTIMAL;
#endif
+ /* Poll for any available event and update the swapchain status. This could
+ * update the status of the swapchain to SUBOPTIMAL or OUT_OF_DATE if the
+ * associated X11 surface has been resized.
+ */
+ xcb_generic_event_t *event;
+ while ((event = xcb_poll_for_special_event(chain->conn, chain->special_event))) {
+ VkResult result = x11_handle_dri3_present_event(chain, (void *)event);
+ free(event);
+ if (result < 0)
+ return x11_swapchain_result(chain, result);
+ x11_swapchain_result(chain, result);
+ }
+
xshmfence_reset(image->shm_fence);
+ ++chain->sent_image_count;
+ assert(chain->sent_image_count <= chain->base.image_count);
+
++chain->send_sbc;
+
xcb_void_cookie_t cookie =
xcb_present_pixmap(chain->conn,
chain->window,
divisor,
remainder, 0, NULL);
xcb_discard_reply(chain->conn, cookie.sequence);
- image->busy = true;
xcb_flush(chain->conn);
return x11_swapchain_result(chain, VK_SUCCESS);
}
+/* Software (non-DRI3) present path: map the image's host-visible memory and
+ * push the pixels to the window with xcb_put_image.
+ */
+static VkResult
+x11_present_to_x11_sw(struct x11_swapchain *chain, uint32_t image_index,
+ uint32_t target_msc)
+{
+ struct x11_image *image = &chain->images[image_index];
+
+ xcb_void_cookie_t cookie;
+ void *myptr;
+ /* NOTE(review): size argument is 0 here; VkMapMemory expects
+ * VK_WHOLE_SIZE (~0ULL) to map the full allocation — confirm the
+ * dispatch-table wrapper tolerates this. Return value is ignored. */
+ chain->base.wsi->MapMemory(chain->base.device,
+ image->base.memory,
+ 0, 0, 0, &myptr);
+
+ /* Width is derived as row_pitch / 4, which assumes a 32-bit pixel
+ * format; depth is hardcoded to 24 — presumably matches the visuals
+ * this path is created for (TODO confirm). */
+ cookie = xcb_put_image(chain->conn, XCB_IMAGE_FORMAT_Z_PIXMAP,
+ chain->window,
+ chain->gc,
+ image->base.row_pitches[0] / 4,
+ chain->extent.height,
+ 0,0,0,24,
+ image->base.row_pitches[0] * chain->extent.height,
+ myptr);
+
+ chain->base.wsi->UnmapMemory(chain->base.device, image->base.memory);
+ /* We don't care about the PutImage reply/error; drop it. */
+ xcb_discard_reply(chain->conn, cookie.sequence);
+ xcb_flush(chain->conn);
+ return x11_swapchain_result(chain, VK_SUCCESS);
+}
+/* Dispatch a present to the software path when the WSI device runs without
+ * hardware display support (wsi->sw), otherwise to the DRI3/Present path. */
+static VkResult
+x11_present_to_x11(struct x11_swapchain *chain, uint32_t image_index,
+ uint32_t target_msc)
+{
+ if (chain->base.wsi->sw)
+ return x11_present_to_x11_sw(chain, image_index, target_msc);
+ return x11_present_to_x11_dri3(chain, image_index, target_msc);
+}
+
+
static VkResult
x11_acquire_next_image(struct wsi_swapchain *anv_chain,
const VkAcquireNextImageInfoKHR *info,
struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
uint64_t timeout = info->timeout;
- if (chain->threaded) {
+ /* If the swapchain is in an error state, don't go any further. */
+ if (chain->status < 0)
+ return chain->status;
+
+ if (chain->base.wsi->sw) {
+ *image_index = 0;
+ return VK_SUCCESS;
+ }
+ if (chain->has_acquire_queue) {
return x11_acquire_next_image_from_queue(chain, image_index, timeout);
} else {
return x11_acquire_next_image_poll_x11(chain, image_index, timeout);
{
struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
- if (chain->threaded) {
+ /* If the swapchain is in an error state, don't go any further. */
+ if (chain->status < 0)
+ return chain->status;
+
+ chain->images[image_index].busy = true;
+ if (chain->has_present_queue) {
wsi_queue_push(&chain->present_queue, image_index);
return chain->status;
} else {
struct x11_swapchain *chain = state;
VkResult result = VK_SUCCESS;
- assert(chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR);
-
+ assert(chain->has_present_queue);
while (chain->status >= 0) {
/* It should be safe to unconditionally block here. Later in the loop
* we blocks until the previous present has landed on-screen. At that
return NULL;
}
- uint64_t target_msc = chain->last_present_msc + 1;
+ if (chain->base.present_mode == VK_PRESENT_MODE_MAILBOX_KHR) {
+ result = chain->base.wsi->WaitForFences(chain->base.device, 1,
+ &chain->base.fences[image_index],
+ true, UINT64_MAX);
+ if (result != VK_SUCCESS) {
+ result = VK_ERROR_OUT_OF_DATE_KHR;
+ goto fail;
+ }
+ }
+
+ uint64_t target_msc = 0;
+ if (chain->has_acquire_queue)
+ target_msc = chain->last_present_msc + 1;
+
result = x11_present_to_x11(chain, image_index, target_msc);
if (result < 0)
goto fail;
- while (chain->last_present_msc < target_msc) {
- xcb_generic_event_t *event =
- xcb_wait_for_special_event(chain->conn, chain->special_event);
- if (!event) {
- result = VK_ERROR_OUT_OF_DATE_KHR;
- goto fail;
+ if (chain->has_acquire_queue) {
+ while (chain->last_present_msc < target_msc) {
+ xcb_generic_event_t *event =
+ xcb_wait_for_special_event(chain->conn, chain->special_event);
+ if (!event) {
+ result = VK_ERROR_OUT_OF_DATE_KHR;
+ goto fail;
+ }
+
+ result = x11_handle_dri3_present_event(chain, (void *)event);
+ free(event);
+ if (result < 0)
+ goto fail;
}
-
- result = x11_handle_dri3_present_event(chain, (void *)event);
- free(event);
- if (result < 0)
- goto fail;
}
}
fail:
x11_swapchain_result(chain, result);
- wsi_queue_push(&chain->acquire_queue, UINT32_MAX);
+ if (chain->has_acquire_queue)
+ wsi_queue_push(&chain->acquire_queue, UINT32_MAX);
return NULL;
}
if (result < 0)
return result;
+ if (chain->base.wsi->sw) {
+ image->busy = false;
+ return VK_SUCCESS;
+ }
image->pixmap = xcb_generate_id(chain->conn);
#ifdef HAVE_DRI3_MODIFIERS
{
xcb_void_cookie_t cookie;
- cookie = xcb_sync_destroy_fence(chain->conn, image->sync_fence);
- xcb_discard_reply(chain->conn, cookie.sequence);
- xshmfence_unmap_shm(image->shm_fence);
+ if (!chain->base.wsi->sw) {
+ cookie = xcb_sync_destroy_fence(chain->conn, image->sync_fence);
+ xcb_discard_reply(chain->conn, cookie.sequence);
+ xshmfence_unmap_shm(image->shm_fence);
- cookie = xcb_free_pixmap(chain->conn, image->pixmap);
- xcb_discard_reply(chain->conn, cookie.sequence);
+ cookie = xcb_free_pixmap(chain->conn, image->pixmap);
+ xcb_discard_reply(chain->conn, cookie.sequence);
+ }
wsi_destroy_image(&chain->base, &image->base);
}
struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
xcb_void_cookie_t cookie;
- if (chain->threaded) {
+ if (chain->has_present_queue) {
chain->status = VK_ERROR_OUT_OF_DATE_KHR;
/* Push a UINT32_MAX to wake up the manager */
wsi_queue_push(&chain->present_queue, UINT32_MAX);
pthread_join(chain->queue_manager, NULL);
- wsi_queue_destroy(&chain->acquire_queue);
+
+ if (chain->has_acquire_queue)
+ wsi_queue_destroy(&chain->acquire_queue);
wsi_queue_destroy(&chain->present_queue);
}
return VK_SUCCESS;
}
+/* Set or clear the _VARIABLE_REFRESH property on the window so that a
+ * VRR-capable X driver can enable adaptive sync while this swapchain's
+ * window is fullscreen.
+ *
+ * state != 0 sets the CARDINAL property to `state`; state == 0 deletes it.
+ * Failures are silently ignored (best effort).
+ */
+static void
+wsi_x11_set_adaptive_sync_property(xcb_connection_t *conn,
+ xcb_drawable_t drawable,
+ uint32_t state)
+{
+ static char const name[] = "_VARIABLE_REFRESH";
+ xcb_intern_atom_cookie_t cookie;
+ xcb_intern_atom_reply_t* reply;
+ xcb_void_cookie_t check;
+
+ /* only_if_exists = 0: create the atom if the server doesn't know it. */
+ cookie = xcb_intern_atom(conn, 0, strlen(name), name);
+ reply = xcb_intern_atom_reply(conn, cookie, NULL);
+ if (reply == NULL)
+ return;
+
+ if (state)
+ check = xcb_change_property_checked(conn, XCB_PROP_MODE_REPLACE,
+ drawable, reply->atom,
+ XCB_ATOM_CARDINAL, 32, 1, &state);
+ else
+ check = xcb_delete_property_checked(conn, drawable, reply->atom);
+
+ /* Checked request, but we don't act on errors; discard the reply. */
+ xcb_discard_reply(conn, check.sequence);
+ free(reply);
+}
+
+
static VkResult
x11_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
VkDevice device,
struct x11_swapchain *chain;
xcb_void_cookie_t cookie;
VkResult result;
+ VkPresentModeKHR present_mode = wsi_swapchain_get_present_mode(wsi_device, pCreateInfo);
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR);
- const unsigned num_images = pCreateInfo->minImageCount;
+ unsigned num_images = pCreateInfo->minImageCount;
+ if (wsi_device->x11.strict_imageCount)
+ num_images = pCreateInfo->minImageCount;
+ else if (present_mode == VK_PRESENT_MODE_MAILBOX_KHR)
+ num_images = MAX2(num_images, 5);
+ else if (wsi_device->x11.ensure_minImageCount)
+ num_images = MAX2(num_images, x11_get_min_image_count(wsi_device));
xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
struct wsi_x11_connection *wsi_conn =
chain->base.get_wsi_image = x11_get_wsi_image;
chain->base.acquire_next_image = x11_acquire_next_image;
chain->base.queue_present = x11_queue_present;
- chain->base.present_mode = wsi_swapchain_get_present_mode(wsi_device, pCreateInfo);
+ chain->base.present_mode = present_mode;
chain->base.image_count = num_images;
chain->conn = conn;
chain->window = window;
chain->depth = bit_depth;
chain->extent = pCreateInfo->imageExtent;
chain->send_sbc = 0;
+ chain->sent_image_count = 0;
chain->last_present_msc = 0;
- chain->threaded = false;
+ chain->has_acquire_queue = false;
+ chain->has_present_queue = false;
chain->status = VK_SUCCESS;
chain->has_dri3_modifiers = wsi_conn->has_dri3_modifiers;
* mode which provokes reallocation when anything changes, to make
* sure we have the most optimal allocation.
*/
- WSI_FROM_HANDLE(x11_swapchain, old_chain, pCreateInfo->oldSwapchain);
+ VK_FROM_HANDLE(x11_swapchain, old_chain, pCreateInfo->oldSwapchain);
if (old_chain)
chain->last_present_mode = old_chain->last_present_mode;
else
chain->last_present_mode = XCB_PRESENT_COMPLETE_MODE_COPY;
- if (!wsi_x11_check_dri3_compatible(wsi_device, conn))
- chain->base.use_prime_blit = true;
+ if (!wsi_device->sw)
+ if (!wsi_x11_check_dri3_compatible(wsi_device, conn))
+ chain->base.use_prime_blit = true;
chain->event_id = xcb_generate_id(chain->conn);
xcb_present_select_input(chain->conn, chain->event_id, chain->window,
goto fail_init_images;
}
- if (chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR) {
- chain->threaded = true;
+ if ((chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR ||
+ chain->base.present_mode == VK_PRESENT_MODE_MAILBOX_KHR) && !chain->base.wsi->sw) {
+ chain->has_present_queue = true;
/* Initialize our queues. We make them base.image_count + 1 because we will
* occasionally use UINT32_MAX to signal the other thread that an error
* has occurred and we don't want an overflow.
*/
int ret;
- ret = wsi_queue_init(&chain->acquire_queue, chain->base.image_count + 1);
- if (ret) {
- goto fail_init_images;
- }
-
ret = wsi_queue_init(&chain->present_queue, chain->base.image_count + 1);
if (ret) {
- wsi_queue_destroy(&chain->acquire_queue);
goto fail_init_images;
}
- for (unsigned i = 0; i < chain->base.image_count; i++)
- wsi_queue_push(&chain->acquire_queue, i);
+ if (chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR) {
+ chain->has_acquire_queue = true;
+
+ ret = wsi_queue_init(&chain->acquire_queue, chain->base.image_count + 1);
+ if (ret) {
+ wsi_queue_destroy(&chain->present_queue);
+ goto fail_init_images;
+ }
+
+ for (unsigned i = 0; i < chain->base.image_count; i++)
+ wsi_queue_push(&chain->acquire_queue, i);
+ }
ret = pthread_create(&chain->queue_manager, NULL,
x11_manage_fifo_queues, chain);
if (ret) {
wsi_queue_destroy(&chain->present_queue);
- wsi_queue_destroy(&chain->acquire_queue);
+ if (chain->has_acquire_queue)
+ wsi_queue_destroy(&chain->acquire_queue);
+
goto fail_init_images;
}
}
+ assert(chain->has_present_queue || !chain->has_acquire_queue);
+
for (int i = 0; i < ARRAY_SIZE(modifiers); i++)
vk_free(pAllocator, modifiers[i]);
+
+ /* It is safe to set it here as only one swapchain can be associated with
+ * the window, and swapchain creation does the association. At this point
+ * we know the creation is going to succeed. */
+ wsi_x11_set_adaptive_sync_property(conn, window,
+ wsi_device->enable_adaptive_sync);
+
*swapchain_out = &chain->base;
return VK_SUCCESS;
VkResult
wsi_x11_init_wsi(struct wsi_device *wsi_device,
- const VkAllocationCallbacks *alloc)
+ const VkAllocationCallbacks *alloc,
+ const struct driOptionCache *dri_options)
{
struct wsi_x11 *wsi;
VkResult result;
goto fail_mutex;
}
+ if (dri_options) {
+ if (driCheckOption(dri_options, "vk_x11_override_min_image_count", DRI_INT)) {
+ wsi_device->x11.override_minImageCount =
+ driQueryOptioni(dri_options, "vk_x11_override_min_image_count");
+ }
+ if (driCheckOption(dri_options, "vk_x11_strict_image_count", DRI_BOOL)) {
+ wsi_device->x11.strict_imageCount =
+ driQueryOptionb(dri_options, "vk_x11_strict_image_count");
+ }
+ if (driCheckOption(dri_options, "vk_x11_ensure_min_image_count", DRI_BOOL)) {
+ wsi_device->x11.ensure_minImageCount =
+ driQueryOptionb(dri_options, "vk_x11_ensure_min_image_count");
+ }
+
+ }
+
wsi->base.get_support = x11_surface_get_support;
wsi->base.get_capabilities2 = x11_surface_get_capabilities2;
wsi->base.get_formats = x11_surface_get_formats;