#include <fcntl.h>
#include <poll.h>
#include <xf86drm.h>
+#include <drm_fourcc.h>
#include "util/hash_table.h"
#include "vk_util.h"
struct wsi_x11_connection {
bool has_dri3;
+ bool has_dri3_modifiers;
bool has_present;
bool is_proprietary_x11;
};
}
static struct wsi_x11_connection *
-wsi_x11_connection_create(const VkAllocationCallbacks *alloc,
+wsi_x11_connection_create(struct wsi_device *wsi_dev,
xcb_connection_t *conn)
{
xcb_query_extension_cookie_t dri3_cookie, pres_cookie, amd_cookie, nv_cookie;
xcb_query_extension_reply_t *dri3_reply, *pres_reply, *amd_reply, *nv_reply;
+ bool has_dri3_v1_2 = false;
+ bool has_present_v1_2 = false;
struct wsi_x11_connection *wsi_conn =
- vk_alloc(alloc, sizeof(*wsi_conn), 8,
+ vk_alloc(&wsi_dev->instance_alloc, sizeof(*wsi_conn), 8,
VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
if (!wsi_conn)
return NULL;
dri3_cookie = xcb_query_extension(conn, 4, "DRI3");
- pres_cookie = xcb_query_extension(conn, 7, "PRESENT");
+ pres_cookie = xcb_query_extension(conn, 7, "Present");
/* We try to be nice to users and emit a warning if they try to use a
* Vulkan application on a system without DRI3 enabled. However, this ends
free(pres_reply);
free(amd_reply);
free(nv_reply);
- vk_free(alloc, wsi_conn);
+ vk_free(&wsi_dev->instance_alloc, wsi_conn);
return NULL;
}
wsi_conn->has_dri3 = dri3_reply->present != 0;
+#ifdef HAVE_DRI3_MODIFIERS
+   if (wsi_conn->has_dri3) {
+      xcb_dri3_query_version_cookie_t ver_cookie;
+      xcb_dri3_query_version_reply_t *ver_reply;
+
+      ver_cookie = xcb_dri3_query_version(conn, 1, 2);
+      ver_reply = xcb_dri3_query_version_reply(conn, ver_cookie, NULL);
+      /* The reply is NULL if the connection errored out; treat that as
+       * "no DRI3 v1.2" instead of dereferencing a NULL reply.
+       */
+      has_dri3_v1_2 = ver_reply != NULL &&
+         (ver_reply->major_version > 1 || ver_reply->minor_version >= 2);
+      free(ver_reply);
+   }
+#endif
+
wsi_conn->has_present = pres_reply->present != 0;
+#ifdef HAVE_DRI3_MODIFIERS
+   if (wsi_conn->has_present) {
+      xcb_present_query_version_cookie_t ver_cookie;
+      xcb_present_query_version_reply_t *ver_reply;
+
+      ver_cookie = xcb_present_query_version(conn, 1, 2);
+      ver_reply = xcb_present_query_version_reply(conn, ver_cookie, NULL);
+      /* A NULL reply means the connection errored out; treat that as
+       * "no Present v1.2" instead of dereferencing NULL.
+       */
+      has_present_v1_2 = ver_reply != NULL &&
+         (ver_reply->major_version > 1 || ver_reply->minor_version >= 2);
+      free(ver_reply);
+   }
+#endif
+
+ wsi_conn->has_dri3_modifiers = has_dri3_v1_2 && has_present_v1_2;
wsi_conn->is_proprietary_x11 = false;
if (amd_reply && amd_reply->present)
wsi_conn->is_proprietary_x11 = true;
}
static void
-wsi_x11_connection_destroy(const VkAllocationCallbacks *alloc,
+wsi_x11_connection_destroy(struct wsi_device *wsi_dev,
struct wsi_x11_connection *conn)
{
- vk_free(alloc, conn);
+ vk_free(&wsi_dev->instance_alloc, conn);
}
static bool
static struct wsi_x11_connection *
wsi_x11_get_connection(struct wsi_device *wsi_dev,
- const VkAllocationCallbacks *alloc,
xcb_connection_t *conn)
{
struct wsi_x11 *wsi =
pthread_mutex_unlock(&wsi->mutex);
struct wsi_x11_connection *wsi_conn =
- wsi_x11_connection_create(alloc, conn);
+ wsi_x11_connection_create(wsi_dev, conn);
if (!wsi_conn)
return NULL;
entry = _mesa_hash_table_search(wsi->connections, conn);
if (entry) {
/* Oops, someone raced us to it */
- wsi_x11_connection_destroy(alloc, wsi_conn);
+ wsi_x11_connection_destroy(wsi_dev, wsi_conn);
} else {
entry = _mesa_hash_table_insert(wsi->connections, conn, wsi_conn);
}
VkBool32 wsi_get_physical_device_xcb_presentation_support(
struct wsi_device *wsi_device,
- VkAllocationCallbacks *alloc,
uint32_t queueFamilyIndex,
int fd,
bool can_handle_different_gpu,
xcb_visualid_t visual_id)
{
struct wsi_x11_connection *wsi_conn =
- wsi_x11_get_connection(wsi_device, alloc, connection);
+ wsi_x11_get_connection(wsi_device, connection);
if (!wsi_conn)
return false;
static VkResult
x11_surface_get_support(VkIcdSurfaceBase *icd_surface,
struct wsi_device *wsi_device,
- const VkAllocationCallbacks *alloc,
uint32_t queueFamilyIndex,
int local_fd,
- bool can_handle_different_gpu,
VkBool32* pSupported)
{
xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
xcb_window_t window = x11_surface_get_window(icd_surface);
struct wsi_x11_connection *wsi_conn =
- wsi_x11_get_connection(wsi_device, alloc, conn);
+ wsi_x11_get_connection(wsi_device, conn);
if (!wsi_conn)
return VK_ERROR_OUT_OF_HOST_MEMORY;
return VK_SUCCESS;
}
- if (!can_handle_different_gpu)
- if (!wsi_x11_check_dri3_compatible(conn, local_fd))
- return false;
-
unsigned visual_depth;
if (!get_visualtype_for_window(conn, window, &visual_depth)) {
*pSupported = false;
VK_INCOMPLETE : VK_SUCCESS;
}
+/* Whether the surface's X connection supports DRI3 and its DRM device
+ * matches local_fd (i.e. presentation needs no PRIME blit).
+ */
+static bool
+x11_surface_is_local_to_gpu(struct wsi_device *wsi_dev,
+                            int local_fd,
+                            xcb_connection_t *conn)
+{
+   struct wsi_x11_connection *wsi_conn =
+      wsi_x11_get_connection(wsi_dev, conn);
+
+   return wsi_conn != NULL &&
+          wsi_x11_check_for_dri3(wsi_conn) &&
+          wsi_x11_check_dri3_compatible(conn, local_fd);
+}
+
+/**
+ * Report the rectangle of the surface that the device can present to.
+ *
+ * When the surface's X connection is DRI3-capable and local to local_fd,
+ * one rect covering the whole window is returned; if the geometry query
+ * fails, a {-1, -1}-sized "unknown" rect is returned instead.  Otherwise
+ * no rects are reported.  Follows the standard Vulkan two-call idiom via
+ * the outarray helpers (NULL pRects queries the count).
+ */
+static VkResult
+x11_surface_get_present_rectangles(VkIcdSurfaceBase *icd_surface,
+                                   struct wsi_device *wsi_device,
+                                   int local_fd,
+                                   uint32_t* pRectCount,
+                                   VkRect2D* pRects)
+{
+   xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
+   xcb_window_t window = x11_surface_get_window(icd_surface);
+   VK_OUTARRAY_MAKE(out, pRects, pRectCount);
+
+   if (x11_surface_is_local_to_gpu(wsi_device, local_fd, conn)) {
+      vk_outarray_append(&out, rect) {
+         xcb_generic_error_t *err = NULL;
+         xcb_get_geometry_cookie_t geom_cookie = xcb_get_geometry(conn, window);
+         xcb_get_geometry_reply_t *geom =
+            xcb_get_geometry_reply(conn, geom_cookie, &err);
+         /* Only the reply matters; discard any error object. */
+         free(err);
+         if (geom) {
+            *rect = (VkRect2D) {
+               .offset = { 0, 0 },
+               .extent = { geom->width, geom->height },
+            };
+         } else {
+            /* This can happen if the client didn't wait for the configure event
+             * to come back from the compositor. In that case, we don't know the
+             * size of the window so we just return valid "I don't know" stuff.
+             */
+            *rect = (VkRect2D) {
+               .offset = { 0, 0 },
+               .extent = { -1, -1 },
+            };
+         }
+         free(geom);
+      }
+   }
+
+   return vk_outarray_status(&out);
+}
+
VkResult wsi_create_xcb_surface(const VkAllocationCallbacks *pAllocator,
const VkXcbSurfaceCreateInfoKHR *pCreateInfo,
VkSurfaceKHR *pSurface)
struct x11_swapchain {
struct wsi_swapchain base;
- bool use_prime_blit;
+ bool has_dri3_modifiers;
xcb_connection_t * conn;
xcb_window_t window;
bool threaded;
VkResult status;
+ xcb_present_complete_mode_t last_present_mode;
struct wsi_queue present_queue;
struct wsi_queue acquire_queue;
pthread_t queue_manager;
struct x11_image images[0];
};
+/**
+ * Update the swapchain status with the result of an operation, and return
+ * the combined status. The chain status will eventually be returned from
+ * AcquireNextImage and QueuePresent.
+ *
+ * We make sure to 'stick' more pessimistic statuses: an out-of-date error
+ * is permanent once seen, and every subsequent call will return this. If
+ * this has not been seen, success will be returned.
+ *
+ * NOTE(review): chain->status is read and written here without locking,
+ * while the FIFO queue-manager thread and swapchain destruction also
+ * touch it — presumably callers serialize these accesses; confirm.
+ */
+static VkResult
+x11_swapchain_result(struct x11_swapchain *chain, VkResult result)
+{
+   /* Prioritise returning existing errors for consistency. */
+   if (chain->status < 0)
+      return chain->status;
+
+   /* If we have a new error, mark it as permanent on the chain and return. */
+   if (result < 0) {
+      chain->status = result;
+      return result;
+   }
+
+   /* Return temporary errors, but don't persist them. */
+   if (result == VK_TIMEOUT || result == VK_NOT_READY)
+      return result;
+
+   /* Suboptimal isn't an error, but is a status which sticks to the swapchain
+    * and is always returned rather than success.
+    */
+   if (result == VK_SUBOPTIMAL_KHR) {
+      chain->status = result;
+      return result;
+   }
+
+   /* No changes, so return the last status. */
+   return chain->status;
+}
+
static struct wsi_image *
x11_get_wsi_image(struct wsi_swapchain *wsi_chain, uint32_t image_index)
{
return &chain->images[image_index].base;
}
+/**
+ * Process an X11 Present event. Does not update chain->status.
+ */
static VkResult
x11_handle_dri3_present_event(struct x11_swapchain *chain,
xcb_present_generic_event_t *event)
xcb_present_complete_notify_event_t *complete = (void *) event;
if (complete->kind == XCB_PRESENT_COMPLETE_KIND_PIXMAP)
chain->last_present_msc = complete->msc;
- break;
+
+ VkResult result = VK_SUCCESS;
+
+ /* The winsys is now trying to flip directly and cannot due to our
+ * configuration. Request the user reallocate.
+ */
+#ifdef HAVE_DRI3_MODIFIERS
+ if (complete->mode == XCB_PRESENT_COMPLETE_MODE_SUBOPTIMAL_COPY &&
+ chain->last_present_mode != XCB_PRESENT_COMPLETE_MODE_SUBOPTIMAL_COPY)
+ result = VK_SUBOPTIMAL_KHR;
+#endif
+
+ /* When we go from flipping to copying, the odds are very likely that
+ * we could reallocate in a more optimal way if we didn't have to care
+ * about scanout, so we always do this.
+ */
+ if (complete->mode == XCB_PRESENT_COMPLETE_MODE_COPY &&
+ chain->last_present_mode == XCB_PRESENT_COMPLETE_MODE_FLIP)
+ result = VK_SUBOPTIMAL_KHR;
+
+ chain->last_present_mode = complete->mode;
+ return result;
}
default:
xshmfence_await(chain->images[i].shm_fence);
*image_index = i;
chain->images[i].busy = true;
- return VK_SUCCESS;
+ return x11_swapchain_result(chain, VK_SUCCESS);
}
}
if (timeout == UINT64_MAX) {
event = xcb_wait_for_special_event(chain->conn, chain->special_event);
if (!event)
- return VK_ERROR_OUT_OF_DATE_KHR;
+ return x11_swapchain_result(chain, VK_ERROR_OUT_OF_DATE_KHR);
} else {
event = xcb_poll_for_special_event(chain->conn, chain->special_event);
if (!event) {
int ret;
if (timeout == 0)
- return VK_NOT_READY;
+ return x11_swapchain_result(chain, VK_NOT_READY);
atimeout = wsi_get_absolute_timeout(timeout);
pfds.events = POLLIN;
ret = poll(&pfds, 1, timeout / 1000 / 1000);
if (ret == 0)
- return VK_TIMEOUT;
+ return x11_swapchain_result(chain, VK_TIMEOUT);
if (ret == -1)
- return VK_ERROR_OUT_OF_DATE_KHR;
+ return x11_swapchain_result(chain, VK_ERROR_OUT_OF_DATE_KHR);
/* If a non-special event happens, the fd will still
* poll. So recalculate the timeout now just in case.
}
}
+ /* Update the swapchain status here. We may catch non-fatal errors here,
+ * in which case we need to update the status and continue.
+ */
VkResult result = x11_handle_dri3_present_event(chain, (void *)event);
free(event);
- if (result != VK_SUCCESS)
- return result;
+ if (result < 0)
+ return x11_swapchain_result(chain, result);
}
}
uint32_t image_index;
VkResult result = wsi_queue_pull(&chain->acquire_queue,
&image_index, timeout);
- if (result != VK_SUCCESS) {
- return result;
- } else if (chain->status != VK_SUCCESS) {
+ if (result < 0 || result == VK_TIMEOUT) {
+ /* On error, the thread has shut down, so safe to update chain->status.
+ * Calling x11_swapchain_result with VK_TIMEOUT won't modify
+ * chain->status so that is also safe.
+ */
+ return x11_swapchain_result(chain, result);
+ } else if (chain->status < 0) {
return chain->status;
}
*image_index_out = image_index;
- return VK_SUCCESS;
+ return chain->status;
}
static VkResult
if (chain->base.present_mode == VK_PRESENT_MODE_IMMEDIATE_KHR)
options |= XCB_PRESENT_OPTION_ASYNC;
+#ifdef HAVE_DRI3_MODIFIERS
+ if (chain->has_dri3_modifiers)
+ options |= XCB_PRESENT_OPTION_SUBOPTIMAL;
+#endif
+
xshmfence_reset(image->shm_fence);
++chain->send_sbc;
xcb_flush(chain->conn);
- return VK_SUCCESS;
+ return x11_swapchain_result(chain, VK_SUCCESS);
}
static VkResult
x11_acquire_next_image(struct wsi_swapchain *anv_chain,
- uint64_t timeout,
- VkSemaphore semaphore,
+ const VkAcquireNextImageInfoKHR *info,
uint32_t *image_index)
{
struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
+ uint64_t timeout = info->timeout;
if (chain->threaded) {
return x11_acquire_next_image_from_queue(chain, image_index, timeout);
static VkResult
x11_queue_present(struct wsi_swapchain *anv_chain,
- VkQueue queue,
- uint32_t waitSemaphoreCount,
- const VkSemaphore *pWaitSemaphores,
uint32_t image_index,
const VkPresentRegionKHR *damage)
{
struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
- VkResult result;
-
- if (chain->use_prime_blit) {
- result = wsi_prime_image_blit_to_linear(&chain->base,
- &chain->images[image_index].base,
- queue,
- waitSemaphoreCount,
- pWaitSemaphores);
- if (result != VK_SUCCESS)
- return result;
- }
if (chain->threaded) {
wsi_queue_push(&chain->present_queue, image_index);
x11_manage_fifo_queues(void *state)
{
struct x11_swapchain *chain = state;
- VkResult result;
+ VkResult result = VK_SUCCESS;
assert(chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR);
- while (chain->status == VK_SUCCESS) {
+ while (chain->status >= 0) {
/* It should be safe to unconditionally block here. Later in the loop
* we blocks until the previous present has landed on-screen. At that
* point, we should have received IDLE_NOTIFY on all images presented
* before that point so the client should be able to acquire any image
* other than the currently presented one.
*/
- uint32_t image_index;
+ uint32_t image_index = 0;
result = wsi_queue_pull(&chain->present_queue, &image_index, INT64_MAX);
- if (result != VK_SUCCESS) {
+ assert(result != VK_TIMEOUT);
+ if (result < 0) {
goto fail;
- } else if (chain->status != VK_SUCCESS) {
+ } else if (chain->status < 0) {
+ /* The status can change underneath us if the swapchain is destroyed
+ * from another thread.
+ */
return NULL;
}
uint64_t target_msc = chain->last_present_msc + 1;
result = x11_present_to_x11(chain, image_index, target_msc);
- if (result != VK_SUCCESS)
+ if (result < 0)
goto fail;
while (chain->last_present_msc < target_msc) {
xcb_generic_event_t *event =
xcb_wait_for_special_event(chain->conn, chain->special_event);
- if (!event)
+ if (!event) {
+ result = VK_ERROR_OUT_OF_DATE_KHR;
goto fail;
+ }
result = x11_handle_dri3_present_event(chain, (void *)event);
free(event);
- if (result != VK_SUCCESS)
+ if (result < 0)
goto fail;
}
}
fail:
- chain->status = result;
+ x11_swapchain_result(chain, result);
wsi_queue_push(&chain->acquire_queue, UINT32_MAX);
return NULL;
x11_image_init(VkDevice device_h, struct x11_swapchain *chain,
const VkSwapchainCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks* pAllocator,
- struct x11_image *image)
+ const uint64_t *const *modifiers,
+ const uint32_t *num_modifiers,
+ int num_tranches, struct x11_image *image)
{
xcb_void_cookie_t cookie;
VkResult result;
uint32_t bpp = 32;
- if (chain->use_prime_blit) {
- result = wsi_create_prime_image(&chain->base, pCreateInfo, &image->base);
+ if (chain->base.use_prime_blit) {
+ bool use_modifier = num_tranches > 0;
+ result = wsi_create_prime_image(&chain->base, pCreateInfo, use_modifier, &image->base);
} else {
- result = wsi_create_native_image(&chain->base, pCreateInfo, &image->base);
+ result = wsi_create_native_image(&chain->base, pCreateInfo,
+ num_tranches, num_modifiers, modifiers,
+ &image->base);
}
- if (result != VK_SUCCESS)
+ if (result < 0)
return result;
image->pixmap = xcb_generate_id(chain->conn);
- cookie =
- xcb_dri3_pixmap_from_buffer_checked(chain->conn,
- image->pixmap,
- chain->window,
- image->base.size,
- pCreateInfo->imageExtent.width,
- pCreateInfo->imageExtent.height,
- image->base.row_pitch,
- chain->depth, bpp,
- image->base.fd);
+#ifdef HAVE_DRI3_MODIFIERS
+ if (image->base.drm_modifier != DRM_FORMAT_MOD_INVALID) {
+ /* If the image has a modifier, we must have DRI3 v1.2. */
+ assert(chain->has_dri3_modifiers);
+
+ cookie =
+ xcb_dri3_pixmap_from_buffers_checked(chain->conn,
+ image->pixmap,
+ chain->window,
+ image->base.num_planes,
+ pCreateInfo->imageExtent.width,
+ pCreateInfo->imageExtent.height,
+ image->base.row_pitches[0],
+ image->base.offsets[0],
+ image->base.row_pitches[1],
+ image->base.offsets[1],
+ image->base.row_pitches[2],
+ image->base.offsets[2],
+ image->base.row_pitches[3],
+ image->base.offsets[3],
+ chain->depth, bpp,
+ image->base.drm_modifier,
+ image->base.fds);
+ } else
+#endif
+ {
+ /* Without passing modifiers, we can't have multi-plane RGB images. */
+ assert(image->base.num_planes == 1);
+
+ cookie =
+ xcb_dri3_pixmap_from_buffer_checked(chain->conn,
+ image->pixmap,
+ chain->window,
+ image->base.sizes[0],
+ pCreateInfo->imageExtent.width,
+ pCreateInfo->imageExtent.height,
+ image->base.row_pitches[0],
+ chain->depth, bpp,
+ image->base.fds[0]);
+ }
+
xcb_discard_reply(chain->conn, cookie.sequence);
- image->base.fd = -1; /* XCB has now taken ownership of the FD */
+
+ /* XCB has now taken ownership of the FDs. */
+ for (int i = 0; i < image->base.num_planes; i++)
+ image->base.fds[i] = -1;
int fence_fd = xshmfence_alloc_shm();
if (fence_fd < 0)
wsi_destroy_image(&chain->base, &image->base);
}
+/**
+ * Query the X server for the DRM format modifiers usable for pixmaps of
+ * the given window/depth/bpp.
+ *
+ * Fills in up to two "tranches" (preference groups): window-specific
+ * modifiers first, then screen-wide modifiers.  On success the arrays
+ * stored in modifiers_in[] are allocated with pAllocator and are owned
+ * by the caller, which must vk_free() them.  On any failure — or when
+ * the server lacks DRI3 v1.2 — *num_tranches_in is set to 0 and nothing
+ * is left allocated.
+ *
+ * NOTE(review): vk_alpha is currently unused — confirm whether modifiers
+ * should be filtered by composite-alpha capability.
+ */
+static void
+wsi_x11_get_dri3_modifiers(struct wsi_x11_connection *wsi_conn,
+                           xcb_connection_t *conn, xcb_window_t window,
+                           uint8_t depth, uint8_t bpp,
+                           VkCompositeAlphaFlagsKHR vk_alpha,
+                           uint64_t **modifiers_in, uint32_t *num_modifiers_in,
+                           uint32_t *num_tranches_in,
+                           const VkAllocationCallbacks *pAllocator)
+{
+   if (!wsi_conn->has_dri3_modifiers)
+      goto out;
+
+#ifdef HAVE_DRI3_MODIFIERS
+   xcb_generic_error_t *error = NULL;
+   xcb_dri3_get_supported_modifiers_cookie_t mod_cookie =
+      xcb_dri3_get_supported_modifiers(conn, window, depth, bpp);
+   xcb_dri3_get_supported_modifiers_reply_t *mod_reply =
+      xcb_dri3_get_supported_modifiers_reply(conn, mod_cookie, &error);
+   free(error);
+
+   if (!mod_reply || (mod_reply->num_window_modifiers == 0 &&
+                      mod_reply->num_screen_modifiers == 0)) {
+      free(mod_reply);
+      goto out;
+   }
+
+   uint32_t n = 0;
+   uint32_t counts[2];
+   uint64_t *modifiers[2];
+
+   if (mod_reply->num_window_modifiers) {
+      counts[n] = mod_reply->num_window_modifiers;
+      modifiers[n] = vk_alloc(pAllocator,
+                              counts[n] * sizeof(uint64_t),
+                              8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+      if (!modifiers[n]) {
+         free(mod_reply);
+         goto out;
+      }
+
+      memcpy(modifiers[n],
+             xcb_dri3_get_supported_modifiers_window_modifiers(mod_reply),
+             counts[n] * sizeof(uint64_t));
+      n++;
+   }
+
+   if (mod_reply->num_screen_modifiers) {
+      counts[n] = mod_reply->num_screen_modifiers;
+      modifiers[n] = vk_alloc(pAllocator,
+                              counts[n] * sizeof(uint64_t),
+                              8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+      if (!modifiers[n]) {
+         /* Don't leak the window-modifier tranche allocated above. */
+         if (n > 0)
+            vk_free(pAllocator, modifiers[0]);
+         free(mod_reply);
+         goto out;
+      }
+
+      memcpy(modifiers[n],
+             xcb_dri3_get_supported_modifiers_screen_modifiers(mod_reply),
+             counts[n] * sizeof(uint64_t));
+      n++;
+   }
+
+   /* Unsigned index to match n's type and avoid a sign-compare warning. */
+   for (uint32_t i = 0; i < n; i++) {
+      modifiers_in[i] = modifiers[i];
+      num_modifiers_in[i] = counts[i];
+   }
+   *num_tranches_in = n;
+
+   free(mod_reply);
+   return;
+#endif
+out:
+   *num_tranches_in = 0;
+}
+
static VkResult
x11_swapchain_destroy(struct wsi_swapchain *anv_chain,
const VkAllocationCallbacks *pAllocator)
struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
xcb_void_cookie_t cookie;
- for (uint32_t i = 0; i < chain->base.image_count; i++)
- x11_image_finish(chain, pAllocator, &chain->images[i]);
-
if (chain->threaded) {
chain->status = VK_ERROR_OUT_OF_DATE_KHR;
/* Push a UINT32_MAX to wake up the manager */
wsi_queue_destroy(&chain->present_queue);
}
+ for (uint32_t i = 0; i < chain->base.image_count; i++)
+ x11_image_finish(chain, pAllocator, &chain->images[i]);
+
xcb_unregister_for_special_event(chain->conn, chain->special_event);
cookie = xcb_present_select_input_checked(chain->conn, chain->event_id,
chain->window,
const unsigned num_images = pCreateInfo->minImageCount;
- /* Check for whether or not we have a window up-front */
xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
+ struct wsi_x11_connection *wsi_conn =
+ wsi_x11_get_connection(wsi_device, conn);
+ if (!wsi_conn)
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+
+ /* Check for whether or not we have a window up-front */
xcb_window_t window = x11_surface_get_window(icd_surface);
xcb_get_geometry_reply_t *geometry =
xcb_get_geometry_reply(conn, xcb_get_geometry(conn, window), NULL);
chain->last_present_msc = 0;
chain->threaded = false;
chain->status = VK_SUCCESS;
+ chain->has_dri3_modifiers = wsi_conn->has_dri3_modifiers;
+ /* If we are reallocating from an old swapchain, then we inherit its
+ * last completion mode, to ensure we don't get into reallocation
+ * cycles. If we are starting anew, we set 'COPY', as that is the only
+ * mode which provokes reallocation when anything changes, to make
+ * sure we have the most optimal allocation.
+ */
+ struct x11_swapchain *old_chain = (void *)(intptr_t) pCreateInfo->oldSwapchain;
+ if (old_chain)
+ chain->last_present_mode = old_chain->last_present_mode;
+ else
+ chain->last_present_mode = XCB_PRESENT_COMPLETE_MODE_COPY;
- chain->use_prime_blit = false;
- if (!wsi_x11_check_dri3_compatible(conn, local_fd)) {
- chain->use_prime_blit = true;
- }
+ if (!wsi_x11_check_dri3_compatible(conn, local_fd))
+ chain->base.use_prime_blit = true;
chain->event_id = xcb_generate_id(chain->conn);
xcb_present_select_input(chain->conn, chain->event_id, chain->window,
(uint32_t []) { 0 });
xcb_discard_reply(chain->conn, cookie.sequence);
+ uint64_t *modifiers[2] = {NULL, NULL};
+ uint32_t num_modifiers[2] = {0, 0};
+ uint32_t num_tranches = 0;
+ if (wsi_device->supports_modifiers)
+ wsi_x11_get_dri3_modifiers(wsi_conn, conn, window, chain->depth, 32,
+ pCreateInfo->compositeAlpha,
+ modifiers, num_modifiers, &num_tranches,
+ pAllocator);
+
uint32_t image = 0;
for (; image < chain->base.image_count; image++) {
result = x11_image_init(device, chain, pCreateInfo, pAllocator,
+ (const uint64_t *const *)modifiers,
+ num_modifiers, num_tranches,
&chain->images[image]);
if (result != VK_SUCCESS)
goto fail_init_images;
}
}
+ for (int i = 0; i < ARRAY_SIZE(modifiers); i++)
+ vk_free(pAllocator, modifiers[i]);
*swapchain_out = &chain->base;
return VK_SUCCESS;
for (uint32_t j = 0; j < image; j++)
x11_image_finish(chain, pAllocator, &chain->images[j]);
+ for (int i = 0; i < ARRAY_SIZE(modifiers); i++)
+ vk_free(pAllocator, modifiers[i]);
+
fail_register:
xcb_unregister_for_special_event(chain->conn, chain->special_event);
}
wsi->base.get_support = x11_surface_get_support;
- wsi->base.get_capabilities = x11_surface_get_capabilities;
wsi->base.get_capabilities2 = x11_surface_get_capabilities2;
wsi->base.get_formats = x11_surface_get_formats;
wsi->base.get_formats2 = x11_surface_get_formats2;
wsi->base.get_present_modes = x11_surface_get_present_modes;
+ wsi->base.get_present_rectangles = x11_surface_get_present_rectangles;
wsi->base.create_swapchain = x11_surface_create_swapchain;
wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB] = &wsi->base;
if (wsi) {
struct hash_entry *entry;
hash_table_foreach(wsi->connections, entry)
- wsi_x11_connection_destroy(alloc, entry->data);
+ wsi_x11_connection_destroy(wsi_device, entry->data);
_mesa_hash_table_destroy(wsi->connections, NULL);