#include <fcntl.h>
#include <poll.h>
#include <xf86drm.h>
-#include <drm_fourcc.h>
+#include "drm-uapi/drm_fourcc.h"
#include "util/hash_table.h"
+#include "util/xmlconfig.h"
#include "vk_util.h"
#include "wsi_common_private.h"
}
static bool
-wsi_x11_check_dri3_compatible(xcb_connection_t *conn, int local_fd)
+wsi_x11_check_dri3_compatible(const struct wsi_device *wsi_dev,
+ xcb_connection_t *conn)
{
xcb_screen_iterator_t screen_iter =
xcb_setup_roots_iterator(xcb_get_setup(conn));
xcb_screen_t *screen = screen_iter.data;
int dri3_fd = wsi_dri3_open(conn, screen->root, None);
- if (dri3_fd != -1) {
- char *local_dev = drmGetRenderDeviceNameFromFd(local_fd);
- char *dri3_dev = drmGetRenderDeviceNameFromFd(dri3_fd);
- int ret;
-
- close(dri3_fd);
+ if (dri3_fd == -1)
+ return true;
- ret = strcmp(local_dev, dri3_dev);
+ bool match = wsi_device_matches_drm_fd(wsi_dev, dri3_fd);
- free(local_dev);
- free(dri3_dev);
+ close(dri3_fd);
- if (ret != 0)
- return false;
- }
- return true;
+ return match;
}
static struct wsi_x11_connection *
-wsi_x11_connection_create(const VkAllocationCallbacks *alloc,
+wsi_x11_connection_create(struct wsi_device *wsi_dev,
xcb_connection_t *conn)
{
xcb_query_extension_cookie_t dri3_cookie, pres_cookie, amd_cookie, nv_cookie;
bool has_present_v1_2 = false;
struct wsi_x11_connection *wsi_conn =
- vk_alloc(alloc, sizeof(*wsi_conn), 8,
+ vk_alloc(&wsi_dev->instance_alloc, sizeof(*wsi_conn), 8,
VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
if (!wsi_conn)
return NULL;
free(pres_reply);
free(amd_reply);
free(nv_reply);
- vk_free(alloc, wsi_conn);
+ vk_free(&wsi_dev->instance_alloc, wsi_conn);
return NULL;
}
}
static void
-wsi_x11_connection_destroy(const VkAllocationCallbacks *alloc,
+wsi_x11_connection_destroy(struct wsi_device *wsi_dev,
struct wsi_x11_connection *conn)
{
- vk_free(alloc, conn);
+ vk_free(&wsi_dev->instance_alloc, conn);
}
static bool
static struct wsi_x11_connection *
wsi_x11_get_connection(struct wsi_device *wsi_dev,
- const VkAllocationCallbacks *alloc,
xcb_connection_t *conn)
{
struct wsi_x11 *wsi =
pthread_mutex_unlock(&wsi->mutex);
struct wsi_x11_connection *wsi_conn =
- wsi_x11_connection_create(alloc, conn);
+ wsi_x11_connection_create(wsi_dev, conn);
if (!wsi_conn)
return NULL;
entry = _mesa_hash_table_search(wsi->connections, conn);
if (entry) {
/* Oops, someone raced us to it */
- wsi_x11_connection_destroy(alloc, wsi_conn);
+ wsi_x11_connection_destroy(wsi_dev, wsi_conn);
} else {
entry = _mesa_hash_table_insert(wsi->connections, conn, wsi_conn);
}
VkBool32 wsi_get_physical_device_xcb_presentation_support(
struct wsi_device *wsi_device,
- VkAllocationCallbacks *alloc,
uint32_t queueFamilyIndex,
- int fd,
- bool can_handle_different_gpu,
xcb_connection_t* connection,
xcb_visualid_t visual_id)
{
struct wsi_x11_connection *wsi_conn =
- wsi_x11_get_connection(wsi_device, alloc, connection);
+ wsi_x11_get_connection(wsi_device, connection);
if (!wsi_conn)
return false;
if (!wsi_x11_check_for_dri3(wsi_conn))
return false;
- if (!can_handle_different_gpu)
- if (!wsi_x11_check_dri3_compatible(connection, fd))
- return false;
-
unsigned visual_depth;
if (!connection_get_visualtype(connection, visual_id, &visual_depth))
return false;
static VkResult
x11_surface_get_support(VkIcdSurfaceBase *icd_surface,
struct wsi_device *wsi_device,
- const VkAllocationCallbacks *alloc,
uint32_t queueFamilyIndex,
- int local_fd,
VkBool32* pSupported)
{
xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
xcb_window_t window = x11_surface_get_window(icd_surface);
struct wsi_x11_connection *wsi_conn =
- wsi_x11_get_connection(wsi_device, alloc, conn);
+ wsi_x11_get_connection(wsi_device, conn);
if (!wsi_conn)
return VK_ERROR_OUT_OF_HOST_MEMORY;
static VkResult
x11_surface_get_capabilities(VkIcdSurfaceBase *icd_surface,
+ struct wsi_device *wsi_device,
VkSurfaceCapabilitiesKHR *caps)
{
xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
*/
caps->currentExtent = (VkExtent2D) { -1, -1 };
caps->minImageExtent = (VkExtent2D) { 1, 1 };
- /* This is the maximum supported size on Intel */
- caps->maxImageExtent = (VkExtent2D) { 1 << 14, 1 << 14 };
+ caps->maxImageExtent = (VkExtent2D) {
+ wsi_device->maxImageDimension2D,
+ wsi_device->maxImageDimension2D,
+ };
}
free(err);
free(geom);
VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
}
- /* For true mailbox mode, we need at least 4 images:
- * 1) One to scan out from
- * 2) One to have queued for scan-out
- * 3) One to be currently held by the X server
- * 4) One to render to
+ /* For IMMEDIATE and FIFO, most games work in a pipelined manner where they
+ * can produce frames at a rate of 1/MAX(CPU duration, GPU duration), but
+ * the render latency is CPU duration + GPU duration.
+ *
+ * This means that with scanout from pageflipping we need 3 frames to run
+ * full speed:
+ * 1) CPU rendering work
+ * 2) GPU rendering work
+ * 3) scanout
+ *
+ * Once we have a nonblocking acquire that returns a semaphore we can merge
+ * 1 and 3. Hence the ideal implementation needs only 2 images, but games
+ * cannot tell that we currently do not have an ideal implementation and
+ * hence they need to allocate 3 images. So let us do it for them.
+ *
+ * This is a tradeoff as it uses more memory than needed for non-fullscreen
+ * and non-performance intensive applications.
*/
- caps->minImageCount = 2;
+ caps->minImageCount = 3;
/* There is no real maximum */
caps->maxImageCount = 0;
+ if (wsi_device->x11.override_minImageCount)
+ caps->minImageCount = wsi_device->x11.override_minImageCount;
+
caps->supportedTransforms = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
caps->currentTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
caps->maxImageArrayLayers = 1;
VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
VK_IMAGE_USAGE_SAMPLED_BIT |
VK_IMAGE_USAGE_TRANSFER_DST_BIT |
+ VK_IMAGE_USAGE_STORAGE_BIT |
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
return VK_SUCCESS;
static VkResult
x11_surface_get_capabilities2(VkIcdSurfaceBase *icd_surface,
+ struct wsi_device *wsi_device,
const void *info_next,
VkSurfaceCapabilities2KHR *caps)
{
assert(caps->sType == VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR);
- return x11_surface_get_capabilities(icd_surface, &caps->surfaceCapabilities);
+ VkResult result =
+ x11_surface_get_capabilities(icd_surface, wsi_device,
+ &caps->surfaceCapabilities);
+
+ vk_foreach_struct(ext, caps->pNext) {
+ switch (ext->sType) {
+ case VK_STRUCTURE_TYPE_SURFACE_PROTECTED_CAPABILITIES_KHR: {
+ VkSurfaceProtectedCapabilitiesKHR *protected = (void *)ext;
+ protected->supportsProtected = VK_FALSE;
+ break;
+ }
+
+ default:
+ /* Ignored */
+ break;
+ }
+ }
+
+ return result;
+}
+
+static void
+get_sorted_vk_formats(struct wsi_device *wsi_device, VkFormat *sorted_formats)
+{
+ memcpy(sorted_formats, formats, sizeof(formats));
+
+ if (wsi_device->force_bgra8_unorm_first) {
+ for (unsigned i = 0; i < ARRAY_SIZE(formats); i++) {
+ if (sorted_formats[i] == VK_FORMAT_B8G8R8A8_UNORM) {
+ sorted_formats[i] = sorted_formats[0];
+ sorted_formats[0] = VK_FORMAT_B8G8R8A8_UNORM;
+ break;
+ }
+ }
+ }
}
static VkResult
{
VK_OUTARRAY_MAKE(out, pSurfaceFormats, pSurfaceFormatCount);
- for (unsigned i = 0; i < ARRAY_SIZE(formats); i++) {
+ VkFormat sorted_formats[ARRAY_SIZE(formats)];
+ get_sorted_vk_formats(wsi_device, sorted_formats);
+
+ for (unsigned i = 0; i < ARRAY_SIZE(sorted_formats); i++) {
vk_outarray_append(&out, f) {
- f->format = formats[i];
+ f->format = sorted_formats[i];
f->colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
}
}
{
VK_OUTARRAY_MAKE(out, pSurfaceFormats, pSurfaceFormatCount);
- for (unsigned i = 0; i < ARRAY_SIZE(formats); i++) {
+ VkFormat sorted_formats[ARRAY_SIZE(formats)];
+ get_sorted_vk_formats(wsi_device, sorted_formats);
+
+ for (unsigned i = 0; i < ARRAY_SIZE(sorted_formats); i++) {
vk_outarray_append(&out, f) {
assert(f->sType == VK_STRUCTURE_TYPE_SURFACE_FORMAT_2_KHR);
- f->surfaceFormat.format = formats[i];
+ f->surfaceFormat.format = sorted_formats[i];
f->surfaceFormat.colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
}
}
VK_INCOMPLETE : VK_SUCCESS;
}
+static bool
+x11_surface_is_local_to_gpu(struct wsi_device *wsi_dev,
+ xcb_connection_t *conn)
+{
+ struct wsi_x11_connection *wsi_conn =
+ wsi_x11_get_connection(wsi_dev, conn);
+
+ if (!wsi_conn)
+ return false;
+
+ if (!wsi_x11_check_for_dri3(wsi_conn))
+ return false;
+
+ if (!wsi_x11_check_dri3_compatible(wsi_dev, conn))
+ return false;
+
+ return true;
+}
+
+static VkResult
+x11_surface_get_present_rectangles(VkIcdSurfaceBase *icd_surface,
+ struct wsi_device *wsi_device,
+ uint32_t* pRectCount,
+ VkRect2D* pRects)
+{
+ xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
+ xcb_window_t window = x11_surface_get_window(icd_surface);
+ VK_OUTARRAY_MAKE(out, pRects, pRectCount);
+
+ if (x11_surface_is_local_to_gpu(wsi_device, conn)) {
+ vk_outarray_append(&out, rect) {
+ xcb_generic_error_t *err = NULL;
+ xcb_get_geometry_cookie_t geom_cookie = xcb_get_geometry(conn, window);
+ xcb_get_geometry_reply_t *geom =
+ xcb_get_geometry_reply(conn, geom_cookie, &err);
+ free(err);
+ if (geom) {
+ *rect = (VkRect2D) {
+ .offset = { 0, 0 },
+ .extent = { geom->width, geom->height },
+ };
+ } else {
+ /* This can happen if the client didn't wait for the configure event
+ * to come back from the compositor. In that case, we don't know the
+ * size of the window so we just return valid "I don't know" stuff.
+ */
+ *rect = (VkRect2D) {
+ .offset = { 0, 0 },
+ .extent = { -1, -1 },
+ };
+ }
+ free(geom);
+ }
+ }
+
+ return vk_outarray_status(&out);
+}
+
VkResult wsi_create_xcb_surface(const VkAllocationCallbacks *pAllocator,
const VkXcbSurfaceCreateInfoKHR *pCreateInfo,
VkSurfaceKHR *pSurface)
uint64_t last_present_msc;
uint32_t stamp;
- bool threaded;
+ bool has_present_queue;
+ bool has_acquire_queue;
VkResult status;
xcb_present_complete_mode_t last_present_mode;
struct wsi_queue present_queue;
struct x11_image images[0];
};
+WSI_DEFINE_NONDISP_HANDLE_CASTS(x11_swapchain, VkSwapchainKHR)
/**
* Update the swapchain status with the result of an operation, and return
for (unsigned i = 0; i < chain->base.image_count; i++) {
if (chain->images[i].pixmap == idle->pixmap) {
chain->images[i].busy = false;
- if (chain->threaded)
+ if (chain->has_acquire_queue)
wsi_queue_push(&chain->acquire_queue, i);
break;
}
}
-static uint64_t wsi_get_current_time(void)
-{
- uint64_t current_time;
- struct timespec tv;
-
- clock_gettime(CLOCK_MONOTONIC, &tv);
- current_time = tv.tv_nsec + tv.tv_sec*1000000000ull;
- return current_time;
-}
-
static uint64_t wsi_get_absolute_timeout(uint64_t timeout)
{
- uint64_t current_time = wsi_get_current_time();
+ uint64_t current_time = wsi_common_get_current_time();
timeout = MIN2(UINT64_MAX - current_time, timeout);
/* If a non-special event happens, the fd will still
* poll. So recalculate the timeout now just in case.
*/
- uint64_t current_time = wsi_get_current_time();
+ uint64_t current_time = wsi_common_get_current_time();
if (atimeout > current_time)
timeout = atimeout - current_time;
else
x11_acquire_next_image_from_queue(struct x11_swapchain *chain,
uint32_t *image_index_out, uint64_t timeout)
{
- assert(chain->threaded);
+ assert(chain->has_acquire_queue);
uint32_t image_index;
VkResult result = wsi_queue_pull(&chain->acquire_queue,
options |= XCB_PRESENT_OPTION_SUBOPTIMAL;
#endif
+ /* Poll for any available event and update the swapchain status. This could
+ * update the status of the swapchain to SUBOPTIMAL or OUT_OF_DATE if the
+ * associated X11 surface has been resized.
+ */
+ xcb_generic_event_t *event;
+ while ((event = xcb_poll_for_special_event(chain->conn, chain->special_event))) {
+ VkResult result = x11_handle_dri3_present_event(chain, (void *)event);
+ free(event);
+ if (result < 0)
+ return x11_swapchain_result(chain, result);
+ x11_swapchain_result(chain, result);
+ }
+
xshmfence_reset(image->shm_fence);
++chain->send_sbc;
divisor,
remainder, 0, NULL);
xcb_discard_reply(chain->conn, cookie.sequence);
- image->busy = true;
xcb_flush(chain->conn);
static VkResult
x11_acquire_next_image(struct wsi_swapchain *anv_chain,
- uint64_t timeout,
- VkSemaphore semaphore,
+ const VkAcquireNextImageInfoKHR *info,
uint32_t *image_index)
{
struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
+ uint64_t timeout = info->timeout;
+
+ /* If the swapchain is in an error state, don't go any further. */
+ if (chain->status < 0)
+ return chain->status;
- if (chain->threaded) {
+ if (chain->has_acquire_queue) {
return x11_acquire_next_image_from_queue(chain, image_index, timeout);
} else {
return x11_acquire_next_image_poll_x11(chain, image_index, timeout);
{
struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
- if (chain->threaded) {
+ /* If the swapchain is in an error state, don't go any further. */
+ if (chain->status < 0)
+ return chain->status;
+
+ chain->images[image_index].busy = true;
+ if (chain->has_present_queue) {
wsi_queue_push(&chain->present_queue, image_index);
return chain->status;
} else {
x11_manage_fifo_queues(void *state)
{
struct x11_swapchain *chain = state;
- VkResult result;
-
- assert(chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR);
+ VkResult result = VK_SUCCESS;
+ assert(chain->has_present_queue);
while (chain->status >= 0) {
/* It should be safe to unconditionally block here. Later in the loop
* we blocks until the previous present has landed on-screen. At that
return NULL;
}
- uint64_t target_msc = chain->last_present_msc + 1;
+ if (chain->base.present_mode == VK_PRESENT_MODE_MAILBOX_KHR) {
+ result = chain->base.wsi->WaitForFences(chain->base.device, 1,
+ &chain->base.fences[image_index],
+ true, UINT64_MAX);
+ if (result != VK_SUCCESS) {
+ result = VK_ERROR_OUT_OF_DATE_KHR;
+ goto fail;
+ }
+ }
+
+ uint64_t target_msc = 0;
+ if (chain->has_acquire_queue)
+ target_msc = chain->last_present_msc + 1;
+
result = x11_present_to_x11(chain, image_index, target_msc);
if (result < 0)
goto fail;
- while (chain->last_present_msc < target_msc) {
- xcb_generic_event_t *event =
- xcb_wait_for_special_event(chain->conn, chain->special_event);
- if (!event) {
- result = VK_ERROR_OUT_OF_DATE_KHR;
- goto fail;
+ if (chain->has_acquire_queue) {
+ while (chain->last_present_msc < target_msc) {
+ xcb_generic_event_t *event =
+ xcb_wait_for_special_event(chain->conn, chain->special_event);
+ if (!event) {
+ result = VK_ERROR_OUT_OF_DATE_KHR;
+ goto fail;
+ }
+
+ result = x11_handle_dri3_present_event(chain, (void *)event);
+ free(event);
+ if (result < 0)
+ goto fail;
}
-
- result = x11_handle_dri3_present_event(chain, (void *)event);
- free(event);
- if (result < 0)
- goto fail;
}
}
fail:
- result = x11_swapchain_result(chain, result);
- wsi_queue_push(&chain->acquire_queue, UINT32_MAX);
+ x11_swapchain_result(chain, result);
+ if (chain->has_acquire_queue)
+ wsi_queue_push(&chain->acquire_queue, UINT32_MAX);
return NULL;
}
struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
xcb_void_cookie_t cookie;
- if (chain->threaded) {
+ if (chain->has_present_queue) {
chain->status = VK_ERROR_OUT_OF_DATE_KHR;
/* Push a UINT32_MAX to wake up the manager */
wsi_queue_push(&chain->present_queue, UINT32_MAX);
pthread_join(chain->queue_manager, NULL);
- wsi_queue_destroy(&chain->acquire_queue);
+
+ if (chain->has_acquire_queue)
+ wsi_queue_destroy(&chain->acquire_queue);
wsi_queue_destroy(&chain->present_queue);
}
return VK_SUCCESS;
}
+static void
+wsi_x11_set_adaptive_sync_property(xcb_connection_t *conn,
+ xcb_drawable_t drawable,
+ uint32_t state)
+{
+ static char const name[] = "_VARIABLE_REFRESH";
+ xcb_intern_atom_cookie_t cookie;
+ xcb_intern_atom_reply_t* reply;
+ xcb_void_cookie_t check;
+
+ cookie = xcb_intern_atom(conn, 0, strlen(name), name);
+ reply = xcb_intern_atom_reply(conn, cookie, NULL);
+ if (reply == NULL)
+ return;
+
+ if (state)
+ check = xcb_change_property_checked(conn, XCB_PROP_MODE_REPLACE,
+ drawable, reply->atom,
+ XCB_ATOM_CARDINAL, 32, 1, &state);
+ else
+ check = xcb_delete_property_checked(conn, drawable, reply->atom);
+
+ xcb_discard_reply(conn, check.sequence);
+ free(reply);
+}
+
+
static VkResult
x11_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
VkDevice device,
struct wsi_device *wsi_device,
- int local_fd,
const VkSwapchainCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks* pAllocator,
struct wsi_swapchain **swapchain_out)
struct x11_swapchain *chain;
xcb_void_cookie_t cookie;
VkResult result;
+ VkPresentModeKHR present_mode = wsi_swapchain_get_present_mode(wsi_device, pCreateInfo);
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR);
- const unsigned num_images = pCreateInfo->minImageCount;
+ unsigned num_images = pCreateInfo->minImageCount;
+ if (wsi_device->x11.strict_imageCount)
+ num_images = pCreateInfo->minImageCount;
+ else if (present_mode == VK_PRESENT_MODE_MAILBOX_KHR)
+ num_images = MAX2(num_images, 5);
xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
struct wsi_x11_connection *wsi_conn =
- wsi_x11_get_connection(wsi_device, pAllocator, conn);
+ wsi_x11_get_connection(wsi_device, conn);
if (!wsi_conn)
return VK_ERROR_OUT_OF_HOST_MEMORY;
chain->base.get_wsi_image = x11_get_wsi_image;
chain->base.acquire_next_image = x11_acquire_next_image;
chain->base.queue_present = x11_queue_present;
- chain->base.present_mode = pCreateInfo->presentMode;
+ chain->base.present_mode = present_mode;
chain->base.image_count = num_images;
chain->conn = conn;
chain->window = window;
chain->extent = pCreateInfo->imageExtent;
chain->send_sbc = 0;
chain->last_present_msc = 0;
- chain->threaded = false;
+ chain->has_acquire_queue = false;
+ chain->has_present_queue = false;
chain->status = VK_SUCCESS;
chain->has_dri3_modifiers = wsi_conn->has_dri3_modifiers;
* mode which provokes reallocation when anything changes, to make
* sure we have the most optimal allocation.
*/
- struct x11_swapchain *old_chain = (void *) pCreateInfo->oldSwapchain;
+ WSI_FROM_HANDLE(x11_swapchain, old_chain, pCreateInfo->oldSwapchain);
if (old_chain)
chain->last_present_mode = old_chain->last_present_mode;
else
chain->last_present_mode = XCB_PRESENT_COMPLETE_MODE_COPY;
- if (!wsi_x11_check_dri3_compatible(conn, local_fd))
+ if (!wsi_x11_check_dri3_compatible(wsi_device, conn))
chain->base.use_prime_blit = true;
chain->event_id = xcb_generate_id(chain->conn);
goto fail_init_images;
}
- if (chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR) {
- chain->threaded = true;
+ if (chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR ||
+ chain->base.present_mode == VK_PRESENT_MODE_MAILBOX_KHR) {
+ chain->has_present_queue = true;
/* Initialize our queues. We make them base.image_count + 1 because we will
* occasionally use UINT32_MAX to signal the other thread that an error
* has occurred and we don't want an overflow.
*/
int ret;
- ret = wsi_queue_init(&chain->acquire_queue, chain->base.image_count + 1);
- if (ret) {
- goto fail_init_images;
- }
-
ret = wsi_queue_init(&chain->present_queue, chain->base.image_count + 1);
if (ret) {
- wsi_queue_destroy(&chain->acquire_queue);
goto fail_init_images;
}
- for (unsigned i = 0; i < chain->base.image_count; i++)
- wsi_queue_push(&chain->acquire_queue, i);
+ if (chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR) {
+ chain->has_acquire_queue = true;
+
+ ret = wsi_queue_init(&chain->acquire_queue, chain->base.image_count + 1);
+ if (ret) {
+ wsi_queue_destroy(&chain->present_queue);
+ goto fail_init_images;
+ }
+
+ for (unsigned i = 0; i < chain->base.image_count; i++)
+ wsi_queue_push(&chain->acquire_queue, i);
+ }
ret = pthread_create(&chain->queue_manager, NULL,
x11_manage_fifo_queues, chain);
if (ret) {
wsi_queue_destroy(&chain->present_queue);
- wsi_queue_destroy(&chain->acquire_queue);
+ if (chain->has_acquire_queue)
+ wsi_queue_destroy(&chain->acquire_queue);
+
goto fail_init_images;
}
}
+ assert(chain->has_present_queue || !chain->has_acquire_queue);
+
for (int i = 0; i < ARRAY_SIZE(modifiers); i++)
vk_free(pAllocator, modifiers[i]);
+
+ /* It is safe to set it here as only one swapchain can be associated with
+ * the window, and swapchain creation does the association. At this point
+ * we know the creation is going to succeed. */
+ wsi_x11_set_adaptive_sync_property(conn, window,
+ wsi_device->enable_adaptive_sync);
+
*swapchain_out = &chain->base;
return VK_SUCCESS;
VkResult
wsi_x11_init_wsi(struct wsi_device *wsi_device,
- const VkAllocationCallbacks *alloc)
+ const VkAllocationCallbacks *alloc,
+ const struct driOptionCache *dri_options)
{
struct wsi_x11 *wsi;
VkResult result;
goto fail_mutex;
}
+ if (dri_options) {
+ if (driCheckOption(dri_options, "vk_x11_override_min_image_count", DRI_INT)) {
+ wsi_device->x11.override_minImageCount =
+ driQueryOptioni(dri_options, "vk_x11_override_min_image_count");
+ }
+ if (driCheckOption(dri_options, "vk_x11_strict_image_count", DRI_BOOL)) {
+ wsi_device->x11.strict_imageCount =
+ driQueryOptionb(dri_options, "vk_x11_strict_image_count");
+ }
+ }
+
wsi->base.get_support = x11_surface_get_support;
wsi->base.get_capabilities2 = x11_surface_get_capabilities2;
wsi->base.get_formats = x11_surface_get_formats;
wsi->base.get_formats2 = x11_surface_get_formats2;
wsi->base.get_present_modes = x11_surface_get_present_modes;
+ wsi->base.get_present_rectangles = x11_surface_get_present_rectangles;
wsi->base.create_swapchain = x11_surface_create_swapchain;
wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB] = &wsi->base;
(struct wsi_x11 *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB];
if (wsi) {
- struct hash_entry *entry;
hash_table_foreach(wsi->connections, entry)
- wsi_x11_connection_destroy(alloc, entry->data);
+ wsi_x11_connection_destroy(wsi_device, entry->data);
_mesa_hash_table_destroy(wsi->connections, NULL);