#include <unistd.h>
#include <errno.h>
#include <string.h>
-
+#include <fcntl.h>
#include <poll.h>
+#include <xf86drm.h>
#include "util/hash_table.h"
#include "wsi_common.h"
#include "wsi_common_x11.h"
+#include "wsi_common_queue.h"
#define typed_memcpy(dest, src, count) ({ \
- static_assert(sizeof(*src) == sizeof(*dest), ""); \
+ STATIC_ASSERT(sizeof(*src) == sizeof(*dest)); \
memcpy((dest), (src), (count) * sizeof(*(src))); \
})
struct wsi_x11_connection {
bool has_dri3;
bool has_present;
+ bool is_proprietary_x11;
};
struct wsi_x11 {
struct hash_table *connections;
};
+
+/** wsi_dri3_open
+ *
+ * Wrapper around xcb_dri3_open: asks the X server for a DRM fd for the
+ * given root window/provider and returns it with CLOEXEC set, or -1 on
+ * any failure.
+ */
+static int
+wsi_dri3_open(xcb_connection_t *conn,
+              xcb_window_t root,
+              uint32_t provider)
+{
+   xcb_dri3_open_cookie_t cookie = xcb_dri3_open(conn, root, provider);
+   xcb_dri3_open_reply_t *reply = xcb_dri3_open_reply(conn, cookie, NULL);
+
+   /* No reply at all means the request itself failed. */
+   if (reply == NULL)
+      return -1;
+
+   /* We expect the server to hand back exactly one fd. */
+   int fd = (reply->nfd == 1) ? xcb_dri3_open_reply_fds(conn, reply)[0] : -1;
+   free(reply);
+
+   if (fd >= 0)
+      fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
+
+   return fd;
+}
+
+/* Returns true when the X server's DRI3 device looks like the same render
+ * device as `local_fd` (compared by render-node name).  When the DRI3 fd
+ * cannot be opened at all we optimistically report compatible.
+ */
+static bool
+wsi_x11_check_dri3_compatible(xcb_connection_t *conn, int local_fd)
+{
+   xcb_screen_iterator_t screen_iter =
+      xcb_setup_roots_iterator(xcb_get_setup(conn));
+   xcb_screen_t *screen = screen_iter.data;
+
+   int dri3_fd = wsi_dri3_open(conn, screen->root, None);
+   if (dri3_fd != -1) {
+      char *local_dev = drmGetRenderDeviceNameFromFd(local_fd);
+      char *dri3_dev = drmGetRenderDeviceNameFromFd(dri3_fd);
+      int ret;
+
+      close(dri3_fd);
+
+      /* drmGetRenderDeviceNameFromFd() may return NULL (e.g. no render
+       * node); strcmp() on NULL is undefined behavior, so treat a missing
+       * name as "different device".
+       */
+      if (local_dev && dri3_dev)
+         ret = strcmp(local_dev, dri3_dev);
+      else
+         ret = -1;
+
+      /* free(NULL) is a no-op, so these are safe unconditionally. */
+      free(local_dev);
+      free(dri3_dev);
+
+      if (ret != 0)
+         return false;
+   }
+   return true;
+}
+
static struct wsi_x11_connection *
wsi_x11_connection_create(const VkAllocationCallbacks *alloc,
xcb_connection_t *conn)
{
- xcb_query_extension_cookie_t dri3_cookie, pres_cookie;
- xcb_query_extension_reply_t *dri3_reply, *pres_reply;
+ xcb_query_extension_cookie_t dri3_cookie, pres_cookie, amd_cookie, nv_cookie;
+ xcb_query_extension_reply_t *dri3_reply, *pres_reply, *amd_reply, *nv_reply;
struct wsi_x11_connection *wsi_conn =
vk_alloc(alloc, sizeof(*wsi_conn), 8,
dri3_cookie = xcb_query_extension(conn, 4, "DRI3");
pres_cookie = xcb_query_extension(conn, 7, "PRESENT");
+ /* We try to be nice to users and emit a warning if they try to use a
+ * Vulkan application on a system without DRI3 enabled. However, this ends
+ * up spewing the warning when a user has, for example, both Intel
+ * integrated graphics and a discrete card with proprietary drivers and are
+ * running on the discrete card with the proprietary DDX. In this case, we
+ * really don't want to print the warning because it just confuses users.
+ * As a heuristic to detect this case, we check for a couple of proprietary
+ * X11 extensions.
+ */
+ amd_cookie = xcb_query_extension(conn, 11, "ATIFGLRXDRI");
+ nv_cookie = xcb_query_extension(conn, 10, "NV-CONTROL");
+
dri3_reply = xcb_query_extension_reply(conn, dri3_cookie, NULL);
pres_reply = xcb_query_extension_reply(conn, pres_cookie, NULL);
- if (dri3_reply == NULL || pres_reply == NULL) {
+ amd_reply = xcb_query_extension_reply(conn, amd_cookie, NULL);
+ nv_reply = xcb_query_extension_reply(conn, nv_cookie, NULL);
+ if (!dri3_reply || !pres_reply) {
free(dri3_reply);
free(pres_reply);
+ free(amd_reply);
+ free(nv_reply);
vk_free(alloc, wsi_conn);
return NULL;
}
wsi_conn->has_dri3 = dri3_reply->present != 0;
wsi_conn->has_present = pres_reply->present != 0;
+ wsi_conn->is_proprietary_x11 = false;
+ if (amd_reply && amd_reply->present)
+ wsi_conn->is_proprietary_x11 = true;
+ if (nv_reply && nv_reply->present)
+ wsi_conn->is_proprietary_x11 = true;
free(dri3_reply);
free(pres_reply);
+ free(amd_reply);
+ free(nv_reply);
return wsi_conn;
}
vk_free(alloc, conn);
}
+/* Returns true when the connection has DRI3.  When it does not, warns on
+ * stderr — but only on non-proprietary stacks, since the warning would just
+ * confuse users running the proprietary DDX on another GPU.
+ */
+static bool
+wsi_x11_check_for_dri3(struct wsi_x11_connection *wsi_conn)
+{
+   if (!wsi_conn->has_dri3) {
+      if (!wsi_conn->is_proprietary_x11) {
+         fprintf(stderr, "vulkan: No DRI3 support detected - required for presentation\n"
+                 "Note: you can probably enable DRI3 in your Xorg config\n");
+      }
+      return false;
+   }
+
+   return true;
+}
+
static struct wsi_x11_connection *
wsi_x11_get_connection(struct wsi_device *wsi_dev,
const VkAllocationCallbacks *alloc,
static const VkPresentModeKHR present_modes[] = {
VK_PRESENT_MODE_IMMEDIATE_KHR,
VK_PRESENT_MODE_MAILBOX_KHR,
+ VK_PRESENT_MODE_FIFO_KHR,
};
static xcb_screen_t *
struct wsi_device *wsi_device,
VkAllocationCallbacks *alloc,
uint32_t queueFamilyIndex,
+ int fd,
+ bool can_handle_different_gpu,
xcb_connection_t* connection,
xcb_visualid_t visual_id)
{
struct wsi_x11_connection *wsi_conn =
wsi_x11_get_connection(wsi_device, alloc, connection);
- if (!wsi_conn->has_dri3) {
- fprintf(stderr, "vulkan: No DRI3 support\n");
+ if (!wsi_conn)
return false;
- }
+
+ if (!wsi_x11_check_for_dri3(wsi_conn))
+ return false;
+
+ if (!can_handle_different_gpu)
+ if (!wsi_x11_check_dri3_compatible(connection, fd))
+ return false;
unsigned visual_depth;
if (!connection_get_visualtype(connection, visual_id, &visual_depth))
struct wsi_device *wsi_device,
const VkAllocationCallbacks *alloc,
uint32_t queueFamilyIndex,
+ int local_fd,
+ bool can_handle_different_gpu,
VkBool32* pSupported)
{
xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
if (!wsi_conn)
return VK_ERROR_OUT_OF_HOST_MEMORY;
- if (!wsi_conn->has_dri3) {
- fprintf(stderr, "vulkan: No DRI3 support\n");
+ if (!wsi_x11_check_for_dri3(wsi_conn)) {
*pSupported = false;
return VK_SUCCESS;
}
+ if (!can_handle_different_gpu)
+ if (!wsi_x11_check_dri3_compatible(conn, local_fd))
+ return false;
+
unsigned visual_depth;
if (!get_visualtype_for_window(conn, window, &visual_depth)) {
*pSupported = false;
xcb_visualtype_t *visual =
get_visualtype_for_window(conn, window, &visual_depth);
+ if (!visual)
+ return VK_ERROR_SURFACE_LOST_KHR;
+
geom = xcb_get_geometry_reply(conn, geom_cookie, &err);
if (geom) {
VkExtent2D extent = { geom->width, geom->height };
*/
caps->currentExtent = (VkExtent2D) { -1, -1 };
caps->minImageExtent = (VkExtent2D) { 1, 1 };
- caps->maxImageExtent = (VkExtent2D) { INT16_MAX, INT16_MAX };
+ /* This is the maximum supported size on Intel */
+ caps->maxImageExtent = (VkExtent2D) { 1 << 14, 1 << 14 };
}
free(err);
free(geom);
surface->connection = pCreateInfo->connection;
surface->window = pCreateInfo->window;
- *pSurface = _VkIcdSurfaceBase_to_handle(&surface->base);
+ *pSurface = VkIcdSurfaceBase_to_handle(&surface->base);
return VK_SUCCESS;
}
surface->dpy = pCreateInfo->dpy;
surface->window = pCreateInfo->window;
- *pSurface = _VkIcdSurfaceBase_to_handle(&surface->base);
+ *pSurface = VkIcdSurfaceBase_to_handle(&surface->base);
return VK_SUCCESS;
}
struct x11_image {
VkImage image;
+ VkImage linear_image; // for prime
VkDeviceMemory memory;
+ VkDeviceMemory linear_memory; // for prime
xcb_pixmap_t pixmap;
bool busy;
struct xshmfence * shm_fence;
xcb_gc_t gc;
uint32_t depth;
VkExtent2D extent;
- uint32_t image_count;
xcb_present_event_t event_id;
xcb_special_event_t * special_event;
uint64_t send_sbc;
+ uint64_t last_present_msc;
uint32_t stamp;
+ bool threaded;
+ VkResult status;
+ struct wsi_queue present_queue;
+ struct wsi_queue acquire_queue;
+ pthread_t queue_manager;
+
struct x11_image images[0];
};
uint32_t* pCount, VkImage *pSwapchainImages)
{
struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
+ uint32_t ret_count;
+ VkResult result;
if (pSwapchainImages == NULL) {
- *pCount = chain->image_count;
+ *pCount = chain->base.image_count;
return VK_SUCCESS;
}
- assert(chain->image_count <= *pCount);
- for (uint32_t i = 0; i < chain->image_count; i++)
+ result = VK_SUCCESS;
+ ret_count = chain->base.image_count;
+ if (chain->base.image_count > *pCount) {
+ ret_count = *pCount;
+ result = VK_INCOMPLETE;
+ }
+
+ for (uint32_t i = 0; i < ret_count; i++)
pSwapchainImages[i] = chain->images[i].image;
- *pCount = chain->image_count;
+ return result;
+}
- return VK_SUCCESS;
+/* Returns both the presentable image and its linear shadow copy (used for
+ * the prime/cross-GPU path) for the given swapchain slot.
+ */
+static void
+x11_get_image_and_linear(struct wsi_swapchain *drv_chain,
+                         int imageIndex, VkImage *image, VkImage *linear_image)
+{
+   struct x11_swapchain *chain = (struct x11_swapchain *)drv_chain;
+   struct x11_image *slot = &chain->images[imageIndex];
+
+   *image = slot->image;
+   *linear_image = slot->linear_image;
+}
static VkResult
case XCB_PRESENT_EVENT_IDLE_NOTIFY: {
xcb_present_idle_notify_event_t *idle = (void *) event;
- for (unsigned i = 0; i < chain->image_count; i++) {
+ for (unsigned i = 0; i < chain->base.image_count; i++) {
if (chain->images[i].pixmap == idle->pixmap) {
chain->images[i].busy = false;
+ if (chain->threaded)
+ wsi_queue_push(&chain->acquire_queue, i);
break;
}
}
break;
}
- case XCB_PRESENT_COMPLETE_NOTIFY:
+ case XCB_PRESENT_EVENT_COMPLETE_NOTIFY: {
+ xcb_present_complete_notify_event_t *complete = (void *) event;
+ if (complete->kind == XCB_PRESENT_COMPLETE_KIND_PIXMAP)
+ chain->last_present_msc = complete->msc;
+ break;
+ }
+
default:
break;
}
}
static VkResult
-x11_acquire_next_image(struct wsi_swapchain *anv_chain,
- uint64_t timeout,
- VkSemaphore semaphore,
- uint32_t *image_index)
+x11_acquire_next_image_poll_x11(struct x11_swapchain *chain,
+ uint32_t *image_index, uint64_t timeout)
{
- struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
xcb_generic_event_t *event;
struct pollfd pfds;
uint64_t atimeout;
while (1) {
- for (uint32_t i = 0; i < chain->image_count; i++) {
+ for (uint32_t i = 0; i < chain->base.image_count; i++) {
if (!chain->images[i].busy) {
/* We found a non-busy image */
xshmfence_await(chain->images[i].shm_fence);
}
static VkResult
-x11_queue_present(struct wsi_swapchain *anv_chain,
-                  uint32_t image_index)
+/* Threaded (FIFO) acquire path: block until the queue-manager thread hands
+ * back an idle image index, or `timeout` expires (units per wsi_queue_pull —
+ * presumably nanoseconds; TODO confirm against wsi_common_queue.h).
+ */
+x11_acquire_next_image_from_queue(struct x11_swapchain *chain,
+                                  uint32_t *image_index_out, uint64_t timeout)
+{
+   assert(chain->threaded);
+
+   uint32_t image_index;
+   VkResult result = wsi_queue_pull(&chain->acquire_queue,
+                                    &image_index, timeout);
+   if (result != VK_SUCCESS) {
+      return result;
+   } else if (chain->status != VK_SUCCESS) {
+      /* The manager thread pushes UINT32_MAX and records the error in
+       * chain->status on failure; surface that error instead of the index.
+       */
+      return chain->status;
+   }
+
+   assert(image_index < chain->base.image_count);
+   /* Wait on the image's shm fence so the server is done with it before we
+    * hand it to the application.
+    */
+   xshmfence_await(chain->images[image_index].shm_fence);
+
+   *image_index_out = image_index;
+
+   return VK_SUCCESS;
+}
+
+static VkResult
+x11_present_to_x11(struct x11_swapchain *chain, uint32_t image_index,
+ uint32_t target_msc)
{
- struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
struct x11_image *image = &chain->images[image_index];
- assert(image_index < chain->image_count);
+ assert(image_index < chain->base.image_count);
uint32_t options = XCB_PRESENT_OPTION_NONE;
- int64_t target_msc = 0;
int64_t divisor = 0;
int64_t remainder = 0;
return VK_SUCCESS;
}
+/* Dispatch acquire to the FIFO queue thread when present mode is threaded,
+ * otherwise poll the X11 special-event queue directly.  Note that the
+ * semaphore parameter is not consumed here.
+ */
+static VkResult
+x11_acquire_next_image(struct wsi_swapchain *anv_chain,
+                       uint64_t timeout,
+                       VkSemaphore semaphore,
+                       uint32_t *image_index)
+{
+   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
+
+   return chain->threaded
+      ? x11_acquire_next_image_from_queue(chain, image_index, timeout)
+      : x11_acquire_next_image_poll_x11(chain, image_index, timeout);
+}
+
+/* Queue an image for presentation.  In threaded (FIFO) mode the index is
+ * handed to the queue-manager thread and we report the chain's sticky
+ * status; otherwise present immediately with no target MSC.
+ *
+ * NOTE(review): `damage` is currently ignored on this path — presumably the
+ * whole pixmap is always presented; confirm against the PresentRegion
+ * extension handling elsewhere.
+ */
+static VkResult
+x11_queue_present(struct wsi_swapchain *anv_chain,
+                  uint32_t image_index,
+                  const VkPresentRegionKHR *damage)
+{
+   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
+
+   if (chain->threaded) {
+      /* Push first, then read status: a failure recorded by the manager
+       * thread is reported on a later present (best-effort, racy read).
+       */
+      wsi_queue_push(&chain->present_queue, image_index);
+      return chain->status;
+   } else {
+      return x11_present_to_x11(chain, image_index, 0);
+   }
+}
+
+/* Thread entry point for FIFO present mode: pulls image indices from the
+ * present queue, presents them targeting the next MSC, waits for the
+ * COMPLETE event, and recycles idle images back through the acquire queue.
+ * On error it records the failure in chain->status and wakes any acquirer
+ * with a UINT32_MAX sentinel.
+ */
+static void *
+x11_manage_fifo_queues(void *state)
+{
+   struct x11_swapchain *chain = state;
+   /* Must be initialized: the loop body may never run (or exit via the
+    * condition), and `result` is read at fail: either way.
+    */
+   VkResult result = VK_SUCCESS;
+
+   assert(chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR);
+
+   while (chain->status == VK_SUCCESS) {
+      /* It should be safe to unconditionally block here.  Later in the loop
+       * we block until the previous present has landed on-screen.  At that
+       * point, we should have received IDLE_NOTIFY on all images presented
+       * before that point so the client should be able to acquire any image
+       * other than the currently presented one.
+       */
+      uint32_t image_index;
+      result = wsi_queue_pull(&chain->present_queue, &image_index, INT64_MAX);
+      if (result != VK_SUCCESS) {
+         goto fail;
+      } else if (chain->status != VK_SUCCESS) {
+         /* Swapchain teardown already recorded a status; don't clobber it. */
+         return NULL;
+      }
+
+      uint64_t target_msc = chain->last_present_msc + 1;
+      result = x11_present_to_x11(chain, image_index, target_msc);
+      if (result != VK_SUCCESS)
+         goto fail;
+
+      /* Block until the present we just issued has completed on-screen. */
+      while (chain->last_present_msc < target_msc) {
+         xcb_generic_event_t *event =
+            xcb_wait_for_special_event(chain->conn, chain->special_event);
+         if (!event) {
+            /* The connection is gone; make sure waiters see an error rather
+             * than a stale VK_SUCCESS.
+             */
+            result = VK_ERROR_OUT_OF_DATE_KHR;
+            goto fail;
+         }
+
+         result = x11_handle_dri3_present_event(chain, (void *)event);
+         /* Events from xcb_wait_for_special_event() are caller-owned. */
+         free(event);
+         if (result != VK_SUCCESS)
+            goto fail;
+      }
+   }
+
+fail:
+   /* Only record a real error; never overwrite a status another thread
+    * (e.g. swapchain destroy) already set with VK_SUCCESS.
+    */
+   if (result != VK_SUCCESS)
+      chain->status = result;
+   /* Wake any blocked acquire; UINT32_MAX signals "check chain->status". */
+   wsi_queue_push(&chain->acquire_queue, UINT32_MAX);
+
+   return NULL;
+}
+
static VkResult
x11_image_init(VkDevice device_h, struct x11_swapchain *chain,
const VkSwapchainCreateInfoKHR *pCreateInfo,
result = chain->base.image_fns->create_wsi_image(device_h,
pCreateInfo,
pAllocator,
+ chain->base.needs_linear_copy,
+ false,
&image->image,
&image->memory,
&size,
if (result != VK_SUCCESS)
return result;
+ if (chain->base.needs_linear_copy) {
+ result = chain->base.image_fns->create_wsi_image(device_h,
+ pCreateInfo,
+ pAllocator,
+ chain->base.needs_linear_copy,
+ true,
+ &image->linear_image,
+ &image->linear_memory,
+ &size,
+ &offset,
+ &row_pitch,
+ &fd);
+ if (result != VK_SUCCESS) {
+ chain->base.image_fns->free_wsi_image(device_h, pAllocator,
+ image->image, image->memory);
+ return result;
+ }
+ }
+
image->pixmap = xcb_generate_id(chain->conn);
cookie =
cookie = xcb_free_pixmap(chain->conn, image->pixmap);
xcb_discard_reply(chain->conn, cookie.sequence);
+ if (chain->base.needs_linear_copy) {
+ chain->base.image_fns->free_wsi_image(device_h, pAllocator,
+ image->linear_image, image->linear_memory);
+ }
chain->base.image_fns->free_wsi_image(device_h, pAllocator,
- image->image, image->memory);
+ image->image, image->memory);
return result;
}
cookie = xcb_free_pixmap(chain->conn, image->pixmap);
xcb_discard_reply(chain->conn, cookie.sequence);
+ if (chain->base.needs_linear_copy) {
+ chain->base.image_fns->free_wsi_image(chain->base.device, pAllocator,
+ image->linear_image, image->linear_memory);
+ }
chain->base.image_fns->free_wsi_image(chain->base.device, pAllocator,
image->image, image->memory);
}
const VkAllocationCallbacks *pAllocator)
{
struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
- for (uint32_t i = 0; i < chain->image_count; i++)
+ xcb_void_cookie_t cookie;
+
+ for (uint32_t i = 0; i < chain->base.image_count; i++)
x11_image_finish(chain, pAllocator, &chain->images[i]);
+ if (chain->threaded) {
+ chain->status = VK_ERROR_OUT_OF_DATE_KHR;
+ /* Push a UINT32_MAX to wake up the manager */
+ wsi_queue_push(&chain->present_queue, UINT32_MAX);
+ pthread_join(chain->queue_manager, NULL);
+ wsi_queue_destroy(&chain->acquire_queue);
+ wsi_queue_destroy(&chain->present_queue);
+ }
+
xcb_unregister_for_special_event(chain->conn, chain->special_event);
+ cookie = xcb_present_select_input_checked(chain->conn, chain->event_id,
+ chain->window,
+ XCB_PRESENT_EVENT_MASK_NO_EVENT);
+ xcb_discard_reply(chain->conn, cookie.sequence);
vk_free(pAllocator, chain);
x11_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
VkDevice device,
struct wsi_device *wsi_device,
+ int local_fd,
const VkSwapchainCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks* pAllocator,
const struct wsi_image_fns *image_fns,
chain->base.device = device;
chain->base.destroy = x11_swapchain_destroy;
chain->base.get_images = x11_get_images;
+ chain->base.get_image_and_linear = x11_get_image_and_linear;
chain->base.acquire_next_image = x11_acquire_next_image;
chain->base.queue_present = x11_queue_present;
chain->base.image_fns = image_fns;
chain->base.present_mode = pCreateInfo->presentMode;
+ chain->base.image_count = num_images;
chain->conn = conn;
chain->window = window;
chain->depth = geometry->depth;
chain->extent = pCreateInfo->imageExtent;
- chain->image_count = num_images;
chain->send_sbc = 0;
+ chain->last_present_msc = 0;
+ chain->threaded = false;
+ chain->status = VK_SUCCESS;
free(geometry);
+ chain->base.needs_linear_copy = false;
+ if (!wsi_x11_check_dri3_compatible(conn, local_fd))
+ chain->base.needs_linear_copy = true;
+
chain->event_id = xcb_generate_id(chain->conn);
xcb_present_select_input(chain->conn, chain->event_id, chain->window,
XCB_PRESENT_EVENT_MASK_CONFIGURE_NOTIFY |
xcb_discard_reply(chain->conn, cookie.sequence);
uint32_t image = 0;
- for (; image < chain->image_count; image++) {
+ for (; image < chain->base.image_count; image++) {
result = x11_image_init(device, chain, pCreateInfo, pAllocator,
&chain->images[image]);
if (result != VK_SUCCESS)
goto fail_init_images;
}
+ if (chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR) {
+ chain->threaded = true;
+
+ /* Initialize our queues. We make them base.image_count + 1 because we will
+ * occasionally use UINT32_MAX to signal the other thread that an error
+ * has occurred and we don't want an overflow.
+ */
+ int ret;
+ ret = wsi_queue_init(&chain->acquire_queue, chain->base.image_count + 1);
+ if (ret) {
+ goto fail_init_images;
+ }
+
+ ret = wsi_queue_init(&chain->present_queue, chain->base.image_count + 1);
+ if (ret) {
+ wsi_queue_destroy(&chain->acquire_queue);
+ goto fail_init_images;
+ }
+
+ for (unsigned i = 0; i < chain->base.image_count; i++)
+ wsi_queue_push(&chain->acquire_queue, i);
+
+ ret = pthread_create(&chain->queue_manager, NULL,
+ x11_manage_fifo_queues, chain);
+ if (ret) {
+ wsi_queue_destroy(&chain->present_queue);
+ wsi_queue_destroy(&chain->acquire_queue);
+ goto fail_init_images;
+ }
+ }
+
*swapchain_out = &chain->base;
return VK_SUCCESS;