#include "virgl/virgl_public.h"
#include <xf86drm.h>
+#include <libsync.h>
#include "virtgpu_drm.h"
#include "virgl_drm_winsys.h"
#include "virgl_drm_public.h"
+
+#define VIRGL_DRM_VERSION(major, minor) ((major) << 16 | (minor))
+#define VIRGL_DRM_VERSION_FENCE_FD VIRGL_DRM_VERSION(1, 0)
+
+
static inline boolean can_cache_resource(struct virgl_hw_res *res)
{
return res->cacheable == TRUE;
if (res->ptr)
os_munmap(res->ptr, res->size);
+ if (res->fence_fd != -1)
+ close(res->fence_fd);
+
memset(&args, 0, sizeof(args));
args.handle = res->bo_handle;
drmIoctl(qdws->fd, DRM_IOCTL_GEM_CLOSE, &args);
res->size = size;
res->stride = stride;
pipe_reference_init(&res->reference, 1);
- res->num_cs_references = 0;
+ p_atomic_set(&res->num_cs_references, 0);
+ res->fence_fd = -1;
return res;
}
struct virgl_hw_res *res, *curr_res;
struct list_head *curr, *next;
int64_t now;
- int ret;
+ int ret = 0;
/* only store binds for vertex/index/const buffers */
if (bind != VIRGL_BIND_CONSTANT_BUFFER && bind != VIRGL_BIND_INDEX_BUFFER &&
mtx_lock(&qdws->bo_handles_mutex);
- if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
+ if (whandle->type == WINSYS_HANDLE_TYPE_SHARED) {
res = util_hash_table_get(qdws->bo_names, (void*)(uintptr_t)handle);
if (res) {
struct virgl_hw_res *r = NULL;
}
}
- if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
+ if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
int r;
r = drmPrimeFDToHandle(qdws->fd, whandle->handle, &handle);
if (r) {
}
res = util_hash_table_get(qdws->bo_handles, (void*)(uintptr_t)handle);
- fprintf(stderr, "resource %p for handle %d, pfd=%d\n", res, handle, whandle->handle);
if (res) {
struct virgl_hw_res *r = NULL;
virgl_drm_resource_reference(qdws, &r, res);
if (!res)
goto done;
- if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
+ if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
res->bo_handle = handle;
} else {
- fprintf(stderr, "gem open handle %d\n", handle);
memset(&open_arg, 0, sizeof(open_arg));
open_arg.name = whandle->handle;
if (drmIoctl(qdws->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
res->stride = info_arg.stride;
pipe_reference_init(&res->reference, 1);
res->num_cs_references = 0;
+ res->fence_fd = -1;
util_hash_table_set(qdws->bo_handles, (void *)(uintptr_t)handle, res);
if (!res)
return FALSE;
- if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
+ if (whandle->type == WINSYS_HANDLE_TYPE_SHARED) {
if (!res->flinked) {
memset(&flink, 0, sizeof(flink));
flink.handle = res->bo_handle;
mtx_unlock(&qdws->bo_handles_mutex);
}
whandle->handle = res->flink;
- } else if (whandle->type == DRM_API_HANDLE_TYPE_KMS) {
+ } else if (whandle->type == WINSYS_HANDLE_TYPE_KMS) {
whandle->handle = res->bo_handle;
- } else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
+ } else if (whandle->type == WINSYS_HANDLE_TYPE_FD) {
if (drmPrimeHandleToFD(qdws->fd, res->bo_handle, DRM_CLOEXEC, (int*)&whandle->handle))
return FALSE;
mtx_lock(&qdws->bo_handles_mutex);
goto again;
}
-static struct virgl_cmd_buf *virgl_drm_cmd_buf_create(struct virgl_winsys *qws)
-{
- struct virgl_drm_cmd_buf *cbuf;
-
- cbuf = CALLOC_STRUCT(virgl_drm_cmd_buf);
- if (!cbuf)
- return NULL;
-
- cbuf->ws = qws;
-
- cbuf->nres = 512;
- cbuf->res_bo = CALLOC(cbuf->nres, sizeof(struct virgl_hw_buf*));
- if (!cbuf->res_bo) {
- FREE(cbuf);
- return NULL;
- }
- cbuf->res_hlist = MALLOC(cbuf->nres * sizeof(uint32_t));
- if (!cbuf->res_hlist) {
- FREE(cbuf->res_bo);
- FREE(cbuf);
- return NULL;
- }
-
- cbuf->base.buf = cbuf->buf;
- return &cbuf->base;
-}
-
-static void virgl_drm_cmd_buf_destroy(struct virgl_cmd_buf *_cbuf)
-{
- struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);
-
- FREE(cbuf->res_hlist);
- FREE(cbuf->res_bo);
- FREE(cbuf);
-
-}
-
static boolean virgl_drm_lookup_res(struct virgl_drm_cmd_buf *cbuf,
struct virgl_hw_res *res)
{
{
unsigned hash = res->res_handle & (sizeof(cbuf->is_handle_added)-1);
- if (cbuf->cres > cbuf->nres) {
- fprintf(stderr,"failure to add relocation\n");
- return;
+ if (cbuf->cres >= cbuf->nres) {
+ unsigned new_nres = cbuf->nres + 256;
+ void *new_ptr = REALLOC(cbuf->res_bo,
+ cbuf->nres * sizeof(struct virgl_hw_buf*),
+ new_nres * sizeof(struct virgl_hw_buf*));
+ if (!new_ptr) {
+ fprintf(stderr,"failure to add relocation %d, %d\n", cbuf->cres, new_nres);
+ return;
+ }
+ cbuf->res_bo = new_ptr;
+
+ new_ptr = REALLOC(cbuf->res_hlist,
+ cbuf->nres * sizeof(uint32_t),
+ new_nres * sizeof(uint32_t));
+ if (!new_ptr) {
+ fprintf(stderr,"failure to add hlist relocation %d, %d\n", cbuf->cres, cbuf->nres);
+ return;
+ }
+ cbuf->res_hlist = new_ptr;
+ cbuf->nres = new_nres;
}
cbuf->res_bo[cbuf->cres] = NULL;
struct virgl_cmd_buf *_cbuf,
struct virgl_hw_res *res)
{
- if (!res->num_cs_references)
+ if (!p_atomic_read(&res->num_cs_references))
return FALSE;
return TRUE;
}
+/* Allocate a command buffer for this winsys.
+ *
+ * \param qws   owning winsys (stored in cbuf->ws).
+ * \param size  command-stream capacity in uint32_t dwords.
+ *
+ * Sets up the relocation bookkeeping arrays (res_bo / res_hlist), initially
+ * sized for 512 entries, plus the dword stream itself.  in_fence_fd starts
+ * at -1, i.e. "no pending in-fence".
+ *
+ * Returns NULL on allocation failure, releasing any partial allocations.
+ */
+static struct virgl_cmd_buf *virgl_drm_cmd_buf_create(struct virgl_winsys *qws,
+                                                      uint32_t size)
+{
+   struct virgl_drm_cmd_buf *cbuf;
+
+   cbuf = CALLOC_STRUCT(virgl_drm_cmd_buf);
+   if (!cbuf)
+      return NULL;
+
+   cbuf->ws = qws;
+
+   cbuf->nres = 512;
+   cbuf->res_bo = CALLOC(cbuf->nres, sizeof(struct virgl_hw_buf*));
+   if (!cbuf->res_bo) {
+      FREE(cbuf);
+      return NULL;
+   }
+   cbuf->res_hlist = MALLOC(cbuf->nres * sizeof(uint32_t));
+   if (!cbuf->res_hlist) {
+      FREE(cbuf->res_bo);
+      FREE(cbuf);
+      return NULL;
+   }
+
+   /* The command stream is now heap-allocated (it was previously a fixed
+    * array embedded in the struct), so it must be freed in _destroy. */
+   cbuf->buf = CALLOC(size, sizeof(uint32_t));
+   if (!cbuf->buf) {
+      FREE(cbuf->res_hlist);
+      FREE(cbuf->res_bo);
+      FREE(cbuf);
+      return NULL;
+   }
+
+   cbuf->in_fence_fd = -1;
+   cbuf->base.buf = cbuf->buf;
+   return &cbuf->base;
+}
+
+/* Destroy a command buffer created by virgl_drm_cmd_buf_create.
+ *
+ * Drops the cbuf's references on all still-attached resources before
+ * freeing the bookkeeping arrays and the dword stream, so resources are
+ * not leaked if the cbuf is destroyed with unsubmitted work.
+ */
+static void virgl_drm_cmd_buf_destroy(struct virgl_cmd_buf *_cbuf)
+{
+   struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);
+
+   virgl_drm_release_all_res(virgl_drm_winsys(cbuf->ws), cbuf);
+   FREE(cbuf->res_hlist);
+   FREE(cbuf->res_bo);
+   FREE(cbuf->buf);
+   FREE(cbuf);
+
+}
+
static int virgl_drm_winsys_submit_cmd(struct virgl_winsys *qws,
- struct virgl_cmd_buf *_cbuf)
+ struct virgl_cmd_buf *_cbuf,
+ int *out_fence_fd)
{
struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);
eb.num_bo_handles = cbuf->cres;
eb.bo_handles = (unsigned long)(void *)cbuf->res_hlist;
+ eb.fence_fd = -1;
+ if (qws->supports_fences) {
+ if (cbuf->in_fence_fd >= 0) {
+ eb.flags |= VIRTGPU_EXECBUF_FENCE_FD_IN;
+ eb.fence_fd = cbuf->in_fence_fd;
+ }
+
+ if (out_fence_fd != NULL)
+ eb.flags |= VIRTGPU_EXECBUF_FENCE_FD_OUT;
+ } else {
+ assert(cbuf->in_fence_fd < 0);
+ assert(out_fence_fd == NULL);
+ }
+
ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb);
if (ret == -1)
fprintf(stderr,"got error from kernel - expect bad rendering %d\n", errno);
cbuf->base.cdw = 0;
+ if (qws->supports_fences) {
+ if (cbuf->in_fence_fd >= 0) {
+ close(cbuf->in_fence_fd);
+ cbuf->in_fence_fd = -1;
+ }
+ }
+
+ if (out_fence_fd != NULL)
+ *out_fence_fd = eb.fence_fd;
+
virgl_drm_release_all_res(qdws, cbuf);
memset(cbuf->is_handle_added, 0, sizeof(cbuf->is_handle_added));
struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
struct drm_virtgpu_get_caps args;
int ret;
- bool fill_v2 = false;
- memset(&args, 0, sizeof(args));
+ virgl_ws_fill_new_caps_defaults(caps);
- args.cap_set_id = 1;
+ memset(&args, 0, sizeof(args));
+ if (vdws->has_capset_query_fix) {
+ /* if we have the query fix - try and get cap set id 2 first */
+ args.cap_set_id = 2;
+ args.size = sizeof(union virgl_caps);
+ } else {
+ args.cap_set_id = 1;
+ args.size = sizeof(struct virgl_caps_v1);
+ }
args.addr = (unsigned long)&caps->caps;
- args.size = sizeof(union virgl_caps);
ret = drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &args);
-
if (ret == -1 && errno == EINVAL) {
/* Fallback to v1 */
+ args.cap_set_id = 1;
args.size = sizeof(struct virgl_caps_v1);
ret = drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &args);
if (ret == -1)
return ret;
- fill_v2 = true;
- }
- if (caps->caps.max_version == 1)
- fill_v2 = true;
-
- if (fill_v2) {
- caps->caps.v2.min_aliased_point_size = 0.f;
- caps->caps.v2.max_aliased_point_size = 255.f;
- caps->caps.v2.min_smooth_point_size = 0.f;
- caps->caps.v2.max_smooth_point_size = 255.f;
- caps->caps.v2.min_aliased_line_width = 0.f;
- caps->caps.v2.max_aliased_line_width = 255.f;
- caps->caps.v2.min_smooth_line_width = 0.f;
- caps->caps.v2.max_smooth_line_width = 255.f;
- caps->caps.v2.max_texture_lod_bias = 16.0f;
- caps->caps.v2.max_geom_output_vertices = 256;
- caps->caps.v2.max_geom_total_output_components = 16384;
- caps->caps.v2.max_vertex_outputs = 32;
- caps->caps.v2.max_vertex_attribs = 16;
- caps->caps.v2.max_shader_patch_varyings = 0;
- caps->caps.v2.min_texel_offset = -8;
- caps->caps.v2.max_texel_offset = 7;
- caps->caps.v2.min_texture_gather_offset = -8;
- caps->caps.v2.max_texture_gather_offset = 7;
}
return ret;
}
}
static struct pipe_fence_handle *
-virgl_cs_create_fence(struct virgl_winsys *vws)
+virgl_cs_create_fence(struct virgl_winsys *vws, int fd)
{
struct virgl_hw_res *res;
VIRGL_BIND_CUSTOM,
8, 1, 1, 0, 0, 0, 8);
+ res->fence_fd = fd;
return (struct pipe_fence_handle *)res;
}
return TRUE;
}
virgl_drm_resource_wait(vws, res);
+
+ if (res->fence_fd != -1) {
+ int ret = sync_wait(res->fence_fd, timeout / 1000000);
+ return ret == 0;
+ }
+
return TRUE;
}
virgl_hw_res(src));
}
+/* Make the next submission on \p _cbuf wait for \p fence on the host.
+ *
+ * For an external (sync_file-backed) fence, merge its fd into the cbuf's
+ * in_fence_fd via sync_accumulate(); the combined fd is passed to the
+ * kernel as VIRTGPU_EXECBUF_FENCE_FD_IN at submit time.
+ */
+static void virgl_fence_server_sync(struct virgl_winsys *vws,
+                                    struct virgl_cmd_buf *_cbuf,
+                                    struct pipe_fence_handle *fence)
+{
+   struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);
+   struct virgl_hw_res *hw_res = virgl_hw_res(fence);
+
+   /* if not an external fence, then nothing more to do without preemption: */
+   if (hw_res->fence_fd == -1)
+      return;
+
+   sync_accumulate("virgl", &cbuf->in_fence_fd, hw_res->fence_fd);
+}
+
+/* Export the fence's sync_file fd; caller owns the returned duplicate.
+ *
+ * NOTE(review): if the fence has no fd (fence_fd == -1), dup() fails and
+ * -1 is returned — callers are presumably expected to handle that; confirm.
+ */
+static int virgl_fence_get_fd(struct virgl_winsys *vws,
+                              struct pipe_fence_handle *fence)
+{
+   struct virgl_hw_res *hw_res = virgl_hw_res(fence);
+
+   return dup(hw_res->fence_fd);
+}
+
+/* Query the virtio-gpu DRM driver version.
+ *
+ * Returns the minor version number (>= 0) when the major version is 0,
+ * -EINVAL for an unexpected major version, or -EFAULT if the version
+ * query itself failed.  The result is compared against
+ * VIRGL_DRM_VERSION_FENCE_FD to decide whether fence fds are supported.
+ */
+static int virgl_drm_get_version(int fd)
+{
+	int ret;
+	drmVersionPtr version;
+
+	version = drmGetVersion(fd);
+
+	if (!version)
+		ret = -EFAULT;
+	else if (version->version_major != 0)
+		ret = -EINVAL;
+	else
+		ret = version->version_minor;
+
+	/* drmFreeVersion(NULL) is safe, so no need to guard the error path. */
+	drmFreeVersion(version);
+
+	return ret;
+}
static struct virgl_winsys *
virgl_drm_winsys_create(int drmFD)
{
struct virgl_drm_winsys *qdws;
+ int drm_version;
+ int ret;
+ int gl = 0;
+ struct drm_virtgpu_getparam getparam = {0};
+
+ getparam.param = VIRTGPU_PARAM_3D_FEATURES;
+ getparam.value = (uint64_t)(uintptr_t)≷
+ ret = drmIoctl(drmFD, DRM_IOCTL_VIRTGPU_GETPARAM, &getparam);
+ if (ret < 0 || !gl)
+ return NULL;
+
+ drm_version = virgl_drm_get_version(drmFD);
+ if (drm_version < 0)
+ return NULL;
qdws = CALLOC_STRUCT(virgl_drm_winsys);
if (!qdws)
qdws->base.cs_create_fence = virgl_cs_create_fence;
qdws->base.fence_wait = virgl_fence_wait;
qdws->base.fence_reference = virgl_fence_reference;
+ qdws->base.fence_server_sync = virgl_fence_server_sync;
+ qdws->base.fence_get_fd = virgl_fence_get_fd;
+ qdws->base.supports_fences = drm_version >= VIRGL_DRM_VERSION_FENCE_FD;
+ qdws->base.supports_encoded_transfers = 1;
qdws->base.get_caps = virgl_drm_get_caps;
+
+ uint32_t value = 0;
+ getparam.param = VIRTGPU_PARAM_CAPSET_QUERY_FIX;
+ getparam.value = (uint64_t)(uintptr_t)&value;
+ ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_GETPARAM, &getparam);
+ if (ret == 0) {
+ if (value == 1)
+ qdws->has_capset_query_fix = true;
+ }
+
return &qdws->base;
}
if (destroy) {
int fd = virgl_drm_winsys(screen->vws)->fd;
util_hash_table_remove(fd_tab, intptr_to_pointer(fd));
+ close(fd);
}
mtx_unlock(&virgl_screen_mutex);
int dup_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
vws = virgl_drm_winsys_create(dup_fd);
+ if (!vws) {
+ close(dup_fd);
+ goto unlock;
+ }
pscreen = virgl_create_screen(vws);
if (pscreen) {