struct drm_virtgpu_3d_wait waitcmd;
int ret;
+ if (!p_atomic_read(&res->maybe_busy) && !p_atomic_read(&res->external))
+ return false;
+
memset(&waitcmd, 0, sizeof(waitcmd));
waitcmd.handle = res->bo_handle;
waitcmd.flags = VIRTGPU_WAIT_NOWAIT;
ret = drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
if (ret && errno == EBUSY)
return TRUE;
+
+ p_atomic_set(&res->maybe_busy, false);
+
return FALSE;
}
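
The fast path above reads a new `maybe_busy` field that the rest of this patch maintains. The hunk declaring the field is not part of this excerpt; a minimal sketch of the assumed declaration in struct virgl_hw_res (only maybe_busy is new here, the other fields appear in the surrounding hunks, and the real struct has more members):

struct virgl_hw_res {
   struct pipe_reference reference;
   uint32_t bo_handle;
   int num_cs_references;
   bool external;
   /* assumed new field: true while the kernel may still consider the BO
    * busy; cleared only after a wait confirms it is idle */
   bool maybe_busy;
};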
pipe_reference_init(&res->reference, 1);
p_atomic_set(&res->external, false);
p_atomic_set(&res->num_cs_references, 0);
+
+ /* A newly created resource is considered busy by the kernel until the
+ * command is retired.
+ */
+ p_atomic_set(&res->maybe_busy, true);
+
return res;
}
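
Assuming the patched functions in this file are virgl_drm_resource_is_busy() and virgl_drm_resource_wait() (their signatures are cut off in this excerpt), the intended lifecycle of the flag looks like the following; an illustrative sketch, not code from the patch:

/* Illustrative only; no real caller chains these back to back. */
static void maybe_busy_lifecycle_sketch(struct virgl_winsys *vws,
                                        struct virgl_hw_res *res)
{
   /* right after creation: maybe_busy == true, so this issues the
    * VIRTGPU_WAIT_NOWAIT ioctl and asks the kernel */
   virgl_drm_resource_is_busy(vws, res);

   /* a blocking wait retires the creation command and clears maybe_busy */
   virgl_drm_resource_wait(vws, res);

   /* fast path: returns immediately with no ioctl, unless external */
   virgl_drm_resource_is_busy(vws, res);
}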
struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
struct drm_virtgpu_3d_transfer_to_host tohostcmd;
+ p_atomic_set(&res->maybe_busy, true);
+
memset(&tohostcmd, 0, sizeof(tohostcmd));
tohostcmd.bo_handle = res->bo_handle;
tohostcmd.box.x = box->x;
struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
struct drm_virtgpu_3d_transfer_from_host fromhostcmd;
+ p_atomic_set(&res->maybe_busy, true);
+
memset(&fromhostcmd, 0, sizeof(fromhostcmd));
fromhostcmd.bo_handle = res->bo_handle;
fromhostcmd.level = level;
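
Both transfer hunks set the flag before issuing the ioctl rather than after: the ioctl only queues the DMA, and setting the flag first keeps a concurrent is_busy() check from seeing the resource as idle in the window where a transfer is about to be queued. The repeated pattern, written out as a hypothetical helper (the patch itself open-codes it in each function):

/* Hypothetical helper; not in the patch, which open-codes this. */
static void virgl_res_mark_maybe_busy(struct virgl_hw_res *res)
{
   /* set before the transfer/execbuffer ioctl so other threads never
    * observe a stale "idle" state for a BO with queued work */
   p_atomic_set(&res->maybe_busy, true);
}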
struct drm_virtgpu_3d_wait waitcmd;
int ret;
+ if (!p_atomic_read(&res->maybe_busy) && !p_atomic_read(&res->external))
+ return;
+
memset(&waitcmd, 0, sizeof(waitcmd));
waitcmd.handle = res->bo_handle;
again:
ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
if (ret == -EAGAIN)
goto again;
+
+ p_atomic_set(&res->maybe_busy, false);
}
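
The early return added here tests the same condition as the one in the busy check above. Written out as a hypothetical predicate (the patch open-codes it in both places): skipping the kernel round-trip is only sound when no command from this process is outstanding and no other process can touch the BO, which is why external (shared/imported) resources never take the fast path.

/* Hypothetical predicate; the patch open-codes this in both places. */
static bool virgl_res_known_idle(struct virgl_hw_res *res)
{
   /* external BOs can be submitted against by other processes, so the
    * locally tracked maybe_busy flag cannot be trusted for them */
   return !p_atomic_read(&res->maybe_busy) &&
          !p_atomic_read(&res->external);
}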
static bool virgl_drm_alloc_res_list(struct virgl_drm_cmd_buf *cbuf,
int i;
for (i = 0; i < cbuf->cres; i++) {
+ /* mark all BOs busy after submission */
+ p_atomic_set(&cbuf->res_bo[i]->maybe_busy, true);
+
p_atomic_dec(&cbuf->res_bo[i]->num_cs_references);
virgl_drm_resource_reference(qdws, &cbuf->res_bo[i], NULL);
}
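
The loop above runs when the command buffer's resource list is torn down after submission, at which point every BO referenced by the submitted commands may be busy on the host. A hedged sketch of the assumed call order (the ioctl and its argument struct are real; the function name and surrounding flow are illustrative):

/* Assumed submission flow; names are illustrative except for
 * DRM_IOCTL_VIRTGPU_EXECBUFFER itself. */
static void submit_sketch(struct virgl_drm_winsys *qdws,
                          struct virgl_drm_cmd_buf *cbuf,
                          struct drm_virtgpu_execbuffer *eb)
{
   drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, eb);

   /* then the loop above: every referenced BO gets maybe_busy = true,
    * its CS reference count dropped, and its winsys reference released */
}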