{
struct svga_screen *ss = svga_screen(pipe->screen);
struct svga_buffer *sbuf = svga_buffer(transfer->resource);
-
+ struct svga_context *svga = svga_context(pipe);
unsigned offset = transfer->box.x + box->x;
unsigned length = box->width;
assert(transfer->usage & PIPE_TRANSFER_WRITE);
assert(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT);
- mtx_lock(&ss->swc_mutex);
- svga_buffer_add_range(sbuf, offset, offset + length);
- mtx_unlock(&ss->swc_mutex);
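+ /* Coherent surfaces don't use the dirty-range upload path; ranges
+  * are only tracked for malloc-backed (swbuf) buffers. */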
+ if (!svga->swc->force_coherent || sbuf->swbuf) {
+ mtx_lock(&ss->swc_mutex);
+ svga_buffer_add_range(sbuf, offset, offset + length);
+ mtx_unlock(&ss->swc_mutex);
+ }
}
sbuf->dma.flags.discard = TRUE;
- svga_buffer_add_range(sbuf, 0, sbuf->b.b.width0);
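+ /* Likewise, the whole-buffer dirty range is only needed when a
+  * swbuf is present. */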
+ if (!svga->swc->force_coherent || sbuf->swbuf)
+ svga_buffer_add_range(sbuf, 0, sbuf->b.b.width0);
}
}
ret = SVGA3D_BindGBSurface(swc, sbuf->handle);
assert(ret == PIPE_OK);
}
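+ /* Coherent buffers skip the range-based DMA path; issue an
+  * UpdateGBSurface so the device refreshes the surface from its
+  * backing store. */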
+ if (swc->force_coherent) {
+ ret = SVGA3D_UpdateGBSurface(swc, sbuf->handle);
+ if (ret != PIPE_OK) {
+ /* Out of command-buffer space: flush and retry the update. */
+ svga_context_flush(svga, NULL);
+ ret = SVGA3D_UpdateGBSurface(swc, sbuf->handle);
+ assert(ret == PIPE_OK);
+ }
+ }
}
} else
sws->buffer_unmap(sws, sbuf->hwbuf);
struct pipe_resource *dummy;
unsigned i;
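+ /* No DMA transfer commands are needed for coherent memory. */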
+ if (swc->force_coherent)
+ return PIPE_OK;
+
assert(svga_have_gb_objects(svga));
assert(numBoxes);
assert(sbuf->dma.updates == NULL);
unsigned i;
struct pipe_resource *dummy;
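+ /* Nothing to flush when no DMA is pending, or when DMA is bypassed
+  * entirely for coherent memory. */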
- if (!sbuf->dma.pending) {
+ if (!sbuf->dma.pending || svga->swc->force_coherent) {
//debug_printf("no dma pending on buffer\n");
return;
}
*/
if (svga_have_gb_objects(svga)) {
struct svga_3d_update_gb_image *update = sbuf->dma.updates;
+
assert(update);
for (i = 0; i < sbuf->map.num_ranges; ++i, ++update) {
memcpy((uint8_t *) map + start, (uint8_t *) sbuf->swbuf + start, len);
}
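+ /* With coherent memory the memcpy above is all that is needed;
+  * drop the accumulated ranges instead of uploading them later. */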
+ if (svga->swc->force_coherent)
+ sbuf->map.num_ranges = 0;
+
svga_buffer_hw_storage_unmap(svga, sbuf);
/* This user/malloc buffer is now indistinguishable from a gpu buffer */
}
assert(sbuf->handle);
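+ /* Coherent buffers need no upload commands; use the handle as is. */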
+ if (svga->swc->force_coherent)
+ return sbuf->handle;
if (sbuf->map.num_ranges) {
if (!sbuf->dma.pending) {
svga_surfaces_flush(svga);
- for (i = 0; i < st->box.d; i++) {
- if (svga_have_vgpu10(svga)) {
- ret = readback_image_vgpu10(svga, surf, st->slice + i, level,
- tex->b.b.last_level + 1);
- } else {
- ret = readback_image_vgpu9(svga, surf, st->slice + i, level);
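+ /* Skip the device readback for coherent surfaces. Imported surfaces
+  * may have been written outside this context, so read those back
+  * regardless. */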
+ if (!svga->swc->force_coherent || tex->imported) {
+ for (i = 0; i < st->box.d; i++) {
+ if (svga_have_vgpu10(svga)) {
+ ret = readback_image_vgpu10(svga, surf, st->slice + i, level,
+ tex->b.b.last_level + 1);
+ } else {
+ ret = readback_image_vgpu9(svga, surf, st->slice + i, level);
+ }
}
- }
- svga->hud.num_readbacks++;
- SVGA_STATS_COUNT_INC(sws, SVGA_STATS_COUNT_TEXREADBACK);
+ svga->hud.num_readbacks++;
+ SVGA_STATS_COUNT_INC(sws, SVGA_STATS_COUNT_TEXREADBACK);
- assert(ret == PIPE_OK);
- (void) ret;
+ assert(ret == PIPE_OK);
+ (void) ret;
- svga_context_flush(svga, NULL);
+ svga_context_flush(svga, NULL);
+ }
/*
* Note: if PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE were specified
* we could potentially clear the flag for all faces/layers/mips.
ret = SVGA3D_BindGBSurface(swc, surf);
assert(ret == PIPE_OK);
}
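+ /* As with buffers, coherent textures bypass DMA, so tell the device
+  * to refresh the surface from its backing store. */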
+ if (swc->force_coherent) {
+ ret = SVGA3D_UpdateGBSurface(swc, surf);
+ if (ret != PIPE_OK) {
+ /* Out of command-buffer space: flush and retry the update. */
+ svga_context_flush(svga, NULL);
+ ret = SVGA3D_UpdateGBSurface(swc, surf);
+ assert(ret == PIPE_OK);
+ }
+ }
}
}
box.x, box.y, box.z,
box.w, box.h, box.d);
- if (svga_have_vgpu10(svga)) {
- unsigned i;
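+ /* Coherent, non-imported surfaces need no explicit update command
+  * once the transfer is unmapped. */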
+ if (!svga->swc->force_coherent || tex->imported) {
+ if (svga_have_vgpu10(svga)) {
+ unsigned i;
- for (i = 0; i < nlayers; i++) {
- ret = update_image_vgpu10(svga, surf, &box,
- st->slice + i, transfer->level,
- tex->b.b.last_level + 1);
+ for (i = 0; i < nlayers; i++) {
+ ret = update_image_vgpu10(svga, surf, &box,
+ st->slice + i, transfer->level,
+ tex->b.b.last_level + 1);
+ assert(ret == PIPE_OK);
+ }
+ } else {
+ assert(nlayers == 1);
+ ret = update_image_vgpu9(svga, surf, &box, st->slice,
+ transfer->level);
assert(ret == PIPE_OK);
}
- } else {
- assert(nlayers == 1);
- ret = update_image_vgpu9(svga, surf, &box, st->slice, transfer->level);
- assert(ret == PIPE_OK);
}
(void) ret;
}
**/
boolean have_gb_objects;
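+
+ /**
+  * Indicates that surface memory is coherent with CPU writes, so the
+  * dirty-range DMA upload paths can be bypassed.
+  */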
+ boolean force_coherent;
/**
* Map a guest-backed surface.
vswc->fctx = debug_flush_ctx_create(TRUE, VMW_DEBUG_FLUSH_STACK);
#endif
+ vswc->base.force_coherent = vws->force_coherent;
return &vswc->base;
out_no_hash:
vws->device = stat_buf.st_rdev;
vws->open_count = 1;
vws->ioctl.drm_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
- vws->base.have_gb_dma = TRUE;
- vws->base.need_to_rebind_resources = FALSE;
-
+ vws->force_coherent = FALSE;
if (!vmw_ioctl_init(vws))
goto out_no_ioctl;
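+ /* Guest-backed DMA is redundant when all surface memory is coherent. */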
+ vws->base.have_gb_dma = !vws->force_coherent;
+ vws->base.need_to_rebind_resources = FALSE;
vws->base.have_transfer_from_buffer_cmd = vws->base.have_vgpu10;
vws->fence_ops = vmw_fence_ops_create(vws);
if (!vws->fence_ops)
cnd_t cs_cond;
mtx_t cs_mutex;
+
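+ /* Use coherent surface memory; set from SVGA_FORCE_COHERENT when the
+  * kernel supports it. */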
+ boolean force_coherent;
};
if (usage & SVGA_SURFACE_USAGE_SHARED)
req->base.drm_surface_flags |= drm_vmw_surface_flag_shareable;
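+ /* Ask the kernel to keep the surface's backing store coherent with
+  * user-space mappings. */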
+ if (vws->force_coherent)
+ req->base.drm_surface_flags |= drm_vmw_surface_flag_coherent;
+
req->base.drm_surface_flags |= drm_vmw_surface_flag_create_buffer;
req->base.base_size.width = size.width;
req->base.base_size.height = size.height;
drmVersionPtr version;
boolean drm_gb_capable;
boolean have_drm_2_5;
+ boolean have_drm_2_16;
const char *getenv_val;
VMW_FUNC;
(version->version_major == 2 && version->version_minor > 8);
vws->ioctl.have_drm_2_15 = version->version_major > 2 ||
(version->version_major == 2 && version->version_minor > 14);
+ have_drm_2_16 = version->version_major > 2 ||
+ (version->version_major == 2 && version->version_minor > 15);
vws->ioctl.drm_execbuf_version = vws->ioctl.have_drm_2_9 ? 2 : 1;
vws->ioctl.num_cap_3d = size / sizeof(uint32_t);
else
vws->ioctl.num_cap_3d = SVGA3D_DEVCAP_MAX;
+
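+ /* drm_vmw_surface_flag_coherent requires DRM 2.16; honor
+  * SVGA_FORCE_COHERENT only on new enough kernels. */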
+ if (have_drm_2_16) {
+ getenv_val = getenv("SVGA_FORCE_COHERENT");
+ if (getenv_val && strcmp(getenv_val, "0") != 0)
+ vws->force_coherent = TRUE;
+ }
} else {
vws->ioctl.num_cap_3d = SVGA3D_DEVCAP_MAX;