+ struct virgl_winsys *vws = virgl_screen(ctx->screen)->vws;
+ vws->transfer_put(vws, virgl_resource(trans->base.resource)->hw_res, box,
+ trans->base.stride, trans->l_stride, trans->offset,
+ trans->base.level);
+}
+
+ /* Unmap a texture transfer and push any written data back toward the host.
+  *
+  * For write mappings that did not opt into PIPE_TRANSFER_FLUSH_EXPLICIT
+  * (those flush their regions themselves), there are two upload paths:
+  *   - if a same-format staging ("resolve") transfer exists, flush the
+  *     staging data and blit it back into the real resource, or
+  *   - otherwise queue the transfer so its data is uploaded later.
+  * Any staging transfer is released before returning; the transfer object
+  * itself is either handed to the queue or destroyed here.
+  */
+ static void virgl_texture_transfer_unmap(struct pipe_context *ctx,
+ struct pipe_transfer *transfer)
+ {
+ struct virgl_context *vctx = virgl_context(ctx);
+ struct virgl_transfer *trans = virgl_transfer(transfer);
+ bool queue_unmap = false;
+
+ /* Note: & binds tighter than &&, so this tests the WRITE bit first and
+  * then requires FLUSH_EXPLICIT to be unset. */
+ if (transfer->usage & PIPE_TRANSFER_WRITE &&
+ (transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT) == 0) {
+
+ /* Staging path: only valid when formats match, since the blit below
+  * copies between the staging resource and the mapped resource. */
+ if (trans->resolve_transfer && (trans->base.resource->format ==
+ trans->resolve_transfer->resource->format)) {
+ flush_data(ctx, virgl_transfer(trans->resolve_transfer),
+ &trans->resolve_transfer->box);
+
+ /* FINISHME: In case the destination format isn't renderable here, the
+ * blit here will currently fail. This could for instance happen if the
+ * mapped resource is of a compressed format, and it's mapped with both
+ * read and write usage.
+ */
+
+ virgl_copy_region_with_blit(ctx,
+ trans->base.resource, trans->base.level,
+ &transfer->box,
+ trans->resolve_transfer->resource, 0,
+ &trans->resolve_transfer->box);
+ /* Flush so the blit is submitted before the staging resource is
+  * released below — presumably required for correctness; confirm
+  * against the transfer-queue ordering rules. */
+ ctx->flush(ctx, NULL, 0);
+ } else
+ queue_unmap = true;
+ }
+
+ /* Drop the staging transfer (if any): release its resource reference
+  * and return the transfer object to the per-context pool. */
+ if (trans->resolve_transfer) {
+ pipe_resource_reference(&trans->resolve_transfer->resource, NULL);
+ virgl_resource_destroy_transfer(&vctx->transfer_pool,
+ virgl_transfer(trans->resolve_transfer));
+ }
+
+ /* Either the queue now owns `trans` (it will upload and free it later),
+  * or we are done with it and recycle it immediately. */
+ if (queue_unmap)
+ virgl_transfer_queue_unmap(&vctx->queue, trans);
+ else
+ virgl_resource_destroy_transfer(&vctx->transfer_pool, trans);