return 0;
}

+enum virgl_transfer3d_encode_stride {
+ /* The stride and layer_stride are explicitly specified in the command. */
+ virgl_transfer3d_explicit_stride,
+ /* The stride and layer_stride are inferred by the host. In this case, the
+ * host will use the image stride and layer_stride for the specified level.
+ */
+ virgl_transfer3d_host_inferred_stride,
+};
+
static void virgl_encoder_transfer3d_common(struct virgl_screen *vs,
struct virgl_cmd_buf *buf,
- struct virgl_transfer *xfer)
+ struct virgl_transfer *xfer,
+ enum virgl_transfer3d_encode_stride encode_stride)
{
struct pipe_transfer *transfer = &xfer->base;
struct virgl_resource *res = virgl_resource(transfer->resource);
+ unsigned stride;
+ unsigned layer_stride;
+
+ if (encode_stride == virgl_transfer3d_explicit_stride) {
+ stride = transfer->stride;
+ layer_stride = transfer->layer_stride;
+ } else if (encode_stride == virgl_transfer3d_host_inferred_stride) {
+ stride = 0;
+ layer_stride = 0;
+ } else {
+ assert(!"Invalid virgl_transfer3d_encode_stride value");
+ }
virgl_encoder_emit_resource(vs, buf, res);
virgl_encoder_write_dword(buf, transfer->level);
virgl_encoder_write_dword(buf, transfer->usage);
- virgl_encoder_write_dword(buf, 0);
- virgl_encoder_write_dword(buf, 0);
+ virgl_encoder_write_dword(buf, stride);
+ virgl_encoder_write_dword(buf, layer_stride);
virgl_encoder_write_dword(buf, transfer->box.x);
virgl_encoder_write_dword(buf, transfer->box.y);
virgl_encoder_write_dword(buf, transfer->box.z);
transfer.base.box.width = length;
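+ /* The command length is the 11 dwords of transfer3d metadata written by
+  * virgl_encoder_transfer3d_common (resource, level, usage, strides, box)
+  * plus the data payload rounded up to whole dwords.
+  */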
virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_RESOURCE_INLINE_WRITE, 0, ((length + 3) / 4) + 11));
- virgl_encoder_transfer3d_common(vs, ctx->cbuf, &transfer);
+ virgl_encoder_transfer3d_common(vs, ctx->cbuf, &transfer,
+ virgl_transfer3d_host_inferred_stride);
virgl_encoder_write_block(ctx->cbuf, data, length);
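+ /* Advance past the chunk just written; larger writes are emitted as a
+  * sequence of inline-write commands.
+  */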
left_bytes -= length;
transfer.base.box.x += length;
uint32_t command;
command = VIRGL_CMD0(VIRGL_CCMD_TRANSFER3D, 0, VIRGL_TRANSFER3D_SIZE);
virgl_encoder_write_dword(buf, command);
- virgl_encoder_transfer3d_common(vs, buf, trans);
+ virgl_encoder_transfer3d_common(vs, buf, trans,
+ virgl_transfer3d_host_inferred_stride);
virgl_encoder_write_dword(buf, trans->offset);
virgl_encoder_write_dword(buf, direction);
}

+void virgl_encode_copy_transfer(struct virgl_context *ctx,
+ struct virgl_transfer *trans)
+{
+ uint32_t command;
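+ /* Source resource holding the data to be copied into the destination. */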
+ struct virgl_resource *copy_src_res = virgl_resource(trans->copy_src_res);
+ struct virgl_screen *vs = virgl_screen(ctx->base.screen);
+
+ command = VIRGL_CMD0(VIRGL_CCMD_COPY_TRANSFER3D, 0, VIRGL_COPY_TRANSFER3D_SIZE);
+ virgl_encoder_write_cmd_dword(ctx, command);
+ /* Copy transfers need to explicitly specify the stride, since it may differ
+ * from the image stride.
+ */
+ virgl_encoder_transfer3d_common(vs, ctx->cbuf, trans, virgl_transfer3d_explicit_stride);
+ virgl_encoder_emit_resource(vs, ctx->cbuf, copy_src_res);
+ virgl_encoder_write_dword(ctx->cbuf, trans->copy_src_offset);
+ /* At the moment all copy transfers are synchronized. */
+ virgl_encoder_write_dword(ctx->cbuf, 1);
+}
+
void virgl_encode_end_transfers(struct virgl_cmd_buf *buf)
{
uint32_t command, diff;
struct virgl_transfer *transfer)
{
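+ /* The transfer3d payload plus one dword for the command header. */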
uint32_t dwords = VIRGL_TRANSFER3D_SIZE + 1;
+
if (queue->tbuf) {
if (queue->num_dwords + dwords >= VIRGL_MAX_TBUF_DWORDS) {
struct list_iteration_args iter;
res = transfer->base.resource;
pipe_resource_reference(&pres, res);
+ /* We don't support copy transfers in the transfer queue. */
+ assert(!transfer->copy_src_res);
+
+ /* Attempt to merge multiple intersecting transfers into a single one. */
if (res->target == PIPE_BUFFER) {
memset(&iter, 0, sizeof(iter));
iter.current = transfer;
struct virgl_transfer *queued = NULL;
struct list_iteration_args iter;
+ /* We don't support extending from copy transfers. */
+ assert(!transfer->copy_src_res);
+
if (transfer->base.resource->target == PIPE_BUFFER) {
memset(&iter, 0, sizeof(iter));
iter.current = transfer;