X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fgallium%2Fdrivers%2Fvirgl%2Fvirgl_encode.c;h=2975e6048cf01aede54b7c6686e988ebb7082e1f;hb=c4c17ab3ec1d67b0f2fd9816681378bdc8efe220;hp=bcb14d8939a547cc749187cee9b9a84d37e28d82;hpb=a4e60ccb56f339d3a20463b14ae4b3e124771f1a;p=mesa.git diff --git a/src/gallium/drivers/virgl/virgl_encode.c b/src/gallium/drivers/virgl/virgl_encode.c index bcb14d8939a..2975e6048cf 100644 --- a/src/gallium/drivers/virgl/virgl_encode.c +++ b/src/gallium/drivers/virgl/virgl_encode.c @@ -37,6 +37,8 @@ #include "virgl_resource.h" #include "virgl_screen.h" +#define VIRGL_ENCODE_MAX_DWORDS MIN2(VIRGL_MAX_CMDBUF_DWORDS, VIRGL_CMD0_MAX_DWORDS) + static int virgl_encoder_write_cmd_dword(struct virgl_context *ctx, uint32_t dword) { @@ -49,18 +51,25 @@ static int virgl_encoder_write_cmd_dword(struct virgl_context *ctx, return 0; } -static void virgl_encoder_write_res(struct virgl_context *ctx, - struct virgl_resource *res) +static void virgl_encoder_emit_resource(struct virgl_screen *vs, + struct virgl_cmd_buf *buf, + struct virgl_resource *res) { - struct virgl_winsys *vws = virgl_screen(ctx->base.screen)->vws; - + struct virgl_winsys *vws = vs->vws; if (res && res->hw_res) - vws->emit_res(vws, ctx->cbuf, res->hw_res, TRUE); + vws->emit_res(vws, buf, res->hw_res, TRUE); else { - virgl_encoder_write_dword(ctx->cbuf, 0); + virgl_encoder_write_dword(buf, 0); } } +static void virgl_encoder_write_res(struct virgl_context *ctx, + struct virgl_resource *res) +{ + struct virgl_screen *vs = virgl_screen(ctx->base.screen); + virgl_encoder_emit_resource(vs, ctx->cbuf, res); +} + int virgl_encode_bind_object(struct virgl_context *ctx, uint32_t handle, uint32_t object) { @@ -154,7 +163,7 @@ int virgl_encode_rasterizer_state(struct virgl_context *ctx, virgl_encoder_write_dword(ctx->cbuf, handle); tmp = VIRGL_OBJ_RS_S0_FLATSHADE(state->flatshade) | - VIRGL_OBJ_RS_S0_DEPTH_CLIP(state->depth_clip) | + VIRGL_OBJ_RS_S0_DEPTH_CLIP(state->depth_clip_near) | 
VIRGL_OBJ_RS_S0_CLIP_HALFZ(state->clip_halfz) | VIRGL_OBJ_RS_S0_RASTERIZER_DISCARD(state->rasterizer_discard) | VIRGL_OBJ_RS_S0_FLATSHADE_FIRST(state->flatshade_first) | @@ -288,10 +297,10 @@ int virgl_encode_shader_state(struct virgl_context *ctx, while (left_bytes) { uint32_t length, offlen; int hdr_len = base_hdr_size + (first_pass ? strm_hdr_size : 0); - if (ctx->cbuf->cdw + hdr_len + 1 > VIRGL_MAX_CMDBUF_DWORDS) + if (ctx->cbuf->cdw + hdr_len + 1 >= VIRGL_ENCODE_MAX_DWORDS) ctx->base.flush(&ctx->base, NULL, 0); - thispass = (VIRGL_MAX_CMDBUF_DWORDS - ctx->cbuf->cdw - hdr_len - 1) * 4; + thispass = (VIRGL_ENCODE_MAX_DWORDS - ctx->cbuf->cdw - hdr_len - 1) * 4; length = MIN2(thispass, left_bytes); len = ((length + 3) / 4) + hdr_len; @@ -460,10 +469,13 @@ int virgl_encoder_draw_vbo(struct virgl_context *ctx, if (length == VIRGL_DRAW_VBO_SIZE_INDIRECT) { virgl_encoder_write_res(ctx, virgl_resource(info->indirect->buffer)); virgl_encoder_write_dword(ctx->cbuf, info->indirect->offset); - virgl_encoder_write_dword(ctx->cbuf, 0); /* indirect stride */ - virgl_encoder_write_dword(ctx->cbuf, 0); /* indirect draw count */ - virgl_encoder_write_dword(ctx->cbuf, 0); /* indirect draw count offset */ - virgl_encoder_write_dword(ctx->cbuf, 0); /* indirect draw count handle */ + virgl_encoder_write_dword(ctx->cbuf, info->indirect->stride); /* indirect stride */ + virgl_encoder_write_dword(ctx->cbuf, info->indirect->draw_count); /* indirect draw count */ + virgl_encoder_write_dword(ctx->cbuf, info->indirect->indirect_draw_count_offset); /* indirect draw count offset */ + if (info->indirect->indirect_draw_count) + virgl_encoder_write_res(ctx, virgl_resource(info->indirect->indirect_draw_count)); + else + virgl_encoder_write_dword(ctx->cbuf, 0); /* indirect draw count handle */ } return 0; } @@ -477,14 +489,11 @@ int virgl_encoder_create_surface(struct virgl_context *ctx, virgl_encoder_write_dword(ctx->cbuf, handle); virgl_encoder_write_res(ctx, res); 
virgl_encoder_write_dword(ctx->cbuf, templat->format); - if (templat->texture->target == PIPE_BUFFER) { - virgl_encoder_write_dword(ctx->cbuf, templat->u.buf.first_element); - virgl_encoder_write_dword(ctx->cbuf, templat->u.buf.last_element); - } else { - virgl_encoder_write_dword(ctx->cbuf, templat->u.tex.level); - virgl_encoder_write_dword(ctx->cbuf, templat->u.tex.first_layer | (templat->u.tex.last_layer << 16)); - } + assert(templat->texture->target != PIPE_BUFFER); + virgl_encoder_write_dword(ctx->cbuf, templat->u.tex.level); + virgl_encoder_write_dword(ctx->cbuf, templat->u.tex.first_layer | (templat->u.tex.last_layer << 16)); + return 0; } @@ -502,23 +511,50 @@ int virgl_encoder_create_so_target(struct virgl_context *ctx, return 0; } -static void virgl_encoder_iw_emit_header_1d(struct virgl_context *ctx, - struct virgl_resource *res, - unsigned level, unsigned usage, - const struct pipe_box *box, - unsigned stride, unsigned layer_stride) +enum virgl_transfer3d_encode_stride { + /* The stride and layer_stride are explicitly specified in the command. */ + virgl_transfer3d_explicit_stride, + /* The stride and layer_stride are inferred by the host. In this case, the + * host will use the image stride and layer_stride for the specified level. 
+ */
+   virgl_transfer3d_host_inferred_stride,
+};
+
+static void virgl_encoder_transfer3d_common(struct virgl_screen *vs,
+                                            struct virgl_cmd_buf *buf,
+                                            struct virgl_transfer *xfer,
+                                            enum virgl_transfer3d_encode_stride encode_stride)
+
 {
-   virgl_encoder_write_res(ctx, res);
-   virgl_encoder_write_dword(ctx->cbuf, level);
-   virgl_encoder_write_dword(ctx->cbuf, usage);
-   virgl_encoder_write_dword(ctx->cbuf, stride);
-   virgl_encoder_write_dword(ctx->cbuf, layer_stride);
-   virgl_encoder_write_dword(ctx->cbuf, box->x);
-   virgl_encoder_write_dword(ctx->cbuf, box->y);
-   virgl_encoder_write_dword(ctx->cbuf, box->z);
-   virgl_encoder_write_dword(ctx->cbuf, box->width);
-   virgl_encoder_write_dword(ctx->cbuf, box->height);
-   virgl_encoder_write_dword(ctx->cbuf, box->depth);
+   struct pipe_transfer *transfer = &xfer->base;
+   unsigned stride;
+   unsigned layer_stride;
+
+   if (encode_stride == virgl_transfer3d_explicit_stride) {
+      stride = transfer->stride;
+      layer_stride = transfer->layer_stride;
+   } else if (encode_stride == virgl_transfer3d_host_inferred_stride) {
+      /* Host infers the strides from the image metadata for this level. */
+      stride = 0;
+      layer_stride = 0;
+   } else {
+      assert(!"Invalid virgl_transfer3d_encode_stride value");
+   }
+
+   /* We cannot use virgl_encoder_emit_resource with transfer->resource here
+    * because transfer->resource might have a different virgl_hw_res than what
+    * this transfer targets, which is saved in xfer->hw_res.
+ */ + vs->vws->emit_res(vs->vws, buf, xfer->hw_res, TRUE); + virgl_encoder_write_dword(buf, transfer->level); + virgl_encoder_write_dword(buf, transfer->usage); + virgl_encoder_write_dword(buf, stride); + virgl_encoder_write_dword(buf, layer_stride); + virgl_encoder_write_dword(buf, transfer->box.x); + virgl_encoder_write_dword(buf, transfer->box.y); + virgl_encoder_write_dword(buf, transfer->box.z); + virgl_encoder_write_dword(buf, transfer->box.width); + virgl_encoder_write_dword(buf, transfer->box.height); + virgl_encoder_write_dword(buf, transfer->box.depth); } int virgl_encoder_inline_write(struct virgl_context *ctx, @@ -530,10 +566,17 @@ int virgl_encoder_inline_write(struct virgl_context *ctx, { uint32_t size = (stride ? stride : box->width) * box->height; uint32_t length, thispass, left_bytes; - struct pipe_box mybox = *box; + struct virgl_transfer transfer; + struct virgl_screen *vs = virgl_screen(ctx->base.screen); + + transfer.base.resource = &res->u.b; + transfer.hw_res = res->hw_res; + transfer.base.level = level; + transfer.base.usage = usage; + transfer.base.box = *box; length = 11 + (size + 3) / 4; - if ((ctx->cbuf->cdw + length + 1) > VIRGL_MAX_CMDBUF_DWORDS) { + if ((ctx->cbuf->cdw + length + 1) > VIRGL_ENCODE_MAX_DWORDS) { if (box->height > 1 || box->depth > 1) { debug_printf("inline transfer failed due to multi dimensions and too large\n"); assert(0); @@ -542,19 +585,20 @@ int virgl_encoder_inline_write(struct virgl_context *ctx, left_bytes = size; while (left_bytes) { - if (ctx->cbuf->cdw + 12 >= VIRGL_MAX_CMDBUF_DWORDS) + if (ctx->cbuf->cdw + 12 >= VIRGL_ENCODE_MAX_DWORDS) ctx->base.flush(&ctx->base, NULL, 0); - thispass = (VIRGL_MAX_CMDBUF_DWORDS - ctx->cbuf->cdw - 12) * 4; + thispass = (VIRGL_ENCODE_MAX_DWORDS - ctx->cbuf->cdw - 12) * 4; length = MIN2(thispass, left_bytes); - mybox.width = length; + transfer.base.box.width = length; virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_RESOURCE_INLINE_WRITE, 0, ((length + 3) / 4) + 11)); 
- virgl_encoder_iw_emit_header_1d(ctx, res, level, usage, &mybox, stride, layer_stride); + virgl_encoder_transfer3d_common(vs, ctx->cbuf, &transfer, + virgl_transfer3d_host_inferred_stride); virgl_encoder_write_block(ctx->cbuf, data, length); left_bytes -= length; - mybox.x += length; + transfer.base.box.x += length; data += length; } return 0; @@ -944,11 +988,42 @@ int virgl_encode_set_shader_buffers(struct virgl_context *ctx, virgl_encoder_write_dword(ctx->cbuf, shader); virgl_encoder_write_dword(ctx->cbuf, start_slot); for (i = 0; i < count; i++) { - if (buffers) { + if (buffers && buffers[i].buffer) { struct virgl_resource *res = virgl_resource(buffers[i].buffer); virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_offset); virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_size); virgl_encoder_write_res(ctx, res); + + util_range_add(&res->valid_buffer_range, buffers[i].buffer_offset, + buffers[i].buffer_offset + buffers[i].buffer_size); + virgl_resource_dirty(res, 0); + } else { + virgl_encoder_write_dword(ctx->cbuf, 0); + virgl_encoder_write_dword(ctx->cbuf, 0); + virgl_encoder_write_dword(ctx->cbuf, 0); + } + } + return 0; +} + +int virgl_encode_set_hw_atomic_buffers(struct virgl_context *ctx, + unsigned start_slot, unsigned count, + const struct pipe_shader_buffer *buffers) +{ + int i; + virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_ATOMIC_BUFFERS, 0, VIRGL_SET_ATOMIC_BUFFER_SIZE(count))); + + virgl_encoder_write_dword(ctx->cbuf, start_slot); + for (i = 0; i < count; i++) { + if (buffers && buffers[i].buffer) { + struct virgl_resource *res = virgl_resource(buffers[i].buffer); + virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_offset); + virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_size); + virgl_encoder_write_res(ctx, res); + + util_range_add(&res->valid_buffer_range, buffers[i].buffer_offset, + buffers[i].buffer_offset + buffers[i].buffer_size); + virgl_resource_dirty(res, 0); } else { 
virgl_encoder_write_dword(ctx->cbuf, 0); virgl_encoder_write_dword(ctx->cbuf, 0); @@ -969,13 +1044,19 @@ int virgl_encode_set_shader_images(struct virgl_context *ctx, virgl_encoder_write_dword(ctx->cbuf, shader); virgl_encoder_write_dword(ctx->cbuf, start_slot); for (i = 0; i < count; i++) { - if (images) { + if (images && images[i].resource) { struct virgl_resource *res = virgl_resource(images[i].resource); virgl_encoder_write_dword(ctx->cbuf, images[i].format); virgl_encoder_write_dword(ctx->cbuf, images[i].access); virgl_encoder_write_dword(ctx->cbuf, images[i].u.buf.offset); virgl_encoder_write_dword(ctx->cbuf, images[i].u.buf.size); virgl_encoder_write_res(ctx, res); + + if (res->u.b.target == PIPE_BUFFER) { + util_range_add(&res->valid_buffer_range, images[i].u.buf.offset, + images[i].u.buf.offset + images[i].u.buf.size); + } + virgl_resource_dirty(res, images[i].u.tex.level); } else { virgl_encoder_write_dword(ctx->cbuf, 0); virgl_encoder_write_dword(ctx->cbuf, 0); @@ -1021,3 +1102,93 @@ int virgl_encode_texture_barrier(struct virgl_context *ctx, virgl_encoder_write_dword(ctx->cbuf, flags); return 0; } + +int virgl_encode_host_debug_flagstring(struct virgl_context *ctx, + const char *flagstring) +{ + unsigned long slen = strlen(flagstring) + 1; + uint32_t sslen; + uint32_t string_length; + + if (!slen) + return 0; + + if (slen > 4 * 0xffff) { + debug_printf("VIRGL: host debug flag string too long, will be truncated\n"); + slen = 4 * 0xffff; + } + + sslen = (uint32_t )(slen + 3) / 4; + string_length = (uint32_t)MIN2(sslen * 4, slen); + + virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_DEBUG_FLAGS, 0, sslen)); + virgl_encoder_write_block(ctx->cbuf, (const uint8_t *)flagstring, string_length); + return 0; +} + +int virgl_encode_tweak(struct virgl_context *ctx, enum vrend_tweak_type tweak, uint32_t value) +{ + virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_TWEAKS, 0, VIRGL_SET_TWEAKS_SIZE)); + virgl_encoder_write_dword(ctx->cbuf, 
tweak); + virgl_encoder_write_dword(ctx->cbuf, value); + return 0; +} + + +int virgl_encode_get_query_result_qbo(struct virgl_context *ctx, + uint32_t handle, + struct virgl_resource *res, boolean wait, + uint32_t result_type, + uint32_t offset, + uint32_t index) +{ + virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_GET_QUERY_RESULT_QBO, 0, VIRGL_QUERY_RESULT_QBO_SIZE)); + virgl_encoder_write_dword(ctx->cbuf, handle); + virgl_encoder_write_res(ctx, res); + virgl_encoder_write_dword(ctx->cbuf, wait ? 1 : 0); + virgl_encoder_write_dword(ctx->cbuf, result_type); + virgl_encoder_write_dword(ctx->cbuf, offset); + virgl_encoder_write_dword(ctx->cbuf, index); + return 0; +} + +void virgl_encode_transfer(struct virgl_screen *vs, struct virgl_cmd_buf *buf, + struct virgl_transfer *trans, uint32_t direction) +{ + uint32_t command; + command = VIRGL_CMD0(VIRGL_CCMD_TRANSFER3D, 0, VIRGL_TRANSFER3D_SIZE); + virgl_encoder_write_dword(buf, command); + virgl_encoder_transfer3d_common(vs, buf, trans, + virgl_transfer3d_host_inferred_stride); + virgl_encoder_write_dword(buf, trans->offset); + virgl_encoder_write_dword(buf, direction); +} + +void virgl_encode_copy_transfer(struct virgl_context *ctx, + struct virgl_transfer *trans) +{ + uint32_t command; + struct virgl_resource *copy_src_res = virgl_resource(trans->copy_src_res); + struct virgl_screen *vs = virgl_screen(ctx->base.screen); + + command = VIRGL_CMD0(VIRGL_CCMD_COPY_TRANSFER3D, 0, VIRGL_COPY_TRANSFER3D_SIZE); + virgl_encoder_write_cmd_dword(ctx, command); + /* Copy transfers need to explicitly specify the stride, since it may differ + * from the image stride. + */ + virgl_encoder_transfer3d_common(vs, ctx->cbuf, trans, virgl_transfer3d_explicit_stride); + virgl_encoder_emit_resource(vs, ctx->cbuf, copy_src_res); + virgl_encoder_write_dword(ctx->cbuf, trans->copy_src_offset); + /* At the moment all copy transfers are synchronized. 
*/ + virgl_encoder_write_dword(ctx->cbuf, 1); +} + +void virgl_encode_end_transfers(struct virgl_cmd_buf *buf) +{ + uint32_t command, diff; + diff = VIRGL_MAX_TBUF_DWORDS - buf->cdw; + if (diff) { + command = VIRGL_CMD0(VIRGL_CCMD_END_TRANSFERS, 0, diff - 1); + virgl_encoder_write_dword(buf, command); + } +}