* USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <stdint.h>
+#include <assert.h>
+#include <string.h>
+#include "util/u_format.h"
#include "util/u_memory.h"
#include "util/u_math.h"
#include "pipe/p_state.h"
-#include "virgl_encode.h"
-#include "virgl_resource.h"
#include "tgsi/tgsi_dump.h"
#include "tgsi/tgsi_parse.h"
+#include "virgl_context.h"
+#include "virgl_encode.h"
+#include "virgl_protocol.h"
+#include "virgl_resource.h"
+#include "virgl_screen.h"
+
+#define VIRGL_ENCODE_MAX_DWORDS MIN2(VIRGL_MAX_CMDBUF_DWORDS, VIRGL_CMD0_MAX_DWORDS)
+
static int virgl_encoder_write_cmd_dword(struct virgl_context *ctx,
uint32_t dword)
{
return 0;
}
-static void virgl_encoder_write_res(struct virgl_context *ctx,
- struct virgl_resource *res)
+/* Emit a reference to RES into the given command buffer BUF.
+ * When RES has a hardware resource attached, the winsys emit_res hook
+ * writes (and tracks) its handle; otherwise a zero dword is written as
+ * the "no resource" placeholder so the command layout stays fixed.
+ */
+static void virgl_encoder_emit_resource(struct virgl_screen *vs,
+ struct virgl_cmd_buf *buf,
+ struct virgl_resource *res)
{
- struct virgl_winsys *vws = virgl_screen(ctx->base.screen)->vws;
-
+ struct virgl_winsys *vws = vs->vws;
if (res && res->hw_res)
- vws->emit_res(vws, ctx->cbuf, res->hw_res, TRUE);
+ vws->emit_res(vws, buf, res->hw_res, TRUE);
else {
- virgl_encoder_write_dword(ctx->cbuf, 0);
+ virgl_encoder_write_dword(buf, 0);
}
}
+/* Convenience wrapper: emit RES into the context's current command
+ * buffer, resolving the winsys through the context's screen.
+ */
+static void virgl_encoder_write_res(struct virgl_context *ctx,
+ struct virgl_resource *res)
+{
+ struct virgl_screen *vs = virgl_screen(ctx->base.screen);
+ virgl_encoder_emit_resource(vs, ctx->cbuf, res);
+}
+
int virgl_encode_bind_object(struct virgl_context *ctx,
uint32_t handle, uint32_t object)
{
virgl_encoder_write_dword(ctx->cbuf, handle);
tmp = VIRGL_OBJ_RS_S0_FLATSHADE(state->flatshade) |
- VIRGL_OBJ_RS_S0_DEPTH_CLIP(state->depth_clip) |
+ VIRGL_OBJ_RS_S0_DEPTH_CLIP(state->depth_clip_near) |
VIRGL_OBJ_RS_S0_CLIP_HALFZ(state->clip_halfz) |
VIRGL_OBJ_RS_S0_RASTERIZER_DISCARD(state->rasterizer_discard) |
VIRGL_OBJ_RS_S0_FLATSHADE_FIRST(state->flatshade_first) |
VIRGL_OBJ_RS_S0_LINE_STIPPLE_ENABLE(state->line_stipple_enable) |
VIRGL_OBJ_RS_S0_LINE_LAST_PIXEL(state->line_last_pixel) |
VIRGL_OBJ_RS_S0_HALF_PIXEL_CENTER(state->half_pixel_center) |
- VIRGL_OBJ_RS_S0_BOTTOM_EDGE_RULE(state->bottom_edge_rule);
+ VIRGL_OBJ_RS_S0_BOTTOM_EDGE_RULE(state->bottom_edge_rule) |
+ VIRGL_OBJ_RS_S0_FORCE_PERSAMPLE_INTERP(state->force_persample_interp);
virgl_encoder_write_dword(ctx->cbuf, tmp); /* S0 */
virgl_encoder_write_dword(ctx->cbuf, fui(state->point_size)); /* S1 */
VIRGL_OBJ_SHADER_SO_OUTPUT_BUFFER(so_info->output[i].output_buffer) |
VIRGL_OBJ_SHADER_SO_OUTPUT_DST_OFFSET(so_info->output[i].dst_offset);
virgl_encoder_write_dword(ctx->cbuf, tmp);
- virgl_encoder_write_dword(ctx->cbuf, 0);
+ virgl_encoder_write_dword(ctx->cbuf, so_info->output[i].stream);
}
}
}
uint32_t handle,
uint32_t type,
const struct pipe_stream_output_info *so_info,
+ uint32_t cs_req_local_mem,
const struct tgsi_token *tokens)
{
char *str, *sptr;
bret = tgsi_dump_str(tokens, TGSI_DUMP_FLOAT_AS_HEX, str, str_total_size);
if (bret == false) {
- fprintf(stderr, "Failed to translate shader in available space - trying again\n");
+ if (virgl_debug & VIRGL_DEBUG_VERBOSE)
+ debug_printf("Failed to translate shader in available space - trying again\n");
old_size = str_total_size;
str_total_size = 65536 * ++retry_size;
str = REALLOC(str, old_size, str_total_size);
if (bret == false)
return -1;
+ if (virgl_debug & VIRGL_DEBUG_TGSI)
+ debug_printf("TGSI:\n---8<---\n%s\n---8<---\n", str);
+
shader_len = strlen(str) + 1;
left_bytes = shader_len;
while (left_bytes) {
uint32_t length, offlen;
int hdr_len = base_hdr_size + (first_pass ? strm_hdr_size : 0);
- if (ctx->cbuf->cdw + hdr_len + 1 > VIRGL_MAX_CMDBUF_DWORDS)
+ if (ctx->cbuf->cdw + hdr_len + 1 >= VIRGL_ENCODE_MAX_DWORDS)
ctx->base.flush(&ctx->base, NULL, 0);
- thispass = (VIRGL_MAX_CMDBUF_DWORDS - ctx->cbuf->cdw - hdr_len - 1) * 4;
+ thispass = (VIRGL_ENCODE_MAX_DWORDS - ctx->cbuf->cdw - hdr_len - 1) * 4;
length = MIN2(thispass, left_bytes);
len = ((length + 3) / 4) + hdr_len;
virgl_emit_shader_header(ctx, handle, len, type, offlen, num_tokens);
- virgl_emit_shader_streamout(ctx, first_pass ? so_info : NULL);
+ if (type == PIPE_SHADER_COMPUTE)
+ virgl_encoder_write_dword(ctx->cbuf, cs_req_local_mem);
+ else
+ virgl_emit_shader_streamout(ctx, first_pass ? so_info : NULL);
virgl_encoder_write_block(ctx->cbuf, (uint8_t *)sptr, length);
double depth, unsigned stencil)
{
int i;
+ uint64_t qword;
+
+ STATIC_ASSERT(sizeof(qword) == sizeof(depth));
+ memcpy(&qword, &depth, sizeof(qword));
virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CLEAR, 0, VIRGL_OBJ_CLEAR_SIZE));
virgl_encoder_write_dword(ctx->cbuf, buffers);
for (i = 0; i < 4; i++)
virgl_encoder_write_dword(ctx->cbuf, color->ui[i]);
- virgl_encoder_write_qword(ctx->cbuf, *(uint64_t *)&depth);
+ virgl_encoder_write_qword(ctx->cbuf, qword);
virgl_encoder_write_dword(ctx->cbuf, stencil);
return 0;
}
int virgl_encoder_set_framebuffer_state(struct virgl_context *ctx,
const struct pipe_framebuffer_state *state)
{
- struct virgl_surface *zsurf = (struct virgl_surface *)state->zsbuf;
+ struct virgl_surface *zsurf = virgl_surface(state->zsbuf);
int i;
virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_FRAMEBUFFER_STATE, 0, VIRGL_SET_FRAMEBUFFER_STATE_SIZE(state->nr_cbufs)));
virgl_encoder_write_dword(ctx->cbuf, state->nr_cbufs);
virgl_encoder_write_dword(ctx->cbuf, zsurf ? zsurf->handle : 0);
for (i = 0; i < state->nr_cbufs; i++) {
- struct virgl_surface *surf = (struct virgl_surface *)state->cbufs[i];
+ struct virgl_surface *surf = virgl_surface(state->cbufs[i]);
virgl_encoder_write_dword(ctx->cbuf, surf ? surf->handle : 0);
}
+ struct virgl_screen *rs = virgl_screen(ctx->base.screen);
+ if (rs->caps.caps.v2.capability_bits & VIRGL_CAP_FB_NO_ATTACH) {
+ virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_FRAMEBUFFER_STATE_NO_ATTACH, 0, VIRGL_SET_FRAMEBUFFER_STATE_NO_ATTACH_SIZE));
+ virgl_encoder_write_dword(ctx->cbuf, state->width | (state->height << 16));
+ virgl_encoder_write_dword(ctx->cbuf, state->layers | (state->samples << 16));
+ }
return 0;
}
int i;
virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_VERTEX_BUFFERS, 0, VIRGL_SET_VERTEX_BUFFERS_SIZE(num_buffers)));
for (i = 0; i < num_buffers; i++) {
- struct virgl_resource *res = (struct virgl_resource *)buffers[i].buffer;
+ struct virgl_resource *res = virgl_resource(buffers[i].buffer.resource);
virgl_encoder_write_dword(ctx->cbuf, buffers[i].stride);
virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_offset);
virgl_encoder_write_res(ctx, res);
}
int virgl_encoder_set_index_buffer(struct virgl_context *ctx,
- const struct pipe_index_buffer *ib)
+ const struct virgl_indexbuf *ib)
{
int length = VIRGL_SET_INDEX_BUFFER_SIZE(ib);
struct virgl_resource *res = NULL;
if (ib)
- res = (struct virgl_resource *)ib->buffer;
+ res = virgl_resource(ib->buffer);
virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_INDEX_BUFFER, 0, length));
virgl_encoder_write_res(ctx, res);
int virgl_encoder_draw_vbo(struct virgl_context *ctx,
const struct pipe_draw_info *info)
{
- virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_DRAW_VBO, 0, VIRGL_DRAW_VBO_SIZE));
+ uint32_t length = VIRGL_DRAW_VBO_SIZE;
+ if (info->mode == PIPE_PRIM_PATCHES)
+ length = VIRGL_DRAW_VBO_SIZE_TESS;
+ if (info->indirect)
+ length = VIRGL_DRAW_VBO_SIZE_INDIRECT;
+ virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_DRAW_VBO, 0, length));
virgl_encoder_write_dword(ctx->cbuf, info->start);
virgl_encoder_write_dword(ctx->cbuf, info->count);
virgl_encoder_write_dword(ctx->cbuf, info->mode);
- virgl_encoder_write_dword(ctx->cbuf, info->indexed);
+ virgl_encoder_write_dword(ctx->cbuf, !!info->index_size);
virgl_encoder_write_dword(ctx->cbuf, info->instance_count);
virgl_encoder_write_dword(ctx->cbuf, info->index_bias);
virgl_encoder_write_dword(ctx->cbuf, info->start_instance);
virgl_encoder_write_dword(ctx->cbuf, info->count_from_stream_output->buffer_size);
else
virgl_encoder_write_dword(ctx->cbuf, 0);
+ if (length >= VIRGL_DRAW_VBO_SIZE_TESS) {
+ virgl_encoder_write_dword(ctx->cbuf, info->vertices_per_patch); /* vertices per patch */
+ virgl_encoder_write_dword(ctx->cbuf, info->drawid); /* drawid */
+ }
+ if (length == VIRGL_DRAW_VBO_SIZE_INDIRECT) {
+ virgl_encoder_write_res(ctx, virgl_resource(info->indirect->buffer));
+ virgl_encoder_write_dword(ctx->cbuf, info->indirect->offset);
+ virgl_encoder_write_dword(ctx->cbuf, info->indirect->stride); /* indirect stride */
+ virgl_encoder_write_dword(ctx->cbuf, info->indirect->draw_count); /* indirect draw count */
+ virgl_encoder_write_dword(ctx->cbuf, info->indirect->indirect_draw_count_offset); /* indirect draw count offset */
+ if (info->indirect->indirect_draw_count)
+ virgl_encoder_write_res(ctx, virgl_resource(info->indirect->indirect_draw_count));
+ else
+ virgl_encoder_write_dword(ctx->cbuf, 0); /* indirect draw count handle */
+ }
return 0;
}
virgl_encoder_write_dword(ctx->cbuf, handle);
virgl_encoder_write_res(ctx, res);
virgl_encoder_write_dword(ctx->cbuf, templat->format);
- if (templat->texture->target == PIPE_BUFFER) {
- virgl_encoder_write_dword(ctx->cbuf, templat->u.buf.first_element);
- virgl_encoder_write_dword(ctx->cbuf, templat->u.buf.last_element);
- } else {
- virgl_encoder_write_dword(ctx->cbuf, templat->u.tex.level);
- virgl_encoder_write_dword(ctx->cbuf, templat->u.tex.first_layer | (templat->u.tex.last_layer << 16));
- }
+ assert(templat->texture->target != PIPE_BUFFER);
+ virgl_encoder_write_dword(ctx->cbuf, templat->u.tex.level);
+ virgl_encoder_write_dword(ctx->cbuf, templat->u.tex.first_layer | (templat->u.tex.last_layer << 16));
+
return 0;
}
return 0;
}
-static void virgl_encoder_iw_emit_header_1d(struct virgl_context *ctx,
- struct virgl_resource *res,
- unsigned level, unsigned usage,
- const struct pipe_box *box,
- unsigned stride, unsigned layer_stride)
+enum virgl_transfer3d_encode_stride {
+ /* The stride and layer_stride are explicitly specified in the command. */
+ virgl_transfer3d_explicit_stride,
+ /* The stride and layer_stride are inferred by the host. In this case, the
+ * host will use the image stride and layer_stride for the specified level.
+ */
+ virgl_transfer3d_host_inferred_stride,
+};
+
+/* Emit the payload shared by TRANSFER3D, COPY_TRANSFER3D and
+ * RESOURCE_INLINE_WRITE: resource handle, level, usage, strides and the
+ * transfer box, in that order.  ENCODE_STRIDE selects whether the
+ * transfer's explicit strides are sent or zeros (host-inferred).
+ */
+static void virgl_encoder_transfer3d_common(struct virgl_screen *vs,
+ struct virgl_cmd_buf *buf,
+ struct virgl_transfer *xfer,
+ enum virgl_transfer3d_encode_stride encode_stride)
+
{
- virgl_encoder_write_res(ctx, res);
- virgl_encoder_write_dword(ctx->cbuf, level);
- virgl_encoder_write_dword(ctx->cbuf, usage);
- virgl_encoder_write_dword(ctx->cbuf, stride);
- virgl_encoder_write_dword(ctx->cbuf, layer_stride);
- virgl_encoder_write_dword(ctx->cbuf, box->x);
- virgl_encoder_write_dword(ctx->cbuf, box->y);
- virgl_encoder_write_dword(ctx->cbuf, box->z);
- virgl_encoder_write_dword(ctx->cbuf, box->width);
- virgl_encoder_write_dword(ctx->cbuf, box->height);
- virgl_encoder_write_dword(ctx->cbuf, box->depth);
+ struct pipe_transfer *transfer = &xfer->base;
+ unsigned stride;
+ unsigned layer_stride;
+
+ if (encode_stride == virgl_transfer3d_explicit_stride) {
+ stride = transfer->stride;
+ layer_stride = transfer->layer_stride;
+ } else if (encode_stride == virgl_transfer3d_host_inferred_stride) {
+ /* Zero tells the host to use the image stride for this level. */
+ stride = 0;
+ layer_stride = 0;
+ } else {
+ /* Unreachable for valid enum values; keep the strides defined even
+ * when the assert is compiled out under NDEBUG. */
+ assert(!"Invalid virgl_transfer3d_encode_stride value");
+ stride = 0;
+ layer_stride = 0;
+ }
+
+ /* We cannot use virgl_encoder_emit_resource with transfer->resource here
+ * because transfer->resource might have a different virgl_hw_res than what
+ * this transfer targets, which is saved in xfer->hw_res.
+ */
+ vs->vws->emit_res(vs->vws, buf, xfer->hw_res, TRUE);
+ virgl_encoder_write_dword(buf, transfer->level);
+ virgl_encoder_write_dword(buf, transfer->usage);
+ virgl_encoder_write_dword(buf, stride);
+ virgl_encoder_write_dword(buf, layer_stride);
+ virgl_encoder_write_dword(buf, transfer->box.x);
+ virgl_encoder_write_dword(buf, transfer->box.y);
+ virgl_encoder_write_dword(buf, transfer->box.z);
+ virgl_encoder_write_dword(buf, transfer->box.width);
+ virgl_encoder_write_dword(buf, transfer->box.height);
+ virgl_encoder_write_dword(buf, transfer->box.depth);
}
int virgl_encoder_inline_write(struct virgl_context *ctx,
{
uint32_t size = (stride ? stride : box->width) * box->height;
uint32_t length, thispass, left_bytes;
- struct pipe_box mybox = *box;
+ struct virgl_transfer transfer;
+ struct virgl_screen *vs = virgl_screen(ctx->base.screen);
+
+ transfer.base.resource = &res->u.b;
+ transfer.hw_res = res->hw_res;
+ transfer.base.level = level;
+ transfer.base.usage = usage;
+ transfer.base.box = *box;
length = 11 + (size + 3) / 4;
- if ((ctx->cbuf->cdw + length + 1) > VIRGL_MAX_CMDBUF_DWORDS) {
+ if ((ctx->cbuf->cdw + length + 1) > VIRGL_ENCODE_MAX_DWORDS) {
if (box->height > 1 || box->depth > 1) {
debug_printf("inline transfer failed due to multi dimensions and too large\n");
assert(0);
left_bytes = size;
while (left_bytes) {
- if (ctx->cbuf->cdw + 12 > VIRGL_MAX_CMDBUF_DWORDS)
+ if (ctx->cbuf->cdw + 12 >= VIRGL_ENCODE_MAX_DWORDS)
ctx->base.flush(&ctx->base, NULL, 0);
- thispass = (VIRGL_MAX_CMDBUF_DWORDS - ctx->cbuf->cdw - 12) * 4;
+ thispass = (VIRGL_ENCODE_MAX_DWORDS - ctx->cbuf->cdw - 12) * 4;
length = MIN2(thispass, left_bytes);
- mybox.width = length;
+ transfer.base.box.width = length;
virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_RESOURCE_INLINE_WRITE, 0, ((length + 3) / 4) + 11));
- virgl_encoder_iw_emit_header_1d(ctx, res, level, usage, &mybox, stride, layer_stride);
+ virgl_encoder_transfer3d_common(vs, ctx->cbuf, &transfer,
+ virgl_transfer3d_host_inferred_stride);
virgl_encoder_write_block(ctx->cbuf, data, length);
left_bytes -= length;
- mybox.x += length;
+ transfer.base.box.x += length;
data += length;
}
return 0;
VIRGL_OBJ_SAMPLE_STATE_S0_MIN_MIP_FILTER(state->min_mip_filter) |
VIRGL_OBJ_SAMPLE_STATE_S0_MAG_IMG_FILTER(state->mag_img_filter) |
VIRGL_OBJ_SAMPLE_STATE_S0_COMPARE_MODE(state->compare_mode) |
- VIRGL_OBJ_SAMPLE_STATE_S0_COMPARE_FUNC(state->compare_func);
+ VIRGL_OBJ_SAMPLE_STATE_S0_COMPARE_FUNC(state->compare_func) |
+ VIRGL_OBJ_SAMPLE_STATE_S0_SEAMLESS_CUBE_MAP(state->seamless_cube_map);
virgl_encoder_write_dword(ctx->cbuf, tmp);
virgl_encoder_write_dword(ctx->cbuf, fui(state->lod_bias));
struct virgl_resource *res,
const struct pipe_sampler_view *state)
{
+ unsigned elem_size = util_format_get_blocksize(state->format);
+ struct virgl_screen *rs = virgl_screen(ctx->base.screen);
uint32_t tmp;
+ uint32_t dword_fmt_target = state->format;
virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_SAMPLER_VIEW, VIRGL_OBJ_SAMPLER_VIEW_SIZE));
virgl_encoder_write_dword(ctx->cbuf, handle);
virgl_encoder_write_res(ctx, res);
- virgl_encoder_write_dword(ctx->cbuf, state->format);
+ if (rs->caps.caps.v2.capability_bits & VIRGL_CAP_TEXTURE_VIEW)
+ dword_fmt_target |= (state->target << 24);
+ virgl_encoder_write_dword(ctx->cbuf, dword_fmt_target);
if (res->u.b.target == PIPE_BUFFER) {
- virgl_encoder_write_dword(ctx->cbuf, state->u.buf.first_element);
- virgl_encoder_write_dword(ctx->cbuf, state->u.buf.last_element);
+ virgl_encoder_write_dword(ctx->cbuf, state->u.buf.offset / elem_size);
+ virgl_encoder_write_dword(ctx->cbuf, (state->u.buf.offset + state->u.buf.size) / elem_size - 1);
} else {
virgl_encoder_write_dword(ctx->cbuf, state->u.tex.first_layer | state->u.tex.last_layer << 16);
virgl_encoder_write_dword(ctx->cbuf, state->u.tex.first_level | state->u.tex.last_level << 8);
virgl_encoder_write_dword(ctx->cbuf, sample_mask);
}
+/* Encode VIRGL_CCMD_SET_MIN_SAMPLES: the minimum number of samples per
+ * pixel the host must shade (gallium set_min_samples state).
+ */
+void virgl_encoder_set_min_samples(struct virgl_context *ctx,
+ unsigned min_samples)
+{
+ virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_MIN_SAMPLES, 0, VIRGL_SET_MIN_SAMPLES_SIZE));
+ virgl_encoder_write_dword(ctx->cbuf, min_samples);
+}
+
void virgl_encoder_set_clip_state(struct virgl_context *ctx,
const struct pipe_clip_state *clip)
{
virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BLIT, 0, VIRGL_CMD_BLIT_SIZE));
tmp = VIRGL_CMD_BLIT_S0_MASK(blit->mask) |
VIRGL_CMD_BLIT_S0_FILTER(blit->filter) |
- VIRGL_CMD_BLIT_S0_SCISSOR_ENABLE(blit->scissor_enable);
+ VIRGL_CMD_BLIT_S0_SCISSOR_ENABLE(blit->scissor_enable) |
+ VIRGL_CMD_BLIT_S0_RENDER_CONDITION_ENABLE(blit->render_condition_enable) |
+ VIRGL_CMD_BLIT_S0_ALPHA_BLEND(blit->alpha_blend);
virgl_encoder_write_dword(ctx->cbuf, tmp);
virgl_encoder_write_dword(ctx->cbuf, (blit->scissor.minx | blit->scissor.miny << 16));
virgl_encoder_write_dword(ctx->cbuf, (blit->scissor.maxx | blit->scissor.maxy << 16));
int virgl_encoder_render_condition(struct virgl_context *ctx,
uint32_t handle, boolean condition,
- uint mode)
+ enum pipe_render_cond_flag mode)
{
virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_RENDER_CONDITION, 0, VIRGL_RENDER_CONDITION_SIZE));
virgl_encoder_write_dword(ctx->cbuf, handle);
virgl_encoder_write_dword(ctx->cbuf, append_bitmask);
for (i = 0; i < num_targets; i++) {
struct virgl_so_target *tg = virgl_so_target(targets[i]);
- virgl_encoder_write_dword(ctx->cbuf, tg->handle);
+ virgl_encoder_write_dword(ctx->cbuf, tg ? tg->handle : 0);
}
return 0;
}
virgl_encoder_write_dword(ctx->cbuf, type);
return 0;
}
+
+/* Encode VIRGL_CCMD_SET_TESS_STATE: default tessellation levels used
+ * when no TCS provides them.  Payload is 4 outer + 2 inner levels as
+ * raw float bits — hence the literal length of 6 dwords.
+ *
+ * Always returns 0.
+ */
+int virgl_encode_set_tess_state(struct virgl_context *ctx,
+ const float outer[4],
+ const float inner[2])
+{
+ int i;
+ virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_TESS_STATE, 0, 6));
+ for (i = 0; i < 4; i++)
+ virgl_encoder_write_dword(ctx->cbuf, fui(outer[i]));
+ for (i = 0; i < 2; i++)
+ virgl_encoder_write_dword(ctx->cbuf, fui(inner[i]));
+ return 0;
+}
+
+/* Encode VIRGL_CCMD_SET_SHADER_BUFFERS: bind COUNT shader storage
+ * buffers for SHADER starting at START_SLOT.  Each slot carries offset,
+ * size and the resource handle; an empty slot is three zero dwords so
+ * the command layout stays fixed.
+ *
+ * Always returns 0.
+ */
+int virgl_encode_set_shader_buffers(struct virgl_context *ctx,
+ enum pipe_shader_type shader,
+ unsigned start_slot, unsigned count,
+ const struct pipe_shader_buffer *buffers)
+{
+ int i;
+ virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SHADER_BUFFERS, 0, VIRGL_SET_SHADER_BUFFER_SIZE(count)));
+
+ virgl_encoder_write_dword(ctx->cbuf, shader);
+ virgl_encoder_write_dword(ctx->cbuf, start_slot);
+ for (i = 0; i < count; i++) {
+ if (buffers && buffers[i].buffer) {
+ struct virgl_resource *res = virgl_resource(buffers[i].buffer);
+ virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_offset);
+ virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_size);
+ virgl_encoder_write_res(ctx, res);
+
+ /* The host may write through the SSBO binding: record the bound
+ * range as valid and mark the resource dirty. */
+ util_range_add(&res->valid_buffer_range, buffers[i].buffer_offset,
+ buffers[i].buffer_offset + buffers[i].buffer_size);
+ virgl_resource_dirty(res, 0);
+ } else {
+ virgl_encoder_write_dword(ctx->cbuf, 0);
+ virgl_encoder_write_dword(ctx->cbuf, 0);
+ virgl_encoder_write_dword(ctx->cbuf, 0);
+ }
+ }
+ return 0;
+}
+
+/* Encode VIRGL_CCMD_SET_ATOMIC_BUFFERS: bind COUNT hardware atomic
+ * counter buffers starting at START_SLOT.  Slot layout mirrors
+ * set_shader_buffers (offset, size, handle; zeros when empty) but with
+ * no per-shader-stage dword.
+ *
+ * Always returns 0.
+ */
+int virgl_encode_set_hw_atomic_buffers(struct virgl_context *ctx,
+ unsigned start_slot, unsigned count,
+ const struct pipe_shader_buffer *buffers)
+{
+ int i;
+ virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_ATOMIC_BUFFERS, 0, VIRGL_SET_ATOMIC_BUFFER_SIZE(count)));
+
+ virgl_encoder_write_dword(ctx->cbuf, start_slot);
+ for (i = 0; i < count; i++) {
+ if (buffers && buffers[i].buffer) {
+ struct virgl_resource *res = virgl_resource(buffers[i].buffer);
+ virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_offset);
+ virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_size);
+ virgl_encoder_write_res(ctx, res);
+
+ /* Atomic counters are host-writable: track the valid range and
+ * mark the resource dirty. */
+ util_range_add(&res->valid_buffer_range, buffers[i].buffer_offset,
+ buffers[i].buffer_offset + buffers[i].buffer_size);
+ virgl_resource_dirty(res, 0);
+ } else {
+ virgl_encoder_write_dword(ctx->cbuf, 0);
+ virgl_encoder_write_dword(ctx->cbuf, 0);
+ virgl_encoder_write_dword(ctx->cbuf, 0);
+ }
+ }
+ return 0;
+}
+
+/* Encode VIRGL_CCMD_SET_SHADER_IMAGES: bind COUNT image views for
+ * SHADER starting at START_SLOT.  Each slot carries format, access
+ * flags, two union dwords and the resource handle; empty slots are five
+ * zero dwords.
+ *
+ * NOTE(review): u.buf.offset/size are written even for texture images —
+ * presumably the protocol treats these as raw union dwords aliasing the
+ * tex fields; confirm against virgl_protocol.h.
+ *
+ * Always returns 0.
+ */
+int virgl_encode_set_shader_images(struct virgl_context *ctx,
+ enum pipe_shader_type shader,
+ unsigned start_slot, unsigned count,
+ const struct pipe_image_view *images)
+{
+ int i;
+ virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SHADER_IMAGES, 0, VIRGL_SET_SHADER_IMAGE_SIZE(count)));
+
+ virgl_encoder_write_dword(ctx->cbuf, shader);
+ virgl_encoder_write_dword(ctx->cbuf, start_slot);
+ for (i = 0; i < count; i++) {
+ if (images && images[i].resource) {
+ struct virgl_resource *res = virgl_resource(images[i].resource);
+ virgl_encoder_write_dword(ctx->cbuf, images[i].format);
+ virgl_encoder_write_dword(ctx->cbuf, images[i].access);
+ virgl_encoder_write_dword(ctx->cbuf, images[i].u.buf.offset);
+ virgl_encoder_write_dword(ctx->cbuf, images[i].u.buf.size);
+ virgl_encoder_write_res(ctx, res);
+
+ /* Only buffer images have a byte range to mark valid. */
+ if (res->u.b.target == PIPE_BUFFER) {
+ util_range_add(&res->valid_buffer_range, images[i].u.buf.offset,
+ images[i].u.buf.offset + images[i].u.buf.size);
+ }
+ virgl_resource_dirty(res, images[i].u.tex.level);
+ } else {
+ virgl_encoder_write_dword(ctx->cbuf, 0);
+ virgl_encoder_write_dword(ctx->cbuf, 0);
+ virgl_encoder_write_dword(ctx->cbuf, 0);
+ virgl_encoder_write_dword(ctx->cbuf, 0);
+ virgl_encoder_write_dword(ctx->cbuf, 0);
+ }
+ }
+ return 0;
+}
+
+/* Encode VIRGL_CCMD_MEMORY_BARRIER with the given PIPE_BARRIER_* flags.
+ * The payload is a single dword, hence the literal length 1.
+ *
+ * Always returns 0.
+ */
+int virgl_encode_memory_barrier(struct virgl_context *ctx,
+ unsigned flags)
+{
+ virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_MEMORY_BARRIER, 0, 1));
+ virgl_encoder_write_dword(ctx->cbuf, flags);
+ return 0;
+}
+
+/* Encode VIRGL_CCMD_LAUNCH_GRID: a compute dispatch with block (local
+ * workgroup) and grid dimensions.  For indirect dispatch the indirect
+ * buffer handle replaces a zero placeholder dword, followed by the
+ * offset into that buffer.
+ *
+ * Always returns 0.
+ */
+int virgl_encode_launch_grid(struct virgl_context *ctx,
+ const struct pipe_grid_info *grid_info)
+{
+ virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_LAUNCH_GRID, 0, VIRGL_LAUNCH_GRID_SIZE));
+ virgl_encoder_write_dword(ctx->cbuf, grid_info->block[0]);
+ virgl_encoder_write_dword(ctx->cbuf, grid_info->block[1]);
+ virgl_encoder_write_dword(ctx->cbuf, grid_info->block[2]);
+ virgl_encoder_write_dword(ctx->cbuf, grid_info->grid[0]);
+ virgl_encoder_write_dword(ctx->cbuf, grid_info->grid[1]);
+ virgl_encoder_write_dword(ctx->cbuf, grid_info->grid[2]);
+ if (grid_info->indirect) {
+ struct virgl_resource *res = virgl_resource(grid_info->indirect);
+ virgl_encoder_write_res(ctx, res);
+ } else
+ virgl_encoder_write_dword(ctx->cbuf, 0);
+ virgl_encoder_write_dword(ctx->cbuf, grid_info->indirect_offset);
+ return 0;
+}
+
+/* Encode VIRGL_CCMD_TEXTURE_BARRIER with the given flags (single-dword
+ * payload, literal length 1).
+ *
+ * Always returns 0.
+ */
+int virgl_encode_texture_barrier(struct virgl_context *ctx,
+ unsigned flags)
+{
+ virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_TEXTURE_BARRIER, 0, 1));
+ virgl_encoder_write_dword(ctx->cbuf, flags);
+ return 0;
+}
+
+/* Encode VIRGL_CCMD_SET_DEBUG_FLAGS: send a NUL-terminated debug flag
+ * string to the host renderer, rounded up to whole dwords and truncated
+ * to the 16-bit command-length limit (4 * 0xffff bytes).
+ *
+ * Always returns 0, including when there is nothing to send.
+ */
+int virgl_encode_host_debug_flagstring(struct virgl_context *ctx,
+ const char *flagstring)
+{
+ unsigned long slen;
+ uint32_t sslen;
+ uint32_t string_length;
+
+ /* Guard before strlen(): a NULL flag string means nothing to send.
+ * (The previous `if (!slen)` check was dead code, since
+ * slen = strlen(...) + 1 is always at least 1.) */
+ if (!flagstring)
+ return 0;
+
+ slen = strlen(flagstring) + 1;
+ if (slen > 4 * 0xffff) {
+ debug_printf("VIRGL: host debug flag string too long, will be truncated\n");
+ slen = 4 * 0xffff;
+ }
+
+ /* Round the byte count up to dwords; send at most sslen * 4 bytes. */
+ sslen = (uint32_t)(slen + 3) / 4;
+ string_length = (uint32_t)MIN2(sslen * 4, slen);
+
+ virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_DEBUG_FLAGS, 0, sslen));
+ virgl_encoder_write_block(ctx->cbuf, (const uint8_t *)flagstring, string_length);
+ return 0;
+}
+
+/* Encode VIRGL_CCMD_SET_TWEAKS: send one (tweak, value) pair that
+ * adjusts host renderer behaviour.
+ *
+ * Always returns 0.
+ */
+int virgl_encode_tweak(struct virgl_context *ctx, enum vrend_tweak_type tweak, uint32_t value)
+{
+ virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_TWEAKS, 0, VIRGL_SET_TWEAKS_SIZE));
+ virgl_encoder_write_dword(ctx->cbuf, tweak);
+ virgl_encoder_write_dword(ctx->cbuf, value);
+ return 0;
+}
+
+
+/* Encode VIRGL_CCMD_GET_QUERY_RESULT_QBO: ask the host to write the
+ * result of query HANDLE into the buffer RES at OFFSET.  WAIT selects
+ * blocking semantics; RESULT_TYPE and INDEX follow the gallium
+ * get_query_result_resource interface.
+ *
+ * Always returns 0.
+ */
+int virgl_encode_get_query_result_qbo(struct virgl_context *ctx,
+ uint32_t handle,
+ struct virgl_resource *res, boolean wait,
+ uint32_t result_type,
+ uint32_t offset,
+ uint32_t index)
+{
+ virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_GET_QUERY_RESULT_QBO, 0, VIRGL_QUERY_RESULT_QBO_SIZE));
+ virgl_encoder_write_dword(ctx->cbuf, handle);
+ virgl_encoder_write_res(ctx, res);
+ virgl_encoder_write_dword(ctx->cbuf, wait ? 1 : 0);
+ virgl_encoder_write_dword(ctx->cbuf, result_type);
+ virgl_encoder_write_dword(ctx->cbuf, offset);
+ virgl_encoder_write_dword(ctx->cbuf, index);
+ return 0;
+}
+
+/* Encode a VIRGL_CCMD_TRANSFER3D command for TRANS in DIRECTION
+ * (to-host or from-host).  Always uses host-inferred strides.  Note the
+ * command dword is written straight into BUF (no flush/space check),
+ * unlike the virgl_encoder_write_cmd_dword() path.
+ */
+void virgl_encode_transfer(struct virgl_screen *vs, struct virgl_cmd_buf *buf,
+ struct virgl_transfer *trans, uint32_t direction)
+{
+ uint32_t command;
+ command = VIRGL_CMD0(VIRGL_CCMD_TRANSFER3D, 0, VIRGL_TRANSFER3D_SIZE);
+ virgl_encoder_write_dword(buf, command);
+ virgl_encoder_transfer3d_common(vs, buf, trans,
+ virgl_transfer3d_host_inferred_stride);
+ virgl_encoder_write_dword(buf, trans->offset);
+ virgl_encoder_write_dword(buf, direction);
+}
+
+/* Encode VIRGL_CCMD_COPY_TRANSFER3D: fill TRANS's target from the
+ * staging resource TRANS->copy_src_res starting at
+ * TRANS->copy_src_offset.
+ */
+void virgl_encode_copy_transfer(struct virgl_context *ctx,
+ struct virgl_transfer *trans)
+{
+ uint32_t command;
+ struct virgl_resource *copy_src_res = virgl_resource(trans->copy_src_res);
+ struct virgl_screen *vs = virgl_screen(ctx->base.screen);
+
+ command = VIRGL_CMD0(VIRGL_CCMD_COPY_TRANSFER3D, 0, VIRGL_COPY_TRANSFER3D_SIZE);
+ virgl_encoder_write_cmd_dword(ctx, command);
+ /* Copy transfers need to explicitly specify the stride, since it may differ
+ * from the image stride.
+ */
+ virgl_encoder_transfer3d_common(vs, ctx->cbuf, trans, virgl_transfer3d_explicit_stride);
+ virgl_encoder_emit_resource(vs, ctx->cbuf, copy_src_res);
+ virgl_encoder_write_dword(ctx->cbuf, trans->copy_src_offset);
+ /* At the moment all copy transfers are synchronized. */
+ virgl_encoder_write_dword(ctx->cbuf, 1);
+}
+
+/* Pad the transfer command buffer out to exactly VIRGL_MAX_TBUF_DWORDS
+ * with a single END_TRANSFERS nop.  The nop's encoded length is
+ * diff - 1 payload dwords, which together with the command dword itself
+ * consumes the remaining diff dwords.
+ */
+void virgl_encode_end_transfers(struct virgl_cmd_buf *buf)
+{
+ uint32_t command, diff;
+ diff = VIRGL_MAX_TBUF_DWORDS - buf->cdw;
+ if (diff) {
+ command = VIRGL_CMD0(VIRGL_CCMD_END_TRANSFERS, 0, diff - 1);
+ virgl_encoder_write_dword(buf, command);
+ }
+}