X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fgallium%2Fdrivers%2Fvirgl%2Fvirgl_context.c;h=bbb5247c86f1b4585a02b8e3dcc1248785052af4;hb=3da029ac1a6e159f1e98110beb136d089ff6cf37;hp=bda9515d9b64ff260f7c79b88a142517d887ae5e;hpb=11cc59afcaf85ec7081587326ac56b24e545d59a;p=mesa.git

diff --git a/src/gallium/drivers/virgl/virgl_context.c b/src/gallium/drivers/virgl/virgl_context.c
index bda9515d9b6..bbb5247c86f 100644
--- a/src/gallium/drivers/virgl/virgl_context.c
+++ b/src/gallium/drivers/virgl/virgl_context.c
@@ -21,6 +21,7 @@
  * USE OR OTHER DEALINGS IN THE SOFTWARE.
  */
 
+#include <libsync.h>
 #include "pipe/p_shader_tokens.h"
 
 #include "pipe/p_context.h"
@@ -30,6 +31,7 @@
 #include "util/u_inlines.h"
 #include "util/u_memory.h"
 #include "util/u_format.h"
+#include "util/u_prim.h"
 #include "util/u_transfer.h"
 #include "util/u_helpers.h"
 #include "util/slab.h"
@@ -45,6 +47,13 @@
 #include "virgl_protocol.h"
 #include "virgl_resource.h"
 #include "virgl_screen.h"
+#include "virgl_staging_mgr.h"
+
+struct virgl_vertex_elements_state {
+   uint32_t handle;
+   uint8_t binding_map[PIPE_MAX_ATTRIBS];
+   uint8_t num_bindings;
+};
 
 static uint32_t next_handle;
 uint32_t virgl_object_assign_handle(void)
@@ -52,27 +61,111 @@ uint32_t virgl_object_assign_handle(void)
    return ++next_handle;
 }
 
-static void virgl_buffer_flush(struct virgl_context *vctx,
-                               struct virgl_buffer *vbuf)
+bool
+virgl_can_rebind_resource(struct virgl_context *vctx,
+                          struct pipe_resource *res)
 {
-   struct virgl_screen *rs = virgl_screen(vctx->base.screen);
-   struct pipe_box box;
+   /* We cannot rebind resources that are referenced by host objects, which
+    * are
+    *
+    *  - VIRGL_OBJECT_SURFACE
+    *  - VIRGL_OBJECT_SAMPLER_VIEW
+    *  - VIRGL_OBJECT_STREAMOUT_TARGET
+    *
+    * Because surfaces cannot be created from buffers, we require the resource
+    * to be a buffer instead (and avoid tracking VIRGL_OBJECT_SURFACE binds).
+    */
+   const unsigned unsupported_bind = (PIPE_BIND_SAMPLER_VIEW |
+                                      PIPE_BIND_STREAM_OUTPUT);
+   const unsigned bind_history = virgl_resource(res)->bind_history;
+   return res->target == PIPE_BUFFER && !(bind_history & unsupported_bind);
+}
 
-   assert(vbuf->on_list);
+void
+virgl_rebind_resource(struct virgl_context *vctx,
+                      struct pipe_resource *res)
+{
+   /* Queries use internally created buffers and do not go through transfers.
+    * Index buffers are not bindable. They are not tracked.
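+    *
+    * Rebinding below simply re-emits the original bind command for each
+    * tracked slot, so the host resolves the resource again and picks up
+    * its new backing storage.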
+    */
+   ASSERTED const unsigned tracked_bind = (PIPE_BIND_VERTEX_BUFFER |
+                                           PIPE_BIND_CONSTANT_BUFFER |
+                                           PIPE_BIND_SHADER_BUFFER |
+                                           PIPE_BIND_SHADER_IMAGE);
+   const unsigned bind_history = virgl_resource(res)->bind_history;
+   unsigned i;
 
-   box.height = 1;
-   box.depth = 1;
-   box.y = 0;
-   box.z = 0;
+   assert(virgl_can_rebind_resource(vctx, res) &&
+          (bind_history & tracked_bind) == bind_history);
 
-   box.x = vbuf->valid_buffer_range.start;
-   box.width = MIN2(vbuf->valid_buffer_range.end - vbuf->valid_buffer_range.start, vbuf->base.u.b.width0);
+   if (bind_history & PIPE_BIND_VERTEX_BUFFER) {
+      for (i = 0; i < vctx->num_vertex_buffers; i++) {
+         if (vctx->vertex_buffer[i].buffer.resource == res) {
+            vctx->vertex_array_dirty = true;
+            break;
+         }
+      }
+   }
 
-   vctx->num_transfers++;
-   rs->vws->transfer_put(rs->vws, vbuf->base.hw_res,
-                         &box, 0, 0, box.x, 0);
+   if (bind_history & PIPE_BIND_SHADER_BUFFER) {
+      uint32_t remaining_mask = vctx->atomic_buffer_enabled_mask;
+      while (remaining_mask) {
+         int i = u_bit_scan(&remaining_mask);
+         if (vctx->atomic_buffers[i].buffer == res) {
+            const struct pipe_shader_buffer *abo = &vctx->atomic_buffers[i];
+            virgl_encode_set_hw_atomic_buffers(vctx, i, 1, abo);
+         }
+      }
+   }
 
-   util_range_set_empty(&vbuf->valid_buffer_range);
+   /* check per-stage shader bindings */
+   if (bind_history & (PIPE_BIND_CONSTANT_BUFFER |
+                       PIPE_BIND_SHADER_BUFFER |
+                       PIPE_BIND_SHADER_IMAGE)) {
+      enum pipe_shader_type shader_type;
+      for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
+         const struct virgl_shader_binding_state *binding =
+            &vctx->shader_bindings[shader_type];
+
+         if (bind_history & PIPE_BIND_CONSTANT_BUFFER) {
+            uint32_t remaining_mask = binding->ubo_enabled_mask;
+            while (remaining_mask) {
+               int i = u_bit_scan(&remaining_mask);
+               if (binding->ubos[i].buffer == res) {
+                  const struct pipe_constant_buffer *ubo = &binding->ubos[i];
+                  virgl_encoder_set_uniform_buffer(vctx, shader_type, i,
+                                                   ubo->buffer_offset,
+                                                   ubo->buffer_size,
+                                                   virgl_resource(res));
+               }
+            }
+         }
+
+         if (bind_history & PIPE_BIND_SHADER_BUFFER) {
+            uint32_t remaining_mask = binding->ssbo_enabled_mask;
+            while (remaining_mask) {
+               int i = u_bit_scan(&remaining_mask);
+               if (binding->ssbos[i].buffer == res) {
+                  const struct pipe_shader_buffer *ssbo = &binding->ssbos[i];
+                  virgl_encode_set_shader_buffers(vctx, shader_type, i, 1,
+                                                  ssbo);
+               }
+            }
+         }
+
+         if (bind_history & PIPE_BIND_SHADER_IMAGE) {
+            uint32_t remaining_mask = binding->image_enabled_mask;
+            while (remaining_mask) {
+               int i = u_bit_scan(&remaining_mask);
+               if (binding->images[i].resource == res) {
+                  const struct pipe_image_view *image = &binding->images[i];
+                  virgl_encode_set_shader_images(vctx, shader_type, i, 1,
+                                                 image);
+               }
+            }
+         }
+      }
+   }
 }
 
 static void virgl_attach_res_framebuffer(struct virgl_context *vctx)
@@ -85,34 +178,37 @@ static void virgl_attach_res_framebuffer(struct virgl_context *vctx)
    surf = vctx->framebuffer.zsbuf;
    if (surf) {
       res = virgl_resource(surf->texture);
-      if (res)
+      if (res) {
          vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
+         virgl_resource_dirty(res, surf->u.tex.level);
+      }
    }
    for (i = 0; i < vctx->framebuffer.nr_cbufs; i++) {
       surf = vctx->framebuffer.cbufs[i];
       if (surf) {
         res = virgl_resource(surf->texture);
-         if (res)
+         if (res) {
            vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
+            virgl_resource_dirty(res, surf->u.tex.level);
+         }
      }
   }
 }
 
 static void virgl_attach_res_sampler_views(struct virgl_context *vctx,
-                                           unsigned shader_type)
+                                           enum pipe_shader_type shader_type)
 {
    struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
-   struct virgl_textures_info *tinfo = &vctx->samplers[shader_type];
+   const struct virgl_shader_binding_state *binding =
+      &vctx->shader_bindings[shader_type];
+   uint32_t remaining_mask = binding->view_enabled_mask;
    struct virgl_resource *res;
-   uint32_t remaining_mask = tinfo->enabled_mask;
-   unsigned i;
-   while (remaining_mask) {
-      i = u_bit_scan(&remaining_mask);
-      assert(tinfo->views[i]);
 
-      res = virgl_resource(tinfo->views[i]->base.texture);
-      if (res)
-         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
+   while (remaining_mask) {
+      int i = u_bit_scan(&remaining_mask);
+      assert(binding->views[i] && binding->views[i]->texture);
+      res = virgl_resource(binding->views[i]->texture);
+      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
    }
 }
 
@@ -123,18 +219,19 @@ static void virgl_attach_res_vertex_buffers(struct virgl_context *vctx)
    unsigned i;
 
    for (i = 0; i < vctx->num_vertex_buffers; i++) {
-      res = virgl_resource(vctx->vertex_buffer[i].buffer);
+      res = virgl_resource(vctx->vertex_buffer[i].buffer.resource);
       if (res)
          vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
    }
 }
 
-static void virgl_attach_res_index_buffer(struct virgl_context *vctx)
+static void virgl_attach_res_index_buffer(struct virgl_context *vctx,
+                                          struct virgl_indexbuf *ib)
 {
    struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
    struct virgl_resource *res;
 
-   res = virgl_resource(vctx->index_buffer.buffer);
+   res = virgl_resource(ib->buffer);
    if (res)
       vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
 }
@@ -153,16 +250,67 @@ static void virgl_attach_res_so_targets(struct virgl_context *vctx)
 }
 
 static void virgl_attach_res_uniform_buffers(struct virgl_context *vctx,
-                                             unsigned shader_type)
+                                             enum pipe_shader_type shader_type)
 {
    struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
+   const struct virgl_shader_binding_state *binding =
+      &vctx->shader_bindings[shader_type];
+   uint32_t remaining_mask = binding->ubo_enabled_mask;
    struct virgl_resource *res;
-   unsigned i;
-   for (i = 0; i < PIPE_MAX_CONSTANT_BUFFERS; i++) {
-      res = virgl_resource(vctx->ubos[shader_type][i]);
-      if (res) {
-         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
-      }
+
+   while (remaining_mask) {
+      int i = u_bit_scan(&remaining_mask);
+      res = virgl_resource(binding->ubos[i].buffer);
+      assert(res);
+      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
+   }
+}
+
+static void virgl_attach_res_shader_buffers(struct virgl_context *vctx,
+                                            enum pipe_shader_type shader_type)
+{
+   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
+   const struct virgl_shader_binding_state *binding =
+      &vctx->shader_bindings[shader_type];
+   uint32_t remaining_mask = binding->ssbo_enabled_mask;
+   struct virgl_resource *res;
+
+   while (remaining_mask) {
+      int i = u_bit_scan(&remaining_mask);
+      res = virgl_resource(binding->ssbos[i].buffer);
+      assert(res);
+      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
+   }
+}
+
+static void virgl_attach_res_shader_images(struct virgl_context *vctx,
+                                           enum pipe_shader_type shader_type)
+{
+   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
+   const struct virgl_shader_binding_state *binding =
+      &vctx->shader_bindings[shader_type];
+   uint32_t remaining_mask = binding->image_enabled_mask;
+   struct virgl_resource *res;
+
+   while (remaining_mask) {
+      int i = u_bit_scan(&remaining_mask);
+      res = virgl_resource(binding->images[i].resource);
+      assert(res);
+      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
+   }
+}
+
+
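+/* Atomic counter buffers are bound once for the whole context rather than
+ * per shader stage, so a single enabled mask is scanned here. */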
+static void virgl_attach_res_atomic_buffers(struct virgl_context *vctx)
+{
+   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
+   uint32_t remaining_mask = vctx->atomic_buffer_enabled_mask;
+   struct virgl_resource *res;
+
+   while (remaining_mask) {
+      int i = u_bit_scan(&remaining_mask);
+      res = virgl_resource(vctx->atomic_buffers[i].buffer);
+      assert(res);
+      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
    }
 }
 
@@ -170,23 +318,35 @@ static void virgl_attach_res_uniform_buffers(struct virgl_context *vctx,
  * after flushing, the hw context still has a bunch of
  * resources bound, so we need to rebind those here.
  */
-static void virgl_reemit_res(struct virgl_context *vctx)
+static void virgl_reemit_draw_resources(struct virgl_context *vctx)
 {
-   unsigned shader_type;
+   enum pipe_shader_type shader_type;
 
    /* reattach any flushed resources */
    /* framebuffer, sampler views, vertex/index/uniform/stream buffers */
    virgl_attach_res_framebuffer(vctx);
 
-   for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
+   for (shader_type = 0; shader_type < PIPE_SHADER_COMPUTE; shader_type++) {
      virgl_attach_res_sampler_views(vctx, shader_type);
      virgl_attach_res_uniform_buffers(vctx, shader_type);
+      virgl_attach_res_shader_buffers(vctx, shader_type);
+      virgl_attach_res_shader_images(vctx, shader_type);
    }
-   virgl_attach_res_index_buffer(vctx);
+   virgl_attach_res_atomic_buffers(vctx);
    virgl_attach_res_vertex_buffers(vctx);
    virgl_attach_res_so_targets(vctx);
 }
 
+static void virgl_reemit_compute_resources(struct virgl_context *vctx)
+{
+   virgl_attach_res_sampler_views(vctx, PIPE_SHADER_COMPUTE);
+   virgl_attach_res_uniform_buffers(vctx, PIPE_SHADER_COMPUTE);
+   virgl_attach_res_shader_buffers(vctx, PIPE_SHADER_COMPUTE);
+   virgl_attach_res_shader_images(vctx, PIPE_SHADER_COMPUTE);
+
+   virgl_attach_res_atomic_buffers(vctx);
+}
+
 static struct pipe_surface *virgl_create_surface(struct pipe_context *ctx,
                                                 struct pipe_resource *resource,
                                                 const struct pipe_surface *templ)
@@ -196,28 +356,32 @@ static struct pipe_surface *virgl_create_surface(struct pipe_context *ctx,
    struct virgl_surface *surf;
    struct virgl_context *vctx = virgl_context(ctx);
    struct virgl_resource *res = virgl_resource(resource);
    uint32_t handle;
 
+   /* no support for buffer surfaces */
+   if (resource->target == PIPE_BUFFER)
+      return NULL;
+
    surf = CALLOC_STRUCT(virgl_surface);
    if (!surf)
       return NULL;
 
-   res->clean = FALSE;
+   assert(ctx->screen->get_param(ctx->screen,
+                                 PIPE_CAP_DEST_SURFACE_SRGB_CONTROL) ||
+          (util_format_is_srgb(templ->format) ==
+           util_format_is_srgb(resource->format)));
+
+   virgl_resource_dirty(res, 0);
+
    handle = virgl_object_assign_handle();
    pipe_reference_init(&surf->base.reference, 1);
    pipe_resource_reference(&surf->base.texture, resource);
    surf->base.context = ctx;
    surf->base.format = templ->format;
-   if (resource->target != PIPE_BUFFER) {
-      surf->base.width = u_minify(resource->width0, templ->u.tex.level);
-      surf->base.height = u_minify(resource->height0, templ->u.tex.level);
-      surf->base.u.tex.level = templ->u.tex.level;
-      surf->base.u.tex.first_layer = templ->u.tex.first_layer;
-      surf->base.u.tex.last_layer = templ->u.tex.last_layer;
-   } else {
-      surf->base.width = templ->u.buf.last_element - templ->u.buf.first_element + 1;
-      surf->base.height = resource->height0;
-      surf->base.u.buf.first_element = templ->u.buf.first_element;
-      surf->base.u.buf.last_element = templ->u.buf.last_element;
-   }
+
+   surf->base.width = u_minify(resource->width0, templ->u.tex.level);
+   surf->base.height = u_minify(resource->height0, templ->u.tex.level);
+   surf->base.u.tex.level = templ->u.tex.level;
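+   /* Buffer surfaces bailed out above, so only the u.tex union members
+    * of the template are meaningful here. */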
+   surf->base.u.tex.first_layer = templ->u.tex.first_layer;
+   surf->base.u.tex.last_layer = templ->u.tex.last_layer;
+
    virgl_encoder_create_surface(vctx, handle, res, &surf->base);
    surf->handle = handle;
    return &surf->base;
@@ -293,19 +457,30 @@ static void *virgl_create_rasterizer_state(struct pipe_context *ctx,
                                            const struct pipe_rasterizer_state *rs_state)
 {
    struct virgl_context *vctx = virgl_context(ctx);
-   uint32_t handle;
-   handle = virgl_object_assign_handle();
+   struct virgl_rasterizer_state *vrs = CALLOC_STRUCT(virgl_rasterizer_state);
 
-   virgl_encode_rasterizer_state(vctx, handle, rs_state);
-   return (void *)(unsigned long)handle;
+   if (!vrs)
+      return NULL;
+   vrs->rs = *rs_state;
+   vrs->handle = virgl_object_assign_handle();
+
+   assert(rs_state->depth_clip_near ||
+          virgl_screen(ctx->screen)->caps.caps.v1.bset.depth_clip_disable);
+
+   virgl_encode_rasterizer_state(vctx, vrs->handle, rs_state);
+   return (void *)vrs;
 }
 
 static void virgl_bind_rasterizer_state(struct pipe_context *ctx,
                                         void *rs_state)
 {
    struct virgl_context *vctx = virgl_context(ctx);
-   uint32_t handle = (unsigned long)rs_state;
-
+   uint32_t handle = 0;
+   if (rs_state) {
+      struct virgl_rasterizer_state *vrs = rs_state;
+      vctx->rs_state = *vrs;
+      handle = vrs->handle;
+   }
    virgl_encode_bind_object(vctx, handle, VIRGL_OBJECT_RASTERIZER);
 }
 
@@ -313,8 +488,9 @@ static void virgl_delete_rasterizer_state(struct pipe_context *ctx,
                                           void *rs_state)
 {
    struct virgl_context *vctx = virgl_context(ctx);
-   uint32_t handle = (unsigned long)rs_state;
-   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_RASTERIZER);
+   struct virgl_rasterizer_state *vrs = rs_state;
+   virgl_encode_delete_object(vctx, vrs->handle, VIRGL_OBJECT_RASTERIZER);
+   FREE(vrs);
 }
 
 static void virgl_set_framebuffer_state(struct pipe_context *ctx,
@@ -340,29 +516,54 @@ static void *virgl_create_vertex_elements_state(struct pipe_context *ctx,
                                                 unsigned num_elements,
                                                 const struct pipe_vertex_element *elements)
 {
+   struct pipe_vertex_element new_elements[PIPE_MAX_ATTRIBS];
    struct virgl_context *vctx = virgl_context(ctx);
-   uint32_t handle = virgl_object_assign_handle();
-   virgl_encoder_create_vertex_elements(vctx, handle,
-                                        num_elements, elements);
-   return (void*)(unsigned long)handle;
+   struct virgl_vertex_elements_state *state =
+      CALLOC_STRUCT(virgl_vertex_elements_state);
+
+   for (int i = 0; i < num_elements; ++i) {
+      if (elements[i].instance_divisor) {
+         /* Virglrenderer doesn't deal with instance_divisor correctly if
+          * there isn't a 1:1 relationship between elements and bindings.
+          * So let's make sure there is, by duplicating bindings.
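+          * For example, two elements that both read buffer 0 become
+          * bindings 0 and 1, and binding_map[] = {0, 0} records which
+          * real vertex buffer each duplicated binding is fed from at
+          * draw time.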
+          */
+         for (int j = 0; j < num_elements; ++j) {
+            new_elements[j] = elements[j];
+            new_elements[j].vertex_buffer_index = j;
+            state->binding_map[j] = elements[j].vertex_buffer_index;
+         }
+         elements = new_elements;
+         state->num_bindings = num_elements;
+         break;
+      }
+   }
+
+   state->handle = virgl_object_assign_handle();
+   virgl_encoder_create_vertex_elements(vctx, state->handle,
+                                        num_elements, elements);
+   return state;
 }
 
 static void virgl_delete_vertex_elements_state(struct pipe_context *ctx,
                                                void *ve)
 {
    struct virgl_context *vctx = virgl_context(ctx);
-   uint32_t handle = (unsigned long)ve;
-
-   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_VERTEX_ELEMENTS);
+   struct virgl_vertex_elements_state *state =
+      (struct virgl_vertex_elements_state *)ve;
+   virgl_encode_delete_object(vctx, state->handle, VIRGL_OBJECT_VERTEX_ELEMENTS);
+   FREE(state);
 }
 
 static void virgl_bind_vertex_elements_state(struct pipe_context *ctx,
                                              void *ve)
 {
    struct virgl_context *vctx = virgl_context(ctx);
-   uint32_t handle = (unsigned long)ve;
-   virgl_encode_bind_object(vctx, handle, VIRGL_OBJECT_VERTEX_ELEMENTS);
+   struct virgl_vertex_elements_state *state =
+      (struct virgl_vertex_elements_state *)ve;
+   vctx->vertex_elements = state;
+   virgl_encode_bind_object(vctx, state ? state->handle : 0,
+                            VIRGL_OBJECT_VERTEX_ELEMENTS);
+   vctx->vertex_array_dirty = TRUE;
 }
 
 static void virgl_set_vertex_buffers(struct pipe_context *ctx,
@@ -376,16 +577,35 @@ static void virgl_set_vertex_buffers(struct pipe_context *ctx,
                                      &vctx->num_vertex_buffers,
                                      buffers, start_slot, num_buffers);
 
+   if (buffers) {
+      for (unsigned i = 0; i < num_buffers; i++) {
+         struct virgl_resource *res =
+            virgl_resource(buffers[i].buffer.resource);
+         if (res && !buffers[i].is_user_buffer)
+            res->bind_history |= PIPE_BIND_VERTEX_BUFFER;
+      }
+   }
+
    vctx->vertex_array_dirty = TRUE;
 }
 
-static void virgl_hw_set_vertex_buffers(struct pipe_context *ctx)
+static void virgl_hw_set_vertex_buffers(struct virgl_context *vctx)
 {
-   struct virgl_context *vctx = virgl_context(ctx);
-
    if (vctx->vertex_array_dirty) {
-      virgl_encoder_set_vertex_buffers(vctx, vctx->num_vertex_buffers, vctx->vertex_buffer);
+      struct virgl_vertex_elements_state *ve = vctx->vertex_elements;
+
+      if (ve->num_bindings) {
+         struct pipe_vertex_buffer vertex_buffers[PIPE_MAX_ATTRIBS];
+         for (int i = 0; i < ve->num_bindings; ++i)
+            vertex_buffers[i] = vctx->vertex_buffer[ve->binding_map[i]];
+
+         virgl_encoder_set_vertex_buffers(vctx, ve->num_bindings, vertex_buffers);
+      } else
+         virgl_encoder_set_vertex_buffers(vctx, vctx->num_vertex_buffers, vctx->vertex_buffer);
+
       virgl_attach_res_vertex_buffers(vctx);
+
+      vctx->vertex_array_dirty = FALSE;
    }
 }
 
@@ -403,73 +623,43 @@ static void virgl_set_blend_color(struct pipe_context *ctx,
    virgl_encoder_set_blend_color(vctx, color);
 }
 
-static void virgl_set_index_buffer(struct pipe_context *ctx,
-                                   const struct pipe_index_buffer *ib)
-{
-   struct virgl_context *vctx = virgl_context(ctx);
-
-   if (ib) {
-      pipe_resource_reference(&vctx->index_buffer.buffer, ib->buffer);
-      memcpy(&vctx->index_buffer, ib, sizeof(*ib));
-   } else {
-      pipe_resource_reference(&vctx->index_buffer.buffer, NULL);
-   }
-}
-
-static void virgl_hw_set_index_buffer(struct pipe_context *ctx,
-                                      struct pipe_index_buffer *ib)
+static void virgl_hw_set_index_buffer(struct virgl_context *vctx,
+                                      struct virgl_indexbuf *ib)
 {
-   struct virgl_context *vctx = virgl_context(ctx);
    virgl_encoder_set_index_buffer(vctx, ib);
-   virgl_attach_res_index_buffer(vctx);
+   virgl_attach_res_index_buffer(vctx, ib);
 }
 
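 /* Resource-backed UBOs are encoded below as references to the buffer
  * object, while user_buffer constants are written inline into the command
  * stream; both paths keep ubo_enabled_mask in sync. */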
 static void virgl_set_constant_buffer(struct pipe_context *ctx,
-                                      uint shader, uint index,
+                                      enum pipe_shader_type shader, uint index,
                                       const struct pipe_constant_buffer *buf)
 {
    struct virgl_context *vctx = virgl_context(ctx);
+   struct virgl_shader_binding_state *binding =
+      &vctx->shader_bindings[shader];
 
-   if (buf) {
-      if (!buf->user_buffer){
-         struct virgl_resource *res = virgl_resource(buf->buffer);
-         virgl_encoder_set_uniform_buffer(vctx, shader, index, buf->buffer_offset,
-                                          buf->buffer_size, res);
-         pipe_resource_reference(&vctx->ubos[shader][index], buf->buffer);
-         return;
-      }
-      pipe_resource_reference(&vctx->ubos[shader][index], NULL);
-      virgl_encoder_write_constant_buffer(vctx, shader, index, buf->buffer_size / 4, buf->user_buffer);
-   } else {
-      virgl_encoder_write_constant_buffer(vctx, shader, index, 0, NULL);
-      pipe_resource_reference(&vctx->ubos[shader][index], NULL);
-   }
-}
-
-void virgl_transfer_inline_write(struct pipe_context *ctx,
-                                 struct pipe_resource *res,
-                                 unsigned level,
-                                 unsigned usage,
-                                 const struct pipe_box *box,
-                                 const void *data,
-                                 unsigned stride,
-                                 unsigned layer_stride)
-{
-   struct virgl_context *vctx = virgl_context(ctx);
-   struct virgl_screen *vs = virgl_screen(ctx->screen);
-   struct virgl_resource *grres = virgl_resource(res);
-   struct virgl_buffer *vbuf = virgl_buffer(res);
-
-   grres->clean = FALSE;
+   if (buf && buf->buffer) {
+      struct virgl_resource *res = virgl_resource(buf->buffer);
+      res->bind_history |= PIPE_BIND_CONSTANT_BUFFER;
 
-   if (virgl_res_needs_flush_wait(vctx, &vbuf->base, usage)) {
-      ctx->flush(ctx, NULL, 0);
+      virgl_encoder_set_uniform_buffer(vctx, shader, index,
+                                       buf->buffer_offset,
+                                       buf->buffer_size, res);
 
-      vs->vws->resource_wait(vs->vws, vbuf->base.hw_res);
+      pipe_resource_reference(&binding->ubos[index].buffer, buf->buffer);
+      binding->ubos[index] = *buf;
+      binding->ubo_enabled_mask |= 1 << index;
+   } else {
+      static const struct pipe_constant_buffer dummy_ubo;
+      if (!buf)
+         buf = &dummy_ubo;
+      virgl_encoder_write_constant_buffer(vctx, shader, index,
+                                          buf->buffer_size / 4,
+                                          buf->user_buffer);
+
+      pipe_resource_reference(&binding->ubos[index].buffer, NULL);
+      binding->ubo_enabled_mask &= ~(1 << index);
    }
-
-   virgl_encoder_inline_write(vctx, grres, level, usage,
-                              box, data, stride, layer_stride);
 }
 
 static void *virgl_shader_encoder(struct pipe_context *ctx,
@@ -481,14 +671,14 @@ static void *virgl_shader_encoder(struct pipe_context *ctx,
    struct tgsi_token *new_tokens;
    int ret;
 
-   new_tokens = virgl_tgsi_transform(shader->tokens);
+   new_tokens = virgl_tgsi_transform(vctx, shader->tokens);
    if (!new_tokens)
       return NULL;
 
    handle = virgl_object_assign_handle();
    /* encode VS state */
    ret = virgl_encode_shader_state(vctx, handle, type,
-                                   &shader->stream_output,
+                                   &shader->stream_output, 0,
                                    new_tokens);
    if (ret) {
       return NULL;
@@ -504,6 +694,18 @@ static void *virgl_create_vs_state(struct pipe_context *ctx,
    return virgl_shader_encoder(ctx, shader, PIPE_SHADER_VERTEX);
 }
 
+static void *virgl_create_tcs_state(struct pipe_context *ctx,
+                                    const struct pipe_shader_state *shader)
+{
+   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_TESS_CTRL);
+}
+
+static void *virgl_create_tes_state(struct pipe_context *ctx,
+                                    const struct pipe_shader_state *shader)
+{
+   return virgl_shader_encoder(ctx, shader, PIPE_SHADER_TESS_EVAL);
+}
+
 static void *virgl_create_gs_state(struct pipe_context *ctx,
                                    const struct pipe_shader_state *shader)
 {
@@ -546,6 +748,26 @@ virgl_delete_vs_state(struct pipe_context *ctx,
    virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
 }
 
+static void
+virgl_delete_tcs_state(struct pipe_context *ctx,
+                       void *tcs)
+{
+   uint32_t handle = (unsigned long)tcs;
+   struct virgl_context *vctx = virgl_context(ctx);
+
+   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
+}
+
+static void
+virgl_delete_tes_state(struct pipe_context *ctx,
+                       void *tes)
+{
+   uint32_t handle = (unsigned long)tes;
+   struct virgl_context *vctx = virgl_context(ctx);
+
+   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
+}
+
 static void virgl_bind_vs_state(struct pipe_context *ctx,
                                 void *vss)
 {
@@ -555,6 +777,24 @@ static void virgl_bind_vs_state(struct pipe_context *ctx,
    virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_VERTEX);
 }
 
+static void virgl_bind_tcs_state(struct pipe_context *ctx,
+                                 void *vss)
+{
+   uint32_t handle = (unsigned long)vss;
+   struct virgl_context *vctx = virgl_context(ctx);
+
+   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_TESS_CTRL);
+}
+
+static void virgl_bind_tes_state(struct pipe_context *ctx,
+                                 void *vss)
+{
+   uint32_t handle = (unsigned long)vss;
+   struct virgl_context *vctx = virgl_context(ctx);
+
+   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_TESS_EVAL);
+}
+
 static void virgl_bind_gs_state(struct pipe_context *ctx,
                                 void *vss)
 {
@@ -581,6 +821,10 @@ static void virgl_clear(struct pipe_context *ctx,
 {
    struct virgl_context *vctx = virgl_context(ctx);
 
+   if (!vctx->num_draws)
+      virgl_reemit_draw_resources(vctx);
+   vctx->num_draws++;
+
    virgl_encode_clear(vctx, buffers, color, depth, stencil);
 }
 
@@ -589,33 +833,39 @@ static void virgl_draw_vbo(struct pipe_context *ctx,
 {
    struct virgl_context *vctx = virgl_context(ctx);
    struct virgl_screen *rs = virgl_screen(ctx->screen);
-   struct pipe_index_buffer ib = {};
+   struct virgl_indexbuf ib = {};
    struct pipe_draw_info info = *dinfo;
 
+   if (!dinfo->count_from_stream_output && !dinfo->indirect &&
+       !dinfo->primitive_restart &&
+       !u_trim_pipe_prim(dinfo->mode, (unsigned*)&dinfo->count))
+      return;
+
    if (!(rs->caps.caps.v1.prim_mask & (1 << dinfo->mode))) {
-      util_primconvert_save_index_buffer(vctx->primconvert, &vctx->index_buffer);
+      util_primconvert_save_rasterizer_state(vctx->primconvert, &vctx->rs_state.rs);
       util_primconvert_draw_vbo(vctx->primconvert, dinfo);
       return;
   }
 
-   if (info.indexed) {
-      pipe_resource_reference(&ib.buffer, vctx->index_buffer.buffer);
-      ib.user_buffer = vctx->index_buffer.user_buffer;
-      ib.index_size = vctx->index_buffer.index_size;
-      ib.offset = vctx->index_buffer.offset + info.start * ib.index_size;
+   if (info.index_size) {
+      pipe_resource_reference(&ib.buffer, info.has_user_indices ? NULL : info.index.resource);
+      ib.user_buffer = info.has_user_indices ? info.index.user : NULL;
+      ib.index_size = dinfo->index_size;
+      ib.offset = info.start * ib.index_size;
 
       if (ib.user_buffer) {
-         u_upload_data(vctx->uploader, 0, info.count * ib.index_size, 256,
+         u_upload_data(vctx->uploader, 0, info.count * ib.index_size, 4,
                        ib.user_buffer, &ib.offset, &ib.buffer);
          ib.user_buffer = NULL;
       }
    }
 
-   u_upload_unmap(vctx->uploader);
-
+   if (!vctx->num_draws)
+      virgl_reemit_draw_resources(vctx);
    vctx->num_draws++;
-   virgl_hw_set_vertex_buffers(ctx);
-   if (info.indexed)
-      virgl_hw_set_index_buffer(ctx, &ib);
+
+   virgl_hw_set_vertex_buffers(vctx);
+   if (info.index_size)
+      virgl_hw_set_index_buffer(vctx, &ib);
 
    virgl_encoder_draw_vbo(vctx, &info);
 
@@ -623,18 +873,55 @@ static void virgl_draw_vbo(struct pipe_context *ctx,
 
 }
 
-static void virgl_flush_eq(struct virgl_context *ctx, void *closure)
+static void virgl_submit_cmd(struct virgl_winsys *vws,
+                             struct virgl_cmd_buf *cbuf,
+                             struct pipe_fence_handle **fence)
+{
+   if (unlikely(virgl_debug & VIRGL_DEBUG_SYNC)) {
+      struct pipe_fence_handle *sync_fence = NULL;
+
+      vws->submit_cmd(vws, cbuf, &sync_fence);
+
+      vws->fence_wait(vws, sync_fence, PIPE_TIMEOUT_INFINITE);
+      vws->fence_reference(vws, &sync_fence, NULL);
+   } else {
+      vws->submit_cmd(vws, cbuf, fence);
+   }
+}
+
+static void virgl_flush_eq(struct virgl_context *ctx, void *closure,
+                           struct pipe_fence_handle **fence)
 {
    struct virgl_screen *rs = virgl_screen(ctx->base.screen);
 
+   /* skip empty cbuf */
+   if (ctx->cbuf->cdw == ctx->cbuf_initial_cdw &&
+       ctx->queue.num_dwords == 0 &&
+       !fence)
+      return;
+
+   if (ctx->num_draws)
+      u_upload_unmap(ctx->uploader);
+
    /* send the buffer to the remote side for decoding */
-   ctx->num_transfers = ctx->num_draws = 0;
-   rs->vws->submit_cmd(rs->vws, ctx->cbuf);
+   ctx->num_draws = ctx->num_compute = 0;
+
+   virgl_transfer_queue_clear(&ctx->queue, ctx->cbuf);
+
+   virgl_submit_cmd(rs->vws, ctx->cbuf, fence);
+
+   /* Reserve some space for transfers. */
+   if (ctx->encoded_transfers)
+      ctx->cbuf->cdw = VIRGL_MAX_TBUF_DWORDS;
 
    virgl_encoder_set_sub_ctx(ctx, ctx->hw_sub_ctx_id);
 
-   /* add back current framebuffer resources to reference list? */
-   virgl_reemit_res(ctx);
+   ctx->cbuf_initial_cdw = ctx->cbuf->cdw;
+
+   /* We have flushed the command queue, including any pending copy transfers
+    * involving staging resources.
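+    * Resetting the accumulated size below lets the next batch of copy
+    * transfers start growing it from zero again.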
+    */
+   ctx->queued_staging_res_size = 0;
 }
 
 static void virgl_flush_from_st(struct pipe_context *ctx,
@@ -642,21 +929,8 @@ static void virgl_flush_from_st(struct pipe_context *ctx,
                                 enum pipe_flush_flags flags)
 {
    struct virgl_context *vctx = virgl_context(ctx);
-   struct virgl_screen *rs = virgl_screen(ctx->screen);
-   struct virgl_buffer *buf, *tmp;
 
-   if (fence)
-      *fence = rs->vws->cs_create_fence(rs->vws);
-
-   LIST_FOR_EACH_ENTRY_SAFE(buf, tmp, &vctx->to_flush_bufs, flush_list) {
-      struct pipe_resource *res = &buf->base.u.b;
-      virgl_buffer_flush(vctx, buf);
-      list_del(&buf->flush_list);
-      buf->on_list = FALSE;
-      pipe_resource_reference(&res, NULL);
-
-   }
-   virgl_flush_eq(vctx, vctx);
+   virgl_flush_eq(vctx, vctx, fence);
 }
 
 static struct pipe_sampler_view *virgl_create_sampler_view(struct pipe_context *ctx,
@@ -696,42 +970,39 @@ static void virgl_set_sampler_views(struct pipe_context *ctx,
                                     struct pipe_sampler_view **views)
 {
    struct virgl_context *vctx = virgl_context(ctx);
-   int i;
-   uint32_t disable_mask = ~((1ull << num_views) - 1);
-   struct virgl_textures_info *tinfo = &vctx->samplers[shader_type];
-   uint32_t new_mask = 0;
-   uint32_t remaining_mask;
-
-   remaining_mask = tinfo->enabled_mask & disable_mask;
-
-   while (remaining_mask) {
-      i = u_bit_scan(&remaining_mask);
-      assert(tinfo->views[i]);
-
-      pipe_sampler_view_reference((struct pipe_sampler_view **)&tinfo->views[i], NULL);
-   }
-
-   for (i = 0; i < num_views; i++) {
-      struct virgl_sampler_view *grview = virgl_sampler_view(views[i]);
-
-      if (views[i] == (struct pipe_sampler_view *)tinfo->views[i])
-         continue;
-
-      if (grview) {
-         new_mask |= 1 << i;
-         pipe_sampler_view_reference((struct pipe_sampler_view **)&tinfo->views[i], views[i]);
+   struct virgl_shader_binding_state *binding =
+      &vctx->shader_bindings[shader_type];
+
+   binding->view_enabled_mask &= ~u_bit_consecutive(start_slot, num_views);
+   for (unsigned i = 0; i < num_views; i++) {
+      unsigned idx = start_slot + i;
+      if (views && views[i]) {
+         struct virgl_resource *res = virgl_resource(views[i]->texture);
+         res->bind_history |= PIPE_BIND_SAMPLER_VIEW;
+
+         pipe_sampler_view_reference(&binding->views[idx], views[i]);
+         binding->view_enabled_mask |= 1 << idx;
       } else {
-         pipe_sampler_view_reference((struct pipe_sampler_view **)&tinfo->views[i], NULL);
-         disable_mask |= 1 << i;
+         pipe_sampler_view_reference(&binding->views[idx], NULL);
       }
    }
 
-   tinfo->enabled_mask &= ~disable_mask;
-   tinfo->enabled_mask |= new_mask;
-   virgl_encode_set_sampler_views(vctx, shader_type, start_slot, num_views, tinfo->views);
+   virgl_encode_set_sampler_views(vctx, shader_type,
+         start_slot, num_views, (struct virgl_sampler_view **)binding->views);
    virgl_attach_res_sampler_views(vctx, shader_type);
 }
 
+static void
+virgl_texture_barrier(struct pipe_context *ctx, unsigned flags)
+{
+   struct virgl_context *vctx = virgl_context(ctx);
+   struct virgl_screen *rs = virgl_screen(ctx->screen);
+
+   if (!(rs->caps.caps.v2.capability_bits & VIRGL_CAP_TEXTURE_BARRIER))
+      return;
+   virgl_encode_texture_barrier(vctx, flags);
+}
+
 static void virgl_destroy_sampler_view(struct pipe_context *ctx,
                                 struct pipe_sampler_view *view)
 {
@@ -802,6 +1073,17 @@ static void virgl_set_sample_mask(struct pipe_context *ctx,
    virgl_encoder_set_sample_mask(vctx, sample_mask);
 }
 
+static void virgl_set_min_samples(struct pipe_context *ctx,
+                                  unsigned min_samples)
+{
+   struct virgl_context *vctx = virgl_context(ctx);
+   struct virgl_screen *rs = virgl_screen(ctx->screen);
+
+   if (!(rs->caps.caps.v2.capability_bits & VIRGL_CAP_SET_MIN_SAMPLES))
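+      /* Host lacks VIRGL_CAP_SET_MIN_SAMPLES; silently drop the call. */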
+      return;
+   virgl_encoder_set_min_samples(vctx, min_samples);
+}
+
 static void virgl_set_clip_state(struct pipe_context *ctx,
                                  const struct pipe_clip_state *clip)
 {
@@ -809,6 +1091,18 @@ static void virgl_set_clip_state(struct pipe_context *ctx,
    virgl_encoder_set_clip_state(vctx, clip);
 }
 
+static void virgl_set_tess_state(struct pipe_context *ctx,
+                                 const float default_outer_level[4],
+                                 const float default_inner_level[2])
+{
+   struct virgl_context *vctx = virgl_context(ctx);
+   struct virgl_screen *rs = virgl_screen(ctx->screen);
+
+   if (!rs->caps.caps.v1.bset.has_tessellation_shaders)
+      return;
+   virgl_encode_set_tess_state(vctx, default_outer_level, default_inner_level);
+}
+
 static void virgl_resource_copy_region(struct pipe_context *ctx,
                                        struct pipe_resource *dst,
                                        unsigned dst_level,
@@ -821,7 +1115,10 @@ static void virgl_resource_copy_region(struct pipe_context *ctx,
    struct virgl_resource *dres = virgl_resource(dst);
    struct virgl_resource *sres = virgl_resource(src);
 
-   dres->clean = FALSE;
+   if (dres->u.b.target == PIPE_BUFFER)
+      util_range_add(&dres->valid_buffer_range, dstx, dstx + src_box->width);
+   virgl_resource_dirty(dres, dst_level);
+
    virgl_encode_resource_copy_region(vctx, dres, dst_level,
                                      dstx, dsty, dstz,
                                      sres, src_level,
@@ -841,31 +1138,304 @@ static void virgl_blit(struct pipe_context *ctx,
    struct virgl_resource *dres = virgl_resource(blit->dst.resource);
    struct virgl_resource *sres = virgl_resource(blit->src.resource);
 
-   dres->clean = FALSE;
+   assert(ctx->screen->get_param(ctx->screen,
+                                 PIPE_CAP_DEST_SURFACE_SRGB_CONTROL) ||
+          (util_format_is_srgb(blit->dst.resource->format) ==
+           util_format_is_srgb(blit->dst.format)));
+
+   virgl_resource_dirty(dres, blit->dst.level);
+
    virgl_encode_blit(vctx, dres, sres,
                      blit);
 }
 
+static void virgl_set_hw_atomic_buffers(struct pipe_context *ctx,
+                                        unsigned start_slot,
+                                        unsigned count,
+                                        const struct pipe_shader_buffer *buffers)
+{
+   struct virgl_context *vctx = virgl_context(ctx);
+
+   vctx->atomic_buffer_enabled_mask &= ~u_bit_consecutive(start_slot, count);
+   for (unsigned i = 0; i < count; i++) {
+      unsigned idx = start_slot + i;
+      if (buffers && buffers[i].buffer) {
+         struct virgl_resource *res = virgl_resource(buffers[i].buffer);
+         res->bind_history |= PIPE_BIND_SHADER_BUFFER;
+
+         pipe_resource_reference(&vctx->atomic_buffers[idx].buffer,
+                                 buffers[i].buffer);
+         vctx->atomic_buffers[idx] = buffers[i];
+         vctx->atomic_buffer_enabled_mask |= 1 << idx;
+      } else {
+         pipe_resource_reference(&vctx->atomic_buffers[idx].buffer, NULL);
+      }
+   }
+
+   virgl_encode_set_hw_atomic_buffers(vctx, start_slot, count, buffers);
+}
+
+static void virgl_set_shader_buffers(struct pipe_context *ctx,
+                                     enum pipe_shader_type shader,
+                                     unsigned start_slot, unsigned count,
+                                     const struct pipe_shader_buffer *buffers,
+                                     unsigned writable_bitmask)
+{
+   struct virgl_context *vctx = virgl_context(ctx);
+   struct virgl_screen *rs = virgl_screen(ctx->screen);
+   struct virgl_shader_binding_state *binding =
+      &vctx->shader_bindings[shader];
+
+   binding->ssbo_enabled_mask &= ~u_bit_consecutive(start_slot, count);
+   for (unsigned i = 0; i < count; i++) {
+      unsigned idx = start_slot + i;
+      if (buffers && buffers[i].buffer) {
+         struct virgl_resource *res = virgl_resource(buffers[i].buffer);
+         res->bind_history |= PIPE_BIND_SHADER_BUFFER;
+
+         pipe_resource_reference(&binding->ssbos[idx].buffer, buffers[i].buffer);
+         binding->ssbos[idx] = buffers[i];
+         binding->ssbo_enabled_mask |= 1 << idx;
+      } else {
+         pipe_resource_reference(&binding->ssbos[idx].buffer, NULL);
+      }
+   }
+
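+   /* The host advertises SSBO support separately for fragment/compute and
+    * for the remaining stages; a zero cap means this stage has no SSBOs,
+    * so there is nothing to encode. */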
+   uint32_t max_shader_buffer = (shader == PIPE_SHADER_FRAGMENT || shader == PIPE_SHADER_COMPUTE) ?
+      rs->caps.caps.v2.max_shader_buffer_frag_compute :
+      rs->caps.caps.v2.max_shader_buffer_other_stages;
+   if (!max_shader_buffer)
+      return;
+   virgl_encode_set_shader_buffers(vctx, shader, start_slot, count, buffers);
+}
+
+static void virgl_create_fence_fd(struct pipe_context *ctx,
+                                  struct pipe_fence_handle **fence,
+                                  int fd,
+                                  enum pipe_fd_type type)
+{
+   assert(type == PIPE_FD_TYPE_NATIVE_SYNC);
+   struct virgl_screen *rs = virgl_screen(ctx->screen);
+
+   if (rs->vws->cs_create_fence)
+      *fence = rs->vws->cs_create_fence(rs->vws, fd);
+}
+
+static void virgl_fence_server_sync(struct pipe_context *ctx,
+                                    struct pipe_fence_handle *fence)
+{
+   struct virgl_context *vctx = virgl_context(ctx);
+   struct virgl_screen *rs = virgl_screen(ctx->screen);
+
+   if (rs->vws->fence_server_sync)
+      rs->vws->fence_server_sync(rs->vws, vctx->cbuf, fence);
+}
+
+static void virgl_set_shader_images(struct pipe_context *ctx,
+                                    enum pipe_shader_type shader,
+                                    unsigned start_slot, unsigned count,
+                                    const struct pipe_image_view *images)
+{
+   struct virgl_context *vctx = virgl_context(ctx);
+   struct virgl_screen *rs = virgl_screen(ctx->screen);
+   struct virgl_shader_binding_state *binding =
+      &vctx->shader_bindings[shader];
+
+   binding->image_enabled_mask &= ~u_bit_consecutive(start_slot, count);
+   for (unsigned i = 0; i < count; i++) {
+      unsigned idx = start_slot + i;
+      if (images && images[i].resource) {
+         struct virgl_resource *res = virgl_resource(images[i].resource);
+         res->bind_history |= PIPE_BIND_SHADER_IMAGE;
+
+         pipe_resource_reference(&binding->images[idx].resource,
+                                 images[i].resource);
+         binding->images[idx] = images[i];
+         binding->image_enabled_mask |= 1 << idx;
+      } else {
+         pipe_resource_reference(&binding->images[idx].resource, NULL);
+      }
+   }
+
+   uint32_t max_shader_images = (shader == PIPE_SHADER_FRAGMENT || shader == PIPE_SHADER_COMPUTE) ?
+      rs->caps.caps.v2.max_shader_image_frag_compute :
+      rs->caps.caps.v2.max_shader_image_other_stages;
+   if (!max_shader_images)
+      return;
+   virgl_encode_set_shader_images(vctx, shader, start_slot, count, images);
+}
+
+static void virgl_memory_barrier(struct pipe_context *ctx,
+                                 unsigned flags)
+{
+   struct virgl_context *vctx = virgl_context(ctx);
+   struct virgl_screen *rs = virgl_screen(ctx->screen);
+
+   if (!(rs->caps.caps.v2.capability_bits & VIRGL_CAP_MEMORY_BARRIER))
+      return;
+   virgl_encode_memory_barrier(vctx, flags);
+}
+
+static void *virgl_create_compute_state(struct pipe_context *ctx,
+                                        const struct pipe_compute_state *state)
+{
+   struct virgl_context *vctx = virgl_context(ctx);
+   uint32_t handle;
+   const struct tgsi_token *new_tokens = state->prog;
+   struct pipe_stream_output_info so_info = {};
+   int ret;
+
+   handle = virgl_object_assign_handle();
+   ret = virgl_encode_shader_state(vctx, handle, PIPE_SHADER_COMPUTE,
+                                   &so_info,
+                                   state->req_local_mem,
+                                   new_tokens);
+   if (ret) {
+      return NULL;
+   }
+
+   return (void *)(unsigned long)handle;
+}
+
+static void virgl_bind_compute_state(struct pipe_context *ctx, void *state)
+{
+   uint32_t handle = (unsigned long)state;
+   struct virgl_context *vctx = virgl_context(ctx);
+
+   virgl_encode_bind_shader(vctx, handle, PIPE_SHADER_COMPUTE);
+}
+
+static void virgl_delete_compute_state(struct pipe_context *ctx, void *state)
+{
+   uint32_t handle = (unsigned long)state;
+   struct virgl_context *vctx = virgl_context(ctx);
+
+   virgl_encode_delete_object(vctx, handle, VIRGL_OBJECT_SHADER);
+}
+
+static void virgl_launch_grid(struct pipe_context *ctx,
+                              const struct pipe_grid_info *info)
+{
+   struct virgl_context *vctx = virgl_context(ctx);
+
+   if (!vctx->num_compute)
+      virgl_reemit_compute_resources(vctx);
+   vctx->num_compute++;
+
+   virgl_encode_launch_grid(vctx, info);
+}
+
+static void
+virgl_release_shader_binding(struct virgl_context *vctx,
+                             enum pipe_shader_type shader_type)
+{
+   struct virgl_shader_binding_state *binding =
+      &vctx->shader_bindings[shader_type];
+
+   while (binding->view_enabled_mask) {
+      int i = u_bit_scan(&binding->view_enabled_mask);
+      pipe_sampler_view_reference(
+            (struct pipe_sampler_view **)&binding->views[i], NULL);
+   }
+
+   while (binding->ubo_enabled_mask) {
+      int i = u_bit_scan(&binding->ubo_enabled_mask);
+      pipe_resource_reference(&binding->ubos[i].buffer, NULL);
+   }
+
+   while (binding->ssbo_enabled_mask) {
+      int i = u_bit_scan(&binding->ssbo_enabled_mask);
+      pipe_resource_reference(&binding->ssbos[i].buffer, NULL);
+   }
+
+   while (binding->image_enabled_mask) {
+      int i = u_bit_scan(&binding->image_enabled_mask);
+      pipe_resource_reference(&binding->images[i].resource, NULL);
+   }
+}
+
 static void virgl_context_destroy( struct pipe_context *ctx )
 {
    struct virgl_context *vctx = virgl_context(ctx);
    struct virgl_screen *rs = virgl_screen(ctx->screen);
+   enum pipe_shader_type shader_type;
 
    vctx->framebuffer.zsbuf = NULL;
    vctx->framebuffer.nr_cbufs = 0;
    virgl_encoder_destroy_sub_ctx(vctx, vctx->hw_sub_ctx_id);
-   virgl_flush_eq(vctx, vctx);
+   virgl_flush_eq(vctx, vctx, NULL);
+
+   for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++)
+      virgl_release_shader_binding(vctx, shader_type);
+
+   while (vctx->atomic_buffer_enabled_mask) {
+      int i = u_bit_scan(&vctx->atomic_buffer_enabled_mask);
+      pipe_resource_reference(&vctx->atomic_buffers[i].buffer, NULL);
+   }
 
    rs->vws->cmd_buf_destroy(vctx->cbuf);
    if (vctx->uploader)
      u_upload_destroy(vctx->uploader);
+   if (vctx->supports_staging)
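+      /* Tear down the staging manager set up in virgl_context_create. */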
+      virgl_staging_destroy(&vctx->staging);
    util_primconvert_destroy(vctx->primconvert);
+   virgl_transfer_queue_fini(&vctx->queue);
 
-   slab_destroy_child(&vctx->texture_transfer_pool);
+   slab_destroy_child(&vctx->transfer_pool);
 
    FREE(vctx);
 }
 
+static void virgl_get_sample_position(struct pipe_context *ctx,
+                                      unsigned sample_count,
+                                      unsigned index,
+                                      float *out_value)
+{
+   struct virgl_context *vctx = virgl_context(ctx);
+   struct virgl_screen *vs = virgl_screen(vctx->base.screen);
+
+   if (sample_count > vs->caps.caps.v1.max_samples) {
+      debug_printf("VIRGL: requested %d MSAA samples, but only %d supported\n",
+                   sample_count, vs->caps.caps.v1.max_samples);
+      return;
+   }
+
+   /* The following is basically copied from dri/i965/gen6_get_sample_position
+    * The only addition is that we hold the msaa positions for all sample
+    * counts in a flat array. */
+   uint32_t bits = 0;
+   if (sample_count == 1) {
+      out_value[0] = out_value[1] = 0.5f;
+      return;
+   } else if (sample_count == 2) {
+      bits = vs->caps.caps.v2.sample_locations[0] >> (8 * index);
+   } else if (sample_count <= 4) {
+      bits = vs->caps.caps.v2.sample_locations[1] >> (8 * index);
+   } else if (sample_count <= 8) {
+      bits = vs->caps.caps.v2.sample_locations[2 + (index >> 2)] >> (8 * (index & 3));
+   } else if (sample_count <= 16) {
+      bits = vs->caps.caps.v2.sample_locations[4 + (index >> 2)] >> (8 * (index & 3));
+   }
+   out_value[0] = ((bits >> 4) & 0xf) / 16.0f;
+   out_value[1] = (bits & 0xf) / 16.0f;
+
+   if (virgl_debug & VIRGL_DEBUG_VERBOSE)
+      debug_printf("VIRGL: sample position [%2d/%2d] = (%f, %f)\n",
+                   index, sample_count, out_value[0], out_value[1]);
+}
+
+static void virgl_send_tweaks(struct virgl_context *vctx, struct virgl_screen *rs)
+{
+   if (rs->tweak_gles_emulate_bgra)
+      virgl_encode_tweak(vctx, virgl_tweak_gles_brga_emulate, 1);
+
+   if (rs->tweak_gles_apply_bgra_dest_swizzle)
+      virgl_encode_tweak(vctx, virgl_tweak_gles_brga_apply_dest_swizzle, 1);
+
+   if (rs->tweak_gles_tf3_value > 0)
+      virgl_encode_tweak(vctx, virgl_tweak_gles_tf3_samples_passes_multiplier,
+                         rs->tweak_gles_tf3_value);
+}
+
 struct pipe_context *virgl_context_create(struct pipe_screen *pscreen,
                                           void *priv,
                                           unsigned flags)
@@ -873,8 +1443,9 @@ struct pipe_context *virgl_context_create(struct pipe_screen *pscreen,
    struct virgl_context *vctx;
    struct virgl_screen *rs = virgl_screen(pscreen);
    vctx = CALLOC_STRUCT(virgl_context);
+   const char *host_debug_flagstring;
 
-   vctx->cbuf = rs->vws->cmd_buf_create(rs->vws);
+   vctx->cbuf = rs->vws->cmd_buf_create(rs->vws, VIRGL_MAX_CMDBUF_DWORDS);
    if (!vctx->cbuf) {
       FREE(vctx);
      return NULL;
   }
@@ -899,21 +1470,32 @@ struct pipe_context *virgl_context_create(struct pipe_screen *pscreen,
    vctx->base.bind_vertex_elements_state = virgl_bind_vertex_elements_state;
    vctx->base.delete_vertex_elements_state = virgl_delete_vertex_elements_state;
    vctx->base.set_vertex_buffers = virgl_set_vertex_buffers;
-   vctx->base.set_index_buffer = virgl_set_index_buffer;
    vctx->base.set_constant_buffer = virgl_set_constant_buffer;
+   vctx->base.set_tess_state = virgl_set_tess_state;
 
    vctx->base.create_vs_state = virgl_create_vs_state;
+   vctx->base.create_tcs_state = virgl_create_tcs_state;
+   vctx->base.create_tes_state = virgl_create_tes_state;
    vctx->base.create_gs_state = virgl_create_gs_state;
    vctx->base.create_fs_state = virgl_create_fs_state;
 
    vctx->base.bind_vs_state = virgl_bind_vs_state;
+   vctx->base.bind_tcs_state = virgl_bind_tcs_state;
+   vctx->base.bind_tes_state = virgl_bind_tes_state;
    vctx->base.bind_gs_state = virgl_bind_gs_state;
    vctx->base.bind_fs_state = virgl_bind_fs_state;
 
    vctx->base.delete_vs_state = virgl_delete_vs_state;
+   vctx->base.delete_tcs_state = virgl_delete_tcs_state;
+   vctx->base.delete_tes_state = virgl_delete_tes_state;
    vctx->base.delete_gs_state = virgl_delete_gs_state;
    vctx->base.delete_fs_state = virgl_delete_fs_state;
 
+   vctx->base.create_compute_state = virgl_create_compute_state;
+   vctx->base.bind_compute_state = virgl_bind_compute_state;
+   vctx->base.delete_compute_state = virgl_delete_compute_state;
+   vctx->base.launch_grid = virgl_launch_grid;
+
    vctx->base.clear = virgl_clear;
    vctx->base.draw_vbo = virgl_draw_vbo;
    vctx->base.flush = virgl_flush_from_st;
@@ -921,6 +1503,7 @@ struct pipe_context *virgl_context_create(struct pipe_screen *pscreen,
    vctx->base.create_sampler_view = virgl_create_sampler_view;
    vctx->base.sampler_view_destroy = virgl_destroy_sampler_view;
    vctx->base.set_sampler_views = virgl_set_sampler_views;
+   vctx->base.texture_barrier = virgl_texture_barrier;
 
    vctx->base.create_sampler_state = virgl_create_sampler_state;
    vctx->base.delete_sampler_state = virgl_delete_sampler_state;
@@ -929,33 +1512,69 @@ struct pipe_context *virgl_context_create(struct pipe_screen *pscreen,
    vctx->base.set_polygon_stipple = virgl_set_polygon_stipple;
    vctx->base.set_scissor_states = virgl_set_scissor_states;
    vctx->base.set_sample_mask = virgl_set_sample_mask;
+   vctx->base.set_min_samples = virgl_set_min_samples;
    vctx->base.set_stencil_ref = virgl_set_stencil_ref;
    vctx->base.set_clip_state = virgl_set_clip_state;
 
    vctx->base.set_blend_color = virgl_set_blend_color;
 
+   vctx->base.get_sample_position = virgl_get_sample_position;
+
    vctx->base.resource_copy_region = virgl_resource_copy_region;
    vctx->base.flush_resource = virgl_flush_resource;
    vctx->base.blit = virgl_blit;
+   vctx->base.create_fence_fd = virgl_create_fence_fd;
+   vctx->base.fence_server_sync = virgl_fence_server_sync;
+
+   vctx->base.set_shader_buffers = virgl_set_shader_buffers;
+   vctx->base.set_hw_atomic_buffers = virgl_set_hw_atomic_buffers;
+   vctx->base.set_shader_images = virgl_set_shader_images;
+   vctx->base.memory_barrier = virgl_memory_barrier;
 
    virgl_init_context_resource_functions(&vctx->base);
    virgl_init_query_functions(vctx);
    virgl_init_so_functions(vctx);
 
-   list_inithead(&vctx->to_flush_bufs);
-   slab_create_child(&vctx->texture_transfer_pool, &rs->texture_transfer_pool);
+   slab_create_child(&vctx->transfer_pool, &rs->transfer_pool);
+   virgl_transfer_queue_init(&vctx->queue, vctx);
+   vctx->encoded_transfers = (rs->vws->supports_encoded_transfers &&
+                       (rs->caps.caps.v2.capability_bits & VIRGL_CAP_TRANSFER));
+
+   /* Reserve some space for transfers. */
+   if (vctx->encoded_transfers)
+      vctx->cbuf->cdw = VIRGL_MAX_TBUF_DWORDS;
 
    vctx->primconvert = util_primconvert_create(&vctx->base, rs->caps.caps.v1.prim_mask);
    vctx->uploader = u_upload_create(&vctx->base, 1024 * 1024,
-                                     PIPE_BIND_INDEX_BUFFER, PIPE_USAGE_STREAM);
+                                     PIPE_BIND_INDEX_BUFFER, PIPE_USAGE_STREAM, 0);
    if (!vctx->uploader)
          goto fail;
+   vctx->base.stream_uploader = vctx->uploader;
+   vctx->base.const_uploader = vctx->uploader;
+
+   /* We use a special staging buffer as the source of copy transfers.
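+    * Writes land in freshly allocated staging memory and the host copies
+    * them into the real resource when the command queue is processed, so
+    * the guest does not have to wait for the destination to become idle.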
+    */
+   if ((rs->caps.caps.v2.capability_bits & VIRGL_CAP_COPY_TRANSFER) &&
+       vctx->encoded_transfers) {
+      virgl_staging_init(&vctx->staging, &vctx->base, 1024 * 1024);
+      vctx->supports_staging = true;
+   }
 
    vctx->hw_sub_ctx_id = rs->sub_ctx_id++;
    virgl_encoder_create_sub_ctx(vctx, vctx->hw_sub_ctx_id);
 
    virgl_encoder_set_sub_ctx(vctx, vctx->hw_sub_ctx_id);
+
+   if (rs->caps.caps.v2.capability_bits & VIRGL_CAP_GUEST_MAY_INIT_LOG) {
+      host_debug_flagstring = getenv("VIRGL_HOST_DEBUG");
+      if (host_debug_flagstring)
+         virgl_encode_host_debug_flagstring(vctx, host_debug_flagstring);
+   }
+
+   if (rs->caps.caps.v2.capability_bits & VIRGL_CAP_APP_TWEAK_SUPPORT)
+      virgl_send_tweaks(vctx, rs);
+
    return &vctx->base;
 fail:
+   virgl_context_destroy(&vctx->base);
    return NULL;
 }
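
Throughout this patch the per-slot binding state (view_enabled_mask, ubo_enabled_mask, ssbo_enabled_mask, image_enabled_mask, atomic_buffer_enabled_mask) is walked with the same scan-and-clear idiom. A minimal standalone sketch of that idiom, assuming only the C standard library plus POSIX ffs(); bit_scan() below is a stand-in written for illustration, equivalent to Mesa's u_bit_scan():

    #include <stdio.h>
    #include <strings.h>   /* ffs() */

    /* Return the index of the lowest set bit and clear it from the mask. */
    static int bit_scan(unsigned *mask)
    {
       int i = ffs(*mask) - 1;
       *mask &= *mask - 1;   /* clear the lowest set bit */
       return i;
    }

    int main(void)
    {
       /* Slots 0, 3 and 5 hold live bindings. */
       unsigned enabled_mask = (1u << 0) | (1u << 3) | (1u << 5);
       unsigned remaining_mask = enabled_mask;

       /* Visits exactly the occupied slots, lowest index first. */
       while (remaining_mask) {
          int slot = bit_scan(&remaining_mask);
          printf("re-emit binding for slot %d\n", slot);
       }
       return 0;
    }

Iterating a copy (remaining_mask) leaves the real enabled mask intact for the next re-emit, which is why the attach and rebind helpers above never touch the binding state they walk.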