#include "virgl_protocol.h"
#include "virgl_resource.h"
#include "virgl_screen.h"
+#include "virgl_staging_mgr.h"
struct virgl_vertex_elements_state {
uint32_t handle;
return ++next_handle;
}
+bool
+virgl_can_rebind_resource(struct virgl_context *vctx,
+ struct pipe_resource *res)
+{
+ /* We cannot rebind resources that are referenced by host objects,
+ * which are:
+ *
+ * - VIRGL_OBJECT_SURFACE
+ * - VIRGL_OBJECT_SAMPLER_VIEW
+ * - VIRGL_OBJECT_STREAMOUT_TARGET
+ *
+ * Because surfaces cannot be created from buffers, we require the resource
+ * to be a buffer instead (and avoid tracking VIRGL_OBJECT_SURFACE binds).
+ */
+ const unsigned unsupported_bind = (PIPE_BIND_SAMPLER_VIEW |
+ PIPE_BIND_STREAM_OUTPUT);
+ const unsigned bind_history = virgl_resource(res)->bind_history;
+ return res->target == PIPE_BUFFER && !(bind_history & unsupported_bind);
+}
+
+void
+virgl_rebind_resource(struct virgl_context *vctx,
+ struct pipe_resource *res)
+{
+ /* Queries use internally created buffers and do not go through transfers,
+ * and index buffers are not bindable; neither needs to be tracked here.
+ */
+ ASSERTED const unsigned tracked_bind = (PIPE_BIND_VERTEX_BUFFER |
+ PIPE_BIND_CONSTANT_BUFFER |
+ PIPE_BIND_SHADER_BUFFER |
+ PIPE_BIND_SHADER_IMAGE);
+ const unsigned bind_history = virgl_resource(res)->bind_history;
+ unsigned i;
+
+ assert(virgl_can_rebind_resource(vctx, res) &&
+ (bind_history & tracked_bind) == bind_history);
+
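+ /* Vertex buffers are re-encoded lazily: marking the vertex array state
+ * dirty is enough for the next draw to pick up the new hw_res.
+ */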
+ if (bind_history & PIPE_BIND_VERTEX_BUFFER) {
+ for (i = 0; i < vctx->num_vertex_buffers; i++) {
+ if (vctx->vertex_buffer[i].buffer.resource == res) {
+ vctx->vertex_array_dirty = true;
+ break;
+ }
+ }
+ }
+
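+ /* Atomic buffers are tracked per-context rather than per-stage; re-encode
+ * any enabled slot that points at this resource.
+ */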
+ if (bind_history & PIPE_BIND_SHADER_BUFFER) {
+ uint32_t remaining_mask = vctx->atomic_buffer_enabled_mask;
+ while (remaining_mask) {
+ int i = u_bit_scan(&remaining_mask);
+ if (vctx->atomic_buffers[i].buffer == res) {
+ const struct pipe_shader_buffer *abo = &vctx->atomic_buffers[i];
+ virgl_encode_set_hw_atomic_buffers(vctx, i, 1, abo);
+ }
+ }
+ }
+
+ /* check per-stage shader bindings */
+ if (bind_history & (PIPE_BIND_CONSTANT_BUFFER |
+ PIPE_BIND_SHADER_BUFFER |
+ PIPE_BIND_SHADER_IMAGE)) {
+ enum pipe_shader_type shader_type;
+ for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
+ const struct virgl_shader_binding_state *binding =
+ &vctx->shader_bindings[shader_type];
+
+ if (bind_history & PIPE_BIND_CONSTANT_BUFFER) {
+ uint32_t remaining_mask = binding->ubo_enabled_mask;
+ while (remaining_mask) {
+ int i = u_bit_scan(&remaining_mask);
+ if (binding->ubos[i].buffer == res) {
+ const struct pipe_constant_buffer *ubo = &binding->ubos[i];
+ virgl_encoder_set_uniform_buffer(vctx, shader_type, i,
+ ubo->buffer_offset,
+ ubo->buffer_size,
+ virgl_resource(res));
+ }
+ }
+ }
+
+ if (bind_history & PIPE_BIND_SHADER_BUFFER) {
+ uint32_t remaining_mask = binding->ssbo_enabled_mask;
+ while (remaining_mask) {
+ int i = u_bit_scan(&remaining_mask);
+ if (binding->ssbos[i].buffer == res) {
+ const struct pipe_shader_buffer *ssbo = &binding->ssbos[i];
+ virgl_encode_set_shader_buffers(vctx, shader_type, i, 1,
+ ssbo);
+ }
+ }
+ }
+
+ if (bind_history & PIPE_BIND_SHADER_IMAGE) {
+ uint32_t remaining_mask = binding->image_enabled_mask;
+ while (remaining_mask) {
+ int i = u_bit_scan(&remaining_mask);
+ if (binding->images[i].resource == res) {
+ const struct pipe_image_view *image = &binding->images[i];
+ virgl_encode_set_shader_images(vctx, shader_type, i, 1,
+ image);
+ }
+ }
+ }
+ }
+ }
+}
+
static void virgl_attach_res_framebuffer(struct virgl_context *vctx)
{
struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
enum pipe_shader_type shader_type)
{
struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
- struct virgl_textures_info *tinfo = &vctx->samplers[shader_type];
+ const struct virgl_shader_binding_state *binding =
+ &vctx->shader_bindings[shader_type];
+ uint32_t remaining_mask = binding->view_enabled_mask;
struct virgl_resource *res;
- uint32_t remaining_mask = tinfo->enabled_mask;
- unsigned i;
- while (remaining_mask) {
- i = u_bit_scan(&remaining_mask);
- assert(tinfo->views[i]);
- res = virgl_resource(tinfo->views[i]->base.texture);
- if (res)
- vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
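+ /* u_bit_scan() pops the lowest set bit from the mask copy, so only
+ * enabled slots are visited. */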
+ while (remaining_mask) {
+ int i = u_bit_scan(&remaining_mask);
+ assert(binding->views[i] && binding->views[i]->texture);
+ res = virgl_resource(binding->views[i]->texture);
+ vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
}
}
enum pipe_shader_type shader_type)
{
struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
+ const struct virgl_shader_binding_state *binding =
+ &vctx->shader_bindings[shader_type];
+ uint32_t remaining_mask = binding->ubo_enabled_mask;
struct virgl_resource *res;
- unsigned i;
- for (i = 0; i < PIPE_MAX_CONSTANT_BUFFERS; i++) {
- res = virgl_resource(vctx->ubos[shader_type][i]);
- if (res) {
- vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
- }
+
+ while (remaining_mask) {
+ int i = u_bit_scan(&remaining_mask);
+ res = virgl_resource(binding->ubos[i].buffer);
+ assert(res);
+ vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
}
}
enum pipe_shader_type shader_type)
{
struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
+ const struct virgl_shader_binding_state *binding =
+ &vctx->shader_bindings[shader_type];
+ uint32_t remaining_mask = binding->ssbo_enabled_mask;
struct virgl_resource *res;
- unsigned i;
- for (i = 0; i < PIPE_MAX_SHADER_BUFFERS; i++) {
- res = virgl_resource(vctx->ssbos[shader_type][i]);
- if (res) {
- vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
- }
+
+ while (remaining_mask) {
+ int i = u_bit_scan(&remaining_mask);
+ res = virgl_resource(binding->ssbos[i].buffer);
+ assert(res);
+ vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
}
}
enum pipe_shader_type shader_type)
{
struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
+ const struct virgl_shader_binding_state *binding =
+ &vctx->shader_bindings[shader_type];
+ uint32_t remaining_mask = binding->image_enabled_mask;
struct virgl_resource *res;
- unsigned i;
- for (i = 0; i < PIPE_MAX_SHADER_IMAGES; i++) {
- res = virgl_resource(vctx->images[shader_type][i]);
- if (res) {
- vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
- }
+
+ while (remaining_mask) {
+ int i = u_bit_scan(&remaining_mask);
+ res = virgl_resource(binding->images[i].resource);
+ assert(res);
+ vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
}
}
static void virgl_attach_res_atomic_buffers(struct virgl_context *vctx)
{
struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
+ uint32_t remaining_mask = vctx->atomic_buffer_enabled_mask;
struct virgl_resource *res;
- unsigned i;
- for (i = 0; i < PIPE_MAX_HW_ATOMIC_BUFFERS; i++) {
- res = virgl_resource(vctx->atomic_buffers[i]);
- if (res) {
- vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
- }
+
+ while (remaining_mask) {
+ int i = u_bit_scan(&remaining_mask);
+ res = virgl_resource(vctx->atomic_buffers[i].buffer);
+ assert(res);
+ vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
}
}
* after flushing, the hw context still has a bunch of
* resources bound, so we need to rebind those here.
*/
-static void virgl_reemit_res(struct virgl_context *vctx)
+static void virgl_reemit_draw_resources(struct virgl_context *vctx)
{
enum pipe_shader_type shader_type;
/* framebuffer, sampler views, vertex/index/uniform/stream buffers */
virgl_attach_res_framebuffer(vctx);
- for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
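+ /* stop before PIPE_SHADER_COMPUTE: only graphics stages are reemitted
+ * here; compute has its own reemit path below */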
+ for (shader_type = 0; shader_type < PIPE_SHADER_COMPUTE; shader_type++) {
virgl_attach_res_sampler_views(vctx, shader_type);
virgl_attach_res_uniform_buffers(vctx, shader_type);
virgl_attach_res_shader_buffers(vctx, shader_type);
virgl_attach_res_so_targets(vctx);
}
+static void virgl_reemit_compute_resources(struct virgl_context *vctx)
+{
+ virgl_attach_res_sampler_views(vctx, PIPE_SHADER_COMPUTE);
+ virgl_attach_res_uniform_buffers(vctx, PIPE_SHADER_COMPUTE);
+ virgl_attach_res_shader_buffers(vctx, PIPE_SHADER_COMPUTE);
+ virgl_attach_res_shader_images(vctx, PIPE_SHADER_COMPUTE);
+
+ virgl_attach_res_atomic_buffers(vctx);
+}
+
static struct pipe_surface *virgl_create_surface(struct pipe_context *ctx,
struct pipe_resource *resource,
const struct pipe_surface *templ)
vrs->rs = *rs_state;
vrs->handle = virgl_object_assign_handle();
+ assert(rs_state->depth_clip_near ||
+ virgl_screen(ctx->screen)->caps.caps.v1.bset.depth_clip_disable);
+
virgl_encode_rasterizer_state(vctx, vrs->handle, rs_state);
return (void *)vrs;
}
&vctx->num_vertex_buffers,
buffers, start_slot, num_buffers);
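+ /* Record buffer binds; virgl_rebind_resource() consults this history
+ * when a resource's backing hw_res is replaced.
+ */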
+ if (buffers) {
+ for (unsigned i = 0; i < num_buffers; i++) {
+ struct virgl_resource *res =
+ virgl_resource(buffers[i].buffer.resource);
+ if (res && !buffers[i].is_user_buffer)
+ res->bind_history |= PIPE_BIND_VERTEX_BUFFER;
+ }
+ }
+
vctx->vertex_array_dirty = TRUE;
}
const struct pipe_constant_buffer *buf)
{
struct virgl_context *vctx = virgl_context(ctx);
+ struct virgl_shader_binding_state *binding =
+ &vctx->shader_bindings[shader];
- if (buf) {
- if (!buf->user_buffer){
- struct virgl_resource *res = virgl_resource(buf->buffer);
- virgl_encoder_set_uniform_buffer(vctx, shader, index, buf->buffer_offset,
- buf->buffer_size, res);
- pipe_resource_reference(&vctx->ubos[shader][index], buf->buffer);
- return;
- }
- pipe_resource_reference(&vctx->ubos[shader][index], NULL);
- virgl_encoder_write_constant_buffer(vctx, shader, index, buf->buffer_size / 4, buf->user_buffer);
+ if (buf && buf->buffer) {
+ struct virgl_resource *res = virgl_resource(buf->buffer);
+ res->bind_history |= PIPE_BIND_CONSTANT_BUFFER;
+
+ virgl_encoder_set_uniform_buffer(vctx, shader, index,
+ buf->buffer_offset,
+ buf->buffer_size, res);
+
+ pipe_resource_reference(&binding->ubos[index].buffer, buf->buffer);
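+ /* the reference was taken above; this struct copy stores offset/size and
+ * rewrites .buffer with the same pointer */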
+ binding->ubos[index] = *buf;
+ binding->ubo_enabled_mask |= 1 << index;
} else {
- virgl_encoder_write_constant_buffer(vctx, shader, index, 0, NULL);
- pipe_resource_reference(&vctx->ubos[shader][index], NULL);
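+ /* Inline user constants are written directly into the command stream; a
+ * NULL buf falls back to an empty dummy write.
+ */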
+ static const struct pipe_constant_buffer dummy_ubo;
+ if (!buf)
+ buf = &dummy_ubo;
+ virgl_encoder_write_constant_buffer(vctx, shader, index,
+ buf->buffer_size / 4,
+ buf->user_buffer);
+
+ pipe_resource_reference(&binding->ubos[index].buffer, NULL);
+ binding->ubo_enabled_mask &= ~(1 << index);
}
}
{
struct virgl_context *vctx = virgl_context(ctx);
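+ /* Resources are re-attached lazily, on the first draw after a flush. */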
+ if (!vctx->num_draws)
+ virgl_reemit_draw_resources(vctx);
+ vctx->num_draws++;
+
virgl_encode_clear(vctx, buffers, color, depth, stencil);
}
}
}
+ if (!vctx->num_draws)
+ virgl_reemit_draw_resources(vctx);
vctx->num_draws++;
+
virgl_hw_set_vertex_buffers(vctx);
if (info.index_size)
virgl_hw_set_index_buffer(vctx, &ib);
}
+static void virgl_submit_cmd(struct virgl_winsys *vws,
+ struct virgl_cmd_buf *cbuf,
+ struct pipe_fence_handle **fence)
+{
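+ /* When VIRGL_DEBUG_SYNC is set, wait on a fence after every submission
+ * so command buffers execute synchronously on the host.
+ */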
+ if (unlikely(virgl_debug & VIRGL_DEBUG_SYNC)) {
+ struct pipe_fence_handle *sync_fence = NULL;
+
+ vws->submit_cmd(vws, cbuf, &sync_fence);
+
+ vws->fence_wait(vws, sync_fence, PIPE_TIMEOUT_INFINITE);
+ vws->fence_reference(vws, &sync_fence, NULL);
+ } else {
+ vws->submit_cmd(vws, cbuf, fence);
+ }
+}
+
static void virgl_flush_eq(struct virgl_context *ctx, void *closure,
struct pipe_fence_handle **fence)
{
ctx->num_draws = ctx->num_compute = 0;
virgl_transfer_queue_clear(&ctx->queue, ctx->cbuf);
- rs->vws->submit_cmd(rs->vws, ctx->cbuf, fence);
+
+ virgl_submit_cmd(rs->vws, ctx->cbuf, fence);
/* Reserve some space for transfers. */
if (ctx->encoded_transfers)
virgl_encoder_set_sub_ctx(ctx, ctx->hw_sub_ctx_id);
- /* add back current framebuffer resources to reference list? */
- virgl_reemit_res(ctx);
-
ctx->cbuf_initial_cdw = ctx->cbuf->cdw;
+
+ /* We have flushed the command queue, including any pending copy transfers
+ * involving staging resources.
+ */
+ ctx->queued_staging_res_size = 0;
}
static void virgl_flush_from_st(struct pipe_context *ctx,
struct pipe_sampler_view **views)
{
struct virgl_context *vctx = virgl_context(ctx);
- int i;
- uint32_t disable_mask = ~((1ull << num_views) - 1);
- struct virgl_textures_info *tinfo = &vctx->samplers[shader_type];
- uint32_t new_mask = 0;
- uint32_t remaining_mask;
-
- remaining_mask = tinfo->enabled_mask & disable_mask;
-
- while (remaining_mask) {
- i = u_bit_scan(&remaining_mask);
- assert(tinfo->views[i]);
+ struct virgl_shader_binding_state *binding =
+ &vctx->shader_bindings[shader_type];
- pipe_sampler_view_reference((struct pipe_sampler_view **)&tinfo->views[i], NULL);
- }
-
- for (i = 0; i < num_views; i++) {
- struct virgl_sampler_view *grview = virgl_sampler_view(views[i]);
-
- if (views[i] == (struct pipe_sampler_view *)tinfo->views[i])
- continue;
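+ /* Clear the whole updated range first; slots that receive a view below
+ * set their enable bit again.
+ */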
+ binding->view_enabled_mask &= ~u_bit_consecutive(start_slot, num_views);
+ for (unsigned i = 0; i < num_views; i++) {
+ unsigned idx = start_slot + i;
+ if (views && views[i]) {
+ struct virgl_resource *res = virgl_resource(views[i]->texture);
+ res->bind_history |= PIPE_BIND_SAMPLER_VIEW;
- if (grview) {
- new_mask |= 1 << i;
- pipe_sampler_view_reference((struct pipe_sampler_view **)&tinfo->views[i], views[i]);
+ pipe_sampler_view_reference(&binding->views[idx], views[i]);
+ binding->view_enabled_mask |= 1 << idx;
} else {
- pipe_sampler_view_reference((struct pipe_sampler_view **)&tinfo->views[i], NULL);
- disable_mask |= 1 << i;
+ pipe_sampler_view_reference(&binding->views[idx], NULL);
}
}
- tinfo->enabled_mask &= ~disable_mask;
- tinfo->enabled_mask |= new_mask;
- virgl_encode_set_sampler_views(vctx, shader_type, start_slot, num_views, tinfo->views);
+ virgl_encode_set_sampler_views(vctx, shader_type,
+ start_slot, num_views, (struct virgl_sampler_view **)binding->views);
virgl_attach_res_sampler_views(vctx, shader_type);
}
struct virgl_resource *dres = virgl_resource(dst);
struct virgl_resource *sres = virgl_resource(src);
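+ /* A buffer copy defines the destination range, so extend the tracked
+ * valid range accordingly.
+ */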
+ if (dres->u.b.target == PIPE_BUFFER)
+ util_range_add(&dres->valid_buffer_range, dstx, dstx + src_box->width);
virgl_resource_dirty(dres, dst_level);
+
virgl_encode_resource_copy_region(vctx, dres,
dst_level, dstx, dsty, dstz,
sres, src_level,
{
struct virgl_context *vctx = virgl_context(ctx);
+ vctx->atomic_buffer_enabled_mask &= ~u_bit_consecutive(start_slot, count);
for (unsigned i = 0; i < count; i++) {
unsigned idx = start_slot + i;
-
- if (buffers) {
- if (buffers[i].buffer) {
- pipe_resource_reference(&vctx->atomic_buffers[idx],
- buffers[i].buffer);
- continue;
- }
+ if (buffers && buffers[i].buffer) {
+ struct virgl_resource *res = virgl_resource(buffers[i].buffer);
+ res->bind_history |= PIPE_BIND_SHADER_BUFFER;
+
+ pipe_resource_reference(&vctx->atomic_buffers[idx].buffer,
+ buffers[i].buffer);
+ vctx->atomic_buffers[idx] = buffers[i];
+ vctx->atomic_buffer_enabled_mask |= 1 << idx;
+ } else {
+ pipe_resource_reference(&vctx->atomic_buffers[idx].buffer, NULL);
}
- pipe_resource_reference(&vctx->atomic_buffers[idx], NULL);
}
+
virgl_encode_set_hw_atomic_buffers(vctx, start_slot, count, buffers);
}
{
struct virgl_context *vctx = virgl_context(ctx);
struct virgl_screen *rs = virgl_screen(ctx->screen);
+ struct virgl_shader_binding_state *binding =
+ &vctx->shader_bindings[shader];
+ binding->ssbo_enabled_mask &= ~u_bit_consecutive(start_slot, count);
for (unsigned i = 0; i < count; i++) {
unsigned idx = start_slot + i;
+ if (buffers && buffers[i].buffer) {
+ struct virgl_resource *res = virgl_resource(buffers[i].buffer);
+ res->bind_history |= PIPE_BIND_SHADER_BUFFER;
- if (buffers) {
- if (buffers[i].buffer) {
- pipe_resource_reference(&vctx->ssbos[shader][idx], buffers[i].buffer);
- continue;
- }
+ pipe_resource_reference(&binding->ssbos[idx].buffer, buffers[i].buffer);
+ binding->ssbos[idx] = buffers[i];
+ binding->ssbo_enabled_mask |= 1 << idx;
+ } else {
+ pipe_resource_reference(&binding->ssbos[idx].buffer, NULL);
}
- pipe_resource_reference(&vctx->ssbos[shader][idx], NULL);
}
uint32_t max_shader_buffer = (shader == PIPE_SHADER_FRAGMENT || shader == PIPE_SHADER_COMPUTE) ?
{
struct virgl_context *vctx = virgl_context(ctx);
struct virgl_screen *rs = virgl_screen(ctx->screen);
+ struct virgl_shader_binding_state *binding =
+ &vctx->shader_bindings[shader];
+ binding->image_enabled_mask &= ~u_bit_consecutive(start_slot, count);
for (unsigned i = 0; i < count; i++) {
unsigned idx = start_slot + i;
-
- if (images) {
- if (images[i].resource) {
- pipe_resource_reference(&vctx->images[shader][idx], images[i].resource);
- continue;
- }
+ if (images && images[i].resource) {
+ struct virgl_resource *res = virgl_resource(images[i].resource);
+ res->bind_history |= PIPE_BIND_SHADER_IMAGE;
+
+ pipe_resource_reference(&binding->images[idx].resource,
+ images[i].resource);
+ binding->images[idx] = images[i];
+ binding->image_enabled_mask |= 1 << idx;
+ } else {
+ pipe_resource_reference(&binding->images[idx].resource, NULL);
}
- pipe_resource_reference(&vctx->images[shader][idx], NULL);
}
uint32_t max_shader_images = (shader == PIPE_SHADER_FRAGMENT || shader == PIPE_SHADER_COMPUTE) ?
const struct pipe_grid_info *info)
{
struct virgl_context *vctx = virgl_context(ctx);
- virgl_encode_launch_grid(vctx, info);
+
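+ /* Compute resources are re-attached on the first dispatch after a flush. */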
+ if (!vctx->num_compute)
+ virgl_reemit_compute_resources(vctx);
vctx->num_compute++;
+
+ virgl_encode_launch_grid(vctx, info);
+}
+
+static void
+virgl_release_shader_binding(struct virgl_context *vctx,
+ enum pipe_shader_type shader_type)
+{
+ struct virgl_shader_binding_state *binding =
+ &vctx->shader_bindings[shader_type];
+
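+ /* u_bit_scan() drains each mask in place, releasing every bound slot. */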
+ while (binding->view_enabled_mask) {
+ int i = u_bit_scan(&binding->view_enabled_mask);
+ pipe_sampler_view_reference(
+ (struct pipe_sampler_view **)&binding->views[i], NULL);
+ }
+
+ while (binding->ubo_enabled_mask) {
+ int i = u_bit_scan(&binding->ubo_enabled_mask);
+ pipe_resource_reference(&binding->ubos[i].buffer, NULL);
+ }
+
+ while (binding->ssbo_enabled_mask) {
+ int i = u_bit_scan(&binding->ssbo_enabled_mask);
+ pipe_resource_reference(&binding->ssbos[i].buffer, NULL);
+ }
+
+ while (binding->image_enabled_mask) {
+ int i = u_bit_scan(&binding->image_enabled_mask);
+ pipe_resource_reference(&binding->images[i].resource, NULL);
+ }
}
static void
{
struct virgl_context *vctx = virgl_context(ctx);
struct virgl_screen *rs = virgl_screen(ctx->screen);
+ enum pipe_shader_type shader_type;
vctx->framebuffer.zsbuf = NULL;
vctx->framebuffer.nr_cbufs = 0;
virgl_encoder_destroy_sub_ctx(vctx, vctx->hw_sub_ctx_id);
virgl_flush_eq(vctx, vctx, NULL);
+ for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++)
+ virgl_release_shader_binding(vctx, shader_type);
+
+ while (vctx->atomic_buffer_enabled_mask) {
+ int i = u_bit_scan(&vctx->atomic_buffer_enabled_mask);
+ pipe_resource_reference(&vctx->atomic_buffers[i].buffer, NULL);
+ }
+
rs->vws->cmd_buf_destroy(vctx->cbuf);
if (vctx->uploader)
u_upload_destroy(vctx->uploader);
+ if (vctx->supports_staging)
+ virgl_staging_destroy(&vctx->staging);
util_primconvert_destroy(vctx->primconvert);
virgl_transfer_queue_fini(&vctx->queue);
index, sample_count, out_value[0], out_value[1]);
}
+static void virgl_send_tweaks(struct virgl_context *vctx, struct virgl_screen *rs)
+{
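+ /* Forward the screen's tweaks to the host renderer once per context. */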
+ if (rs->tweak_gles_emulate_bgra)
+ virgl_encode_tweak(vctx, virgl_tweak_gles_brga_emulate, 1);
+
+ if (rs->tweak_gles_apply_bgra_dest_swizzle)
+ virgl_encode_tweak(vctx, virgl_tweak_gles_brga_apply_dest_swizzle, 1);
+
+ if (rs->tweak_gles_tf3_value > 0)
+ virgl_encode_tweak(vctx, virgl_tweak_gles_tf3_samples_passes_multiplier,
+ rs->tweak_gles_tf3_value);
+}
+
struct pipe_context *virgl_context_create(struct pipe_screen *pscreen,
void *priv,
unsigned flags)
virgl_init_so_functions(vctx);
slab_create_child(&vctx->transfer_pool, &rs->transfer_pool);
- virgl_transfer_queue_init(&vctx->queue, rs, &vctx->transfer_pool);
+ virgl_transfer_queue_init(&vctx->queue, vctx);
vctx->encoded_transfers = (rs->vws->supports_encoded_transfers &&
(rs->caps.caps.v2.capability_bits & VIRGL_CAP_TRANSFER));
vctx->base.stream_uploader = vctx->uploader;
vctx->base.const_uploader = vctx->uploader;
+ /* We use a special staging buffer as the source of copy transfers. */
+ if ((rs->caps.caps.v2.capability_bits & VIRGL_CAP_COPY_TRANSFER) &&
+ vctx->encoded_transfers) {
+ virgl_staging_init(&vctx->staging, &vctx->base, 1024 * 1024);
+ vctx->supports_staging = true;
+ }
+
vctx->hw_sub_ctx_id = rs->sub_ctx_id++;
virgl_encoder_create_sub_ctx(vctx, vctx->hw_sub_ctx_id);
virgl_encode_host_debug_flagstring(vctx, host_debug_flagstring);
}
+ if (rs->caps.caps.v2.capability_bits & VIRGL_CAP_APP_TWEAK_SUPPORT)
+ virgl_send_tweaks(vctx, rs);
+
return &vctx->base;
fail:
+ virgl_context_destroy(&vctx->base);
return NULL;
}