virgl: fixup_readback_format --> fixup_formats
[mesa.git] / src / gallium / drivers / virgl / virgl_context.c
index 2479690e8d483ddf8da0e86fc6aa32745e63b214..bbb5247c86f1b4585a02b8e3dcc1248785052af4 100644
@@ -47,6 +47,7 @@
 #include "virgl_protocol.h"
 #include "virgl_resource.h"
 #include "virgl_screen.h"
+#include "virgl_staging_mgr.h"
 
 struct virgl_vertex_elements_state {
    uint32_t handle;
@@ -60,6 +61,113 @@ uint32_t virgl_object_assign_handle(void)
    return ++next_handle;
 }
 
+bool
+virgl_can_rebind_resource(struct virgl_context *vctx,
+                          struct pipe_resource *res)
+{
+   /* We cannot rebind resources that are referenced by host objects, which
+    * are
+    *
+    *  - VIRGL_OBJECT_SURFACE
+    *  - VIRGL_OBJECT_SAMPLER_VIEW
+    *  - VIRGL_OBJECT_STREAMOUT_TARGET
+    *
+    * Because surfaces cannot be created from buffers, we require the resource
+    * to be a buffer instead (and avoid tracking VIRGL_OBJECT_SURFACE binds).
+    */
+   const unsigned unsupported_bind = (PIPE_BIND_SAMPLER_VIEW |
+                                      PIPE_BIND_STREAM_OUTPUT);
+   const unsigned bind_history = virgl_resource(res)->bind_history;
+   return res->target == PIPE_BUFFER && !(bind_history & unsupported_bind);
+}
+
+void
+virgl_rebind_resource(struct virgl_context *vctx,
+                      struct pipe_resource *res)
+{
+   /* Queries use internally created buffers and do not go through transfers,
+    * and index buffers are not bindable.  Neither is tracked here.
+    */
+   ASSERTED const unsigned tracked_bind = (PIPE_BIND_VERTEX_BUFFER |
+                                           PIPE_BIND_CONSTANT_BUFFER |
+                                           PIPE_BIND_SHADER_BUFFER |
+                                           PIPE_BIND_SHADER_IMAGE);
+   const unsigned bind_history = virgl_resource(res)->bind_history;
+   unsigned i;
+
+   assert(virgl_can_rebind_resource(vctx, res) &&
+          (bind_history & tracked_bind) == bind_history);
+
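+   /* Vertex buffer bindings are re-encoded lazily at draw time, so marking
+    * the vertex array state dirty is enough here.
+    */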
+   if (bind_history & PIPE_BIND_VERTEX_BUFFER) {
+      for (i = 0; i < vctx->num_vertex_buffers; i++) {
+         if (vctx->vertex_buffer[i].buffer.resource == res) {
+            vctx->vertex_array_dirty = true;
+            break;
+         }
+      }
+   }
+
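+   /* Atomic buffers and the per-stage bindings below are encoded eagerly,
+    * so re-emit any binding command that references the resource.
+    */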
+   if (bind_history & PIPE_BIND_SHADER_BUFFER) {
+      uint32_t remaining_mask = vctx->atomic_buffer_enabled_mask;
+      while (remaining_mask) {
+         int i = u_bit_scan(&remaining_mask);
+         if (vctx->atomic_buffers[i].buffer == res) {
+            const struct pipe_shader_buffer *abo = &vctx->atomic_buffers[i];
+            virgl_encode_set_hw_atomic_buffers(vctx, i, 1, abo);
+         }
+      }
+   }
+
+   /* check per-stage shader bindings */
+   if (bind_history & (PIPE_BIND_CONSTANT_BUFFER |
+                       PIPE_BIND_SHADER_BUFFER |
+                       PIPE_BIND_SHADER_IMAGE)) {
+      enum pipe_shader_type shader_type;
+      for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
+         const struct virgl_shader_binding_state *binding =
+            &vctx->shader_bindings[shader_type];
+
+         if (bind_history & PIPE_BIND_CONSTANT_BUFFER) {
+            uint32_t remaining_mask = binding->ubo_enabled_mask;
+            while (remaining_mask) {
+               int i = u_bit_scan(&remaining_mask);
+               if (binding->ubos[i].buffer == res) {
+                  const struct pipe_constant_buffer *ubo = &binding->ubos[i];
+                  virgl_encoder_set_uniform_buffer(vctx, shader_type, i,
+                                                   ubo->buffer_offset,
+                                                   ubo->buffer_size,
+                                                   virgl_resource(res));
+               }
+            }
+         }
+
+         if (bind_history & PIPE_BIND_SHADER_BUFFER) {
+            uint32_t remaining_mask = binding->ssbo_enabled_mask;
+            while (remaining_mask) {
+               int i = u_bit_scan(&remaining_mask);
+               if (binding->ssbos[i].buffer == res) {
+                  const struct pipe_shader_buffer *ssbo = &binding->ssbos[i];
+                  virgl_encode_set_shader_buffers(vctx, shader_type, i, 1,
+                                                  ssbo);
+               }
+            }
+         }
+
+         if (bind_history & PIPE_BIND_SHADER_IMAGE) {
+            uint32_t remaining_mask = binding->image_enabled_mask;
+            while (remaining_mask) {
+               int i = u_bit_scan(&remaining_mask);
+               if (binding->images[i].resource == res) {
+                  const struct pipe_image_view *image = &binding->images[i];
+                  virgl_encode_set_shader_images(vctx, shader_type, i, 1,
+                                                 image);
+               }
+            }
+         }
+      }
+   }
+}
+
 static void virgl_attach_res_framebuffer(struct virgl_context *vctx)
 {
    struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
@@ -162,13 +270,16 @@ static void virgl_attach_res_shader_buffers(struct virgl_context *vctx,
                                             enum pipe_shader_type shader_type)
 {
    struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
+   const struct virgl_shader_binding_state *binding =
+      &vctx->shader_bindings[shader_type];
+   uint32_t remaining_mask = binding->ssbo_enabled_mask;
    struct virgl_resource *res;
-   unsigned i;
-   for (i = 0; i < PIPE_MAX_SHADER_BUFFERS; i++) {
-      res = virgl_resource(vctx->ssbos[shader_type][i]);
-      if (res) {
-         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
-      }
+
+   while (remaining_mask) {
+      int i = u_bit_scan(&remaining_mask);
+      res = virgl_resource(binding->ssbos[i].buffer);
+      assert(res);
+      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
    }
 }
 
@@ -176,26 +287,30 @@ static void virgl_attach_res_shader_images(struct virgl_context *vctx,
                                            enum pipe_shader_type shader_type)
 {
    struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
+   const struct virgl_shader_binding_state *binding =
+      &vctx->shader_bindings[shader_type];
+   uint32_t remaining_mask = binding->image_enabled_mask;
    struct virgl_resource *res;
-   unsigned i;
-   for (i = 0; i < PIPE_MAX_SHADER_IMAGES; i++) {
-      res = virgl_resource(vctx->images[shader_type][i]);
-      if (res) {
-         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
-      }
+
+   while (remaining_mask) {
+      int i = u_bit_scan(&remaining_mask);
+      res = virgl_resource(binding->images[i].resource);
+      assert(res);
+      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
    }
 }
 
 static void virgl_attach_res_atomic_buffers(struct virgl_context *vctx)
 {
    struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
+   uint32_t remaining_mask = vctx->atomic_buffer_enabled_mask;
    struct virgl_resource *res;
-   unsigned i;
-   for (i = 0; i < PIPE_MAX_HW_ATOMIC_BUFFERS; i++) {
-      res = virgl_resource(vctx->atomic_buffers[i]);
-      if (res) {
-         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
-      }
+
+   while (remaining_mask) {
+      int i = u_bit_scan(&remaining_mask);
+      res = virgl_resource(vctx->atomic_buffers[i].buffer);
+      assert(res);
+      vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
    }
 }
 
@@ -349,6 +464,9 @@ static void *virgl_create_rasterizer_state(struct pipe_context *ctx,
    vrs->rs = *rs_state;
    vrs->handle = virgl_object_assign_handle();
 
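+   /* Near-plane depth clipping may only be disabled when the host exposes
+    * the depth_clip_disable cap.
+    */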
+   assert(rs_state->depth_clip_near ||
+          virgl_screen(ctx->screen)->caps.caps.v1.bset.depth_clip_disable);
+
    virgl_encode_rasterizer_state(vctx, vrs->handle, rs_state);
    return (void *)vrs;
 }
@@ -459,6 +577,15 @@ static void virgl_set_vertex_buffers(struct pipe_context *ctx,
                                  &vctx->num_vertex_buffers,
                                  buffers, start_slot, num_buffers);
 
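+   /* Track bind history so virgl_rebind_resource() knows which bindings may
+    * reference this buffer.
+    */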
+   if (buffers) {
+      for (unsigned i = 0; i < num_buffers; i++) {
+         struct virgl_resource *res =
+            virgl_resource(buffers[i].buffer.resource);
+         if (res && !buffers[i].is_user_buffer)
+            res->bind_history |= PIPE_BIND_VERTEX_BUFFER;
+      }
+   }
+
    vctx->vertex_array_dirty = TRUE;
 }
 
@@ -513,6 +640,8 @@ static void virgl_set_constant_buffer(struct pipe_context *ctx,
 
    if (buf && buf->buffer) {
       struct virgl_resource *res = virgl_resource(buf->buffer);
+      res->bind_history |= PIPE_BIND_CONSTANT_BUFFER;
+
       virgl_encoder_set_uniform_buffer(vctx, shader, index,
                                        buf->buffer_offset,
                                        buf->buffer_size, res);
@@ -744,6 +873,22 @@ static void virgl_draw_vbo(struct pipe_context *ctx,
 
 }
 
+static void virgl_submit_cmd(struct virgl_winsys *vws,
+                             struct virgl_cmd_buf *cbuf,
+                             struct pipe_fence_handle **fence)
+{
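+   /* With the "sync" debug option enabled, wait for every submission to
+    * complete before returning, making command buffer execution synchronous
+    * for easier debugging.
+    */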
+   if (unlikely(virgl_debug & VIRGL_DEBUG_SYNC)) {
+      struct pipe_fence_handle *sync_fence = NULL;
+
+      vws->submit_cmd(vws, cbuf, &sync_fence);
+
+      vws->fence_wait(vws, sync_fence, PIPE_TIMEOUT_INFINITE);
+      vws->fence_reference(vws, &sync_fence, NULL);
+   } else {
+      vws->submit_cmd(vws, cbuf, fence);
+   }
+}
+
 static void virgl_flush_eq(struct virgl_context *ctx, void *closure,
                           struct pipe_fence_handle **fence)
 {
@@ -762,7 +907,8 @@ static void virgl_flush_eq(struct virgl_context *ctx, void *closure,
    ctx->num_draws = ctx->num_compute = 0;
 
    virgl_transfer_queue_clear(&ctx->queue, ctx->cbuf);
-   rs->vws->submit_cmd(rs->vws, ctx->cbuf, fence);
+
+   virgl_submit_cmd(rs->vws, ctx->cbuf, fence);
 
    /* Reserve some space for transfers. */
    if (ctx->encoded_transfers)
@@ -771,6 +917,11 @@ static void virgl_flush_eq(struct virgl_context *ctx, void *closure,
    virgl_encoder_set_sub_ctx(ctx, ctx->hw_sub_ctx_id);
 
    ctx->cbuf_initial_cdw = ctx->cbuf->cdw;
+
+   /* We have flushed the command queue, including any pending copy transfers
+    * involving staging resources.
+    */
+   ctx->queued_staging_res_size = 0;
 }
 
 static void virgl_flush_from_st(struct pipe_context *ctx,
@@ -826,6 +977,9 @@ static void virgl_set_sampler_views(struct pipe_context *ctx,
    for (unsigned i = 0; i < num_views; i++) {
       unsigned idx = start_slot + i;
       if (views && views[i]) {
+         struct virgl_resource *res = virgl_resource(views[i]->texture);
+         res->bind_history |= PIPE_BIND_SAMPLER_VIEW;
+
          pipe_sampler_view_reference(&binding->views[idx], views[i]);
          binding->view_enabled_mask |= 1 << idx;
       } else {
@@ -1001,18 +1155,22 @@ static void virgl_set_hw_atomic_buffers(struct pipe_context *ctx,
 {
    struct virgl_context *vctx = virgl_context(ctx);
 
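+   /* Clear the enabled bits for the slots being rewritten; they are set
+    * again below for each non-NULL buffer.
+    */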
+   vctx->atomic_buffer_enabled_mask &= ~u_bit_consecutive(start_slot, count);
    for (unsigned i = 0; i < count; i++) {
       unsigned idx = start_slot + i;
-
-      if (buffers) {
-         if (buffers[i].buffer) {
-            pipe_resource_reference(&vctx->atomic_buffers[idx],
-                                    buffers[i].buffer);
-            continue;
-         }
+      if (buffers && buffers[i].buffer) {
+         struct virgl_resource *res = virgl_resource(buffers[i].buffer);
+         res->bind_history |= PIPE_BIND_SHADER_BUFFER;
+
+         pipe_resource_reference(&vctx->atomic_buffers[idx].buffer,
+                                 buffers[i].buffer);
+         vctx->atomic_buffers[idx] = buffers[i];
+         vctx->atomic_buffer_enabled_mask |= 1 << idx;
+      } else {
+         pipe_resource_reference(&vctx->atomic_buffers[idx].buffer, NULL);
       }
-      pipe_resource_reference(&vctx->atomic_buffers[idx], NULL);
    }
+
    virgl_encode_set_hw_atomic_buffers(vctx, start_slot, count, buffers);
 }
 
@@ -1024,17 +1182,22 @@ static void virgl_set_shader_buffers(struct pipe_context *ctx,
 {
    struct virgl_context *vctx = virgl_context(ctx);
    struct virgl_screen *rs = virgl_screen(ctx->screen);
+   struct virgl_shader_binding_state *binding =
+      &vctx->shader_bindings[shader];
 
+   binding->ssbo_enabled_mask &= ~u_bit_consecutive(start_slot, count);
    for (unsigned i = 0; i < count; i++) {
       unsigned idx = start_slot + i;
+      if (buffers && buffers[i].buffer) {
+         struct virgl_resource *res = virgl_resource(buffers[i].buffer);
+         res->bind_history |= PIPE_BIND_SHADER_BUFFER;
 
-      if (buffers) {
-         if (buffers[i].buffer) {
-            pipe_resource_reference(&vctx->ssbos[shader][idx], buffers[i].buffer);
-            continue;
-         }
+         pipe_resource_reference(&binding->ssbos[idx].buffer, buffers[i].buffer);
+         binding->ssbos[idx] = buffers[i];
+         binding->ssbo_enabled_mask |= 1 << idx;
+      } else {
+         pipe_resource_reference(&binding->ssbos[idx].buffer, NULL);
       }
-      pipe_resource_reference(&vctx->ssbos[shader][idx], NULL);
    }
 
    uint32_t max_shader_buffer = (shader == PIPE_SHADER_FRAGMENT || shader == PIPE_SHADER_COMPUTE) ?
@@ -1074,17 +1237,23 @@ static void virgl_set_shader_images(struct pipe_context *ctx,
 {
    struct virgl_context *vctx = virgl_context(ctx);
    struct virgl_screen *rs = virgl_screen(ctx->screen);
+   struct virgl_shader_binding_state *binding =
+      &vctx->shader_bindings[shader];
 
+   binding->image_enabled_mask &= ~u_bit_consecutive(start_slot, count);
    for (unsigned i = 0; i < count; i++) {
       unsigned idx = start_slot + i;
-
-      if (images) {
-         if (images[i].resource) {
-            pipe_resource_reference(&vctx->images[shader][idx], images[i].resource);
-            continue;
-         }
+      if (images && images[i].resource) {
+         struct virgl_resource *res = virgl_resource(images[i].resource);
+         res->bind_history |= PIPE_BIND_SHADER_IMAGE;
+
+         pipe_resource_reference(&binding->images[idx].resource,
+                                 images[i].resource);
+         binding->images[idx] = images[i];
+         binding->image_enabled_mask |= 1 << idx;
+      } else {
+         pipe_resource_reference(&binding->images[idx].resource, NULL);
       }
-      pipe_resource_reference(&vctx->images[shader][idx], NULL);
    }
 
    uint32_t max_shader_images = (shader == PIPE_SHADER_FRAGMENT || shader == PIPE_SHADER_COMPUTE) ?
@@ -1172,6 +1341,16 @@ virgl_release_shader_binding(struct virgl_context *vctx,
       int i = u_bit_scan(&binding->ubo_enabled_mask);
       pipe_resource_reference(&binding->ubos[i].buffer, NULL);
    }
+
+   while (binding->ssbo_enabled_mask) {
+      int i = u_bit_scan(&binding->ssbo_enabled_mask);
+      pipe_resource_reference(&binding->ssbos[i].buffer, NULL);
+   }
+
+   while (binding->image_enabled_mask) {
+      int i = u_bit_scan(&binding->image_enabled_mask);
+      pipe_resource_reference(&binding->images[i].resource, NULL);
+   }
 }
 
 static void
@@ -1189,9 +1368,16 @@ virgl_context_destroy( struct pipe_context *ctx )
    for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++)
       virgl_release_shader_binding(vctx, shader_type);
 
+   while (vctx->atomic_buffer_enabled_mask) {
+      int i = u_bit_scan(&vctx->atomic_buffer_enabled_mask);
+      pipe_resource_reference(&vctx->atomic_buffers[i].buffer, NULL);
+   }
+
    rs->vws->cmd_buf_destroy(vctx->cbuf);
    if (vctx->uploader)
       u_upload_destroy(vctx->uploader);
+   if (vctx->supports_staging)
+      virgl_staging_destroy(&vctx->staging);
    util_primconvert_destroy(vctx->primconvert);
    virgl_transfer_queue_fini(&vctx->queue);
 
@@ -1237,6 +1423,19 @@ static void virgl_get_sample_position(struct pipe_context *ctx,
                    index, sample_count, out_value[0], out_value[1]);
 }
 
+static void virgl_send_tweaks(struct virgl_context *vctx, struct virgl_screen *rs)
+{
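+   /* Forward the screen's GLES emulation tweaks to the host renderer. */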
+   if (rs->tweak_gles_emulate_bgra)
+      virgl_encode_tweak(vctx, virgl_tweak_gles_brga_emulate, 1);
+
+   if (rs->tweak_gles_apply_bgra_dest_swizzle)
+      virgl_encode_tweak(vctx, virgl_tweak_gles_brga_apply_dest_swizzle, 1);
+
+   if (rs->tweak_gles_tf3_value > 0)
+      virgl_encode_tweak(vctx, virgl_tweak_gles_tf3_samples_passes_multiplier,
+                         rs->tweak_gles_tf3_value);
+}
+
 struct pipe_context *virgl_context_create(struct pipe_screen *pscreen,
                                           void *priv,
                                           unsigned flags)
@@ -1337,7 +1536,7 @@ struct pipe_context *virgl_context_create(struct pipe_screen *pscreen,
    virgl_init_so_functions(vctx);
 
    slab_create_child(&vctx->transfer_pool, &rs->transfer_pool);
-   virgl_transfer_queue_init(&vctx->queue, rs, &vctx->transfer_pool);
+   virgl_transfer_queue_init(&vctx->queue, vctx);
    vctx->encoded_transfers = (rs->vws->supports_encoded_transfers &&
                        (rs->caps.caps.v2.capability_bits & VIRGL_CAP_TRANSFER));
 
@@ -1353,6 +1552,13 @@ struct pipe_context *virgl_context_create(struct pipe_screen *pscreen,
    vctx->base.stream_uploader = vctx->uploader;
    vctx->base.const_uploader = vctx->uploader;
 
+   /* We use a special staging buffer as the source of copy transfers. */
+   if ((rs->caps.caps.v2.capability_bits & VIRGL_CAP_COPY_TRANSFER) &&
+       vctx->encoded_transfers) {
+      virgl_staging_init(&vctx->staging, &vctx->base, 1024 * 1024);
+      vctx->supports_staging = true;
+   }
+
    vctx->hw_sub_ctx_id = rs->sub_ctx_id++;
    virgl_encoder_create_sub_ctx(vctx, vctx->hw_sub_ctx_id);
 
@@ -1364,7 +1570,11 @@ struct pipe_context *virgl_context_create(struct pipe_screen *pscreen,
          virgl_encode_host_debug_flagstring(vctx, host_debug_flagstring);
    }
 
+   if (rs->caps.caps.v2.capability_bits & VIRGL_CAP_APP_TWEAK_SUPPORT)
+      virgl_send_tweaks(vctx, rs);
+
    return &vctx->base;
 fail:
+   virgl_context_destroy(&vctx->base);
    return NULL;
 }