virgl: Use copy transfers for textures
[mesa.git] / src / gallium / drivers / virgl / virgl_resource.c
index 6e23687b594269bd02825a910f8883549bac8628..897e0c16c7ef2ce1850ea7e42468e22bd659b709 100644
@@ -23,6 +23,7 @@
 #include "util/u_format.h"
 #include "util/u_inlines.h"
 #include "util/u_memory.h"
+#include "util/u_upload_mgr.h"
 #include "virgl_context.h"
 #include "virgl_resource.h"
 #include "virgl_screen.h"
@@ -76,27 +77,46 @@ enum virgl_transfer_map_type
 virgl_resource_transfer_prepare(struct virgl_context *vctx,
                                 struct virgl_transfer *xfer)
 {
-   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
+   struct virgl_screen *vs = virgl_screen(vctx->base.screen);
+   struct virgl_winsys *vws = vs->vws;
    struct virgl_resource *res = virgl_resource(xfer->base.resource);
    enum virgl_transfer_map_type map_type = VIRGL_TRANSFER_MAP_HW_RES;
+   bool unsynchronized = xfer->base.usage & PIPE_TRANSFER_UNSYNCHRONIZED;
+   bool discard = xfer->base.usage & (PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE |
+                                      PIPE_TRANSFER_DISCARD_RANGE);
    bool flush;
    bool readback;
    bool wait;
+   bool copy_transfer;
 
    /* there is no way to map the host storage currently */
    if (xfer->base.usage & PIPE_TRANSFER_MAP_DIRECTLY)
       return VIRGL_TRANSFER_MAP_ERROR;
 
+   /* We break the logic down into four steps
+    *
+    * step 1: determine the required operations independently
+    * step 2: look for chances to skip the operations
+    * step 3: resolve dependencies between the operations
+    * step 4: execute the operations
+    */
+
    flush = virgl_res_needs_flush(vctx, xfer);
    readback = virgl_res_needs_readback(vctx, res, xfer->base.usage,
                                        xfer->base.level);
 
+   /* Check if we should perform a copy transfer through the transfer_uploader. */
+   copy_transfer = discard &&
+                   !readback &&
+                   !unsynchronized &&
+                   vctx->transfer_uploader &&
+                   !vctx->transfer_uploader_in_use &&
+                   (flush || vws->resource_is_busy(vws, res->hw_res));
+
    /* We need to wait for all cmdbufs, current or previous, that access the
-    * resource to finish, unless synchronization is disabled.  Readback, which
-    * is yet another command and is transparent to the state trackers, should
-    * also be waited for.
+    * resource to finish unless synchronization is disabled.
     */
-   wait = !(xfer->base.usage & PIPE_TRANSFER_UNSYNCHRONIZED) || readback;
+   wait = !unsynchronized;
 
    /* When the transfer range consists of only uninitialized data, we can
     * assume the GPU is not accessing the range and readback is unnecessary.
@@ -109,9 +129,33 @@ virgl_resource_transfer_prepare(struct virgl_context *vctx,
       flush = false;
       readback = false;
       wait = false;
+      copy_transfer = false;
    }
 
-   /* XXX This is incorrect.  Consider
+   /* When performing a copy transfer there is no need to flush or wait for
+    * the target resource.
+    */
+   if (copy_transfer) {
+      flush = false;
+      wait = false;
+   }
+
+   /* Readback has implications for the wait and flush requirements. */
+   if (readback) {
+      /* Readback is yet another command and is transparent to the state
+       * trackers.  It should be waited for in all cases, including when
+       * PIPE_TRANSFER_UNSYNCHRONIZED is set.
+       */
+      wait = true;
+
+      /* When the transfer queue has pending writes to this transfer's region,
+       * we have to flush before readback.
+       */
+      if (!flush && virgl_transfer_queue_is_queued(&vctx->queue, xfer))
+         flush = true;
+   }
+
+   /* XXX This is incorrect and will be removed.  Consider
     *
     *   glTexImage2D(..., data1);
     *   glDrawArrays();
@@ -127,21 +171,27 @@ virgl_resource_transfer_prepare(struct virgl_context *vctx,
    if (flush)
       vctx->base.flush(&vctx->base, NULL, 0);
 
+   /* If we are not allowed to block, and we know that we will have to wait,
+    * either because the resource is busy, or because it will become busy due
+    * to a readback, return early to avoid performing an incomplete
+    * transfer_get. Such an incomplete transfer_get may finish at any time,
+    * during which another unsynchronized map could write to the resource
+    * contents, leaving the contents in an undefined state.
+    */
+   if ((xfer->base.usage & PIPE_TRANSFER_DONTBLOCK) &&
+       (readback || (wait && vws->resource_is_busy(vws, res->hw_res))))
+      return VIRGL_TRANSFER_MAP_ERROR;
+
    if (readback) {
       vws->transfer_get(vws, res->hw_res, &xfer->base.box, xfer->base.stride,
                         xfer->l_stride, xfer->offset, xfer->base.level);
    }
 
-   if (wait) {
-      /* fail the mapping after flush and readback so that it will succeed in
-       * the future
-       */
-      if ((xfer->base.usage & PIPE_TRANSFER_DONTBLOCK) &&
-          vws->resource_is_busy(vws, res->hw_res))
-         return VIRGL_TRANSFER_MAP_ERROR;
-
+   if (wait)
       vws->resource_wait(vws, res->hw_res);
-   }
+
+   if (copy_transfer)
+      map_type = VIRGL_TRANSFER_MAP_STAGING;
 
    return map_type;
 }
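
Note: the map type returned by virgl_resource_transfer_prepare is acted on
by the buffer and texture map paths.  A minimal sketch of such a caller,
with hypothetical naming; the real callers live in virgl_buffer.c and
virgl_texture.c and handle more cases:

    /* Hypothetical caller, for illustration only. */
    static void *
    example_transfer_map(struct virgl_context *vctx,
                         struct virgl_transfer *trans)
    {
       struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
       struct virgl_resource *res = virgl_resource(trans->base.resource);
       void *map_addr;

       switch (virgl_resource_transfer_prepare(vctx, trans)) {
       case VIRGL_TRANSFER_MAP_HW_RES:
          /* Map the guest-side storage of the resource directly. */
          map_addr = vws->resource_map(vws, res->hw_res);
          return map_addr ? (char *)map_addr + trans->offset : NULL;
       case VIRGL_TRANSFER_MAP_STAGING:
          /* Write to freshly allocated staging memory instead; the data is
           * copied over to the resource when the transfer is executed. */
          return virgl_transfer_uploader_map(vctx, trans);
       case VIRGL_TRANSFER_MAP_ERROR:
       default:
          return NULL;
       }
    }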
@@ -156,7 +206,7 @@ static struct pipe_resource *virgl_resource_create(struct pipe_screen *screen,
    res->u.b = *templ;
    res->u.b.screen = &vs->base;
    pipe_reference_init(&res->u.b.reference, 1);
-   vbind = pipe_to_virgl_bind(vs, templ->bind);
+   vbind = pipe_to_virgl_bind(vs, templ->bind, templ->flags);
    virgl_resource_layout(&res->u.b, &res->metadata);
    res->hw_res = vs->vws->resource_create(vs->vws, templ->target,
                                           templ->format, vbind,
@@ -378,6 +428,8 @@ virgl_resource_create_transfer(struct slab_child_pool *pool,
    trans->base.layer_stride = metadata->layer_stride[level];
    trans->offset = offset;
    util_range_init(&trans->range);
+   trans->copy_src_res = NULL;
+   trans->copy_src_offset = 0;
 
    if (trans->base.resource->target != PIPE_TEXTURE_3D &&
        trans->base.resource->target != PIPE_TEXTURE_CUBE &&
@@ -394,6 +446,7 @@ virgl_resource_create_transfer(struct slab_child_pool *pool,
 void virgl_resource_destroy_transfer(struct slab_child_pool *pool,
                                      struct virgl_transfer *trans)
 {
+   pipe_resource_reference(&trans->copy_src_res, NULL);
    util_range_destroy(&trans->range);
    slab_free(pool, trans);
 }
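
Note: the copy_src_res management added above follows the standard gallium
ownership idiom.  A simplified sketch of the staging reference's lifetime,
based only on the code in this patch:

    /* virgl_transfer_uploader_map():
     *    u_upload_alloc(..., &vtransfer->copy_src_offset,
     *                   &vtransfer->copy_src_res, &map_addr);
     *    -> on success, copy_src_res holds a reference to the upload buffer
     *
     * virgl_resource_destroy_transfer():
     *    pipe_resource_reference(&trans->copy_src_res, NULL);
     *    -> drops that reference, if any, and clears the pointer
     */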
@@ -435,3 +488,104 @@ void virgl_resource_dirty(struct virgl_resource *res, uint32_t level)
          res->clean_mask &= ~(1 << level);
    }
 }
+
+/* Calculate the minimum size of the memory required to service a resource
+ * transfer map. Also return the stride and layer_stride for the corresponding
+ * layout.
+ */
+static unsigned virgl_transfer_map_size(struct virgl_transfer *vtransfer,
+                                        unsigned *out_stride,
+                                        unsigned *out_layer_stride)
+{
+   struct pipe_resource *pres = vtransfer->base.resource;
+   struct pipe_box *box = &vtransfer->base.box;
+   unsigned stride;
+   unsigned layer_stride;
+   unsigned size;
+
+   assert(out_stride);
+   assert(out_layer_stride);
+
+   stride = util_format_get_stride(pres->format, box->width);
+   layer_stride = util_format_get_2d_size(pres->format, stride, box->height);
+
+   if (pres->target == PIPE_TEXTURE_CUBE ||
+       pres->target == PIPE_TEXTURE_CUBE_ARRAY ||
+       pres->target == PIPE_TEXTURE_3D ||
+       pres->target == PIPE_TEXTURE_2D_ARRAY) {
+      size = box->depth * layer_stride;
+   } else if (pres->target == PIPE_TEXTURE_1D_ARRAY) {
+      size = box->depth * stride;
+   } else {
+      size = layer_stride;
+   }
+
+   *out_stride = stride;
+   *out_layer_stride = layer_stride;
+
+   return size;
+}
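
Note: a worked example of the sizing logic above; the arithmetic assumes a
simple 4-bytes-per-pixel linear format:

    /* A 64x64x4 box of PIPE_FORMAT_R8G8B8A8_UNORM in a PIPE_TEXTURE_2D_ARRAY:
     *
     *    stride       = util_format_get_stride(fmt, 64)       =  64 * 4   = 256
     *    layer_stride = util_format_get_2d_size(fmt, 256, 64) = 256 * 64  = 16384
     *    size         = box->depth * layer_stride             = 4 * 16384 = 65536
     *
     * For PIPE_TEXTURE_1D_ARRAY the layers are rows, so size would instead
     * be box->depth * stride; for a plain 2D texture it is layer_stride.
     */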
+
+/* Maps a region from the transfer uploader to service the transfer. */
+void *virgl_transfer_uploader_map(struct virgl_context *vctx,
+                                  struct virgl_transfer *vtransfer)
+{
+   struct virgl_resource *vres = virgl_resource(vtransfer->base.resource);
+   unsigned size;
+   unsigned align_offset;
+   unsigned stride;
+   unsigned layer_stride;
+   void *map_addr;
+
+   assert(vctx->transfer_uploader);
+   assert(!vctx->transfer_uploader_in_use);
+
+   size = virgl_transfer_map_size(vtransfer, &stride, &layer_stride);
+
+   /* For buffers we need to ensure that the start of the buffer would be
+    * aligned to VIRGL_MAP_BUFFER_ALIGNMENT, even if our transfer doesn't
+    * actually include it. To achieve this we may need to allocate a slightly
+    * larger range from the upload buffer, and later update the uploader
+    * resource offset and map address to point to the requested x coordinate
+    * within that range.
+    *
+    * 0       A       2A      3A
+    * |-------|---bbbb|bbbbb--|
+    *             |--------|    ==> size
+    *         |---|             ==> align_offset
+    *         |------------|    ==> allocation of size + align_offset
+    */
+   align_offset = vres->u.b.target == PIPE_BUFFER ?
+                  vtransfer->base.box.x % VIRGL_MAP_BUFFER_ALIGNMENT :
+                  0;
+
+   u_upload_alloc(vctx->transfer_uploader, 0, size + align_offset,
+                  VIRGL_MAP_BUFFER_ALIGNMENT,
+                  &vtransfer->copy_src_offset,
+                  &vtransfer->copy_src_res, &map_addr);
+   if (map_addr) {
+      /* Update source offset and address to point to the requested x coordinate
+       * if we have an align_offset (see above for more information). */
+      vtransfer->copy_src_offset += align_offset;
+      map_addr += align_offset;
+
+      /* Mark as dirty, since we are updating the host side resource
+       * without going through the corresponding guest side resource, and
+       * hence the two will diverge.
+       */
+      virgl_resource_dirty(vres, vtransfer->base.level);
+
+      /* Note that the pointer returned by u_upload_alloc already has the
+       * allocation offset applied, so only align_offset needed to be added
+       * to map_addr above. */
+      vctx->transfer_uploader_in_use = true;
+
+      /* We are using the minimum required size to hold the contents,
+       * possibly using a layout different from the layout of the resource,
+       * so update the transfer strides accordingly.
+       */
+      vtransfer->base.stride = stride;
+      vtransfer->base.layer_stride = layer_stride;
+   }
+
+   return map_addr;
+}
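
Note: a worked example of the alignment handling above, assuming
VIRGL_MAP_BUFFER_ALIGNMENT is 64 (the actual value is defined by the
driver):

    /* Buffer transfer with box.x == 100 and size == 20:
     *
     *    align_offset = 100 % 64 = 36
     *
     * u_upload_alloc() allocates 20 + 36 bytes at a 64-byte-aligned offset,
     * say copy_src_offset == 128, with map_addr pointing at that offset.
     * After the adjustment:
     *
     *    copy_src_offset = 128 + 36 = 164   (164 % 64 == 100 % 64)
     *    map_addr       += 36
     *
     * The staged data thus has the same placement relative to the alignment
     * boundary as the destination range, and map_addr points at the byte
     * corresponding to box.x. */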