virgl: Hide internal virgl_resource functions
author     Alexandros Frantzis <alexandros.frantzis@collabora.com>
           Fri, 5 Jul 2019 13:08:43 +0000 (16:08 +0300)
committer  Chia-I Wu <olvaffe@gmail.com>
           Sun, 7 Jul 2019 02:30:38 +0000 (19:30 -0700)
Since the transition to virgl_resource_transfer_map(), several
virgl_resource functions no longer need to be public.

We also move these functions earlier in the file so they can be used
without forward declarations.

Signed-off-by: Alexandros Frantzis <alexandros.frantzis@collabora.com>
Reviewed-by: Chia-I Wu <olvaffe@gmail.com>
src/gallium/drivers/virgl/virgl_resource.c
src/gallium/drivers/virgl/virgl_resource.h
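
For context, a minimal sketch (with hypothetical names, not taken from the
virgl sources) of the C behaviour the message refers to: a static helper
defined after its caller needs a forward declaration, while defining it
before the caller, as this commit does for the moved functions, needs none.

/* Hypothetical file-scope helper; not virgl code.
 * Because it is defined before its caller, no forward declaration such as
 * "static int helper(int x);" is needed anywhere in the file. */
static int helper(int x)
{
   return x * 2;
}

int caller(int x)
{
   /* Usable here because the definition above already serves as the
    * declaration; moving helper() below caller() would require one. */
   return helper(x) + 1;
}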

index 7e855a10e0c7485d6897b43a78a1097e8bb8edc1..c8e0d23fe12d620c7bede9cfd5eec870aa3fb33c 100644 (file)
  */
 #define VIRGL_QUEUED_STAGING_RES_SIZE_LIMIT (128 * 1024 * 1024)
 
+enum virgl_transfer_map_type {
+   VIRGL_TRANSFER_MAP_ERROR = -1,
+   VIRGL_TRANSFER_MAP_HW_RES,
+
+   /* Map a range of a staging buffer. The updated contents should be transferred
+    * with a copy transfer.
+    */
+   VIRGL_TRANSFER_MAP_STAGING,
+
+   /* Reallocate the underlying virgl_hw_res. */
+   VIRGL_TRANSFER_MAP_REALLOC,
+};
+
 /* We need to flush to properly sync the transfer with the current cmdbuf.
  * But there are cases where the flushing can be skipped:
  *
@@ -80,7 +93,7 @@ static bool virgl_res_needs_readback(struct virgl_context *vctx,
    return true;
 }
 
-enum virgl_transfer_map_type
+static enum virgl_transfer_map_type
 virgl_resource_transfer_prepare(struct virgl_context *vctx,
                                 struct virgl_transfer *xfer)
 {
@@ -213,6 +226,149 @@ virgl_resource_transfer_prepare(struct virgl_context *vctx,
    return map_type;
 }
 
+/* Calculate the minimum size of the memory required to service a resource
+ * transfer map. Also return the stride and layer_stride for the corresponding
+ * layout.
+ */
+static unsigned
+virgl_transfer_map_size(struct virgl_transfer *vtransfer,
+                        unsigned *out_stride,
+                        unsigned *out_layer_stride)
+{
+   struct pipe_resource *pres = vtransfer->base.resource;
+   struct pipe_box *box = &vtransfer->base.box;
+   unsigned stride;
+   unsigned layer_stride;
+   unsigned size;
+
+   assert(out_stride);
+   assert(out_layer_stride);
+
+   stride = util_format_get_stride(pres->format, box->width);
+   layer_stride = util_format_get_2d_size(pres->format, stride, box->height);
+
+   if (pres->target == PIPE_TEXTURE_CUBE ||
+       pres->target == PIPE_TEXTURE_CUBE_ARRAY ||
+       pres->target == PIPE_TEXTURE_3D ||
+       pres->target == PIPE_TEXTURE_2D_ARRAY) {
+      size = box->depth * layer_stride;
+   } else if (pres->target == PIPE_TEXTURE_1D_ARRAY) {
+      size = box->depth * stride;
+   } else {
+      size = layer_stride;
+   }
+
+   *out_stride = stride;
+   *out_layer_stride = layer_stride;
+
+   return size;
+}
+
+/* Maps a region from staging to service the transfer. */
+static void *
+virgl_staging_map(struct virgl_context *vctx,
+                  struct virgl_transfer *vtransfer)
+{
+   struct virgl_resource *vres = virgl_resource(vtransfer->base.resource);
+   unsigned size;
+   unsigned align_offset;
+   unsigned stride;
+   unsigned layer_stride;
+   void *map_addr;
+   bool alloc_succeeded;
+
+   assert(vctx->supports_staging);
+
+   size = virgl_transfer_map_size(vtransfer, &stride, &layer_stride);
+
+   /* For buffers we need to ensure that the start of the buffer would be
+    * aligned to VIRGL_MAP_BUFFER_ALIGNMENT, even if our transfer doesn't
+    * actually include it. To achieve this we may need to allocate a slightly
+    * larger range from the upload buffer, and later update the uploader
+    * resource offset and map address to point to the requested x coordinate
+    * within that range.
+    *
+    * 0       A       2A      3A
+    * |-------|---bbbb|bbbbb--|
+    *             |--------|    ==> size
+    *         |---|             ==> align_offset
+    *         |------------|    ==> allocation of size + align_offset
+    */
+   align_offset = vres->u.b.target == PIPE_BUFFER ?
+                  vtransfer->base.box.x % VIRGL_MAP_BUFFER_ALIGNMENT :
+                  0;
+
+   alloc_succeeded =
+      virgl_staging_alloc(&vctx->staging, size + align_offset,
+                          VIRGL_MAP_BUFFER_ALIGNMENT,
+                          &vtransfer->copy_src_offset,
+                          &vtransfer->copy_src_hw_res,
+                          &map_addr);
+   if (alloc_succeeded) {
+      /* Update source offset and address to point to the requested x coordinate
+       * if we have an align_offset (see above for more information). */
+      vtransfer->copy_src_offset += align_offset;
+      map_addr += align_offset;
+
+      /* Mark as dirty, since we are updating the host side resource
+       * without going through the corresponding guest side resource, and
+       * hence the two will diverge.
+       */
+      virgl_resource_dirty(vres, vtransfer->base.level);
+
+      /* We are using the minimum required size to hold the contents,
+       * possibly using a layout different from the layout of the resource,
+       * so update the transfer strides accordingly.
+       */
+      vtransfer->base.stride = stride;
+      vtransfer->base.layer_stride = layer_stride;
+
+      /* Track the total size of active staging resources. */
+      vctx->queued_staging_res_size += size + align_offset;
+   }
+
+   return map_addr;
+}
+
+static bool
+virgl_resource_realloc(struct virgl_context *vctx, struct virgl_resource *res)
+{
+   struct virgl_screen *vs = virgl_screen(vctx->base.screen);
+   const struct pipe_resource *templ = &res->u.b;
+   unsigned vbind;
+   struct virgl_hw_res *hw_res;
+
+   vbind = pipe_to_virgl_bind(vs, templ->bind, templ->flags);
+   hw_res = vs->vws->resource_create(vs->vws,
+                                     templ->target,
+                                     templ->format,
+                                     vbind,
+                                     templ->width0,
+                                     templ->height0,
+                                     templ->depth0,
+                                     templ->array_size,
+                                     templ->last_level,
+                                     templ->nr_samples,
+                                     res->metadata.total_size);
+   if (!hw_res)
+      return false;
+
+   vs->vws->resource_reference(vs->vws, &res->hw_res, NULL);
+   res->hw_res = hw_res;
+
+   /* We can safely clear the range here, since it will be repopulated in the
+    * following rebind operation, according to the active buffer binds.
+    */
+   util_range_set_empty(&res->valid_buffer_range);
+
+   /* count toward the staging resource size limit */
+   vctx->queued_staging_res_size += res->metadata.total_size;
+
+   virgl_rebind_resource(vctx, &res->u.b);
+
+   return true;
+}
+
 void *
 virgl_resource_transfer_map(struct pipe_context *ctx,
                             struct pipe_resource *resource,
@@ -611,144 +767,3 @@ void virgl_resource_dirty(struct virgl_resource *res, uint32_t level)
          res->clean_mask &= ~(1 << level);
    }
 }
-
-/* Calculate the minimum size of the memory required to service a resource
- * transfer map. Also return the stride and layer_stride for the corresponding
- * layout.
- */
-static unsigned virgl_transfer_map_size(struct virgl_transfer *vtransfer,
-                                        unsigned *out_stride,
-                                        unsigned *out_layer_stride)
-{
-   struct pipe_resource *pres = vtransfer->base.resource;
-   struct pipe_box *box = &vtransfer->base.box;
-   unsigned stride;
-   unsigned layer_stride;
-   unsigned size;
-
-   assert(out_stride);
-   assert(out_layer_stride);
-
-   stride = util_format_get_stride(pres->format, box->width);
-   layer_stride = util_format_get_2d_size(pres->format, stride, box->height);
-
-   if (pres->target == PIPE_TEXTURE_CUBE ||
-       pres->target == PIPE_TEXTURE_CUBE_ARRAY ||
-       pres->target == PIPE_TEXTURE_3D ||
-       pres->target == PIPE_TEXTURE_2D_ARRAY) {
-      size = box->depth * layer_stride;
-   } else if (pres->target == PIPE_TEXTURE_1D_ARRAY) {
-      size = box->depth * stride;
-   } else {
-      size = layer_stride;
-   }
-
-   *out_stride = stride;
-   *out_layer_stride = layer_stride;
-
-   return size;
-}
-
-/* Maps a region from staging to service the transfer. */
-void *virgl_staging_map(struct virgl_context *vctx,
-                        struct virgl_transfer *vtransfer)
-{
-   struct virgl_resource *vres = virgl_resource(vtransfer->base.resource);
-   unsigned size;
-   unsigned align_offset;
-   unsigned stride;
-   unsigned layer_stride;
-   void *map_addr;
-   bool alloc_succeeded;
-
-   assert(vctx->supports_staging);
-
-   size = virgl_transfer_map_size(vtransfer, &stride, &layer_stride);
-
-   /* For buffers we need to ensure that the start of the buffer would be
-    * aligned to VIRGL_MAP_BUFFER_ALIGNMENT, even if our transfer doesn't
-    * actually include it. To achieve this we may need to allocate a slightly
-    * larger range from the upload buffer, and later update the uploader
-    * resource offset and map address to point to the requested x coordinate
-    * within that range.
-    *
-    * 0       A       2A      3A
-    * |-------|---bbbb|bbbbb--|
-    *             |--------|    ==> size
-    *         |---|             ==> align_offset
-    *         |------------|    ==> allocation of size + align_offset
-    */
-   align_offset = vres->u.b.target == PIPE_BUFFER ?
-                  vtransfer->base.box.x % VIRGL_MAP_BUFFER_ALIGNMENT :
-                  0;
-
-   alloc_succeeded =
-      virgl_staging_alloc(&vctx->staging, size + align_offset,
-                          VIRGL_MAP_BUFFER_ALIGNMENT,
-                          &vtransfer->copy_src_offset,
-                          &vtransfer->copy_src_hw_res,
-                          &map_addr);
-   if (alloc_succeeded) {
-      /* Update source offset and address to point to the requested x coordinate
-       * if we have an align_offset (see above for more information). */
-      vtransfer->copy_src_offset += align_offset;
-      map_addr += align_offset;
-
-      /* Mark as dirty, since we are updating the host side resource
-       * without going through the corresponding guest side resource, and
-       * hence the two will diverge.
-       */
-      virgl_resource_dirty(vres, vtransfer->base.level);
-
-      /* We are using the minimum required size to hold the contents,
-       * possibly using a layout different from the layout of the resource,
-       * so update the transfer strides accordingly.
-       */
-      vtransfer->base.stride = stride;
-      vtransfer->base.layer_stride = layer_stride;
-
-      /* Track the total size of active staging resources. */
-      vctx->queued_staging_res_size += size + align_offset;
-   }
-
-   return map_addr;
-}
-
-bool
-virgl_resource_realloc(struct virgl_context *vctx, struct virgl_resource *res)
-{
-   struct virgl_screen *vs = virgl_screen(vctx->base.screen);
-   const struct pipe_resource *templ = &res->u.b;
-   unsigned vbind;
-   struct virgl_hw_res *hw_res;
-
-   vbind = pipe_to_virgl_bind(vs, templ->bind, templ->flags);
-   hw_res = vs->vws->resource_create(vs->vws,
-                                     templ->target,
-                                     templ->format,
-                                     vbind,
-                                     templ->width0,
-                                     templ->height0,
-                                     templ->depth0,
-                                     templ->array_size,
-                                     templ->last_level,
-                                     templ->nr_samples,
-                                     res->metadata.total_size);
-   if (!hw_res)
-      return false;
-
-   vs->vws->resource_reference(vs->vws, &res->hw_res, NULL);
-   res->hw_res = hw_res;
-
-   /* We can safely clear the range here, since it will be repopulated in the
-    * following rebind operation, according to the active buffer binds.
-    */
-   util_range_set_empty(&res->valid_buffer_range);
-
-   /* count toward the staging resource size limit */
-   vctx->queued_staging_res_size += res->metadata.total_size;
-
-   virgl_rebind_resource(vctx, &res->u.b);
-
-   return true;
-}
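
A worked example of the align_offset arithmetic in virgl_staging_map above
may help; the concrete numbers below are assumptions for illustration (in
particular, VIRGL_MAP_BUFFER_ALIGNMENT is taken to be 64, a value this diff
does not show).

/* Hypothetical numbers illustrating the buffer alignment handling in
 * virgl_staging_map; the alignment value is an assumption. */
#include <assert.h>

int main(void)
{
   const unsigned alignment = 64;   /* stand-in for VIRGL_MAP_BUFFER_ALIGNMENT */
   const unsigned box_x = 70;       /* start of the buffer transfer (box.x) */
   const unsigned size = 100;       /* bytes needed by the transfer itself */

   unsigned align_offset = box_x % alignment;   /* 70 % 64 == 6 */
   unsigned alloc_size = size + align_offset;   /* 106 bytes requested */

   /* The staging allocation starts at an aligned offset; advancing
    * copy_src_offset and map_addr by align_offset makes the mapping begin
    * exactly at box.x while the allocation itself stays aligned. */
   assert(align_offset == 6);
   assert(alloc_size == 106);
   return 0;
}
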
index 1e8eb4068b9084af702e2de00d21509467a86a22..95ed3e8ef139cfdbfb7d514072ce93284a909c75 100644 (file)
@@ -64,19 +64,6 @@ struct virgl_resource {
    unsigned bind_history;
 };
 
-enum virgl_transfer_map_type {
-   VIRGL_TRANSFER_MAP_ERROR = -1,
-   VIRGL_TRANSFER_MAP_HW_RES,
-
-   /* Map a range of a staging buffer. The updated contents should be transferred
-    * with a copy transfer.
-    */
-   VIRGL_TRANSFER_MAP_STAGING,
-
-   /* Reallocate the underlying virgl_hw_res. */
-   VIRGL_TRANSFER_MAP_REALLOC,
-};
-
 struct virgl_transfer {
    struct pipe_transfer base;
    uint32_t offset, l_stride;
@@ -160,10 +147,6 @@ static inline unsigned pipe_to_virgl_bind(const struct virgl_screen *vs,
    return outbind;
 }
 
-enum virgl_transfer_map_type
-virgl_resource_transfer_prepare(struct virgl_context *vctx,
-                                struct virgl_transfer *xfer);
-
 void *
 virgl_resource_transfer_map(struct pipe_context *ctx,
                             struct pipe_resource *resource,
@@ -194,11 +177,4 @@ boolean virgl_resource_get_handle(struct pipe_screen *screen,
 
 void virgl_resource_dirty(struct virgl_resource *res, uint32_t level);
 
-void *virgl_staging_map(struct virgl_context *vctx,
-                        struct virgl_transfer *vtransfer);
-
-bool
-virgl_resource_realloc(struct virgl_context *vctx,
-                       struct virgl_resource *res);
-
 #endif