xfer->base.level);
/* Check if we should perform a copy transfer through the transfer_uploader. */
- copy_transfer = res->u.b.target == PIPE_BUFFER &&
- discard &&
+ copy_transfer = discard &&
!readback &&
!unsynchronized &&
vctx->transfer_uploader &&
}
}
+/* Calculate the minimum size of the memory required to service a resource
+ * transfer map. Also return the stride and layer_stride for the corresponding
+ * layout.
+ *
+ * NOTE(review): the strides come from the util_format helpers, so the size
+ * describes a tightly-packed staging layout for the transfer box, which may
+ * differ from the resource's own layout — callers must use the returned
+ * strides, not the resource's.
+ */
+static unsigned virgl_transfer_map_size(struct virgl_transfer *vtransfer,
+ unsigned *out_stride,
+ unsigned *out_layer_stride)
+{
+ struct pipe_resource *pres = vtransfer->base.resource;
+ struct pipe_box *box = &vtransfer->base.box;
+ unsigned stride;
+ unsigned layer_stride;
+ unsigned size;
+
+ /* Both output parameters are mandatory. */
+ assert(out_stride);
+ assert(out_layer_stride);
+
+ /* Packed row size and 2D-slice size, in bytes, for the transfer box. */
+ stride = util_format_get_stride(pres->format, box->width);
+ layer_stride = util_format_get_2d_size(pres->format, stride, box->height);
+
+ /* Targets with multiple 2D layers (or 3D depth slices) need
+ * depth * layer_stride; a 1D array stores one row per layer, so it only
+ * needs depth * stride; everything else fits in a single 2D slice.
+ */
+ if (pres->target == PIPE_TEXTURE_CUBE ||
+ pres->target == PIPE_TEXTURE_CUBE_ARRAY ||
+ pres->target == PIPE_TEXTURE_3D ||
+ pres->target == PIPE_TEXTURE_2D_ARRAY) {
+ size = box->depth * layer_stride;
+ } else if (pres->target == PIPE_TEXTURE_1D_ARRAY) {
+ size = box->depth * stride;
+ } else {
+ size = layer_stride;
+ }
+
+ *out_stride = stride;
+ *out_layer_stride = layer_stride;
+
+ return size;
+}
+
+/* Maps a region from the transfer uploader to service the transfer. */
void *virgl_transfer_uploader_map(struct virgl_context *vctx,
struct virgl_transfer *vtransfer)
{
struct virgl_resource *vres = virgl_resource(vtransfer->base.resource);
unsigned size;
unsigned align_offset;
+ unsigned stride;
+ unsigned layer_stride;
void *map_addr;
assert(vctx->transfer_uploader);
assert(!vctx->transfer_uploader_in_use);
- size = vtransfer->base.box.width;
+ size = virgl_transfer_map_size(vtransfer, &stride, &layer_stride);
/* For buffers we need to ensure that the start of the buffer would be
* aligned to VIRGL_MAP_BUFFER_ALIGNMENT, even if our transfer doesn't
* |---| ==> align_offset
* |------------| ==> allocation of size + align_offset
*/
- align_offset = vtransfer->base.box.x % VIRGL_MAP_BUFFER_ALIGNMENT;
+ align_offset = vres->u.b.target == PIPE_BUFFER ?
+ vtransfer->base.box.x % VIRGL_MAP_BUFFER_ALIGNMENT :
+ 0;
u_upload_alloc(vctx->transfer_uploader, 0, size + align_offset,
VIRGL_MAP_BUFFER_ALIGNMENT,
/* The pointer returned by u_upload_alloc already has +offset
* applied. */
vctx->transfer_uploader_in_use = true;
+
+ /* We are using the minimum required size to hold the contents,
+ * possibly using a layout different from the layout of the resource,
+ * so update the transfer strides accordingly.
+ */
+ vtransfer->base.stride = stride;
+ vtransfer->base.layer_stride = layer_stride;
}
return map_addr;
#include "util/u_memory.h"
#include "virgl_context.h"
+#include "virgl_encode.h"
#include "virgl_resource.h"
#include "virgl_screen.h"
struct virgl_resource *vtex = virgl_resource(resource);
struct virgl_transfer *trans;
enum virgl_transfer_map_type map_type;
+ void *map_addr;
trans = virgl_resource_create_transfer(&vctx->transfer_pool, resource,
&vtex->metadata, level, usage, box);
switch (map_type) {
case VIRGL_TRANSFER_MAP_HW_RES:
trans->hw_res_map = vws->resource_map(vws, vtex->hw_res);
+ if (trans->hw_res_map)
+ map_addr = trans->hw_res_map + trans->offset;
+ else
+ map_addr = NULL;
+ break;
+ case VIRGL_TRANSFER_MAP_STAGING:
+ map_addr = virgl_transfer_uploader_map(vctx, trans);
+ /* Copy transfers don't make use of hw_res_map at the moment. */
+ trans->hw_res_map = NULL;
break;
case VIRGL_TRANSFER_MAP_ERROR:
default:
trans->hw_res_map = NULL;
+ map_addr = NULL;
break;
}
- if (!trans->hw_res_map) {
+ if (!map_addr) {
virgl_resource_destroy_transfer(&vctx->transfer_pool, trans);
return NULL;
}
*transfer = &trans->base;
- return trans->hw_res_map + trans->offset;
+ return map_addr;
}
static void *texture_transfer_map_resolve(struct pipe_context *ctx,
{
struct virgl_context *vctx = virgl_context(ctx);
struct virgl_transfer *trans = virgl_transfer(transfer);
+ struct virgl_screen *vs = virgl_screen(ctx->screen);
+ struct pipe_resource *res = transfer->resource;
bool queue_unmap = false;
+ /* We don't need to transfer the contents of staging buffers, since they
+ * don't have any host-side storage. */
+ if (pipe_to_virgl_bind(vs, res->bind, res->flags) == VIRGL_BIND_STAGING) {
+ virgl_resource_destroy_transfer(&vctx->transfer_pool, trans);
+ return;
+ }
+
if (transfer->usage & PIPE_TRANSFER_WRITE &&
(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT) == 0) {
virgl_transfer(trans->resolve_transfer));
}
- if (queue_unmap)
- virgl_transfer_queue_unmap(&vctx->queue, trans);
- else
+ if (queue_unmap) {
+ if (trans->copy_src_res) {
+ virgl_encode_copy_transfer(vctx, trans);
+ /* It's now safe for other mappings to use the transfer_uploader. */
+ vctx->transfer_uploader_in_use = false;
+ virgl_resource_destroy_transfer(&vctx->transfer_pool, trans);
+ } else {
+ virgl_transfer_queue_unmap(&vctx->queue, trans);
+ }
+ } else {
virgl_resource_destroy_transfer(&vctx->transfer_pool, trans);
+ }
+
}
static const struct u_resource_vtbl virgl_texture_vtbl =