#include "util/u_format.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
+#include "util/u_upload_mgr.h"
#include "virgl_context.h"
#include "virgl_resource.h"
#include "virgl_screen.h"
-bool virgl_res_needs_flush_wait(struct virgl_context *vctx,
- struct virgl_resource *res,
- unsigned usage)
+/* We need to flush to properly sync the transfer with the current cmdbuf.
+ * But there are cases where the flushing can be skipped:
+ *
+ * - synchronization is disabled
+ * - the resource is not referenced by the current cmdbuf
+ */
+static bool virgl_res_needs_flush(struct virgl_context *vctx,
+ struct virgl_transfer *trans)
{
- struct virgl_screen *vs = virgl_screen(vctx->base.screen);
+ struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
+ struct virgl_resource *res = virgl_resource(trans->base.resource);
- if ((!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) && vs->vws->res_is_referenced(vs->vws, vctx->cbuf, res->hw_res)) {
- return true;
- }
- return false;
+ if (trans->base.usage & PIPE_TRANSFER_UNSYNCHRONIZED)
+ return false;
+
+ if (!vws->res_is_referenced(vws, vctx->cbuf, res->hw_res))
+ return false;
+
+ return true;
+}
+
+/* We need to read back from the host storage to make sure the guest storage
+ * is up-to-date. But there are cases where the readback can be skipped:
+ *
+ * - the content can be discarded
+ * - the host storage is read-only
+ *
+ * Note that PIPE_TRANSFER_WRITE without discard bits requires readback.
+ * PIPE_TRANSFER_READ becomes irrelevant. PIPE_TRANSFER_UNSYNCHRONIZED and
+ * PIPE_TRANSFER_FLUSH_EXPLICIT are also irrelevant.
+ */
+static bool virgl_res_needs_readback(struct virgl_context *vctx,
+ struct virgl_resource *res,
+ unsigned usage, unsigned level)
+{
+ if (usage & (PIPE_TRANSFER_DISCARD_RANGE |
+ PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE))
+ return false;
+
+ if (res->clean_mask & (1 << level))
+ return false;
+
+ return true;
}
-bool virgl_res_needs_readback(struct virgl_context *vctx,
- struct virgl_resource *res,
- unsigned usage, unsigned level)
+enum virgl_transfer_map_type
+virgl_resource_transfer_prepare(struct virgl_context *vctx,
+ struct virgl_transfer *xfer)
{
- bool readback = true;
- if (res->clean[level])
- readback = false;
- else if (usage & PIPE_TRANSFER_DISCARD_RANGE)
- readback = false;
- else if ((usage & (PIPE_TRANSFER_WRITE | PIPE_TRANSFER_FLUSH_EXPLICIT)) ==
- (PIPE_TRANSFER_WRITE | PIPE_TRANSFER_FLUSH_EXPLICIT))
+ struct virgl_screen *vs = virgl_screen(vctx->base.screen);
+ struct virgl_winsys *vws = vs->vws;
+ struct virgl_resource *res = virgl_resource(xfer->base.resource);
+ enum virgl_transfer_map_type map_type = VIRGL_TRANSFER_MAP_HW_RES;
+ bool unsynchronized = xfer->base.usage & PIPE_TRANSFER_UNSYNCHRONIZED;
+ bool discard = xfer->base.usage & (PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE |
+ PIPE_TRANSFER_DISCARD_RANGE);
+ bool flush;
+ bool readback;
+ bool wait;
+ bool copy_transfer;
+
+ /* there is no way to map the host storage currently */
+ if (xfer->base.usage & PIPE_TRANSFER_MAP_DIRECTLY)
+ return VIRGL_TRANSFER_MAP_ERROR;
+
+ /* We break the logic down into four steps
+ *
+ * step 1: determine the required operations independently
+ * step 2: look for chances to skip the operations
+ * step 3: resolve dependencies between the operations
+ * step 4: execute the operations
+ */
+
+ flush = virgl_res_needs_flush(vctx, xfer);
+ readback = virgl_res_needs_readback(vctx, res, xfer->base.usage,
+ xfer->base.level);
+
+ /* Check if we should perform a copy transfer through the transfer_uploader:
+ * only when the contents can be discarded, no readback is needed,
+ * synchronization is enabled, the uploader is available and not already in
+ * use, and a direct map would otherwise require a flush or a wait on a busy
+ * resource.
+ */
+ copy_transfer = discard &&
+ !readback &&
+ !unsynchronized &&
+ vctx->transfer_uploader &&
+ !vctx->transfer_uploader_in_use &&
+ (flush || vws->resource_is_busy(vws, res->hw_res));
+
+ /* We need to wait for all cmdbufs, current or previous, that access the
+ * resource to finish unless synchronization is disabled.
+ */
+ wait = !unsynchronized;
+
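+ /* step 2: look for chances to skip the operations */
+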
+ /* When the transfer range consists of only uninitialized data, we can
+ * assume the GPU is not accessing the range and readback is unnecessary.
+ * We can proceed as if PIPE_TRANSFER_UNSYNCHRONIZED and
+ * PIPE_TRANSFER_DISCARD_RANGE are set.
+ */
+ if (res->u.b.target == PIPE_BUFFER &&
+ !util_ranges_intersect(&res->valid_buffer_range, xfer->base.box.x,
+ xfer->base.box.x + xfer->base.box.width)) {
+ flush = false;
readback = false;
- return readback;
+ wait = false;
+ copy_transfer = false;
+ }
+
+ /* When performing a copy transfer there is no need to flush or wait for
+ * the target resource.
+ */
+ if (copy_transfer) {
+ flush = false;
+ wait = false;
+ }
+
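+ /* step 3: resolve dependencies between the operations */
+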
+ /* readback has some implications */
+ if (readback) {
+ /* Readback is yet another command and is transparent to the state
+ * trackers. It should be waited for in all cases, including when
+ * PIPE_TRANSFER_UNSYNCHRONIZED is set.
+ */
+ wait = true;
+
+ /* When the transfer queue has pending writes to this transfer's region,
+ * we have to flush before readback.
+ */
+ if (!flush && virgl_transfer_queue_is_queued(&vctx->queue, xfer))
+ flush = true;
+ }
+
+ /* XXX This is incorrect and will be removed. Consider
+ *
+ * glTexImage2D(..., data1);
+ * glDrawArrays();
+ * glFlush();
+ * glTexImage2D(..., data2);
+ *
+ * readback and flush are both false in the second glTexImage2D call. The
+ * draw call might end up seeing data2. Same applies to buffers with
+ * glBufferSubData.
+ */
+ wait = flush || readback;
+
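+ /* step 4: execute the operations */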
+ if (flush)
+ vctx->base.flush(&vctx->base, NULL, 0);
+
+ /* If we are not allowed to block, and we know that we will have to wait,
+ * either because the resource is busy, or because it will become busy due
+ * to a readback, return early to avoid performing an incomplete
+ * transfer_get. Such an incomplete transfer_get may finish at any time,
+ * during which another unsynchronized map could write to the resource
+ * contents, leaving the contents in an undefined state.
+ */
+ if ((xfer->base.usage & PIPE_TRANSFER_DONTBLOCK) &&
+ (readback || (wait && vws->resource_is_busy(vws, res->hw_res))))
+ return VIRGL_TRANSFER_MAP_ERROR;
+
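+ /* Read the host storage back into the guest storage so the mapping sees
+ * up-to-date contents.
+ */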
+ if (readback) {
+ vws->transfer_get(vws, res->hw_res, &xfer->base.box, xfer->base.stride,
+ xfer->l_stride, xfer->offset, xfer->base.level);
+ }
+
+ if (wait)
+ vws->resource_wait(vws, res->hw_res);
+
+ if (copy_transfer)
+ map_type = VIRGL_TRANSFER_MAP_STAGING;
+
+ return map_type;
}
static struct pipe_resource *virgl_resource_create(struct pipe_screen *screen,
res->u.b = *templ;
res->u.b.screen = &vs->base;
pipe_reference_init(&res->u.b.reference, 1);
- vbind = pipe_to_virgl_bind(templ->bind);
+ vbind = pipe_to_virgl_bind(vs, templ->bind, templ->flags);
virgl_resource_layout(&res->u.b, &res->metadata);
res->hw_res = vs->vws->resource_create(vs->vws, templ->target,
templ->format, vbind,
return NULL;
}
- for (uint32_t i = 0; i < VR_MAX_TEXTURE_2D_LEVELS; i++)
- res->clean[i] = TRUE;
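+ /* All levels start clean: the host has not written to them yet, so no
+ * readback is needed until they are marked dirty. */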
+ res->clean_mask = (1 << VR_MAX_TEXTURE_2D_LEVELS) - 1;
- if (templ->target == PIPE_BUFFER)
+ if (templ->target == PIPE_BUFFER) {
+ util_range_init(&res->valid_buffer_range);
virgl_buffer_init(res);
- else
+ } else {
virgl_texture_init(res);
+ }
return &res->u.b;
screen->resource_destroy = u_resource_destroy_vtbl;
}
+static bool virgl_buffer_transfer_extend(struct pipe_context *ctx,
+ struct pipe_resource *resource,
+ unsigned usage,
+ const struct pipe_box *box,
+ const void *data)
+{
+ struct virgl_context *vctx = virgl_context(ctx);
+ struct virgl_resource *vbuf = virgl_resource(resource);
+ struct virgl_transfer dummy_trans = { 0 };
+ bool flush;
+ struct virgl_transfer *queued;
+
+ /*
+ * Attempts to short-circuit the entire process of mapping and unmapping
+ * a resource if there is an existing transfer that can be extended.
+ * Pessimistically falls back if a flush is required.
+ */
+ dummy_trans.base.resource = resource;
+ dummy_trans.base.usage = usage;
+ dummy_trans.base.box = *box;
+ dummy_trans.base.stride = vbuf->metadata.stride[0];
+ dummy_trans.base.layer_stride = vbuf->metadata.layer_stride[0];
+ dummy_trans.offset = box->x;
+
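+ /* If a flush would be required and the range already contains valid data,
+ * extending a queued transfer is not safe; pessimistically fall back.
+ */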
+ flush = virgl_res_needs_flush(vctx, &dummy_trans);
+ if (flush && util_ranges_intersect(&vbuf->valid_buffer_range,
+ box->x, box->x + box->width))
+ return false;
+
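+ /* Try to extend a previously queued transfer to cover this range; give up
+ * if there is none or its map is no longer available.
+ */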
+ queued = virgl_transfer_queue_extend(&vctx->queue, &dummy_trans);
+ if (!queued || !queued->hw_res_map)
+ return false;
+
+ memcpy(queued->hw_res_map + dummy_trans.offset, data, box->width);
+ util_range_add(&vbuf->valid_buffer_range, box->x, box->x + box->width);
+
+ return true;
+}
+
static void virgl_buffer_subdata(struct pipe_context *pipe,
struct pipe_resource *resource,
unsigned usage, unsigned offset,
unsigned size, const void *data)
{
+ struct pipe_transfer *transfer;
+ uint8_t *map;
struct pipe_box box;
+ assert(!(usage & PIPE_TRANSFER_READ));
+
+ /* the write flag is implied by the nature of buffer_subdata */
+ usage |= PIPE_TRANSFER_WRITE;
+
if (offset == 0 && size == resource->width0)
   usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
else
   usage |= PIPE_TRANSFER_DISCARD_RANGE;

u_box_1d(offset, size, &box);
- if (size >= (VIRGL_MAX_CMDBUF_DWORDS * 4))
- u_default_buffer_subdata(pipe, resource, usage, offset, size, data);
- else
- virgl_transfer_inline_write(pipe, resource, 0, usage, &box, data, 0, 0);
+ if (usage & PIPE_TRANSFER_DISCARD_RANGE &&
+ virgl_buffer_transfer_extend(pipe, resource, usage, &box, data))
+ return;
+
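+ /* Fall back to a regular transfer map, copy, and unmap. */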
+ map = pipe->transfer_map(pipe, resource, 0, usage, &box, &transfer);
+ if (map) {
+ memcpy(map, data, size);
+ pipe_transfer_unmap(pipe, transfer);
+ }
}
void virgl_init_context_resource_functions(struct pipe_context *ctx)
}
struct virgl_transfer *
-virgl_resource_create_transfer(struct pipe_context *ctx,
+virgl_resource_create_transfer(struct slab_child_pool *pool,
struct pipe_resource *pres,
const struct virgl_resource_metadata *metadata,
unsigned level, unsigned usage,
{
struct virgl_transfer *trans;
enum pipe_format format = pres->format;
- struct virgl_context *vctx = virgl_context(ctx);
const unsigned blocksy = box->y / util_format_get_blockheight(format);
const unsigned blocksx = box->x / util_format_get_blockwidth(format);
offset += blocksy * metadata->stride[level];
offset += blocksx * util_format_get_blocksize(format);
- trans = slab_alloc(&vctx->transfer_pool);
+ trans = slab_alloc(pool);
if (!trans)
return NULL;
trans->base.layer_stride = metadata->layer_stride[level];
trans->offset = offset;
util_range_init(&trans->range);
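+ /* No copy-transfer (staging) source yet; these are set when the transfer
+ * is serviced through the transfer uploader. */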
+ trans->copy_src_res = NULL;
+ trans->copy_src_offset = 0;
if (trans->base.resource->target != PIPE_TEXTURE_3D &&
trans->base.resource->target != PIPE_TEXTURE_CUBE &&
return trans;
}
-void virgl_resource_destroy_transfer(struct virgl_context *vctx,
+void virgl_resource_destroy_transfer(struct slab_child_pool *pool,
struct virgl_transfer *trans)
{
+ pipe_resource_reference(&trans->copy_src_res, NULL);
util_range_destroy(&trans->range);
- slab_free(&vctx->transfer_pool, trans);
+ slab_free(pool, trans);
}
void virgl_resource_destroy(struct pipe_screen *screen,
{
struct virgl_screen *vs = virgl_screen(screen);
struct virgl_resource *res = virgl_resource(resource);
+
+ if (res->u.b.target == PIPE_BUFFER)
+ util_range_destroy(&res->valid_buffer_range);
+
vs->vws->resource_unref(vs->vws, res->hw_res);
FREE(res);
}
{
if (res) {
if (res->u.b.target == PIPE_BUFFER)
- res->clean[0] = FALSE;
+ res->clean_mask &= ~1;
else
- res->clean[level] = FALSE;
+ res->clean_mask &= ~(1 << level);
+ }
+}
+
+/* Calculate the minimum size of the memory required to service a resource
+ * transfer map. Also return the stride and layer_stride for the corresponding
+ * layout.
+ */
+static unsigned virgl_transfer_map_size(struct virgl_transfer *vtransfer,
+ unsigned *out_stride,
+ unsigned *out_layer_stride)
+{
+ struct pipe_resource *pres = vtransfer->base.resource;
+ struct pipe_box *box = &vtransfer->base.box;
+ unsigned stride;
+ unsigned layer_stride;
+ unsigned size;
+
+ assert(out_stride);
+ assert(out_layer_stride);
+
+ stride = util_format_get_stride(pres->format, box->width);
+ layer_stride = util_format_get_2d_size(pres->format, stride, box->height);
+
+ if (pres->target == PIPE_TEXTURE_CUBE ||
+ pres->target == PIPE_TEXTURE_CUBE_ARRAY ||
+ pres->target == PIPE_TEXTURE_3D ||
+ pres->target == PIPE_TEXTURE_2D_ARRAY) {
+ size = box->depth * layer_stride;
+ } else if (pres->target == PIPE_TEXTURE_1D_ARRAY) {
+ size = box->depth * stride;
+ } else {
+ size = layer_stride;
+ }
+
+ *out_stride = stride;
+ *out_layer_stride = layer_stride;
+
+ return size;
+}
+
+/* Maps a region from the transfer uploader to service the transfer. */
+void *virgl_transfer_uploader_map(struct virgl_context *vctx,
+ struct virgl_transfer *vtransfer)
+{
+ struct virgl_resource *vres = virgl_resource(vtransfer->base.resource);
+ unsigned size;
+ unsigned align_offset;
+ unsigned stride;
+ unsigned layer_stride;
+ void *map_addr;
+
+ assert(vctx->transfer_uploader);
+ assert(!vctx->transfer_uploader_in_use);
+
+ size = virgl_transfer_map_size(vtransfer, &stride, &layer_stride);
+
+ /* For buffers we need to ensure that the start of the buffer would be
+ * aligned to VIRGL_MAP_BUFFER_ALIGNMENT, even if our transfer doesn't
+ * actually include it. To achieve this we may need to allocate a slightly
+ * larger range from the upload buffer, and later update the uploader
+ * resource offset and map address to point to the requested x coordinate
+ * within that range.
+ *
+ * 0       A       2A      3A
+ * |-------|---bbbb|bbbbb--|
+ *             |--------|      ==> size
+ *         |---|               ==> align_offset
+ *         |------------|      ==> allocation of size + align_offset
+ */
+ align_offset = vres->u.b.target == PIPE_BUFFER ?
+ vtransfer->base.box.x % VIRGL_MAP_BUFFER_ALIGNMENT :
+ 0;
+
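+ /* Allocate a staging range from the uploader; the resulting resource and
+ * offset are recorded in the transfer as the copy source.
+ */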
+ u_upload_alloc(vctx->transfer_uploader, 0, size + align_offset,
+ VIRGL_MAP_BUFFER_ALIGNMENT,
+ &vtransfer->copy_src_offset,
+ &vtransfer->copy_src_res, &map_addr);
+ if (map_addr) {
+ /* Update source offset and address to point to the requested x coordinate
+ * if we have an align_offset (see above for more information). */
+ vtransfer->copy_src_offset += align_offset;
+ map_addr += align_offset;
+
+ /* Mark as dirty, since we are updating the host side resource
+ * without going through the corresponding guest side resource, and
+ * hence the two will diverge.
+ */
+ virgl_resource_dirty(vres, vtransfer->base.level);
+
+ /* The pointer returned by u_upload_alloc already has the allocation
+ * offset applied, so map_addr needs no adjustment beyond the align_offset
+ * added above. Mark the uploader as in use. */
+ vctx->transfer_uploader_in_use = true;
+
+ /* We are using the minimum required size to hold the contents,
+ * possibly using a layout different from the layout of the resource,
+ * so update the transfer strides accordingly.
+ */
+ vtransfer->base.stride = stride;
+ vtransfer->base.layer_stride = layer_stride;
}
+
+ return map_addr;
}