*
* - synchronization is disabled
* - the resource is not referenced by the current cmdbuf
- * - the current cmdbuf has no draw/compute command that accesses the
- * resource (XXX there are also clear or blit commands)
- * - the transfer is to an undefined region and we can assume the current
- * cmdbuf has no command that accesses the region (XXX we cannot just check
- * for overlapping transfers)
*/
static bool virgl_res_needs_flush(struct virgl_context *vctx,
struct virgl_transfer *trans)
if (!vws->res_is_referenced(vws, vctx->cbuf, res->hw_res))
return false;
- if (res->clean_mask & (1 << trans->base.level)) {
- /* XXX Consider
- *
- * glCopyBufferSubData(src, dst, ...);
- * glBufferSubData(src, ...);
- *
- * at the beginning of a cmdbuf. glBufferSubData will be incorrectly
- * reordered before glCopyBufferSubData.
- */
- if (vctx->num_draws == 0 && vctx->num_compute == 0)
- return false;
-
- /* XXX Consider
- *
- * glBufferSubData(GL_ARRAY_BUFFER, 0, sizeof(float) * 3, data1);
- * glFlush();
- * glDrawArrays(GL_TRIANGLES, 0, 3);
- * glBufferSubData(GL_ARRAY_BUFFER, 0, sizeof(float) * 3, data2);
- * glDrawArrays(GL_TRIANGLES, 0, 3);
- *
- * Both draws will see data2.
- */
- if (!virgl_transfer_queue_is_queued(&vctx->queue, trans))
- return false;
- }
-
return true;
}
if (xfer->base.usage & PIPE_TRANSFER_MAP_DIRECTLY)
return VIRGL_TRANSFER_MAP_ERROR;
+ /* We break the logic down into four steps:
+ *
+ * step 1: determine the required operations independently
+ * step 2: look for chances to skip the operations
+ * step 3: resolve dependencies between the operations
+ * step 4: execute the operations
+ */
+
flush = virgl_res_needs_flush(vctx, xfer);
readback = virgl_res_needs_readback(vctx, res, xfer->base.usage,
xfer->base.level);
+ /* Unless synchronization is disabled, we have to wait until every cmdbuf,
+ * current or previous, that accesses the resource has finished.
+ */
+ wait = !(xfer->base.usage & PIPE_TRANSFER_UNSYNCHRONIZED);
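+ /* For example (illustrative only; assumes the usual GL-to-gallium flag
+ * mapping), an unsynchronized mapping such as
+ *
+ * glMapBufferRange(GL_ARRAY_BUFFER, 0, size, GL_MAP_WRITE_BIT |
+ * GL_MAP_UNSYNCHRONIZED_BIT);
+ *
+ * typically reaches us with PIPE_TRANSFER_UNSYNCHRONIZED set and skips
+ * the wait.
+ */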
+
+ /* When the transfer range consists of only uninitialized data, we can
+ * assume the GPU is not accessing the range and readback is unnecessary.
+ * We can proceed as if PIPE_TRANSFER_UNSYNCHRONIZED and
+ * PIPE_TRANSFER_DISCARD_RANGE are set.
+ */
+ if (res->u.b.target == PIPE_BUFFER &&
+ !util_ranges_intersect(&res->valid_buffer_range, xfer->base.box.x,
+ xfer->base.box.x + xfer->base.box.width)) {
+ flush = false;
+ readback = false;
+ wait = false;
+ }
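+ /* For instance (illustrative sketch), mapping a freshly allocated buffer
+ *
+ * glBufferData(GL_ARRAY_BUFFER, size, NULL, GL_STATIC_DRAW);
+ * glMapBufferRange(GL_ARRAY_BUFFER, 0, size, GL_MAP_WRITE_BIT);
+ *
+ * finds valid_buffer_range empty: nothing on the GPU can access the range
+ * yet, so flush, readback, and wait are all skipped.
+ */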
- /* XXX This is incorrect. Consider
+ /* readback forces a wait and may also force a flush */
+ if (readback) {
+ /* Readback is yet another command and is transparent to the state
+ * trackers. It should be waited for in all cases, including when
+ * PIPE_TRANSFER_UNSYNCHRONIZED is set.
+ */
+ wait = true;
+
+ /* When the transfer queue has pending writes to this transfer's region,
+ * we have to flush before readback.
+ */
+ if (!flush && virgl_transfer_queue_is_queued(&vctx->queue, xfer))
+ flush = true;
+ }
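+ /* As an illustration (GL-level sketch; assumes the glBufferSubData write
+ * is still sitting in the transfer queue):
+ *
+ * glBufferSubData(GL_ARRAY_BUFFER, 0, size, data);
+ * glMapBufferRange(GL_ARRAY_BUFFER, 0, size, GL_MAP_READ_BIT);
+ *
+ * The queued write has to be flushed before readback, otherwise the
+ * mapping would not see data.
+ */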
+
+ /* XXX This is incorrect and will be removed. Consider
*
* glTexImage2D(..., data1);
* glDrawArrays();
res->clean_mask = (1 << VR_MAX_TEXTURE_2D_LEVELS) - 1;
- if (templ->target == PIPE_BUFFER)
+ if (templ->target == PIPE_BUFFER) {
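+ /* valid_buffer_range tracks which parts of the buffer have been
+ * initialized with defined data
+ */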
+ util_range_init(&res->valid_buffer_range);
virgl_buffer_init(res);
- else
+ } else {
virgl_texture_init(res);
+ }
return &res->u.b;
dummy_trans.offset = box->x;
flush = virgl_res_needs_flush(vctx, &dummy_trans);
- if (flush)
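+ /* Bail out only when a flush would be required and the write overlaps
+ * data that is already valid; a store to a still-undefined region cannot
+ * be observed by any queued command, so it can go through the transfer
+ * queue without flushing.
+ */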
+ if (flush && util_ranges_intersect(&vbuf->valid_buffer_range,
+ box->x, box->x + box->width))
return false;
queued = virgl_transfer_queue_extend(&vctx->queue, &dummy_trans);
return false;
memcpy(queued->hw_res_map + dummy_trans.offset, data, box->width);
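+ /* the write just made this range hold defined data */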
+ util_range_add(&vbuf->valid_buffer_range, box->x, box->x + box->width);
return true;
}
{
struct virgl_screen *vs = virgl_screen(screen);
struct virgl_resource *res = virgl_resource(resource);
+
+ if (res->u.b.target == PIPE_BUFFER)
+ util_range_destroy(&res->valid_buffer_range);
+
vs->vws->resource_unref(vs->vws, res->hw_res);
FREE(res);
}