return NULL;
}
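+ /* A write mapping may initialize the mapped box, so record it as valid. */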
+ if (usage & PIPE_TRANSFER_WRITE)
+ util_range_add(&vbuf->valid_buffer_range, box->x, box->x + box->width);
+
*transfer = &trans->base;
return trans->hw_res_map + trans->offset;
}
struct virgl_resource *dres = virgl_resource(dst);
struct virgl_resource *sres = virgl_resource(src);
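+ /* A copy into a buffer initializes the destination range. */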
+ if (dres->u.b.target == PIPE_BUFFER)
+ util_range_add(&dres->valid_buffer_range, dstx, dstx + src_box->width);
virgl_resource_dirty(dres, dst_level);
+
virgl_encode_resource_copy_region(vctx, dres,
dst_level, dstx, dsty, dstz,
sres, src_level,
virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_offset);
virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_size);
virgl_encoder_write_res(ctx, res);
+
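+ /* The host may write to the bound range, so treat all of it as valid. */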
+ util_range_add(&res->valid_buffer_range, buffers[i].buffer_offset,
+ buffers[i].buffer_offset + buffers[i].buffer_size);
virgl_resource_dirty(res, 0);
} else {
virgl_encoder_write_dword(ctx->cbuf, 0);
virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_offset);
virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_size);
virgl_encoder_write_res(ctx, res);
+
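+ /* The bound range may be written by the host; mark it valid. */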
+ util_range_add(&res->valid_buffer_range, buffers[i].buffer_offset,
+ buffers[i].buffer_offset + buffers[i].buffer_size);
virgl_resource_dirty(res, 0);
} else {
virgl_encoder_write_dword(ctx->cbuf, 0);
virgl_encoder_write_dword(ctx->cbuf, images[i].u.buf.offset);
virgl_encoder_write_dword(ctx->cbuf, images[i].u.buf.size);
virgl_encoder_write_res(ctx, res);
+
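+ /* Buffer images are host-writable; mark the bound range valid. */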
+ if (res->u.b.target == PIPE_BUFFER) {
+ util_range_add(&res->valid_buffer_range, images[i].u.buf.offset,
+ images[i].u.buf.offset + images[i].u.buf.size);
+ }
virgl_resource_dirty(res, images[i].u.tex.level);
} else {
virgl_encoder_write_dword(ctx->cbuf, 0);
query->result_size = (query_type == PIPE_QUERY_TIMESTAMP ||
query_type == PIPE_QUERY_TIME_ELAPSED) ? 8 : 4;
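+ /* The host writes the query state into query->buf; mark it valid and dirty. */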
+ util_range_add(&query->buf->valid_buffer_range, 0,
+ sizeof(struct virgl_host_query_state));
+ virgl_resource_dirty(query->buf, 0);
+
virgl_encoder_create_query(vctx, query->handle,
pipe_to_virgl_query(query_type), index, query->buf, 0);
return false;
host_state->query_state = VIRGL_QUERY_STATE_WAIT_HOST;
- virgl_resource_dirty(query->buf, 0);
query->ready = false;
virgl_encoder_end_query(vctx, query->handle);
* - the resource is not referenced by the current cmdbuf
* - the current cmdbuf has no draw/compute command that accesses the
* resource (XXX there are also clear or blit commands)
- * - the transfer is to an undefined region and we can assume the current
- * cmdbuf has no command that accesses the region (XXX we cannot just check
- * for overlapping transfers)
*/
static bool virgl_res_needs_flush(struct virgl_context *vctx,
struct virgl_transfer *trans)
*/
if (vctx->num_draws == 0 && vctx->num_compute == 0)
return false;
-
- /* XXX Consider
- *
- * glBufferSubData(GL_ARRAY_BUFFER, 0, sizeof(float) * 3, data1);
- * glFlush();
- * glDrawArrays(GL_TRIANGLES, 0, 3);
- * glBufferSubData(GL_ARRAY_BUFFER, 0, sizeof(float) * 3, data2);
- * glDrawArrays(GL_TRIANGLES, 0, 3);
- *
- * Both draws will see data2.
- */
- if (!virgl_transfer_queue_is_queued(&vctx->queue, trans))
- return false;
}
return true;
readback = virgl_res_needs_readback(vctx, res, xfer->base.usage,
xfer->base.level);
+ /* We need to wait for all cmdbufs, current or previous, that access the
+ * resource to finish, unless synchronization is disabled. A readback is
+ * yet another command, issued transparently to the state trackers, so it
+ * must be waited for as well.
+ */
+ wait = !(xfer->base.usage & PIPE_TRANSFER_UNSYNCHRONIZED) || readback;
+
+ /* When the transfer range consists of only uninitialized data, we can
+ * assume the GPU is not accessing the range and readback is unnecessary.
+ * We can proceed as if PIPE_TRANSFER_UNSYNCHRONIZED and
+ * PIPE_TRANSFER_DISCARD_RANGE are set.
+ */
+ if (res->u.b.target == PIPE_BUFFER &&
+ !util_ranges_intersect(&res->valid_buffer_range, xfer->base.box.x,
+ xfer->base.box.x + xfer->base.box.width)) {
+ flush = false;
+ readback = false;
+ wait = false;
+ }
+
/* XXX This is incorrect. Consider
*
* glTexImage2D(..., data1);
res->clean_mask = (1 << VR_MAX_TEXTURE_2D_LEVELS) - 1;
- if (templ->target == PIPE_BUFFER)
+ if (templ->target == PIPE_BUFFER) {
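+ /* A new buffer starts with no valid data; the range grows as it is written. */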
+ util_range_init(&res->valid_buffer_range);
virgl_buffer_init(res);
- else
+ } else {
virgl_texture_init(res);
+ }
return &res->u.b;
dummy_trans.offset = box->x;
flush = virgl_res_needs_flush(vctx, &dummy_trans);
- if (flush)
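+ /* Writes to a range that was never initialized cannot conflict with the
+ * current cmdbuf, so fall back only when the ranges intersect.
+ */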
+ if (flush && util_ranges_intersect(&vbuf->valid_buffer_range,
+ box->x, box->x + box->width))
return false;
queued = virgl_transfer_queue_extend(&vctx->queue, &dummy_trans);
return false;
memcpy(queued->hw_res_map + dummy_trans.offset, data, box->width);
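+ /* The inline write above initializes this range. */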
+ util_range_add(&vbuf->valid_buffer_range, box->x, box->x + box->width);
return true;
}
{
struct virgl_screen *vs = virgl_screen(screen);
struct virgl_resource *res = virgl_resource(resource);
+
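+ /* Only buffers initialize valid_buffer_range; release it here. */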
+ if (res->u.b.target == PIPE_BUFFER)
+ util_range_destroy(&res->valid_buffer_range);
+
vs->vws->resource_unref(vs->vws, res->hw_res);
FREE(res);
}
uint16_t clean_mask;
struct virgl_hw_res *hw_res;
struct virgl_resource_metadata metadata;
+
+ /* For PIPE_BUFFER only. Data outside of this range are uninitialized. */
+ struct util_range valid_buffer_range;
};
enum virgl_transfer_map_type {
t->base.buffer_offset = buffer_offset;
t->base.buffer_size = buffer_size;
t->handle = handle;
+
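+ /* Stream output writes into the target range; mark it valid and dirty. */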
+ util_range_add(&res->valid_buffer_range, buffer_offset,
+ buffer_offset + buffer_size);
virgl_resource_dirty(res, 0);
+
virgl_encoder_create_so_target(vctx, handle, res, buffer_offset, buffer_size);
return &t->base;
}