Any driver can implement this simple and efficient optimization.
Team Fortress 2 always hits it. The DISCARD_RANGE codepath is not even used
with TF2 anymore, so we avoid a lot of unnecessary buffer copies.
Tested-by: Andreas Boll <andreas.boll.dev@gmail.com>
NOTE: This is a candidate for the 9.1 branch.
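
The whole optimization boils down to two operations on a per-buffer
"initialized" range, kept as a single bounding interval: grow it on every
write (CPU transfer, streamout, DMA, RAT), and at map time promote a write
mapping that does not intersect it to an unsynchronized mapping. The patch
uses gallium's util_range helpers from util/u_range.h for this; the sketch
below shows the same logic standalone, with hypothetical names
(struct valid_range, range_add, adjust_map_usage, ...) rather than the
driver's actual functions.

    #include <stdbool.h>

    #define MAP_WRITE          (1u << 0)
    #define MAP_UNSYNCHRONIZED (1u << 1)

    /* Bounding interval of all bytes ever written; empty while start >= end. */
    struct valid_range {
       unsigned start, end;
    };

    static void range_add(struct valid_range *r, unsigned start, unsigned end)
    {
       if (r->start >= r->end) {
          /* Previously empty: take the new interval as-is. */
          r->start = start;
          r->end = end;
       } else {
          /* Grow to the bounding interval of the union. */
          if (start < r->start)
             r->start = start;
          if (end > r->end)
             r->end = end;
       }
    }

    static bool ranges_intersect(const struct valid_range *r,
                                 unsigned start, unsigned end)
    {
       return r->start < r->end && start < r->end && end > r->start;
    }

    /* At map time: writing to a range the GPU has never touched cannot
     * conflict with in-flight GPU work, so the wait can be skipped. */
    static unsigned adjust_map_usage(const struct valid_range *valid,
                                     unsigned offset, unsigned size,
                                     unsigned usage)
    {
       if (!(usage & MAP_UNSYNCHRONIZED) &&
           (usage & MAP_WRITE) &&
           !ranges_intersect(valid, offset, offset + size))
          usage |= MAP_UNSYNCHRONIZED;
       return usage;
    }

    /* After any write (CPU map, streamout, DMA, RAT): remember the range. */
    static void mark_initialized(struct valid_range *valid,
                                 unsigned offset, unsigned size)
    {
       range_add(valid, offset, offset + size);
    }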
src_offset += csize << shift;
size -= csize;
}
+
+ util_range_add(&rdst->valid_buffer_range, dst_offset,
+ dst_offset + size);
}
* elements. */
surf->cb_color_dim = pipe_buffer->width0;
+ /* Set the buffer range the GPU will have access to: */
+ util_range_add(&r600_resource(pipe_buffer)->valid_buffer_range,
+ 0, pipe_buffer->width0);
+
surf->cb_color_cmask = surf->cb_color_base;
surf->cb_color_cmask_slice = 0;
surf->cb_color_fmask = surf->cb_color_base;
#include "../../winsys/radeon/drm/radeon_winsys.h"
#include "util/u_double_list.h"
+#include "util/u_range.h"
#include "util/u_transfer.h"
#define R600_ERR(fmt, args...) \
/* Resource state. */
unsigned domains;
+
+ /* The buffer range which is initialized (with a write transfer,
+ * streamout, DMA, or as a random access target). The rest of
+ * the buffer is considered invalid and can be mapped unsynchronized.
+ *
+ * This allows unsynchronized mapping of a buffer range which hasn't
+ * been used yet. It's for applications which forget to use
+ * the unsynchronized map flag and expect the driver to figure it out.
+ */
+ struct util_range valid_buffer_range;
};
#define R600_BLOCK_MAX_BO 32
{
struct r600_resource *rbuffer = r600_resource(buf);
+ util_range_destroy(&rbuffer->valid_buffer_range);
pb_reference(&rbuffer->buf, NULL);
FREE(rbuffer);
}
assert(box->x + box->width <= resource->width0);
+ /* See if the buffer range being mapped has never been initialized,
+ * in which case it can be mapped unsynchronized. */
+ if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
+ usage & PIPE_TRANSFER_WRITE &&
+ !util_ranges_intersect(&rbuffer->valid_buffer_range, box->x, box->x + box->width)) {
+ usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
+ }
+
if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE &&
!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
assert(usage & PIPE_TRANSFER_WRITE);
{
struct r600_context *rctx = (struct r600_context*)pipe;
struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
+ struct r600_resource *rbuffer = r600_resource(transfer->resource);
if (rtransfer->staging) {
struct pipe_resource *dst, *src;
}
pipe_resource_reference((struct pipe_resource**)&rtransfer->staging, NULL);
}
+
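+ /* A write transfer may have touched this range; conservatively mark it
+ * as initialized so later mappings of it synchronize as usual. */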
+ if (transfer->usage & PIPE_TRANSFER_WRITE) {
+ util_range_add(&rbuffer->valid_buffer_range, transfer->box.x,
+ transfer->box.x + transfer->box.width);
+ }
util_slab_free(&rctx->pool_transfers, transfer);
}
res->cs_buf = rscreen->ws->buffer_get_cs_handle(res->buf);
res->domains = domains;
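+ /* Newly allocated storage: nothing in the buffer is initialized yet. */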
+ util_range_set_empty(&res->valid_buffer_range);
return true;
}
pipe_reference_init(&rbuffer->b.b.reference, 1);
rbuffer->b.b.screen = screen;
rbuffer->b.vtbl = &r600_buffer_vtbl;
+ util_range_init(&rbuffer->valid_buffer_range);
if (!r600_init_resource(rscreen, rbuffer, templ->width0, alignment, TRUE, templ->usage)) {
FREE(rbuffer);
/* Invalidate the read caches. */
rctx->flags |= R600_CONTEXT_INVAL_READ_CACHES;
+
+ util_range_add(&r600_resource(dst)->valid_buffer_range, dst_offset,
+ dst_offset + size);
}
void r600_need_dma_space(struct r600_context *ctx, unsigned num_dw)
src_offset += csize << shift;
size -= csize;
}
+
+ util_range_add(&rdst->valid_buffer_range, dst_offset,
+ dst_offset + size);
}
{
struct r600_context *rctx = (struct r600_context *)ctx;
struct r600_so_target *t;
+ struct r600_resource *rbuffer = (struct r600_resource*)buffer;
t = CALLOC_STRUCT(r600_so_target);
if (!t) {
pipe_resource_reference(&t->b.buffer, buffer);
t->b.buffer_offset = buffer_offset;
t->b.buffer_size = buffer_size;
+
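+ /* Streamout may write anywhere in the bound range, so mark the whole
+ * range as initialized up front. */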
+ util_range_add(&rbuffer->valid_buffer_range, buffer_offset,
+ buffer_offset + buffer_size);
return &t->b;
}