#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include <inttypes.h>
+#include <stdio.h>
boolean r600_rings_is_buffer_referenced(struct r600_common_context *ctx,
struct radeon_winsys_cs_handle *buf,
enum radeon_bo_usage usage)
{
if (ctx->ws->cs_is_buffer_referenced(ctx->rings.gfx.cs, buf, usage)) {
return TRUE;
}
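+ /* Also check the DMA ring, but only if it has queued commands (cdw != 0). */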
- if (ctx->rings.dma.cs &&
+ if (ctx->rings.dma.cs && ctx->rings.dma.cs->cdw &&
ctx->ws->cs_is_buffer_referenced(ctx->rings.dma.cs, buf, usage)) {
return TRUE;
}
void *r600_buffer_map_sync_with_rings(struct r600_common_context *ctx,
struct r600_resource *resource,
unsigned usage)
{
enum radeon_bo_usage rusage = RADEON_USAGE_READWRITE;
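+ /* Set when a ring referencing the buffer had to be flushed below, so the
+ * buffer is known to be busy even without asking the winsys. */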
+ bool busy = false;
if (usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
return ctx->ws->buffer_map(resource->cs_buf, NULL, usage);
}

if (!(usage & PIPE_TRANSFER_WRITE)) {
/* A read-only map only needs to wait for GPU writes. */
rusage = RADEON_USAGE_WRITE;
}
- if (ctx->rings.gfx.cs->cdw &&
+ if (ctx->rings.gfx.cs->cdw != ctx->initial_gfx_cs_size &&
ctx->ws->cs_is_buffer_referenced(ctx->rings.gfx.cs,
resource->cs_buf, rusage)) {
if (usage & PIPE_TRANSFER_DONTBLOCK) {
- ctx->rings.gfx.flush(ctx, RADEON_FLUSH_ASYNC);
+ ctx->rings.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
return NULL;
} else {
- ctx->rings.gfx.flush(ctx, 0);
+ ctx->rings.gfx.flush(ctx, 0, NULL);
+ busy = true;
}
}
if (ctx->rings.dma.cs &&
ctx->ws->cs_is_buffer_referenced(ctx->rings.dma.cs,
resource->cs_buf, rusage)) {
if (usage & PIPE_TRANSFER_DONTBLOCK) {
- ctx->rings.dma.flush(ctx, RADEON_FLUSH_ASYNC);
+ ctx->rings.dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
return NULL;
} else {
- ctx->rings.dma.flush(ctx, 0);
+ ctx->rings.dma.flush(ctx, 0, NULL);
+ busy = true;
}
}
- if (ctx->ws->buffer_is_busy(resource->buf, rusage)) {
+ if (busy || ctx->ws->buffer_is_busy(resource->buf, rusage)) {
if (usage & PIPE_TRANSFER_DONTBLOCK) {
return NULL;
} else {
}
}
+ /* Passing a NULL CS skips the busy checks we have already done above. */
return ctx->ws->buffer_map(resource->cs_buf, NULL, usage);
}
bool r600_init_resource(struct r600_common_screen *rscreen,
struct r600_resource *res,
unsigned size, unsigned alignment,
- bool use_reusable_pool, unsigned usage)
+ bool use_reusable_pool)
{
- uint32_t initial_domain, domains;
+ struct r600_texture *rtex = (struct r600_texture*)res;
+ struct pb_buffer *old_buf, *new_buf;
+ enum radeon_bo_flag flags = 0;
- switch(usage) {
+ switch (res->b.b.usage) {
case PIPE_USAGE_STAGING:
- /* Staging resources participate in transfers, i.e. are used
- * for uploads and downloads from regular resources.
- * We generate them internally for some transfers.
- */
- initial_domain = RADEON_DOMAIN_GTT;
- domains = RADEON_DOMAIN_GTT;
+ /* Transfers are likely to occur more often with these resources. */
+ res->domains = RADEON_DOMAIN_GTT;
break;
- case PIPE_USAGE_DYNAMIC:
case PIPE_USAGE_STREAM:
- /* Default to GTT, but allow the memory manager to move it to VRAM. */
- initial_domain = RADEON_DOMAIN_GTT;
- domains = RADEON_DOMAIN_GTT | RADEON_DOMAIN_VRAM;
- break;
+ case PIPE_USAGE_DYNAMIC:
+ /* Older kernels didn't always flush the HDP cache before
+ * CS execution.
+ */
+ if (rscreen->info.drm_minor < 40) {
+ res->domains = RADEON_DOMAIN_GTT;
+ flags = RADEON_FLAG_GTT_WC;
+ break;
+ }
+ /* fall through */
case PIPE_USAGE_DEFAULT:
- case PIPE_USAGE_STATIC:
case PIPE_USAGE_IMMUTABLE:
default:
- /* Don't list GTT here, because the memory manager would put some
- * resources to GTT no matter what the initial domain is.
- * Not listing GTT in the domains improves performance a lot. */
- initial_domain = RADEON_DOMAIN_VRAM;
- domains = RADEON_DOMAIN_VRAM;
+ /* Not listing GTT here improves performance in some apps. */
+ res->domains = RADEON_DOMAIN_VRAM;
+ flags = RADEON_FLAG_GTT_WC;
break;
}
- res->buf = rscreen->ws->buffer_create(rscreen->ws, size, alignment,
- use_reusable_pool,
- initial_domain);
- if (!res->buf) {
+ /* Use GTT for all persistent mappings with older kernels, because they
+ * didn't always flush the HDP cache before CS execution.
+ *
+ * Write-combined CPU mappings are fine; the kernel ensures all CPU
+ * writes finish before the GPU executes a command stream.
+ */
+ if (rscreen->info.drm_minor < 40 &&
+ res->b.b.target == PIPE_BUFFER &&
+ res->b.b.flags & (PIPE_RESOURCE_FLAG_MAP_PERSISTENT |
+ PIPE_RESOURCE_FLAG_MAP_COHERENT)) {
+ res->domains = RADEON_DOMAIN_GTT;
+ }
+
+ /* Tiled textures are unmappable. Always put them in VRAM. */
+ if (res->b.b.target != PIPE_BUFFER &&
+ rtex->surface.level[0].mode >= RADEON_SURF_MODE_1D) {
+ res->domains = RADEON_DOMAIN_VRAM;
+ }
+
+ /* Allocate a new resource. */
+ new_buf = rscreen->ws->buffer_create(rscreen->ws, size, alignment,
+ use_reusable_pool,
+ res->domains, flags);
+ if (!new_buf) {
return false;
}
- res->cs_buf = rscreen->ws->buffer_get_cs_handle(res->buf);
- res->domains = domains;
+ /* Replace the pointer such that if res->buf wasn't NULL, it won't be
+ * NULL. This should prevent crashes with multiple contexts using
+ * the same buffer where one of the contexts invalidates it while
+ * the others are using it. */
+ old_buf = res->buf;
+ res->cs_buf = rscreen->ws->buffer_get_cs_handle(new_buf); /* should be atomic */
+ res->buf = new_buf; /* should be atomic */
+
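+ /* Cache the GPU virtual address so it doesn't have to be queried from
+ * the winsys on every use; it is 0 if virtual addressing is unavailable. */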
+ if (rscreen->info.r600_virtual_address)
+ res->gpu_address = rscreen->ws->buffer_get_virtual_address(res->cs_buf);
+ else
+ res->gpu_address = 0;
+
+ pb_reference(&old_buf, NULL);
+
util_range_set_empty(&res->valid_buffer_range);
if (rscreen->debug_flags & DBG_VM && res->b.b.target == PIPE_BUFFER) {
- fprintf(stderr, "VM start=0x%"PRIu64" end=0x%"PRIu64" | Buffer %u bytes\n",
- r600_resource_va(&rscreen->b, &res->b.b),
- r600_resource_va(&rscreen->b, &res->b.b) + res->buf->size,
+ fprintf(stderr, "VM start=0x%"PRIX64" end=0x%"PRIX64" | Buffer %u bytes\n",
+ res->gpu_address, res->gpu_address + res->buf->size,
res->buf->size);
}
return true;
}
return data;
}
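+/* Whether this buffer range can be copied on the GPU: CP DMA handles any
+ * alignment, while the async DMA ring and the streamout-based fallback
+ * require dword-aligned offsets and size.
+ */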
+static bool r600_can_dma_copy_buffer(struct r600_common_context *rctx,
+ unsigned dstx, unsigned srcx, unsigned size)
+{
+ bool dword_aligned = !(dstx % 4) && !(srcx % 4) && !(size % 4);
+
+ return rctx->screen->has_cp_dma ||
+ (dword_aligned && (rctx->rings.dma.cs ||
+ rctx->screen->has_streamout));
+}
+
static void *r600_buffer_transfer_map(struct pipe_context *ctx,
struct pipe_resource *resource,
unsigned level,
else if ((usage & PIPE_TRANSFER_DISCARD_RANGE) &&
!(usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
!(rscreen->debug_flags & DBG_NO_DISCARD_RANGE) &&
- (rscreen->has_cp_dma ||
- (rscreen->has_streamout &&
- /* The buffer range must be aligned to 4 with streamout. */
- box->x % 4 == 0 && box->width % 4 == 0))) {
+ r600_can_dma_copy_buffer(rctx, box->x, 0, box->width)) {
assert(usage & PIPE_TRANSFER_WRITE);
/* Check if mapping this buffer would cause waiting for the GPU. */
data += box->x % R600_MAP_BUFFER_ALIGNMENT;
return r600_buffer_get_transfer(ctx, resource, level, usage, box,
ptransfer, data, staging, offset);
+ } else {
+ return NULL; /* error, shouldn't occur though */
}
}
+ /* At this point, the buffer is always idle (we checked it above). */
+ usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
+ }
+ /* Using a staging buffer in GTT for larger reads is much faster. */
+ else if ((usage & PIPE_TRANSFER_READ) &&
+ !(usage & PIPE_TRANSFER_WRITE) &&
+ rbuffer->domains == RADEON_DOMAIN_VRAM &&
+ r600_can_dma_copy_buffer(rctx, 0, box->x, box->width)) {
+ unsigned offset;
+ struct r600_resource *staging = NULL;
+
+ u_upload_alloc(rctx->uploader, 0,
+ box->width + (box->x % R600_MAP_BUFFER_ALIGNMENT),
+ &offset, (struct pipe_resource**)&staging, (void**)&data);
+
+ if (staging) {
+ data += box->x % R600_MAP_BUFFER_ALIGNMENT;
+
+ /* Copy the VRAM buffer to the staging buffer. */
+ rctx->dma_copy(ctx, &staging->b.b, 0,
+ offset + box->x % R600_MAP_BUFFER_ALIGNMENT,
+ 0, 0, resource, level, box);
+
+ /* Just do the synchronization. The buffer is mapped already. */
+ r600_buffer_map_sync_with_rings(rctx, staging, PIPE_TRANSFER_READ);
+
+ return r600_buffer_get_transfer(ctx, resource, level, usage, box,
+ ptransfer, data, staging, offset);
+ }
}
data = r600_buffer_map_sync_with_rings(rctx, rbuffer, usage);
struct r600_resource *rbuffer = r600_resource(transfer->resource);
if (rtransfer->staging) {
- struct pipe_resource *dst, *src;
- unsigned soffset, doffset, size;
- struct pipe_box box;
-
- dst = transfer->resource;
- src = &rtransfer->staging->b.b;
- size = transfer->box.width;
- doffset = transfer->box.x;
- soffset = rtransfer->offset + transfer->box.x % R600_MAP_BUFFER_ALIGNMENT;
-
- u_box_1d(soffset, size, &box);
-
- /* Copy the staging buffer into the original one. */
- if (!(size % 4) && !(doffset % 4) && !(soffset % 4) &&
- rctx->dma_copy(ctx, dst, 0, doffset, 0, 0, src, 0, &box)) {
- /* DONE. */
- } else {
- ctx->resource_copy_region(ctx, dst, 0, doffset, 0, 0, src, 0, &box);
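+ /* Only write transfers need to copy the staging buffer back; read-only
+ * staging maps (the GTT read path above) have nothing to write back. */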
+ if (rtransfer->transfer.usage & PIPE_TRANSFER_WRITE) {
+ struct pipe_resource *dst, *src;
+ unsigned soffset, doffset, size;
+ struct pipe_box box;
+
+ dst = transfer->resource;
+ src = &rtransfer->staging->b.b;
+ size = transfer->box.width;
+ doffset = transfer->box.x;
+ soffset = rtransfer->offset + transfer->box.x % R600_MAP_BUFFER_ALIGNMENT;
+
+ u_box_1d(soffset, size, &box);
+
+ /* Copy the staging buffer into the original one. */
+ rctx->dma_copy(ctx, dst, 0, doffset, 0, 0, src, 0, &box);
}
pipe_resource_reference((struct pipe_resource**)&rtransfer->staging, NULL);
}
pipe_reference_init(&rbuffer->b.b.reference, 1);
rbuffer->b.b.screen = screen;
rbuffer->b.vtbl = &r600_buffer_vtbl;
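+ /* r600_init_resource now releases the previous res->buf, so it must start
+ * out NULL for a freshly-created resource. */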
+ rbuffer->buf = NULL;
util_range_init(&rbuffer->valid_buffer_range);
- if (!r600_init_resource(rscreen, rbuffer, templ->width0, alignment, TRUE, templ->usage)) {
+ if (!r600_init_resource(rscreen, rbuffer, templ->width0, alignment, TRUE)) {
FREE(rbuffer);
return NULL;
}