#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include <inttypes.h>
+#include <stdio.h>
boolean r600_rings_is_buffer_referenced(struct r600_common_context *ctx,
struct radeon_winsys_cs_handle *buf,
enum radeon_bo_usage usage)
{
if (ctx->ws->cs_is_buffer_referenced(ctx->rings.gfx.cs, buf, usage)) {
return TRUE;
}
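+ /* Only check the DMA ring when its IB actually contains commands. */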
- if (ctx->rings.dma.cs &&
+ if (ctx->rings.dma.cs && ctx->rings.dma.cs->cdw &&
ctx->ws->cs_is_buffer_referenced(ctx->rings.dma.cs, buf, usage)) {
return TRUE;
}
return FALSE;
}
void *r600_buffer_map_sync_with_rings(struct r600_common_context *ctx,
struct r600_resource *resource,
unsigned usage)
{
enum radeon_bo_usage rusage = RADEON_USAGE_READWRITE;
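+ /* Set when a ring referencing the buffer has just been flushed; the buffer is then known to be busy and the zero-timeout wait below can be skipped. */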
+ bool busy = false;
if (usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
return ctx->ws->buffer_map(resource->cs_buf, NULL, usage);
}
if (!(usage & PIPE_TRANSFER_WRITE)) {
/* Reads only need to wait for the last write. */
rusage = RADEON_USAGE_WRITE;
}
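+ /* Skip the check and the flush when the GFX IB contains nothing beyond the initial state commands. */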
- if (ctx->rings.gfx.cs->cdw &&
+ if (ctx->rings.gfx.cs->cdw != ctx->initial_gfx_cs_size &&
ctx->ws->cs_is_buffer_referenced(ctx->rings.gfx.cs,
resource->cs_buf, rusage)) {
if (usage & PIPE_TRANSFER_DONTBLOCK) {
- ctx->rings.gfx.flush(ctx, RADEON_FLUSH_ASYNC);
+ ctx->rings.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
return NULL;
} else {
- ctx->rings.gfx.flush(ctx, 0);
+ ctx->rings.gfx.flush(ctx, 0, NULL);
+ busy = true;
}
}
if (ctx->rings.dma.cs &&
ctx->ws->cs_is_buffer_referenced(ctx->rings.dma.cs,
resource->cs_buf, rusage)) {
if (usage & PIPE_TRANSFER_DONTBLOCK) {
- ctx->rings.dma.flush(ctx, RADEON_FLUSH_ASYNC);
+ ctx->rings.dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
return NULL;
} else {
- ctx->rings.dma.flush(ctx, 0);
+ ctx->rings.dma.flush(ctx, 0, NULL);
+ busy = true;
}
}
- if (ctx->ws->buffer_is_busy(resource->buf, rusage)) {
+ if (busy || !ctx->ws->buffer_wait(resource->buf, 0, rusage)) {
if (usage & PIPE_TRANSFER_DONTBLOCK) {
return NULL;
} else {
}
}
+ /* Pass the CS as NULL so buffer_map does not repeat the checks we have already done above. */
return ctx->ws->buffer_map(resource->cs_buf, NULL, usage);
}
bool r600_init_resource(struct r600_common_screen *rscreen,
struct r600_resource *res,
unsigned size, unsigned alignment,
- bool use_reusable_pool, unsigned usage)
+ bool use_reusable_pool)
{
- uint32_t initial_domain, domains;
+ struct r600_texture *rtex = (struct r600_texture*)res;
+ struct pb_buffer *old_buf, *new_buf;
+ enum radeon_bo_flag flags = 0;
- switch(usage) {
+ switch (res->b.b.usage) {
+ case PIPE_USAGE_STREAM:
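+ /* Streaming buffers are written frequently by the CPU, so write-combined GTT suits them. */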
+ flags = RADEON_FLAG_GTT_WC;
+ /* fall through */
case PIPE_USAGE_STAGING:
- /* Staging resources participate in transfers, i.e. are used
- * for uploads and downloads from regular resources.
- * We generate them internally for some transfers.
- */
- initial_domain = RADEON_DOMAIN_GTT;
- domains = RADEON_DOMAIN_GTT;
+ /* Transfers are likely to occur more often with these resources. */
+ res->domains = RADEON_DOMAIN_GTT;
break;
case PIPE_USAGE_DYNAMIC:
- case PIPE_USAGE_STREAM:
- /* Default to GTT, but allow the memory manager to move it to VRAM. */
- initial_domain = RADEON_DOMAIN_GTT;
- domains = RADEON_DOMAIN_GTT | RADEON_DOMAIN_VRAM;
- break;
+ /* Older kernels didn't always flush the HDP cache before
+ * CS execution. */
+ if (rscreen->info.drm_major == 2 &&
+ rscreen->info.drm_minor < 40) {
+ res->domains = RADEON_DOMAIN_GTT;
+ flags |= RADEON_FLAG_GTT_WC;
+ break;
+ }
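+ /* Dynamic buffers are mapped directly, so request CPU-accessible (visible) VRAM. */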
+ flags |= RADEON_FLAG_CPU_ACCESS;
+ /* fall through */
case PIPE_USAGE_DEFAULT:
- case PIPE_USAGE_STATIC:
case PIPE_USAGE_IMMUTABLE:
default:
- /* Don't list GTT here, because the memory manager would put some
- * resources to GTT no matter what the initial domain is.
- * Not listing GTT in the domains improves performance a lot. */
- initial_domain = RADEON_DOMAIN_VRAM;
- domains = RADEON_DOMAIN_VRAM;
+ /* Not listing GTT here improves performance in some apps. */
+ res->domains = RADEON_DOMAIN_VRAM;
+ flags |= RADEON_FLAG_GTT_WC;
break;
}
- res->buf = rscreen->ws->buffer_create(rscreen->ws, size, alignment,
- use_reusable_pool,
- initial_domain);
- if (!res->buf) {
+ if (res->b.b.target == PIPE_BUFFER &&
+ res->b.b.flags & (PIPE_RESOURCE_FLAG_MAP_PERSISTENT |
+ PIPE_RESOURCE_FLAG_MAP_COHERENT)) {
+ /* Use GTT for all persistent mappings with older kernels,
+ * because they didn't always flush the HDP cache before CS
+ * execution.
+ *
+ * Write-combined CPU mappings are fine; the kernel ensures all CPU
+ * writes finish before the GPU executes a command stream.
+ */
+ if (rscreen->info.drm_major == 2 &&
+ rscreen->info.drm_minor < 40)
+ res->domains = RADEON_DOMAIN_GTT;
+ else if (res->domains & RADEON_DOMAIN_VRAM)
+ flags |= RADEON_FLAG_CPU_ACCESS;
+ }
+
+ /* Tiled textures are unmappable. Always put them in VRAM. */
+ if (res->b.b.target != PIPE_BUFFER &&
+ rtex->surface.level[0].mode >= RADEON_SURF_MODE_1D) {
+ res->domains = RADEON_DOMAIN_VRAM;
+ flags &= ~RADEON_FLAG_CPU_ACCESS;
+ flags |= RADEON_FLAG_NO_CPU_ACCESS;
+ }
+
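+ /* A debug option can force write-combining off. */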
+ if (rscreen->debug_flags & DBG_NO_WC)
+ flags &= ~RADEON_FLAG_GTT_WC;
+
+ /* Allocate a new resource. */
+ new_buf = rscreen->ws->buffer_create(rscreen->ws, size, alignment,
+ use_reusable_pool,
+ res->domains, flags);
+ if (!new_buf) {
return false;
}
- res->cs_buf = rscreen->ws->buffer_get_cs_handle(res->buf);
- res->domains = domains;
+ /* Swap the pointers so that res->buf is never observed as NULL. This
+ * should prevent crashes when multiple contexts share the same buffer
+ * and one of them invalidates it while the others are still using it. */
+ old_buf = res->buf;
+ res->cs_buf = rscreen->ws->buffer_get_cs_handle(new_buf); /* should be atomic */
+ res->buf = new_buf; /* should be atomic */
+
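+ /* Cache the GPU virtual address; it remains 0 when virtual addressing is not available. */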
+ if (rscreen->info.r600_virtual_address)
+ res->gpu_address = rscreen->ws->buffer_get_virtual_address(res->cs_buf);
+ else
+ res->gpu_address = 0;
+
+ pb_reference(&old_buf, NULL);
+
util_range_set_empty(&res->valid_buffer_range);
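+ /* A newly allocated buffer has no pending writes sitting in the texture cache (TC L2). */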
+ res->TC_L2_dirty = false;
if (rscreen->debug_flags & DBG_VM && res->b.b.target == PIPE_BUFFER) {
- fprintf(stderr, "VM start=0x%"PRIu64" end=0x%"PRIu64" | Buffer %u bytes\n",
- r600_resource_va(&rscreen->b, &res->b.b),
- r600_resource_va(&rscreen->b, &res->b.b) + res->buf->size,
+ fprintf(stderr, "VM start=0x%"PRIX64" end=0x%"PRIX64" | Buffer %u bytes\n",
+ res->gpu_address, res->gpu_address + res->buf->size,
res->buf->size);
}
return true;
return data;
}
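+/* Return whether a GPU copy can be used for the given buffer range: CP DMA
+ * handles any alignment, while the async DMA ring and the streamout fallback
+ * require dword-aligned offsets and size. */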
+static bool r600_can_dma_copy_buffer(struct r600_common_context *rctx,
+ unsigned dstx, unsigned srcx, unsigned size)
+{
+ bool dword_aligned = !(dstx % 4) && !(srcx % 4) && !(size % 4);
+
+ return rctx->screen->has_cp_dma ||
+ (dword_aligned && (rctx->rings.dma.cs ||
+ rctx->screen->has_streamout));
+}
+
static void *r600_buffer_transfer_map(struct pipe_context *ctx,
struct pipe_resource *resource,
unsigned level,
/* Check if mapping this buffer would cause waiting for the GPU. */
if (r600_rings_is_buffer_referenced(rctx, rbuffer->cs_buf, RADEON_USAGE_READWRITE) ||
- rctx->ws->buffer_is_busy(rbuffer->buf, RADEON_USAGE_READWRITE)) {
+ !rctx->ws->buffer_wait(rbuffer->buf, 0, RADEON_USAGE_READWRITE)) {
rctx->invalidate_buffer(&rctx->b, &rbuffer->b.b);
}
/* At this point, the buffer is always idle. */
else if ((usage & PIPE_TRANSFER_DISCARD_RANGE) &&
!(usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
!(rscreen->debug_flags & DBG_NO_DISCARD_RANGE) &&
- (rscreen->has_cp_dma ||
- (rscreen->has_streamout &&
- /* The buffer range must be aligned to 4 with streamout. */
- box->x % 4 == 0 && box->width % 4 == 0))) {
+ r600_can_dma_copy_buffer(rctx, box->x, 0, box->width)) {
assert(usage & PIPE_TRANSFER_WRITE);
/* Check if mapping this buffer would cause waiting for the GPU. */
if (r600_rings_is_buffer_referenced(rctx, rbuffer->cs_buf, RADEON_USAGE_READWRITE) ||
- rctx->ws->buffer_is_busy(rbuffer->buf, RADEON_USAGE_READWRITE)) {
+ !rctx->ws->buffer_wait(rbuffer->buf, 0, RADEON_USAGE_READWRITE)) {
/* Do a wait-free write-only transfer using a temporary buffer. */
unsigned offset;
struct r600_resource *staging = NULL;
return r600_buffer_get_transfer(ctx, resource, level, usage, box,
ptransfer, data, staging, offset);
}
+ } else {
+ /* At this point, the buffer is always idle (we checked it above). */
+ usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
+ }
+ }
+ /* Using a staging buffer in GTT for larger reads is much faster. */
+ else if ((usage & PIPE_TRANSFER_READ) &&
+ !(usage & PIPE_TRANSFER_WRITE) &&
+ rbuffer->domains == RADEON_DOMAIN_VRAM &&
+ r600_can_dma_copy_buffer(rctx, 0, box->x, box->width)) {
+ struct r600_resource *staging;
+
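+ /* Over-allocate so the copied data keeps its offset modulo R600_MAP_BUFFER_ALIGNMENT and the returned pointer has the same alignment as a direct mapping. */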
+ staging = (struct r600_resource*) pipe_buffer_create(
+ ctx->screen, PIPE_BIND_TRANSFER_READ, PIPE_USAGE_STAGING,
+ box->width + (box->x % R600_MAP_BUFFER_ALIGNMENT));
+ if (staging) {
+ /* Copy the VRAM buffer to the staging buffer. */
+ rctx->dma_copy(ctx, &staging->b.b, 0,
+ box->x % R600_MAP_BUFFER_ALIGNMENT,
+ 0, 0, resource, level, box);
+
+ data = r600_buffer_map_sync_with_rings(rctx, staging, PIPE_TRANSFER_READ);
+ data += box->x % R600_MAP_BUFFER_ALIGNMENT;
+
+ return r600_buffer_get_transfer(ctx, resource, level, usage, box,
+ ptransfer, data, staging, 0);
}
}
ptransfer, data, NULL, 0);
}
-static void r600_buffer_transfer_unmap(struct pipe_context *ctx,
- struct pipe_transfer *transfer)
+static void r600_buffer_do_flush_region(struct pipe_context *ctx,
+ struct pipe_transfer *transfer,
+ const struct pipe_box *box)
{
struct r600_common_context *rctx = (struct r600_common_context*)ctx;
struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
struct r600_resource *rbuffer = (struct r600_resource*)transfer->resource;
if (rtransfer->staging) {
struct pipe_resource *dst, *src;
- unsigned soffset, doffset, size;
- struct pipe_box box;
+ unsigned soffset;
+ struct pipe_box dma_box;
dst = transfer->resource;
src = &rtransfer->staging->b.b;
- size = transfer->box.width;
- doffset = transfer->box.x;
- soffset = rtransfer->offset + transfer->box.x % R600_MAP_BUFFER_ALIGNMENT;
+ soffset = rtransfer->offset + box->x % R600_MAP_BUFFER_ALIGNMENT;
- u_box_1d(soffset, size, &box);
+ u_box_1d(soffset, box->width, &dma_box);
/* Copy the staging buffer into the original one. */
- if (!(size % 4) && !(doffset % 4) && !(soffset % 4) &&
- rctx->dma_copy(ctx, dst, 0, doffset, 0, 0, src, 0, &box)) {
- /* DONE. */
- } else {
- ctx->resource_copy_region(ctx, dst, 0, doffset, 0, 0, src, 0, &box);
- }
- pipe_resource_reference((struct pipe_resource**)&rtransfer->staging, NULL);
+ rctx->dma_copy(ctx, dst, 0, box->x, 0, 0, src, 0, &dma_box);
}
- if (transfer->usage & PIPE_TRANSFER_WRITE) {
- util_range_add(&rbuffer->valid_buffer_range, transfer->box.x,
- transfer->box.x + transfer->box.width);
+ util_range_add(&rbuffer->valid_buffer_range, box->x,
+ box->x + box->width);
+}
+
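+/* Flush an explicitly requested range; rel_box is relative to the mapped
+ * range, so translate it by transfer->box.x before flushing. */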
+static void r600_buffer_flush_region(struct pipe_context *ctx,
+ struct pipe_transfer *transfer,
+ const struct pipe_box *rel_box)
+{
+ if (transfer->usage & (PIPE_TRANSFER_WRITE |
+ PIPE_TRANSFER_FLUSH_EXPLICIT)) {
+ struct pipe_box box;
+
+ u_box_1d(transfer->box.x + rel_box->x, rel_box->width, &box);
+ r600_buffer_do_flush_region(ctx, transfer, &box);
}
+}
+
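+/* On unmap, flush the whole mapped range unless the application used
+ * explicit flush regions. */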
+static void r600_buffer_transfer_unmap(struct pipe_context *ctx,
+ struct pipe_transfer *transfer)
+{
+ struct r600_common_context *rctx = (struct r600_common_context*)ctx;
+ struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
+
+ if (transfer->usage & PIPE_TRANSFER_WRITE &&
+ !(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT))
+ r600_buffer_do_flush_region(ctx, transfer, &transfer->box);
+
+ if (rtransfer->staging)
+ pipe_resource_reference((struct pipe_resource**)&rtransfer->staging, NULL);
+
util_slab_free(&rctx->pool_transfers, transfer);
}
NULL, /* get_handle */
r600_buffer_destroy, /* resource_destroy */
r600_buffer_transfer_map, /* transfer_map */
- NULL, /* transfer_flush_region */
+ r600_buffer_flush_region, /* transfer_flush_region */
r600_buffer_transfer_unmap, /* transfer_unmap */
NULL /* transfer_inline_write */
};
-struct pipe_resource *r600_buffer_create(struct pipe_screen *screen,
- const struct pipe_resource *templ,
- unsigned alignment)
+static struct r600_resource *
+r600_alloc_buffer_struct(struct pipe_screen *screen,
+ const struct pipe_resource *templ)
{
- struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
struct r600_resource *rbuffer;
rbuffer = MALLOC_STRUCT(r600_resource);
pipe_reference_init(&rbuffer->b.b.reference, 1);
rbuffer->b.b.screen = screen;
rbuffer->b.vtbl = &r600_buffer_vtbl;
+ rbuffer->buf = NULL;
+ rbuffer->TC_L2_dirty = false;
util_range_init(&rbuffer->valid_buffer_range);
+ return rbuffer;
+}
+
+struct pipe_resource *r600_buffer_create(struct pipe_screen *screen,
+ const struct pipe_resource *templ,
+ unsigned alignment)
+{
+ struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
+ struct r600_resource *rbuffer = r600_alloc_buffer_struct(screen, templ);
+
+ if (!r600_init_resource(rscreen, rbuffer, templ->width0, alignment, TRUE)) {
+ FREE(rbuffer);
+ return NULL;
+ }
+ return &rbuffer->b.b;
+}
+
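+/* Convenience wrapper for driver-internal buffers: fills in a pipe_resource
+ * template and creates the buffer with an explicit alignment. */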
+struct pipe_resource *r600_aligned_buffer_create(struct pipe_screen *screen,
+ unsigned bind,
+ unsigned usage,
+ unsigned size,
+ unsigned alignment)
+{
+ struct pipe_resource buffer;
+
+ memset(&buffer, 0, sizeof buffer);
+ buffer.target = PIPE_BUFFER;
+ buffer.format = PIPE_FORMAT_R8_UNORM;
+ buffer.bind = bind;
+ buffer.usage = usage;
+ buffer.flags = 0;
+ buffer.width0 = size;
+ buffer.height0 = 1;
+ buffer.depth0 = 1;
+ buffer.array_size = 1;
+ return r600_buffer_create(screen, &buffer, alignment);
+}
+
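+/* Create a buffer that is backed directly by user memory; the pages are
+ * placed in GTT and no copy is made. */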
+struct pipe_resource *
+r600_buffer_from_user_memory(struct pipe_screen *screen,
+ const struct pipe_resource *templ,
+ void *user_memory)
+{
+ struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
+ struct radeon_winsys *ws = rscreen->ws;
+ struct r600_resource *rbuffer = r600_alloc_buffer_struct(screen, templ);
- if (!r600_init_resource(rscreen, rbuffer, templ->width0, alignment, TRUE, templ->usage)) {
+ rbuffer->domains = RADEON_DOMAIN_GTT;
+ util_range_add(&rbuffer->valid_buffer_range, 0, templ->width0);
+
+ /* Convert a user pointer to a buffer. */
+ rbuffer->buf = ws->buffer_from_ptr(ws, user_memory, templ->width0);
+ if (!rbuffer->buf) {
FREE(rbuffer);
return NULL;
}
+
+ rbuffer->cs_buf = ws->buffer_get_cs_handle(rbuffer->buf);
+
+ if (rscreen->info.r600_virtual_address)
+ rbuffer->gpu_address =
+ ws->buffer_get_virtual_address(rbuffer->cs_buf);
+ else
+ rbuffer->gpu_address = 0;
+
return &rbuffer->b.b;
}