}
}
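+	/* buffer_wait with a timeout of 0 polls the busy status and returns
+	 * immediately (false while the buffer is still busy), so this check
+	 * itself never blocks. */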
- if (busy || ctx->ws->buffer_is_busy(resource->buf, rusage)) {
+ if (busy || !ctx->ws->buffer_wait(resource->buf, 0, rusage)) {
if (usage & PIPE_TRANSFER_DONTBLOCK) {
return NULL;
} else {
enum radeon_bo_flag flags = 0;
switch (res->b.b.usage) {
+ case PIPE_USAGE_STREAM:
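+		/* Streamed buffers are typically written once by the CPU and
+		 * read by the GPU, so write-combined GTT is presumably the
+		 * best placement for them. */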
+ flags = RADEON_FLAG_GTT_WC;
+ /* fall through */
case PIPE_USAGE_STAGING:
/* Transfers are likely to occur more often with these resources. */
res->domains = RADEON_DOMAIN_GTT;
break;
- case PIPE_USAGE_STREAM:
case PIPE_USAGE_DYNAMIC:
/* Older kernels didn't always flush the HDP cache before
* CS execution
*/
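+			/* Checking drm_major == 2 limits this workaround to the
+			 * radeon kernel driver; other DRM majors presumably
+			 * flush the HDP cache correctly. */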
- if (rscreen->info.drm_minor < 40) {
+ if (rscreen->info.drm_major == 2 &&
+ rscreen->info.drm_minor < 40) {
res->domains = RADEON_DOMAIN_GTT;
- flags = RADEON_FLAG_GTT_WC;
+ flags |= RADEON_FLAG_GTT_WC;
break;
}
- flags = RADEON_FLAG_CPU_ACCESS;
+ flags |= RADEON_FLAG_CPU_ACCESS;
/* fall through */
case PIPE_USAGE_DEFAULT:
case PIPE_USAGE_IMMUTABLE:
default:
/* Not listing GTT here improves performance in some apps. */
res->domains = RADEON_DOMAIN_VRAM;
- flags = RADEON_FLAG_GTT_WC;
+ flags |= RADEON_FLAG_GTT_WC;
break;
}
	/* Use GTT for all persistent mappings with older kernels, because they
	 * didn't always flush the HDP cache before CS execution.
	 *
	 * Write-combined CPU mappings are fine, the kernel ensures all CPU
* writes finish before the GPU executes a command stream.
*/
- if (rscreen->info.drm_minor < 40)
+ if (rscreen->info.drm_major == 2 &&
+ rscreen->info.drm_minor < 40)
res->domains = RADEON_DOMAIN_GTT;
else if (res->domains & RADEON_DOMAIN_VRAM)
flags |= RADEON_FLAG_CPU_ACCESS;
	/* Tiled textures are unmappable. Always put them in VRAM. */
	if (res->b.b.target != PIPE_BUFFER &&
	    rtex->surface.level[0].mode >= RADEON_SURF_MODE_1D) {
res->domains = RADEON_DOMAIN_VRAM;
flags &= ~RADEON_FLAG_CPU_ACCESS;
+ flags |= RADEON_FLAG_NO_CPU_ACCESS;
}
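+	/* Apply the write-combining debug override last, so that none of the
+	 * paths above can re-enable it. */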
+ if (rscreen->debug_flags & DBG_NO_WC)
+ flags &= ~RADEON_FLAG_GTT_WC;
+
/* Allocate a new resource. */
	new_buf = rscreen->ws->buffer_create(rscreen->ws, size, alignment,
					     use_reusable_pool,
					     res->domains, flags);
pb_reference(&old_buf, NULL);
util_range_set_empty(&res->valid_buffer_range);
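+	/* The storage was just reallocated, so any TC L2 dirtiness tracked
+	 * for the old buffer no longer applies. */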
+ res->TC_L2_dirty = false;
if (rscreen->debug_flags & DBG_VM && res->b.b.target == PIPE_BUFFER) {
		fprintf(stderr, "VM start=0x%"PRIX64" end=0x%"PRIX64" | Buffer %u bytes\n",
			res->gpu_address, res->gpu_address + res->buf->size,
			res->buf->size);
	}
/* Check if mapping this buffer would cause waiting for the GPU. */
if (r600_rings_is_buffer_referenced(rctx, rbuffer->cs_buf, RADEON_USAGE_READWRITE) ||
- rctx->ws->buffer_is_busy(rbuffer->buf, RADEON_USAGE_READWRITE)) {
+ !rctx->ws->buffer_wait(rbuffer->buf, 0, RADEON_USAGE_READWRITE)) {
rctx->invalidate_buffer(&rctx->b, &rbuffer->b.b);
}
/* At this point, the buffer is always idle. */
/* Check if mapping this buffer would cause waiting for the GPU. */
if (r600_rings_is_buffer_referenced(rctx, rbuffer->cs_buf, RADEON_USAGE_READWRITE) ||
- rctx->ws->buffer_is_busy(rbuffer->buf, RADEON_USAGE_READWRITE)) {
+ !rctx->ws->buffer_wait(rbuffer->buf, 0, RADEON_USAGE_READWRITE)) {
/* Do a wait-free write-only transfer using a temporary buffer. */
unsigned offset;
struct r600_resource *staging = NULL;
data += box->x % R600_MAP_BUFFER_ALIGNMENT;
return r600_buffer_get_transfer(ctx, resource, level, usage, box,
ptransfer, data, staging, offset);
- } else {
- return NULL; /* error, shouldn't occur though */
}
+ } else {
+ /* At this point, the buffer is always idle (we checked it above). */
+ usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
}
- /* At this point, the buffer is always idle (we checked it above). */
- usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
}
/* Using a staging buffer in GTT for larger reads is much faster. */
else if ((usage & PIPE_TRANSFER_READ) &&
ptransfer, data, NULL, 0);
}
-static void r600_buffer_transfer_unmap(struct pipe_context *ctx,
- struct pipe_transfer *transfer)
+static void r600_buffer_do_flush_region(struct pipe_context *ctx,
+ struct pipe_transfer *transfer,
+ const struct pipe_box *box)
{
struct r600_common_context *rctx = (struct r600_common_context*)ctx;
struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
struct r600_resource *rbuffer = r600_resource(transfer->resource);
if (rtransfer->staging) {
- if (rtransfer->transfer.usage & PIPE_TRANSFER_WRITE) {
- struct pipe_resource *dst, *src;
- unsigned soffset, doffset, size;
- struct pipe_box box;
+ struct pipe_resource *dst, *src;
+ unsigned soffset;
+ struct pipe_box dma_box;
- dst = transfer->resource;
- src = &rtransfer->staging->b.b;
- size = transfer->box.width;
- doffset = transfer->box.x;
- soffset = rtransfer->offset + transfer->box.x % R600_MAP_BUFFER_ALIGNMENT;
+ dst = transfer->resource;
+ src = &rtransfer->staging->b.b;
+ soffset = rtransfer->offset + box->x % R600_MAP_BUFFER_ALIGNMENT;
- u_box_1d(soffset, size, &box);
+ u_box_1d(soffset, box->width, &dma_box);
- /* Copy the staging buffer into the original one. */
- rctx->dma_copy(ctx, dst, 0, doffset, 0, 0, src, 0, &box);
- }
- pipe_resource_reference((struct pipe_resource**)&rtransfer->staging, NULL);
+ /* Copy the staging buffer into the original one. */
+ rctx->dma_copy(ctx, dst, 0, box->x, 0, 0, src, 0, &dma_box);
}
- if (transfer->usage & PIPE_TRANSFER_WRITE) {
- util_range_add(&rbuffer->valid_buffer_range, transfer->box.x,
- transfer->box.x + transfer->box.width);
+ util_range_add(&rbuffer->valid_buffer_range, box->x,
+ box->x + box->width);
+}
+
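+/* transfer_flush_region callback: gallium passes a box relative to the
+ * mapped range, so translate it to buffer coordinates first. */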
+static void r600_buffer_flush_region(struct pipe_context *ctx,
+ struct pipe_transfer *transfer,
+ const struct pipe_box *rel_box)
+{
+ if (transfer->usage & (PIPE_TRANSFER_WRITE |
+ PIPE_TRANSFER_FLUSH_EXPLICIT)) {
+ struct pipe_box box;
+
+ u_box_1d(transfer->box.x + rel_box->x, rel_box->width, &box);
+ r600_buffer_do_flush_region(ctx, transfer, &box);
}
+}
+
+static void r600_buffer_transfer_unmap(struct pipe_context *ctx,
+ struct pipe_transfer *transfer)
+{
+ struct r600_common_context *rctx = (struct r600_common_context*)ctx;
+ struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
+
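+	/* Unless the application promised to flush subranges explicitly,
+	 * flush the whole mapped box on unmap. */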
+ if (transfer->usage & PIPE_TRANSFER_WRITE &&
+ !(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT))
+ r600_buffer_do_flush_region(ctx, transfer, &transfer->box);
+
+ if (rtransfer->staging)
+ pipe_resource_reference((struct pipe_resource**)&rtransfer->staging, NULL);
+
util_slab_free(&rctx->pool_transfers, transfer);
}
NULL, /* get_handle */
r600_buffer_destroy, /* resource_destroy */
r600_buffer_transfer_map, /* transfer_map */
- NULL, /* transfer_flush_region */
+ r600_buffer_flush_region, /* transfer_flush_region */
r600_buffer_transfer_unmap, /* transfer_unmap */
NULL /* transfer_inline_write */
};
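+/* Allocation and vtbl setup shared by all the buffer constructors below. */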
-struct pipe_resource *r600_buffer_create(struct pipe_screen *screen,
- const struct pipe_resource *templ,
- unsigned alignment)
+static struct r600_resource *
+r600_alloc_buffer_struct(struct pipe_screen *screen,
+ const struct pipe_resource *templ)
{
- struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
struct r600_resource *rbuffer;
	rbuffer = MALLOC_STRUCT(r600_resource);

	rbuffer->b.b = *templ;
	pipe_reference_init(&rbuffer->b.b.reference, 1);
	rbuffer->b.b.screen = screen;
rbuffer->b.vtbl = &r600_buffer_vtbl;
rbuffer->buf = NULL;
+ rbuffer->TC_L2_dirty = false;
util_range_init(&rbuffer->valid_buffer_range);
+ return rbuffer;
+}
+
+struct pipe_resource *r600_buffer_create(struct pipe_screen *screen,
+ const struct pipe_resource *templ,
+ unsigned alignment)
+{
+ struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
+ struct r600_resource *rbuffer = r600_alloc_buffer_struct(screen, templ);
	if (!r600_init_resource(rscreen, rbuffer, templ->width0, alignment, TRUE)) {
		FREE(rbuffer);
		return NULL;
	}
return &rbuffer->b.b;
}
+
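+/* Create an internal buffer described only by size and alignment;
+ * PIPE_FORMAT_R8_UNORM merely serves as a 1-byte dummy format. */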
+struct pipe_resource *r600_aligned_buffer_create(struct pipe_screen *screen,
+ unsigned bind,
+ unsigned usage,
+ unsigned size,
+ unsigned alignment)
+{
+ struct pipe_resource buffer;
+
+ memset(&buffer, 0, sizeof buffer);
+ buffer.target = PIPE_BUFFER;
+ buffer.format = PIPE_FORMAT_R8_UNORM;
+ buffer.bind = bind;
+ buffer.usage = usage;
+ buffer.flags = 0;
+ buffer.width0 = size;
+ buffer.height0 = 1;
+ buffer.depth0 = 1;
+ buffer.array_size = 1;
+ return r600_buffer_create(screen, &buffer, alignment);
+}
+
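+/* Wrap an existing user allocation as a GTT buffer without copying it.
+ * The whole range is marked valid up front since the user owns the
+ * contents. */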
+struct pipe_resource *
+r600_buffer_from_user_memory(struct pipe_screen *screen,
+ const struct pipe_resource *templ,
+ void *user_memory)
+{
+ struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
+ struct radeon_winsys *ws = rscreen->ws;
+ struct r600_resource *rbuffer = r600_alloc_buffer_struct(screen, templ);
+
+ rbuffer->domains = RADEON_DOMAIN_GTT;
+ util_range_add(&rbuffer->valid_buffer_range, 0, templ->width0);
+
+ /* Convert a user pointer to a buffer. */
+ rbuffer->buf = ws->buffer_from_ptr(ws, user_memory, templ->width0);
+ if (!rbuffer->buf) {
+ FREE(rbuffer);
+ return NULL;
+ }
+
+ rbuffer->cs_buf = ws->buffer_get_cs_handle(rbuffer->buf);
+
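+	/* Without kernel virtual address support there is no fixed GPU
+	 * address; relocations presumably handle addressing in that case. */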
+ if (rscreen->info.r600_virtual_address)
+ rbuffer->gpu_address =
+ ws->buffer_get_virtual_address(rbuffer->cs_buf);
+ else
+ rbuffer->gpu_address = 0;
+
+ return &rbuffer->b.b;
+}