X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fgallium%2Fdrivers%2Fnouveau%2Fnouveau_buffer.c;h=49ff100c4ece47b5006cf4c0cd6872242dd74bb1;hb=5e04526399ca5d249cc5ec600aa6546356abccbb;hp=0b21530e19b53fd3b9c3d915818cee831f283cf6;hpb=06359e368bbe74fd9f7cb15b1801034c58513b36;p=mesa.git
diff --git a/src/gallium/drivers/nouveau/nouveau_buffer.c b/src/gallium/drivers/nouveau/nouveau_buffer.c
index 0b21530e19b..49ff100c4ec 100644
--- a/src/gallium/drivers/nouveau/nouveau_buffer.c
+++ b/src/gallium/drivers/nouveau/nouveau_buffer.c
@@ -69,6 +69,8 @@ nouveau_buffer_allocate(struct nouveau_screen *screen,
    if (buf->bo)
       buf->address = buf->bo->offset + buf->offset;
 
+   util_range_set_empty(&buf->valid_buffer_range);
+
    return TRUE;
 }
 
@@ -124,6 +126,8 @@ nouveau_buffer_destroy(struct pipe_screen *pscreen,
    nouveau_fence_ref(NULL, &res->fence);
    nouveau_fence_ref(NULL, &res->fence_wr);
 
+   util_range_destroy(&res->valid_buffer_range);
+
    FREE(res);
 
    NOUVEAU_DRV_STAT(nouveau_screen(pscreen), buf_obj_current_count, -1);
@@ -338,6 +342,8 @@ nouveau_buffer_should_discard(struct nv04_resource *buf, unsigned usage)
       return FALSE;
    if (unlikely(buf->base.bind & PIPE_BIND_SHARED))
       return FALSE;
+   if (unlikely(usage & PIPE_TRANSFER_PERSISTENT))
+      return FALSE;
 
    return buf->mm && nouveau_buffer_busy(buf, PIPE_TRANSFER_WRITE);
 }
@@ -387,6 +393,20 @@ nouveau_buffer_transfer_map(struct pipe_context *pipe,
    if (usage & PIPE_TRANSFER_WRITE)
       NOUVEAU_DRV_STAT(nv->screen, buf_transfers_wr, 1);
 
+   /* If we are trying to write to an uninitialized range, the user shouldn't
+    * care what was there before, so we can treat the write as if the target
+    * range were being discarded. Furthermore, even if this buffer is busy
+    * due to GPU activity, that activity cannot depend on the buffer's
+    * uninitialized contents, so we can also treat the write as being
+    * unsynchronized.
+    */
+   if ((usage & PIPE_TRANSFER_WRITE) &&
+       !util_ranges_intersect(&buf->valid_buffer_range, box->x, box->x + box->width))
+      usage |= PIPE_TRANSFER_DISCARD_RANGE | PIPE_TRANSFER_UNSYNCHRONIZED;
+
+   if (usage & PIPE_TRANSFER_PERSISTENT)
+      usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
+
    if (buf->domain == NOUVEAU_BO_VRAM) {
       if (usage & NOUVEAU_TRANSFER_DISCARD) {
          /* Set up a staging area for the user to write to. It will be copied
@@ -492,15 +512,21 @@ nouveau_buffer_transfer_flush_region(struct pipe_context *pipe,
                                      const struct pipe_box *box)
 {
    struct nouveau_transfer *tx = nouveau_transfer(transfer);
+   struct nv04_resource *buf = nv04_resource(transfer->resource);
+
    if (tx->map)
       nouveau_transfer_write(nouveau_context(pipe), tx, box->x, box->width);
+
+   util_range_add(&buf->valid_buffer_range,
+                  tx->base.box.x + box->x,
+                  tx->base.box.x + box->x + box->width);
 }
 
 /* Unmap stage of the transfer. If it was a WRITE transfer and the map that
  * was returned was not the real resource's data, this needs to transfer the
  * data back to the resource.
* - * Also marks vbo/cb dirty if the buffer's binding + * Also marks vbo dirty based on the buffer's binding */ static void nouveau_buffer_transfer_unmap(struct pipe_context *pipe, @@ -519,9 +545,10 @@ nouveau_buffer_transfer_unmap(struct pipe_context *pipe, /* make sure we invalidate dedicated caches */ if (bind & (PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER)) nv->vbo_dirty = TRUE; - if (bind & (PIPE_BIND_CONSTANT_BUFFER)) - nv->cb_dirty = TRUE; } + + util_range_add(&buf->valid_buffer_range, + tx->base.box.x, tx->base.box.x + tx->base.box.width); } if (!tx->bo && (tx->base.usage & PIPE_TRANSFER_WRITE)) @@ -562,6 +589,8 @@ nouveau_copy_buffer(struct nouveau_context *nv, &dst->base, 0, dstx, 0, 0, &src->base, 0, &src_box); } + + util_range_add(&dst->valid_buffer_range, dstx, dstx + size); } @@ -621,12 +650,14 @@ nouveau_buffer_create(struct pipe_screen *pscreen, pipe_reference_init(&buffer->base.reference, 1); buffer->base.screen = pscreen; - if (buffer->base.bind & - (screen->vidmem_bindings & screen->sysmem_bindings)) { + if (buffer->base.flags & (PIPE_RESOURCE_FLAG_MAP_PERSISTENT | + PIPE_RESOURCE_FLAG_MAP_COHERENT)) { + buffer->domain = NOUVEAU_BO_GART; + } else if (buffer->base.bind & + (screen->vidmem_bindings & screen->sysmem_bindings)) { switch (buffer->base.usage) { case PIPE_USAGE_DEFAULT: case PIPE_USAGE_IMMUTABLE: - case PIPE_USAGE_STATIC: buffer->domain = NOUVEAU_BO_VRAM; break; case PIPE_USAGE_DYNAMIC: @@ -660,6 +691,8 @@ nouveau_buffer_create(struct pipe_screen *pscreen, NOUVEAU_DRV_STAT(screen, buf_obj_current_count, 1); + util_range_init(&buffer->valid_buffer_range); + return &buffer->base; fail: @@ -691,6 +724,9 @@ nouveau_user_buffer_create(struct pipe_screen *pscreen, void *ptr, buffer->data = ptr; buffer->status = NOUVEAU_BUFFER_STATUS_USER_MEMORY; + util_range_init(&buffer->valid_buffer_range); + util_range_add(&buffer->valid_buffer_range, 0, bytes); + return &buffer->base; }
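
Note on the technique: the core of this patch is cheap valid-range bookkeeping. The driver records which span of the buffer has ever been written, and a map-time check promotes any write that lands entirely outside that span to an unsynchronized discard. The following standalone C sketch illustrates the idea; the names here (struct valid_range, the XFER_* bits, promote_write_usage) are simplified stand-ins for Mesa's util_range helpers and PIPE_TRANSFER_* flags, not the real definitions.

#include <assert.h>
#include <stdbool.h>

/* Stand-in for util_range: tracks the single contiguous span of the
 * buffer that has ever been written (the "valid" range). */
struct valid_range {
   unsigned start; /* inclusive */
   unsigned end;   /* exclusive; start >= end means nothing is valid yet */
};

static void
range_set_empty(struct valid_range *r)
{
   r->start = ~0u;
   r->end = 0;
}

static void
range_add(struct valid_range *r, unsigned start, unsigned end)
{
   if (start < r->start)
      r->start = start;
   if (end > r->end)
      r->end = end;
}

static bool
ranges_intersect(const struct valid_range *r, unsigned start, unsigned end)
{
   return r->start < r->end && start < r->end && end > r->start;
}

/* Stand-ins for the PIPE_TRANSFER_* bits used in the patch. */
enum {
   XFER_WRITE          = 1 << 0,
   XFER_DISCARD_RANGE  = 1 << 1,
   XFER_UNSYNCHRONIZED = 1 << 2,
};

/* The map-time promotion: a write that cannot touch any previously
 * written data needs neither the old contents nor any synchronization
 * against pending GPU work. */
static unsigned
promote_write_usage(const struct valid_range *valid, unsigned usage,
                    unsigned box_x, unsigned box_width)
{
   if ((usage & XFER_WRITE) &&
       !ranges_intersect(valid, box_x, box_x + box_width))
      usage |= XFER_DISCARD_RANGE | XFER_UNSYNCHRONIZED;
   return usage;
}

int
main(void)
{
   struct valid_range vr;
   range_set_empty(&vr);

   /* First write to [0, 64): nothing valid yet, so it is promoted. */
   unsigned usage = promote_write_usage(&vr, XFER_WRITE, 0, 64);
   assert(usage & XFER_UNSYNCHRONIZED);
   range_add(&vr, 0, 64);

   /* Second write to [32, 96): overlaps valid data, so it must sync. */
   usage = promote_write_usage(&vr, XFER_WRITE, 32, 64);
   assert(!(usage & XFER_UNSYNCHRONIZED));
   return 0;
}

Because the tracked range is a single min/max span, it over-approximates the set of written bytes: a write into a hole between two earlier writes may test as intersecting and simply skip the optimization. The approximation errs only toward extra synchronization, never toward skipping a needed one.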
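The other change worth calling out is buffer placement for persistent and coherent mappings (the ARB_buffer_storage style of mapping). Such a map has to stay valid while the GPU is using the buffer, which does not fit the staging-copy path that VRAM maps go through in nouveau_buffer_transfer_map() above, so these buffers are pinned to CPU-visible GART memory at creation time. A compact sketch of that placement rule, with the enum values standing in for the NOUVEAU_BO_* domains and PIPE_RESOURCE_FLAG_MAP_* flags from the patch:

#include <stdio.h>

/* Stand-ins for the domain and resource-flag constants in the patch. */
enum domain { DOMAIN_VRAM, DOMAIN_GART };
enum {
   RES_FLAG_MAP_PERSISTENT = 1 << 0,
   RES_FLAG_MAP_COHERENT   = 1 << 1,
};

/* Mirrors the new branch in nouveau_buffer_create(): a buffer that may
 * be mapped persistently or coherently is forced into GART; everything
 * else keeps whatever domain the usage/bind heuristics picked. */
static enum domain
pick_domain(unsigned res_flags, enum domain heuristic_choice)
{
   if (res_flags & (RES_FLAG_MAP_PERSISTENT | RES_FLAG_MAP_COHERENT))
      return DOMAIN_GART;
   return heuristic_choice;
}

int
main(void)
{
   enum domain d = pick_domain(RES_FLAG_MAP_PERSISTENT, DOMAIN_VRAM);
   printf("%s\n", d == DOMAIN_GART ? "GART" : "VRAM"); /* prints GART */
   return 0;
}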