X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fgallium%2Fdrivers%2Fnouveau%2Fnouveau_buffer.c;h=97305d993ff99941b154cb6c6b2896e45e4d6222;hb=d1d2bb8c07d1e20d654f558ea4750aeb09d34ff9;hp=67e181e803a11c50806cd21870888c4052161cec;hpb=a2a1a5805fd617e7f3cc8be44dd79b50da07ebb9;p=mesa.git diff --git a/src/gallium/drivers/nouveau/nouveau_buffer.c b/src/gallium/drivers/nouveau/nouveau_buffer.c index 67e181e803a..97305d993ff 100644 --- a/src/gallium/drivers/nouveau/nouveau_buffer.c +++ b/src/gallium/drivers/nouveau/nouveau_buffer.c @@ -11,8 +11,6 @@ #include "nouveau_buffer.h" #include "nouveau_mm.h" -#define NOUVEAU_TRANSFER_PUSHBUF_THRESHOLD 192 - struct nouveau_transfer { struct pipe_transfer base; @@ -80,7 +78,12 @@ release_allocation(struct nouveau_mm_allocation **mm, inline void nouveau_buffer_release_gpu_storage(struct nv04_resource *buf) { - nouveau_bo_ref(NULL, &buf->bo); + if (buf->fence && buf->fence->state < NOUVEAU_FENCE_STATE_FLUSHED) { + nouveau_fence_work(buf->fence, nouveau_fence_unref_bo, buf->bo); + buf->bo = NULL; + } else { + nouveau_bo_ref(NULL, &buf->bo); + } if (buf->mm) release_allocation(&buf->mm, buf->fence); @@ -142,7 +145,7 @@ nouveau_transfer_staging(struct nouveau_context *nv, if (!nv->push_data) permit_pb = false; - if ((size <= NOUVEAU_TRANSFER_PUSHBUF_THRESHOLD) && permit_pb) { + if ((size <= nv->screen->transfer_pushbuf_threshold) && permit_pb) { tx->map = align_malloc(size, NOUVEAU_MIN_BUFFER_MAP_ALIGN); if (tx->map) tx->map += adj; @@ -158,7 +161,7 @@ nouveau_transfer_staging(struct nouveau_context *nv, return tx->map; } -/* Copies data from the resource into the the transfer's temporary GART +/* Copies data from the resource into the transfer's temporary GART * buffer. Also updates buf->data if present. * * Maybe just migrate to GART right away if we actually need to do this. */ @@ -206,8 +209,8 @@ nouveau_transfer_write(struct nouveau_context *nv, struct nouveau_transfer *tx, nv->copy_data(nv, buf->bo, buf->offset + base, buf->domain, tx->bo, tx->offset + offset, NOUVEAU_BO_GART, size); else - if ((buf->base.bind & PIPE_BIND_CONSTANT_BUFFER) && nv->push_cb && can_cb) - nv->push_cb(nv, buf->bo, buf->domain, buf->offset, buf->base.width0, + if (nv->push_cb && can_cb) + nv->push_cb(nv, buf, base, size / 4, (const uint32_t *)data); else nv->push_data(nv, buf->bo, buf->offset + base, buf->domain, size, data); @@ -220,21 +223,22 @@ nouveau_transfer_write(struct nouveau_context *nv, struct nouveau_transfer *tx, * for write/read by waiting on the buffer's relevant fences. 
  */
 static inline bool
-nouveau_buffer_sync(struct nv04_resource *buf, unsigned rw)
+nouveau_buffer_sync(struct nouveau_context *nv,
+                    struct nv04_resource *buf, unsigned rw)
 {
    if (rw == PIPE_TRANSFER_READ) {
       if (!buf->fence_wr)
          return true;
       NOUVEAU_DRV_STAT_RES(buf, buf_non_kernel_fence_sync_count,
                            !nouveau_fence_signalled(buf->fence_wr));
-      if (!nouveau_fence_wait(buf->fence_wr))
+      if (!nouveau_fence_wait(buf->fence_wr, &nv->debug))
          return false;
    } else {
       if (!buf->fence)
          return true;
       NOUVEAU_DRV_STAT_RES(buf, buf_non_kernel_fence_sync_count,
                            !nouveau_fence_signalled(buf->fence));
-      if (!nouveau_fence_wait(buf->fence))
+      if (!nouveau_fence_wait(buf->fence, &nv->debug))
          return false;
 
       nouveau_fence_ref(NULL, &buf->fence);
@@ -281,7 +285,8 @@ nouveau_buffer_transfer_del(struct nouveau_context *nv,
 {
    if (tx->map) {
       if (likely(tx->bo)) {
-         nouveau_bo_ref(NULL, &tx->bo);
+         nouveau_fence_work(nv->screen->fence.current,
+                            nouveau_fence_unref_bo, tx->bo);
          if (tx->mm)
             release_allocation(&tx->mm, nv->screen->fence.current);
       } else {
@@ -399,9 +404,6 @@ nouveau_buffer_transfer_map(struct pipe_context *pipe,
        !util_ranges_intersect(&buf->valid_buffer_range, box->x, box->x + box->width))
       usage |= PIPE_TRANSFER_DISCARD_RANGE | PIPE_TRANSFER_UNSYNCHRONIZED;
 
-   if (usage & PIPE_TRANSFER_PERSISTENT)
-      usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
-
    if (buf->domain == NOUVEAU_BO_VRAM) {
       if (usage & NOUVEAU_TRANSFER_DISCARD) {
          /* Set up a staging area for the user to write to. It will be copied
@@ -469,10 +471,11 @@ nouveau_buffer_transfer_map(struct pipe_context *pipe,
     * complete its operation, or set up a staging area to perform our work in.
     */
    if (nouveau_buffer_busy(buf, usage & PIPE_TRANSFER_READ_WRITE)) {
-      if (unlikely(usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE)) {
+      if (unlikely(usage & (PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE |
+                            PIPE_TRANSFER_PERSISTENT))) {
          /* Discarding was not possible, must sync because
           * subsequent transfers might use UNSYNCHRONIZED. */
-         nouveau_buffer_sync(buf, usage & PIPE_TRANSFER_READ_WRITE);
+         nouveau_buffer_sync(nv, buf, usage & PIPE_TRANSFER_READ_WRITE);
       } else
       if (usage & PIPE_TRANSFER_DISCARD_RANGE) {
          /* The whole range is being discarded, so it doesn't matter what was
@@ -484,7 +487,7 @@ nouveau_buffer_transfer_map(struct pipe_context *pipe,
          if (usage & PIPE_TRANSFER_DONTBLOCK)
             map = NULL;
          else
-            nouveau_buffer_sync(buf, usage & PIPE_TRANSFER_READ_WRITE);
+            nouveau_buffer_sync(nv, buf, usage & PIPE_TRANSFER_READ_WRITE);
       } else {
          /* It is expected that the returned buffer be a representation of the
           * data in question, so we must copy it over from the buffer.
           */
@@ -532,8 +535,13 @@ nouveau_buffer_transfer_unmap(struct pipe_context *pipe,
    struct nv04_resource *buf = nv04_resource(transfer->resource);
 
    if (tx->base.usage & PIPE_TRANSFER_WRITE) {
-      if (!(tx->base.usage & PIPE_TRANSFER_FLUSH_EXPLICIT) && tx->map)
-         nouveau_transfer_write(nv, tx, 0, tx->base.box.width);
+      if (!(tx->base.usage & PIPE_TRANSFER_FLUSH_EXPLICIT)) {
+         if (tx->map)
+            nouveau_transfer_write(nv, tx, 0, tx->base.box.width);
+
+         util_range_add(&buf->valid_buffer_range,
+                        tx->base.box.x, tx->base.box.x + tx->base.box.width);
+      }
 
       if (likely(buf->domain)) {
          const uint8_t bind = buf->base.bind;
@@ -541,9 +549,6 @@ nouveau_buffer_transfer_unmap(struct pipe_context *pipe,
          if (bind & (PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER))
             nv->vbo_dirty = true;
       }
-
-      util_range_add(&buf->valid_buffer_range,
-                     tx->base.box.x, tx->base.box.x + tx->base.box.width);
    }
 
    if (!tx->bo && (tx->base.usage & PIPE_TRANSFER_WRITE))
@@ -607,7 +612,7 @@ nouveau_resource_map_offset(struct nouveau_context *nv,
    if (res->mm) {
       unsigned rw;
       rw = (flags & NOUVEAU_BO_WR) ? PIPE_TRANSFER_WRITE : PIPE_TRANSFER_READ;
-      nouveau_buffer_sync(res, rw);
+      nouveau_buffer_sync(nv, res, rw);
       if (nouveau_bo_map(res->bo, 0, NULL))
          return NULL;
    } else {
@@ -625,7 +630,6 @@
    nouveau_buffer_transfer_map,          /* transfer_map */
    nouveau_buffer_transfer_flush_region, /* transfer_flush_region */
    nouveau_buffer_transfer_unmap,        /* transfer_unmap */
-   u_default_transfer_inline_write       /* transfer_inline_write */
 };
 
 struct pipe_resource *
@@ -648,8 +652,8 @@ nouveau_buffer_create(struct pipe_screen *pscreen,
    if (buffer->base.flags & (PIPE_RESOURCE_FLAG_MAP_PERSISTENT |
                              PIPE_RESOURCE_FLAG_MAP_COHERENT)) {
       buffer->domain = NOUVEAU_BO_GART;
-   } else if (buffer->base.bind &
-              (screen->vidmem_bindings & screen->sysmem_bindings)) {
+   } else if (buffer->base.bind == 0 || (buffer->base.bind &
+              (screen->vidmem_bindings & screen->sysmem_bindings))) {
       switch (buffer->base.usage) {
       case PIPE_USAGE_DEFAULT:
       case PIPE_USAGE_IMMUTABLE:
@@ -676,6 +680,7 @@ nouveau_buffer_create(struct pipe_screen *pscreen,
       if (buffer->base.bind & screen->sysmem_bindings)
          buffer->domain = NOUVEAU_BO_GART;
    }
+
    ret = nouveau_buffer_allocate(screen, buffer, buffer->domain);
 
    if (ret == false)
@@ -780,7 +785,7 @@ nouveau_buffer_migrate(struct nouveau_context *nv,
       nv->copy_data(nv, buf->bo, buf->offset, new_domain,
                     bo, offset, old_domain, buf->base.width0);
 
-      nouveau_bo_ref(NULL, &bo);
+      nouveau_fence_work(screen->fence.current, nouveau_fence_unref_bo, bo);
       if (mm)
          release_allocation(&mm, screen->fence.current);
    } else
@@ -830,6 +835,39 @@ nouveau_user_buffer_upload(struct nouveau_context *nv,
    return true;
 }
 
+/* Invalidate underlying buffer storage, reset fences, reallocate to non-busy
+ * buffer.
+ */
+void
+nouveau_buffer_invalidate(struct pipe_context *pipe,
+                          struct pipe_resource *resource)
+{
+   struct nouveau_context *nv = nouveau_context(pipe);
+   struct nv04_resource *buf = nv04_resource(resource);
+   int ref = buf->base.reference.count - 1;
+
+   /* Shared buffers shouldn't get reallocated */
+   if (unlikely(buf->base.bind & PIPE_BIND_SHARED))
+      return;
+
+   /* We can't touch persistent/coherent buffers */
+   if (buf->base.flags & (PIPE_RESOURCE_FLAG_MAP_PERSISTENT |
+                          PIPE_RESOURCE_FLAG_MAP_COHERENT))
+      return;
+
+   /* If the buffer is sub-allocated and not currently being written, just
+    * wipe the valid buffer range. Otherwise we have to create fresh
+    * storage. (We don't keep track of fences for non-sub-allocated BO's.)
+ */ + if (buf->mm && !nouveau_buffer_busy(buf, PIPE_TRANSFER_WRITE)) { + util_range_set_empty(&buf->valid_buffer_range); + } else { + nouveau_buffer_reallocate(nv->screen, buf, buf->domain); + if (ref > 0) /* any references inside context possible ? */ + nv->invalidate_resource_storage(nv, &buf->base, ref); + } +} + /* Scratch data allocation. */