X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;ds=inline;f=src%2Fgallium%2Fdrivers%2Fnouveau%2Fnouveau_buffer.c;h=97305d993ff99941b154cb6c6b2896e45e4d6222;hb=5586411de481fadd875194273000ba75437fb01e;hp=5b0b93b4a62fdc2a39b4d5c840bcee7e342c2a40;hpb=c32114460dbb7f33885c181a0d7dee07b15b8751;p=mesa.git diff --git a/src/gallium/drivers/nouveau/nouveau_buffer.c b/src/gallium/drivers/nouveau/nouveau_buffer.c index 5b0b93b4a62..97305d993ff 100644 --- a/src/gallium/drivers/nouveau/nouveau_buffer.c +++ b/src/gallium/drivers/nouveau/nouveau_buffer.c @@ -11,8 +11,6 @@ #include "nouveau_buffer.h" #include "nouveau_mm.h" -#define NOUVEAU_TRANSFER_PUSHBUF_THRESHOLD 192 - struct nouveau_transfer { struct pipe_transfer base; @@ -22,13 +20,13 @@ struct nouveau_transfer { uint32_t offset; }; -static INLINE struct nouveau_transfer * +static inline struct nouveau_transfer * nouveau_transfer(struct pipe_transfer *transfer) { return (struct nouveau_transfer *)transfer; } -static INLINE boolean +static inline bool nouveau_buffer_malloc(struct nv04_resource *buf) { if (!buf->data) @@ -36,16 +34,11 @@ nouveau_buffer_malloc(struct nv04_resource *buf) return !!buf->data; } -static INLINE boolean +static inline bool nouveau_buffer_allocate(struct nouveau_screen *screen, struct nv04_resource *buf, unsigned domain) { - uint32_t size = buf->base.width0; - - if (buf->base.bind & (PIPE_BIND_CONSTANT_BUFFER | - PIPE_BIND_COMPUTE_RESOURCE | - PIPE_BIND_SHADER_RESOURCE)) - size = align(size, 0x100); + uint32_t size = align(buf->base.width0, 0x100); if (domain == NOUVEAU_BO_VRAM) { buf->mm = nouveau_mm_allocate(screen->mm_VRAM, size, @@ -58,21 +51,23 @@ nouveau_buffer_allocate(struct nouveau_screen *screen, buf->mm = nouveau_mm_allocate(screen->mm_GART, size, &buf->bo, &buf->offset); if (!buf->bo) - return FALSE; + return false; NOUVEAU_DRV_STAT(screen, buf_obj_current_bytes_sys, buf->base.width0); } else { assert(domain == 0); if (!nouveau_buffer_malloc(buf)) - return FALSE; + return false; } buf->domain = domain; if (buf->bo) buf->address = buf->bo->offset + buf->offset; - return TRUE; + util_range_set_empty(&buf->valid_buffer_range); + + return true; } -static INLINE void +static inline void release_allocation(struct nouveau_mm_allocation **mm, struct nouveau_fence *fence) { @@ -80,10 +75,15 @@ release_allocation(struct nouveau_mm_allocation **mm, (*mm) = NULL; } -INLINE void +inline void nouveau_buffer_release_gpu_storage(struct nv04_resource *buf) { - nouveau_bo_ref(NULL, &buf->bo); + if (buf->fence && buf->fence->state < NOUVEAU_FENCE_STATE_FLUSHED) { + nouveau_fence_work(buf->fence, nouveau_fence_unref_bo, buf->bo); + buf->bo = NULL; + } else { + nouveau_bo_ref(NULL, &buf->bo); + } if (buf->mm) release_allocation(&buf->mm, buf->fence); @@ -96,7 +96,7 @@ nouveau_buffer_release_gpu_storage(struct nv04_resource *buf) buf->domain = 0; } -static INLINE boolean +static inline bool nouveau_buffer_reallocate(struct nouveau_screen *screen, struct nv04_resource *buf, unsigned domain) { @@ -124,6 +124,8 @@ nouveau_buffer_destroy(struct pipe_screen *pscreen, nouveau_fence_ref(NULL, &res->fence); nouveau_fence_ref(NULL, &res->fence_wr); + util_range_destroy(&res->valid_buffer_range); + FREE(res); NOUVEAU_DRV_STAT(nouveau_screen(pscreen), buf_obj_current_count, -1); @@ -135,15 +137,15 @@ nouveau_buffer_destroy(struct pipe_screen *pscreen, */ static uint8_t * nouveau_transfer_staging(struct nouveau_context *nv, - struct nouveau_transfer *tx, boolean permit_pb) + struct nouveau_transfer *tx, bool permit_pb) { const unsigned adj 
= tx->base.box.x & NOUVEAU_MIN_BUFFER_MAP_ALIGN_MASK; const unsigned size = align(tx->base.box.width, 4) + adj; if (!nv->push_data) - permit_pb = FALSE; + permit_pb = false; - if ((size <= NOUVEAU_TRANSFER_PUSHBUF_THRESHOLD) && permit_pb) { + if ((size <= nv->screen->transfer_pushbuf_threshold) && permit_pb) { tx->map = align_malloc(size, NOUVEAU_MIN_BUFFER_MAP_ALIGN); if (tx->map) tx->map += adj; @@ -159,11 +161,11 @@ nouveau_transfer_staging(struct nouveau_context *nv, return tx->map; } -/* Copies data from the resource into the the transfer's temporary GART +/* Copies data from the resource into the transfer's temporary GART * buffer. Also updates buf->data if present. * * Maybe just migrate to GART right away if we actually need to do this. */ -static boolean +static bool nouveau_transfer_read(struct nouveau_context *nv, struct nouveau_transfer *tx) { struct nv04_resource *buf = nv04_resource(tx->base.resource); @@ -176,12 +178,12 @@ nouveau_transfer_read(struct nouveau_context *nv, struct nouveau_transfer *tx) buf->bo, buf->offset + base, buf->domain, size); if (nouveau_bo_wait(tx->bo, NOUVEAU_BO_RD, nv->client)) - return FALSE; + return false; if (buf->data) memcpy(buf->data + base, tx->map, size); - return TRUE; + return true; } static void @@ -191,7 +193,7 @@ nouveau_transfer_write(struct nouveau_context *nv, struct nouveau_transfer *tx, struct nv04_resource *buf = nv04_resource(tx->base.resource); uint8_t *data = tx->map + offset; const unsigned base = tx->base.box.x + offset; - const boolean can_cb = !((base | size) & 3); + const bool can_cb = !((base | size) & 3); if (buf->data) memcpy(data, buf->data + base, size); @@ -207,8 +209,8 @@ nouveau_transfer_write(struct nouveau_context *nv, struct nouveau_transfer *tx, nv->copy_data(nv, buf->bo, buf->offset + base, buf->domain, tx->bo, tx->offset + offset, NOUVEAU_BO_GART, size); else - if ((buf->base.bind & PIPE_BIND_CONSTANT_BUFFER) && nv->push_cb && can_cb) - nv->push_cb(nv, buf->bo, buf->domain, buf->offset, buf->base.width0, + if (nv->push_cb && can_cb) + nv->push_cb(nv, buf, base, size / 4, (const uint32_t *)data); else nv->push_data(nv, buf->bo, buf->offset + base, buf->domain, size, data); @@ -220,32 +222,33 @@ nouveau_transfer_write(struct nouveau_context *nv, struct nouveau_transfer *tx, /* Does a CPU wait for the buffer's backing data to become reliably accessible * for write/read by waiting on the buffer's relevant fences. 
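 *
 * A read only has to wait on the last writer (fence_wr); a write waits on
 * the last access of any kind (fence) and then drops both fence references,
 * since the buffer is idle once the wait returns.
 *
 * Typical call site (a sketch mirroring nouveau_buffer_transfer_map below;
 * rw is the transfer usage masked to its read/write bits):
 *
 *    if (nouveau_buffer_busy(buf, usage & PIPE_TRANSFER_READ_WRITE))
 *       nouveau_buffer_sync(nv, buf, usage & PIPE_TRANSFER_READ_WRITE);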
*/ -static INLINE boolean -nouveau_buffer_sync(struct nv04_resource *buf, unsigned rw) +static inline bool +nouveau_buffer_sync(struct nouveau_context *nv, + struct nv04_resource *buf, unsigned rw) { if (rw == PIPE_TRANSFER_READ) { if (!buf->fence_wr) - return TRUE; + return true; NOUVEAU_DRV_STAT_RES(buf, buf_non_kernel_fence_sync_count, !nouveau_fence_signalled(buf->fence_wr)); - if (!nouveau_fence_wait(buf->fence_wr)) - return FALSE; + if (!nouveau_fence_wait(buf->fence_wr, &nv->debug)) + return false; } else { if (!buf->fence) - return TRUE; + return true; NOUVEAU_DRV_STAT_RES(buf, buf_non_kernel_fence_sync_count, !nouveau_fence_signalled(buf->fence)); - if (!nouveau_fence_wait(buf->fence)) - return FALSE; + if (!nouveau_fence_wait(buf->fence, &nv->debug)) + return false; nouveau_fence_ref(NULL, &buf->fence); } nouveau_fence_ref(NULL, &buf->fence_wr); - return TRUE; + return true; } -static INLINE boolean +static inline bool nouveau_buffer_busy(struct nv04_resource *buf, unsigned rw) { if (rw == PIPE_TRANSFER_READ) @@ -254,7 +257,7 @@ nouveau_buffer_busy(struct nv04_resource *buf, unsigned rw) return (buf->fence && !nouveau_fence_signalled(buf->fence)); } -static INLINE void +static inline void nouveau_buffer_transfer_init(struct nouveau_transfer *tx, struct pipe_resource *resource, const struct pipe_box *box, @@ -276,13 +279,14 @@ nouveau_buffer_transfer_init(struct nouveau_transfer *tx, tx->map = NULL; } -static INLINE void +static inline void nouveau_buffer_transfer_del(struct nouveau_context *nv, struct nouveau_transfer *tx) { if (tx->map) { if (likely(tx->bo)) { - nouveau_bo_ref(NULL, &tx->bo); + nouveau_fence_work(nv->screen->fence.current, + nouveau_fence_unref_bo, tx->bo); if (tx->mm) release_allocation(&tx->mm, nv->screen->fence.current); } else { @@ -293,11 +297,11 @@ nouveau_buffer_transfer_del(struct nouveau_context *nv, } /* Creates a cache in system memory of the buffer data. */ -static boolean +static bool nouveau_buffer_cache(struct nouveau_context *nv, struct nv04_resource *buf) { struct nouveau_transfer tx; - boolean ret; + bool ret; tx.base.resource = &buf->base; tx.base.box.x = 0; tx.base.box.width = buf->base.width0; @@ -306,13 +310,13 @@ nouveau_buffer_cache(struct nouveau_context *nv, struct nv04_resource *buf) if (!buf->data) if (!nouveau_buffer_malloc(buf)) - return FALSE; + return false; if (!(buf->status & NOUVEAU_BUFFER_STATUS_DIRTY)) - return TRUE; + return true; nv->stats.buf_cache_count++; - if (!nouveau_transfer_staging(nv, &tx, FALSE)) - return FALSE; + if (!nouveau_transfer_staging(nv, &tx, false)) + return false; ret = nouveau_transfer_read(nv, &tx); if (ret) { @@ -331,13 +335,15 @@ nouveau_buffer_cache(struct nouveau_context *nv, struct nv04_resource *buf) * resource. This can be useful if we would otherwise have to wait for a read * operation to complete on this data. */ -static INLINE boolean +static inline bool nouveau_buffer_should_discard(struct nv04_resource *buf, unsigned usage) { if (!(usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE)) - return FALSE; + return false; if (unlikely(buf->base.bind & PIPE_BIND_SHARED)) - return FALSE; + return false; + if (unlikely(usage & PIPE_TRANSFER_PERSISTENT)) + return false; return buf->mm && nouveau_buffer_busy(buf, PIPE_TRANSFER_WRITE); } @@ -387,13 +393,24 @@ nouveau_buffer_transfer_map(struct pipe_context *pipe, if (usage & PIPE_TRANSFER_WRITE) NOUVEAU_DRV_STAT(nv->screen, buf_transfers_wr, 1); + /* If we are trying to write to an uninitialized range, the user shouldn't + * care what was there before. 
So we can treat the write as if the target + * range were being discarded. Furthermore, since we know that even if this + * buffer is busy due to GPU activity, because the contents were + * uninitialized, the GPU can't care what was there, and so we can treat + * the write as being unsynchronized. + */ + if ((usage & PIPE_TRANSFER_WRITE) && + !util_ranges_intersect(&buf->valid_buffer_range, box->x, box->x + box->width)) + usage |= PIPE_TRANSFER_DISCARD_RANGE | PIPE_TRANSFER_UNSYNCHRONIZED; + if (buf->domain == NOUVEAU_BO_VRAM) { if (usage & NOUVEAU_TRANSFER_DISCARD) { /* Set up a staging area for the user to write to. It will be copied * back into VRAM on unmap. */ if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) buf->status &= NOUVEAU_BUFFER_STATUS_REALLOC_MASK; - nouveau_transfer_staging(nv, tx, TRUE); + nouveau_transfer_staging(nv, tx, true); } else { if (buf->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING) { /* The GPU is currently writing to this buffer. Copy its current @@ -404,13 +421,13 @@ nouveau_buffer_transfer_map(struct pipe_context *pipe, align_free(buf->data); buf->data = NULL; } - nouveau_transfer_staging(nv, tx, FALSE); + nouveau_transfer_staging(nv, tx, false); nouveau_transfer_read(nv, tx); } else { /* The buffer is currently idle. Create a staging area for writes, * and make sure that the cached data is up-to-date. */ if (usage & PIPE_TRANSFER_WRITE) - nouveau_transfer_staging(nv, tx, TRUE); + nouveau_transfer_staging(nv, tx, true); if (!buf->data) nouveau_buffer_cache(nv, buf); } @@ -454,26 +471,27 @@ nouveau_buffer_transfer_map(struct pipe_context *pipe, * complete its operation, or set up a staging area to perform our work in. */ if (nouveau_buffer_busy(buf, usage & PIPE_TRANSFER_READ_WRITE)) { - if (unlikely(usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE)) { + if (unlikely(usage & (PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE | + PIPE_TRANSFER_PERSISTENT))) { /* Discarding was not possible, must sync because * subsequent transfers might use UNSYNCHRONIZED. */ - nouveau_buffer_sync(buf, usage & PIPE_TRANSFER_READ_WRITE); + nouveau_buffer_sync(nv, buf, usage & PIPE_TRANSFER_READ_WRITE); } else if (usage & PIPE_TRANSFER_DISCARD_RANGE) { /* The whole range is being discarded, so it doesn't matter what was * there before. No need to copy anything over. */ - nouveau_transfer_staging(nv, tx, TRUE); + nouveau_transfer_staging(nv, tx, true); map = tx->map; } else if (nouveau_buffer_busy(buf, PIPE_TRANSFER_READ)) { if (usage & PIPE_TRANSFER_DONTBLOCK) map = NULL; else - nouveau_buffer_sync(buf, usage & PIPE_TRANSFER_READ_WRITE); + nouveau_buffer_sync(nv, buf, usage & PIPE_TRANSFER_READ_WRITE); } else { /* It is expected that the returned buffer be a representation of the * data in question, so we must copy it over from the buffer. */ - nouveau_transfer_staging(nv, tx, TRUE); + nouveau_transfer_staging(nv, tx, true); if (tx->map) memcpy(tx->map, map, box->width); map = tx->map; @@ -492,15 +510,21 @@ nouveau_buffer_transfer_flush_region(struct pipe_context *pipe, const struct pipe_box *box) { struct nouveau_transfer *tx = nouveau_transfer(transfer); + struct nv04_resource *buf = nv04_resource(transfer->resource); + if (tx->map) nouveau_transfer_write(nouveau_context(pipe), tx, box->x, box->width); + + util_range_add(&buf->valid_buffer_range, + tx->base.box.x + box->x, + tx->base.box.x + box->x + box->width); } /* Unmap stage of the transfer. If it was a WRITE transfer and the map that * was returned was not the real resource's data, this needs to transfer the * data back to the resource. 
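 * Ranges the user already flushed with transfer_flush_region are not
 * written back again: the copy below only runs when
 * PIPE_TRANSFER_FLUSH_EXPLICIT was not requested. Either way the written
 * box ends up recorded in valid_buffer_range.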
* - * Also marks vbo/cb dirty if the buffer's binding + * Also marks vbo dirty based on the buffer's binding */ static void nouveau_buffer_transfer_unmap(struct pipe_context *pipe, @@ -511,16 +535,19 @@ nouveau_buffer_transfer_unmap(struct pipe_context *pipe, struct nv04_resource *buf = nv04_resource(transfer->resource); if (tx->base.usage & PIPE_TRANSFER_WRITE) { - if (!(tx->base.usage & PIPE_TRANSFER_FLUSH_EXPLICIT) && tx->map) - nouveau_transfer_write(nv, tx, 0, tx->base.box.width); + if (!(tx->base.usage & PIPE_TRANSFER_FLUSH_EXPLICIT)) { + if (tx->map) + nouveau_transfer_write(nv, tx, 0, tx->base.box.width); + + util_range_add(&buf->valid_buffer_range, + tx->base.box.x, tx->base.box.x + tx->base.box.width); + } if (likely(buf->domain)) { const uint8_t bind = buf->base.bind; /* make sure we invalidate dedicated caches */ if (bind & (PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER)) - nv->vbo_dirty = TRUE; - if (bind & (PIPE_BIND_CONSTANT_BUFFER)) - nv->cb_dirty = TRUE; + nv->vbo_dirty = true; } } @@ -562,6 +589,8 @@ nouveau_copy_buffer(struct nouveau_context *nv, &dst->base, 0, dstx, 0, 0, &src->base, 0, &src_box); } + + util_range_add(&dst->valid_buffer_range, dstx, dstx + size); } @@ -583,7 +612,7 @@ nouveau_resource_map_offset(struct nouveau_context *nv, if (res->mm) { unsigned rw; rw = (flags & NOUVEAU_BO_WR) ? PIPE_TRANSFER_WRITE : PIPE_TRANSFER_READ; - nouveau_buffer_sync(res, rw); + nouveau_buffer_sync(nv, res, rw); if (nouveau_bo_map(res->bo, 0, NULL)) return NULL; } else { @@ -601,7 +630,6 @@ const struct u_resource_vtbl nouveau_buffer_vtbl = nouveau_buffer_transfer_map, /* transfer_map */ nouveau_buffer_transfer_flush_region, /* transfer_flush_region */ nouveau_buffer_transfer_unmap, /* transfer_unmap */ - u_default_transfer_inline_write /* transfer_inline_write */ }; struct pipe_resource * @@ -610,7 +638,7 @@ nouveau_buffer_create(struct pipe_screen *pscreen, { struct nouveau_screen *screen = nouveau_screen(pscreen); struct nv04_resource *buffer; - boolean ret; + bool ret; buffer = CALLOC_STRUCT(nv04_resource); if (!buffer) @@ -621,18 +649,21 @@ nouveau_buffer_create(struct pipe_screen *pscreen, pipe_reference_init(&buffer->base.reference, 1); buffer->base.screen = pscreen; - if (buffer->base.bind & - (screen->vidmem_bindings & screen->sysmem_bindings)) { + if (buffer->base.flags & (PIPE_RESOURCE_FLAG_MAP_PERSISTENT | + PIPE_RESOURCE_FLAG_MAP_COHERENT)) { + buffer->domain = NOUVEAU_BO_GART; + } else if (buffer->base.bind == 0 || (buffer->base.bind & + (screen->vidmem_bindings & screen->sysmem_bindings))) { switch (buffer->base.usage) { case PIPE_USAGE_DEFAULT: case PIPE_USAGE_IMMUTABLE: - buffer->domain = NOUVEAU_BO_VRAM; + buffer->domain = NV_VRAM_DOMAIN(screen); break; case PIPE_USAGE_DYNAMIC: /* For most apps, we'd have to do staging transfers to avoid sync * with this usage, and GART -> GART copies would be suboptimal. 
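 * Keeping the buffer in VRAM instead means the write-back that unmap
 * performs is a GART -> VRAM copy, which the DMA engine handles well.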
*/ - buffer->domain = NOUVEAU_BO_VRAM; + buffer->domain = NV_VRAM_DOMAIN(screen); break; case PIPE_USAGE_STAGING: case PIPE_USAGE_STREAM: @@ -644,14 +675,15 @@ nouveau_buffer_create(struct pipe_screen *pscreen, } } else { if (buffer->base.bind & screen->vidmem_bindings) - buffer->domain = NOUVEAU_BO_VRAM; + buffer->domain = NV_VRAM_DOMAIN(screen); else if (buffer->base.bind & screen->sysmem_bindings) buffer->domain = NOUVEAU_BO_GART; } + ret = nouveau_buffer_allocate(screen, buffer, buffer->domain); - if (ret == FALSE) + if (ret == false) goto fail; if (buffer->domain == NOUVEAU_BO_VRAM && screen->hint_buf_keep_sysmem_copy) @@ -659,6 +691,8 @@ nouveau_buffer_create(struct pipe_screen *pscreen, NOUVEAU_DRV_STAT(screen, buf_obj_current_count, 1); + util_range_init(&buffer->valid_buffer_range); + return &buffer->base; fail: @@ -690,23 +724,26 @@ nouveau_user_buffer_create(struct pipe_screen *pscreen, void *ptr, buffer->data = ptr; buffer->status = NOUVEAU_BUFFER_STATUS_USER_MEMORY; + util_range_init(&buffer->valid_buffer_range); + util_range_add(&buffer->valid_buffer_range, 0, bytes); + return &buffer->base; } -static INLINE boolean +static inline bool nouveau_buffer_data_fetch(struct nouveau_context *nv, struct nv04_resource *buf, struct nouveau_bo *bo, unsigned offset, unsigned size) { if (!nouveau_buffer_malloc(buf)) - return FALSE; + return false; if (nouveau_bo_map(bo, NOUVEAU_BO_RD, nv->client)) - return FALSE; + return false; memcpy(buf->data, (uint8_t *)bo->map + offset, size); - return TRUE; + return true; } /* Migrate a linear buffer (vertex, index, constants) USER -> GART -> VRAM. */ -boolean +bool nouveau_buffer_migrate(struct nouveau_context *nv, struct nv04_resource *buf, const unsigned new_domain) { @@ -721,7 +758,7 @@ nouveau_buffer_migrate(struct nouveau_context *nv, if (new_domain == NOUVEAU_BO_GART && old_domain == 0) { if (!nouveau_buffer_allocate(screen, buf, new_domain)) - return FALSE; + return false; ret = nouveau_bo_map(buf->bo, 0, nv->client); if (ret) return ret; @@ -734,7 +771,7 @@ nouveau_buffer_migrate(struct nouveau_context *nv, if (new_domain == NOUVEAU_BO_VRAM) { /* keep a system memory copy of our data in case we hit a fallback */ if (!nouveau_buffer_data_fetch(nv, buf, buf->bo, buf->offset, size)) - return FALSE; + return false; if (nouveau_mesa_debug) debug_printf("migrating %u KiB to VRAM\n", size / 1024); } @@ -748,35 +785,35 @@ nouveau_buffer_migrate(struct nouveau_context *nv, nv->copy_data(nv, buf->bo, buf->offset, new_domain, bo, offset, old_domain, buf->base.width0); - nouveau_bo_ref(NULL, &bo); + nouveau_fence_work(screen->fence.current, nouveau_fence_unref_bo, bo); if (mm) release_allocation(&mm, screen->fence.current); } else if (new_domain == NOUVEAU_BO_VRAM && old_domain == 0) { struct nouveau_transfer tx; if (!nouveau_buffer_allocate(screen, buf, NOUVEAU_BO_VRAM)) - return FALSE; + return false; tx.base.resource = &buf->base; tx.base.box.x = 0; tx.base.box.width = buf->base.width0; tx.bo = NULL; tx.map = NULL; - if (!nouveau_transfer_staging(nv, &tx, FALSE)) - return FALSE; + if (!nouveau_transfer_staging(nv, &tx, false)) + return false; nouveau_transfer_write(nv, &tx, 0, tx.base.box.width); nouveau_buffer_transfer_del(nv, &tx); } else - return FALSE; + return false; assert(buf->domain == new_domain); - return TRUE; + return true; } /* Migrate data from glVertexAttribPointer(non-VBO) user buffers to GART. * We'd like to only allocate @size bytes here, but then we'd have to rebase * the vertex indices ... 
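 * Instead, storage for the whole range up to base + size is allocated and
 * only the requested window [base, base + size) is copied in, so the
 * original index values remain valid.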
*/ -boolean +bool nouveau_user_buffer_upload(struct nouveau_context *nv, struct nv04_resource *buf, unsigned base, unsigned size) @@ -788,20 +825,53 @@ nouveau_user_buffer_upload(struct nouveau_context *nv, buf->base.width0 = base + size; if (!nouveau_buffer_reallocate(screen, buf, NOUVEAU_BO_GART)) - return FALSE; + return false; ret = nouveau_bo_map(buf->bo, 0, nv->client); if (ret) - return FALSE; + return false; memcpy((uint8_t *)buf->bo->map + buf->offset + base, buf->data + base, size); - return TRUE; + return true; +} + +/* Invalidate underlying buffer storage, reset fences, reallocate to non-busy + * buffer. + */ +void +nouveau_buffer_invalidate(struct pipe_context *pipe, + struct pipe_resource *resource) +{ + struct nouveau_context *nv = nouveau_context(pipe); + struct nv04_resource *buf = nv04_resource(resource); + int ref = buf->base.reference.count - 1; + + /* Shared buffers shouldn't get reallocated */ + if (unlikely(buf->base.bind & PIPE_BIND_SHARED)) + return; + + /* We can't touch persistent/coherent buffers */ + if (buf->base.flags & (PIPE_RESOURCE_FLAG_MAP_PERSISTENT | + PIPE_RESOURCE_FLAG_MAP_COHERENT)) + return; + + /* If the buffer is sub-allocated and not currently being written, just + * wipe the valid buffer range. Otherwise we have to create fresh + * storage. (We don't keep track of fences for non-sub-allocated BO's.) + */ + if (buf->mm && !nouveau_buffer_busy(buf, PIPE_TRANSFER_WRITE)) { + util_range_set_empty(&buf->valid_buffer_range); + } else { + nouveau_buffer_reallocate(nv->screen, buf, buf->domain); + if (ref > 0) /* any references inside context possible ? */ + nv->invalidate_resource_storage(nv, &buf->base, ref); + } } /* Scratch data allocation. */ -static INLINE int +static inline int nouveau_scratch_bo_alloc(struct nouveau_context *nv, struct nouveau_bo **pbo, unsigned size) { @@ -809,17 +879,28 @@ nouveau_scratch_bo_alloc(struct nouveau_context *nv, struct nouveau_bo **pbo, 4096, size, NULL, pbo); } +static void +nouveau_scratch_unref_bos(void *d) +{ + struct runout *b = d; + int i; + + for (i = 0; i < b->nr; ++i) + nouveau_bo_ref(NULL, &b->bo[i]); + + FREE(b); +} + void nouveau_scratch_runout_release(struct nouveau_context *nv) { - if (!nv->scratch.nr_runout) + if (!nv->scratch.runout) + return; + + if (!nouveau_fence_work(nv->screen->fence.current, nouveau_scratch_unref_bos, + nv->scratch.runout)) return; - do { - --nv->scratch.nr_runout; - nouveau_bo_ref(NULL, &nv->scratch.runout[nv->scratch.nr_runout]); - } while (nv->scratch.nr_runout); - FREE(nv->scratch.runout); nv->scratch.end = 0; nv->scratch.runout = NULL; } @@ -827,25 +908,30 @@ nouveau_scratch_runout_release(struct nouveau_context *nv) /* Allocate an extra bo if we can't fit everything we need simultaneously. * (Could happen for very large user arrays.) */ -static INLINE boolean +static inline bool nouveau_scratch_runout(struct nouveau_context *nv, unsigned size) { int ret; - const unsigned n = nv->scratch.nr_runout++; - - nv->scratch.runout = REALLOC(nv->scratch.runout, - (n + 0) * sizeof(*nv->scratch.runout), - (n + 1) * sizeof(*nv->scratch.runout)); - nv->scratch.runout[n] = NULL; + unsigned n; - ret = nouveau_scratch_bo_alloc(nv, &nv->scratch.runout[n], size); + if (nv->scratch.runout) + n = nv->scratch.runout->nr; + else + n = 0; + nv->scratch.runout = REALLOC(nv->scratch.runout, n == 0 ? 
0 : + (sizeof(*nv->scratch.runout) + (n + 0) * sizeof(void *)), + sizeof(*nv->scratch.runout) + (n + 1) * sizeof(void *)); + nv->scratch.runout->nr = n + 1; + nv->scratch.runout->bo[n] = NULL; + + ret = nouveau_scratch_bo_alloc(nv, &nv->scratch.runout->bo[n], size); if (!ret) { - ret = nouveau_bo_map(nv->scratch.runout[n], 0, NULL); + ret = nouveau_bo_map(nv->scratch.runout->bo[n], 0, NULL); if (ret) - nouveau_bo_ref(NULL, &nv->scratch.runout[--nv->scratch.nr_runout]); + nouveau_bo_ref(NULL, &nv->scratch.runout->bo[--nv->scratch.runout->nr]); } if (!ret) { - nv->scratch.current = nv->scratch.runout[n]; + nv->scratch.current = nv->scratch.runout->bo[n]; nv->scratch.offset = 0; nv->scratch.end = size; nv->scratch.map = nv->scratch.current->map; @@ -856,7 +942,7 @@ nouveau_scratch_runout(struct nouveau_context *nv, unsigned size) /* Continue to next scratch buffer, if available (no wrapping, large enough). * Allocate it if it has not yet been created. */ -static INLINE boolean +static inline bool nouveau_scratch_next(struct nouveau_context *nv, unsigned size) { struct nouveau_bo *bo; @@ -864,14 +950,14 @@ nouveau_scratch_next(struct nouveau_context *nv, unsigned size) const unsigned i = (nv->scratch.id + 1) % NOUVEAU_MAX_SCRATCH_BUFS; if ((size > nv->scratch.bo_size) || (i == nv->scratch.wrap)) - return FALSE; + return false; nv->scratch.id = i; bo = nv->scratch.bo[i]; if (!bo) { ret = nouveau_scratch_bo_alloc(nv, &bo, nv->scratch.bo_size); if (ret) - return FALSE; + return false; nv->scratch.bo[i] = bo; } nv->scratch.current = bo; @@ -884,10 +970,10 @@ nouveau_scratch_next(struct nouveau_context *nv, unsigned size) return !ret; } -static boolean +static bool nouveau_scratch_more(struct nouveau_context *nv, unsigned min_size) { - boolean ret; + bool ret; ret = nouveau_scratch_next(nv, min_size); if (!ret)
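
The thread running through this patch is the new valid_buffer_range
bookkeeping: every path that initializes buffer contents (transfer unmap,
transfer_flush_region, nouveau_copy_buffer, user buffer creation) records
the byte range it wrote, and nouveau_buffer_transfer_map consults it to
promote writes to never-written ranges into DISCARD_RANGE | UNSYNCHRONIZED
mappings that need no fence wait. A minimal standalone sketch of that
bookkeeping follows; the names are hypothetical stand-ins for the real
helpers in util/u_range.h (util_range_init, util_range_add,
util_ranges_intersect, util_range_set_empty).

#include <stdbool.h>

/* One conservative [start, end) interval; adjacent and overlapping
 * writes simply widen it. */
struct range_sketch {
   unsigned start; /* inclusive */
   unsigned end;   /* exclusive */
};

static void
range_set_empty(struct range_sketch *r)
{
   r->start = ~0u;
   r->end = 0;
}

/* Record that [start, end) now holds defined data. */
static void
range_add(struct range_sketch *r, unsigned start, unsigned end)
{
   if (start < r->start)
      r->start = start;
   if (end > r->end)
      r->end = end;
}

/* Does [start, end) touch any byte ever written? An empty range
 * (start > end) intersects nothing. */
static bool
ranges_intersect(const struct range_sketch *r, unsigned start, unsigned end)
{
   return r->start < end && start < r->end;
}

The map-time check then reads as in the hunk above:

   if ((usage & PIPE_TRANSFER_WRITE) &&
       !ranges_intersect(&valid_range, box->x, box->x + box->width))
      usage |= PIPE_TRANSFER_DISCARD_RANGE | PIPE_TRANSFER_UNSYNCHRONIZED;

Being conservative is safe in one direction only: the single interval may
claim more bytes are valid than truly are (once it swallows a gap between
two writes), but it must never miss a write, which is why buffer copies and
explicitly flushed regions are recorded as well.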