if (buf->bo)
buf->address = buf->bo->offset + buf->offset;
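+ /* Freshly allocated storage has undefined contents, so no range of the
+  * buffer holds valid data yet.
+  */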
+ util_range_set_empty(&buf->valid_buffer_range);
+
return TRUE;
}
nouveau_fence_ref(NULL, &res->fence);
nouveau_fence_ref(NULL, &res->fence_wr);
+ util_range_destroy(&res->valid_buffer_range);
+
FREE(res);
NOUVEAU_DRV_STAT(nouveau_screen(pscreen), buf_obj_current_count, -1);
return FALSE;
if (unlikely(buf->base.bind & PIPE_BIND_SHARED))
return FALSE;
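+ /* A persistent mapping pins the current storage for the lifetime of the
+  * map, so the buffer must not be discarded and reallocated.
+  */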
+ if (unlikely(usage & PIPE_TRANSFER_PERSISTENT))
+ return FALSE;
return buf->mm && nouveau_buffer_busy(buf, PIPE_TRANSFER_WRITE);
}
if (usage & PIPE_TRANSFER_WRITE)
NOUVEAU_DRV_STAT(nv->screen, buf_transfers_wr, 1);
+ /* If we are trying to write to an uninitialized range, the user shouldn't
+ * care what was there before. So we can treat the write as if the target
+ * range were being discarded. Furthermore, even if this buffer is busy
+ * due to GPU activity, the GPU cannot depend on its uninitialized
+ * contents, so we can also treat the write as unsynchronized.
+ */
+ if ((usage & PIPE_TRANSFER_WRITE) &&
+ !util_ranges_intersect(&buf->valid_buffer_range, box->x, box->x + box->width))
+ usage |= PIPE_TRANSFER_DISCARD_RANGE | PIPE_TRANSFER_UNSYNCHRONIZED;
+
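+ /* For persistent mappings, synchronization is the client's
+  * responsibility, so the driver can map without stalling.
+  */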
+ if (usage & PIPE_TRANSFER_PERSISTENT)
+ usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
+
if (buf->domain == NOUVEAU_BO_VRAM) {
if (usage & NOUVEAU_TRANSFER_DISCARD) {
/* Set up a staging area for the user to write to. It will be copied
const struct pipe_box *box)
{
struct nouveau_transfer *tx = nouveau_transfer(transfer);
+ struct nv04_resource *buf = nv04_resource(transfer->resource);
+
if (tx->map)
nouveau_transfer_write(nouveau_context(pipe), tx, box->x, box->width);
+
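+ /* The flushed region now holds well-defined data; box->x is relative to
+  * the start of the mapped box, hence the tx->base.box.x offset.
+  */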
+ util_range_add(&buf->valid_buffer_range,
+ tx->base.box.x + box->x,
+ tx->base.box.x + box->x + box->width);
}
/* Unmap stage of the transfer. If it was a WRITE transfer and the map that
* was returned was not the real resource's data, this needs to transfer the
* data back to the resource.
*
- * Also marks vbo/cb dirty if the buffer's binding
+ * Also marks vbo dirty if the buffer was bound as a vertex or index buffer.
*/
static void
nouveau_buffer_transfer_unmap(struct pipe_context *pipe,
/* make sure we invalidate dedicated caches */
if (bind & (PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER))
nv->vbo_dirty = TRUE;
- if (bind & (PIPE_BIND_CONSTANT_BUFFER))
- nv->cb_dirty = TRUE;
}
+
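+ /* After a write transfer, the entire mapped box holds valid data. */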
+ util_range_add(&buf->valid_buffer_range,
+ tx->base.box.x, tx->base.box.x + tx->base.box.width);
}
if (!tx->bo && (tx->base.usage & PIPE_TRANSFER_WRITE))
&dst->base, 0, dstx, 0, 0,
&src->base, 0, &src_box);
}
+
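+ /* The copy initializes the destination range, so mark it valid. */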
+ util_range_add(&dst->valid_buffer_range, dstx, dstx + size);
}
pipe_reference_init(&buffer->base.reference, 1);
buffer->base.screen = pscreen;
- if (buffer->base.bind &
- (screen->vidmem_bindings & screen->sysmem_bindings)) {
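+ /* Persistent and coherent mappings require storage the CPU can access
+  * directly and that never moves, so place such buffers in GART.
+  */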
+ if (buffer->base.flags & (PIPE_RESOURCE_FLAG_MAP_PERSISTENT |
+ PIPE_RESOURCE_FLAG_MAP_COHERENT)) {
+ buffer->domain = NOUVEAU_BO_GART;
+ } else if (buffer->base.bind &
+ (screen->vidmem_bindings & screen->sysmem_bindings)) {
switch (buffer->base.usage) {
case PIPE_USAGE_DEFAULT:
case PIPE_USAGE_IMMUTABLE:
- case PIPE_USAGE_STATIC:
buffer->domain = NOUVEAU_BO_VRAM;
break;
case PIPE_USAGE_DYNAMIC:
NOUVEAU_DRV_STAT(screen, buf_obj_current_count, 1);
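+ /* New buffers start out with no valid contents. */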
+ util_range_init(&buffer->valid_buffer_range);
+
return &buffer->base;
fail:
buffer->data = ptr;
buffer->status = NOUVEAU_BUFFER_STATUS_USER_MEMORY;
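+ /* A user buffer wraps caller-provided memory, so its entire contents are
+  * valid from the start.
+  */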
+ util_range_init(&buffer->valid_buffer_range);
+ util_range_add(&buffer->valid_buffer_range, 0, bytes);
+
return &buffer->base;
}