inline void
nouveau_buffer_release_gpu_storage(struct nv04_resource *buf)
{
+ assert(!(buf->status & NOUVEAU_BUFFER_STATUS_USER_PTR));
+
if (buf->fence && buf->fence->state < NOUVEAU_FENCE_STATE_FLUSHED) {
nouveau_fence_work(buf->fence, nouveau_fence_unref_bo, buf->bo);
buf->bo = NULL;

void
nouveau_copy_buffer(struct nouveau_context *nv,
struct nv04_resource *dst, unsigned dstx,
struct nv04_resource *src, unsigned srcx, unsigned size)
{
assert(dst->base.target == PIPE_BUFFER && src->base.target == PIPE_BUFFER);
+ assert(!(dst->status & NOUVEAU_BUFFER_STATUS_USER_PTR));
+ assert(!(src->status & NOUVEAU_BUFFER_STATUS_USER_PTR));
+
if (likely(dst->domain) && likely(src->domain)) {
nv->copy_data(nv,
dst->bo, dst->offset + dstx, dst->domain,
src->bo, src->offset + srcx, src->domain, size);

void *
nouveau_resource_map_offset(struct nouveau_context *nv,
struct nv04_resource *res, uint32_t offset,
uint32_t flags)
{
- if (unlikely(res->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY))
+ if (unlikely(res->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY) ||
+ unlikely(res->status & NOUVEAU_BUFFER_STATUS_USER_PTR))
return res->data + offset;
if (res->domain == NOUVEAU_BO_VRAM) {
return (uint8_t *)res->bo->map + res->offset + offset;
}
-
const struct u_resource_vtbl nouveau_buffer_vtbl =
{
u_default_resource_get_handle, /* get_handle */
nouveau_buffer_destroy, /* resource_destroy */
nouveau_buffer_transfer_map, /* transfer_map */
nouveau_buffer_transfer_flush_region, /* transfer_flush_region */
nouveau_buffer_transfer_unmap, /* transfer_unmap */
};
+static void
+nouveau_user_ptr_destroy(struct pipe_screen *pscreen,
+ struct pipe_resource *presource)
+{
+ struct nv04_resource *res = nv04_resource(presource);
+ /* There is no bo or GPU storage to release; the user owns the memory. */
+ FREE(res);
+}
+
+static void *
+nouveau_user_ptr_transfer_map(struct pipe_context *pipe,
+ struct pipe_resource *resource,
+ unsigned level, unsigned usage,
+ const struct pipe_box *box,
+ struct pipe_transfer **ptransfer)
+{
+ struct nouveau_transfer *tx = MALLOC_STRUCT(nouveau_transfer);
+ if (!tx)
+ return NULL;
+ nouveau_buffer_transfer_init(tx, resource, box, usage);
+ *ptransfer = &tx->base;
+ /* The backing store is plain user memory, so mapping just returns the
+ * user's pointer; no bo mapping or GPU synchronization is needed. */
+ return nv04_resource(resource)->data;
+}
+
+static void
+nouveau_user_ptr_transfer_unmap(struct pipe_context *pipe,
+ struct pipe_transfer *transfer)
+{
+ struct nouveau_transfer *tx = nouveau_transfer(transfer);
+ FREE(tx);
+}
+
+const struct u_resource_vtbl nouveau_user_ptr_buffer_vtbl =
+{
+ u_default_resource_get_handle, /* get_handle */
+ nouveau_user_ptr_destroy, /* resource_destroy */
+ nouveau_user_ptr_transfer_map, /* transfer_map */
+ u_default_transfer_flush_region, /* transfer_flush_region */
+ nouveau_user_ptr_transfer_unmap, /* transfer_unmap */
+};
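
With this vtbl in place, mapping a user-ptr buffer never allocates, copies, or waits on a fence; it simply hands back the pointer the application supplied. A minimal caller-side sketch follows (illustrative only, not part of the patch; check_user_ptr_map is a hypothetical helper, and pipe/res are assumed to be a live context and a user-ptr resource):

/* Hypothetical sketch (assumes assert.h, util/u_inlines.h and
 * nouveau_buffer.h are included): mapping a user-ptr buffer returns the
 * application's pointer unchanged. */
static void
check_user_ptr_map(struct pipe_context *pipe, struct pipe_resource *res)
{
   struct pipe_transfer *xfer;
   void *p = pipe_buffer_map(pipe, res, PIPE_TRANSFER_READ_WRITE, &xfer);
   assert(p == nv04_resource(res)->data); /* same pointer, no copy */
   pipe_buffer_unmap(pipe, xfer);         /* only frees the transfer */
}
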
+
struct pipe_resource *
nouveau_buffer_create(struct pipe_screen *pscreen,
const struct pipe_resource *templ)
return NULL;
}
+struct pipe_resource *
+nouveau_buffer_create_from_user(struct pipe_screen *pscreen,
+ const struct pipe_resource *templ,
+ void *user_ptr)
+{
+ struct nv04_resource *buffer;
+
+ buffer = CALLOC_STRUCT(nv04_resource);
+ if (!buffer)
+ return NULL;
+
+ buffer->base = *templ;
+ buffer->vtbl = &nouveau_user_ptr_buffer_vtbl;
+ /* Set address and data to the same pointer so existing code can use
+ * either field. This is correct because the user allocation is mapped
+ * into the GPU's VM at the same virtual address it has on the CPU.
+ */
+ buffer->address = (uint64_t)user_ptr;
+ buffer->data = user_ptr;
+ buffer->status = NOUVEAU_BUFFER_STATUS_USER_PTR;
+ buffer->base.screen = pscreen;
+
+ pipe_reference_init(&buffer->base.reference, 1);
+
+ return &buffer->base;
+}
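
As a usage sketch (hypothetical, not part of the patch): a caller fills out a PIPE_BUFFER template whose width0 carries the size in bytes, then hands over a pointer it continues to own:

/* Hypothetical usage sketch; wrap_user_allocation, user_data and size are
 * placeholders (memset requires string.h). */
static struct pipe_resource *
wrap_user_allocation(struct pipe_screen *pscreen, void *user_data,
                     unsigned size)
{
   struct pipe_resource templ;

   memset(&templ, 0, sizeof(templ));
   templ.target = PIPE_BUFFER;
   templ.format = PIPE_FORMAT_R8_UNORM;
   templ.width0 = size;   /* buffer size in bytes */
   templ.height0 = 1;
   templ.depth0 = 1;
   templ.array_size = 1;

   /* The caller retains ownership of user_data; it must stay valid for
    * the lifetime of the returned resource. */
   return nouveau_buffer_create_from_user(pscreen, &templ, user_data);
}
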
struct pipe_resource *
nouveau_user_buffer_create(struct pipe_screen *pscreen, void *ptr,
nouveau_buffer_migrate(struct nouveau_context *nv,
struct nv04_resource *buf, const unsigned new_domain)
{
+ assert(!(buf->status & NOUVEAU_BUFFER_STATUS_USER_PTR));
+
struct nouveau_screen *screen = nv->screen;
struct nouveau_bo *bo;
const unsigned old_domain = buf->domain;

bool
nouveau_user_buffer_upload(struct nouveau_context *nv,
struct nv04_resource *buf,
unsigned base, unsigned size)
{
+ assert(!(buf->status & NOUVEAU_BUFFER_STATUS_USER_PTR));
+
struct nouveau_screen *screen = nouveau_screen(buf->base.screen);
int ret;

void
nouveau_buffer_invalidate(struct pipe_context *pipe,
struct pipe_resource *resource)
{
struct nv04_resource *buf = nv04_resource(resource);
int ref = buf->base.reference.count - 1;
+ assert(!(buf->status & NOUVEAU_BUFFER_STATUS_USER_PTR));
+
/* Shared buffers shouldn't get reallocated */
if (unlikely(buf->base.bind & PIPE_BIND_SHARED))
return;
*
* USER_MEMORY: resource->data is a pointer to client memory and may change
* between GL calls
+ *
+ * USER_PTR: bo is backed by user memory mapped into the GPU's VM
*/
#define NOUVEAU_BUFFER_STATUS_GPU_READING (1 << 0)
#define NOUVEAU_BUFFER_STATUS_GPU_WRITING (1 << 1)
#define NOUVEAU_BUFFER_STATUS_DIRTY (1 << 2)
+#define NOUVEAU_BUFFER_STATUS_USER_PTR (1 << 6)
#define NOUVEAU_BUFFER_STATUS_USER_MEMORY (1 << 7)
#define NOUVEAU_BUFFER_STATUS_REALLOC_MASK NOUVEAU_BUFFER_STATUS_USER_MEMORY
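
Note that the new bit is deliberately left out of NOUVEAU_BUFFER_STATUS_REALLOC_MASK: a user-ptr buffer must never be reallocated behind the application's back, which the asserts added in the .c hunks above enforce. A hypothetical helper (not in the patch, which open-codes this test in its asserts) makes the intended check explicit:

/* Hypothetical helper, not part of the patch. */
static inline bool
nouveau_buffer_is_user_ptr(const struct nv04_resource *res)
{
   return (res->status & NOUVEAU_BUFFER_STATUS_USER_PTR) != 0;
}
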
nouveau_buffer_create(struct pipe_screen *pscreen,
const struct pipe_resource *templ);
+struct pipe_resource *
+nouveau_buffer_create_from_user(struct pipe_screen *pscreen,
+ const struct pipe_resource *templ,
+ void *user_ptr);
+
struct pipe_resource *
nouveau_user_buffer_create(struct pipe_screen *screen, void *ptr,
unsigned bytes, unsigned usage);
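
The screen-side wiring is not part of these hunks. As a hedged sketch: gallium's pipe_screen::resource_from_user_memory hook takes exactly this signature, so a screen that also reports PIPE_CAP_RESOURCE_FROM_USER_MEMORY could plug the new constructor in directly:

/* Hypothetical wiring, not shown in this patch. */
pscreen->resource_from_user_memory = nouveau_buffer_create_from_user;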